2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include "hci_request.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
42 #define MGMT_VERSION 1
43 #define MGMT_REVISION 18
45 static const u16 mgmt_commands
[] = {
46 MGMT_OP_READ_INDEX_LIST
,
49 MGMT_OP_SET_DISCOVERABLE
,
50 MGMT_OP_SET_CONNECTABLE
,
51 MGMT_OP_SET_FAST_CONNECTABLE
,
53 MGMT_OP_SET_LINK_SECURITY
,
57 MGMT_OP_SET_DEV_CLASS
,
58 MGMT_OP_SET_LOCAL_NAME
,
61 MGMT_OP_LOAD_LINK_KEYS
,
62 MGMT_OP_LOAD_LONG_TERM_KEYS
,
64 MGMT_OP_GET_CONNECTIONS
,
65 MGMT_OP_PIN_CODE_REPLY
,
66 MGMT_OP_PIN_CODE_NEG_REPLY
,
67 MGMT_OP_SET_IO_CAPABILITY
,
69 MGMT_OP_CANCEL_PAIR_DEVICE
,
70 MGMT_OP_UNPAIR_DEVICE
,
71 MGMT_OP_USER_CONFIRM_REPLY
,
72 MGMT_OP_USER_CONFIRM_NEG_REPLY
,
73 MGMT_OP_USER_PASSKEY_REPLY
,
74 MGMT_OP_USER_PASSKEY_NEG_REPLY
,
75 MGMT_OP_READ_LOCAL_OOB_DATA
,
76 MGMT_OP_ADD_REMOTE_OOB_DATA
,
77 MGMT_OP_REMOVE_REMOTE_OOB_DATA
,
78 MGMT_OP_START_DISCOVERY
,
79 MGMT_OP_STOP_DISCOVERY
,
82 MGMT_OP_UNBLOCK_DEVICE
,
83 MGMT_OP_SET_DEVICE_ID
,
84 MGMT_OP_SET_ADVERTISING
,
86 MGMT_OP_SET_STATIC_ADDRESS
,
87 MGMT_OP_SET_SCAN_PARAMS
,
88 MGMT_OP_SET_SECURE_CONN
,
89 MGMT_OP_SET_DEBUG_KEYS
,
92 MGMT_OP_GET_CONN_INFO
,
93 MGMT_OP_GET_CLOCK_INFO
,
95 MGMT_OP_REMOVE_DEVICE
,
96 MGMT_OP_LOAD_CONN_PARAM
,
97 MGMT_OP_READ_UNCONF_INDEX_LIST
,
98 MGMT_OP_READ_CONFIG_INFO
,
99 MGMT_OP_SET_EXTERNAL_CONFIG
,
100 MGMT_OP_SET_PUBLIC_ADDRESS
,
101 MGMT_OP_START_SERVICE_DISCOVERY
,
102 MGMT_OP_READ_LOCAL_OOB_EXT_DATA
,
103 MGMT_OP_READ_EXT_INDEX_LIST
,
104 MGMT_OP_READ_ADV_FEATURES
,
105 MGMT_OP_ADD_ADVERTISING
,
106 MGMT_OP_REMOVE_ADVERTISING
,
107 MGMT_OP_GET_ADV_SIZE_INFO
,
108 MGMT_OP_START_LIMITED_DISCOVERY
,
109 MGMT_OP_READ_EXT_INFO
,
110 MGMT_OP_SET_APPEARANCE
,
111 MGMT_OP_SET_BLOCKED_KEYS
,
112 MGMT_OP_SET_WIDEBAND_SPEECH
,
113 MGMT_OP_READ_SECURITY_INFO
,
114 MGMT_OP_READ_EXP_FEATURES_INFO
,
115 MGMT_OP_SET_EXP_FEATURE
,
116 MGMT_OP_READ_DEF_SYSTEM_CONFIG
,
117 MGMT_OP_SET_DEF_SYSTEM_CONFIG
,
118 MGMT_OP_READ_DEF_RUNTIME_CONFIG
,
119 MGMT_OP_SET_DEF_RUNTIME_CONFIG
,
120 MGMT_OP_GET_DEVICE_FLAGS
,
121 MGMT_OP_SET_DEVICE_FLAGS
,
122 MGMT_OP_READ_ADV_MONITOR_FEATURES
,
123 MGMT_OP_ADD_ADV_PATTERNS_MONITOR
,
124 MGMT_OP_REMOVE_ADV_MONITOR
,
125 MGMT_OP_ADD_EXT_ADV_PARAMS
,
126 MGMT_OP_ADD_EXT_ADV_DATA
,
129 static const u16 mgmt_events
[] = {
130 MGMT_EV_CONTROLLER_ERROR
,
132 MGMT_EV_INDEX_REMOVED
,
133 MGMT_EV_NEW_SETTINGS
,
134 MGMT_EV_CLASS_OF_DEV_CHANGED
,
135 MGMT_EV_LOCAL_NAME_CHANGED
,
136 MGMT_EV_NEW_LINK_KEY
,
137 MGMT_EV_NEW_LONG_TERM_KEY
,
138 MGMT_EV_DEVICE_CONNECTED
,
139 MGMT_EV_DEVICE_DISCONNECTED
,
140 MGMT_EV_CONNECT_FAILED
,
141 MGMT_EV_PIN_CODE_REQUEST
,
142 MGMT_EV_USER_CONFIRM_REQUEST
,
143 MGMT_EV_USER_PASSKEY_REQUEST
,
145 MGMT_EV_DEVICE_FOUND
,
147 MGMT_EV_DEVICE_BLOCKED
,
148 MGMT_EV_DEVICE_UNBLOCKED
,
149 MGMT_EV_DEVICE_UNPAIRED
,
150 MGMT_EV_PASSKEY_NOTIFY
,
153 MGMT_EV_DEVICE_ADDED
,
154 MGMT_EV_DEVICE_REMOVED
,
155 MGMT_EV_NEW_CONN_PARAM
,
156 MGMT_EV_UNCONF_INDEX_ADDED
,
157 MGMT_EV_UNCONF_INDEX_REMOVED
,
158 MGMT_EV_NEW_CONFIG_OPTIONS
,
159 MGMT_EV_EXT_INDEX_ADDED
,
160 MGMT_EV_EXT_INDEX_REMOVED
,
161 MGMT_EV_LOCAL_OOB_DATA_UPDATED
,
162 MGMT_EV_ADVERTISING_ADDED
,
163 MGMT_EV_ADVERTISING_REMOVED
,
164 MGMT_EV_EXT_INFO_CHANGED
,
165 MGMT_EV_PHY_CONFIGURATION_CHANGED
,
166 MGMT_EV_EXP_FEATURE_CHANGED
,
167 MGMT_EV_DEVICE_FLAGS_CHANGED
,
168 MGMT_EV_CONTROLLER_SUSPEND
,
169 MGMT_EV_CONTROLLER_RESUME
,
172 static const u16 mgmt_untrusted_commands
[] = {
173 MGMT_OP_READ_INDEX_LIST
,
175 MGMT_OP_READ_UNCONF_INDEX_LIST
,
176 MGMT_OP_READ_CONFIG_INFO
,
177 MGMT_OP_READ_EXT_INDEX_LIST
,
178 MGMT_OP_READ_EXT_INFO
,
179 MGMT_OP_READ_SECURITY_INFO
,
180 MGMT_OP_READ_EXP_FEATURES_INFO
,
181 MGMT_OP_READ_DEF_SYSTEM_CONFIG
,
182 MGMT_OP_READ_DEF_RUNTIME_CONFIG
,
185 static const u16 mgmt_untrusted_events
[] = {
187 MGMT_EV_INDEX_REMOVED
,
188 MGMT_EV_NEW_SETTINGS
,
189 MGMT_EV_CLASS_OF_DEV_CHANGED
,
190 MGMT_EV_LOCAL_NAME_CHANGED
,
191 MGMT_EV_UNCONF_INDEX_ADDED
,
192 MGMT_EV_UNCONF_INDEX_REMOVED
,
193 MGMT_EV_NEW_CONFIG_OPTIONS
,
194 MGMT_EV_EXT_INDEX_ADDED
,
195 MGMT_EV_EXT_INDEX_REMOVED
,
196 MGMT_EV_EXT_INFO_CHANGED
,
197 MGMT_EV_EXP_FEATURE_CHANGED
,
198 MGMT_EV_ADV_MONITOR_ADDED
,
199 MGMT_EV_ADV_MONITOR_REMOVED
,
202 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
204 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
205 "\x00\x00\x00\x00\x00\x00\x00\x00"
207 /* HCI to MGMT error code conversion table */
208 static const u8 mgmt_status_table
[] = {
210 MGMT_STATUS_UNKNOWN_COMMAND
, /* Unknown Command */
211 MGMT_STATUS_NOT_CONNECTED
, /* No Connection */
212 MGMT_STATUS_FAILED
, /* Hardware Failure */
213 MGMT_STATUS_CONNECT_FAILED
, /* Page Timeout */
214 MGMT_STATUS_AUTH_FAILED
, /* Authentication Failed */
215 MGMT_STATUS_AUTH_FAILED
, /* PIN or Key Missing */
216 MGMT_STATUS_NO_RESOURCES
, /* Memory Full */
217 MGMT_STATUS_TIMEOUT
, /* Connection Timeout */
218 MGMT_STATUS_NO_RESOURCES
, /* Max Number of Connections */
219 MGMT_STATUS_NO_RESOURCES
, /* Max Number of SCO Connections */
220 MGMT_STATUS_ALREADY_CONNECTED
, /* ACL Connection Exists */
221 MGMT_STATUS_BUSY
, /* Command Disallowed */
222 MGMT_STATUS_NO_RESOURCES
, /* Rejected Limited Resources */
223 MGMT_STATUS_REJECTED
, /* Rejected Security */
224 MGMT_STATUS_REJECTED
, /* Rejected Personal */
225 MGMT_STATUS_TIMEOUT
, /* Host Timeout */
226 MGMT_STATUS_NOT_SUPPORTED
, /* Unsupported Feature */
227 MGMT_STATUS_INVALID_PARAMS
, /* Invalid Parameters */
228 MGMT_STATUS_DISCONNECTED
, /* OE User Ended Connection */
229 MGMT_STATUS_NO_RESOURCES
, /* OE Low Resources */
230 MGMT_STATUS_DISCONNECTED
, /* OE Power Off */
231 MGMT_STATUS_DISCONNECTED
, /* Connection Terminated */
232 MGMT_STATUS_BUSY
, /* Repeated Attempts */
233 MGMT_STATUS_REJECTED
, /* Pairing Not Allowed */
234 MGMT_STATUS_FAILED
, /* Unknown LMP PDU */
235 MGMT_STATUS_NOT_SUPPORTED
, /* Unsupported Remote Feature */
236 MGMT_STATUS_REJECTED
, /* SCO Offset Rejected */
237 MGMT_STATUS_REJECTED
, /* SCO Interval Rejected */
238 MGMT_STATUS_REJECTED
, /* Air Mode Rejected */
239 MGMT_STATUS_INVALID_PARAMS
, /* Invalid LMP Parameters */
240 MGMT_STATUS_FAILED
, /* Unspecified Error */
241 MGMT_STATUS_NOT_SUPPORTED
, /* Unsupported LMP Parameter Value */
242 MGMT_STATUS_FAILED
, /* Role Change Not Allowed */
243 MGMT_STATUS_TIMEOUT
, /* LMP Response Timeout */
244 MGMT_STATUS_FAILED
, /* LMP Error Transaction Collision */
245 MGMT_STATUS_FAILED
, /* LMP PDU Not Allowed */
246 MGMT_STATUS_REJECTED
, /* Encryption Mode Not Accepted */
247 MGMT_STATUS_FAILED
, /* Unit Link Key Used */
248 MGMT_STATUS_NOT_SUPPORTED
, /* QoS Not Supported */
249 MGMT_STATUS_TIMEOUT
, /* Instant Passed */
250 MGMT_STATUS_NOT_SUPPORTED
, /* Pairing Not Supported */
251 MGMT_STATUS_FAILED
, /* Transaction Collision */
252 MGMT_STATUS_INVALID_PARAMS
, /* Unacceptable Parameter */
253 MGMT_STATUS_REJECTED
, /* QoS Rejected */
254 MGMT_STATUS_NOT_SUPPORTED
, /* Classification Not Supported */
255 MGMT_STATUS_REJECTED
, /* Insufficient Security */
256 MGMT_STATUS_INVALID_PARAMS
, /* Parameter Out Of Range */
257 MGMT_STATUS_BUSY
, /* Role Switch Pending */
258 MGMT_STATUS_FAILED
, /* Slot Violation */
259 MGMT_STATUS_FAILED
, /* Role Switch Failed */
260 MGMT_STATUS_INVALID_PARAMS
, /* EIR Too Large */
261 MGMT_STATUS_NOT_SUPPORTED
, /* Simple Pairing Not Supported */
262 MGMT_STATUS_BUSY
, /* Host Busy Pairing */
263 MGMT_STATUS_REJECTED
, /* Rejected, No Suitable Channel */
264 MGMT_STATUS_BUSY
, /* Controller Busy */
265 MGMT_STATUS_INVALID_PARAMS
, /* Unsuitable Connection Interval */
266 MGMT_STATUS_TIMEOUT
, /* Directed Advertising Timeout */
267 MGMT_STATUS_AUTH_FAILED
, /* Terminated Due to MIC Failure */
268 MGMT_STATUS_CONNECT_FAILED
, /* Connection Establishment Failed */
269 MGMT_STATUS_CONNECT_FAILED
, /* MAC Connection Failed */
272 static u8
mgmt_status(u8 hci_status
)
274 if (hci_status
< ARRAY_SIZE(mgmt_status_table
))
275 return mgmt_status_table
[hci_status
];
277 return MGMT_STATUS_FAILED
;
280 static int mgmt_index_event(u16 event
, struct hci_dev
*hdev
, void *data
,
283 return mgmt_send_event(event
, hdev
, HCI_CHANNEL_CONTROL
, data
, len
,
287 static int mgmt_limited_event(u16 event
, struct hci_dev
*hdev
, void *data
,
288 u16 len
, int flag
, struct sock
*skip_sk
)
290 return mgmt_send_event(event
, hdev
, HCI_CHANNEL_CONTROL
, data
, len
,
294 static int mgmt_event(u16 event
, struct hci_dev
*hdev
, void *data
, u16 len
,
295 struct sock
*skip_sk
)
297 return mgmt_send_event(event
, hdev
, HCI_CHANNEL_CONTROL
, data
, len
,
298 HCI_SOCK_TRUSTED
, skip_sk
);
301 static u8
le_addr_type(u8 mgmt_addr_type
)
303 if (mgmt_addr_type
== BDADDR_LE_PUBLIC
)
304 return ADDR_LE_DEV_PUBLIC
;
306 return ADDR_LE_DEV_RANDOM
;
309 void mgmt_fill_version_info(void *ver
)
311 struct mgmt_rp_read_version
*rp
= ver
;
313 rp
->version
= MGMT_VERSION
;
314 rp
->revision
= cpu_to_le16(MGMT_REVISION
);
317 static int read_version(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
320 struct mgmt_rp_read_version rp
;
322 bt_dev_dbg(hdev
, "sock %p", sk
);
324 mgmt_fill_version_info(&rp
);
326 return mgmt_cmd_complete(sk
, MGMT_INDEX_NONE
, MGMT_OP_READ_VERSION
, 0,
330 static int read_commands(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
333 struct mgmt_rp_read_commands
*rp
;
334 u16 num_commands
, num_events
;
338 bt_dev_dbg(hdev
, "sock %p", sk
);
340 if (hci_sock_test_flag(sk
, HCI_SOCK_TRUSTED
)) {
341 num_commands
= ARRAY_SIZE(mgmt_commands
);
342 num_events
= ARRAY_SIZE(mgmt_events
);
344 num_commands
= ARRAY_SIZE(mgmt_untrusted_commands
);
345 num_events
= ARRAY_SIZE(mgmt_untrusted_events
);
348 rp_size
= sizeof(*rp
) + ((num_commands
+ num_events
) * sizeof(u16
));
350 rp
= kmalloc(rp_size
, GFP_KERNEL
);
354 rp
->num_commands
= cpu_to_le16(num_commands
);
355 rp
->num_events
= cpu_to_le16(num_events
);
357 if (hci_sock_test_flag(sk
, HCI_SOCK_TRUSTED
)) {
358 __le16
*opcode
= rp
->opcodes
;
360 for (i
= 0; i
< num_commands
; i
++, opcode
++)
361 put_unaligned_le16(mgmt_commands
[i
], opcode
);
363 for (i
= 0; i
< num_events
; i
++, opcode
++)
364 put_unaligned_le16(mgmt_events
[i
], opcode
);
366 __le16
*opcode
= rp
->opcodes
;
368 for (i
= 0; i
< num_commands
; i
++, opcode
++)
369 put_unaligned_le16(mgmt_untrusted_commands
[i
], opcode
);
371 for (i
= 0; i
< num_events
; i
++, opcode
++)
372 put_unaligned_le16(mgmt_untrusted_events
[i
], opcode
);
375 err
= mgmt_cmd_complete(sk
, MGMT_INDEX_NONE
, MGMT_OP_READ_COMMANDS
, 0,
382 static int read_index_list(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
385 struct mgmt_rp_read_index_list
*rp
;
391 bt_dev_dbg(hdev
, "sock %p", sk
);
393 read_lock(&hci_dev_list_lock
);
396 list_for_each_entry(d
, &hci_dev_list
, list
) {
397 if (d
->dev_type
== HCI_PRIMARY
&&
398 !hci_dev_test_flag(d
, HCI_UNCONFIGURED
))
402 rp_len
= sizeof(*rp
) + (2 * count
);
403 rp
= kmalloc(rp_len
, GFP_ATOMIC
);
405 read_unlock(&hci_dev_list_lock
);
410 list_for_each_entry(d
, &hci_dev_list
, list
) {
411 if (hci_dev_test_flag(d
, HCI_SETUP
) ||
412 hci_dev_test_flag(d
, HCI_CONFIG
) ||
413 hci_dev_test_flag(d
, HCI_USER_CHANNEL
))
416 /* Devices marked as raw-only are neither configured
417 * nor unconfigured controllers.
419 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &d
->quirks
))
422 if (d
->dev_type
== HCI_PRIMARY
&&
423 !hci_dev_test_flag(d
, HCI_UNCONFIGURED
)) {
424 rp
->index
[count
++] = cpu_to_le16(d
->id
);
425 bt_dev_dbg(hdev
, "Added hci%u", d
->id
);
429 rp
->num_controllers
= cpu_to_le16(count
);
430 rp_len
= sizeof(*rp
) + (2 * count
);
432 read_unlock(&hci_dev_list_lock
);
434 err
= mgmt_cmd_complete(sk
, MGMT_INDEX_NONE
, MGMT_OP_READ_INDEX_LIST
,
442 static int read_unconf_index_list(struct sock
*sk
, struct hci_dev
*hdev
,
443 void *data
, u16 data_len
)
445 struct mgmt_rp_read_unconf_index_list
*rp
;
451 bt_dev_dbg(hdev
, "sock %p", sk
);
453 read_lock(&hci_dev_list_lock
);
456 list_for_each_entry(d
, &hci_dev_list
, list
) {
457 if (d
->dev_type
== HCI_PRIMARY
&&
458 hci_dev_test_flag(d
, HCI_UNCONFIGURED
))
462 rp_len
= sizeof(*rp
) + (2 * count
);
463 rp
= kmalloc(rp_len
, GFP_ATOMIC
);
465 read_unlock(&hci_dev_list_lock
);
470 list_for_each_entry(d
, &hci_dev_list
, list
) {
471 if (hci_dev_test_flag(d
, HCI_SETUP
) ||
472 hci_dev_test_flag(d
, HCI_CONFIG
) ||
473 hci_dev_test_flag(d
, HCI_USER_CHANNEL
))
476 /* Devices marked as raw-only are neither configured
477 * nor unconfigured controllers.
479 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &d
->quirks
))
482 if (d
->dev_type
== HCI_PRIMARY
&&
483 hci_dev_test_flag(d
, HCI_UNCONFIGURED
)) {
484 rp
->index
[count
++] = cpu_to_le16(d
->id
);
485 bt_dev_dbg(hdev
, "Added hci%u", d
->id
);
489 rp
->num_controllers
= cpu_to_le16(count
);
490 rp_len
= sizeof(*rp
) + (2 * count
);
492 read_unlock(&hci_dev_list_lock
);
494 err
= mgmt_cmd_complete(sk
, MGMT_INDEX_NONE
,
495 MGMT_OP_READ_UNCONF_INDEX_LIST
, 0, rp
, rp_len
);
502 static int read_ext_index_list(struct sock
*sk
, struct hci_dev
*hdev
,
503 void *data
, u16 data_len
)
505 struct mgmt_rp_read_ext_index_list
*rp
;
510 bt_dev_dbg(hdev
, "sock %p", sk
);
512 read_lock(&hci_dev_list_lock
);
515 list_for_each_entry(d
, &hci_dev_list
, list
) {
516 if (d
->dev_type
== HCI_PRIMARY
|| d
->dev_type
== HCI_AMP
)
520 rp
= kmalloc(struct_size(rp
, entry
, count
), GFP_ATOMIC
);
522 read_unlock(&hci_dev_list_lock
);
527 list_for_each_entry(d
, &hci_dev_list
, list
) {
528 if (hci_dev_test_flag(d
, HCI_SETUP
) ||
529 hci_dev_test_flag(d
, HCI_CONFIG
) ||
530 hci_dev_test_flag(d
, HCI_USER_CHANNEL
))
533 /* Devices marked as raw-only are neither configured
534 * nor unconfigured controllers.
536 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &d
->quirks
))
539 if (d
->dev_type
== HCI_PRIMARY
) {
540 if (hci_dev_test_flag(d
, HCI_UNCONFIGURED
))
541 rp
->entry
[count
].type
= 0x01;
543 rp
->entry
[count
].type
= 0x00;
544 } else if (d
->dev_type
== HCI_AMP
) {
545 rp
->entry
[count
].type
= 0x02;
550 rp
->entry
[count
].bus
= d
->bus
;
551 rp
->entry
[count
++].index
= cpu_to_le16(d
->id
);
552 bt_dev_dbg(hdev
, "Added hci%u", d
->id
);
555 rp
->num_controllers
= cpu_to_le16(count
);
557 read_unlock(&hci_dev_list_lock
);
559 /* If this command is called at least once, then all the
560 * default index and unconfigured index events are disabled
561 * and from now on only extended index events are used.
563 hci_sock_set_flag(sk
, HCI_MGMT_EXT_INDEX_EVENTS
);
564 hci_sock_clear_flag(sk
, HCI_MGMT_INDEX_EVENTS
);
565 hci_sock_clear_flag(sk
, HCI_MGMT_UNCONF_INDEX_EVENTS
);
567 err
= mgmt_cmd_complete(sk
, MGMT_INDEX_NONE
,
568 MGMT_OP_READ_EXT_INDEX_LIST
, 0, rp
,
569 struct_size(rp
, entry
, count
));
576 static bool is_configured(struct hci_dev
*hdev
)
578 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
) &&
579 !hci_dev_test_flag(hdev
, HCI_EXT_CONFIGURED
))
582 if ((test_bit(HCI_QUIRK_INVALID_BDADDR
, &hdev
->quirks
) ||
583 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY
, &hdev
->quirks
)) &&
584 !bacmp(&hdev
->public_addr
, BDADDR_ANY
))
590 static __le32
get_missing_options(struct hci_dev
*hdev
)
594 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
) &&
595 !hci_dev_test_flag(hdev
, HCI_EXT_CONFIGURED
))
596 options
|= MGMT_OPTION_EXTERNAL_CONFIG
;
598 if ((test_bit(HCI_QUIRK_INVALID_BDADDR
, &hdev
->quirks
) ||
599 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY
, &hdev
->quirks
)) &&
600 !bacmp(&hdev
->public_addr
, BDADDR_ANY
))
601 options
|= MGMT_OPTION_PUBLIC_ADDRESS
;
603 return cpu_to_le32(options
);
606 static int new_options(struct hci_dev
*hdev
, struct sock
*skip
)
608 __le32 options
= get_missing_options(hdev
);
610 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS
, hdev
, &options
,
611 sizeof(options
), HCI_MGMT_OPTION_EVENTS
, skip
);
614 static int send_options_rsp(struct sock
*sk
, u16 opcode
, struct hci_dev
*hdev
)
616 __le32 options
= get_missing_options(hdev
);
618 return mgmt_cmd_complete(sk
, hdev
->id
, opcode
, 0, &options
,
622 static int read_config_info(struct sock
*sk
, struct hci_dev
*hdev
,
623 void *data
, u16 data_len
)
625 struct mgmt_rp_read_config_info rp
;
628 bt_dev_dbg(hdev
, "sock %p", sk
);
632 memset(&rp
, 0, sizeof(rp
));
633 rp
.manufacturer
= cpu_to_le16(hdev
->manufacturer
);
635 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
))
636 options
|= MGMT_OPTION_EXTERNAL_CONFIG
;
638 if (hdev
->set_bdaddr
)
639 options
|= MGMT_OPTION_PUBLIC_ADDRESS
;
641 rp
.supported_options
= cpu_to_le32(options
);
642 rp
.missing_options
= get_missing_options(hdev
);
644 hci_dev_unlock(hdev
);
646 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_READ_CONFIG_INFO
, 0,
650 static u32
get_supported_phys(struct hci_dev
*hdev
)
652 u32 supported_phys
= 0;
654 if (lmp_bredr_capable(hdev
)) {
655 supported_phys
|= MGMT_PHY_BR_1M_1SLOT
;
657 if (hdev
->features
[0][0] & LMP_3SLOT
)
658 supported_phys
|= MGMT_PHY_BR_1M_3SLOT
;
660 if (hdev
->features
[0][0] & LMP_5SLOT
)
661 supported_phys
|= MGMT_PHY_BR_1M_5SLOT
;
663 if (lmp_edr_2m_capable(hdev
)) {
664 supported_phys
|= MGMT_PHY_EDR_2M_1SLOT
;
666 if (lmp_edr_3slot_capable(hdev
))
667 supported_phys
|= MGMT_PHY_EDR_2M_3SLOT
;
669 if (lmp_edr_5slot_capable(hdev
))
670 supported_phys
|= MGMT_PHY_EDR_2M_5SLOT
;
672 if (lmp_edr_3m_capable(hdev
)) {
673 supported_phys
|= MGMT_PHY_EDR_3M_1SLOT
;
675 if (lmp_edr_3slot_capable(hdev
))
676 supported_phys
|= MGMT_PHY_EDR_3M_3SLOT
;
678 if (lmp_edr_5slot_capable(hdev
))
679 supported_phys
|= MGMT_PHY_EDR_3M_5SLOT
;
684 if (lmp_le_capable(hdev
)) {
685 supported_phys
|= MGMT_PHY_LE_1M_TX
;
686 supported_phys
|= MGMT_PHY_LE_1M_RX
;
688 if (hdev
->le_features
[1] & HCI_LE_PHY_2M
) {
689 supported_phys
|= MGMT_PHY_LE_2M_TX
;
690 supported_phys
|= MGMT_PHY_LE_2M_RX
;
693 if (hdev
->le_features
[1] & HCI_LE_PHY_CODED
) {
694 supported_phys
|= MGMT_PHY_LE_CODED_TX
;
695 supported_phys
|= MGMT_PHY_LE_CODED_RX
;
699 return supported_phys
;
702 static u32
get_selected_phys(struct hci_dev
*hdev
)
704 u32 selected_phys
= 0;
706 if (lmp_bredr_capable(hdev
)) {
707 selected_phys
|= MGMT_PHY_BR_1M_1SLOT
;
709 if (hdev
->pkt_type
& (HCI_DM3
| HCI_DH3
))
710 selected_phys
|= MGMT_PHY_BR_1M_3SLOT
;
712 if (hdev
->pkt_type
& (HCI_DM5
| HCI_DH5
))
713 selected_phys
|= MGMT_PHY_BR_1M_5SLOT
;
715 if (lmp_edr_2m_capable(hdev
)) {
716 if (!(hdev
->pkt_type
& HCI_2DH1
))
717 selected_phys
|= MGMT_PHY_EDR_2M_1SLOT
;
719 if (lmp_edr_3slot_capable(hdev
) &&
720 !(hdev
->pkt_type
& HCI_2DH3
))
721 selected_phys
|= MGMT_PHY_EDR_2M_3SLOT
;
723 if (lmp_edr_5slot_capable(hdev
) &&
724 !(hdev
->pkt_type
& HCI_2DH5
))
725 selected_phys
|= MGMT_PHY_EDR_2M_5SLOT
;
727 if (lmp_edr_3m_capable(hdev
)) {
728 if (!(hdev
->pkt_type
& HCI_3DH1
))
729 selected_phys
|= MGMT_PHY_EDR_3M_1SLOT
;
731 if (lmp_edr_3slot_capable(hdev
) &&
732 !(hdev
->pkt_type
& HCI_3DH3
))
733 selected_phys
|= MGMT_PHY_EDR_3M_3SLOT
;
735 if (lmp_edr_5slot_capable(hdev
) &&
736 !(hdev
->pkt_type
& HCI_3DH5
))
737 selected_phys
|= MGMT_PHY_EDR_3M_5SLOT
;
742 if (lmp_le_capable(hdev
)) {
743 if (hdev
->le_tx_def_phys
& HCI_LE_SET_PHY_1M
)
744 selected_phys
|= MGMT_PHY_LE_1M_TX
;
746 if (hdev
->le_rx_def_phys
& HCI_LE_SET_PHY_1M
)
747 selected_phys
|= MGMT_PHY_LE_1M_RX
;
749 if (hdev
->le_tx_def_phys
& HCI_LE_SET_PHY_2M
)
750 selected_phys
|= MGMT_PHY_LE_2M_TX
;
752 if (hdev
->le_rx_def_phys
& HCI_LE_SET_PHY_2M
)
753 selected_phys
|= MGMT_PHY_LE_2M_RX
;
755 if (hdev
->le_tx_def_phys
& HCI_LE_SET_PHY_CODED
)
756 selected_phys
|= MGMT_PHY_LE_CODED_TX
;
758 if (hdev
->le_rx_def_phys
& HCI_LE_SET_PHY_CODED
)
759 selected_phys
|= MGMT_PHY_LE_CODED_RX
;
762 return selected_phys
;
765 static u32
get_configurable_phys(struct hci_dev
*hdev
)
767 return (get_supported_phys(hdev
) & ~MGMT_PHY_BR_1M_1SLOT
&
768 ~MGMT_PHY_LE_1M_TX
& ~MGMT_PHY_LE_1M_RX
);
771 static u32
get_supported_settings(struct hci_dev
*hdev
)
775 settings
|= MGMT_SETTING_POWERED
;
776 settings
|= MGMT_SETTING_BONDABLE
;
777 settings
|= MGMT_SETTING_DEBUG_KEYS
;
778 settings
|= MGMT_SETTING_CONNECTABLE
;
779 settings
|= MGMT_SETTING_DISCOVERABLE
;
781 if (lmp_bredr_capable(hdev
)) {
782 if (hdev
->hci_ver
>= BLUETOOTH_VER_1_2
)
783 settings
|= MGMT_SETTING_FAST_CONNECTABLE
;
784 settings
|= MGMT_SETTING_BREDR
;
785 settings
|= MGMT_SETTING_LINK_SECURITY
;
787 if (lmp_ssp_capable(hdev
)) {
788 settings
|= MGMT_SETTING_SSP
;
789 if (IS_ENABLED(CONFIG_BT_HS
))
790 settings
|= MGMT_SETTING_HS
;
793 if (lmp_sc_capable(hdev
))
794 settings
|= MGMT_SETTING_SECURE_CONN
;
796 if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED
,
798 settings
|= MGMT_SETTING_WIDEBAND_SPEECH
;
801 if (lmp_le_capable(hdev
)) {
802 settings
|= MGMT_SETTING_LE
;
803 settings
|= MGMT_SETTING_SECURE_CONN
;
804 settings
|= MGMT_SETTING_PRIVACY
;
805 settings
|= MGMT_SETTING_STATIC_ADDRESS
;
807 /* When the experimental feature for LL Privacy support is
808 * enabled, then advertising is no longer supported.
810 if (!hci_dev_test_flag(hdev
, HCI_ENABLE_LL_PRIVACY
))
811 settings
|= MGMT_SETTING_ADVERTISING
;
814 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
) ||
816 settings
|= MGMT_SETTING_CONFIGURATION
;
818 settings
|= MGMT_SETTING_PHY_CONFIGURATION
;
823 static u32
get_current_settings(struct hci_dev
*hdev
)
827 if (hdev_is_powered(hdev
))
828 settings
|= MGMT_SETTING_POWERED
;
830 if (hci_dev_test_flag(hdev
, HCI_CONNECTABLE
))
831 settings
|= MGMT_SETTING_CONNECTABLE
;
833 if (hci_dev_test_flag(hdev
, HCI_FAST_CONNECTABLE
))
834 settings
|= MGMT_SETTING_FAST_CONNECTABLE
;
836 if (hci_dev_test_flag(hdev
, HCI_DISCOVERABLE
))
837 settings
|= MGMT_SETTING_DISCOVERABLE
;
839 if (hci_dev_test_flag(hdev
, HCI_BONDABLE
))
840 settings
|= MGMT_SETTING_BONDABLE
;
842 if (hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
843 settings
|= MGMT_SETTING_BREDR
;
845 if (hci_dev_test_flag(hdev
, HCI_LE_ENABLED
))
846 settings
|= MGMT_SETTING_LE
;
848 if (hci_dev_test_flag(hdev
, HCI_LINK_SECURITY
))
849 settings
|= MGMT_SETTING_LINK_SECURITY
;
851 if (hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
))
852 settings
|= MGMT_SETTING_SSP
;
854 if (hci_dev_test_flag(hdev
, HCI_HS_ENABLED
))
855 settings
|= MGMT_SETTING_HS
;
857 if (hci_dev_test_flag(hdev
, HCI_ADVERTISING
))
858 settings
|= MGMT_SETTING_ADVERTISING
;
860 if (hci_dev_test_flag(hdev
, HCI_SC_ENABLED
))
861 settings
|= MGMT_SETTING_SECURE_CONN
;
863 if (hci_dev_test_flag(hdev
, HCI_KEEP_DEBUG_KEYS
))
864 settings
|= MGMT_SETTING_DEBUG_KEYS
;
866 if (hci_dev_test_flag(hdev
, HCI_PRIVACY
))
867 settings
|= MGMT_SETTING_PRIVACY
;
869 /* The current setting for static address has two purposes. The
870 * first is to indicate if the static address will be used and
871 * the second is to indicate if it is actually set.
873 * This means if the static address is not configured, this flag
874 * will never be set. If the address is configured, then if the
875 * address is actually used decides if the flag is set or not.
877 * For single mode LE only controllers and dual-mode controllers
878 * with BR/EDR disabled, the existence of the static address will
881 if (hci_dev_test_flag(hdev
, HCI_FORCE_STATIC_ADDR
) ||
882 !hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
) ||
883 !bacmp(&hdev
->bdaddr
, BDADDR_ANY
)) {
884 if (bacmp(&hdev
->static_addr
, BDADDR_ANY
))
885 settings
|= MGMT_SETTING_STATIC_ADDRESS
;
888 if (hci_dev_test_flag(hdev
, HCI_WIDEBAND_SPEECH_ENABLED
))
889 settings
|= MGMT_SETTING_WIDEBAND_SPEECH
;
894 static struct mgmt_pending_cmd
*pending_find(u16 opcode
, struct hci_dev
*hdev
)
896 return mgmt_pending_find(HCI_CHANNEL_CONTROL
, opcode
, hdev
);
899 static struct mgmt_pending_cmd
*pending_find_data(u16 opcode
,
900 struct hci_dev
*hdev
,
903 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL
, opcode
, hdev
, data
);
906 u8
mgmt_get_adv_discov_flags(struct hci_dev
*hdev
)
908 struct mgmt_pending_cmd
*cmd
;
910 /* If there's a pending mgmt command the flags will not yet have
911 * their final values, so check for this first.
913 cmd
= pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
);
915 struct mgmt_mode
*cp
= cmd
->param
;
917 return LE_AD_GENERAL
;
918 else if (cp
->val
== 0x02)
919 return LE_AD_LIMITED
;
921 if (hci_dev_test_flag(hdev
, HCI_LIMITED_DISCOVERABLE
))
922 return LE_AD_LIMITED
;
923 else if (hci_dev_test_flag(hdev
, HCI_DISCOVERABLE
))
924 return LE_AD_GENERAL
;
930 bool mgmt_get_connectable(struct hci_dev
*hdev
)
932 struct mgmt_pending_cmd
*cmd
;
934 /* If there's a pending mgmt command the flag will not yet have
935 * it's final value, so check for this first.
937 cmd
= pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
);
939 struct mgmt_mode
*cp
= cmd
->param
;
944 return hci_dev_test_flag(hdev
, HCI_CONNECTABLE
);
947 static void service_cache_off(struct work_struct
*work
)
949 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
951 struct hci_request req
;
953 if (!hci_dev_test_and_clear_flag(hdev
, HCI_SERVICE_CACHE
))
956 hci_req_init(&req
, hdev
);
960 __hci_req_update_eir(&req
);
961 __hci_req_update_class(&req
);
963 hci_dev_unlock(hdev
);
965 hci_req_run(&req
, NULL
);
968 static void rpa_expired(struct work_struct
*work
)
970 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
972 struct hci_request req
;
974 bt_dev_dbg(hdev
, "");
976 hci_dev_set_flag(hdev
, HCI_RPA_EXPIRED
);
978 if (!hci_dev_test_flag(hdev
, HCI_ADVERTISING
))
981 /* The generation of a new RPA and programming it into the
982 * controller happens in the hci_req_enable_advertising()
985 hci_req_init(&req
, hdev
);
986 if (ext_adv_capable(hdev
))
987 __hci_req_start_ext_adv(&req
, hdev
->cur_adv_instance
);
989 __hci_req_enable_advertising(&req
);
990 hci_req_run(&req
, NULL
);
993 static void mgmt_init_hdev(struct sock
*sk
, struct hci_dev
*hdev
)
995 if (hci_dev_test_and_set_flag(hdev
, HCI_MGMT
))
998 INIT_DELAYED_WORK(&hdev
->service_cache
, service_cache_off
);
999 INIT_DELAYED_WORK(&hdev
->rpa_expired
, rpa_expired
);
1001 /* Non-mgmt controlled devices get this bit set
1002 * implicitly so that pairing works for them, however
1003 * for mgmt we require user-space to explicitly enable
1006 hci_dev_clear_flag(hdev
, HCI_BONDABLE
);
1009 static int read_controller_info(struct sock
*sk
, struct hci_dev
*hdev
,
1010 void *data
, u16 data_len
)
1012 struct mgmt_rp_read_info rp
;
1014 bt_dev_dbg(hdev
, "sock %p", sk
);
1018 memset(&rp
, 0, sizeof(rp
));
1020 bacpy(&rp
.bdaddr
, &hdev
->bdaddr
);
1022 rp
.version
= hdev
->hci_ver
;
1023 rp
.manufacturer
= cpu_to_le16(hdev
->manufacturer
);
1025 rp
.supported_settings
= cpu_to_le32(get_supported_settings(hdev
));
1026 rp
.current_settings
= cpu_to_le32(get_current_settings(hdev
));
1028 memcpy(rp
.dev_class
, hdev
->dev_class
, 3);
1030 memcpy(rp
.name
, hdev
->dev_name
, sizeof(hdev
->dev_name
));
1031 memcpy(rp
.short_name
, hdev
->short_name
, sizeof(hdev
->short_name
));
1033 hci_dev_unlock(hdev
);
1035 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_READ_INFO
, 0, &rp
,
1039 static u16
append_eir_data_to_buf(struct hci_dev
*hdev
, u8
*eir
)
1044 if (hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
1045 eir_len
= eir_append_data(eir
, eir_len
, EIR_CLASS_OF_DEV
,
1046 hdev
->dev_class
, 3);
1048 if (hci_dev_test_flag(hdev
, HCI_LE_ENABLED
))
1049 eir_len
= eir_append_le16(eir
, eir_len
, EIR_APPEARANCE
,
1052 name_len
= strlen(hdev
->dev_name
);
1053 eir_len
= eir_append_data(eir
, eir_len
, EIR_NAME_COMPLETE
,
1054 hdev
->dev_name
, name_len
);
1056 name_len
= strlen(hdev
->short_name
);
1057 eir_len
= eir_append_data(eir
, eir_len
, EIR_NAME_SHORT
,
1058 hdev
->short_name
, name_len
);
1063 static int read_ext_controller_info(struct sock
*sk
, struct hci_dev
*hdev
,
1064 void *data
, u16 data_len
)
1067 struct mgmt_rp_read_ext_info
*rp
= (void *)buf
;
1070 bt_dev_dbg(hdev
, "sock %p", sk
);
1072 memset(&buf
, 0, sizeof(buf
));
1076 bacpy(&rp
->bdaddr
, &hdev
->bdaddr
);
1078 rp
->version
= hdev
->hci_ver
;
1079 rp
->manufacturer
= cpu_to_le16(hdev
->manufacturer
);
1081 rp
->supported_settings
= cpu_to_le32(get_supported_settings(hdev
));
1082 rp
->current_settings
= cpu_to_le32(get_current_settings(hdev
));
1085 eir_len
= append_eir_data_to_buf(hdev
, rp
->eir
);
1086 rp
->eir_len
= cpu_to_le16(eir_len
);
1088 hci_dev_unlock(hdev
);
1090 /* If this command is called at least once, then the events
1091 * for class of device and local name changes are disabled
1092 * and only the new extended controller information event
1095 hci_sock_set_flag(sk
, HCI_MGMT_EXT_INFO_EVENTS
);
1096 hci_sock_clear_flag(sk
, HCI_MGMT_DEV_CLASS_EVENTS
);
1097 hci_sock_clear_flag(sk
, HCI_MGMT_LOCAL_NAME_EVENTS
);
1099 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_READ_EXT_INFO
, 0, rp
,
1100 sizeof(*rp
) + eir_len
);
1103 static int ext_info_changed(struct hci_dev
*hdev
, struct sock
*skip
)
1106 struct mgmt_ev_ext_info_changed
*ev
= (void *)buf
;
1109 memset(buf
, 0, sizeof(buf
));
1111 eir_len
= append_eir_data_to_buf(hdev
, ev
->eir
);
1112 ev
->eir_len
= cpu_to_le16(eir_len
);
1114 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED
, hdev
, ev
,
1115 sizeof(*ev
) + eir_len
,
1116 HCI_MGMT_EXT_INFO_EVENTS
, skip
);
1119 static int send_settings_rsp(struct sock
*sk
, u16 opcode
, struct hci_dev
*hdev
)
1121 __le32 settings
= cpu_to_le32(get_current_settings(hdev
));
1123 return mgmt_cmd_complete(sk
, hdev
->id
, opcode
, 0, &settings
,
1127 static void clean_up_hci_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
1129 bt_dev_dbg(hdev
, "status 0x%02x", status
);
1131 if (hci_conn_count(hdev
) == 0) {
1132 cancel_delayed_work(&hdev
->power_off
);
1133 queue_work(hdev
->req_workqueue
, &hdev
->power_off
.work
);
1137 void mgmt_advertising_added(struct sock
*sk
, struct hci_dev
*hdev
, u8 instance
)
1139 struct mgmt_ev_advertising_added ev
;
1141 ev
.instance
= instance
;
1143 mgmt_event(MGMT_EV_ADVERTISING_ADDED
, hdev
, &ev
, sizeof(ev
), sk
);
1146 void mgmt_advertising_removed(struct sock
*sk
, struct hci_dev
*hdev
,
1149 struct mgmt_ev_advertising_removed ev
;
1151 ev
.instance
= instance
;
1153 mgmt_event(MGMT_EV_ADVERTISING_REMOVED
, hdev
, &ev
, sizeof(ev
), sk
);
1156 static void cancel_adv_timeout(struct hci_dev
*hdev
)
1158 if (hdev
->adv_instance_timeout
) {
1159 hdev
->adv_instance_timeout
= 0;
1160 cancel_delayed_work(&hdev
->adv_instance_expire
);
1164 static int clean_up_hci_state(struct hci_dev
*hdev
)
1166 struct hci_request req
;
1167 struct hci_conn
*conn
;
1168 bool discov_stopped
;
1171 hci_req_init(&req
, hdev
);
1173 if (test_bit(HCI_ISCAN
, &hdev
->flags
) ||
1174 test_bit(HCI_PSCAN
, &hdev
->flags
)) {
1176 hci_req_add(&req
, HCI_OP_WRITE_SCAN_ENABLE
, 1, &scan
);
1179 hci_req_clear_adv_instance(hdev
, NULL
, NULL
, 0x00, false);
1181 if (hci_dev_test_flag(hdev
, HCI_LE_ADV
))
1182 __hci_req_disable_advertising(&req
);
1184 discov_stopped
= hci_req_stop_discovery(&req
);
1186 list_for_each_entry(conn
, &hdev
->conn_hash
.list
, list
) {
1187 /* 0x15 == Terminated due to Power Off */
1188 __hci_abort_conn(&req
, conn
, 0x15);
1191 err
= hci_req_run(&req
, clean_up_hci_complete
);
1192 if (!err
&& discov_stopped
)
1193 hci_discovery_set_state(hdev
, DISCOVERY_STOPPING
);
1198 static int set_powered(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1201 struct mgmt_mode
*cp
= data
;
1202 struct mgmt_pending_cmd
*cmd
;
1205 bt_dev_dbg(hdev
, "sock %p", sk
);
1207 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1208 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_POWERED
,
1209 MGMT_STATUS_INVALID_PARAMS
);
1213 if (pending_find(MGMT_OP_SET_POWERED
, hdev
)) {
1214 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_POWERED
,
1219 if (!!cp
->val
== hdev_is_powered(hdev
)) {
1220 err
= send_settings_rsp(sk
, MGMT_OP_SET_POWERED
, hdev
);
1224 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_POWERED
, hdev
, data
, len
);
1231 queue_work(hdev
->req_workqueue
, &hdev
->power_on
);
1234 /* Disconnect connections, stop scans, etc */
1235 err
= clean_up_hci_state(hdev
);
1237 queue_delayed_work(hdev
->req_workqueue
, &hdev
->power_off
,
1238 HCI_POWER_OFF_TIMEOUT
);
1240 /* ENODATA means there were no HCI commands queued */
1241 if (err
== -ENODATA
) {
1242 cancel_delayed_work(&hdev
->power_off
);
1243 queue_work(hdev
->req_workqueue
, &hdev
->power_off
.work
);
1249 hci_dev_unlock(hdev
);
1253 static int new_settings(struct hci_dev
*hdev
, struct sock
*skip
)
1255 __le32 ev
= cpu_to_le32(get_current_settings(hdev
));
1257 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS
, hdev
, &ev
,
1258 sizeof(ev
), HCI_MGMT_SETTING_EVENTS
, skip
);
1261 int mgmt_new_settings(struct hci_dev
*hdev
)
1263 return new_settings(hdev
, NULL
);
1268 struct hci_dev
*hdev
;
1272 static void settings_rsp(struct mgmt_pending_cmd
*cmd
, void *data
)
1274 struct cmd_lookup
*match
= data
;
1276 send_settings_rsp(cmd
->sk
, cmd
->opcode
, match
->hdev
);
1278 list_del(&cmd
->list
);
1280 if (match
->sk
== NULL
) {
1281 match
->sk
= cmd
->sk
;
1282 sock_hold(match
->sk
);
1285 mgmt_pending_free(cmd
);
1288 static void cmd_status_rsp(struct mgmt_pending_cmd
*cmd
, void *data
)
1292 mgmt_cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, *status
);
1293 mgmt_pending_remove(cmd
);
1296 static void cmd_complete_rsp(struct mgmt_pending_cmd
*cmd
, void *data
)
1298 if (cmd
->cmd_complete
) {
1301 cmd
->cmd_complete(cmd
, *status
);
1302 mgmt_pending_remove(cmd
);
1307 cmd_status_rsp(cmd
, data
);
1310 static int generic_cmd_complete(struct mgmt_pending_cmd
*cmd
, u8 status
)
1312 return mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, status
,
1313 cmd
->param
, cmd
->param_len
);
1316 static int addr_cmd_complete(struct mgmt_pending_cmd
*cmd
, u8 status
)
1318 return mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, status
,
1319 cmd
->param
, sizeof(struct mgmt_addr_info
));
1322 static u8
mgmt_bredr_support(struct hci_dev
*hdev
)
1324 if (!lmp_bredr_capable(hdev
))
1325 return MGMT_STATUS_NOT_SUPPORTED
;
1326 else if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
1327 return MGMT_STATUS_REJECTED
;
1329 return MGMT_STATUS_SUCCESS
;
1332 static u8
mgmt_le_support(struct hci_dev
*hdev
)
1334 if (!lmp_le_capable(hdev
))
1335 return MGMT_STATUS_NOT_SUPPORTED
;
1336 else if (!hci_dev_test_flag(hdev
, HCI_LE_ENABLED
))
1337 return MGMT_STATUS_REJECTED
;
1339 return MGMT_STATUS_SUCCESS
;
1342 void mgmt_set_discoverable_complete(struct hci_dev
*hdev
, u8 status
)
1344 struct mgmt_pending_cmd
*cmd
;
1346 bt_dev_dbg(hdev
, "status 0x%02x", status
);
1350 cmd
= pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
);
1355 u8 mgmt_err
= mgmt_status(status
);
1356 mgmt_cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_err
);
1357 hci_dev_clear_flag(hdev
, HCI_LIMITED_DISCOVERABLE
);
1361 if (hci_dev_test_flag(hdev
, HCI_DISCOVERABLE
) &&
1362 hdev
->discov_timeout
> 0) {
1363 int to
= msecs_to_jiffies(hdev
->discov_timeout
* 1000);
1364 queue_delayed_work(hdev
->req_workqueue
, &hdev
->discov_off
, to
);
1367 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
);
1368 new_settings(hdev
, cmd
->sk
);
1371 mgmt_pending_remove(cmd
);
1374 hci_dev_unlock(hdev
);
1377 static int set_discoverable(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1380 struct mgmt_cp_set_discoverable
*cp
= data
;
1381 struct mgmt_pending_cmd
*cmd
;
1385 bt_dev_dbg(hdev
, "sock %p", sk
);
1387 if (!hci_dev_test_flag(hdev
, HCI_LE_ENABLED
) &&
1388 !hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
1389 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1390 MGMT_STATUS_REJECTED
);
1392 if (cp
->val
!= 0x00 && cp
->val
!= 0x01 && cp
->val
!= 0x02)
1393 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1394 MGMT_STATUS_INVALID_PARAMS
);
1396 timeout
= __le16_to_cpu(cp
->timeout
);
1398 /* Disabling discoverable requires that no timeout is set,
1399 * and enabling limited discoverable requires a timeout.
1401 if ((cp
->val
== 0x00 && timeout
> 0) ||
1402 (cp
->val
== 0x02 && timeout
== 0))
1403 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1404 MGMT_STATUS_INVALID_PARAMS
);
1408 if (!hdev_is_powered(hdev
) && timeout
> 0) {
1409 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1410 MGMT_STATUS_NOT_POWERED
);
1414 if (pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
) ||
1415 pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
)) {
1416 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1421 if (!hci_dev_test_flag(hdev
, HCI_CONNECTABLE
)) {
1422 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1423 MGMT_STATUS_REJECTED
);
1427 if (hdev
->advertising_paused
) {
1428 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1433 if (!hdev_is_powered(hdev
)) {
1434 bool changed
= false;
1436 /* Setting limited discoverable when powered off is
1437 * not a valid operation since it requires a timeout
1438 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1440 if (!!cp
->val
!= hci_dev_test_flag(hdev
, HCI_DISCOVERABLE
)) {
1441 hci_dev_change_flag(hdev
, HCI_DISCOVERABLE
);
1445 err
= send_settings_rsp(sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
);
1450 err
= new_settings(hdev
, sk
);
1455 /* If the current mode is the same, then just update the timeout
1456 * value with the new value. And if only the timeout gets updated,
1457 * then no need for any HCI transactions.
1459 if (!!cp
->val
== hci_dev_test_flag(hdev
, HCI_DISCOVERABLE
) &&
1460 (cp
->val
== 0x02) == hci_dev_test_flag(hdev
,
1461 HCI_LIMITED_DISCOVERABLE
)) {
1462 cancel_delayed_work(&hdev
->discov_off
);
1463 hdev
->discov_timeout
= timeout
;
1465 if (cp
->val
&& hdev
->discov_timeout
> 0) {
1466 int to
= msecs_to_jiffies(hdev
->discov_timeout
* 1000);
1467 queue_delayed_work(hdev
->req_workqueue
,
1468 &hdev
->discov_off
, to
);
1471 err
= send_settings_rsp(sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
);
1475 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
, data
, len
);
1481 /* Cancel any potential discoverable timeout that might be
1482 * still active and store new timeout value. The arming of
1483 * the timeout happens in the complete handler.
1485 cancel_delayed_work(&hdev
->discov_off
);
1486 hdev
->discov_timeout
= timeout
;
1489 hci_dev_set_flag(hdev
, HCI_DISCOVERABLE
);
1491 hci_dev_clear_flag(hdev
, HCI_DISCOVERABLE
);
1493 /* Limited discoverable mode */
1494 if (cp
->val
== 0x02)
1495 hci_dev_set_flag(hdev
, HCI_LIMITED_DISCOVERABLE
);
1497 hci_dev_clear_flag(hdev
, HCI_LIMITED_DISCOVERABLE
);
1499 queue_work(hdev
->req_workqueue
, &hdev
->discoverable_update
);
1503 hci_dev_unlock(hdev
);
1507 void mgmt_set_connectable_complete(struct hci_dev
*hdev
, u8 status
)
1509 struct mgmt_pending_cmd
*cmd
;
1511 bt_dev_dbg(hdev
, "status 0x%02x", status
);
1515 cmd
= pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
);
1520 u8 mgmt_err
= mgmt_status(status
);
1521 mgmt_cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_err
);
1525 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_CONNECTABLE
, hdev
);
1526 new_settings(hdev
, cmd
->sk
);
1529 mgmt_pending_remove(cmd
);
1532 hci_dev_unlock(hdev
);
1535 static int set_connectable_update_settings(struct hci_dev
*hdev
,
1536 struct sock
*sk
, u8 val
)
1538 bool changed
= false;
1541 if (!!val
!= hci_dev_test_flag(hdev
, HCI_CONNECTABLE
))
1545 hci_dev_set_flag(hdev
, HCI_CONNECTABLE
);
1547 hci_dev_clear_flag(hdev
, HCI_CONNECTABLE
);
1548 hci_dev_clear_flag(hdev
, HCI_DISCOVERABLE
);
1551 err
= send_settings_rsp(sk
, MGMT_OP_SET_CONNECTABLE
, hdev
);
1556 hci_req_update_scan(hdev
);
1557 hci_update_background_scan(hdev
);
1558 return new_settings(hdev
, sk
);
1564 static int set_connectable(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1567 struct mgmt_mode
*cp
= data
;
1568 struct mgmt_pending_cmd
*cmd
;
1571 bt_dev_dbg(hdev
, "sock %p", sk
);
1573 if (!hci_dev_test_flag(hdev
, HCI_LE_ENABLED
) &&
1574 !hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
1575 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_CONNECTABLE
,
1576 MGMT_STATUS_REJECTED
);
1578 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1579 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_CONNECTABLE
,
1580 MGMT_STATUS_INVALID_PARAMS
);
1584 if (!hdev_is_powered(hdev
)) {
1585 err
= set_connectable_update_settings(hdev
, sk
, cp
->val
);
1589 if (pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
) ||
1590 pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
)) {
1591 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_CONNECTABLE
,
1596 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_CONNECTABLE
, hdev
, data
, len
);
1603 hci_dev_set_flag(hdev
, HCI_CONNECTABLE
);
1605 if (hdev
->discov_timeout
> 0)
1606 cancel_delayed_work(&hdev
->discov_off
);
1608 hci_dev_clear_flag(hdev
, HCI_LIMITED_DISCOVERABLE
);
1609 hci_dev_clear_flag(hdev
, HCI_DISCOVERABLE
);
1610 hci_dev_clear_flag(hdev
, HCI_CONNECTABLE
);
1613 queue_work(hdev
->req_workqueue
, &hdev
->connectable_update
);
1617 hci_dev_unlock(hdev
);
1621 static int set_bondable(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1624 struct mgmt_mode
*cp
= data
;
1628 bt_dev_dbg(hdev
, "sock %p", sk
);
1630 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1631 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BONDABLE
,
1632 MGMT_STATUS_INVALID_PARAMS
);
1637 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_BONDABLE
);
1639 changed
= hci_dev_test_and_clear_flag(hdev
, HCI_BONDABLE
);
1641 err
= send_settings_rsp(sk
, MGMT_OP_SET_BONDABLE
, hdev
);
1646 /* In limited privacy mode the change of bondable mode
1647 * may affect the local advertising address.
1649 if (hdev_is_powered(hdev
) &&
1650 hci_dev_test_flag(hdev
, HCI_ADVERTISING
) &&
1651 hci_dev_test_flag(hdev
, HCI_DISCOVERABLE
) &&
1652 hci_dev_test_flag(hdev
, HCI_LIMITED_PRIVACY
))
1653 queue_work(hdev
->req_workqueue
,
1654 &hdev
->discoverable_update
);
1656 err
= new_settings(hdev
, sk
);
1660 hci_dev_unlock(hdev
);
1664 static int set_link_security(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1667 struct mgmt_mode
*cp
= data
;
1668 struct mgmt_pending_cmd
*cmd
;
1672 bt_dev_dbg(hdev
, "sock %p", sk
);
1674 status
= mgmt_bredr_support(hdev
);
1676 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LINK_SECURITY
,
1679 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1680 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LINK_SECURITY
,
1681 MGMT_STATUS_INVALID_PARAMS
);
1685 if (!hdev_is_powered(hdev
)) {
1686 bool changed
= false;
1688 if (!!cp
->val
!= hci_dev_test_flag(hdev
, HCI_LINK_SECURITY
)) {
1689 hci_dev_change_flag(hdev
, HCI_LINK_SECURITY
);
1693 err
= send_settings_rsp(sk
, MGMT_OP_SET_LINK_SECURITY
, hdev
);
1698 err
= new_settings(hdev
, sk
);
1703 if (pending_find(MGMT_OP_SET_LINK_SECURITY
, hdev
)) {
1704 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LINK_SECURITY
,
1711 if (test_bit(HCI_AUTH
, &hdev
->flags
) == val
) {
1712 err
= send_settings_rsp(sk
, MGMT_OP_SET_LINK_SECURITY
, hdev
);
1716 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_LINK_SECURITY
, hdev
, data
, len
);
1722 err
= hci_send_cmd(hdev
, HCI_OP_WRITE_AUTH_ENABLE
, sizeof(val
), &val
);
1724 mgmt_pending_remove(cmd
);
1729 hci_dev_unlock(hdev
);
1733 static int set_ssp(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
1735 struct mgmt_mode
*cp
= data
;
1736 struct mgmt_pending_cmd
*cmd
;
1740 bt_dev_dbg(hdev
, "sock %p", sk
);
1742 status
= mgmt_bredr_support(hdev
);
1744 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
, status
);
1746 if (!lmp_ssp_capable(hdev
))
1747 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
,
1748 MGMT_STATUS_NOT_SUPPORTED
);
1750 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1751 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
,
1752 MGMT_STATUS_INVALID_PARAMS
);
1756 if (!hdev_is_powered(hdev
)) {
1760 changed
= !hci_dev_test_and_set_flag(hdev
,
1763 changed
= hci_dev_test_and_clear_flag(hdev
,
1766 changed
= hci_dev_test_and_clear_flag(hdev
,
1769 hci_dev_clear_flag(hdev
, HCI_HS_ENABLED
);
1772 err
= send_settings_rsp(sk
, MGMT_OP_SET_SSP
, hdev
);
1777 err
= new_settings(hdev
, sk
);
1782 if (pending_find(MGMT_OP_SET_SSP
, hdev
)) {
1783 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
,
1788 if (!!cp
->val
== hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
)) {
1789 err
= send_settings_rsp(sk
, MGMT_OP_SET_SSP
, hdev
);
1793 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_SSP
, hdev
, data
, len
);
1799 if (!cp
->val
&& hci_dev_test_flag(hdev
, HCI_USE_DEBUG_KEYS
))
1800 hci_send_cmd(hdev
, HCI_OP_WRITE_SSP_DEBUG_MODE
,
1801 sizeof(cp
->val
), &cp
->val
);
1803 err
= hci_send_cmd(hdev
, HCI_OP_WRITE_SSP_MODE
, 1, &cp
->val
);
1805 mgmt_pending_remove(cmd
);
1810 hci_dev_unlock(hdev
);
1814 static int set_hs(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
1816 struct mgmt_mode
*cp
= data
;
1821 bt_dev_dbg(hdev
, "sock %p", sk
);
1823 if (!IS_ENABLED(CONFIG_BT_HS
))
1824 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
1825 MGMT_STATUS_NOT_SUPPORTED
);
1827 status
= mgmt_bredr_support(hdev
);
1829 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
, status
);
1831 if (!lmp_ssp_capable(hdev
))
1832 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
1833 MGMT_STATUS_NOT_SUPPORTED
);
1835 if (!hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
))
1836 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
1837 MGMT_STATUS_REJECTED
);
1839 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1840 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
1841 MGMT_STATUS_INVALID_PARAMS
);
1845 if (pending_find(MGMT_OP_SET_SSP
, hdev
)) {
1846 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
1852 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_HS_ENABLED
);
1854 if (hdev_is_powered(hdev
)) {
1855 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
1856 MGMT_STATUS_REJECTED
);
1860 changed
= hci_dev_test_and_clear_flag(hdev
, HCI_HS_ENABLED
);
1863 err
= send_settings_rsp(sk
, MGMT_OP_SET_HS
, hdev
);
1868 err
= new_settings(hdev
, sk
);
1871 hci_dev_unlock(hdev
);
1875 static void le_enable_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
1877 struct cmd_lookup match
= { NULL
, hdev
};
1882 u8 mgmt_err
= mgmt_status(status
);
1884 mgmt_pending_foreach(MGMT_OP_SET_LE
, hdev
, cmd_status_rsp
,
1889 mgmt_pending_foreach(MGMT_OP_SET_LE
, hdev
, settings_rsp
, &match
);
1891 new_settings(hdev
, match
.sk
);
1896 /* Make sure the controller has a good default for
1897 * advertising data. Restrict the update to when LE
1898 * has actually been enabled. During power on, the
1899 * update in powered_update_hci will take care of it.
1901 if (hci_dev_test_flag(hdev
, HCI_LE_ENABLED
)) {
1902 struct hci_request req
;
1903 hci_req_init(&req
, hdev
);
1904 if (ext_adv_capable(hdev
)) {
1907 err
= __hci_req_setup_ext_adv_instance(&req
, 0x00);
1909 __hci_req_update_scan_rsp_data(&req
, 0x00);
1911 __hci_req_update_adv_data(&req
, 0x00);
1912 __hci_req_update_scan_rsp_data(&req
, 0x00);
1914 hci_req_run(&req
, NULL
);
1915 hci_update_background_scan(hdev
);
1919 hci_dev_unlock(hdev
);
1922 static int set_le(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
1924 struct mgmt_mode
*cp
= data
;
1925 struct hci_cp_write_le_host_supported hci_cp
;
1926 struct mgmt_pending_cmd
*cmd
;
1927 struct hci_request req
;
1931 bt_dev_dbg(hdev
, "sock %p", sk
);
1933 if (!lmp_le_capable(hdev
))
1934 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
1935 MGMT_STATUS_NOT_SUPPORTED
);
1937 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1938 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
1939 MGMT_STATUS_INVALID_PARAMS
);
1941 /* Bluetooth single mode LE only controllers or dual-mode
1942 * controllers configured as LE only devices, do not allow
1943 * switching LE off. These have either LE enabled explicitly
1944 * or BR/EDR has been previously switched off.
1946 * When trying to enable an already enabled LE, then gracefully
1947 * send a positive response. Trying to disable it however will
1948 * result into rejection.
1950 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
)) {
1951 if (cp
->val
== 0x01)
1952 return send_settings_rsp(sk
, MGMT_OP_SET_LE
, hdev
);
1954 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
1955 MGMT_STATUS_REJECTED
);
1961 enabled
= lmp_host_le_capable(hdev
);
1964 hci_req_clear_adv_instance(hdev
, NULL
, NULL
, 0x00, true);
1966 if (!hdev_is_powered(hdev
) || val
== enabled
) {
1967 bool changed
= false;
1969 if (val
!= hci_dev_test_flag(hdev
, HCI_LE_ENABLED
)) {
1970 hci_dev_change_flag(hdev
, HCI_LE_ENABLED
);
1974 if (!val
&& hci_dev_test_flag(hdev
, HCI_ADVERTISING
)) {
1975 hci_dev_clear_flag(hdev
, HCI_ADVERTISING
);
1979 err
= send_settings_rsp(sk
, MGMT_OP_SET_LE
, hdev
);
1984 err
= new_settings(hdev
, sk
);
1989 if (pending_find(MGMT_OP_SET_LE
, hdev
) ||
1990 pending_find(MGMT_OP_SET_ADVERTISING
, hdev
)) {
1991 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
1996 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_LE
, hdev
, data
, len
);
2002 hci_req_init(&req
, hdev
);
2004 memset(&hci_cp
, 0, sizeof(hci_cp
));
2008 hci_cp
.simul
= 0x00;
2010 if (hci_dev_test_flag(hdev
, HCI_LE_ADV
))
2011 __hci_req_disable_advertising(&req
);
2013 if (ext_adv_capable(hdev
))
2014 __hci_req_clear_ext_adv_sets(&req
);
2017 hci_req_add(&req
, HCI_OP_WRITE_LE_HOST_SUPPORTED
, sizeof(hci_cp
),
2020 err
= hci_req_run(&req
, le_enable_complete
);
2022 mgmt_pending_remove(cmd
);
2025 hci_dev_unlock(hdev
);
2029 /* This is a helper function to test for pending mgmt commands that can
2030 * cause CoD or EIR HCI commands. We can only allow one such pending
2031 * mgmt command at a time since otherwise we cannot easily track what
2032 * the current values are, will be, and based on that calculate if a new
2033 * HCI command needs to be sent and if yes with what value.
2035 static bool pending_eir_or_class(struct hci_dev
*hdev
)
2037 struct mgmt_pending_cmd
*cmd
;
2039 list_for_each_entry(cmd
, &hdev
->mgmt_pending
, list
) {
2040 switch (cmd
->opcode
) {
2041 case MGMT_OP_ADD_UUID
:
2042 case MGMT_OP_REMOVE_UUID
:
2043 case MGMT_OP_SET_DEV_CLASS
:
2044 case MGMT_OP_SET_POWERED
:
2052 static const u8 bluetooth_base_uuid
[] = {
2053 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2054 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2057 static u8
get_uuid_size(const u8
*uuid
)
2061 if (memcmp(uuid
, bluetooth_base_uuid
, 12))
2064 val
= get_unaligned_le32(&uuid
[12]);
2071 static void mgmt_class_complete(struct hci_dev
*hdev
, u16 mgmt_op
, u8 status
)
2073 struct mgmt_pending_cmd
*cmd
;
2077 cmd
= pending_find(mgmt_op
, hdev
);
2081 mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
,
2082 mgmt_status(status
), hdev
->dev_class
, 3);
2084 mgmt_pending_remove(cmd
);
2087 hci_dev_unlock(hdev
);
2090 static void add_uuid_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
2092 bt_dev_dbg(hdev
, "status 0x%02x", status
);
2094 mgmt_class_complete(hdev
, MGMT_OP_ADD_UUID
, status
);
2097 static int add_uuid(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
2099 struct mgmt_cp_add_uuid
*cp
= data
;
2100 struct mgmt_pending_cmd
*cmd
;
2101 struct hci_request req
;
2102 struct bt_uuid
*uuid
;
2105 bt_dev_dbg(hdev
, "sock %p", sk
);
2109 if (pending_eir_or_class(hdev
)) {
2110 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_UUID
,
2115 uuid
= kmalloc(sizeof(*uuid
), GFP_KERNEL
);
2121 memcpy(uuid
->uuid
, cp
->uuid
, 16);
2122 uuid
->svc_hint
= cp
->svc_hint
;
2123 uuid
->size
= get_uuid_size(cp
->uuid
);
2125 list_add_tail(&uuid
->list
, &hdev
->uuids
);
2127 hci_req_init(&req
, hdev
);
2129 __hci_req_update_class(&req
);
2130 __hci_req_update_eir(&req
);
2132 err
= hci_req_run(&req
, add_uuid_complete
);
2134 if (err
!= -ENODATA
)
2137 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_UUID
, 0,
2138 hdev
->dev_class
, 3);
2142 cmd
= mgmt_pending_add(sk
, MGMT_OP_ADD_UUID
, hdev
, data
, len
);
2151 hci_dev_unlock(hdev
);
2155 static bool enable_service_cache(struct hci_dev
*hdev
)
2157 if (!hdev_is_powered(hdev
))
2160 if (!hci_dev_test_and_set_flag(hdev
, HCI_SERVICE_CACHE
)) {
2161 queue_delayed_work(hdev
->workqueue
, &hdev
->service_cache
,
2169 static void remove_uuid_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
2171 bt_dev_dbg(hdev
, "status 0x%02x", status
);
2173 mgmt_class_complete(hdev
, MGMT_OP_REMOVE_UUID
, status
);
2176 static int remove_uuid(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2179 struct mgmt_cp_remove_uuid
*cp
= data
;
2180 struct mgmt_pending_cmd
*cmd
;
2181 struct bt_uuid
*match
, *tmp
;
2182 u8 bt_uuid_any
[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2183 struct hci_request req
;
2186 bt_dev_dbg(hdev
, "sock %p", sk
);
2190 if (pending_eir_or_class(hdev
)) {
2191 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
,
2196 if (memcmp(cp
->uuid
, bt_uuid_any
, 16) == 0) {
2197 hci_uuids_clear(hdev
);
2199 if (enable_service_cache(hdev
)) {
2200 err
= mgmt_cmd_complete(sk
, hdev
->id
,
2201 MGMT_OP_REMOVE_UUID
,
2202 0, hdev
->dev_class
, 3);
2211 list_for_each_entry_safe(match
, tmp
, &hdev
->uuids
, list
) {
2212 if (memcmp(match
->uuid
, cp
->uuid
, 16) != 0)
2215 list_del(&match
->list
);
2221 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
,
2222 MGMT_STATUS_INVALID_PARAMS
);
2227 hci_req_init(&req
, hdev
);
2229 __hci_req_update_class(&req
);
2230 __hci_req_update_eir(&req
);
2232 err
= hci_req_run(&req
, remove_uuid_complete
);
2234 if (err
!= -ENODATA
)
2237 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
, 0,
2238 hdev
->dev_class
, 3);
2242 cmd
= mgmt_pending_add(sk
, MGMT_OP_REMOVE_UUID
, hdev
, data
, len
);
2251 hci_dev_unlock(hdev
);
2255 static void set_class_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
2257 bt_dev_dbg(hdev
, "status 0x%02x", status
);
2259 mgmt_class_complete(hdev
, MGMT_OP_SET_DEV_CLASS
, status
);
2262 static int set_dev_class(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2265 struct mgmt_cp_set_dev_class
*cp
= data
;
2266 struct mgmt_pending_cmd
*cmd
;
2267 struct hci_request req
;
2270 bt_dev_dbg(hdev
, "sock %p", sk
);
2272 if (!lmp_bredr_capable(hdev
))
2273 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
,
2274 MGMT_STATUS_NOT_SUPPORTED
);
2278 if (pending_eir_or_class(hdev
)) {
2279 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
,
2284 if ((cp
->minor
& 0x03) != 0 || (cp
->major
& 0xe0) != 0) {
2285 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
,
2286 MGMT_STATUS_INVALID_PARAMS
);
2290 hdev
->major_class
= cp
->major
;
2291 hdev
->minor_class
= cp
->minor
;
2293 if (!hdev_is_powered(hdev
)) {
2294 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
, 0,
2295 hdev
->dev_class
, 3);
2299 hci_req_init(&req
, hdev
);
2301 if (hci_dev_test_and_clear_flag(hdev
, HCI_SERVICE_CACHE
)) {
2302 hci_dev_unlock(hdev
);
2303 cancel_delayed_work_sync(&hdev
->service_cache
);
2305 __hci_req_update_eir(&req
);
2308 __hci_req_update_class(&req
);
2310 err
= hci_req_run(&req
, set_class_complete
);
2312 if (err
!= -ENODATA
)
2315 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
, 0,
2316 hdev
->dev_class
, 3);
2320 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_DEV_CLASS
, hdev
, data
, len
);
2329 hci_dev_unlock(hdev
);
2333 static int load_link_keys(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2336 struct mgmt_cp_load_link_keys
*cp
= data
;
2337 const u16 max_key_count
= ((U16_MAX
- sizeof(*cp
)) /
2338 sizeof(struct mgmt_link_key_info
));
2339 u16 key_count
, expected_len
;
2343 bt_dev_dbg(hdev
, "sock %p", sk
);
2345 if (!lmp_bredr_capable(hdev
))
2346 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2347 MGMT_STATUS_NOT_SUPPORTED
);
2349 key_count
= __le16_to_cpu(cp
->key_count
);
2350 if (key_count
> max_key_count
) {
2351 bt_dev_err(hdev
, "load_link_keys: too big key_count value %u",
2353 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2354 MGMT_STATUS_INVALID_PARAMS
);
2357 expected_len
= struct_size(cp
, keys
, key_count
);
2358 if (expected_len
!= len
) {
2359 bt_dev_err(hdev
, "load_link_keys: expected %u bytes, got %u bytes",
2361 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2362 MGMT_STATUS_INVALID_PARAMS
);
2365 if (cp
->debug_keys
!= 0x00 && cp
->debug_keys
!= 0x01)
2366 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2367 MGMT_STATUS_INVALID_PARAMS
);
2369 bt_dev_dbg(hdev
, "debug_keys %u key_count %u", cp
->debug_keys
,
2372 for (i
= 0; i
< key_count
; i
++) {
2373 struct mgmt_link_key_info
*key
= &cp
->keys
[i
];
2375 if (key
->addr
.type
!= BDADDR_BREDR
|| key
->type
> 0x08)
2376 return mgmt_cmd_status(sk
, hdev
->id
,
2377 MGMT_OP_LOAD_LINK_KEYS
,
2378 MGMT_STATUS_INVALID_PARAMS
);
2383 hci_link_keys_clear(hdev
);
2386 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_KEEP_DEBUG_KEYS
);
2388 changed
= hci_dev_test_and_clear_flag(hdev
,
2389 HCI_KEEP_DEBUG_KEYS
);
2392 new_settings(hdev
, NULL
);
2394 for (i
= 0; i
< key_count
; i
++) {
2395 struct mgmt_link_key_info
*key
= &cp
->keys
[i
];
2397 if (hci_is_blocked_key(hdev
,
2398 HCI_BLOCKED_KEY_TYPE_LINKKEY
,
2400 bt_dev_warn(hdev
, "Skipping blocked link key for %pMR",
2405 /* Always ignore debug keys and require a new pairing if
2406 * the user wants to use them.
2408 if (key
->type
== HCI_LK_DEBUG_COMBINATION
)
2411 hci_add_link_key(hdev
, NULL
, &key
->addr
.bdaddr
, key
->val
,
2412 key
->type
, key
->pin_len
, NULL
);
2415 mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
, 0, NULL
, 0);
2417 hci_dev_unlock(hdev
);
2422 static int device_unpaired(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
2423 u8 addr_type
, struct sock
*skip_sk
)
2425 struct mgmt_ev_device_unpaired ev
;
2427 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
2428 ev
.addr
.type
= addr_type
;
2430 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED
, hdev
, &ev
, sizeof(ev
),
2434 static int unpair_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2437 struct mgmt_cp_unpair_device
*cp
= data
;
2438 struct mgmt_rp_unpair_device rp
;
2439 struct hci_conn_params
*params
;
2440 struct mgmt_pending_cmd
*cmd
;
2441 struct hci_conn
*conn
;
2445 memset(&rp
, 0, sizeof(rp
));
2446 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
2447 rp
.addr
.type
= cp
->addr
.type
;
2449 if (!bdaddr_type_is_valid(cp
->addr
.type
))
2450 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
2451 MGMT_STATUS_INVALID_PARAMS
,
2454 if (cp
->disconnect
!= 0x00 && cp
->disconnect
!= 0x01)
2455 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
2456 MGMT_STATUS_INVALID_PARAMS
,
2461 if (!hdev_is_powered(hdev
)) {
2462 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
2463 MGMT_STATUS_NOT_POWERED
, &rp
,
2468 if (cp
->addr
.type
== BDADDR_BREDR
) {
2469 /* If disconnection is requested, then look up the
2470 * connection. If the remote device is connected, it
2471 * will be later used to terminate the link.
2473 * Setting it to NULL explicitly will cause no
2474 * termination of the link.
2477 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
2482 err
= hci_remove_link_key(hdev
, &cp
->addr
.bdaddr
);
2484 err
= mgmt_cmd_complete(sk
, hdev
->id
,
2485 MGMT_OP_UNPAIR_DEVICE
,
2486 MGMT_STATUS_NOT_PAIRED
, &rp
,
2494 /* LE address type */
2495 addr_type
= le_addr_type(cp
->addr
.type
);
2497 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
2498 err
= smp_cancel_and_remove_pairing(hdev
, &cp
->addr
.bdaddr
, addr_type
);
2500 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
2501 MGMT_STATUS_NOT_PAIRED
, &rp
,
2506 conn
= hci_conn_hash_lookup_le(hdev
, &cp
->addr
.bdaddr
, addr_type
);
2508 hci_conn_params_del(hdev
, &cp
->addr
.bdaddr
, addr_type
);
2513 /* Defer clearing up the connection parameters until closing to
2514 * give a chance of keeping them if a repairing happens.
2516 set_bit(HCI_CONN_PARAM_REMOVAL_PEND
, &conn
->flags
);
2518 /* Disable auto-connection parameters if present */
2519 params
= hci_conn_params_lookup(hdev
, &cp
->addr
.bdaddr
, addr_type
);
2521 if (params
->explicit_connect
)
2522 params
->auto_connect
= HCI_AUTO_CONN_EXPLICIT
;
2524 params
->auto_connect
= HCI_AUTO_CONN_DISABLED
;
2527 /* If disconnection is not requested, then clear the connection
2528 * variable so that the link is not terminated.
2530 if (!cp
->disconnect
)
2534 /* If the connection variable is set, then termination of the
2535 * link is requested.
2538 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
, 0,
2540 device_unpaired(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
, sk
);
2544 cmd
= mgmt_pending_add(sk
, MGMT_OP_UNPAIR_DEVICE
, hdev
, cp
,
2551 cmd
->cmd_complete
= addr_cmd_complete
;
2553 err
= hci_abort_conn(conn
, HCI_ERROR_REMOTE_USER_TERM
);
2555 mgmt_pending_remove(cmd
);
2558 hci_dev_unlock(hdev
);
2562 static int disconnect(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2565 struct mgmt_cp_disconnect
*cp
= data
;
2566 struct mgmt_rp_disconnect rp
;
2567 struct mgmt_pending_cmd
*cmd
;
2568 struct hci_conn
*conn
;
2571 bt_dev_dbg(hdev
, "sock %p", sk
);
2573 memset(&rp
, 0, sizeof(rp
));
2574 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
2575 rp
.addr
.type
= cp
->addr
.type
;
2577 if (!bdaddr_type_is_valid(cp
->addr
.type
))
2578 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
2579 MGMT_STATUS_INVALID_PARAMS
,
2584 if (!test_bit(HCI_UP
, &hdev
->flags
)) {
2585 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
2586 MGMT_STATUS_NOT_POWERED
, &rp
,
2591 if (pending_find(MGMT_OP_DISCONNECT
, hdev
)) {
2592 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
2593 MGMT_STATUS_BUSY
, &rp
, sizeof(rp
));
2597 if (cp
->addr
.type
== BDADDR_BREDR
)
2598 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
2601 conn
= hci_conn_hash_lookup_le(hdev
, &cp
->addr
.bdaddr
,
2602 le_addr_type(cp
->addr
.type
));
2604 if (!conn
|| conn
->state
== BT_OPEN
|| conn
->state
== BT_CLOSED
) {
2605 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
2606 MGMT_STATUS_NOT_CONNECTED
, &rp
,
2611 cmd
= mgmt_pending_add(sk
, MGMT_OP_DISCONNECT
, hdev
, data
, len
);
2617 cmd
->cmd_complete
= generic_cmd_complete
;
2619 err
= hci_disconnect(conn
, HCI_ERROR_REMOTE_USER_TERM
);
2621 mgmt_pending_remove(cmd
);
2624 hci_dev_unlock(hdev
);
2628 static u8
link_to_bdaddr(u8 link_type
, u8 addr_type
)
2630 switch (link_type
) {
2632 switch (addr_type
) {
2633 case ADDR_LE_DEV_PUBLIC
:
2634 return BDADDR_LE_PUBLIC
;
2637 /* Fallback to LE Random address type */
2638 return BDADDR_LE_RANDOM
;
2642 /* Fallback to BR/EDR type */
2643 return BDADDR_BREDR
;
2647 static int get_connections(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2650 struct mgmt_rp_get_connections
*rp
;
2655 bt_dev_dbg(hdev
, "sock %p", sk
);
2659 if (!hdev_is_powered(hdev
)) {
2660 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_GET_CONNECTIONS
,
2661 MGMT_STATUS_NOT_POWERED
);
2666 list_for_each_entry(c
, &hdev
->conn_hash
.list
, list
) {
2667 if (test_bit(HCI_CONN_MGMT_CONNECTED
, &c
->flags
))
2671 rp
= kmalloc(struct_size(rp
, addr
, i
), GFP_KERNEL
);
2678 list_for_each_entry(c
, &hdev
->conn_hash
.list
, list
) {
2679 if (!test_bit(HCI_CONN_MGMT_CONNECTED
, &c
->flags
))
2681 bacpy(&rp
->addr
[i
].bdaddr
, &c
->dst
);
2682 rp
->addr
[i
].type
= link_to_bdaddr(c
->type
, c
->dst_type
);
2683 if (c
->type
== SCO_LINK
|| c
->type
== ESCO_LINK
)
2688 rp
->conn_count
= cpu_to_le16(i
);
2690 /* Recalculate length in case of filtered SCO connections, etc */
2691 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONNECTIONS
, 0, rp
,
2692 struct_size(rp
, addr
, i
));
2697 hci_dev_unlock(hdev
);
2701 static int send_pin_code_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
2702 struct mgmt_cp_pin_code_neg_reply
*cp
)
2704 struct mgmt_pending_cmd
*cmd
;
2707 cmd
= mgmt_pending_add(sk
, MGMT_OP_PIN_CODE_NEG_REPLY
, hdev
, cp
,
2712 cmd
->cmd_complete
= addr_cmd_complete
;
2714 err
= hci_send_cmd(hdev
, HCI_OP_PIN_CODE_NEG_REPLY
,
2715 sizeof(cp
->addr
.bdaddr
), &cp
->addr
.bdaddr
);
2717 mgmt_pending_remove(cmd
);
2722 static int pin_code_reply(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2725 struct hci_conn
*conn
;
2726 struct mgmt_cp_pin_code_reply
*cp
= data
;
2727 struct hci_cp_pin_code_reply reply
;
2728 struct mgmt_pending_cmd
*cmd
;
2731 bt_dev_dbg(hdev
, "sock %p", sk
);
2735 if (!hdev_is_powered(hdev
)) {
2736 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_PIN_CODE_REPLY
,
2737 MGMT_STATUS_NOT_POWERED
);
2741 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
, &cp
->addr
.bdaddr
);
2743 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_PIN_CODE_REPLY
,
2744 MGMT_STATUS_NOT_CONNECTED
);
2748 if (conn
->pending_sec_level
== BT_SECURITY_HIGH
&& cp
->pin_len
!= 16) {
2749 struct mgmt_cp_pin_code_neg_reply ncp
;
2751 memcpy(&ncp
.addr
, &cp
->addr
, sizeof(ncp
.addr
));
2753 bt_dev_err(hdev
, "PIN code is not 16 bytes long");
2755 err
= send_pin_code_neg_reply(sk
, hdev
, &ncp
);
2757 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_PIN_CODE_REPLY
,
2758 MGMT_STATUS_INVALID_PARAMS
);
2763 cmd
= mgmt_pending_add(sk
, MGMT_OP_PIN_CODE_REPLY
, hdev
, data
, len
);
2769 cmd
->cmd_complete
= addr_cmd_complete
;
2771 bacpy(&reply
.bdaddr
, &cp
->addr
.bdaddr
);
2772 reply
.pin_len
= cp
->pin_len
;
2773 memcpy(reply
.pin_code
, cp
->pin_code
, sizeof(reply
.pin_code
));
2775 err
= hci_send_cmd(hdev
, HCI_OP_PIN_CODE_REPLY
, sizeof(reply
), &reply
);
2777 mgmt_pending_remove(cmd
);
2780 hci_dev_unlock(hdev
);
2784 static int set_io_capability(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2787 struct mgmt_cp_set_io_capability
*cp
= data
;
2789 bt_dev_dbg(hdev
, "sock %p", sk
);
2791 if (cp
->io_capability
> SMP_IO_KEYBOARD_DISPLAY
)
2792 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_IO_CAPABILITY
,
2793 MGMT_STATUS_INVALID_PARAMS
);
2797 hdev
->io_capability
= cp
->io_capability
;
2799 bt_dev_dbg(hdev
, "IO capability set to 0x%02x", hdev
->io_capability
);
2801 hci_dev_unlock(hdev
);
2803 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_IO_CAPABILITY
, 0,
2807 static struct mgmt_pending_cmd
*find_pairing(struct hci_conn
*conn
)
2809 struct hci_dev
*hdev
= conn
->hdev
;
2810 struct mgmt_pending_cmd
*cmd
;
2812 list_for_each_entry(cmd
, &hdev
->mgmt_pending
, list
) {
2813 if (cmd
->opcode
!= MGMT_OP_PAIR_DEVICE
)
2816 if (cmd
->user_data
!= conn
)
2825 static int pairing_complete(struct mgmt_pending_cmd
*cmd
, u8 status
)
2827 struct mgmt_rp_pair_device rp
;
2828 struct hci_conn
*conn
= cmd
->user_data
;
2831 bacpy(&rp
.addr
.bdaddr
, &conn
->dst
);
2832 rp
.addr
.type
= link_to_bdaddr(conn
->type
, conn
->dst_type
);
2834 err
= mgmt_cmd_complete(cmd
->sk
, cmd
->index
, MGMT_OP_PAIR_DEVICE
,
2835 status
, &rp
, sizeof(rp
));
2837 /* So we don't get further callbacks for this connection */
2838 conn
->connect_cfm_cb
= NULL
;
2839 conn
->security_cfm_cb
= NULL
;
2840 conn
->disconn_cfm_cb
= NULL
;
2842 hci_conn_drop(conn
);
2844 /* The device is paired so there is no need to remove
2845 * its connection parameters anymore.
2847 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND
, &conn
->flags
);
2854 void mgmt_smp_complete(struct hci_conn
*conn
, bool complete
)
2856 u8 status
= complete
? MGMT_STATUS_SUCCESS
: MGMT_STATUS_FAILED
;
2857 struct mgmt_pending_cmd
*cmd
;
2859 cmd
= find_pairing(conn
);
2861 cmd
->cmd_complete(cmd
, status
);
2862 mgmt_pending_remove(cmd
);
2866 static void pairing_complete_cb(struct hci_conn
*conn
, u8 status
)
2868 struct mgmt_pending_cmd
*cmd
;
2870 BT_DBG("status %u", status
);
2872 cmd
= find_pairing(conn
);
2874 BT_DBG("Unable to find a pending command");
2878 cmd
->cmd_complete(cmd
, mgmt_status(status
));
2879 mgmt_pending_remove(cmd
);
2882 static void le_pairing_complete_cb(struct hci_conn
*conn
, u8 status
)
2884 struct mgmt_pending_cmd
*cmd
;
2886 BT_DBG("status %u", status
);
2891 cmd
= find_pairing(conn
);
2893 BT_DBG("Unable to find a pending command");
2897 cmd
->cmd_complete(cmd
, mgmt_status(status
));
2898 mgmt_pending_remove(cmd
);
2901 static int pair_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2904 struct mgmt_cp_pair_device
*cp
= data
;
2905 struct mgmt_rp_pair_device rp
;
2906 struct mgmt_pending_cmd
*cmd
;
2907 u8 sec_level
, auth_type
;
2908 struct hci_conn
*conn
;
2911 bt_dev_dbg(hdev
, "sock %p", sk
);
2913 memset(&rp
, 0, sizeof(rp
));
2914 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
2915 rp
.addr
.type
= cp
->addr
.type
;
2917 if (!bdaddr_type_is_valid(cp
->addr
.type
))
2918 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
2919 MGMT_STATUS_INVALID_PARAMS
,
2922 if (cp
->io_cap
> SMP_IO_KEYBOARD_DISPLAY
)
2923 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
2924 MGMT_STATUS_INVALID_PARAMS
,
2929 if (!hdev_is_powered(hdev
)) {
2930 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
2931 MGMT_STATUS_NOT_POWERED
, &rp
,
2936 if (hci_bdaddr_is_paired(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
)) {
2937 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
2938 MGMT_STATUS_ALREADY_PAIRED
, &rp
,
2943 sec_level
= BT_SECURITY_MEDIUM
;
2944 auth_type
= HCI_AT_DEDICATED_BONDING
;
2946 if (cp
->addr
.type
== BDADDR_BREDR
) {
2947 conn
= hci_connect_acl(hdev
, &cp
->addr
.bdaddr
, sec_level
,
2948 auth_type
, CONN_REASON_PAIR_DEVICE
);
2950 u8 addr_type
= le_addr_type(cp
->addr
.type
);
2951 struct hci_conn_params
*p
;
2953 /* When pairing a new device, it is expected to remember
2954 * this device for future connections. Adding the connection
2955 * parameter information ahead of time allows tracking
2956 * of the slave preferred values and will speed up any
2957 * further connection establishment.
2959 * If connection parameters already exist, then they
2960 * will be kept and this function does nothing.
2962 p
= hci_conn_params_add(hdev
, &cp
->addr
.bdaddr
, addr_type
);
2964 if (p
->auto_connect
== HCI_AUTO_CONN_EXPLICIT
)
2965 p
->auto_connect
= HCI_AUTO_CONN_DISABLED
;
2967 conn
= hci_connect_le_scan(hdev
, &cp
->addr
.bdaddr
, addr_type
,
2968 sec_level
, HCI_LE_CONN_TIMEOUT
,
2969 CONN_REASON_PAIR_DEVICE
);
2975 if (PTR_ERR(conn
) == -EBUSY
)
2976 status
= MGMT_STATUS_BUSY
;
2977 else if (PTR_ERR(conn
) == -EOPNOTSUPP
)
2978 status
= MGMT_STATUS_NOT_SUPPORTED
;
2979 else if (PTR_ERR(conn
) == -ECONNREFUSED
)
2980 status
= MGMT_STATUS_REJECTED
;
2982 status
= MGMT_STATUS_CONNECT_FAILED
;
2984 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
2985 status
, &rp
, sizeof(rp
));
2989 if (conn
->connect_cfm_cb
) {
2990 hci_conn_drop(conn
);
2991 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
2992 MGMT_STATUS_BUSY
, &rp
, sizeof(rp
));
2996 cmd
= mgmt_pending_add(sk
, MGMT_OP_PAIR_DEVICE
, hdev
, data
, len
);
2999 hci_conn_drop(conn
);
3003 cmd
->cmd_complete
= pairing_complete
;
3005 /* For LE, just connecting isn't a proof that the pairing finished */
3006 if (cp
->addr
.type
== BDADDR_BREDR
) {
3007 conn
->connect_cfm_cb
= pairing_complete_cb
;
3008 conn
->security_cfm_cb
= pairing_complete_cb
;
3009 conn
->disconn_cfm_cb
= pairing_complete_cb
;
3011 conn
->connect_cfm_cb
= le_pairing_complete_cb
;
3012 conn
->security_cfm_cb
= le_pairing_complete_cb
;
3013 conn
->disconn_cfm_cb
= le_pairing_complete_cb
;
3016 conn
->io_capability
= cp
->io_cap
;
3017 cmd
->user_data
= hci_conn_get(conn
);
3019 if ((conn
->state
== BT_CONNECTED
|| conn
->state
== BT_CONFIG
) &&
3020 hci_conn_security(conn
, sec_level
, auth_type
, true)) {
3021 cmd
->cmd_complete(cmd
, 0);
3022 mgmt_pending_remove(cmd
);
3028 hci_dev_unlock(hdev
);
3032 static int cancel_pair_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3035 struct mgmt_addr_info
*addr
= data
;
3036 struct mgmt_pending_cmd
*cmd
;
3037 struct hci_conn
*conn
;
3040 bt_dev_dbg(hdev
, "sock %p", sk
);
3044 if (!hdev_is_powered(hdev
)) {
3045 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
,
3046 MGMT_STATUS_NOT_POWERED
);
3050 cmd
= pending_find(MGMT_OP_PAIR_DEVICE
, hdev
);
3052 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
,
3053 MGMT_STATUS_INVALID_PARAMS
);
3057 conn
= cmd
->user_data
;
3059 if (bacmp(&addr
->bdaddr
, &conn
->dst
) != 0) {
3060 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
,
3061 MGMT_STATUS_INVALID_PARAMS
);
3065 cmd
->cmd_complete(cmd
, MGMT_STATUS_CANCELLED
);
3066 mgmt_pending_remove(cmd
);
3068 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
, 0,
3069 addr
, sizeof(*addr
));
3071 /* Since user doesn't want to proceed with the connection, abort any
3072 * ongoing pairing and then terminate the link if it was created
3073 * because of the pair device action.
3075 if (addr
->type
== BDADDR_BREDR
)
3076 hci_remove_link_key(hdev
, &addr
->bdaddr
);
3078 smp_cancel_and_remove_pairing(hdev
, &addr
->bdaddr
,
3079 le_addr_type(addr
->type
));
3081 if (conn
->conn_reason
== CONN_REASON_PAIR_DEVICE
)
3082 hci_abort_conn(conn
, HCI_ERROR_REMOTE_USER_TERM
);
3085 hci_dev_unlock(hdev
);
3089 static int user_pairing_resp(struct sock
*sk
, struct hci_dev
*hdev
,
3090 struct mgmt_addr_info
*addr
, u16 mgmt_op
,
3091 u16 hci_op
, __le32 passkey
)
3093 struct mgmt_pending_cmd
*cmd
;
3094 struct hci_conn
*conn
;
3099 if (!hdev_is_powered(hdev
)) {
3100 err
= mgmt_cmd_complete(sk
, hdev
->id
, mgmt_op
,
3101 MGMT_STATUS_NOT_POWERED
, addr
,
3106 if (addr
->type
== BDADDR_BREDR
)
3107 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
, &addr
->bdaddr
);
3109 conn
= hci_conn_hash_lookup_le(hdev
, &addr
->bdaddr
,
3110 le_addr_type(addr
->type
));
3113 err
= mgmt_cmd_complete(sk
, hdev
->id
, mgmt_op
,
3114 MGMT_STATUS_NOT_CONNECTED
, addr
,
3119 if (addr
->type
== BDADDR_LE_PUBLIC
|| addr
->type
== BDADDR_LE_RANDOM
) {
3120 err
= smp_user_confirm_reply(conn
, mgmt_op
, passkey
);
3122 err
= mgmt_cmd_complete(sk
, hdev
->id
, mgmt_op
,
3123 MGMT_STATUS_SUCCESS
, addr
,
3126 err
= mgmt_cmd_complete(sk
, hdev
->id
, mgmt_op
,
3127 MGMT_STATUS_FAILED
, addr
,
3133 cmd
= mgmt_pending_add(sk
, mgmt_op
, hdev
, addr
, sizeof(*addr
));
3139 cmd
->cmd_complete
= addr_cmd_complete
;
3141 /* Continue with pairing via HCI */
3142 if (hci_op
== HCI_OP_USER_PASSKEY_REPLY
) {
3143 struct hci_cp_user_passkey_reply cp
;
3145 bacpy(&cp
.bdaddr
, &addr
->bdaddr
);
3146 cp
.passkey
= passkey
;
3147 err
= hci_send_cmd(hdev
, hci_op
, sizeof(cp
), &cp
);
3149 err
= hci_send_cmd(hdev
, hci_op
, sizeof(addr
->bdaddr
),
3153 mgmt_pending_remove(cmd
);
3156 hci_dev_unlock(hdev
);
3160 static int pin_code_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
3161 void *data
, u16 len
)
3163 struct mgmt_cp_pin_code_neg_reply
*cp
= data
;
3165 bt_dev_dbg(hdev
, "sock %p", sk
);
3167 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3168 MGMT_OP_PIN_CODE_NEG_REPLY
,
3169 HCI_OP_PIN_CODE_NEG_REPLY
, 0);
3172 static int user_confirm_reply(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3175 struct mgmt_cp_user_confirm_reply
*cp
= data
;
3177 bt_dev_dbg(hdev
, "sock %p", sk
);
3179 if (len
!= sizeof(*cp
))
3180 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_USER_CONFIRM_REPLY
,
3181 MGMT_STATUS_INVALID_PARAMS
);
3183 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3184 MGMT_OP_USER_CONFIRM_REPLY
,
3185 HCI_OP_USER_CONFIRM_REPLY
, 0);
3188 static int user_confirm_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
3189 void *data
, u16 len
)
3191 struct mgmt_cp_user_confirm_neg_reply
*cp
= data
;
3193 bt_dev_dbg(hdev
, "sock %p", sk
);
3195 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3196 MGMT_OP_USER_CONFIRM_NEG_REPLY
,
3197 HCI_OP_USER_CONFIRM_NEG_REPLY
, 0);
3200 static int user_passkey_reply(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3203 struct mgmt_cp_user_passkey_reply
*cp
= data
;
3205 bt_dev_dbg(hdev
, "sock %p", sk
);
3207 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3208 MGMT_OP_USER_PASSKEY_REPLY
,
3209 HCI_OP_USER_PASSKEY_REPLY
, cp
->passkey
);
3212 static int user_passkey_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
3213 void *data
, u16 len
)
3215 struct mgmt_cp_user_passkey_neg_reply
*cp
= data
;
3217 bt_dev_dbg(hdev
, "sock %p", sk
);
3219 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3220 MGMT_OP_USER_PASSKEY_NEG_REPLY
,
3221 HCI_OP_USER_PASSKEY_NEG_REPLY
, 0);
3224 static void adv_expire(struct hci_dev
*hdev
, u32 flags
)
3226 struct adv_info
*adv_instance
;
3227 struct hci_request req
;
3230 adv_instance
= hci_find_adv_instance(hdev
, hdev
->cur_adv_instance
);
3234 /* stop if current instance doesn't need to be changed */
3235 if (!(adv_instance
->flags
& flags
))
3238 cancel_adv_timeout(hdev
);
3240 adv_instance
= hci_get_next_instance(hdev
, adv_instance
->instance
);
3244 hci_req_init(&req
, hdev
);
3245 err
= __hci_req_schedule_adv_instance(&req
, adv_instance
->instance
,
3250 hci_req_run(&req
, NULL
);
3253 static void set_name_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
3255 struct mgmt_cp_set_local_name
*cp
;
3256 struct mgmt_pending_cmd
*cmd
;
3258 bt_dev_dbg(hdev
, "status 0x%02x", status
);
3262 cmd
= pending_find(MGMT_OP_SET_LOCAL_NAME
, hdev
);
3269 mgmt_cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
,
3270 mgmt_status(status
));
3272 mgmt_cmd_complete(cmd
->sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
, 0,
3275 if (hci_dev_test_flag(hdev
, HCI_LE_ADV
))
3276 adv_expire(hdev
, MGMT_ADV_FLAG_LOCAL_NAME
);
3279 mgmt_pending_remove(cmd
);
3282 hci_dev_unlock(hdev
);
3285 static int set_local_name(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3288 struct mgmt_cp_set_local_name
*cp
= data
;
3289 struct mgmt_pending_cmd
*cmd
;
3290 struct hci_request req
;
3293 bt_dev_dbg(hdev
, "sock %p", sk
);
3297 /* If the old values are the same as the new ones just return a
3298 * direct command complete event.
3300 if (!memcmp(hdev
->dev_name
, cp
->name
, sizeof(hdev
->dev_name
)) &&
3301 !memcmp(hdev
->short_name
, cp
->short_name
,
3302 sizeof(hdev
->short_name
))) {
3303 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
, 0,
3308 memcpy(hdev
->short_name
, cp
->short_name
, sizeof(hdev
->short_name
));
3310 if (!hdev_is_powered(hdev
)) {
3311 memcpy(hdev
->dev_name
, cp
->name
, sizeof(hdev
->dev_name
));
3313 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
, 0,
3318 err
= mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED
, hdev
, data
,
3319 len
, HCI_MGMT_LOCAL_NAME_EVENTS
, sk
);
3320 ext_info_changed(hdev
, sk
);
3325 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_LOCAL_NAME
, hdev
, data
, len
);
3331 memcpy(hdev
->dev_name
, cp
->name
, sizeof(hdev
->dev_name
));
3333 hci_req_init(&req
, hdev
);
3335 if (lmp_bredr_capable(hdev
)) {
3336 __hci_req_update_name(&req
);
3337 __hci_req_update_eir(&req
);
3340 /* The name is stored in the scan response data and so
3341 * no need to udpate the advertising data here.
3343 if (lmp_le_capable(hdev
) && hci_dev_test_flag(hdev
, HCI_ADVERTISING
))
3344 __hci_req_update_scan_rsp_data(&req
, hdev
->cur_adv_instance
);
3346 err
= hci_req_run(&req
, set_name_complete
);
3348 mgmt_pending_remove(cmd
);
3351 hci_dev_unlock(hdev
);
3355 static int set_appearance(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3358 struct mgmt_cp_set_appearance
*cp
= data
;
3362 bt_dev_dbg(hdev
, "sock %p", sk
);
3364 if (!lmp_le_capable(hdev
))
3365 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_APPEARANCE
,
3366 MGMT_STATUS_NOT_SUPPORTED
);
3368 appearance
= le16_to_cpu(cp
->appearance
);
3372 if (hdev
->appearance
!= appearance
) {
3373 hdev
->appearance
= appearance
;
3375 if (hci_dev_test_flag(hdev
, HCI_LE_ADV
))
3376 adv_expire(hdev
, MGMT_ADV_FLAG_APPEARANCE
);
3378 ext_info_changed(hdev
, sk
);
3381 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_APPEARANCE
, 0, NULL
,
3384 hci_dev_unlock(hdev
);
3389 static int get_phy_configuration(struct sock
*sk
, struct hci_dev
*hdev
,
3390 void *data
, u16 len
)
3392 struct mgmt_rp_get_phy_configuration rp
;
3394 bt_dev_dbg(hdev
, "sock %p", sk
);
3398 memset(&rp
, 0, sizeof(rp
));
3400 rp
.supported_phys
= cpu_to_le32(get_supported_phys(hdev
));
3401 rp
.selected_phys
= cpu_to_le32(get_selected_phys(hdev
));
3402 rp
.configurable_phys
= cpu_to_le32(get_configurable_phys(hdev
));
3404 hci_dev_unlock(hdev
);
3406 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_PHY_CONFIGURATION
, 0,
3410 int mgmt_phy_configuration_changed(struct hci_dev
*hdev
, struct sock
*skip
)
3412 struct mgmt_ev_phy_configuration_changed ev
;
3414 memset(&ev
, 0, sizeof(ev
));
3416 ev
.selected_phys
= cpu_to_le32(get_selected_phys(hdev
));
3418 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED
, hdev
, &ev
,
3422 static void set_default_phy_complete(struct hci_dev
*hdev
, u8 status
,
3423 u16 opcode
, struct sk_buff
*skb
)
3425 struct mgmt_pending_cmd
*cmd
;
3427 bt_dev_dbg(hdev
, "status 0x%02x", status
);
3431 cmd
= pending_find(MGMT_OP_SET_PHY_CONFIGURATION
, hdev
);
3436 mgmt_cmd_status(cmd
->sk
, hdev
->id
,
3437 MGMT_OP_SET_PHY_CONFIGURATION
,
3438 mgmt_status(status
));
3440 mgmt_cmd_complete(cmd
->sk
, hdev
->id
,
3441 MGMT_OP_SET_PHY_CONFIGURATION
, 0,
3444 mgmt_phy_configuration_changed(hdev
, cmd
->sk
);
3447 mgmt_pending_remove(cmd
);
3450 hci_dev_unlock(hdev
);
3453 static int set_phy_configuration(struct sock
*sk
, struct hci_dev
*hdev
,
3454 void *data
, u16 len
)
3456 struct mgmt_cp_set_phy_configuration
*cp
= data
;
3457 struct hci_cp_le_set_default_phy cp_phy
;
3458 struct mgmt_pending_cmd
*cmd
;
3459 struct hci_request req
;
3460 u32 selected_phys
, configurable_phys
, supported_phys
, unconfigure_phys
;
3461 u16 pkt_type
= (HCI_DH1
| HCI_DM1
);
3462 bool changed
= false;
3465 bt_dev_dbg(hdev
, "sock %p", sk
);
3467 configurable_phys
= get_configurable_phys(hdev
);
3468 supported_phys
= get_supported_phys(hdev
);
3469 selected_phys
= __le32_to_cpu(cp
->selected_phys
);
3471 if (selected_phys
& ~supported_phys
)
3472 return mgmt_cmd_status(sk
, hdev
->id
,
3473 MGMT_OP_SET_PHY_CONFIGURATION
,
3474 MGMT_STATUS_INVALID_PARAMS
);
3476 unconfigure_phys
= supported_phys
& ~configurable_phys
;
3478 if ((selected_phys
& unconfigure_phys
) != unconfigure_phys
)
3479 return mgmt_cmd_status(sk
, hdev
->id
,
3480 MGMT_OP_SET_PHY_CONFIGURATION
,
3481 MGMT_STATUS_INVALID_PARAMS
);
3483 if (selected_phys
== get_selected_phys(hdev
))
3484 return mgmt_cmd_complete(sk
, hdev
->id
,
3485 MGMT_OP_SET_PHY_CONFIGURATION
,
3490 if (!hdev_is_powered(hdev
)) {
3491 err
= mgmt_cmd_status(sk
, hdev
->id
,
3492 MGMT_OP_SET_PHY_CONFIGURATION
,
3493 MGMT_STATUS_REJECTED
);
3497 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION
, hdev
)) {
3498 err
= mgmt_cmd_status(sk
, hdev
->id
,
3499 MGMT_OP_SET_PHY_CONFIGURATION
,
3504 if (selected_phys
& MGMT_PHY_BR_1M_3SLOT
)
3505 pkt_type
|= (HCI_DH3
| HCI_DM3
);
3507 pkt_type
&= ~(HCI_DH3
| HCI_DM3
);
3509 if (selected_phys
& MGMT_PHY_BR_1M_5SLOT
)
3510 pkt_type
|= (HCI_DH5
| HCI_DM5
);
3512 pkt_type
&= ~(HCI_DH5
| HCI_DM5
);
3514 if (selected_phys
& MGMT_PHY_EDR_2M_1SLOT
)
3515 pkt_type
&= ~HCI_2DH1
;
3517 pkt_type
|= HCI_2DH1
;
3519 if (selected_phys
& MGMT_PHY_EDR_2M_3SLOT
)
3520 pkt_type
&= ~HCI_2DH3
;
3522 pkt_type
|= HCI_2DH3
;
3524 if (selected_phys
& MGMT_PHY_EDR_2M_5SLOT
)
3525 pkt_type
&= ~HCI_2DH5
;
3527 pkt_type
|= HCI_2DH5
;
3529 if (selected_phys
& MGMT_PHY_EDR_3M_1SLOT
)
3530 pkt_type
&= ~HCI_3DH1
;
3532 pkt_type
|= HCI_3DH1
;
3534 if (selected_phys
& MGMT_PHY_EDR_3M_3SLOT
)
3535 pkt_type
&= ~HCI_3DH3
;
3537 pkt_type
|= HCI_3DH3
;
3539 if (selected_phys
& MGMT_PHY_EDR_3M_5SLOT
)
3540 pkt_type
&= ~HCI_3DH5
;
3542 pkt_type
|= HCI_3DH5
;
3544 if (pkt_type
!= hdev
->pkt_type
) {
3545 hdev
->pkt_type
= pkt_type
;
3549 if ((selected_phys
& MGMT_PHY_LE_MASK
) ==
3550 (get_selected_phys(hdev
) & MGMT_PHY_LE_MASK
)) {
3552 mgmt_phy_configuration_changed(hdev
, sk
);
3554 err
= mgmt_cmd_complete(sk
, hdev
->id
,
3555 MGMT_OP_SET_PHY_CONFIGURATION
,
3561 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_PHY_CONFIGURATION
, hdev
, data
,
3568 hci_req_init(&req
, hdev
);
3570 memset(&cp_phy
, 0, sizeof(cp_phy
));
3572 if (!(selected_phys
& MGMT_PHY_LE_TX_MASK
))
3573 cp_phy
.all_phys
|= 0x01;
3575 if (!(selected_phys
& MGMT_PHY_LE_RX_MASK
))
3576 cp_phy
.all_phys
|= 0x02;
3578 if (selected_phys
& MGMT_PHY_LE_1M_TX
)
3579 cp_phy
.tx_phys
|= HCI_LE_SET_PHY_1M
;
3581 if (selected_phys
& MGMT_PHY_LE_2M_TX
)
3582 cp_phy
.tx_phys
|= HCI_LE_SET_PHY_2M
;
3584 if (selected_phys
& MGMT_PHY_LE_CODED_TX
)
3585 cp_phy
.tx_phys
|= HCI_LE_SET_PHY_CODED
;
3587 if (selected_phys
& MGMT_PHY_LE_1M_RX
)
3588 cp_phy
.rx_phys
|= HCI_LE_SET_PHY_1M
;
3590 if (selected_phys
& MGMT_PHY_LE_2M_RX
)
3591 cp_phy
.rx_phys
|= HCI_LE_SET_PHY_2M
;
3593 if (selected_phys
& MGMT_PHY_LE_CODED_RX
)
3594 cp_phy
.rx_phys
|= HCI_LE_SET_PHY_CODED
;
3596 hci_req_add(&req
, HCI_OP_LE_SET_DEFAULT_PHY
, sizeof(cp_phy
), &cp_phy
);
3598 err
= hci_req_run_skb(&req
, set_default_phy_complete
);
3600 mgmt_pending_remove(cmd
);
3603 hci_dev_unlock(hdev
);
3608 static int set_blocked_keys(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3611 int err
= MGMT_STATUS_SUCCESS
;
3612 struct mgmt_cp_set_blocked_keys
*keys
= data
;
3613 const u16 max_key_count
= ((U16_MAX
- sizeof(*keys
)) /
3614 sizeof(struct mgmt_blocked_key_info
));
3615 u16 key_count
, expected_len
;
3618 bt_dev_dbg(hdev
, "sock %p", sk
);
3620 key_count
= __le16_to_cpu(keys
->key_count
);
3621 if (key_count
> max_key_count
) {
3622 bt_dev_err(hdev
, "too big key_count value %u", key_count
);
3623 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BLOCKED_KEYS
,
3624 MGMT_STATUS_INVALID_PARAMS
);
3627 expected_len
= struct_size(keys
, keys
, key_count
);
3628 if (expected_len
!= len
) {
3629 bt_dev_err(hdev
, "expected %u bytes, got %u bytes",
3631 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BLOCKED_KEYS
,
3632 MGMT_STATUS_INVALID_PARAMS
);
3637 hci_blocked_keys_clear(hdev
);
3639 for (i
= 0; i
< keys
->key_count
; ++i
) {
3640 struct blocked_key
*b
= kzalloc(sizeof(*b
), GFP_KERNEL
);
3643 err
= MGMT_STATUS_NO_RESOURCES
;
3647 b
->type
= keys
->keys
[i
].type
;
3648 memcpy(b
->val
, keys
->keys
[i
].val
, sizeof(b
->val
));
3649 list_add_rcu(&b
->list
, &hdev
->blocked_keys
);
3651 hci_dev_unlock(hdev
);
3653 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_BLOCKED_KEYS
,
3657 static int set_wideband_speech(struct sock
*sk
, struct hci_dev
*hdev
,
3658 void *data
, u16 len
)
3660 struct mgmt_mode
*cp
= data
;
3662 bool changed
= false;
3664 bt_dev_dbg(hdev
, "sock %p", sk
);
3666 if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED
, &hdev
->quirks
))
3667 return mgmt_cmd_status(sk
, hdev
->id
,
3668 MGMT_OP_SET_WIDEBAND_SPEECH
,
3669 MGMT_STATUS_NOT_SUPPORTED
);
3671 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
3672 return mgmt_cmd_status(sk
, hdev
->id
,
3673 MGMT_OP_SET_WIDEBAND_SPEECH
,
3674 MGMT_STATUS_INVALID_PARAMS
);
3678 if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH
, hdev
)) {
3679 err
= mgmt_cmd_status(sk
, hdev
->id
,
3680 MGMT_OP_SET_WIDEBAND_SPEECH
,
3685 if (hdev_is_powered(hdev
) &&
3686 !!cp
->val
!= hci_dev_test_flag(hdev
,
3687 HCI_WIDEBAND_SPEECH_ENABLED
)) {
3688 err
= mgmt_cmd_status(sk
, hdev
->id
,
3689 MGMT_OP_SET_WIDEBAND_SPEECH
,
3690 MGMT_STATUS_REJECTED
);
3695 changed
= !hci_dev_test_and_set_flag(hdev
,
3696 HCI_WIDEBAND_SPEECH_ENABLED
);
3698 changed
= hci_dev_test_and_clear_flag(hdev
,
3699 HCI_WIDEBAND_SPEECH_ENABLED
);
3701 err
= send_settings_rsp(sk
, MGMT_OP_SET_WIDEBAND_SPEECH
, hdev
);
3706 err
= new_settings(hdev
, sk
);
3709 hci_dev_unlock(hdev
);
3713 static int read_security_info(struct sock
*sk
, struct hci_dev
*hdev
,
3714 void *data
, u16 data_len
)
3717 struct mgmt_rp_read_security_info
*rp
= (void *)buf
;
3721 bt_dev_dbg(hdev
, "sock %p", sk
);
3723 memset(&buf
, 0, sizeof(buf
));
3727 /* When the Read Simple Pairing Options command is supported, then
3728 * the remote public key validation is supported.
3730 if (hdev
->commands
[41] & 0x08)
3731 flags
|= 0x01; /* Remote public key validation (BR/EDR) */
3733 flags
|= 0x02; /* Remote public key validation (LE) */
3735 /* When the Read Encryption Key Size command is supported, then the
3736 * encryption key size is enforced.
3738 if (hdev
->commands
[20] & 0x10)
3739 flags
|= 0x04; /* Encryption key size enforcement (BR/EDR) */
3741 flags
|= 0x08; /* Encryption key size enforcement (LE) */
3743 sec_len
= eir_append_data(rp
->sec
, sec_len
, 0x01, &flags
, 1);
3745 /* When the Read Simple Pairing Options command is supported, then
3746 * also max encryption key size information is provided.
3748 if (hdev
->commands
[41] & 0x08)
3749 sec_len
= eir_append_le16(rp
->sec
, sec_len
, 0x02,
3750 hdev
->max_enc_key_size
);
3752 sec_len
= eir_append_le16(rp
->sec
, sec_len
, 0x03, SMP_MAX_ENC_KEY_SIZE
);
3754 rp
->sec_len
= cpu_to_le16(sec_len
);
3756 hci_dev_unlock(hdev
);
3758 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_READ_SECURITY_INFO
, 0,
3759 rp
, sizeof(*rp
) + sec_len
);
3762 #ifdef CONFIG_BT_FEATURE_DEBUG
3763 /* d4992530-b9ec-469f-ab01-6c481c47da1c */
3764 static const u8 debug_uuid
[16] = {
3765 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
3766 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
3770 /* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
3771 static const u8 simult_central_periph_uuid
[16] = {
3772 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
3773 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
3776 /* 15c0a148-c273-11ea-b3de-0242ac130004 */
3777 static const u8 rpa_resolution_uuid
[16] = {
3778 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
3779 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
3782 static int read_exp_features_info(struct sock
*sk
, struct hci_dev
*hdev
,
3783 void *data
, u16 data_len
)
3785 char buf
[62]; /* Enough space for 3 features */
3786 struct mgmt_rp_read_exp_features_info
*rp
= (void *)buf
;
3790 bt_dev_dbg(hdev
, "sock %p", sk
);
3792 memset(&buf
, 0, sizeof(buf
));
3794 #ifdef CONFIG_BT_FEATURE_DEBUG
3796 flags
= bt_dbg_get() ? BIT(0) : 0;
3798 memcpy(rp
->features
[idx
].uuid
, debug_uuid
, 16);
3799 rp
->features
[idx
].flags
= cpu_to_le32(flags
);
3805 if (test_bit(HCI_QUIRK_VALID_LE_STATES
, &hdev
->quirks
) &&
3806 (hdev
->le_states
[4] & 0x08) && /* Central */
3807 (hdev
->le_states
[4] & 0x40) && /* Peripheral */
3808 (hdev
->le_states
[3] & 0x10)) /* Simultaneous */
3813 memcpy(rp
->features
[idx
].uuid
, simult_central_periph_uuid
, 16);
3814 rp
->features
[idx
].flags
= cpu_to_le32(flags
);
3818 if (hdev
&& use_ll_privacy(hdev
)) {
3819 if (hci_dev_test_flag(hdev
, HCI_ENABLE_LL_PRIVACY
))
3820 flags
= BIT(0) | BIT(1);
3824 memcpy(rp
->features
[idx
].uuid
, rpa_resolution_uuid
, 16);
3825 rp
->features
[idx
].flags
= cpu_to_le32(flags
);
3829 rp
->feature_count
= cpu_to_le16(idx
);
3831 /* After reading the experimental features information, enable
3832 * the events to update client on any future change.
3834 hci_sock_set_flag(sk
, HCI_MGMT_EXP_FEATURE_EVENTS
);
3836 return mgmt_cmd_complete(sk
, hdev
? hdev
->id
: MGMT_INDEX_NONE
,
3837 MGMT_OP_READ_EXP_FEATURES_INFO
,
3838 0, rp
, sizeof(*rp
) + (20 * idx
));
3841 static int exp_ll_privacy_feature_changed(bool enabled
, struct hci_dev
*hdev
,
3844 struct mgmt_ev_exp_feature_changed ev
;
3846 memset(&ev
, 0, sizeof(ev
));
3847 memcpy(ev
.uuid
, rpa_resolution_uuid
, 16);
3848 ev
.flags
= cpu_to_le32((enabled
? BIT(0) : 0) | BIT(1));
3850 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED
, hdev
,
3852 HCI_MGMT_EXP_FEATURE_EVENTS
, skip
);
3856 #ifdef CONFIG_BT_FEATURE_DEBUG
3857 static int exp_debug_feature_changed(bool enabled
, struct sock
*skip
)
3859 struct mgmt_ev_exp_feature_changed ev
;
3861 memset(&ev
, 0, sizeof(ev
));
3862 memcpy(ev
.uuid
, debug_uuid
, 16);
3863 ev
.flags
= cpu_to_le32(enabled
? BIT(0) : 0);
3865 return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED
, NULL
,
3867 HCI_MGMT_EXP_FEATURE_EVENTS
, skip
);
3871 static int set_exp_feature(struct sock
*sk
, struct hci_dev
*hdev
,
3872 void *data
, u16 data_len
)
3874 struct mgmt_cp_set_exp_feature
*cp
= data
;
3875 struct mgmt_rp_set_exp_feature rp
;
3877 bt_dev_dbg(hdev
, "sock %p", sk
);
3879 if (!memcmp(cp
->uuid
, ZERO_KEY
, 16)) {
3880 memset(rp
.uuid
, 0, 16);
3881 rp
.flags
= cpu_to_le32(0);
3883 #ifdef CONFIG_BT_FEATURE_DEBUG
3885 bool changed
= bt_dbg_get();
3890 exp_debug_feature_changed(false, sk
);
3894 if (hdev
&& use_ll_privacy(hdev
) && !hdev_is_powered(hdev
)) {
3895 bool changed
= hci_dev_test_flag(hdev
,
3896 HCI_ENABLE_LL_PRIVACY
);
3898 hci_dev_clear_flag(hdev
, HCI_ENABLE_LL_PRIVACY
);
3901 exp_ll_privacy_feature_changed(false, hdev
, sk
);
3904 hci_sock_set_flag(sk
, HCI_MGMT_EXP_FEATURE_EVENTS
);
3906 return mgmt_cmd_complete(sk
, hdev
? hdev
->id
: MGMT_INDEX_NONE
,
3907 MGMT_OP_SET_EXP_FEATURE
, 0,
3911 #ifdef CONFIG_BT_FEATURE_DEBUG
3912 if (!memcmp(cp
->uuid
, debug_uuid
, 16)) {
3916 /* Command requires to use the non-controller index */
3918 return mgmt_cmd_status(sk
, hdev
->id
,
3919 MGMT_OP_SET_EXP_FEATURE
,
3920 MGMT_STATUS_INVALID_INDEX
);
3922 /* Parameters are limited to a single octet */
3923 if (data_len
!= MGMT_SET_EXP_FEATURE_SIZE
+ 1)
3924 return mgmt_cmd_status(sk
, MGMT_INDEX_NONE
,
3925 MGMT_OP_SET_EXP_FEATURE
,
3926 MGMT_STATUS_INVALID_PARAMS
);
3928 /* Only boolean on/off is supported */
3929 if (cp
->param
[0] != 0x00 && cp
->param
[0] != 0x01)
3930 return mgmt_cmd_status(sk
, MGMT_INDEX_NONE
,
3931 MGMT_OP_SET_EXP_FEATURE
,
3932 MGMT_STATUS_INVALID_PARAMS
);
3934 val
= !!cp
->param
[0];
3935 changed
= val
? !bt_dbg_get() : bt_dbg_get();
3938 memcpy(rp
.uuid
, debug_uuid
, 16);
3939 rp
.flags
= cpu_to_le32(val
? BIT(0) : 0);
3941 hci_sock_set_flag(sk
, HCI_MGMT_EXP_FEATURE_EVENTS
);
3943 err
= mgmt_cmd_complete(sk
, MGMT_INDEX_NONE
,
3944 MGMT_OP_SET_EXP_FEATURE
, 0,
3948 exp_debug_feature_changed(val
, sk
);
3954 if (!memcmp(cp
->uuid
, rpa_resolution_uuid
, 16)) {
3959 /* Command requires to use the controller index */
3961 return mgmt_cmd_status(sk
, MGMT_INDEX_NONE
,
3962 MGMT_OP_SET_EXP_FEATURE
,
3963 MGMT_STATUS_INVALID_INDEX
);
3965 /* Changes can only be made when controller is powered down */
3966 if (hdev_is_powered(hdev
))
3967 return mgmt_cmd_status(sk
, hdev
->id
,
3968 MGMT_OP_SET_EXP_FEATURE
,
3969 MGMT_STATUS_NOT_POWERED
);
3971 /* Parameters are limited to a single octet */
3972 if (data_len
!= MGMT_SET_EXP_FEATURE_SIZE
+ 1)
3973 return mgmt_cmd_status(sk
, hdev
->id
,
3974 MGMT_OP_SET_EXP_FEATURE
,
3975 MGMT_STATUS_INVALID_PARAMS
);
3977 /* Only boolean on/off is supported */
3978 if (cp
->param
[0] != 0x00 && cp
->param
[0] != 0x01)
3979 return mgmt_cmd_status(sk
, hdev
->id
,
3980 MGMT_OP_SET_EXP_FEATURE
,
3981 MGMT_STATUS_INVALID_PARAMS
);
3983 val
= !!cp
->param
[0];
3986 changed
= !hci_dev_test_flag(hdev
,
3987 HCI_ENABLE_LL_PRIVACY
);
3988 hci_dev_set_flag(hdev
, HCI_ENABLE_LL_PRIVACY
);
3989 hci_dev_clear_flag(hdev
, HCI_ADVERTISING
);
3991 /* Enable LL privacy + supported settings changed */
3992 flags
= BIT(0) | BIT(1);
3994 changed
= hci_dev_test_flag(hdev
,
3995 HCI_ENABLE_LL_PRIVACY
);
3996 hci_dev_clear_flag(hdev
, HCI_ENABLE_LL_PRIVACY
);
3998 /* Disable LL privacy + supported settings changed */
4002 memcpy(rp
.uuid
, rpa_resolution_uuid
, 16);
4003 rp
.flags
= cpu_to_le32(flags
);
4005 hci_sock_set_flag(sk
, HCI_MGMT_EXP_FEATURE_EVENTS
);
4007 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4008 MGMT_OP_SET_EXP_FEATURE
, 0,
4012 exp_ll_privacy_feature_changed(val
, hdev
, sk
);
4017 return mgmt_cmd_status(sk
, hdev
? hdev
->id
: MGMT_INDEX_NONE
,
4018 MGMT_OP_SET_EXP_FEATURE
,
4019 MGMT_STATUS_NOT_SUPPORTED
);
4022 #define SUPPORTED_DEVICE_FLAGS() ((1U << HCI_CONN_FLAG_MAX) - 1)
4024 static int get_device_flags(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4027 struct mgmt_cp_get_device_flags
*cp
= data
;
4028 struct mgmt_rp_get_device_flags rp
;
4029 struct bdaddr_list_with_flags
*br_params
;
4030 struct hci_conn_params
*params
;
4031 u32 supported_flags
= SUPPORTED_DEVICE_FLAGS();
4032 u32 current_flags
= 0;
4033 u8 status
= MGMT_STATUS_INVALID_PARAMS
;
4035 bt_dev_dbg(hdev
, "Get device flags %pMR (type 0x%x)\n",
4036 &cp
->addr
.bdaddr
, cp
->addr
.type
);
4040 if (cp
->addr
.type
== BDADDR_BREDR
) {
4041 br_params
= hci_bdaddr_list_lookup_with_flags(&hdev
->whitelist
,
4047 current_flags
= br_params
->current_flags
;
4049 params
= hci_conn_params_lookup(hdev
, &cp
->addr
.bdaddr
,
4050 le_addr_type(cp
->addr
.type
));
4055 current_flags
= params
->current_flags
;
4058 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
4059 rp
.addr
.type
= cp
->addr
.type
;
4060 rp
.supported_flags
= cpu_to_le32(supported_flags
);
4061 rp
.current_flags
= cpu_to_le32(current_flags
);
4063 status
= MGMT_STATUS_SUCCESS
;
4066 hci_dev_unlock(hdev
);
4068 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_DEVICE_FLAGS
, status
,
4072 static void device_flags_changed(struct sock
*sk
, struct hci_dev
*hdev
,
4073 bdaddr_t
*bdaddr
, u8 bdaddr_type
,
4074 u32 supported_flags
, u32 current_flags
)
4076 struct mgmt_ev_device_flags_changed ev
;
4078 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
4079 ev
.addr
.type
= bdaddr_type
;
4080 ev
.supported_flags
= cpu_to_le32(supported_flags
);
4081 ev
.current_flags
= cpu_to_le32(current_flags
);
4083 mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED
, hdev
, &ev
, sizeof(ev
), sk
);
4086 static int set_device_flags(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4089 struct mgmt_cp_set_device_flags
*cp
= data
;
4090 struct bdaddr_list_with_flags
*br_params
;
4091 struct hci_conn_params
*params
;
4092 u8 status
= MGMT_STATUS_INVALID_PARAMS
;
4093 u32 supported_flags
= SUPPORTED_DEVICE_FLAGS();
4094 u32 current_flags
= __le32_to_cpu(cp
->current_flags
);
4096 bt_dev_dbg(hdev
, "Set device flags %pMR (type 0x%x) = 0x%x",
4097 &cp
->addr
.bdaddr
, cp
->addr
.type
,
4098 __le32_to_cpu(current_flags
));
4100 if ((supported_flags
| current_flags
) != supported_flags
) {
4101 bt_dev_warn(hdev
, "Bad flag given (0x%x) vs supported (0x%0x)",
4102 current_flags
, supported_flags
);
4108 if (cp
->addr
.type
== BDADDR_BREDR
) {
4109 br_params
= hci_bdaddr_list_lookup_with_flags(&hdev
->whitelist
,
4114 br_params
->current_flags
= current_flags
;
4115 status
= MGMT_STATUS_SUCCESS
;
4117 bt_dev_warn(hdev
, "No such BR/EDR device %pMR (0x%x)",
4118 &cp
->addr
.bdaddr
, cp
->addr
.type
);
4121 params
= hci_conn_params_lookup(hdev
, &cp
->addr
.bdaddr
,
4122 le_addr_type(cp
->addr
.type
));
4124 params
->current_flags
= current_flags
;
4125 status
= MGMT_STATUS_SUCCESS
;
4127 bt_dev_warn(hdev
, "No such LE device %pMR (0x%x)",
4129 le_addr_type(cp
->addr
.type
));
4134 hci_dev_unlock(hdev
);
4136 if (status
== MGMT_STATUS_SUCCESS
)
4137 device_flags_changed(sk
, hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
,
4138 supported_flags
, current_flags
);
4140 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_DEVICE_FLAGS
, status
,
4141 &cp
->addr
, sizeof(cp
->addr
));
4144 static void mgmt_adv_monitor_added(struct sock
*sk
, struct hci_dev
*hdev
,
4147 struct mgmt_ev_adv_monitor_added ev
;
4149 ev
.monitor_handle
= cpu_to_le16(handle
);
4151 mgmt_event(MGMT_EV_ADV_MONITOR_ADDED
, hdev
, &ev
, sizeof(ev
), sk
);
4154 static void mgmt_adv_monitor_removed(struct sock
*sk
, struct hci_dev
*hdev
,
4157 struct mgmt_ev_adv_monitor_added ev
;
4159 ev
.monitor_handle
= cpu_to_le16(handle
);
4161 mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED
, hdev
, &ev
, sizeof(ev
), sk
);
4164 static int read_adv_mon_features(struct sock
*sk
, struct hci_dev
*hdev
,
4165 void *data
, u16 len
)
4167 struct adv_monitor
*monitor
= NULL
;
4168 struct mgmt_rp_read_adv_monitor_features
*rp
= NULL
;
4171 __u32 supported
= 0;
4172 __u16 num_handles
= 0;
4173 __u16 handles
[HCI_MAX_ADV_MONITOR_NUM_HANDLES
];
4175 BT_DBG("request for %s", hdev
->name
);
4179 if (msft_get_features(hdev
) & MSFT_FEATURE_MASK_LE_ADV_MONITOR
)
4180 supported
|= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS
;
4182 idr_for_each_entry(&hdev
->adv_monitors_idr
, monitor
, handle
) {
4183 handles
[num_handles
++] = monitor
->handle
;
4186 hci_dev_unlock(hdev
);
4188 rp_size
= sizeof(*rp
) + (num_handles
* sizeof(u16
));
4189 rp
= kmalloc(rp_size
, GFP_KERNEL
);
4193 /* Once controller-based monitoring is in place, the enabled_features
4194 * should reflect the use.
4196 rp
->supported_features
= cpu_to_le32(supported
);
4197 rp
->enabled_features
= 0;
4198 rp
->max_num_handles
= cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES
);
4199 rp
->max_num_patterns
= HCI_MAX_ADV_MONITOR_NUM_PATTERNS
;
4200 rp
->num_handles
= cpu_to_le16(num_handles
);
4202 memcpy(&rp
->handles
, &handles
, (num_handles
* sizeof(u16
)));
4204 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4205 MGMT_OP_READ_ADV_MONITOR_FEATURES
,
4206 MGMT_STATUS_SUCCESS
, rp
, rp_size
);
4213 static int add_adv_patterns_monitor(struct sock
*sk
, struct hci_dev
*hdev
,
4214 void *data
, u16 len
)
4216 struct mgmt_cp_add_adv_patterns_monitor
*cp
= data
;
4217 struct mgmt_rp_add_adv_patterns_monitor rp
;
4218 struct adv_monitor
*m
= NULL
;
4219 struct adv_pattern
*p
= NULL
;
4220 unsigned int mp_cnt
= 0, prev_adv_monitors_cnt
;
4221 __u8 cp_ofst
= 0, cp_len
= 0;
4224 BT_DBG("request for %s", hdev
->name
);
4226 if (len
<= sizeof(*cp
) || cp
->pattern_count
== 0) {
4227 err
= mgmt_cmd_status(sk
, hdev
->id
,
4228 MGMT_OP_ADD_ADV_PATTERNS_MONITOR
,
4229 MGMT_STATUS_INVALID_PARAMS
);
4233 m
= kmalloc(sizeof(*m
), GFP_KERNEL
);
4239 INIT_LIST_HEAD(&m
->patterns
);
4242 for (i
= 0; i
< cp
->pattern_count
; i
++) {
4243 if (++mp_cnt
> HCI_MAX_ADV_MONITOR_NUM_PATTERNS
) {
4244 err
= mgmt_cmd_status(sk
, hdev
->id
,
4245 MGMT_OP_ADD_ADV_PATTERNS_MONITOR
,
4246 MGMT_STATUS_INVALID_PARAMS
);
4250 cp_ofst
= cp
->patterns
[i
].offset
;
4251 cp_len
= cp
->patterns
[i
].length
;
4252 if (cp_ofst
>= HCI_MAX_AD_LENGTH
||
4253 cp_len
> HCI_MAX_AD_LENGTH
||
4254 (cp_ofst
+ cp_len
) > HCI_MAX_AD_LENGTH
) {
4255 err
= mgmt_cmd_status(sk
, hdev
->id
,
4256 MGMT_OP_ADD_ADV_PATTERNS_MONITOR
,
4257 MGMT_STATUS_INVALID_PARAMS
);
4261 p
= kmalloc(sizeof(*p
), GFP_KERNEL
);
4267 p
->ad_type
= cp
->patterns
[i
].ad_type
;
4268 p
->offset
= cp
->patterns
[i
].offset
;
4269 p
->length
= cp
->patterns
[i
].length
;
4270 memcpy(p
->value
, cp
->patterns
[i
].value
, p
->length
);
4272 INIT_LIST_HEAD(&p
->list
);
4273 list_add(&p
->list
, &m
->patterns
);
4276 if (mp_cnt
!= cp
->pattern_count
) {
4277 err
= mgmt_cmd_status(sk
, hdev
->id
,
4278 MGMT_OP_ADD_ADV_PATTERNS_MONITOR
,
4279 MGMT_STATUS_INVALID_PARAMS
);
4285 prev_adv_monitors_cnt
= hdev
->adv_monitors_cnt
;
4287 err
= hci_add_adv_monitor(hdev
, m
);
4289 if (err
== -ENOSPC
) {
4290 mgmt_cmd_status(sk
, hdev
->id
,
4291 MGMT_OP_ADD_ADV_PATTERNS_MONITOR
,
4292 MGMT_STATUS_NO_RESOURCES
);
4297 if (hdev
->adv_monitors_cnt
> prev_adv_monitors_cnt
)
4298 mgmt_adv_monitor_added(sk
, hdev
, m
->handle
);
4300 hci_dev_unlock(hdev
);
4302 rp
.monitor_handle
= cpu_to_le16(m
->handle
);
4304 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_ADV_PATTERNS_MONITOR
,
4305 MGMT_STATUS_SUCCESS
, &rp
, sizeof(rp
));
4308 hci_dev_unlock(hdev
);
4311 hci_free_adv_monitor(m
);
4315 static int remove_adv_monitor(struct sock
*sk
, struct hci_dev
*hdev
,
4316 void *data
, u16 len
)
4318 struct mgmt_cp_remove_adv_monitor
*cp
= data
;
4319 struct mgmt_rp_remove_adv_monitor rp
;
4320 unsigned int prev_adv_monitors_cnt
;
4324 BT_DBG("request for %s", hdev
->name
);
4328 handle
= __le16_to_cpu(cp
->monitor_handle
);
4329 prev_adv_monitors_cnt
= hdev
->adv_monitors_cnt
;
4331 err
= hci_remove_adv_monitor(hdev
, handle
);
4332 if (err
== -ENOENT
) {
4333 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_ADV_MONITOR
,
4334 MGMT_STATUS_INVALID_INDEX
);
4338 if (hdev
->adv_monitors_cnt
< prev_adv_monitors_cnt
)
4339 mgmt_adv_monitor_removed(sk
, hdev
, handle
);
4341 hci_dev_unlock(hdev
);
4343 rp
.monitor_handle
= cp
->monitor_handle
;
4345 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_ADV_MONITOR
,
4346 MGMT_STATUS_SUCCESS
, &rp
, sizeof(rp
));
4349 hci_dev_unlock(hdev
);
4353 static void read_local_oob_data_complete(struct hci_dev
*hdev
, u8 status
,
4354 u16 opcode
, struct sk_buff
*skb
)
4356 struct mgmt_rp_read_local_oob_data mgmt_rp
;
4357 size_t rp_size
= sizeof(mgmt_rp
);
4358 struct mgmt_pending_cmd
*cmd
;
4360 bt_dev_dbg(hdev
, "status %u", status
);
4362 cmd
= pending_find(MGMT_OP_READ_LOCAL_OOB_DATA
, hdev
);
4366 if (status
|| !skb
) {
4367 mgmt_cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
4368 status
? mgmt_status(status
) : MGMT_STATUS_FAILED
);
4372 memset(&mgmt_rp
, 0, sizeof(mgmt_rp
));
4374 if (opcode
== HCI_OP_READ_LOCAL_OOB_DATA
) {
4375 struct hci_rp_read_local_oob_data
*rp
= (void *) skb
->data
;
4377 if (skb
->len
< sizeof(*rp
)) {
4378 mgmt_cmd_status(cmd
->sk
, hdev
->id
,
4379 MGMT_OP_READ_LOCAL_OOB_DATA
,
4380 MGMT_STATUS_FAILED
);
4384 memcpy(mgmt_rp
.hash192
, rp
->hash
, sizeof(rp
->hash
));
4385 memcpy(mgmt_rp
.rand192
, rp
->rand
, sizeof(rp
->rand
));
4387 rp_size
-= sizeof(mgmt_rp
.hash256
) + sizeof(mgmt_rp
.rand256
);
4389 struct hci_rp_read_local_oob_ext_data
*rp
= (void *) skb
->data
;
4391 if (skb
->len
< sizeof(*rp
)) {
4392 mgmt_cmd_status(cmd
->sk
, hdev
->id
,
4393 MGMT_OP_READ_LOCAL_OOB_DATA
,
4394 MGMT_STATUS_FAILED
);
4398 memcpy(mgmt_rp
.hash192
, rp
->hash192
, sizeof(rp
->hash192
));
4399 memcpy(mgmt_rp
.rand192
, rp
->rand192
, sizeof(rp
->rand192
));
4401 memcpy(mgmt_rp
.hash256
, rp
->hash256
, sizeof(rp
->hash256
));
4402 memcpy(mgmt_rp
.rand256
, rp
->rand256
, sizeof(rp
->rand256
));
4405 mgmt_cmd_complete(cmd
->sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
4406 MGMT_STATUS_SUCCESS
, &mgmt_rp
, rp_size
);
4409 mgmt_pending_remove(cmd
);
4412 static int read_local_oob_data(struct sock
*sk
, struct hci_dev
*hdev
,
4413 void *data
, u16 data_len
)
4415 struct mgmt_pending_cmd
*cmd
;
4416 struct hci_request req
;
4419 bt_dev_dbg(hdev
, "sock %p", sk
);
4423 if (!hdev_is_powered(hdev
)) {
4424 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
4425 MGMT_STATUS_NOT_POWERED
);
4429 if (!lmp_ssp_capable(hdev
)) {
4430 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
4431 MGMT_STATUS_NOT_SUPPORTED
);
4435 if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA
, hdev
)) {
4436 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
4441 cmd
= mgmt_pending_add(sk
, MGMT_OP_READ_LOCAL_OOB_DATA
, hdev
, NULL
, 0);
4447 hci_req_init(&req
, hdev
);
4449 if (bredr_sc_enabled(hdev
))
4450 hci_req_add(&req
, HCI_OP_READ_LOCAL_OOB_EXT_DATA
, 0, NULL
);
4452 hci_req_add(&req
, HCI_OP_READ_LOCAL_OOB_DATA
, 0, NULL
);
4454 err
= hci_req_run_skb(&req
, read_local_oob_data_complete
);
4456 mgmt_pending_remove(cmd
);
4459 hci_dev_unlock(hdev
);
4463 static int add_remote_oob_data(struct sock
*sk
, struct hci_dev
*hdev
,
4464 void *data
, u16 len
)
4466 struct mgmt_addr_info
*addr
= data
;
4469 bt_dev_dbg(hdev
, "sock %p", sk
);
4471 if (!bdaddr_type_is_valid(addr
->type
))
4472 return mgmt_cmd_complete(sk
, hdev
->id
,
4473 MGMT_OP_ADD_REMOTE_OOB_DATA
,
4474 MGMT_STATUS_INVALID_PARAMS
,
4475 addr
, sizeof(*addr
));
4479 if (len
== MGMT_ADD_REMOTE_OOB_DATA_SIZE
) {
4480 struct mgmt_cp_add_remote_oob_data
*cp
= data
;
4483 if (cp
->addr
.type
!= BDADDR_BREDR
) {
4484 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4485 MGMT_OP_ADD_REMOTE_OOB_DATA
,
4486 MGMT_STATUS_INVALID_PARAMS
,
4487 &cp
->addr
, sizeof(cp
->addr
));
4491 err
= hci_add_remote_oob_data(hdev
, &cp
->addr
.bdaddr
,
4492 cp
->addr
.type
, cp
->hash
,
4493 cp
->rand
, NULL
, NULL
);
4495 status
= MGMT_STATUS_FAILED
;
4497 status
= MGMT_STATUS_SUCCESS
;
4499 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4500 MGMT_OP_ADD_REMOTE_OOB_DATA
, status
,
4501 &cp
->addr
, sizeof(cp
->addr
));
4502 } else if (len
== MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE
) {
4503 struct mgmt_cp_add_remote_oob_ext_data
*cp
= data
;
4504 u8
*rand192
, *hash192
, *rand256
, *hash256
;
4507 if (bdaddr_type_is_le(cp
->addr
.type
)) {
4508 /* Enforce zero-valued 192-bit parameters as
4509 * long as legacy SMP OOB isn't implemented.
4511 if (memcmp(cp
->rand192
, ZERO_KEY
, 16) ||
4512 memcmp(cp
->hash192
, ZERO_KEY
, 16)) {
4513 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4514 MGMT_OP_ADD_REMOTE_OOB_DATA
,
4515 MGMT_STATUS_INVALID_PARAMS
,
4516 addr
, sizeof(*addr
));
4523 /* In case one of the P-192 values is set to zero,
4524 * then just disable OOB data for P-192.
4526 if (!memcmp(cp
->rand192
, ZERO_KEY
, 16) ||
4527 !memcmp(cp
->hash192
, ZERO_KEY
, 16)) {
4531 rand192
= cp
->rand192
;
4532 hash192
= cp
->hash192
;
4536 /* In case one of the P-256 values is set to zero, then just
4537 * disable OOB data for P-256.
4539 if (!memcmp(cp
->rand256
, ZERO_KEY
, 16) ||
4540 !memcmp(cp
->hash256
, ZERO_KEY
, 16)) {
4544 rand256
= cp
->rand256
;
4545 hash256
= cp
->hash256
;
4548 err
= hci_add_remote_oob_data(hdev
, &cp
->addr
.bdaddr
,
4549 cp
->addr
.type
, hash192
, rand192
,
4552 status
= MGMT_STATUS_FAILED
;
4554 status
= MGMT_STATUS_SUCCESS
;
4556 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4557 MGMT_OP_ADD_REMOTE_OOB_DATA
,
4558 status
, &cp
->addr
, sizeof(cp
->addr
));
4560 bt_dev_err(hdev
, "add_remote_oob_data: invalid len of %u bytes",
4562 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_REMOTE_OOB_DATA
,
4563 MGMT_STATUS_INVALID_PARAMS
);
4567 hci_dev_unlock(hdev
);
4571 static int remove_remote_oob_data(struct sock
*sk
, struct hci_dev
*hdev
,
4572 void *data
, u16 len
)
4574 struct mgmt_cp_remove_remote_oob_data
*cp
= data
;
4578 bt_dev_dbg(hdev
, "sock %p", sk
);
4580 if (cp
->addr
.type
!= BDADDR_BREDR
)
4581 return mgmt_cmd_complete(sk
, hdev
->id
,
4582 MGMT_OP_REMOVE_REMOTE_OOB_DATA
,
4583 MGMT_STATUS_INVALID_PARAMS
,
4584 &cp
->addr
, sizeof(cp
->addr
));
4588 if (!bacmp(&cp
->addr
.bdaddr
, BDADDR_ANY
)) {
4589 hci_remote_oob_data_clear(hdev
);
4590 status
= MGMT_STATUS_SUCCESS
;
4594 err
= hci_remove_remote_oob_data(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
);
4596 status
= MGMT_STATUS_INVALID_PARAMS
;
4598 status
= MGMT_STATUS_SUCCESS
;
4601 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_REMOTE_OOB_DATA
,
4602 status
, &cp
->addr
, sizeof(cp
->addr
));
4604 hci_dev_unlock(hdev
);
4608 void mgmt_start_discovery_complete(struct hci_dev
*hdev
, u8 status
)
4610 struct mgmt_pending_cmd
*cmd
;
4612 bt_dev_dbg(hdev
, "status %d", status
);
4616 cmd
= pending_find(MGMT_OP_START_DISCOVERY
, hdev
);
4618 cmd
= pending_find(MGMT_OP_START_SERVICE_DISCOVERY
, hdev
);
4621 cmd
= pending_find(MGMT_OP_START_LIMITED_DISCOVERY
, hdev
);
4624 cmd
->cmd_complete(cmd
, mgmt_status(status
));
4625 mgmt_pending_remove(cmd
);
4628 hci_dev_unlock(hdev
);
4630 /* Handle suspend notifier */
4631 if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY
,
4632 hdev
->suspend_tasks
)) {
4633 bt_dev_dbg(hdev
, "Unpaused discovery");
4634 wake_up(&hdev
->suspend_wait_q
);
4638 static bool discovery_type_is_valid(struct hci_dev
*hdev
, uint8_t type
,
4639 uint8_t *mgmt_status
)
4642 case DISCOV_TYPE_LE
:
4643 *mgmt_status
= mgmt_le_support(hdev
);
4647 case DISCOV_TYPE_INTERLEAVED
:
4648 *mgmt_status
= mgmt_le_support(hdev
);
4652 case DISCOV_TYPE_BREDR
:
4653 *mgmt_status
= mgmt_bredr_support(hdev
);
4658 *mgmt_status
= MGMT_STATUS_INVALID_PARAMS
;
4665 static int start_discovery_internal(struct sock
*sk
, struct hci_dev
*hdev
,
4666 u16 op
, void *data
, u16 len
)
4668 struct mgmt_cp_start_discovery
*cp
= data
;
4669 struct mgmt_pending_cmd
*cmd
;
4673 bt_dev_dbg(hdev
, "sock %p", sk
);
4677 if (!hdev_is_powered(hdev
)) {
4678 err
= mgmt_cmd_complete(sk
, hdev
->id
, op
,
4679 MGMT_STATUS_NOT_POWERED
,
4680 &cp
->type
, sizeof(cp
->type
));
4684 if (hdev
->discovery
.state
!= DISCOVERY_STOPPED
||
4685 hci_dev_test_flag(hdev
, HCI_PERIODIC_INQ
)) {
4686 err
= mgmt_cmd_complete(sk
, hdev
->id
, op
, MGMT_STATUS_BUSY
,
4687 &cp
->type
, sizeof(cp
->type
));
4691 if (!discovery_type_is_valid(hdev
, cp
->type
, &status
)) {
4692 err
= mgmt_cmd_complete(sk
, hdev
->id
, op
, status
,
4693 &cp
->type
, sizeof(cp
->type
));
4697 /* Can't start discovery when it is paused */
4698 if (hdev
->discovery_paused
) {
4699 err
= mgmt_cmd_complete(sk
, hdev
->id
, op
, MGMT_STATUS_BUSY
,
4700 &cp
->type
, sizeof(cp
->type
));
4704 /* Clear the discovery filter first to free any previously
4705 * allocated memory for the UUID list.
4707 hci_discovery_filter_clear(hdev
);
4709 hdev
->discovery
.type
= cp
->type
;
4710 hdev
->discovery
.report_invalid_rssi
= false;
4711 if (op
== MGMT_OP_START_LIMITED_DISCOVERY
)
4712 hdev
->discovery
.limited
= true;
4714 hdev
->discovery
.limited
= false;
4716 cmd
= mgmt_pending_add(sk
, op
, hdev
, data
, len
);
4722 cmd
->cmd_complete
= generic_cmd_complete
;
4724 hci_discovery_set_state(hdev
, DISCOVERY_STARTING
);
4725 queue_work(hdev
->req_workqueue
, &hdev
->discov_update
);
4729 hci_dev_unlock(hdev
);
4733 static int start_discovery(struct sock
*sk
, struct hci_dev
*hdev
,
4734 void *data
, u16 len
)
4736 return start_discovery_internal(sk
, hdev
, MGMT_OP_START_DISCOVERY
,
4740 static int start_limited_discovery(struct sock
*sk
, struct hci_dev
*hdev
,
4741 void *data
, u16 len
)
4743 return start_discovery_internal(sk
, hdev
,
4744 MGMT_OP_START_LIMITED_DISCOVERY
,
4748 static int service_discovery_cmd_complete(struct mgmt_pending_cmd
*cmd
,
4751 return mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, status
,
4755 static int start_service_discovery(struct sock
*sk
, struct hci_dev
*hdev
,
4756 void *data
, u16 len
)
4758 struct mgmt_cp_start_service_discovery
*cp
= data
;
4759 struct mgmt_pending_cmd
*cmd
;
4760 const u16 max_uuid_count
= ((U16_MAX
- sizeof(*cp
)) / 16);
4761 u16 uuid_count
, expected_len
;
4765 bt_dev_dbg(hdev
, "sock %p", sk
);
4769 if (!hdev_is_powered(hdev
)) {
4770 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4771 MGMT_OP_START_SERVICE_DISCOVERY
,
4772 MGMT_STATUS_NOT_POWERED
,
4773 &cp
->type
, sizeof(cp
->type
));
4777 if (hdev
->discovery
.state
!= DISCOVERY_STOPPED
||
4778 hci_dev_test_flag(hdev
, HCI_PERIODIC_INQ
)) {
4779 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4780 MGMT_OP_START_SERVICE_DISCOVERY
,
4781 MGMT_STATUS_BUSY
, &cp
->type
,
4786 uuid_count
= __le16_to_cpu(cp
->uuid_count
);
4787 if (uuid_count
> max_uuid_count
) {
4788 bt_dev_err(hdev
, "service_discovery: too big uuid_count value %u",
4790 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4791 MGMT_OP_START_SERVICE_DISCOVERY
,
4792 MGMT_STATUS_INVALID_PARAMS
, &cp
->type
,
4797 expected_len
= sizeof(*cp
) + uuid_count
* 16;
4798 if (expected_len
!= len
) {
4799 bt_dev_err(hdev
, "service_discovery: expected %u bytes, got %u bytes",
4801 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4802 MGMT_OP_START_SERVICE_DISCOVERY
,
4803 MGMT_STATUS_INVALID_PARAMS
, &cp
->type
,
4808 if (!discovery_type_is_valid(hdev
, cp
->type
, &status
)) {
4809 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4810 MGMT_OP_START_SERVICE_DISCOVERY
,
4811 status
, &cp
->type
, sizeof(cp
->type
));
4815 cmd
= mgmt_pending_add(sk
, MGMT_OP_START_SERVICE_DISCOVERY
,
4822 cmd
->cmd_complete
= service_discovery_cmd_complete
;
4824 /* Clear the discovery filter first to free any previously
4825 * allocated memory for the UUID list.
4827 hci_discovery_filter_clear(hdev
);
4829 hdev
->discovery
.result_filtering
= true;
4830 hdev
->discovery
.type
= cp
->type
;
4831 hdev
->discovery
.rssi
= cp
->rssi
;
4832 hdev
->discovery
.uuid_count
= uuid_count
;
4834 if (uuid_count
> 0) {
4835 hdev
->discovery
.uuids
= kmemdup(cp
->uuids
, uuid_count
* 16,
4837 if (!hdev
->discovery
.uuids
) {
4838 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4839 MGMT_OP_START_SERVICE_DISCOVERY
,
4841 &cp
->type
, sizeof(cp
->type
));
4842 mgmt_pending_remove(cmd
);
4847 hci_discovery_set_state(hdev
, DISCOVERY_STARTING
);
4848 queue_work(hdev
->req_workqueue
, &hdev
->discov_update
);
4852 hci_dev_unlock(hdev
);
4856 void mgmt_stop_discovery_complete(struct hci_dev
*hdev
, u8 status
)
4858 struct mgmt_pending_cmd
*cmd
;
4860 bt_dev_dbg(hdev
, "status %d", status
);
4864 cmd
= pending_find(MGMT_OP_STOP_DISCOVERY
, hdev
);
4866 cmd
->cmd_complete(cmd
, mgmt_status(status
));
4867 mgmt_pending_remove(cmd
);
4870 hci_dev_unlock(hdev
);
4872 /* Handle suspend notifier */
4873 if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY
, hdev
->suspend_tasks
)) {
4874 bt_dev_dbg(hdev
, "Paused discovery");
4875 wake_up(&hdev
->suspend_wait_q
);
4879 static int stop_discovery(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4882 struct mgmt_cp_stop_discovery
*mgmt_cp
= data
;
4883 struct mgmt_pending_cmd
*cmd
;
4886 bt_dev_dbg(hdev
, "sock %p", sk
);
4890 if (!hci_discovery_active(hdev
)) {
4891 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_STOP_DISCOVERY
,
4892 MGMT_STATUS_REJECTED
, &mgmt_cp
->type
,
4893 sizeof(mgmt_cp
->type
));
4897 if (hdev
->discovery
.type
!= mgmt_cp
->type
) {
4898 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_STOP_DISCOVERY
,
4899 MGMT_STATUS_INVALID_PARAMS
,
4900 &mgmt_cp
->type
, sizeof(mgmt_cp
->type
));
4904 cmd
= mgmt_pending_add(sk
, MGMT_OP_STOP_DISCOVERY
, hdev
, data
, len
);
4910 cmd
->cmd_complete
= generic_cmd_complete
;
4912 hci_discovery_set_state(hdev
, DISCOVERY_STOPPING
);
4913 queue_work(hdev
->req_workqueue
, &hdev
->discov_update
);
4917 hci_dev_unlock(hdev
);
4921 static int confirm_name(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4924 struct mgmt_cp_confirm_name
*cp
= data
;
4925 struct inquiry_entry
*e
;
4928 bt_dev_dbg(hdev
, "sock %p", sk
);
4932 if (!hci_discovery_active(hdev
)) {
4933 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_CONFIRM_NAME
,
4934 MGMT_STATUS_FAILED
, &cp
->addr
,
4939 e
= hci_inquiry_cache_lookup_unknown(hdev
, &cp
->addr
.bdaddr
);
4941 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_CONFIRM_NAME
,
4942 MGMT_STATUS_INVALID_PARAMS
, &cp
->addr
,
4947 if (cp
->name_known
) {
4948 e
->name_state
= NAME_KNOWN
;
4951 e
->name_state
= NAME_NEEDED
;
4952 hci_inquiry_cache_update_resolve(hdev
, e
);
4955 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_CONFIRM_NAME
, 0,
4956 &cp
->addr
, sizeof(cp
->addr
));
4959 hci_dev_unlock(hdev
);
4963 static int block_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4966 struct mgmt_cp_block_device
*cp
= data
;
4970 bt_dev_dbg(hdev
, "sock %p", sk
);
4972 if (!bdaddr_type_is_valid(cp
->addr
.type
))
4973 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_BLOCK_DEVICE
,
4974 MGMT_STATUS_INVALID_PARAMS
,
4975 &cp
->addr
, sizeof(cp
->addr
));
4979 err
= hci_bdaddr_list_add(&hdev
->blacklist
, &cp
->addr
.bdaddr
,
4982 status
= MGMT_STATUS_FAILED
;
4986 mgmt_event(MGMT_EV_DEVICE_BLOCKED
, hdev
, &cp
->addr
, sizeof(cp
->addr
),
4988 status
= MGMT_STATUS_SUCCESS
;
4991 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_BLOCK_DEVICE
, status
,
4992 &cp
->addr
, sizeof(cp
->addr
));
4994 hci_dev_unlock(hdev
);
4999 static int unblock_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
5002 struct mgmt_cp_unblock_device
*cp
= data
;
5006 bt_dev_dbg(hdev
, "sock %p", sk
);
5008 if (!bdaddr_type_is_valid(cp
->addr
.type
))
5009 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNBLOCK_DEVICE
,
5010 MGMT_STATUS_INVALID_PARAMS
,
5011 &cp
->addr
, sizeof(cp
->addr
));
5015 err
= hci_bdaddr_list_del(&hdev
->blacklist
, &cp
->addr
.bdaddr
,
5018 status
= MGMT_STATUS_INVALID_PARAMS
;
5022 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED
, hdev
, &cp
->addr
, sizeof(cp
->addr
),
5024 status
= MGMT_STATUS_SUCCESS
;
5027 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNBLOCK_DEVICE
, status
,
5028 &cp
->addr
, sizeof(cp
->addr
));
5030 hci_dev_unlock(hdev
);
5035 static int set_device_id(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
5038 struct mgmt_cp_set_device_id
*cp
= data
;
5039 struct hci_request req
;
5043 bt_dev_dbg(hdev
, "sock %p", sk
);
5045 source
= __le16_to_cpu(cp
->source
);
5047 if (source
> 0x0002)
5048 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEVICE_ID
,
5049 MGMT_STATUS_INVALID_PARAMS
);
5053 hdev
->devid_source
= source
;
5054 hdev
->devid_vendor
= __le16_to_cpu(cp
->vendor
);
5055 hdev
->devid_product
= __le16_to_cpu(cp
->product
);
5056 hdev
->devid_version
= __le16_to_cpu(cp
->version
);
5058 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_DEVICE_ID
, 0,
5061 hci_req_init(&req
, hdev
);
5062 __hci_req_update_eir(&req
);
5063 hci_req_run(&req
, NULL
);
5065 hci_dev_unlock(hdev
);
5070 static void enable_advertising_instance(struct hci_dev
*hdev
, u8 status
,
5073 bt_dev_dbg(hdev
, "status %d", status
);
5076 static void set_advertising_complete(struct hci_dev
*hdev
, u8 status
,
5079 struct cmd_lookup match
= { NULL
, hdev
};
5080 struct hci_request req
;
5082 struct adv_info
*adv_instance
;
5088 u8 mgmt_err
= mgmt_status(status
);
5090 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING
, hdev
,
5091 cmd_status_rsp
, &mgmt_err
);
5095 if (hci_dev_test_flag(hdev
, HCI_LE_ADV
))
5096 hci_dev_set_flag(hdev
, HCI_ADVERTISING
);
5098 hci_dev_clear_flag(hdev
, HCI_ADVERTISING
);
5100 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING
, hdev
, settings_rsp
,
5103 new_settings(hdev
, match
.sk
);
5108 /* Handle suspend notifier */
5109 if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING
,
5110 hdev
->suspend_tasks
)) {
5111 bt_dev_dbg(hdev
, "Paused advertising");
5112 wake_up(&hdev
->suspend_wait_q
);
5113 } else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING
,
5114 hdev
->suspend_tasks
)) {
5115 bt_dev_dbg(hdev
, "Unpaused advertising");
5116 wake_up(&hdev
->suspend_wait_q
);
5119 /* If "Set Advertising" was just disabled and instance advertising was
5120 * set up earlier, then re-enable multi-instance advertising.
5122 if (hci_dev_test_flag(hdev
, HCI_ADVERTISING
) ||
5123 list_empty(&hdev
->adv_instances
))
5126 instance
= hdev
->cur_adv_instance
;
5128 adv_instance
= list_first_entry_or_null(&hdev
->adv_instances
,
5129 struct adv_info
, list
);
5133 instance
= adv_instance
->instance
;
5136 hci_req_init(&req
, hdev
);
5138 err
= __hci_req_schedule_adv_instance(&req
, instance
, true);
5141 err
= hci_req_run(&req
, enable_advertising_instance
);
5144 bt_dev_err(hdev
, "failed to re-configure advertising");
5147 hci_dev_unlock(hdev
);
5150 static int set_advertising(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
5153 struct mgmt_mode
*cp
= data
;
5154 struct mgmt_pending_cmd
*cmd
;
5155 struct hci_request req
;
5159 bt_dev_dbg(hdev
, "sock %p", sk
);
5161 status
= mgmt_le_support(hdev
);
5163 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
5166 /* Enabling the experimental LL Privay support disables support for
5169 if (hci_dev_test_flag(hdev
, HCI_ENABLE_LL_PRIVACY
))
5170 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
5171 MGMT_STATUS_NOT_SUPPORTED
);
5173 if (cp
->val
!= 0x00 && cp
->val
!= 0x01 && cp
->val
!= 0x02)
5174 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
5175 MGMT_STATUS_INVALID_PARAMS
);
5177 if (hdev
->advertising_paused
)
5178 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
5185 /* The following conditions are ones which mean that we should
5186 * not do any HCI communication but directly send a mgmt
5187 * response to user space (after toggling the flag if
5190 if (!hdev_is_powered(hdev
) ||
5191 (val
== hci_dev_test_flag(hdev
, HCI_ADVERTISING
) &&
5192 (cp
->val
== 0x02) == hci_dev_test_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
)) ||
5193 hci_conn_num(hdev
, LE_LINK
) > 0 ||
5194 (hci_dev_test_flag(hdev
, HCI_LE_SCAN
) &&
5195 hdev
->le_scan_type
== LE_SCAN_ACTIVE
)) {
5199 hdev
->cur_adv_instance
= 0x00;
5200 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_ADVERTISING
);
5201 if (cp
->val
== 0x02)
5202 hci_dev_set_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
);
5204 hci_dev_clear_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
);
5206 changed
= hci_dev_test_and_clear_flag(hdev
, HCI_ADVERTISING
);
5207 hci_dev_clear_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
);
5210 err
= send_settings_rsp(sk
, MGMT_OP_SET_ADVERTISING
, hdev
);
5215 err
= new_settings(hdev
, sk
);
5220 if (pending_find(MGMT_OP_SET_ADVERTISING
, hdev
) ||
5221 pending_find(MGMT_OP_SET_LE
, hdev
)) {
5222 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
5227 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_ADVERTISING
, hdev
, data
, len
);
5233 hci_req_init(&req
, hdev
);
5235 if (cp
->val
== 0x02)
5236 hci_dev_set_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
);
5238 hci_dev_clear_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
);
5240 cancel_adv_timeout(hdev
);
5243 /* Switch to instance "0" for the Set Advertising setting.
5244 * We cannot use update_[adv|scan_rsp]_data() here as the
5245 * HCI_ADVERTISING flag is not yet set.
5247 hdev
->cur_adv_instance
= 0x00;
5249 if (ext_adv_capable(hdev
)) {
5250 __hci_req_start_ext_adv(&req
, 0x00);
5252 __hci_req_update_adv_data(&req
, 0x00);
5253 __hci_req_update_scan_rsp_data(&req
, 0x00);
5254 __hci_req_enable_advertising(&req
);
5257 __hci_req_disable_advertising(&req
);
5260 err
= hci_req_run(&req
, set_advertising_complete
);
5262 mgmt_pending_remove(cmd
);
5265 hci_dev_unlock(hdev
);
5269 static int set_static_address(struct sock
*sk
, struct hci_dev
*hdev
,
5270 void *data
, u16 len
)
5272 struct mgmt_cp_set_static_address
*cp
= data
;
5275 bt_dev_dbg(hdev
, "sock %p", sk
);
5277 if (!lmp_le_capable(hdev
))
5278 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_STATIC_ADDRESS
,
5279 MGMT_STATUS_NOT_SUPPORTED
);
5281 if (hdev_is_powered(hdev
))
5282 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_STATIC_ADDRESS
,
5283 MGMT_STATUS_REJECTED
);
5285 if (bacmp(&cp
->bdaddr
, BDADDR_ANY
)) {
5286 if (!bacmp(&cp
->bdaddr
, BDADDR_NONE
))
5287 return mgmt_cmd_status(sk
, hdev
->id
,
5288 MGMT_OP_SET_STATIC_ADDRESS
,
5289 MGMT_STATUS_INVALID_PARAMS
);
5291 /* Two most significant bits shall be set */
5292 if ((cp
->bdaddr
.b
[5] & 0xc0) != 0xc0)
5293 return mgmt_cmd_status(sk
, hdev
->id
,
5294 MGMT_OP_SET_STATIC_ADDRESS
,
5295 MGMT_STATUS_INVALID_PARAMS
);
5300 bacpy(&hdev
->static_addr
, &cp
->bdaddr
);
5302 err
= send_settings_rsp(sk
, MGMT_OP_SET_STATIC_ADDRESS
, hdev
);
5306 err
= new_settings(hdev
, sk
);
5309 hci_dev_unlock(hdev
);
5313 static int set_scan_params(struct sock
*sk
, struct hci_dev
*hdev
,
5314 void *data
, u16 len
)
5316 struct mgmt_cp_set_scan_params
*cp
= data
;
5317 __u16 interval
, window
;
5320 bt_dev_dbg(hdev
, "sock %p", sk
);
5322 if (!lmp_le_capable(hdev
))
5323 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
5324 MGMT_STATUS_NOT_SUPPORTED
);
5326 interval
= __le16_to_cpu(cp
->interval
);
5328 if (interval
< 0x0004 || interval
> 0x4000)
5329 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
5330 MGMT_STATUS_INVALID_PARAMS
);
5332 window
= __le16_to_cpu(cp
->window
);
5334 if (window
< 0x0004 || window
> 0x4000)
5335 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
5336 MGMT_STATUS_INVALID_PARAMS
);
5338 if (window
> interval
)
5339 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
5340 MGMT_STATUS_INVALID_PARAMS
);
5344 hdev
->le_scan_interval
= interval
;
5345 hdev
->le_scan_window
= window
;
5347 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
, 0,
5350 /* If background scan is running, restart it so new parameters are
5353 if (hci_dev_test_flag(hdev
, HCI_LE_SCAN
) &&
5354 hdev
->discovery
.state
== DISCOVERY_STOPPED
) {
5355 struct hci_request req
;
5357 hci_req_init(&req
, hdev
);
5359 hci_req_add_le_scan_disable(&req
, false);
5360 hci_req_add_le_passive_scan(&req
);
5362 hci_req_run(&req
, NULL
);
5365 hci_dev_unlock(hdev
);
5370 static void fast_connectable_complete(struct hci_dev
*hdev
, u8 status
,
5373 struct mgmt_pending_cmd
*cmd
;
5375 bt_dev_dbg(hdev
, "status 0x%02x", status
);
5379 cmd
= pending_find(MGMT_OP_SET_FAST_CONNECTABLE
, hdev
);
5384 mgmt_cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
5385 mgmt_status(status
));
5387 struct mgmt_mode
*cp
= cmd
->param
;
5390 hci_dev_set_flag(hdev
, HCI_FAST_CONNECTABLE
);
5392 hci_dev_clear_flag(hdev
, HCI_FAST_CONNECTABLE
);
5394 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_FAST_CONNECTABLE
, hdev
);
5395 new_settings(hdev
, cmd
->sk
);
5398 mgmt_pending_remove(cmd
);
5401 hci_dev_unlock(hdev
);
5404 static int set_fast_connectable(struct sock
*sk
, struct hci_dev
*hdev
,
5405 void *data
, u16 len
)
5407 struct mgmt_mode
*cp
= data
;
5408 struct mgmt_pending_cmd
*cmd
;
5409 struct hci_request req
;
5412 bt_dev_dbg(hdev
, "sock %p", sk
);
5414 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
) ||
5415 hdev
->hci_ver
< BLUETOOTH_VER_1_2
)
5416 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
5417 MGMT_STATUS_NOT_SUPPORTED
);
5419 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
5420 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
5421 MGMT_STATUS_INVALID_PARAMS
);
5425 if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE
, hdev
)) {
5426 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
5431 if (!!cp
->val
== hci_dev_test_flag(hdev
, HCI_FAST_CONNECTABLE
)) {
5432 err
= send_settings_rsp(sk
, MGMT_OP_SET_FAST_CONNECTABLE
,
5437 if (!hdev_is_powered(hdev
)) {
5438 hci_dev_change_flag(hdev
, HCI_FAST_CONNECTABLE
);
5439 err
= send_settings_rsp(sk
, MGMT_OP_SET_FAST_CONNECTABLE
,
5441 new_settings(hdev
, sk
);
5445 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_FAST_CONNECTABLE
, hdev
,
5452 hci_req_init(&req
, hdev
);
5454 __hci_req_write_fast_connectable(&req
, cp
->val
);
5456 err
= hci_req_run(&req
, fast_connectable_complete
);
5458 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
5459 MGMT_STATUS_FAILED
);
5460 mgmt_pending_remove(cmd
);
5464 hci_dev_unlock(hdev
);
5469 static void set_bredr_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
5471 struct mgmt_pending_cmd
*cmd
;
5473 bt_dev_dbg(hdev
, "status 0x%02x", status
);
5477 cmd
= pending_find(MGMT_OP_SET_BREDR
, hdev
);
5482 u8 mgmt_err
= mgmt_status(status
);
5484 /* We need to restore the flag if related HCI commands
5487 hci_dev_clear_flag(hdev
, HCI_BREDR_ENABLED
);
5489 mgmt_cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_err
);
5491 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_BREDR
, hdev
);
5492 new_settings(hdev
, cmd
->sk
);
5495 mgmt_pending_remove(cmd
);
5498 hci_dev_unlock(hdev
);
5501 static int set_bredr(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
5503 struct mgmt_mode
*cp
= data
;
5504 struct mgmt_pending_cmd
*cmd
;
5505 struct hci_request req
;
5508 bt_dev_dbg(hdev
, "sock %p", sk
);
5510 if (!lmp_bredr_capable(hdev
) || !lmp_le_capable(hdev
))
5511 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
5512 MGMT_STATUS_NOT_SUPPORTED
);
5514 if (!hci_dev_test_flag(hdev
, HCI_LE_ENABLED
))
5515 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
5516 MGMT_STATUS_REJECTED
);
5518 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
5519 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
5520 MGMT_STATUS_INVALID_PARAMS
);
5524 if (cp
->val
== hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
)) {
5525 err
= send_settings_rsp(sk
, MGMT_OP_SET_BREDR
, hdev
);
5529 if (!hdev_is_powered(hdev
)) {
5531 hci_dev_clear_flag(hdev
, HCI_DISCOVERABLE
);
5532 hci_dev_clear_flag(hdev
, HCI_SSP_ENABLED
);
5533 hci_dev_clear_flag(hdev
, HCI_LINK_SECURITY
);
5534 hci_dev_clear_flag(hdev
, HCI_FAST_CONNECTABLE
);
5535 hci_dev_clear_flag(hdev
, HCI_HS_ENABLED
);
5538 hci_dev_change_flag(hdev
, HCI_BREDR_ENABLED
);
5540 err
= send_settings_rsp(sk
, MGMT_OP_SET_BREDR
, hdev
);
5544 err
= new_settings(hdev
, sk
);
5548 /* Reject disabling when powered on */
5550 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
5551 MGMT_STATUS_REJECTED
);
5554 /* When configuring a dual-mode controller to operate
5555 * with LE only and using a static address, then switching
5556 * BR/EDR back on is not allowed.
5558 * Dual-mode controllers shall operate with the public
5559 * address as its identity address for BR/EDR and LE. So
5560 * reject the attempt to create an invalid configuration.
5562 * The same restrictions applies when secure connections
5563 * has been enabled. For BR/EDR this is a controller feature
5564 * while for LE it is a host stack feature. This means that
5565 * switching BR/EDR back on when secure connections has been
5566 * enabled is not a supported transaction.
5568 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
) &&
5569 (bacmp(&hdev
->static_addr
, BDADDR_ANY
) ||
5570 hci_dev_test_flag(hdev
, HCI_SC_ENABLED
))) {
5571 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
5572 MGMT_STATUS_REJECTED
);
5577 if (pending_find(MGMT_OP_SET_BREDR
, hdev
)) {
5578 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
5583 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_BREDR
, hdev
, data
, len
);
5589 /* We need to flip the bit already here so that
5590 * hci_req_update_adv_data generates the correct flags.
5592 hci_dev_set_flag(hdev
, HCI_BREDR_ENABLED
);
5594 hci_req_init(&req
, hdev
);
5596 __hci_req_write_fast_connectable(&req
, false);
5597 __hci_req_update_scan(&req
);
5599 /* Since only the advertising data flags will change, there
5600 * is no need to update the scan response data.
5602 __hci_req_update_adv_data(&req
, hdev
->cur_adv_instance
);
5604 err
= hci_req_run(&req
, set_bredr_complete
);
5606 mgmt_pending_remove(cmd
);
5609 hci_dev_unlock(hdev
);
5613 static void sc_enable_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
5615 struct mgmt_pending_cmd
*cmd
;
5616 struct mgmt_mode
*cp
;
5618 bt_dev_dbg(hdev
, "status %u", status
);
5622 cmd
= pending_find(MGMT_OP_SET_SECURE_CONN
, hdev
);
5627 mgmt_cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
,
5628 mgmt_status(status
));
5636 hci_dev_clear_flag(hdev
, HCI_SC_ENABLED
);
5637 hci_dev_clear_flag(hdev
, HCI_SC_ONLY
);
5640 hci_dev_set_flag(hdev
, HCI_SC_ENABLED
);
5641 hci_dev_clear_flag(hdev
, HCI_SC_ONLY
);
5644 hci_dev_set_flag(hdev
, HCI_SC_ENABLED
);
5645 hci_dev_set_flag(hdev
, HCI_SC_ONLY
);
5649 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_SECURE_CONN
, hdev
);
5650 new_settings(hdev
, cmd
->sk
);
5653 mgmt_pending_remove(cmd
);
5655 hci_dev_unlock(hdev
);
5658 static int set_secure_conn(struct sock
*sk
, struct hci_dev
*hdev
,
5659 void *data
, u16 len
)
5661 struct mgmt_mode
*cp
= data
;
5662 struct mgmt_pending_cmd
*cmd
;
5663 struct hci_request req
;
5667 bt_dev_dbg(hdev
, "sock %p", sk
);
5669 if (!lmp_sc_capable(hdev
) &&
5670 !hci_dev_test_flag(hdev
, HCI_LE_ENABLED
))
5671 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
5672 MGMT_STATUS_NOT_SUPPORTED
);
5674 if (hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
) &&
5675 lmp_sc_capable(hdev
) &&
5676 !hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
))
5677 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
5678 MGMT_STATUS_REJECTED
);
5680 if (cp
->val
!= 0x00 && cp
->val
!= 0x01 && cp
->val
!= 0x02)
5681 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
5682 MGMT_STATUS_INVALID_PARAMS
);
5686 if (!hdev_is_powered(hdev
) || !lmp_sc_capable(hdev
) ||
5687 !hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
)) {
5691 changed
= !hci_dev_test_and_set_flag(hdev
,
5693 if (cp
->val
== 0x02)
5694 hci_dev_set_flag(hdev
, HCI_SC_ONLY
);
5696 hci_dev_clear_flag(hdev
, HCI_SC_ONLY
);
5698 changed
= hci_dev_test_and_clear_flag(hdev
,
5700 hci_dev_clear_flag(hdev
, HCI_SC_ONLY
);
5703 err
= send_settings_rsp(sk
, MGMT_OP_SET_SECURE_CONN
, hdev
);
5708 err
= new_settings(hdev
, sk
);
5713 if (pending_find(MGMT_OP_SET_SECURE_CONN
, hdev
)) {
5714 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
5721 if (val
== hci_dev_test_flag(hdev
, HCI_SC_ENABLED
) &&
5722 (cp
->val
== 0x02) == hci_dev_test_flag(hdev
, HCI_SC_ONLY
)) {
5723 err
= send_settings_rsp(sk
, MGMT_OP_SET_SECURE_CONN
, hdev
);
5727 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_SECURE_CONN
, hdev
, data
, len
);
5733 hci_req_init(&req
, hdev
);
5734 hci_req_add(&req
, HCI_OP_WRITE_SC_SUPPORT
, 1, &val
);
5735 err
= hci_req_run(&req
, sc_enable_complete
);
5737 mgmt_pending_remove(cmd
);
5742 hci_dev_unlock(hdev
);
5746 static int set_debug_keys(struct sock
*sk
, struct hci_dev
*hdev
,
5747 void *data
, u16 len
)
5749 struct mgmt_mode
*cp
= data
;
5750 bool changed
, use_changed
;
5753 bt_dev_dbg(hdev
, "sock %p", sk
);
5755 if (cp
->val
!= 0x00 && cp
->val
!= 0x01 && cp
->val
!= 0x02)
5756 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEBUG_KEYS
,
5757 MGMT_STATUS_INVALID_PARAMS
);
5762 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_KEEP_DEBUG_KEYS
);
5764 changed
= hci_dev_test_and_clear_flag(hdev
,
5765 HCI_KEEP_DEBUG_KEYS
);
5767 if (cp
->val
== 0x02)
5768 use_changed
= !hci_dev_test_and_set_flag(hdev
,
5769 HCI_USE_DEBUG_KEYS
);
5771 use_changed
= hci_dev_test_and_clear_flag(hdev
,
5772 HCI_USE_DEBUG_KEYS
);
5774 if (hdev_is_powered(hdev
) && use_changed
&&
5775 hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
)) {
5776 u8 mode
= (cp
->val
== 0x02) ? 0x01 : 0x00;
5777 hci_send_cmd(hdev
, HCI_OP_WRITE_SSP_DEBUG_MODE
,
5778 sizeof(mode
), &mode
);
5781 err
= send_settings_rsp(sk
, MGMT_OP_SET_DEBUG_KEYS
, hdev
);
5786 err
= new_settings(hdev
, sk
);
5789 hci_dev_unlock(hdev
);
5793 static int set_privacy(struct sock
*sk
, struct hci_dev
*hdev
, void *cp_data
,
5796 struct mgmt_cp_set_privacy
*cp
= cp_data
;
5800 bt_dev_dbg(hdev
, "sock %p", sk
);
5802 if (!lmp_le_capable(hdev
))
5803 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PRIVACY
,
5804 MGMT_STATUS_NOT_SUPPORTED
);
5806 if (cp
->privacy
!= 0x00 && cp
->privacy
!= 0x01 && cp
->privacy
!= 0x02)
5807 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PRIVACY
,
5808 MGMT_STATUS_INVALID_PARAMS
);
5810 if (hdev_is_powered(hdev
))
5811 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PRIVACY
,
5812 MGMT_STATUS_REJECTED
);
5816 /* If user space supports this command it is also expected to
5817 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
5819 hci_dev_set_flag(hdev
, HCI_RPA_RESOLVING
);
5822 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_PRIVACY
);
5823 memcpy(hdev
->irk
, cp
->irk
, sizeof(hdev
->irk
));
5824 hci_dev_set_flag(hdev
, HCI_RPA_EXPIRED
);
5825 hci_adv_instances_set_rpa_expired(hdev
, true);
5826 if (cp
->privacy
== 0x02)
5827 hci_dev_set_flag(hdev
, HCI_LIMITED_PRIVACY
);
5829 hci_dev_clear_flag(hdev
, HCI_LIMITED_PRIVACY
);
5831 changed
= hci_dev_test_and_clear_flag(hdev
, HCI_PRIVACY
);
5832 memset(hdev
->irk
, 0, sizeof(hdev
->irk
));
5833 hci_dev_clear_flag(hdev
, HCI_RPA_EXPIRED
);
5834 hci_adv_instances_set_rpa_expired(hdev
, false);
5835 hci_dev_clear_flag(hdev
, HCI_LIMITED_PRIVACY
);
5838 err
= send_settings_rsp(sk
, MGMT_OP_SET_PRIVACY
, hdev
);
5843 err
= new_settings(hdev
, sk
);
5846 hci_dev_unlock(hdev
);
5850 static bool irk_is_valid(struct mgmt_irk_info
*irk
)
5852 switch (irk
->addr
.type
) {
5853 case BDADDR_LE_PUBLIC
:
5856 case BDADDR_LE_RANDOM
:
5857 /* Two most significant bits shall be set */
5858 if ((irk
->addr
.bdaddr
.b
[5] & 0xc0) != 0xc0)
5866 static int load_irks(struct sock
*sk
, struct hci_dev
*hdev
, void *cp_data
,
5869 struct mgmt_cp_load_irks
*cp
= cp_data
;
5870 const u16 max_irk_count
= ((U16_MAX
- sizeof(*cp
)) /
5871 sizeof(struct mgmt_irk_info
));
5872 u16 irk_count
, expected_len
;
5875 bt_dev_dbg(hdev
, "sock %p", sk
);
5877 if (!lmp_le_capable(hdev
))
5878 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_IRKS
,
5879 MGMT_STATUS_NOT_SUPPORTED
);
5881 irk_count
= __le16_to_cpu(cp
->irk_count
);
5882 if (irk_count
> max_irk_count
) {
5883 bt_dev_err(hdev
, "load_irks: too big irk_count value %u",
5885 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_IRKS
,
5886 MGMT_STATUS_INVALID_PARAMS
);
5889 expected_len
= struct_size(cp
, irks
, irk_count
);
5890 if (expected_len
!= len
) {
5891 bt_dev_err(hdev
, "load_irks: expected %u bytes, got %u bytes",
5893 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_IRKS
,
5894 MGMT_STATUS_INVALID_PARAMS
);
5897 bt_dev_dbg(hdev
, "irk_count %u", irk_count
);
5899 for (i
= 0; i
< irk_count
; i
++) {
5900 struct mgmt_irk_info
*key
= &cp
->irks
[i
];
5902 if (!irk_is_valid(key
))
5903 return mgmt_cmd_status(sk
, hdev
->id
,
5905 MGMT_STATUS_INVALID_PARAMS
);
5910 hci_smp_irks_clear(hdev
);
5912 for (i
= 0; i
< irk_count
; i
++) {
5913 struct mgmt_irk_info
*irk
= &cp
->irks
[i
];
5915 if (hci_is_blocked_key(hdev
,
5916 HCI_BLOCKED_KEY_TYPE_IRK
,
5918 bt_dev_warn(hdev
, "Skipping blocked IRK for %pMR",
5923 hci_add_irk(hdev
, &irk
->addr
.bdaddr
,
5924 le_addr_type(irk
->addr
.type
), irk
->val
,
5928 hci_dev_set_flag(hdev
, HCI_RPA_RESOLVING
);
5930 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_IRKS
, 0, NULL
, 0);
5932 hci_dev_unlock(hdev
);
5937 static bool ltk_is_valid(struct mgmt_ltk_info
*key
)
5939 if (key
->master
!= 0x00 && key
->master
!= 0x01)
5942 switch (key
->addr
.type
) {
5943 case BDADDR_LE_PUBLIC
:
5946 case BDADDR_LE_RANDOM
:
5947 /* Two most significant bits shall be set */
5948 if ((key
->addr
.bdaddr
.b
[5] & 0xc0) != 0xc0)
5956 static int load_long_term_keys(struct sock
*sk
, struct hci_dev
*hdev
,
5957 void *cp_data
, u16 len
)
5959 struct mgmt_cp_load_long_term_keys
*cp
= cp_data
;
5960 const u16 max_key_count
= ((U16_MAX
- sizeof(*cp
)) /
5961 sizeof(struct mgmt_ltk_info
));
5962 u16 key_count
, expected_len
;
5965 bt_dev_dbg(hdev
, "sock %p", sk
);
5967 if (!lmp_le_capable(hdev
))
5968 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
,
5969 MGMT_STATUS_NOT_SUPPORTED
);
5971 key_count
= __le16_to_cpu(cp
->key_count
);
5972 if (key_count
> max_key_count
) {
5973 bt_dev_err(hdev
, "load_ltks: too big key_count value %u",
5975 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
,
5976 MGMT_STATUS_INVALID_PARAMS
);
5979 expected_len
= struct_size(cp
, keys
, key_count
);
5980 if (expected_len
!= len
) {
5981 bt_dev_err(hdev
, "load_keys: expected %u bytes, got %u bytes",
5983 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
,
5984 MGMT_STATUS_INVALID_PARAMS
);
5987 bt_dev_dbg(hdev
, "key_count %u", key_count
);
5989 for (i
= 0; i
< key_count
; i
++) {
5990 struct mgmt_ltk_info
*key
= &cp
->keys
[i
];
5992 if (!ltk_is_valid(key
))
5993 return mgmt_cmd_status(sk
, hdev
->id
,
5994 MGMT_OP_LOAD_LONG_TERM_KEYS
,
5995 MGMT_STATUS_INVALID_PARAMS
);
6000 hci_smp_ltks_clear(hdev
);
6002 for (i
= 0; i
< key_count
; i
++) {
6003 struct mgmt_ltk_info
*key
= &cp
->keys
[i
];
6004 u8 type
, authenticated
;
6006 if (hci_is_blocked_key(hdev
,
6007 HCI_BLOCKED_KEY_TYPE_LTK
,
6009 bt_dev_warn(hdev
, "Skipping blocked LTK for %pMR",
6014 switch (key
->type
) {
6015 case MGMT_LTK_UNAUTHENTICATED
:
6016 authenticated
= 0x00;
6017 type
= key
->master
? SMP_LTK
: SMP_LTK_SLAVE
;
6019 case MGMT_LTK_AUTHENTICATED
:
6020 authenticated
= 0x01;
6021 type
= key
->master
? SMP_LTK
: SMP_LTK_SLAVE
;
6023 case MGMT_LTK_P256_UNAUTH
:
6024 authenticated
= 0x00;
6025 type
= SMP_LTK_P256
;
6027 case MGMT_LTK_P256_AUTH
:
6028 authenticated
= 0x01;
6029 type
= SMP_LTK_P256
;
6031 case MGMT_LTK_P256_DEBUG
:
6032 authenticated
= 0x00;
6033 type
= SMP_LTK_P256_DEBUG
;
6039 hci_add_ltk(hdev
, &key
->addr
.bdaddr
,
6040 le_addr_type(key
->addr
.type
), type
, authenticated
,
6041 key
->val
, key
->enc_size
, key
->ediv
, key
->rand
);
6044 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
, 0,
6047 hci_dev_unlock(hdev
);
6052 static int conn_info_cmd_complete(struct mgmt_pending_cmd
*cmd
, u8 status
)
6054 struct hci_conn
*conn
= cmd
->user_data
;
6055 struct mgmt_rp_get_conn_info rp
;
6058 memcpy(&rp
.addr
, cmd
->param
, sizeof(rp
.addr
));
6060 if (status
== MGMT_STATUS_SUCCESS
) {
6061 rp
.rssi
= conn
->rssi
;
6062 rp
.tx_power
= conn
->tx_power
;
6063 rp
.max_tx_power
= conn
->max_tx_power
;
6065 rp
.rssi
= HCI_RSSI_INVALID
;
6066 rp
.tx_power
= HCI_TX_POWER_INVALID
;
6067 rp
.max_tx_power
= HCI_TX_POWER_INVALID
;
6070 err
= mgmt_cmd_complete(cmd
->sk
, cmd
->index
, MGMT_OP_GET_CONN_INFO
,
6071 status
, &rp
, sizeof(rp
));
6073 hci_conn_drop(conn
);
6079 static void conn_info_refresh_complete(struct hci_dev
*hdev
, u8 hci_status
,
6082 struct hci_cp_read_rssi
*cp
;
6083 struct mgmt_pending_cmd
*cmd
;
6084 struct hci_conn
*conn
;
6088 bt_dev_dbg(hdev
, "status 0x%02x", hci_status
);
6092 /* Commands sent in request are either Read RSSI or Read Transmit Power
6093 * Level so we check which one was last sent to retrieve connection
6094 * handle. Both commands have handle as first parameter so it's safe to
6095 * cast data on the same command struct.
6097 * First command sent is always Read RSSI and we fail only if it fails.
6098 * In other case we simply override error to indicate success as we
6099 * already remembered if TX power value is actually valid.
6101 cp
= hci_sent_cmd_data(hdev
, HCI_OP_READ_RSSI
);
6103 cp
= hci_sent_cmd_data(hdev
, HCI_OP_READ_TX_POWER
);
6104 status
= MGMT_STATUS_SUCCESS
;
6106 status
= mgmt_status(hci_status
);
6110 bt_dev_err(hdev
, "invalid sent_cmd in conn_info response");
6114 handle
= __le16_to_cpu(cp
->handle
);
6115 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
6117 bt_dev_err(hdev
, "unknown handle (%d) in conn_info response",
6122 cmd
= pending_find_data(MGMT_OP_GET_CONN_INFO
, hdev
, conn
);
6126 cmd
->cmd_complete(cmd
, status
);
6127 mgmt_pending_remove(cmd
);
6130 hci_dev_unlock(hdev
);
6133 static int get_conn_info(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
6136 struct mgmt_cp_get_conn_info
*cp
= data
;
6137 struct mgmt_rp_get_conn_info rp
;
6138 struct hci_conn
*conn
;
6139 unsigned long conn_info_age
;
6142 bt_dev_dbg(hdev
, "sock %p", sk
);
6144 memset(&rp
, 0, sizeof(rp
));
6145 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
6146 rp
.addr
.type
= cp
->addr
.type
;
6148 if (!bdaddr_type_is_valid(cp
->addr
.type
))
6149 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
6150 MGMT_STATUS_INVALID_PARAMS
,
6155 if (!hdev_is_powered(hdev
)) {
6156 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
6157 MGMT_STATUS_NOT_POWERED
, &rp
,
6162 if (cp
->addr
.type
== BDADDR_BREDR
)
6163 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
6166 conn
= hci_conn_hash_lookup_ba(hdev
, LE_LINK
, &cp
->addr
.bdaddr
);
6168 if (!conn
|| conn
->state
!= BT_CONNECTED
) {
6169 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
6170 MGMT_STATUS_NOT_CONNECTED
, &rp
,
6175 if (pending_find_data(MGMT_OP_GET_CONN_INFO
, hdev
, conn
)) {
6176 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
6177 MGMT_STATUS_BUSY
, &rp
, sizeof(rp
));
6181 /* To avoid client trying to guess when to poll again for information we
6182 * calculate conn info age as random value between min/max set in hdev.
6184 conn_info_age
= hdev
->conn_info_min_age
+
6185 prandom_u32_max(hdev
->conn_info_max_age
-
6186 hdev
->conn_info_min_age
);
6188 /* Query controller to refresh cached values if they are too old or were
6191 if (time_after(jiffies
, conn
->conn_info_timestamp
+
6192 msecs_to_jiffies(conn_info_age
)) ||
6193 !conn
->conn_info_timestamp
) {
6194 struct hci_request req
;
6195 struct hci_cp_read_tx_power req_txp_cp
;
6196 struct hci_cp_read_rssi req_rssi_cp
;
6197 struct mgmt_pending_cmd
*cmd
;
6199 hci_req_init(&req
, hdev
);
6200 req_rssi_cp
.handle
= cpu_to_le16(conn
->handle
);
6201 hci_req_add(&req
, HCI_OP_READ_RSSI
, sizeof(req_rssi_cp
),
6204 /* For LE links TX power does not change thus we don't need to
6205 * query for it once value is known.
6207 if (!bdaddr_type_is_le(cp
->addr
.type
) ||
6208 conn
->tx_power
== HCI_TX_POWER_INVALID
) {
6209 req_txp_cp
.handle
= cpu_to_le16(conn
->handle
);
6210 req_txp_cp
.type
= 0x00;
6211 hci_req_add(&req
, HCI_OP_READ_TX_POWER
,
6212 sizeof(req_txp_cp
), &req_txp_cp
);
6215 /* Max TX power needs to be read only once per connection */
6216 if (conn
->max_tx_power
== HCI_TX_POWER_INVALID
) {
6217 req_txp_cp
.handle
= cpu_to_le16(conn
->handle
);
6218 req_txp_cp
.type
= 0x01;
6219 hci_req_add(&req
, HCI_OP_READ_TX_POWER
,
6220 sizeof(req_txp_cp
), &req_txp_cp
);
6223 err
= hci_req_run(&req
, conn_info_refresh_complete
);
6227 cmd
= mgmt_pending_add(sk
, MGMT_OP_GET_CONN_INFO
, hdev
,
6234 hci_conn_hold(conn
);
6235 cmd
->user_data
= hci_conn_get(conn
);
6236 cmd
->cmd_complete
= conn_info_cmd_complete
;
6238 conn
->conn_info_timestamp
= jiffies
;
6240 /* Cache is valid, just reply with values cached in hci_conn */
6241 rp
.rssi
= conn
->rssi
;
6242 rp
.tx_power
= conn
->tx_power
;
6243 rp
.max_tx_power
= conn
->max_tx_power
;
6245 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
6246 MGMT_STATUS_SUCCESS
, &rp
, sizeof(rp
));
6250 hci_dev_unlock(hdev
);
6254 static int clock_info_cmd_complete(struct mgmt_pending_cmd
*cmd
, u8 status
)
6256 struct hci_conn
*conn
= cmd
->user_data
;
6257 struct mgmt_rp_get_clock_info rp
;
6258 struct hci_dev
*hdev
;
6261 memset(&rp
, 0, sizeof(rp
));
6262 memcpy(&rp
.addr
, cmd
->param
, sizeof(rp
.addr
));
6267 hdev
= hci_dev_get(cmd
->index
);
6269 rp
.local_clock
= cpu_to_le32(hdev
->clock
);
6274 rp
.piconet_clock
= cpu_to_le32(conn
->clock
);
6275 rp
.accuracy
= cpu_to_le16(conn
->clock_accuracy
);
6279 err
= mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, status
, &rp
,
6283 hci_conn_drop(conn
);
6290 static void get_clock_info_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
6292 struct hci_cp_read_clock
*hci_cp
;
6293 struct mgmt_pending_cmd
*cmd
;
6294 struct hci_conn
*conn
;
6296 bt_dev_dbg(hdev
, "status %u", status
);
6300 hci_cp
= hci_sent_cmd_data(hdev
, HCI_OP_READ_CLOCK
);
6304 if (hci_cp
->which
) {
6305 u16 handle
= __le16_to_cpu(hci_cp
->handle
);
6306 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
6311 cmd
= pending_find_data(MGMT_OP_GET_CLOCK_INFO
, hdev
, conn
);
6315 cmd
->cmd_complete(cmd
, mgmt_status(status
));
6316 mgmt_pending_remove(cmd
);
6319 hci_dev_unlock(hdev
);
6322 static int get_clock_info(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
6325 struct mgmt_cp_get_clock_info
*cp
= data
;
6326 struct mgmt_rp_get_clock_info rp
;
6327 struct hci_cp_read_clock hci_cp
;
6328 struct mgmt_pending_cmd
*cmd
;
6329 struct hci_request req
;
6330 struct hci_conn
*conn
;
6333 bt_dev_dbg(hdev
, "sock %p", sk
);
6335 memset(&rp
, 0, sizeof(rp
));
6336 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
6337 rp
.addr
.type
= cp
->addr
.type
;
6339 if (cp
->addr
.type
!= BDADDR_BREDR
)
6340 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CLOCK_INFO
,
6341 MGMT_STATUS_INVALID_PARAMS
,
6346 if (!hdev_is_powered(hdev
)) {
6347 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CLOCK_INFO
,
6348 MGMT_STATUS_NOT_POWERED
, &rp
,
6353 if (bacmp(&cp
->addr
.bdaddr
, BDADDR_ANY
)) {
6354 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
6356 if (!conn
|| conn
->state
!= BT_CONNECTED
) {
6357 err
= mgmt_cmd_complete(sk
, hdev
->id
,
6358 MGMT_OP_GET_CLOCK_INFO
,
6359 MGMT_STATUS_NOT_CONNECTED
,
6367 cmd
= mgmt_pending_add(sk
, MGMT_OP_GET_CLOCK_INFO
, hdev
, data
, len
);
6373 cmd
->cmd_complete
= clock_info_cmd_complete
;
6375 hci_req_init(&req
, hdev
);
6377 memset(&hci_cp
, 0, sizeof(hci_cp
));
6378 hci_req_add(&req
, HCI_OP_READ_CLOCK
, sizeof(hci_cp
), &hci_cp
);
6381 hci_conn_hold(conn
);
6382 cmd
->user_data
= hci_conn_get(conn
);
6384 hci_cp
.handle
= cpu_to_le16(conn
->handle
);
6385 hci_cp
.which
= 0x01; /* Piconet clock */
6386 hci_req_add(&req
, HCI_OP_READ_CLOCK
, sizeof(hci_cp
), &hci_cp
);
6389 err
= hci_req_run(&req
, get_clock_info_complete
);
6391 mgmt_pending_remove(cmd
);
6394 hci_dev_unlock(hdev
);
6398 static bool is_connected(struct hci_dev
*hdev
, bdaddr_t
*addr
, u8 type
)
6400 struct hci_conn
*conn
;
6402 conn
= hci_conn_hash_lookup_ba(hdev
, LE_LINK
, addr
);
6406 if (conn
->dst_type
!= type
)
6409 if (conn
->state
!= BT_CONNECTED
)
6415 /* This function requires the caller holds hdev->lock */
6416 static int hci_conn_params_set(struct hci_dev
*hdev
, bdaddr_t
*addr
,
6417 u8 addr_type
, u8 auto_connect
)
6419 struct hci_conn_params
*params
;
6421 params
= hci_conn_params_add(hdev
, addr
, addr_type
);
6425 if (params
->auto_connect
== auto_connect
)
6428 list_del_init(¶ms
->action
);
6430 switch (auto_connect
) {
6431 case HCI_AUTO_CONN_DISABLED
:
6432 case HCI_AUTO_CONN_LINK_LOSS
:
6433 /* If auto connect is being disabled when we're trying to
6434 * connect to device, keep connecting.
6436 if (params
->explicit_connect
)
6437 list_add(¶ms
->action
, &hdev
->pend_le_conns
);
6439 case HCI_AUTO_CONN_REPORT
:
6440 if (params
->explicit_connect
)
6441 list_add(¶ms
->action
, &hdev
->pend_le_conns
);
6443 list_add(¶ms
->action
, &hdev
->pend_le_reports
);
6445 case HCI_AUTO_CONN_DIRECT
:
6446 case HCI_AUTO_CONN_ALWAYS
:
6447 if (!is_connected(hdev
, addr
, addr_type
))
6448 list_add(¶ms
->action
, &hdev
->pend_le_conns
);
6452 params
->auto_connect
= auto_connect
;
6454 bt_dev_dbg(hdev
, "addr %pMR (type %u) auto_connect %u",
6455 addr
, addr_type
, auto_connect
);
6460 static void device_added(struct sock
*sk
, struct hci_dev
*hdev
,
6461 bdaddr_t
*bdaddr
, u8 type
, u8 action
)
6463 struct mgmt_ev_device_added ev
;
6465 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
6466 ev
.addr
.type
= type
;
6469 mgmt_event(MGMT_EV_DEVICE_ADDED
, hdev
, &ev
, sizeof(ev
), sk
);
6472 static int add_device(struct sock
*sk
, struct hci_dev
*hdev
,
6473 void *data
, u16 len
)
6475 struct mgmt_cp_add_device
*cp
= data
;
6476 u8 auto_conn
, addr_type
;
6477 struct hci_conn_params
*params
;
6479 u32 current_flags
= 0;
6481 bt_dev_dbg(hdev
, "sock %p", sk
);
6483 if (!bdaddr_type_is_valid(cp
->addr
.type
) ||
6484 !bacmp(&cp
->addr
.bdaddr
, BDADDR_ANY
))
6485 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_DEVICE
,
6486 MGMT_STATUS_INVALID_PARAMS
,
6487 &cp
->addr
, sizeof(cp
->addr
));
6489 if (cp
->action
!= 0x00 && cp
->action
!= 0x01 && cp
->action
!= 0x02)
6490 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_DEVICE
,
6491 MGMT_STATUS_INVALID_PARAMS
,
6492 &cp
->addr
, sizeof(cp
->addr
));
6496 if (cp
->addr
.type
== BDADDR_BREDR
) {
6497 /* Only incoming connections action is supported for now */
6498 if (cp
->action
!= 0x01) {
6499 err
= mgmt_cmd_complete(sk
, hdev
->id
,
6501 MGMT_STATUS_INVALID_PARAMS
,
6502 &cp
->addr
, sizeof(cp
->addr
));
6506 err
= hci_bdaddr_list_add_with_flags(&hdev
->whitelist
,
6512 hci_req_update_scan(hdev
);
6517 addr_type
= le_addr_type(cp
->addr
.type
);
6519 if (cp
->action
== 0x02)
6520 auto_conn
= HCI_AUTO_CONN_ALWAYS
;
6521 else if (cp
->action
== 0x01)
6522 auto_conn
= HCI_AUTO_CONN_DIRECT
;
6524 auto_conn
= HCI_AUTO_CONN_REPORT
;
6526 /* Kernel internally uses conn_params with resolvable private
6527 * address, but Add Device allows only identity addresses.
6528 * Make sure it is enforced before calling
6529 * hci_conn_params_lookup.
6531 if (!hci_is_identity_address(&cp
->addr
.bdaddr
, addr_type
)) {
6532 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_DEVICE
,
6533 MGMT_STATUS_INVALID_PARAMS
,
6534 &cp
->addr
, sizeof(cp
->addr
));
6538 /* If the connection parameters don't exist for this device,
6539 * they will be created and configured with defaults.
6541 if (hci_conn_params_set(hdev
, &cp
->addr
.bdaddr
, addr_type
,
6543 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_DEVICE
,
6544 MGMT_STATUS_FAILED
, &cp
->addr
,
6548 params
= hci_conn_params_lookup(hdev
, &cp
->addr
.bdaddr
,
6551 current_flags
= params
->current_flags
;
6554 hci_update_background_scan(hdev
);
6557 device_added(sk
, hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
, cp
->action
);
6558 device_flags_changed(NULL
, hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
,
6559 SUPPORTED_DEVICE_FLAGS(), current_flags
);
6561 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_DEVICE
,
6562 MGMT_STATUS_SUCCESS
, &cp
->addr
,
6566 hci_dev_unlock(hdev
);
6570 static void device_removed(struct sock
*sk
, struct hci_dev
*hdev
,
6571 bdaddr_t
*bdaddr
, u8 type
)
6573 struct mgmt_ev_device_removed ev
;
6575 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
6576 ev
.addr
.type
= type
;
6578 mgmt_event(MGMT_EV_DEVICE_REMOVED
, hdev
, &ev
, sizeof(ev
), sk
);
6581 static int remove_device(struct sock
*sk
, struct hci_dev
*hdev
,
6582 void *data
, u16 len
)
6584 struct mgmt_cp_remove_device
*cp
= data
;
6587 bt_dev_dbg(hdev
, "sock %p", sk
);
6591 if (bacmp(&cp
->addr
.bdaddr
, BDADDR_ANY
)) {
6592 struct hci_conn_params
*params
;
6595 if (!bdaddr_type_is_valid(cp
->addr
.type
)) {
6596 err
= mgmt_cmd_complete(sk
, hdev
->id
,
6597 MGMT_OP_REMOVE_DEVICE
,
6598 MGMT_STATUS_INVALID_PARAMS
,
6599 &cp
->addr
, sizeof(cp
->addr
));
6603 if (cp
->addr
.type
== BDADDR_BREDR
) {
6604 err
= hci_bdaddr_list_del(&hdev
->whitelist
,
6608 err
= mgmt_cmd_complete(sk
, hdev
->id
,
6609 MGMT_OP_REMOVE_DEVICE
,
6610 MGMT_STATUS_INVALID_PARAMS
,
6616 hci_req_update_scan(hdev
);
6618 device_removed(sk
, hdev
, &cp
->addr
.bdaddr
,
6623 addr_type
= le_addr_type(cp
->addr
.type
);
6625 /* Kernel internally uses conn_params with resolvable private
6626 * address, but Remove Device allows only identity addresses.
6627 * Make sure it is enforced before calling
6628 * hci_conn_params_lookup.
6630 if (!hci_is_identity_address(&cp
->addr
.bdaddr
, addr_type
)) {
6631 err
= mgmt_cmd_complete(sk
, hdev
->id
,
6632 MGMT_OP_REMOVE_DEVICE
,
6633 MGMT_STATUS_INVALID_PARAMS
,
6634 &cp
->addr
, sizeof(cp
->addr
));
6638 params
= hci_conn_params_lookup(hdev
, &cp
->addr
.bdaddr
,
6641 err
= mgmt_cmd_complete(sk
, hdev
->id
,
6642 MGMT_OP_REMOVE_DEVICE
,
6643 MGMT_STATUS_INVALID_PARAMS
,
6644 &cp
->addr
, sizeof(cp
->addr
));
6648 if (params
->auto_connect
== HCI_AUTO_CONN_DISABLED
||
6649 params
->auto_connect
== HCI_AUTO_CONN_EXPLICIT
) {
6650 err
= mgmt_cmd_complete(sk
, hdev
->id
,
6651 MGMT_OP_REMOVE_DEVICE
,
6652 MGMT_STATUS_INVALID_PARAMS
,
6653 &cp
->addr
, sizeof(cp
->addr
));
6657 list_del(¶ms
->action
);
6658 list_del(¶ms
->list
);
6660 hci_update_background_scan(hdev
);
6662 device_removed(sk
, hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
);
6664 struct hci_conn_params
*p
, *tmp
;
6665 struct bdaddr_list
*b
, *btmp
;
6667 if (cp
->addr
.type
) {
6668 err
= mgmt_cmd_complete(sk
, hdev
->id
,
6669 MGMT_OP_REMOVE_DEVICE
,
6670 MGMT_STATUS_INVALID_PARAMS
,
6671 &cp
->addr
, sizeof(cp
->addr
));
6675 list_for_each_entry_safe(b
, btmp
, &hdev
->whitelist
, list
) {
6676 device_removed(sk
, hdev
, &b
->bdaddr
, b
->bdaddr_type
);
6681 hci_req_update_scan(hdev
);
6683 list_for_each_entry_safe(p
, tmp
, &hdev
->le_conn_params
, list
) {
6684 if (p
->auto_connect
== HCI_AUTO_CONN_DISABLED
)
6686 device_removed(sk
, hdev
, &p
->addr
, p
->addr_type
);
6687 if (p
->explicit_connect
) {
6688 p
->auto_connect
= HCI_AUTO_CONN_EXPLICIT
;
6691 list_del(&p
->action
);
6696 bt_dev_dbg(hdev
, "All LE connection parameters were removed");
6698 hci_update_background_scan(hdev
);
6702 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_DEVICE
,
6703 MGMT_STATUS_SUCCESS
, &cp
->addr
,
6706 hci_dev_unlock(hdev
);
6710 static int load_conn_param(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
6713 struct mgmt_cp_load_conn_param
*cp
= data
;
6714 const u16 max_param_count
= ((U16_MAX
- sizeof(*cp
)) /
6715 sizeof(struct mgmt_conn_param
));
6716 u16 param_count
, expected_len
;
6719 if (!lmp_le_capable(hdev
))
6720 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_CONN_PARAM
,
6721 MGMT_STATUS_NOT_SUPPORTED
);
6723 param_count
= __le16_to_cpu(cp
->param_count
);
6724 if (param_count
> max_param_count
) {
6725 bt_dev_err(hdev
, "load_conn_param: too big param_count value %u",
6727 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_CONN_PARAM
,
6728 MGMT_STATUS_INVALID_PARAMS
);
6731 expected_len
= struct_size(cp
, params
, param_count
);
6732 if (expected_len
!= len
) {
6733 bt_dev_err(hdev
, "load_conn_param: expected %u bytes, got %u bytes",
6735 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_CONN_PARAM
,
6736 MGMT_STATUS_INVALID_PARAMS
);
6739 bt_dev_dbg(hdev
, "param_count %u", param_count
);
6743 hci_conn_params_clear_disabled(hdev
);
6745 for (i
= 0; i
< param_count
; i
++) {
6746 struct mgmt_conn_param
*param
= &cp
->params
[i
];
6747 struct hci_conn_params
*hci_param
;
6748 u16 min
, max
, latency
, timeout
;
6751 bt_dev_dbg(hdev
, "Adding %pMR (type %u)", ¶m
->addr
.bdaddr
,
6754 if (param
->addr
.type
== BDADDR_LE_PUBLIC
) {
6755 addr_type
= ADDR_LE_DEV_PUBLIC
;
6756 } else if (param
->addr
.type
== BDADDR_LE_RANDOM
) {
6757 addr_type
= ADDR_LE_DEV_RANDOM
;
6759 bt_dev_err(hdev
, "ignoring invalid connection parameters");
6763 min
= le16_to_cpu(param
->min_interval
);
6764 max
= le16_to_cpu(param
->max_interval
);
6765 latency
= le16_to_cpu(param
->latency
);
6766 timeout
= le16_to_cpu(param
->timeout
);
6768 bt_dev_dbg(hdev
, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
6769 min
, max
, latency
, timeout
);
6771 if (hci_check_conn_params(min
, max
, latency
, timeout
) < 0) {
6772 bt_dev_err(hdev
, "ignoring invalid connection parameters");
6776 hci_param
= hci_conn_params_add(hdev
, ¶m
->addr
.bdaddr
,
6779 bt_dev_err(hdev
, "failed to add connection parameters");
6783 hci_param
->conn_min_interval
= min
;
6784 hci_param
->conn_max_interval
= max
;
6785 hci_param
->conn_latency
= latency
;
6786 hci_param
->supervision_timeout
= timeout
;
6789 hci_dev_unlock(hdev
);
6791 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_CONN_PARAM
, 0,
6795 static int set_external_config(struct sock
*sk
, struct hci_dev
*hdev
,
6796 void *data
, u16 len
)
6798 struct mgmt_cp_set_external_config
*cp
= data
;
6802 bt_dev_dbg(hdev
, "sock %p", sk
);
6804 if (hdev_is_powered(hdev
))
6805 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_EXTERNAL_CONFIG
,
6806 MGMT_STATUS_REJECTED
);
6808 if (cp
->config
!= 0x00 && cp
->config
!= 0x01)
6809 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_EXTERNAL_CONFIG
,
6810 MGMT_STATUS_INVALID_PARAMS
);
6812 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
))
6813 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_EXTERNAL_CONFIG
,
6814 MGMT_STATUS_NOT_SUPPORTED
);
6819 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_EXT_CONFIGURED
);
6821 changed
= hci_dev_test_and_clear_flag(hdev
, HCI_EXT_CONFIGURED
);
6823 err
= send_options_rsp(sk
, MGMT_OP_SET_EXTERNAL_CONFIG
, hdev
);
6830 err
= new_options(hdev
, sk
);
6832 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
) == is_configured(hdev
)) {
6833 mgmt_index_removed(hdev
);
6835 if (hci_dev_test_and_change_flag(hdev
, HCI_UNCONFIGURED
)) {
6836 hci_dev_set_flag(hdev
, HCI_CONFIG
);
6837 hci_dev_set_flag(hdev
, HCI_AUTO_OFF
);
6839 queue_work(hdev
->req_workqueue
, &hdev
->power_on
);
6841 set_bit(HCI_RAW
, &hdev
->flags
);
6842 mgmt_index_added(hdev
);
6847 hci_dev_unlock(hdev
);
6851 static int set_public_address(struct sock
*sk
, struct hci_dev
*hdev
,
6852 void *data
, u16 len
)
6854 struct mgmt_cp_set_public_address
*cp
= data
;
6858 bt_dev_dbg(hdev
, "sock %p", sk
);
6860 if (hdev_is_powered(hdev
))
6861 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PUBLIC_ADDRESS
,
6862 MGMT_STATUS_REJECTED
);
6864 if (!bacmp(&cp
->bdaddr
, BDADDR_ANY
))
6865 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PUBLIC_ADDRESS
,
6866 MGMT_STATUS_INVALID_PARAMS
);
6868 if (!hdev
->set_bdaddr
)
6869 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PUBLIC_ADDRESS
,
6870 MGMT_STATUS_NOT_SUPPORTED
);
6874 changed
= !!bacmp(&hdev
->public_addr
, &cp
->bdaddr
);
6875 bacpy(&hdev
->public_addr
, &cp
->bdaddr
);
6877 err
= send_options_rsp(sk
, MGMT_OP_SET_PUBLIC_ADDRESS
, hdev
);
6884 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
))
6885 err
= new_options(hdev
, sk
);
6887 if (is_configured(hdev
)) {
6888 mgmt_index_removed(hdev
);
6890 hci_dev_clear_flag(hdev
, HCI_UNCONFIGURED
);
6892 hci_dev_set_flag(hdev
, HCI_CONFIG
);
6893 hci_dev_set_flag(hdev
, HCI_AUTO_OFF
);
6895 queue_work(hdev
->req_workqueue
, &hdev
->power_on
);
6899 hci_dev_unlock(hdev
);
6903 static void read_local_oob_ext_data_complete(struct hci_dev
*hdev
, u8 status
,
6904 u16 opcode
, struct sk_buff
*skb
)
6906 const struct mgmt_cp_read_local_oob_ext_data
*mgmt_cp
;
6907 struct mgmt_rp_read_local_oob_ext_data
*mgmt_rp
;
6908 u8
*h192
, *r192
, *h256
, *r256
;
6909 struct mgmt_pending_cmd
*cmd
;
6913 bt_dev_dbg(hdev
, "status %u", status
);
6915 cmd
= pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA
, hdev
);
6919 mgmt_cp
= cmd
->param
;
6922 status
= mgmt_status(status
);
6929 } else if (opcode
== HCI_OP_READ_LOCAL_OOB_DATA
) {
6930 struct hci_rp_read_local_oob_data
*rp
;
6932 if (skb
->len
!= sizeof(*rp
)) {
6933 status
= MGMT_STATUS_FAILED
;
6936 status
= MGMT_STATUS_SUCCESS
;
6937 rp
= (void *)skb
->data
;
6939 eir_len
= 5 + 18 + 18;
6946 struct hci_rp_read_local_oob_ext_data
*rp
;
6948 if (skb
->len
!= sizeof(*rp
)) {
6949 status
= MGMT_STATUS_FAILED
;
6952 status
= MGMT_STATUS_SUCCESS
;
6953 rp
= (void *)skb
->data
;
6955 if (hci_dev_test_flag(hdev
, HCI_SC_ONLY
)) {
6956 eir_len
= 5 + 18 + 18;
6960 eir_len
= 5 + 18 + 18 + 18 + 18;
6970 mgmt_rp
= kmalloc(sizeof(*mgmt_rp
) + eir_len
, GFP_KERNEL
);
6977 eir_len
= eir_append_data(mgmt_rp
->eir
, 0, EIR_CLASS_OF_DEV
,
6978 hdev
->dev_class
, 3);
6981 eir_len
= eir_append_data(mgmt_rp
->eir
, eir_len
,
6982 EIR_SSP_HASH_C192
, h192
, 16);
6983 eir_len
= eir_append_data(mgmt_rp
->eir
, eir_len
,
6984 EIR_SSP_RAND_R192
, r192
, 16);
6988 eir_len
= eir_append_data(mgmt_rp
->eir
, eir_len
,
6989 EIR_SSP_HASH_C256
, h256
, 16);
6990 eir_len
= eir_append_data(mgmt_rp
->eir
, eir_len
,
6991 EIR_SSP_RAND_R256
, r256
, 16);
6995 mgmt_rp
->type
= mgmt_cp
->type
;
6996 mgmt_rp
->eir_len
= cpu_to_le16(eir_len
);
6998 err
= mgmt_cmd_complete(cmd
->sk
, hdev
->id
,
6999 MGMT_OP_READ_LOCAL_OOB_EXT_DATA
, status
,
7000 mgmt_rp
, sizeof(*mgmt_rp
) + eir_len
);
7001 if (err
< 0 || status
)
7004 hci_sock_set_flag(cmd
->sk
, HCI_MGMT_OOB_DATA_EVENTS
);
7006 err
= mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED
, hdev
,
7007 mgmt_rp
, sizeof(*mgmt_rp
) + eir_len
,
7008 HCI_MGMT_OOB_DATA_EVENTS
, cmd
->sk
);
7011 mgmt_pending_remove(cmd
);
7014 static int read_local_ssp_oob_req(struct hci_dev
*hdev
, struct sock
*sk
,
7015 struct mgmt_cp_read_local_oob_ext_data
*cp
)
7017 struct mgmt_pending_cmd
*cmd
;
7018 struct hci_request req
;
7021 cmd
= mgmt_pending_add(sk
, MGMT_OP_READ_LOCAL_OOB_EXT_DATA
, hdev
,
7026 hci_req_init(&req
, hdev
);
7028 if (bredr_sc_enabled(hdev
))
7029 hci_req_add(&req
, HCI_OP_READ_LOCAL_OOB_EXT_DATA
, 0, NULL
);
7031 hci_req_add(&req
, HCI_OP_READ_LOCAL_OOB_DATA
, 0, NULL
);
7033 err
= hci_req_run_skb(&req
, read_local_oob_ext_data_complete
);
7035 mgmt_pending_remove(cmd
);
7042 static int read_local_oob_ext_data(struct sock
*sk
, struct hci_dev
*hdev
,
7043 void *data
, u16 data_len
)
7045 struct mgmt_cp_read_local_oob_ext_data
*cp
= data
;
7046 struct mgmt_rp_read_local_oob_ext_data
*rp
;
7049 u8 status
, flags
, role
, addr
[7], hash
[16], rand
[16];
7052 bt_dev_dbg(hdev
, "sock %p", sk
);
7054 if (hdev_is_powered(hdev
)) {
7056 case BIT(BDADDR_BREDR
):
7057 status
= mgmt_bredr_support(hdev
);
7063 case (BIT(BDADDR_LE_PUBLIC
) | BIT(BDADDR_LE_RANDOM
)):
7064 status
= mgmt_le_support(hdev
);
7068 eir_len
= 9 + 3 + 18 + 18 + 3;
7071 status
= MGMT_STATUS_INVALID_PARAMS
;
7076 status
= MGMT_STATUS_NOT_POWERED
;
7080 rp_len
= sizeof(*rp
) + eir_len
;
7081 rp
= kmalloc(rp_len
, GFP_ATOMIC
);
7092 case BIT(BDADDR_BREDR
):
7093 if (hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
)) {
7094 err
= read_local_ssp_oob_req(hdev
, sk
, cp
);
7095 hci_dev_unlock(hdev
);
7099 status
= MGMT_STATUS_FAILED
;
7102 eir_len
= eir_append_data(rp
->eir
, eir_len
,
7104 hdev
->dev_class
, 3);
7107 case (BIT(BDADDR_LE_PUBLIC
) | BIT(BDADDR_LE_RANDOM
)):
7108 if (hci_dev_test_flag(hdev
, HCI_SC_ENABLED
) &&
7109 smp_generate_oob(hdev
, hash
, rand
) < 0) {
7110 hci_dev_unlock(hdev
);
7111 status
= MGMT_STATUS_FAILED
;
7115 /* This should return the active RPA, but since the RPA
7116 * is only programmed on demand, it is really hard to fill
7117 * this in at the moment. For now disallow retrieving
7118 * local out-of-band data when privacy is in use.
7120 * Returning the identity address will not help here since
7121 * pairing happens before the identity resolving key is
7122 * known and thus the connection establishment happens
7123 * based on the RPA and not the identity address.
7125 if (hci_dev_test_flag(hdev
, HCI_PRIVACY
)) {
7126 hci_dev_unlock(hdev
);
7127 status
= MGMT_STATUS_REJECTED
;
7131 if (hci_dev_test_flag(hdev
, HCI_FORCE_STATIC_ADDR
) ||
7132 !bacmp(&hdev
->bdaddr
, BDADDR_ANY
) ||
7133 (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
) &&
7134 bacmp(&hdev
->static_addr
, BDADDR_ANY
))) {
7135 memcpy(addr
, &hdev
->static_addr
, 6);
7138 memcpy(addr
, &hdev
->bdaddr
, 6);
7142 eir_len
= eir_append_data(rp
->eir
, eir_len
, EIR_LE_BDADDR
,
7143 addr
, sizeof(addr
));
7145 if (hci_dev_test_flag(hdev
, HCI_ADVERTISING
))
7150 eir_len
= eir_append_data(rp
->eir
, eir_len
, EIR_LE_ROLE
,
7151 &role
, sizeof(role
));
7153 if (hci_dev_test_flag(hdev
, HCI_SC_ENABLED
)) {
7154 eir_len
= eir_append_data(rp
->eir
, eir_len
,
7156 hash
, sizeof(hash
));
7158 eir_len
= eir_append_data(rp
->eir
, eir_len
,
7160 rand
, sizeof(rand
));
7163 flags
= mgmt_get_adv_discov_flags(hdev
);
7165 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
7166 flags
|= LE_AD_NO_BREDR
;
7168 eir_len
= eir_append_data(rp
->eir
, eir_len
, EIR_FLAGS
,
7169 &flags
, sizeof(flags
));
7173 hci_dev_unlock(hdev
);
7175 hci_sock_set_flag(sk
, HCI_MGMT_OOB_DATA_EVENTS
);
7177 status
= MGMT_STATUS_SUCCESS
;
7180 rp
->type
= cp
->type
;
7181 rp
->eir_len
= cpu_to_le16(eir_len
);
7183 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_EXT_DATA
,
7184 status
, rp
, sizeof(*rp
) + eir_len
);
7185 if (err
< 0 || status
)
7188 err
= mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED
, hdev
,
7189 rp
, sizeof(*rp
) + eir_len
,
7190 HCI_MGMT_OOB_DATA_EVENTS
, sk
);
7198 static u32
get_supported_adv_flags(struct hci_dev
*hdev
)
7202 flags
|= MGMT_ADV_FLAG_CONNECTABLE
;
7203 flags
|= MGMT_ADV_FLAG_DISCOV
;
7204 flags
|= MGMT_ADV_FLAG_LIMITED_DISCOV
;
7205 flags
|= MGMT_ADV_FLAG_MANAGED_FLAGS
;
7206 flags
|= MGMT_ADV_FLAG_APPEARANCE
;
7207 flags
|= MGMT_ADV_FLAG_LOCAL_NAME
;
7208 flags
|= MGMT_ADV_PARAM_DURATION
;
7209 flags
|= MGMT_ADV_PARAM_TIMEOUT
;
7210 flags
|= MGMT_ADV_PARAM_INTERVALS
;
7211 flags
|= MGMT_ADV_PARAM_TX_POWER
;
7213 /* In extended adv TX_POWER returned from Set Adv Param
7214 * will be always valid.
7216 if ((hdev
->adv_tx_power
!= HCI_TX_POWER_INVALID
) ||
7217 ext_adv_capable(hdev
))
7218 flags
|= MGMT_ADV_FLAG_TX_POWER
;
7220 if (ext_adv_capable(hdev
)) {
7221 flags
|= MGMT_ADV_FLAG_SEC_1M
;
7222 flags
|= MGMT_ADV_FLAG_HW_OFFLOAD
;
7223 flags
|= MGMT_ADV_FLAG_CAN_SET_TX_POWER
;
7225 if (hdev
->le_features
[1] & HCI_LE_PHY_2M
)
7226 flags
|= MGMT_ADV_FLAG_SEC_2M
;
7228 if (hdev
->le_features
[1] & HCI_LE_PHY_CODED
)
7229 flags
|= MGMT_ADV_FLAG_SEC_CODED
;
7235 static int read_adv_features(struct sock
*sk
, struct hci_dev
*hdev
,
7236 void *data
, u16 data_len
)
7238 struct mgmt_rp_read_adv_features
*rp
;
7241 struct adv_info
*adv_instance
;
7242 u32 supported_flags
;
7245 bt_dev_dbg(hdev
, "sock %p", sk
);
7247 if (!lmp_le_capable(hdev
))
7248 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_READ_ADV_FEATURES
,
7249 MGMT_STATUS_REJECTED
);
7251 /* Enabling the experimental LL Privay support disables support for
7254 if (hci_dev_test_flag(hdev
, HCI_ENABLE_LL_PRIVACY
))
7255 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
7256 MGMT_STATUS_NOT_SUPPORTED
);
7260 rp_len
= sizeof(*rp
) + hdev
->adv_instance_cnt
;
7261 rp
= kmalloc(rp_len
, GFP_ATOMIC
);
7263 hci_dev_unlock(hdev
);
7267 supported_flags
= get_supported_adv_flags(hdev
);
7269 rp
->supported_flags
= cpu_to_le32(supported_flags
);
7270 rp
->max_adv_data_len
= HCI_MAX_AD_LENGTH
;
7271 rp
->max_scan_rsp_len
= HCI_MAX_AD_LENGTH
;
7272 rp
->max_instances
= hdev
->le_num_of_adv_sets
;
7273 rp
->num_instances
= hdev
->adv_instance_cnt
;
7275 instance
= rp
->instance
;
7276 list_for_each_entry(adv_instance
, &hdev
->adv_instances
, list
) {
7277 *instance
= adv_instance
->instance
;
7281 hci_dev_unlock(hdev
);
7283 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_READ_ADV_FEATURES
,
7284 MGMT_STATUS_SUCCESS
, rp
, rp_len
);
7291 static u8
calculate_name_len(struct hci_dev
*hdev
)
7293 u8 buf
[HCI_MAX_SHORT_NAME_LENGTH
+ 3];
7295 return append_local_name(hdev
, buf
, 0);
7298 static u8
tlv_data_max_len(struct hci_dev
*hdev
, u32 adv_flags
,
7301 u8 max_len
= HCI_MAX_AD_LENGTH
;
7304 if (adv_flags
& (MGMT_ADV_FLAG_DISCOV
|
7305 MGMT_ADV_FLAG_LIMITED_DISCOV
|
7306 MGMT_ADV_FLAG_MANAGED_FLAGS
))
7309 if (adv_flags
& MGMT_ADV_FLAG_TX_POWER
)
7312 if (adv_flags
& MGMT_ADV_FLAG_LOCAL_NAME
)
7313 max_len
-= calculate_name_len(hdev
);
7315 if (adv_flags
& (MGMT_ADV_FLAG_APPEARANCE
))
7322 static bool flags_managed(u32 adv_flags
)
7324 return adv_flags
& (MGMT_ADV_FLAG_DISCOV
|
7325 MGMT_ADV_FLAG_LIMITED_DISCOV
|
7326 MGMT_ADV_FLAG_MANAGED_FLAGS
);
7329 static bool tx_power_managed(u32 adv_flags
)
7331 return adv_flags
& MGMT_ADV_FLAG_TX_POWER
;
7334 static bool name_managed(u32 adv_flags
)
7336 return adv_flags
& MGMT_ADV_FLAG_LOCAL_NAME
;
7339 static bool appearance_managed(u32 adv_flags
)
7341 return adv_flags
& MGMT_ADV_FLAG_APPEARANCE
;
7344 static bool tlv_data_is_valid(struct hci_dev
*hdev
, u32 adv_flags
, u8
*data
,
7345 u8 len
, bool is_adv_data
)
7350 max_len
= tlv_data_max_len(hdev
, adv_flags
, is_adv_data
);
7355 /* Make sure that the data is correctly formatted. */
7356 for (i
= 0, cur_len
= 0; i
< len
; i
+= (cur_len
+ 1)) {
7359 if (data
[i
+ 1] == EIR_FLAGS
&&
7360 (!is_adv_data
|| flags_managed(adv_flags
)))
7363 if (data
[i
+ 1] == EIR_TX_POWER
&& tx_power_managed(adv_flags
))
7366 if (data
[i
+ 1] == EIR_NAME_COMPLETE
&& name_managed(adv_flags
))
7369 if (data
[i
+ 1] == EIR_NAME_SHORT
&& name_managed(adv_flags
))
7372 if (data
[i
+ 1] == EIR_APPEARANCE
&&
7373 appearance_managed(adv_flags
))
7376 /* If the current field length would exceed the total data
7377 * length, then it's invalid.
7379 if (i
+ cur_len
>= len
)
7386 static bool requested_adv_flags_are_valid(struct hci_dev
*hdev
, u32 adv_flags
)
7388 u32 supported_flags
, phy_flags
;
7390 /* The current implementation only supports a subset of the specified
7391 * flags. Also need to check mutual exclusiveness of sec flags.
7393 supported_flags
= get_supported_adv_flags(hdev
);
7394 phy_flags
= adv_flags
& MGMT_ADV_FLAG_SEC_MASK
;
7395 if (adv_flags
& ~supported_flags
||
7396 ((phy_flags
&& (phy_flags
^ (phy_flags
& -phy_flags
)))))
7402 static bool adv_busy(struct hci_dev
*hdev
)
7404 return (pending_find(MGMT_OP_ADD_ADVERTISING
, hdev
) ||
7405 pending_find(MGMT_OP_REMOVE_ADVERTISING
, hdev
) ||
7406 pending_find(MGMT_OP_SET_LE
, hdev
) ||
7407 pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS
, hdev
) ||
7408 pending_find(MGMT_OP_ADD_EXT_ADV_DATA
, hdev
));
7411 static void add_advertising_complete(struct hci_dev
*hdev
, u8 status
,
7414 struct mgmt_pending_cmd
*cmd
;
7415 struct mgmt_cp_add_advertising
*cp
;
7416 struct mgmt_rp_add_advertising rp
;
7417 struct adv_info
*adv_instance
, *n
;
7420 bt_dev_dbg(hdev
, "status %d", status
);
7424 cmd
= pending_find(MGMT_OP_ADD_ADVERTISING
, hdev
);
7426 cmd
= pending_find(MGMT_OP_ADD_EXT_ADV_DATA
, hdev
);
7428 list_for_each_entry_safe(adv_instance
, n
, &hdev
->adv_instances
, list
) {
7429 if (!adv_instance
->pending
)
7433 adv_instance
->pending
= false;
7437 instance
= adv_instance
->instance
;
7439 if (hdev
->cur_adv_instance
== instance
)
7440 cancel_adv_timeout(hdev
);
7442 hci_remove_adv_instance(hdev
, instance
);
7443 mgmt_advertising_removed(cmd
? cmd
->sk
: NULL
, hdev
, instance
);
7450 rp
.instance
= cp
->instance
;
7453 mgmt_cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
,
7454 mgmt_status(status
));
7456 mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
,
7457 mgmt_status(status
), &rp
, sizeof(rp
));
7459 mgmt_pending_remove(cmd
);
7462 hci_dev_unlock(hdev
);
7465 static int add_advertising(struct sock
*sk
, struct hci_dev
*hdev
,
7466 void *data
, u16 data_len
)
7468 struct mgmt_cp_add_advertising
*cp
= data
;
7469 struct mgmt_rp_add_advertising rp
;
7472 u16 timeout
, duration
;
7473 unsigned int prev_instance_cnt
= hdev
->adv_instance_cnt
;
7474 u8 schedule_instance
= 0;
7475 struct adv_info
*next_instance
;
7477 struct mgmt_pending_cmd
*cmd
;
7478 struct hci_request req
;
7480 bt_dev_dbg(hdev
, "sock %p", sk
);
7482 status
= mgmt_le_support(hdev
);
7484 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
7487 /* Enabling the experimental LL Privay support disables support for
7490 if (hci_dev_test_flag(hdev
, HCI_ENABLE_LL_PRIVACY
))
7491 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
7492 MGMT_STATUS_NOT_SUPPORTED
);
7494 if (cp
->instance
< 1 || cp
->instance
> hdev
->le_num_of_adv_sets
)
7495 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
7496 MGMT_STATUS_INVALID_PARAMS
);
7498 if (data_len
!= sizeof(*cp
) + cp
->adv_data_len
+ cp
->scan_rsp_len
)
7499 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
7500 MGMT_STATUS_INVALID_PARAMS
);
7502 flags
= __le32_to_cpu(cp
->flags
);
7503 timeout
= __le16_to_cpu(cp
->timeout
);
7504 duration
= __le16_to_cpu(cp
->duration
);
7506 if (!requested_adv_flags_are_valid(hdev
, flags
))
7507 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
7508 MGMT_STATUS_INVALID_PARAMS
);
7512 if (timeout
&& !hdev_is_powered(hdev
)) {
7513 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
7514 MGMT_STATUS_REJECTED
);
7518 if (adv_busy(hdev
)) {
7519 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
7524 if (!tlv_data_is_valid(hdev
, flags
, cp
->data
, cp
->adv_data_len
, true) ||
7525 !tlv_data_is_valid(hdev
, flags
, cp
->data
+ cp
->adv_data_len
,
7526 cp
->scan_rsp_len
, false)) {
7527 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
7528 MGMT_STATUS_INVALID_PARAMS
);
7532 err
= hci_add_adv_instance(hdev
, cp
->instance
, flags
,
7533 cp
->adv_data_len
, cp
->data
,
7535 cp
->data
+ cp
->adv_data_len
,
7537 HCI_ADV_TX_POWER_NO_PREFERENCE
,
7538 hdev
->le_adv_min_interval
,
7539 hdev
->le_adv_max_interval
);
7541 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
7542 MGMT_STATUS_FAILED
);
7546 /* Only trigger an advertising added event if a new instance was
7549 if (hdev
->adv_instance_cnt
> prev_instance_cnt
)
7550 mgmt_advertising_added(sk
, hdev
, cp
->instance
);
7552 if (hdev
->cur_adv_instance
== cp
->instance
) {
7553 /* If the currently advertised instance is being changed then
7554 * cancel the current advertising and schedule the next
7555 * instance. If there is only one instance then the overridden
7556 * advertising data will be visible right away.
7558 cancel_adv_timeout(hdev
);
7560 next_instance
= hci_get_next_instance(hdev
, cp
->instance
);
7562 schedule_instance
= next_instance
->instance
;
7563 } else if (!hdev
->adv_instance_timeout
) {
7564 /* Immediately advertise the new instance if no other
7565 * instance is currently being advertised.
7567 schedule_instance
= cp
->instance
;
7570 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
7571 * there is no instance to be advertised then we have no HCI
7572 * communication to make. Simply return.
7574 if (!hdev_is_powered(hdev
) ||
7575 hci_dev_test_flag(hdev
, HCI_ADVERTISING
) ||
7576 !schedule_instance
) {
7577 rp
.instance
= cp
->instance
;
7578 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
7579 MGMT_STATUS_SUCCESS
, &rp
, sizeof(rp
));
7583 /* We're good to go, update advertising data, parameters, and start
7586 cmd
= mgmt_pending_add(sk
, MGMT_OP_ADD_ADVERTISING
, hdev
, data
,
7593 hci_req_init(&req
, hdev
);
7595 err
= __hci_req_schedule_adv_instance(&req
, schedule_instance
, true);
7598 err
= hci_req_run(&req
, add_advertising_complete
);
7601 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
7602 MGMT_STATUS_FAILED
);
7603 mgmt_pending_remove(cmd
);
7607 hci_dev_unlock(hdev
);
7612 static void add_ext_adv_params_complete(struct hci_dev
*hdev
, u8 status
,
7615 struct mgmt_pending_cmd
*cmd
;
7616 struct mgmt_cp_add_ext_adv_params
*cp
;
7617 struct mgmt_rp_add_ext_adv_params rp
;
7618 struct adv_info
*adv_instance
;
7621 BT_DBG("%s", hdev
->name
);
7625 cmd
= pending_find(MGMT_OP_ADD_EXT_ADV_PARAMS
, hdev
);
7630 adv_instance
= hci_find_adv_instance(hdev
, cp
->instance
);
7634 rp
.instance
= cp
->instance
;
7635 rp
.tx_power
= adv_instance
->tx_power
;
7637 /* While we're at it, inform userspace of the available space for this
7638 * advertisement, given the flags that will be used.
7640 flags
= __le32_to_cpu(cp
->flags
);
7641 rp
.max_adv_data_len
= tlv_data_max_len(hdev
, flags
, true);
7642 rp
.max_scan_rsp_len
= tlv_data_max_len(hdev
, flags
, false);
7645 /* If this advertisement was previously advertising and we
7646 * failed to update it, we signal that it has been removed and
7647 * delete its structure
7649 if (!adv_instance
->pending
)
7650 mgmt_advertising_removed(cmd
->sk
, hdev
, cp
->instance
);
7652 hci_remove_adv_instance(hdev
, cp
->instance
);
7654 mgmt_cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
,
7655 mgmt_status(status
));
7658 mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
,
7659 mgmt_status(status
), &rp
, sizeof(rp
));
7664 mgmt_pending_remove(cmd
);
7666 hci_dev_unlock(hdev
);
7669 static int add_ext_adv_params(struct sock
*sk
, struct hci_dev
*hdev
,
7670 void *data
, u16 data_len
)
7672 struct mgmt_cp_add_ext_adv_params
*cp
= data
;
7673 struct mgmt_rp_add_ext_adv_params rp
;
7674 struct mgmt_pending_cmd
*cmd
= NULL
;
7675 struct adv_info
*adv_instance
;
7676 struct hci_request req
;
7677 u32 flags
, min_interval
, max_interval
;
7678 u16 timeout
, duration
;
7683 BT_DBG("%s", hdev
->name
);
7685 status
= mgmt_le_support(hdev
);
7687 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_EXT_ADV_PARAMS
,
7690 if (cp
->instance
< 1 || cp
->instance
> hdev
->le_num_of_adv_sets
)
7691 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_EXT_ADV_PARAMS
,
7692 MGMT_STATUS_INVALID_PARAMS
);
7694 /* The purpose of breaking add_advertising into two separate MGMT calls
7695 * for params and data is to allow more parameters to be added to this
7696 * structure in the future. For this reason, we verify that we have the
7697 * bare minimum structure we know of when the interface was defined. Any
7698 * extra parameters we don't know about will be ignored in this request.
7700 if (data_len
< MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE
)
7701 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
7702 MGMT_STATUS_INVALID_PARAMS
);
7704 flags
= __le32_to_cpu(cp
->flags
);
7706 if (!requested_adv_flags_are_valid(hdev
, flags
))
7707 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_EXT_ADV_PARAMS
,
7708 MGMT_STATUS_INVALID_PARAMS
);
7712 /* In new interface, we require that we are powered to register */
7713 if (!hdev_is_powered(hdev
)) {
7714 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_EXT_ADV_PARAMS
,
7715 MGMT_STATUS_REJECTED
);
7719 if (adv_busy(hdev
)) {
7720 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_EXT_ADV_PARAMS
,
7725 /* Parse defined parameters from request, use defaults otherwise */
7726 timeout
= (flags
& MGMT_ADV_PARAM_TIMEOUT
) ?
7727 __le16_to_cpu(cp
->timeout
) : 0;
7729 duration
= (flags
& MGMT_ADV_PARAM_DURATION
) ?
7730 __le16_to_cpu(cp
->duration
) :
7731 hdev
->def_multi_adv_rotation_duration
;
7733 min_interval
= (flags
& MGMT_ADV_PARAM_INTERVALS
) ?
7734 __le32_to_cpu(cp
->min_interval
) :
7735 hdev
->le_adv_min_interval
;
7737 max_interval
= (flags
& MGMT_ADV_PARAM_INTERVALS
) ?
7738 __le32_to_cpu(cp
->max_interval
) :
7739 hdev
->le_adv_max_interval
;
7741 tx_power
= (flags
& MGMT_ADV_PARAM_TX_POWER
) ?
7743 HCI_ADV_TX_POWER_NO_PREFERENCE
;
7745 /* Create advertising instance with no advertising or response data */
7746 err
= hci_add_adv_instance(hdev
, cp
->instance
, flags
,
7747 0, NULL
, 0, NULL
, timeout
, duration
,
7748 tx_power
, min_interval
, max_interval
);
7751 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_EXT_ADV_PARAMS
,
7752 MGMT_STATUS_FAILED
);
7756 hdev
->cur_adv_instance
= cp
->instance
;
7757 /* Submit request for advertising params if ext adv available */
7758 if (ext_adv_capable(hdev
)) {
7759 hci_req_init(&req
, hdev
);
7760 adv_instance
= hci_find_adv_instance(hdev
, cp
->instance
);
7762 /* Updating parameters of an active instance will return a
7763 * Command Disallowed error, so we must first disable the
7764 * instance if it is active.
7766 if (!adv_instance
->pending
)
7767 __hci_req_disable_ext_adv_instance(&req
, cp
->instance
);
7769 __hci_req_setup_ext_adv_instance(&req
, cp
->instance
);
7771 err
= hci_req_run(&req
, add_ext_adv_params_complete
);
7774 cmd
= mgmt_pending_add(sk
, MGMT_OP_ADD_EXT_ADV_PARAMS
,
7775 hdev
, data
, data_len
);
7778 hci_remove_adv_instance(hdev
, cp
->instance
);
7783 rp
.instance
= cp
->instance
;
7784 rp
.tx_power
= HCI_ADV_TX_POWER_NO_PREFERENCE
;
7785 rp
.max_adv_data_len
= tlv_data_max_len(hdev
, flags
, true);
7786 rp
.max_scan_rsp_len
= tlv_data_max_len(hdev
, flags
, false);
7787 err
= mgmt_cmd_complete(sk
, hdev
->id
,
7788 MGMT_OP_ADD_EXT_ADV_PARAMS
,
7789 MGMT_STATUS_SUCCESS
, &rp
, sizeof(rp
));
7793 hci_dev_unlock(hdev
);
7798 static int add_ext_adv_data(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
7801 struct mgmt_cp_add_ext_adv_data
*cp
= data
;
7802 struct mgmt_rp_add_ext_adv_data rp
;
7803 u8 schedule_instance
= 0;
7804 struct adv_info
*next_instance
;
7805 struct adv_info
*adv_instance
;
7807 struct mgmt_pending_cmd
*cmd
;
7808 struct hci_request req
;
7810 BT_DBG("%s", hdev
->name
);
7814 adv_instance
= hci_find_adv_instance(hdev
, cp
->instance
);
7816 if (!adv_instance
) {
7817 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_EXT_ADV_DATA
,
7818 MGMT_STATUS_INVALID_PARAMS
);
7822 /* In new interface, we require that we are powered to register */
7823 if (!hdev_is_powered(hdev
)) {
7824 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_EXT_ADV_DATA
,
7825 MGMT_STATUS_REJECTED
);
7826 goto clear_new_instance
;
7829 if (adv_busy(hdev
)) {
7830 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_EXT_ADV_DATA
,
7832 goto clear_new_instance
;
7835 /* Validate new data */
7836 if (!tlv_data_is_valid(hdev
, adv_instance
->flags
, cp
->data
,
7837 cp
->adv_data_len
, true) ||
7838 !tlv_data_is_valid(hdev
, adv_instance
->flags
, cp
->data
+
7839 cp
->adv_data_len
, cp
->scan_rsp_len
, false)) {
7840 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_EXT_ADV_DATA
,
7841 MGMT_STATUS_INVALID_PARAMS
);
7842 goto clear_new_instance
;
7845 /* Set the data in the advertising instance */
7846 hci_set_adv_instance_data(hdev
, cp
->instance
, cp
->adv_data_len
,
7847 cp
->data
, cp
->scan_rsp_len
,
7848 cp
->data
+ cp
->adv_data_len
);
7850 /* We're good to go, update advertising data, parameters, and start
7854 hci_req_init(&req
, hdev
);
7856 hci_req_add(&req
, HCI_OP_READ_LOCAL_NAME
, 0, NULL
);
7858 if (ext_adv_capable(hdev
)) {
7859 __hci_req_update_adv_data(&req
, cp
->instance
);
7860 __hci_req_update_scan_rsp_data(&req
, cp
->instance
);
7861 __hci_req_enable_ext_advertising(&req
, cp
->instance
);
7864 /* If using software rotation, determine next instance to use */
7866 if (hdev
->cur_adv_instance
== cp
->instance
) {
7867 /* If the currently advertised instance is being changed
7868 * then cancel the current advertising and schedule the
7869 * next instance. If there is only one instance then the
7870 * overridden advertising data will be visible right
7873 cancel_adv_timeout(hdev
);
7875 next_instance
= hci_get_next_instance(hdev
,
7878 schedule_instance
= next_instance
->instance
;
7879 } else if (!hdev
->adv_instance_timeout
) {
7880 /* Immediately advertise the new instance if no other
7881 * instance is currently being advertised.
7883 schedule_instance
= cp
->instance
;
7886 /* If the HCI_ADVERTISING flag is set or there is no instance to
7887 * be advertised then we have no HCI communication to make.
7890 if (hci_dev_test_flag(hdev
, HCI_ADVERTISING
) ||
7891 !schedule_instance
) {
7892 if (adv_instance
->pending
) {
7893 mgmt_advertising_added(sk
, hdev
, cp
->instance
);
7894 adv_instance
->pending
= false;
7896 rp
.instance
= cp
->instance
;
7897 err
= mgmt_cmd_complete(sk
, hdev
->id
,
7898 MGMT_OP_ADD_EXT_ADV_DATA
,
7899 MGMT_STATUS_SUCCESS
, &rp
,
7904 err
= __hci_req_schedule_adv_instance(&req
, schedule_instance
,
7908 cmd
= mgmt_pending_add(sk
, MGMT_OP_ADD_EXT_ADV_DATA
, hdev
, data
,
7912 goto clear_new_instance
;
7916 err
= hci_req_run(&req
, add_advertising_complete
);
7919 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_EXT_ADV_DATA
,
7920 MGMT_STATUS_FAILED
);
7921 mgmt_pending_remove(cmd
);
7922 goto clear_new_instance
;
7925 /* We were successful in updating data, so trigger advertising_added
7926 * event if this is an instance that wasn't previously advertising. If
7927 * a failure occurs in the requests we initiated, we will remove the
7928 * instance again in add_advertising_complete
7930 if (adv_instance
->pending
)
7931 mgmt_advertising_added(sk
, hdev
, cp
->instance
);
7936 hci_remove_adv_instance(hdev
, cp
->instance
);
7939 hci_dev_unlock(hdev
);
7944 static void remove_advertising_complete(struct hci_dev
*hdev
, u8 status
,
7947 struct mgmt_pending_cmd
*cmd
;
7948 struct mgmt_cp_remove_advertising
*cp
;
7949 struct mgmt_rp_remove_advertising rp
;
7951 bt_dev_dbg(hdev
, "status %d", status
);
7955 /* A failure status here only means that we failed to disable
7956 * advertising. Otherwise, the advertising instance has been removed,
7957 * so report success.
7959 cmd
= pending_find(MGMT_OP_REMOVE_ADVERTISING
, hdev
);
7964 rp
.instance
= cp
->instance
;
7966 mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, MGMT_STATUS_SUCCESS
,
7968 mgmt_pending_remove(cmd
);
7971 hci_dev_unlock(hdev
);
7974 static int remove_advertising(struct sock
*sk
, struct hci_dev
*hdev
,
7975 void *data
, u16 data_len
)
7977 struct mgmt_cp_remove_advertising
*cp
= data
;
7978 struct mgmt_rp_remove_advertising rp
;
7979 struct mgmt_pending_cmd
*cmd
;
7980 struct hci_request req
;
7983 bt_dev_dbg(hdev
, "sock %p", sk
);
7985 /* Enabling the experimental LL Privay support disables support for
7988 if (hci_dev_test_flag(hdev
, HCI_ENABLE_LL_PRIVACY
))
7989 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
7990 MGMT_STATUS_NOT_SUPPORTED
);
7994 if (cp
->instance
&& !hci_find_adv_instance(hdev
, cp
->instance
)) {
7995 err
= mgmt_cmd_status(sk
, hdev
->id
,
7996 MGMT_OP_REMOVE_ADVERTISING
,
7997 MGMT_STATUS_INVALID_PARAMS
);
8001 if (pending_find(MGMT_OP_ADD_ADVERTISING
, hdev
) ||
8002 pending_find(MGMT_OP_REMOVE_ADVERTISING
, hdev
) ||
8003 pending_find(MGMT_OP_SET_LE
, hdev
)) {
8004 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_ADVERTISING
,
8009 if (list_empty(&hdev
->adv_instances
)) {
8010 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_ADVERTISING
,
8011 MGMT_STATUS_INVALID_PARAMS
);
8015 hci_req_init(&req
, hdev
);
8017 /* If we use extended advertising, instance is disabled and removed */
8018 if (ext_adv_capable(hdev
)) {
8019 __hci_req_disable_ext_adv_instance(&req
, cp
->instance
);
8020 __hci_req_remove_ext_adv_instance(&req
, cp
->instance
);
8023 hci_req_clear_adv_instance(hdev
, sk
, &req
, cp
->instance
, true);
8025 if (list_empty(&hdev
->adv_instances
))
8026 __hci_req_disable_advertising(&req
);
8028 /* If no HCI commands have been collected so far or the HCI_ADVERTISING
8029 * flag is set or the device isn't powered then we have no HCI
8030 * communication to make. Simply return.
8032 if (skb_queue_empty(&req
.cmd_q
) ||
8033 !hdev_is_powered(hdev
) ||
8034 hci_dev_test_flag(hdev
, HCI_ADVERTISING
)) {
8035 hci_req_purge(&req
);
8036 rp
.instance
= cp
->instance
;
8037 err
= mgmt_cmd_complete(sk
, hdev
->id
,
8038 MGMT_OP_REMOVE_ADVERTISING
,
8039 MGMT_STATUS_SUCCESS
, &rp
, sizeof(rp
));
8043 cmd
= mgmt_pending_add(sk
, MGMT_OP_REMOVE_ADVERTISING
, hdev
, data
,
8050 err
= hci_req_run(&req
, remove_advertising_complete
);
8052 mgmt_pending_remove(cmd
);
8055 hci_dev_unlock(hdev
);
8060 static int get_adv_size_info(struct sock
*sk
, struct hci_dev
*hdev
,
8061 void *data
, u16 data_len
)
8063 struct mgmt_cp_get_adv_size_info
*cp
= data
;
8064 struct mgmt_rp_get_adv_size_info rp
;
8065 u32 flags
, supported_flags
;
8068 bt_dev_dbg(hdev
, "sock %p", sk
);
8070 if (!lmp_le_capable(hdev
))
8071 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_GET_ADV_SIZE_INFO
,
8072 MGMT_STATUS_REJECTED
);
8074 if (cp
->instance
< 1 || cp
->instance
> hdev
->le_num_of_adv_sets
)
8075 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_GET_ADV_SIZE_INFO
,
8076 MGMT_STATUS_INVALID_PARAMS
);
8078 flags
= __le32_to_cpu(cp
->flags
);
8080 /* The current implementation only supports a subset of the specified
8083 supported_flags
= get_supported_adv_flags(hdev
);
8084 if (flags
& ~supported_flags
)
8085 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_GET_ADV_SIZE_INFO
,
8086 MGMT_STATUS_INVALID_PARAMS
);
8088 rp
.instance
= cp
->instance
;
8089 rp
.flags
= cp
->flags
;
8090 rp
.max_adv_data_len
= tlv_data_max_len(hdev
, flags
, true);
8091 rp
.max_scan_rsp_len
= tlv_data_max_len(hdev
, flags
, false);
8093 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_ADV_SIZE_INFO
,
8094 MGMT_STATUS_SUCCESS
, &rp
, sizeof(rp
));
8099 static const struct hci_mgmt_handler mgmt_handlers
[] = {
8100 { NULL
}, /* 0x0000 (no command) */
8101 { read_version
, MGMT_READ_VERSION_SIZE
,
8103 HCI_MGMT_UNTRUSTED
},
8104 { read_commands
, MGMT_READ_COMMANDS_SIZE
,
8106 HCI_MGMT_UNTRUSTED
},
8107 { read_index_list
, MGMT_READ_INDEX_LIST_SIZE
,
8109 HCI_MGMT_UNTRUSTED
},
8110 { read_controller_info
, MGMT_READ_INFO_SIZE
,
8111 HCI_MGMT_UNTRUSTED
},
8112 { set_powered
, MGMT_SETTING_SIZE
},
8113 { set_discoverable
, MGMT_SET_DISCOVERABLE_SIZE
},
8114 { set_connectable
, MGMT_SETTING_SIZE
},
8115 { set_fast_connectable
, MGMT_SETTING_SIZE
},
8116 { set_bondable
, MGMT_SETTING_SIZE
},
8117 { set_link_security
, MGMT_SETTING_SIZE
},
8118 { set_ssp
, MGMT_SETTING_SIZE
},
8119 { set_hs
, MGMT_SETTING_SIZE
},
8120 { set_le
, MGMT_SETTING_SIZE
},
8121 { set_dev_class
, MGMT_SET_DEV_CLASS_SIZE
},
8122 { set_local_name
, MGMT_SET_LOCAL_NAME_SIZE
},
8123 { add_uuid
, MGMT_ADD_UUID_SIZE
},
8124 { remove_uuid
, MGMT_REMOVE_UUID_SIZE
},
8125 { load_link_keys
, MGMT_LOAD_LINK_KEYS_SIZE
,
8127 { load_long_term_keys
, MGMT_LOAD_LONG_TERM_KEYS_SIZE
,
8129 { disconnect
, MGMT_DISCONNECT_SIZE
},
8130 { get_connections
, MGMT_GET_CONNECTIONS_SIZE
},
8131 { pin_code_reply
, MGMT_PIN_CODE_REPLY_SIZE
},
8132 { pin_code_neg_reply
, MGMT_PIN_CODE_NEG_REPLY_SIZE
},
8133 { set_io_capability
, MGMT_SET_IO_CAPABILITY_SIZE
},
8134 { pair_device
, MGMT_PAIR_DEVICE_SIZE
},
8135 { cancel_pair_device
, MGMT_CANCEL_PAIR_DEVICE_SIZE
},
8136 { unpair_device
, MGMT_UNPAIR_DEVICE_SIZE
},
8137 { user_confirm_reply
, MGMT_USER_CONFIRM_REPLY_SIZE
},
8138 { user_confirm_neg_reply
, MGMT_USER_CONFIRM_NEG_REPLY_SIZE
},
8139 { user_passkey_reply
, MGMT_USER_PASSKEY_REPLY_SIZE
},
8140 { user_passkey_neg_reply
, MGMT_USER_PASSKEY_NEG_REPLY_SIZE
},
8141 { read_local_oob_data
, MGMT_READ_LOCAL_OOB_DATA_SIZE
},
8142 { add_remote_oob_data
, MGMT_ADD_REMOTE_OOB_DATA_SIZE
,
8144 { remove_remote_oob_data
, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE
},
8145 { start_discovery
, MGMT_START_DISCOVERY_SIZE
},
8146 { stop_discovery
, MGMT_STOP_DISCOVERY_SIZE
},
8147 { confirm_name
, MGMT_CONFIRM_NAME_SIZE
},
8148 { block_device
, MGMT_BLOCK_DEVICE_SIZE
},
8149 { unblock_device
, MGMT_UNBLOCK_DEVICE_SIZE
},
8150 { set_device_id
, MGMT_SET_DEVICE_ID_SIZE
},
8151 { set_advertising
, MGMT_SETTING_SIZE
},
8152 { set_bredr
, MGMT_SETTING_SIZE
},
8153 { set_static_address
, MGMT_SET_STATIC_ADDRESS_SIZE
},
8154 { set_scan_params
, MGMT_SET_SCAN_PARAMS_SIZE
},
8155 { set_secure_conn
, MGMT_SETTING_SIZE
},
8156 { set_debug_keys
, MGMT_SETTING_SIZE
},
8157 { set_privacy
, MGMT_SET_PRIVACY_SIZE
},
8158 { load_irks
, MGMT_LOAD_IRKS_SIZE
,
8160 { get_conn_info
, MGMT_GET_CONN_INFO_SIZE
},
8161 { get_clock_info
, MGMT_GET_CLOCK_INFO_SIZE
},
8162 { add_device
, MGMT_ADD_DEVICE_SIZE
},
8163 { remove_device
, MGMT_REMOVE_DEVICE_SIZE
},
8164 { load_conn_param
, MGMT_LOAD_CONN_PARAM_SIZE
,
8166 { read_unconf_index_list
, MGMT_READ_UNCONF_INDEX_LIST_SIZE
,
8168 HCI_MGMT_UNTRUSTED
},
8169 { read_config_info
, MGMT_READ_CONFIG_INFO_SIZE
,
8170 HCI_MGMT_UNCONFIGURED
|
8171 HCI_MGMT_UNTRUSTED
},
8172 { set_external_config
, MGMT_SET_EXTERNAL_CONFIG_SIZE
,
8173 HCI_MGMT_UNCONFIGURED
},
8174 { set_public_address
, MGMT_SET_PUBLIC_ADDRESS_SIZE
,
8175 HCI_MGMT_UNCONFIGURED
},
8176 { start_service_discovery
, MGMT_START_SERVICE_DISCOVERY_SIZE
,
8178 { read_local_oob_ext_data
, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE
},
8179 { read_ext_index_list
, MGMT_READ_EXT_INDEX_LIST_SIZE
,
8181 HCI_MGMT_UNTRUSTED
},
8182 { read_adv_features
, MGMT_READ_ADV_FEATURES_SIZE
},
8183 { add_advertising
, MGMT_ADD_ADVERTISING_SIZE
,
8185 { remove_advertising
, MGMT_REMOVE_ADVERTISING_SIZE
},
8186 { get_adv_size_info
, MGMT_GET_ADV_SIZE_INFO_SIZE
},
8187 { start_limited_discovery
, MGMT_START_DISCOVERY_SIZE
},
8188 { read_ext_controller_info
,MGMT_READ_EXT_INFO_SIZE
,
8189 HCI_MGMT_UNTRUSTED
},
8190 { set_appearance
, MGMT_SET_APPEARANCE_SIZE
},
8191 { get_phy_configuration
, MGMT_GET_PHY_CONFIGURATION_SIZE
},
8192 { set_phy_configuration
, MGMT_SET_PHY_CONFIGURATION_SIZE
},
8193 { set_blocked_keys
, MGMT_OP_SET_BLOCKED_KEYS_SIZE
,
8195 { set_wideband_speech
, MGMT_SETTING_SIZE
},
8196 { read_security_info
, MGMT_READ_SECURITY_INFO_SIZE
,
8197 HCI_MGMT_UNTRUSTED
},
8198 { read_exp_features_info
, MGMT_READ_EXP_FEATURES_INFO_SIZE
,
8199 HCI_MGMT_UNTRUSTED
|
8200 HCI_MGMT_HDEV_OPTIONAL
},
8201 { set_exp_feature
, MGMT_SET_EXP_FEATURE_SIZE
,
8203 HCI_MGMT_HDEV_OPTIONAL
},
8204 { read_def_system_config
, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE
,
8205 HCI_MGMT_UNTRUSTED
},
8206 { set_def_system_config
, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE
,
8208 { read_def_runtime_config
, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE
,
8209 HCI_MGMT_UNTRUSTED
},
8210 { set_def_runtime_config
, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE
,
8212 { get_device_flags
, MGMT_GET_DEVICE_FLAGS_SIZE
},
8213 { set_device_flags
, MGMT_SET_DEVICE_FLAGS_SIZE
},
8214 { read_adv_mon_features
, MGMT_READ_ADV_MONITOR_FEATURES_SIZE
},
8215 { add_adv_patterns_monitor
,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE
,
8217 { remove_adv_monitor
, MGMT_REMOVE_ADV_MONITOR_SIZE
},
8218 { add_ext_adv_params
, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE
,
8220 { add_ext_adv_data
, MGMT_ADD_EXT_ADV_DATA_SIZE
,
8224 void mgmt_index_added(struct hci_dev
*hdev
)
8226 struct mgmt_ev_ext_index ev
;
8228 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &hdev
->quirks
))
8231 switch (hdev
->dev_type
) {
8233 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
)) {
8234 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED
, hdev
,
8235 NULL
, 0, HCI_MGMT_UNCONF_INDEX_EVENTS
);
8238 mgmt_index_event(MGMT_EV_INDEX_ADDED
, hdev
, NULL
, 0,
8239 HCI_MGMT_INDEX_EVENTS
);
8252 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED
, hdev
, &ev
, sizeof(ev
),
8253 HCI_MGMT_EXT_INDEX_EVENTS
);
8256 void mgmt_index_removed(struct hci_dev
*hdev
)
8258 struct mgmt_ev_ext_index ev
;
8259 u8 status
= MGMT_STATUS_INVALID_INDEX
;
8261 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &hdev
->quirks
))
8264 switch (hdev
->dev_type
) {
8266 mgmt_pending_foreach(0, hdev
, cmd_complete_rsp
, &status
);
8268 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
)) {
8269 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED
, hdev
,
8270 NULL
, 0, HCI_MGMT_UNCONF_INDEX_EVENTS
);
8273 mgmt_index_event(MGMT_EV_INDEX_REMOVED
, hdev
, NULL
, 0,
8274 HCI_MGMT_INDEX_EVENTS
);
8287 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED
, hdev
, &ev
, sizeof(ev
),
8288 HCI_MGMT_EXT_INDEX_EVENTS
);
8291 /* This function requires the caller holds hdev->lock */
8292 static void restart_le_actions(struct hci_dev
*hdev
)
8294 struct hci_conn_params
*p
;
8296 list_for_each_entry(p
, &hdev
->le_conn_params
, list
) {
8297 /* Needed for AUTO_OFF case where might not "really"
8298 * have been powered off.
8300 list_del_init(&p
->action
);
8302 switch (p
->auto_connect
) {
8303 case HCI_AUTO_CONN_DIRECT
:
8304 case HCI_AUTO_CONN_ALWAYS
:
8305 list_add(&p
->action
, &hdev
->pend_le_conns
);
8307 case HCI_AUTO_CONN_REPORT
:
8308 list_add(&p
->action
, &hdev
->pend_le_reports
);
8316 void mgmt_power_on(struct hci_dev
*hdev
, int err
)
8318 struct cmd_lookup match
= { NULL
, hdev
};
8320 bt_dev_dbg(hdev
, "err %d", err
);
8325 restart_le_actions(hdev
);
8326 hci_update_background_scan(hdev
);
8329 mgmt_pending_foreach(MGMT_OP_SET_POWERED
, hdev
, settings_rsp
, &match
);
8331 new_settings(hdev
, match
.sk
);
8336 hci_dev_unlock(hdev
);
8339 void __mgmt_power_off(struct hci_dev
*hdev
)
8341 struct cmd_lookup match
= { NULL
, hdev
};
8342 u8 status
, zero_cod
[] = { 0, 0, 0 };
8344 mgmt_pending_foreach(MGMT_OP_SET_POWERED
, hdev
, settings_rsp
, &match
);
8346 /* If the power off is because of hdev unregistration let
8347 * use the appropriate INVALID_INDEX status. Otherwise use
8348 * NOT_POWERED. We cover both scenarios here since later in
8349 * mgmt_index_removed() any hci_conn callbacks will have already
8350 * been triggered, potentially causing misleading DISCONNECTED
8353 if (hci_dev_test_flag(hdev
, HCI_UNREGISTER
))
8354 status
= MGMT_STATUS_INVALID_INDEX
;
8356 status
= MGMT_STATUS_NOT_POWERED
;
8358 mgmt_pending_foreach(0, hdev
, cmd_complete_rsp
, &status
);
8360 if (memcmp(hdev
->dev_class
, zero_cod
, sizeof(zero_cod
)) != 0) {
8361 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED
, hdev
,
8362 zero_cod
, sizeof(zero_cod
),
8363 HCI_MGMT_DEV_CLASS_EVENTS
, NULL
);
8364 ext_info_changed(hdev
, NULL
);
8367 new_settings(hdev
, match
.sk
);
8373 void mgmt_set_powered_failed(struct hci_dev
*hdev
, int err
)
8375 struct mgmt_pending_cmd
*cmd
;
8378 cmd
= pending_find(MGMT_OP_SET_POWERED
, hdev
);
8382 if (err
== -ERFKILL
)
8383 status
= MGMT_STATUS_RFKILLED
;
8385 status
= MGMT_STATUS_FAILED
;
8387 mgmt_cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_SET_POWERED
, status
);
8389 mgmt_pending_remove(cmd
);
8392 void mgmt_new_link_key(struct hci_dev
*hdev
, struct link_key
*key
,
8395 struct mgmt_ev_new_link_key ev
;
8397 memset(&ev
, 0, sizeof(ev
));
8399 ev
.store_hint
= persistent
;
8400 bacpy(&ev
.key
.addr
.bdaddr
, &key
->bdaddr
);
8401 ev
.key
.addr
.type
= BDADDR_BREDR
;
8402 ev
.key
.type
= key
->type
;
8403 memcpy(ev
.key
.val
, key
->val
, HCI_LINK_KEY_SIZE
);
8404 ev
.key
.pin_len
= key
->pin_len
;
8406 mgmt_event(MGMT_EV_NEW_LINK_KEY
, hdev
, &ev
, sizeof(ev
), NULL
);
8409 static u8
mgmt_ltk_type(struct smp_ltk
*ltk
)
8411 switch (ltk
->type
) {
8414 if (ltk
->authenticated
)
8415 return MGMT_LTK_AUTHENTICATED
;
8416 return MGMT_LTK_UNAUTHENTICATED
;
8418 if (ltk
->authenticated
)
8419 return MGMT_LTK_P256_AUTH
;
8420 return MGMT_LTK_P256_UNAUTH
;
8421 case SMP_LTK_P256_DEBUG
:
8422 return MGMT_LTK_P256_DEBUG
;
8425 return MGMT_LTK_UNAUTHENTICATED
;
8428 void mgmt_new_ltk(struct hci_dev
*hdev
, struct smp_ltk
*key
, bool persistent
)
8430 struct mgmt_ev_new_long_term_key ev
;
8432 memset(&ev
, 0, sizeof(ev
));
8434 /* Devices using resolvable or non-resolvable random addresses
8435 * without providing an identity resolving key don't require
8436 * to store long term keys. Their addresses will change the
8439 * Only when a remote device provides an identity address
8440 * make sure the long term key is stored. If the remote
8441 * identity is known, the long term keys are internally
8442 * mapped to the identity address. So allow static random
8443 * and public addresses here.
8445 if (key
->bdaddr_type
== ADDR_LE_DEV_RANDOM
&&
8446 (key
->bdaddr
.b
[5] & 0xc0) != 0xc0)
8447 ev
.store_hint
= 0x00;
8449 ev
.store_hint
= persistent
;
8451 bacpy(&ev
.key
.addr
.bdaddr
, &key
->bdaddr
);
8452 ev
.key
.addr
.type
= link_to_bdaddr(LE_LINK
, key
->bdaddr_type
);
8453 ev
.key
.type
= mgmt_ltk_type(key
);
8454 ev
.key
.enc_size
= key
->enc_size
;
8455 ev
.key
.ediv
= key
->ediv
;
8456 ev
.key
.rand
= key
->rand
;
8458 if (key
->type
== SMP_LTK
)
8461 /* Make sure we copy only the significant bytes based on the
8462 * encryption key size, and set the rest of the value to zeroes.
8464 memcpy(ev
.key
.val
, key
->val
, key
->enc_size
);
8465 memset(ev
.key
.val
+ key
->enc_size
, 0,
8466 sizeof(ev
.key
.val
) - key
->enc_size
);
8468 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY
, hdev
, &ev
, sizeof(ev
), NULL
);
8471 void mgmt_new_irk(struct hci_dev
*hdev
, struct smp_irk
*irk
, bool persistent
)
8473 struct mgmt_ev_new_irk ev
;
8475 memset(&ev
, 0, sizeof(ev
));
8477 ev
.store_hint
= persistent
;
8479 bacpy(&ev
.rpa
, &irk
->rpa
);
8480 bacpy(&ev
.irk
.addr
.bdaddr
, &irk
->bdaddr
);
8481 ev
.irk
.addr
.type
= link_to_bdaddr(LE_LINK
, irk
->addr_type
);
8482 memcpy(ev
.irk
.val
, irk
->val
, sizeof(irk
->val
));
8484 mgmt_event(MGMT_EV_NEW_IRK
, hdev
, &ev
, sizeof(ev
), NULL
);
8487 void mgmt_new_csrk(struct hci_dev
*hdev
, struct smp_csrk
*csrk
,
8490 struct mgmt_ev_new_csrk ev
;
8492 memset(&ev
, 0, sizeof(ev
));
8494 /* Devices using resolvable or non-resolvable random addresses
8495 * without providing an identity resolving key don't require
8496 * to store signature resolving keys. Their addresses will change
8497 * the next time around.
8499 * Only when a remote device provides an identity address
8500 * make sure the signature resolving key is stored. So allow
8501 * static random and public addresses here.
8503 if (csrk
->bdaddr_type
== ADDR_LE_DEV_RANDOM
&&
8504 (csrk
->bdaddr
.b
[5] & 0xc0) != 0xc0)
8505 ev
.store_hint
= 0x00;
8507 ev
.store_hint
= persistent
;
8509 bacpy(&ev
.key
.addr
.bdaddr
, &csrk
->bdaddr
);
8510 ev
.key
.addr
.type
= link_to_bdaddr(LE_LINK
, csrk
->bdaddr_type
);
8511 ev
.key
.type
= csrk
->type
;
8512 memcpy(ev
.key
.val
, csrk
->val
, sizeof(csrk
->val
));
8514 mgmt_event(MGMT_EV_NEW_CSRK
, hdev
, &ev
, sizeof(ev
), NULL
);
8517 void mgmt_new_conn_param(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
8518 u8 bdaddr_type
, u8 store_hint
, u16 min_interval
,
8519 u16 max_interval
, u16 latency
, u16 timeout
)
8521 struct mgmt_ev_new_conn_param ev
;
8523 if (!hci_is_identity_address(bdaddr
, bdaddr_type
))
8526 memset(&ev
, 0, sizeof(ev
));
8527 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
8528 ev
.addr
.type
= link_to_bdaddr(LE_LINK
, bdaddr_type
);
8529 ev
.store_hint
= store_hint
;
8530 ev
.min_interval
= cpu_to_le16(min_interval
);
8531 ev
.max_interval
= cpu_to_le16(max_interval
);
8532 ev
.latency
= cpu_to_le16(latency
);
8533 ev
.timeout
= cpu_to_le16(timeout
);
8535 mgmt_event(MGMT_EV_NEW_CONN_PARAM
, hdev
, &ev
, sizeof(ev
), NULL
);
8538 void mgmt_device_connected(struct hci_dev
*hdev
, struct hci_conn
*conn
,
8539 u32 flags
, u8
*name
, u8 name_len
)
8542 struct mgmt_ev_device_connected
*ev
= (void *) buf
;
8545 bacpy(&ev
->addr
.bdaddr
, &conn
->dst
);
8546 ev
->addr
.type
= link_to_bdaddr(conn
->type
, conn
->dst_type
);
8548 ev
->flags
= __cpu_to_le32(flags
);
8550 /* We must ensure that the EIR Data fields are ordered and
8551 * unique. Keep it simple for now and avoid the problem by not
8552 * adding any BR/EDR data to the LE adv.
8554 if (conn
->le_adv_data_len
> 0) {
8555 memcpy(&ev
->eir
[eir_len
],
8556 conn
->le_adv_data
, conn
->le_adv_data_len
);
8557 eir_len
= conn
->le_adv_data_len
;
8560 eir_len
= eir_append_data(ev
->eir
, 0, EIR_NAME_COMPLETE
,
8563 if (memcmp(conn
->dev_class
, "\0\0\0", 3) != 0)
8564 eir_len
= eir_append_data(ev
->eir
, eir_len
,
8566 conn
->dev_class
, 3);
8569 ev
->eir_len
= cpu_to_le16(eir_len
);
8571 mgmt_event(MGMT_EV_DEVICE_CONNECTED
, hdev
, buf
,
8572 sizeof(*ev
) + eir_len
, NULL
);
8575 static void disconnect_rsp(struct mgmt_pending_cmd
*cmd
, void *data
)
8577 struct sock
**sk
= data
;
8579 cmd
->cmd_complete(cmd
, 0);
8584 mgmt_pending_remove(cmd
);
8587 static void unpair_device_rsp(struct mgmt_pending_cmd
*cmd
, void *data
)
8589 struct hci_dev
*hdev
= data
;
8590 struct mgmt_cp_unpair_device
*cp
= cmd
->param
;
8592 device_unpaired(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
, cmd
->sk
);
8594 cmd
->cmd_complete(cmd
, 0);
8595 mgmt_pending_remove(cmd
);
8598 bool mgmt_powering_down(struct hci_dev
*hdev
)
8600 struct mgmt_pending_cmd
*cmd
;
8601 struct mgmt_mode
*cp
;
8603 cmd
= pending_find(MGMT_OP_SET_POWERED
, hdev
);
8614 void mgmt_device_disconnected(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
8615 u8 link_type
, u8 addr_type
, u8 reason
,
8616 bool mgmt_connected
)
8618 struct mgmt_ev_device_disconnected ev
;
8619 struct sock
*sk
= NULL
;
8621 /* The connection is still in hci_conn_hash so test for 1
8622 * instead of 0 to know if this is the last one.
8624 if (mgmt_powering_down(hdev
) && hci_conn_count(hdev
) == 1) {
8625 cancel_delayed_work(&hdev
->power_off
);
8626 queue_work(hdev
->req_workqueue
, &hdev
->power_off
.work
);
8629 if (!mgmt_connected
)
8632 if (link_type
!= ACL_LINK
&& link_type
!= LE_LINK
)
8635 mgmt_pending_foreach(MGMT_OP_DISCONNECT
, hdev
, disconnect_rsp
, &sk
);
8637 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
8638 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
8641 /* Report disconnects due to suspend */
8642 if (hdev
->suspended
)
8643 ev
.reason
= MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND
;
8645 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED
, hdev
, &ev
, sizeof(ev
), sk
);
8650 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE
, hdev
, unpair_device_rsp
,
8654 void mgmt_disconnect_failed(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
8655 u8 link_type
, u8 addr_type
, u8 status
)
8657 u8 bdaddr_type
= link_to_bdaddr(link_type
, addr_type
);
8658 struct mgmt_cp_disconnect
*cp
;
8659 struct mgmt_pending_cmd
*cmd
;
8661 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE
, hdev
, unpair_device_rsp
,
8664 cmd
= pending_find(MGMT_OP_DISCONNECT
, hdev
);
8670 if (bacmp(bdaddr
, &cp
->addr
.bdaddr
))
8673 if (cp
->addr
.type
!= bdaddr_type
)
8676 cmd
->cmd_complete(cmd
, mgmt_status(status
));
8677 mgmt_pending_remove(cmd
);
8680 void mgmt_connect_failed(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 link_type
,
8681 u8 addr_type
, u8 status
)
8683 struct mgmt_ev_connect_failed ev
;
8685 /* The connection is still in hci_conn_hash so test for 1
8686 * instead of 0 to know if this is the last one.
8688 if (mgmt_powering_down(hdev
) && hci_conn_count(hdev
) == 1) {
8689 cancel_delayed_work(&hdev
->power_off
);
8690 queue_work(hdev
->req_workqueue
, &hdev
->power_off
.work
);
8693 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
8694 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
8695 ev
.status
= mgmt_status(status
);
8697 mgmt_event(MGMT_EV_CONNECT_FAILED
, hdev
, &ev
, sizeof(ev
), NULL
);
8700 void mgmt_pin_code_request(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 secure
)
8702 struct mgmt_ev_pin_code_request ev
;
8704 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
8705 ev
.addr
.type
= BDADDR_BREDR
;
8708 mgmt_event(MGMT_EV_PIN_CODE_REQUEST
, hdev
, &ev
, sizeof(ev
), NULL
);
8711 void mgmt_pin_code_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
8714 struct mgmt_pending_cmd
*cmd
;
8716 cmd
= pending_find(MGMT_OP_PIN_CODE_REPLY
, hdev
);
8720 cmd
->cmd_complete(cmd
, mgmt_status(status
));
8721 mgmt_pending_remove(cmd
);
8724 void mgmt_pin_code_neg_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
8727 struct mgmt_pending_cmd
*cmd
;
8729 cmd
= pending_find(MGMT_OP_PIN_CODE_NEG_REPLY
, hdev
);
8733 cmd
->cmd_complete(cmd
, mgmt_status(status
));
8734 mgmt_pending_remove(cmd
);
8737 int mgmt_user_confirm_request(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
8738 u8 link_type
, u8 addr_type
, u32 value
,
8741 struct mgmt_ev_user_confirm_request ev
;
8743 bt_dev_dbg(hdev
, "bdaddr %pMR", bdaddr
);
8745 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
8746 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
8747 ev
.confirm_hint
= confirm_hint
;
8748 ev
.value
= cpu_to_le32(value
);
8750 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST
, hdev
, &ev
, sizeof(ev
),
8754 int mgmt_user_passkey_request(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
8755 u8 link_type
, u8 addr_type
)
8757 struct mgmt_ev_user_passkey_request ev
;
8759 bt_dev_dbg(hdev
, "bdaddr %pMR", bdaddr
);
8761 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
8762 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
8764 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST
, hdev
, &ev
, sizeof(ev
),
8768 static int user_pairing_resp_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
8769 u8 link_type
, u8 addr_type
, u8 status
,
8772 struct mgmt_pending_cmd
*cmd
;
8774 cmd
= pending_find(opcode
, hdev
);
8778 cmd
->cmd_complete(cmd
, mgmt_status(status
));
8779 mgmt_pending_remove(cmd
);
8784 int mgmt_user_confirm_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
8785 u8 link_type
, u8 addr_type
, u8 status
)
8787 return user_pairing_resp_complete(hdev
, bdaddr
, link_type
, addr_type
,
8788 status
, MGMT_OP_USER_CONFIRM_REPLY
);
8791 int mgmt_user_confirm_neg_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
8792 u8 link_type
, u8 addr_type
, u8 status
)
8794 return user_pairing_resp_complete(hdev
, bdaddr
, link_type
, addr_type
,
8796 MGMT_OP_USER_CONFIRM_NEG_REPLY
);
8799 int mgmt_user_passkey_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
8800 u8 link_type
, u8 addr_type
, u8 status
)
8802 return user_pairing_resp_complete(hdev
, bdaddr
, link_type
, addr_type
,
8803 status
, MGMT_OP_USER_PASSKEY_REPLY
);
8806 int mgmt_user_passkey_neg_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
8807 u8 link_type
, u8 addr_type
, u8 status
)
8809 return user_pairing_resp_complete(hdev
, bdaddr
, link_type
, addr_type
,
8811 MGMT_OP_USER_PASSKEY_NEG_REPLY
);
8814 int mgmt_user_passkey_notify(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
8815 u8 link_type
, u8 addr_type
, u32 passkey
,
8818 struct mgmt_ev_passkey_notify ev
;
8820 bt_dev_dbg(hdev
, "bdaddr %pMR", bdaddr
);
8822 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
8823 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
8824 ev
.passkey
= __cpu_to_le32(passkey
);
8825 ev
.entered
= entered
;
8827 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY
, hdev
, &ev
, sizeof(ev
), NULL
);
8830 void mgmt_auth_failed(struct hci_conn
*conn
, u8 hci_status
)
8832 struct mgmt_ev_auth_failed ev
;
8833 struct mgmt_pending_cmd
*cmd
;
8834 u8 status
= mgmt_status(hci_status
);
8836 bacpy(&ev
.addr
.bdaddr
, &conn
->dst
);
8837 ev
.addr
.type
= link_to_bdaddr(conn
->type
, conn
->dst_type
);
8840 cmd
= find_pairing(conn
);
8842 mgmt_event(MGMT_EV_AUTH_FAILED
, conn
->hdev
, &ev
, sizeof(ev
),
8843 cmd
? cmd
->sk
: NULL
);
8846 cmd
->cmd_complete(cmd
, status
);
8847 mgmt_pending_remove(cmd
);
8851 void mgmt_auth_enable_complete(struct hci_dev
*hdev
, u8 status
)
8853 struct cmd_lookup match
= { NULL
, hdev
};
8857 u8 mgmt_err
= mgmt_status(status
);
8858 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY
, hdev
,
8859 cmd_status_rsp
, &mgmt_err
);
8863 if (test_bit(HCI_AUTH
, &hdev
->flags
))
8864 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_LINK_SECURITY
);
8866 changed
= hci_dev_test_and_clear_flag(hdev
, HCI_LINK_SECURITY
);
8868 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY
, hdev
, settings_rsp
,
8872 new_settings(hdev
, match
.sk
);
8878 static void clear_eir(struct hci_request
*req
)
8880 struct hci_dev
*hdev
= req
->hdev
;
8881 struct hci_cp_write_eir cp
;
8883 if (!lmp_ext_inq_capable(hdev
))
8886 memset(hdev
->eir
, 0, sizeof(hdev
->eir
));
8888 memset(&cp
, 0, sizeof(cp
));
8890 hci_req_add(req
, HCI_OP_WRITE_EIR
, sizeof(cp
), &cp
);
8893 void mgmt_ssp_enable_complete(struct hci_dev
*hdev
, u8 enable
, u8 status
)
8895 struct cmd_lookup match
= { NULL
, hdev
};
8896 struct hci_request req
;
8897 bool changed
= false;
8900 u8 mgmt_err
= mgmt_status(status
);
8902 if (enable
&& hci_dev_test_and_clear_flag(hdev
,
8904 hci_dev_clear_flag(hdev
, HCI_HS_ENABLED
);
8905 new_settings(hdev
, NULL
);
8908 mgmt_pending_foreach(MGMT_OP_SET_SSP
, hdev
, cmd_status_rsp
,
8914 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_SSP_ENABLED
);
8916 changed
= hci_dev_test_and_clear_flag(hdev
, HCI_SSP_ENABLED
);
8918 changed
= hci_dev_test_and_clear_flag(hdev
,
8921 hci_dev_clear_flag(hdev
, HCI_HS_ENABLED
);
8924 mgmt_pending_foreach(MGMT_OP_SET_SSP
, hdev
, settings_rsp
, &match
);
8927 new_settings(hdev
, match
.sk
);
8932 hci_req_init(&req
, hdev
);
8934 if (hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
)) {
8935 if (hci_dev_test_flag(hdev
, HCI_USE_DEBUG_KEYS
))
8936 hci_req_add(&req
, HCI_OP_WRITE_SSP_DEBUG_MODE
,
8937 sizeof(enable
), &enable
);
8938 __hci_req_update_eir(&req
);
8943 hci_req_run(&req
, NULL
);
8946 static void sk_lookup(struct mgmt_pending_cmd
*cmd
, void *data
)
8948 struct cmd_lookup
*match
= data
;
8950 if (match
->sk
== NULL
) {
8951 match
->sk
= cmd
->sk
;
8952 sock_hold(match
->sk
);
8956 void mgmt_set_class_of_dev_complete(struct hci_dev
*hdev
, u8
*dev_class
,
8959 struct cmd_lookup match
= { NULL
, hdev
, mgmt_status(status
) };
8961 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS
, hdev
, sk_lookup
, &match
);
8962 mgmt_pending_foreach(MGMT_OP_ADD_UUID
, hdev
, sk_lookup
, &match
);
8963 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID
, hdev
, sk_lookup
, &match
);
8966 mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED
, hdev
, dev_class
,
8967 3, HCI_MGMT_DEV_CLASS_EVENTS
, NULL
);
8968 ext_info_changed(hdev
, NULL
);
8975 void mgmt_set_local_name_complete(struct hci_dev
*hdev
, u8
*name
, u8 status
)
8977 struct mgmt_cp_set_local_name ev
;
8978 struct mgmt_pending_cmd
*cmd
;
8983 memset(&ev
, 0, sizeof(ev
));
8984 memcpy(ev
.name
, name
, HCI_MAX_NAME_LENGTH
);
8985 memcpy(ev
.short_name
, hdev
->short_name
, HCI_MAX_SHORT_NAME_LENGTH
);
8987 cmd
= pending_find(MGMT_OP_SET_LOCAL_NAME
, hdev
);
8989 memcpy(hdev
->dev_name
, name
, sizeof(hdev
->dev_name
));
8991 /* If this is a HCI command related to powering on the
8992 * HCI dev don't send any mgmt signals.
8994 if (pending_find(MGMT_OP_SET_POWERED
, hdev
))
8998 mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED
, hdev
, &ev
, sizeof(ev
),
8999 HCI_MGMT_LOCAL_NAME_EVENTS
, cmd
? cmd
->sk
: NULL
);
9000 ext_info_changed(hdev
, cmd
? cmd
->sk
: NULL
);
9003 static inline bool has_uuid(u8
*uuid
, u16 uuid_count
, u8 (*uuids
)[16])
9007 for (i
= 0; i
< uuid_count
; i
++) {
9008 if (!memcmp(uuid
, uuids
[i
], 16))
9015 static bool eir_has_uuids(u8
*eir
, u16 eir_len
, u16 uuid_count
, u8 (*uuids
)[16])
9019 while (parsed
< eir_len
) {
9020 u8 field_len
= eir
[0];
9027 if (eir_len
- parsed
< field_len
+ 1)
9031 case EIR_UUID16_ALL
:
9032 case EIR_UUID16_SOME
:
9033 for (i
= 0; i
+ 3 <= field_len
; i
+= 2) {
9034 memcpy(uuid
, bluetooth_base_uuid
, 16);
9035 uuid
[13] = eir
[i
+ 3];
9036 uuid
[12] = eir
[i
+ 2];
9037 if (has_uuid(uuid
, uuid_count
, uuids
))
9041 case EIR_UUID32_ALL
:
9042 case EIR_UUID32_SOME
:
9043 for (i
= 0; i
+ 5 <= field_len
; i
+= 4) {
9044 memcpy(uuid
, bluetooth_base_uuid
, 16);
9045 uuid
[15] = eir
[i
+ 5];
9046 uuid
[14] = eir
[i
+ 4];
9047 uuid
[13] = eir
[i
+ 3];
9048 uuid
[12] = eir
[i
+ 2];
9049 if (has_uuid(uuid
, uuid_count
, uuids
))
9053 case EIR_UUID128_ALL
:
9054 case EIR_UUID128_SOME
:
9055 for (i
= 0; i
+ 17 <= field_len
; i
+= 16) {
9056 memcpy(uuid
, eir
+ i
+ 2, 16);
9057 if (has_uuid(uuid
, uuid_count
, uuids
))
9063 parsed
+= field_len
+ 1;
9064 eir
+= field_len
+ 1;
9070 static void restart_le_scan(struct hci_dev
*hdev
)
9072 /* If controller is not scanning we are done. */
9073 if (!hci_dev_test_flag(hdev
, HCI_LE_SCAN
))
9076 if (time_after(jiffies
+ DISCOV_LE_RESTART_DELAY
,
9077 hdev
->discovery
.scan_start
+
9078 hdev
->discovery
.scan_duration
))
9081 queue_delayed_work(hdev
->req_workqueue
, &hdev
->le_scan_restart
,
9082 DISCOV_LE_RESTART_DELAY
);
9085 static bool is_filter_match(struct hci_dev
*hdev
, s8 rssi
, u8
*eir
,
9086 u16 eir_len
, u8
*scan_rsp
, u8 scan_rsp_len
)
9088 /* If a RSSI threshold has been specified, and
9089 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
9090 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
9091 * is set, let it through for further processing, as we might need to
9094 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
9095 * the results are also dropped.
9097 if (hdev
->discovery
.rssi
!= HCI_RSSI_INVALID
&&
9098 (rssi
== HCI_RSSI_INVALID
||
9099 (rssi
< hdev
->discovery
.rssi
&&
9100 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER
, &hdev
->quirks
))))
9103 if (hdev
->discovery
.uuid_count
!= 0) {
9104 /* If a list of UUIDs is provided in filter, results with no
9105 * matching UUID should be dropped.
9107 if (!eir_has_uuids(eir
, eir_len
, hdev
->discovery
.uuid_count
,
9108 hdev
->discovery
.uuids
) &&
9109 !eir_has_uuids(scan_rsp
, scan_rsp_len
,
9110 hdev
->discovery
.uuid_count
,
9111 hdev
->discovery
.uuids
))
9115 /* If duplicate filtering does not report RSSI changes, then restart
9116 * scanning to ensure updated result with updated RSSI values.
9118 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER
, &hdev
->quirks
)) {
9119 restart_le_scan(hdev
);
9121 /* Validate RSSI value against the RSSI threshold once more. */
9122 if (hdev
->discovery
.rssi
!= HCI_RSSI_INVALID
&&
9123 rssi
< hdev
->discovery
.rssi
)
/* Report a discovered remote device to management-interface listeners as
 * an MGMT_EV_DEVICE_FOUND event, applying the discovery filters configured
 * on @hdev.
 *
 * @hdev:         controller that saw the device
 * @bdaddr:       remote device address
 * @link_type:    ACL_LINK (BR/EDR inquiry) or LE_LINK (LE advertising)
 * @addr_type:    address type, translated via link_to_bdaddr()
 * @dev_class:    optional 3-byte Class of Device; may be NULL
 * @rssi:         signal strength, HCI_RSSI_INVALID when unavailable
 * @flags:        mgmt device-found flags, emitted little-endian
 * @eir:          EIR / advertising data of @eir_len bytes
 * @scan_rsp:     LE scan response data of @scan_rsp_len bytes
 *
 * NOTE(review): reformatted from a mangled dump; lines missing from the
 * dump (declarations, return statements, the dev_class conditional) were
 * filled from the canonical upstream mgmt.c — confirm against the tree.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *)buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK &&
		    list_empty(&hdev->pend_le_reports) &&
		    !hci_is_adv_monitoring(hdev)) {
			return;
		}
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);

			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		memcpy(ev->eir, eir, eir_len);

	/* Append the Class of Device unless the EIR already carries one. */
	if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
				       NULL))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
/* Report a resolved remote device name to management-interface listeners.
 * The name is delivered as an MGMT_EV_DEVICE_FOUND event whose EIR payload
 * carries a single EIR_NAME_COMPLETE field.
 *
 * NOTE(review): reformatted from a mangled dump; the local eir_len
 * declaration was missing and was restored from the canonical upstream
 * mgmt.c — confirm against the tree.
 */
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
{
	struct mgmt_ev_device_found *ev;
	/* +2 covers the EIR field header (length byte + type byte). */
	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
	u16 eir_len;

	ev = (struct mgmt_ev_device_found *) buf;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;

	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
				  name_len);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
}
9238 void mgmt_discovering(struct hci_dev
*hdev
, u8 discovering
)
9240 struct mgmt_ev_discovering ev
;
9242 bt_dev_dbg(hdev
, "discovering %u", discovering
);
9244 memset(&ev
, 0, sizeof(ev
));
9245 ev
.type
= hdev
->discovery
.type
;
9246 ev
.discovering
= discovering
;
9248 mgmt_event(MGMT_EV_DISCOVERING
, hdev
, &ev
, sizeof(ev
), NULL
);
9251 void mgmt_suspending(struct hci_dev
*hdev
, u8 state
)
9253 struct mgmt_ev_controller_suspend ev
;
9255 ev
.suspend_state
= state
;
9256 mgmt_event(MGMT_EV_CONTROLLER_SUSPEND
, hdev
, &ev
, sizeof(ev
), NULL
);
/* Emit MGMT_EV_CONTROLLER_RESUME with the wake @reason and, when known,
 * the address of the remote device that woke the controller.
 *
 * NOTE(review): the dump shows both the bacpy/addr.type assignments and the
 * memset of ev.addr with no visible conditionals; the if (bdaddr)/else
 * structure below is restored from the canonical upstream mgmt.c — confirm
 * against the tree.
 */
void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
		   u8 addr_type)
{
	struct mgmt_ev_controller_resume ev;

	ev.wake_reason = reason;
	if (bdaddr) {
		bacpy(&ev.addr.bdaddr, bdaddr);
		ev.addr.type = addr_type;
	} else {
		/* No waking device known: report an all-zero address. */
		memset(&ev.addr, 0, sizeof(ev.addr));
	}

	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
}
/* Management control-channel descriptor: binds HCI_CHANNEL_CONTROL sockets
 * to the mgmt_handlers command table, with mgmt_init_hdev run per
 * controller (presumably to set up per-hdev mgmt state — verify against
 * mgmt_init_hdev's definition).
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
/* Register the management control channel with the HCI core; returns the
 * core's status code.
 *
 * NOTE(review): the function header was lost in the dump; reconstructed as
 * the canonical int mgmt_init(void) — confirm against the tree.
 */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
/* Unregister the management control channel on subsystem teardown. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}