/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include "hci_request.h"
38 #include "mgmt_util.h"
40 #define MGMT_VERSION 1
41 #define MGMT_REVISION 10
43 static const u16 mgmt_commands
[] = {
44 MGMT_OP_READ_INDEX_LIST
,
47 MGMT_OP_SET_DISCOVERABLE
,
48 MGMT_OP_SET_CONNECTABLE
,
49 MGMT_OP_SET_FAST_CONNECTABLE
,
51 MGMT_OP_SET_LINK_SECURITY
,
55 MGMT_OP_SET_DEV_CLASS
,
56 MGMT_OP_SET_LOCAL_NAME
,
59 MGMT_OP_LOAD_LINK_KEYS
,
60 MGMT_OP_LOAD_LONG_TERM_KEYS
,
62 MGMT_OP_GET_CONNECTIONS
,
63 MGMT_OP_PIN_CODE_REPLY
,
64 MGMT_OP_PIN_CODE_NEG_REPLY
,
65 MGMT_OP_SET_IO_CAPABILITY
,
67 MGMT_OP_CANCEL_PAIR_DEVICE
,
68 MGMT_OP_UNPAIR_DEVICE
,
69 MGMT_OP_USER_CONFIRM_REPLY
,
70 MGMT_OP_USER_CONFIRM_NEG_REPLY
,
71 MGMT_OP_USER_PASSKEY_REPLY
,
72 MGMT_OP_USER_PASSKEY_NEG_REPLY
,
73 MGMT_OP_READ_LOCAL_OOB_DATA
,
74 MGMT_OP_ADD_REMOTE_OOB_DATA
,
75 MGMT_OP_REMOVE_REMOTE_OOB_DATA
,
76 MGMT_OP_START_DISCOVERY
,
77 MGMT_OP_STOP_DISCOVERY
,
80 MGMT_OP_UNBLOCK_DEVICE
,
81 MGMT_OP_SET_DEVICE_ID
,
82 MGMT_OP_SET_ADVERTISING
,
84 MGMT_OP_SET_STATIC_ADDRESS
,
85 MGMT_OP_SET_SCAN_PARAMS
,
86 MGMT_OP_SET_SECURE_CONN
,
87 MGMT_OP_SET_DEBUG_KEYS
,
90 MGMT_OP_GET_CONN_INFO
,
91 MGMT_OP_GET_CLOCK_INFO
,
93 MGMT_OP_REMOVE_DEVICE
,
94 MGMT_OP_LOAD_CONN_PARAM
,
95 MGMT_OP_READ_UNCONF_INDEX_LIST
,
96 MGMT_OP_READ_CONFIG_INFO
,
97 MGMT_OP_SET_EXTERNAL_CONFIG
,
98 MGMT_OP_SET_PUBLIC_ADDRESS
,
99 MGMT_OP_START_SERVICE_DISCOVERY
,
100 MGMT_OP_READ_LOCAL_OOB_EXT_DATA
,
101 MGMT_OP_READ_EXT_INDEX_LIST
,
102 MGMT_OP_READ_ADV_FEATURES
,
103 MGMT_OP_ADD_ADVERTISING
,
104 MGMT_OP_REMOVE_ADVERTISING
,
107 static const u16 mgmt_events
[] = {
108 MGMT_EV_CONTROLLER_ERROR
,
110 MGMT_EV_INDEX_REMOVED
,
111 MGMT_EV_NEW_SETTINGS
,
112 MGMT_EV_CLASS_OF_DEV_CHANGED
,
113 MGMT_EV_LOCAL_NAME_CHANGED
,
114 MGMT_EV_NEW_LINK_KEY
,
115 MGMT_EV_NEW_LONG_TERM_KEY
,
116 MGMT_EV_DEVICE_CONNECTED
,
117 MGMT_EV_DEVICE_DISCONNECTED
,
118 MGMT_EV_CONNECT_FAILED
,
119 MGMT_EV_PIN_CODE_REQUEST
,
120 MGMT_EV_USER_CONFIRM_REQUEST
,
121 MGMT_EV_USER_PASSKEY_REQUEST
,
123 MGMT_EV_DEVICE_FOUND
,
125 MGMT_EV_DEVICE_BLOCKED
,
126 MGMT_EV_DEVICE_UNBLOCKED
,
127 MGMT_EV_DEVICE_UNPAIRED
,
128 MGMT_EV_PASSKEY_NOTIFY
,
131 MGMT_EV_DEVICE_ADDED
,
132 MGMT_EV_DEVICE_REMOVED
,
133 MGMT_EV_NEW_CONN_PARAM
,
134 MGMT_EV_UNCONF_INDEX_ADDED
,
135 MGMT_EV_UNCONF_INDEX_REMOVED
,
136 MGMT_EV_NEW_CONFIG_OPTIONS
,
137 MGMT_EV_EXT_INDEX_ADDED
,
138 MGMT_EV_EXT_INDEX_REMOVED
,
139 MGMT_EV_LOCAL_OOB_DATA_UPDATED
,
140 MGMT_EV_ADVERTISING_ADDED
,
141 MGMT_EV_ADVERTISING_REMOVED
,
144 static const u16 mgmt_untrusted_commands
[] = {
145 MGMT_OP_READ_INDEX_LIST
,
147 MGMT_OP_READ_UNCONF_INDEX_LIST
,
148 MGMT_OP_READ_CONFIG_INFO
,
149 MGMT_OP_READ_EXT_INDEX_LIST
,
152 static const u16 mgmt_untrusted_events
[] = {
154 MGMT_EV_INDEX_REMOVED
,
155 MGMT_EV_NEW_SETTINGS
,
156 MGMT_EV_CLASS_OF_DEV_CHANGED
,
157 MGMT_EV_LOCAL_NAME_CHANGED
,
158 MGMT_EV_UNCONF_INDEX_ADDED
,
159 MGMT_EV_UNCONF_INDEX_REMOVED
,
160 MGMT_EV_NEW_CONFIG_OPTIONS
,
161 MGMT_EV_EXT_INDEX_ADDED
,
162 MGMT_EV_EXT_INDEX_REMOVED
,
165 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
167 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
168 "\x00\x00\x00\x00\x00\x00\x00\x00"
170 /* HCI to MGMT error code conversion table */
171 static u8 mgmt_status_table
[] = {
173 MGMT_STATUS_UNKNOWN_COMMAND
, /* Unknown Command */
174 MGMT_STATUS_NOT_CONNECTED
, /* No Connection */
175 MGMT_STATUS_FAILED
, /* Hardware Failure */
176 MGMT_STATUS_CONNECT_FAILED
, /* Page Timeout */
177 MGMT_STATUS_AUTH_FAILED
, /* Authentication Failed */
178 MGMT_STATUS_AUTH_FAILED
, /* PIN or Key Missing */
179 MGMT_STATUS_NO_RESOURCES
, /* Memory Full */
180 MGMT_STATUS_TIMEOUT
, /* Connection Timeout */
181 MGMT_STATUS_NO_RESOURCES
, /* Max Number of Connections */
182 MGMT_STATUS_NO_RESOURCES
, /* Max Number of SCO Connections */
183 MGMT_STATUS_ALREADY_CONNECTED
, /* ACL Connection Exists */
184 MGMT_STATUS_BUSY
, /* Command Disallowed */
185 MGMT_STATUS_NO_RESOURCES
, /* Rejected Limited Resources */
186 MGMT_STATUS_REJECTED
, /* Rejected Security */
187 MGMT_STATUS_REJECTED
, /* Rejected Personal */
188 MGMT_STATUS_TIMEOUT
, /* Host Timeout */
189 MGMT_STATUS_NOT_SUPPORTED
, /* Unsupported Feature */
190 MGMT_STATUS_INVALID_PARAMS
, /* Invalid Parameters */
191 MGMT_STATUS_DISCONNECTED
, /* OE User Ended Connection */
192 MGMT_STATUS_NO_RESOURCES
, /* OE Low Resources */
193 MGMT_STATUS_DISCONNECTED
, /* OE Power Off */
194 MGMT_STATUS_DISCONNECTED
, /* Connection Terminated */
195 MGMT_STATUS_BUSY
, /* Repeated Attempts */
196 MGMT_STATUS_REJECTED
, /* Pairing Not Allowed */
197 MGMT_STATUS_FAILED
, /* Unknown LMP PDU */
198 MGMT_STATUS_NOT_SUPPORTED
, /* Unsupported Remote Feature */
199 MGMT_STATUS_REJECTED
, /* SCO Offset Rejected */
200 MGMT_STATUS_REJECTED
, /* SCO Interval Rejected */
201 MGMT_STATUS_REJECTED
, /* Air Mode Rejected */
202 MGMT_STATUS_INVALID_PARAMS
, /* Invalid LMP Parameters */
203 MGMT_STATUS_FAILED
, /* Unspecified Error */
204 MGMT_STATUS_NOT_SUPPORTED
, /* Unsupported LMP Parameter Value */
205 MGMT_STATUS_FAILED
, /* Role Change Not Allowed */
206 MGMT_STATUS_TIMEOUT
, /* LMP Response Timeout */
207 MGMT_STATUS_FAILED
, /* LMP Error Transaction Collision */
208 MGMT_STATUS_FAILED
, /* LMP PDU Not Allowed */
209 MGMT_STATUS_REJECTED
, /* Encryption Mode Not Accepted */
210 MGMT_STATUS_FAILED
, /* Unit Link Key Used */
211 MGMT_STATUS_NOT_SUPPORTED
, /* QoS Not Supported */
212 MGMT_STATUS_TIMEOUT
, /* Instant Passed */
213 MGMT_STATUS_NOT_SUPPORTED
, /* Pairing Not Supported */
214 MGMT_STATUS_FAILED
, /* Transaction Collision */
215 MGMT_STATUS_INVALID_PARAMS
, /* Unacceptable Parameter */
216 MGMT_STATUS_REJECTED
, /* QoS Rejected */
217 MGMT_STATUS_NOT_SUPPORTED
, /* Classification Not Supported */
218 MGMT_STATUS_REJECTED
, /* Insufficient Security */
219 MGMT_STATUS_INVALID_PARAMS
, /* Parameter Out Of Range */
220 MGMT_STATUS_BUSY
, /* Role Switch Pending */
221 MGMT_STATUS_FAILED
, /* Slot Violation */
222 MGMT_STATUS_FAILED
, /* Role Switch Failed */
223 MGMT_STATUS_INVALID_PARAMS
, /* EIR Too Large */
224 MGMT_STATUS_NOT_SUPPORTED
, /* Simple Pairing Not Supported */
225 MGMT_STATUS_BUSY
, /* Host Busy Pairing */
226 MGMT_STATUS_REJECTED
, /* Rejected, No Suitable Channel */
227 MGMT_STATUS_BUSY
, /* Controller Busy */
228 MGMT_STATUS_INVALID_PARAMS
, /* Unsuitable Connection Interval */
229 MGMT_STATUS_TIMEOUT
, /* Directed Advertising Timeout */
230 MGMT_STATUS_AUTH_FAILED
, /* Terminated Due to MIC Failure */
231 MGMT_STATUS_CONNECT_FAILED
, /* Connection Establishment Failed */
232 MGMT_STATUS_CONNECT_FAILED
, /* MAC Connection Failed */
235 static u8
mgmt_status(u8 hci_status
)
237 if (hci_status
< ARRAY_SIZE(mgmt_status_table
))
238 return mgmt_status_table
[hci_status
];
240 return MGMT_STATUS_FAILED
;
243 static int mgmt_index_event(u16 event
, struct hci_dev
*hdev
, void *data
,
246 return mgmt_send_event(event
, hdev
, HCI_CHANNEL_CONTROL
, data
, len
,
250 static int mgmt_limited_event(u16 event
, struct hci_dev
*hdev
, void *data
,
251 u16 len
, int flag
, struct sock
*skip_sk
)
253 return mgmt_send_event(event
, hdev
, HCI_CHANNEL_CONTROL
, data
, len
,
257 static int mgmt_generic_event(u16 event
, struct hci_dev
*hdev
, void *data
,
258 u16 len
, struct sock
*skip_sk
)
260 return mgmt_send_event(event
, hdev
, HCI_CHANNEL_CONTROL
, data
, len
,
261 HCI_MGMT_GENERIC_EVENTS
, skip_sk
);
264 static int mgmt_event(u16 event
, struct hci_dev
*hdev
, void *data
, u16 len
,
265 struct sock
*skip_sk
)
267 return mgmt_send_event(event
, hdev
, HCI_CHANNEL_CONTROL
, data
, len
,
268 HCI_SOCK_TRUSTED
, skip_sk
);
271 static u8
le_addr_type(u8 mgmt_addr_type
)
273 if (mgmt_addr_type
== BDADDR_LE_PUBLIC
)
274 return ADDR_LE_DEV_PUBLIC
;
276 return ADDR_LE_DEV_RANDOM
;
279 static int read_version(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
282 struct mgmt_rp_read_version rp
;
284 BT_DBG("sock %p", sk
);
286 rp
.version
= MGMT_VERSION
;
287 rp
.revision
= cpu_to_le16(MGMT_REVISION
);
289 return mgmt_cmd_complete(sk
, MGMT_INDEX_NONE
, MGMT_OP_READ_VERSION
, 0,
293 static int read_commands(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
296 struct mgmt_rp_read_commands
*rp
;
297 u16 num_commands
, num_events
;
301 BT_DBG("sock %p", sk
);
303 if (hci_sock_test_flag(sk
, HCI_SOCK_TRUSTED
)) {
304 num_commands
= ARRAY_SIZE(mgmt_commands
);
305 num_events
= ARRAY_SIZE(mgmt_events
);
307 num_commands
= ARRAY_SIZE(mgmt_untrusted_commands
);
308 num_events
= ARRAY_SIZE(mgmt_untrusted_events
);
311 rp_size
= sizeof(*rp
) + ((num_commands
+ num_events
) * sizeof(u16
));
313 rp
= kmalloc(rp_size
, GFP_KERNEL
);
317 rp
->num_commands
= cpu_to_le16(num_commands
);
318 rp
->num_events
= cpu_to_le16(num_events
);
320 if (hci_sock_test_flag(sk
, HCI_SOCK_TRUSTED
)) {
321 __le16
*opcode
= rp
->opcodes
;
323 for (i
= 0; i
< num_commands
; i
++, opcode
++)
324 put_unaligned_le16(mgmt_commands
[i
], opcode
);
326 for (i
= 0; i
< num_events
; i
++, opcode
++)
327 put_unaligned_le16(mgmt_events
[i
], opcode
);
329 __le16
*opcode
= rp
->opcodes
;
331 for (i
= 0; i
< num_commands
; i
++, opcode
++)
332 put_unaligned_le16(mgmt_untrusted_commands
[i
], opcode
);
334 for (i
= 0; i
< num_events
; i
++, opcode
++)
335 put_unaligned_le16(mgmt_untrusted_events
[i
], opcode
);
338 err
= mgmt_cmd_complete(sk
, MGMT_INDEX_NONE
, MGMT_OP_READ_COMMANDS
, 0,
345 static int read_index_list(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
348 struct mgmt_rp_read_index_list
*rp
;
354 BT_DBG("sock %p", sk
);
356 read_lock(&hci_dev_list_lock
);
359 list_for_each_entry(d
, &hci_dev_list
, list
) {
360 if (d
->dev_type
== HCI_BREDR
&&
361 !hci_dev_test_flag(d
, HCI_UNCONFIGURED
))
365 rp_len
= sizeof(*rp
) + (2 * count
);
366 rp
= kmalloc(rp_len
, GFP_ATOMIC
);
368 read_unlock(&hci_dev_list_lock
);
373 list_for_each_entry(d
, &hci_dev_list
, list
) {
374 if (hci_dev_test_flag(d
, HCI_SETUP
) ||
375 hci_dev_test_flag(d
, HCI_CONFIG
) ||
376 hci_dev_test_flag(d
, HCI_USER_CHANNEL
))
379 /* Devices marked as raw-only are neither configured
380 * nor unconfigured controllers.
382 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &d
->quirks
))
385 if (d
->dev_type
== HCI_BREDR
&&
386 !hci_dev_test_flag(d
, HCI_UNCONFIGURED
)) {
387 rp
->index
[count
++] = cpu_to_le16(d
->id
);
388 BT_DBG("Added hci%u", d
->id
);
392 rp
->num_controllers
= cpu_to_le16(count
);
393 rp_len
= sizeof(*rp
) + (2 * count
);
395 read_unlock(&hci_dev_list_lock
);
397 err
= mgmt_cmd_complete(sk
, MGMT_INDEX_NONE
, MGMT_OP_READ_INDEX_LIST
,
405 static int read_unconf_index_list(struct sock
*sk
, struct hci_dev
*hdev
,
406 void *data
, u16 data_len
)
408 struct mgmt_rp_read_unconf_index_list
*rp
;
414 BT_DBG("sock %p", sk
);
416 read_lock(&hci_dev_list_lock
);
419 list_for_each_entry(d
, &hci_dev_list
, list
) {
420 if (d
->dev_type
== HCI_BREDR
&&
421 hci_dev_test_flag(d
, HCI_UNCONFIGURED
))
425 rp_len
= sizeof(*rp
) + (2 * count
);
426 rp
= kmalloc(rp_len
, GFP_ATOMIC
);
428 read_unlock(&hci_dev_list_lock
);
433 list_for_each_entry(d
, &hci_dev_list
, list
) {
434 if (hci_dev_test_flag(d
, HCI_SETUP
) ||
435 hci_dev_test_flag(d
, HCI_CONFIG
) ||
436 hci_dev_test_flag(d
, HCI_USER_CHANNEL
))
439 /* Devices marked as raw-only are neither configured
440 * nor unconfigured controllers.
442 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &d
->quirks
))
445 if (d
->dev_type
== HCI_BREDR
&&
446 hci_dev_test_flag(d
, HCI_UNCONFIGURED
)) {
447 rp
->index
[count
++] = cpu_to_le16(d
->id
);
448 BT_DBG("Added hci%u", d
->id
);
452 rp
->num_controllers
= cpu_to_le16(count
);
453 rp_len
= sizeof(*rp
) + (2 * count
);
455 read_unlock(&hci_dev_list_lock
);
457 err
= mgmt_cmd_complete(sk
, MGMT_INDEX_NONE
,
458 MGMT_OP_READ_UNCONF_INDEX_LIST
, 0, rp
, rp_len
);
465 static int read_ext_index_list(struct sock
*sk
, struct hci_dev
*hdev
,
466 void *data
, u16 data_len
)
468 struct mgmt_rp_read_ext_index_list
*rp
;
474 BT_DBG("sock %p", sk
);
476 read_lock(&hci_dev_list_lock
);
479 list_for_each_entry(d
, &hci_dev_list
, list
) {
480 if (d
->dev_type
== HCI_BREDR
|| d
->dev_type
== HCI_AMP
)
484 rp_len
= sizeof(*rp
) + (sizeof(rp
->entry
[0]) * count
);
485 rp
= kmalloc(rp_len
, GFP_ATOMIC
);
487 read_unlock(&hci_dev_list_lock
);
492 list_for_each_entry(d
, &hci_dev_list
, list
) {
493 if (hci_dev_test_flag(d
, HCI_SETUP
) ||
494 hci_dev_test_flag(d
, HCI_CONFIG
) ||
495 hci_dev_test_flag(d
, HCI_USER_CHANNEL
))
498 /* Devices marked as raw-only are neither configured
499 * nor unconfigured controllers.
501 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &d
->quirks
))
504 if (d
->dev_type
== HCI_BREDR
) {
505 if (hci_dev_test_flag(d
, HCI_UNCONFIGURED
))
506 rp
->entry
[count
].type
= 0x01;
508 rp
->entry
[count
].type
= 0x00;
509 } else if (d
->dev_type
== HCI_AMP
) {
510 rp
->entry
[count
].type
= 0x02;
515 rp
->entry
[count
].bus
= d
->bus
;
516 rp
->entry
[count
++].index
= cpu_to_le16(d
->id
);
517 BT_DBG("Added hci%u", d
->id
);
520 rp
->num_controllers
= cpu_to_le16(count
);
521 rp_len
= sizeof(*rp
) + (sizeof(rp
->entry
[0]) * count
);
523 read_unlock(&hci_dev_list_lock
);
525 /* If this command is called at least once, then all the
526 * default index and unconfigured index events are disabled
527 * and from now on only extended index events are used.
529 hci_sock_set_flag(sk
, HCI_MGMT_EXT_INDEX_EVENTS
);
530 hci_sock_clear_flag(sk
, HCI_MGMT_INDEX_EVENTS
);
531 hci_sock_clear_flag(sk
, HCI_MGMT_UNCONF_INDEX_EVENTS
);
533 err
= mgmt_cmd_complete(sk
, MGMT_INDEX_NONE
,
534 MGMT_OP_READ_EXT_INDEX_LIST
, 0, rp
, rp_len
);
541 static bool is_configured(struct hci_dev
*hdev
)
543 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
) &&
544 !hci_dev_test_flag(hdev
, HCI_EXT_CONFIGURED
))
547 if (test_bit(HCI_QUIRK_INVALID_BDADDR
, &hdev
->quirks
) &&
548 !bacmp(&hdev
->public_addr
, BDADDR_ANY
))
554 static __le32
get_missing_options(struct hci_dev
*hdev
)
558 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
) &&
559 !hci_dev_test_flag(hdev
, HCI_EXT_CONFIGURED
))
560 options
|= MGMT_OPTION_EXTERNAL_CONFIG
;
562 if (test_bit(HCI_QUIRK_INVALID_BDADDR
, &hdev
->quirks
) &&
563 !bacmp(&hdev
->public_addr
, BDADDR_ANY
))
564 options
|= MGMT_OPTION_PUBLIC_ADDRESS
;
566 return cpu_to_le32(options
);
569 static int new_options(struct hci_dev
*hdev
, struct sock
*skip
)
571 __le32 options
= get_missing_options(hdev
);
573 return mgmt_generic_event(MGMT_EV_NEW_CONFIG_OPTIONS
, hdev
, &options
,
574 sizeof(options
), skip
);
577 static int send_options_rsp(struct sock
*sk
, u16 opcode
, struct hci_dev
*hdev
)
579 __le32 options
= get_missing_options(hdev
);
581 return mgmt_cmd_complete(sk
, hdev
->id
, opcode
, 0, &options
,
585 static int read_config_info(struct sock
*sk
, struct hci_dev
*hdev
,
586 void *data
, u16 data_len
)
588 struct mgmt_rp_read_config_info rp
;
591 BT_DBG("sock %p %s", sk
, hdev
->name
);
595 memset(&rp
, 0, sizeof(rp
));
596 rp
.manufacturer
= cpu_to_le16(hdev
->manufacturer
);
598 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
))
599 options
|= MGMT_OPTION_EXTERNAL_CONFIG
;
601 if (hdev
->set_bdaddr
)
602 options
|= MGMT_OPTION_PUBLIC_ADDRESS
;
604 rp
.supported_options
= cpu_to_le32(options
);
605 rp
.missing_options
= get_missing_options(hdev
);
607 hci_dev_unlock(hdev
);
609 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_READ_CONFIG_INFO
, 0,
613 static u32
get_supported_settings(struct hci_dev
*hdev
)
617 settings
|= MGMT_SETTING_POWERED
;
618 settings
|= MGMT_SETTING_BONDABLE
;
619 settings
|= MGMT_SETTING_DEBUG_KEYS
;
620 settings
|= MGMT_SETTING_CONNECTABLE
;
621 settings
|= MGMT_SETTING_DISCOVERABLE
;
623 if (lmp_bredr_capable(hdev
)) {
624 if (hdev
->hci_ver
>= BLUETOOTH_VER_1_2
)
625 settings
|= MGMT_SETTING_FAST_CONNECTABLE
;
626 settings
|= MGMT_SETTING_BREDR
;
627 settings
|= MGMT_SETTING_LINK_SECURITY
;
629 if (lmp_ssp_capable(hdev
)) {
630 settings
|= MGMT_SETTING_SSP
;
631 settings
|= MGMT_SETTING_HS
;
634 if (lmp_sc_capable(hdev
))
635 settings
|= MGMT_SETTING_SECURE_CONN
;
638 if (lmp_le_capable(hdev
)) {
639 settings
|= MGMT_SETTING_LE
;
640 settings
|= MGMT_SETTING_ADVERTISING
;
641 settings
|= MGMT_SETTING_SECURE_CONN
;
642 settings
|= MGMT_SETTING_PRIVACY
;
643 settings
|= MGMT_SETTING_STATIC_ADDRESS
;
646 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
) ||
648 settings
|= MGMT_SETTING_CONFIGURATION
;
653 static u32
get_current_settings(struct hci_dev
*hdev
)
657 if (hdev_is_powered(hdev
))
658 settings
|= MGMT_SETTING_POWERED
;
660 if (hci_dev_test_flag(hdev
, HCI_CONNECTABLE
))
661 settings
|= MGMT_SETTING_CONNECTABLE
;
663 if (hci_dev_test_flag(hdev
, HCI_FAST_CONNECTABLE
))
664 settings
|= MGMT_SETTING_FAST_CONNECTABLE
;
666 if (hci_dev_test_flag(hdev
, HCI_DISCOVERABLE
))
667 settings
|= MGMT_SETTING_DISCOVERABLE
;
669 if (hci_dev_test_flag(hdev
, HCI_BONDABLE
))
670 settings
|= MGMT_SETTING_BONDABLE
;
672 if (hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
673 settings
|= MGMT_SETTING_BREDR
;
675 if (hci_dev_test_flag(hdev
, HCI_LE_ENABLED
))
676 settings
|= MGMT_SETTING_LE
;
678 if (hci_dev_test_flag(hdev
, HCI_LINK_SECURITY
))
679 settings
|= MGMT_SETTING_LINK_SECURITY
;
681 if (hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
))
682 settings
|= MGMT_SETTING_SSP
;
684 if (hci_dev_test_flag(hdev
, HCI_HS_ENABLED
))
685 settings
|= MGMT_SETTING_HS
;
687 if (hci_dev_test_flag(hdev
, HCI_ADVERTISING
))
688 settings
|= MGMT_SETTING_ADVERTISING
;
690 if (hci_dev_test_flag(hdev
, HCI_SC_ENABLED
))
691 settings
|= MGMT_SETTING_SECURE_CONN
;
693 if (hci_dev_test_flag(hdev
, HCI_KEEP_DEBUG_KEYS
))
694 settings
|= MGMT_SETTING_DEBUG_KEYS
;
696 if (hci_dev_test_flag(hdev
, HCI_PRIVACY
))
697 settings
|= MGMT_SETTING_PRIVACY
;
699 /* The current setting for static address has two purposes. The
700 * first is to indicate if the static address will be used and
701 * the second is to indicate if it is actually set.
703 * This means if the static address is not configured, this flag
704 * will never be set. If the address is configured, then if the
705 * address is actually used decides if the flag is set or not.
707 * For single mode LE only controllers and dual-mode controllers
708 * with BR/EDR disabled, the existence of the static address will
711 if (hci_dev_test_flag(hdev
, HCI_FORCE_STATIC_ADDR
) ||
712 !hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
) ||
713 !bacmp(&hdev
->bdaddr
, BDADDR_ANY
)) {
714 if (bacmp(&hdev
->static_addr
, BDADDR_ANY
))
715 settings
|= MGMT_SETTING_STATIC_ADDRESS
;
721 #define PNP_INFO_SVCLASS_ID 0x1200
723 static u8
*create_uuid16_list(struct hci_dev
*hdev
, u8
*data
, ptrdiff_t len
)
725 u8
*ptr
= data
, *uuids_start
= NULL
;
726 struct bt_uuid
*uuid
;
731 list_for_each_entry(uuid
, &hdev
->uuids
, list
) {
734 if (uuid
->size
!= 16)
737 uuid16
= get_unaligned_le16(&uuid
->uuid
[12]);
741 if (uuid16
== PNP_INFO_SVCLASS_ID
)
747 uuids_start
[1] = EIR_UUID16_ALL
;
751 /* Stop if not enough space to put next UUID */
752 if ((ptr
- data
) + sizeof(u16
) > len
) {
753 uuids_start
[1] = EIR_UUID16_SOME
;
757 *ptr
++ = (uuid16
& 0x00ff);
758 *ptr
++ = (uuid16
& 0xff00) >> 8;
759 uuids_start
[0] += sizeof(uuid16
);
765 static u8
*create_uuid32_list(struct hci_dev
*hdev
, u8
*data
, ptrdiff_t len
)
767 u8
*ptr
= data
, *uuids_start
= NULL
;
768 struct bt_uuid
*uuid
;
773 list_for_each_entry(uuid
, &hdev
->uuids
, list
) {
774 if (uuid
->size
!= 32)
780 uuids_start
[1] = EIR_UUID32_ALL
;
784 /* Stop if not enough space to put next UUID */
785 if ((ptr
- data
) + sizeof(u32
) > len
) {
786 uuids_start
[1] = EIR_UUID32_SOME
;
790 memcpy(ptr
, &uuid
->uuid
[12], sizeof(u32
));
792 uuids_start
[0] += sizeof(u32
);
798 static u8
*create_uuid128_list(struct hci_dev
*hdev
, u8
*data
, ptrdiff_t len
)
800 u8
*ptr
= data
, *uuids_start
= NULL
;
801 struct bt_uuid
*uuid
;
806 list_for_each_entry(uuid
, &hdev
->uuids
, list
) {
807 if (uuid
->size
!= 128)
813 uuids_start
[1] = EIR_UUID128_ALL
;
817 /* Stop if not enough space to put next UUID */
818 if ((ptr
- data
) + 16 > len
) {
819 uuids_start
[1] = EIR_UUID128_SOME
;
823 memcpy(ptr
, uuid
->uuid
, 16);
825 uuids_start
[0] += 16;
831 static struct mgmt_pending_cmd
*pending_find(u16 opcode
, struct hci_dev
*hdev
)
833 return mgmt_pending_find(HCI_CHANNEL_CONTROL
, opcode
, hdev
);
836 static struct mgmt_pending_cmd
*pending_find_data(u16 opcode
,
837 struct hci_dev
*hdev
,
840 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL
, opcode
, hdev
, data
);
843 static u8
get_current_adv_instance(struct hci_dev
*hdev
)
845 /* The "Set Advertising" setting supersedes the "Add Advertising"
846 * setting. Here we set the advertising data based on which
847 * setting was set. When neither apply, default to the global settings,
848 * represented by instance "0".
850 if (hci_dev_test_flag(hdev
, HCI_ADVERTISING_INSTANCE
) &&
851 !hci_dev_test_flag(hdev
, HCI_ADVERTISING
))
852 return hdev
->cur_adv_instance
;
857 static u8
create_default_scan_rsp_data(struct hci_dev
*hdev
, u8
*ptr
)
862 name_len
= strlen(hdev
->dev_name
);
864 size_t max_len
= HCI_MAX_AD_LENGTH
- ad_len
- 2;
866 if (name_len
> max_len
) {
868 ptr
[1] = EIR_NAME_SHORT
;
870 ptr
[1] = EIR_NAME_COMPLETE
;
872 ptr
[0] = name_len
+ 1;
874 memcpy(ptr
+ 2, hdev
->dev_name
, name_len
);
876 ad_len
+= (name_len
+ 2);
877 ptr
+= (name_len
+ 2);
883 static u8
create_instance_scan_rsp_data(struct hci_dev
*hdev
, u8 instance
,
886 struct adv_info
*adv_instance
;
888 adv_instance
= hci_find_adv_instance(hdev
, instance
);
892 /* TODO: Set the appropriate entries based on advertising instance flags
893 * here once flags other than 0 are supported.
895 memcpy(ptr
, adv_instance
->scan_rsp_data
,
896 adv_instance
->scan_rsp_len
);
898 return adv_instance
->scan_rsp_len
;
901 static void update_inst_scan_rsp_data(struct hci_request
*req
, u8 instance
)
903 struct hci_dev
*hdev
= req
->hdev
;
904 struct hci_cp_le_set_scan_rsp_data cp
;
907 if (!hci_dev_test_flag(hdev
, HCI_LE_ENABLED
))
910 memset(&cp
, 0, sizeof(cp
));
913 len
= create_instance_scan_rsp_data(hdev
, instance
, cp
.data
);
915 len
= create_default_scan_rsp_data(hdev
, cp
.data
);
917 if (hdev
->scan_rsp_data_len
== len
&&
918 !memcmp(cp
.data
, hdev
->scan_rsp_data
, len
))
921 memcpy(hdev
->scan_rsp_data
, cp
.data
, sizeof(cp
.data
));
922 hdev
->scan_rsp_data_len
= len
;
926 hci_req_add(req
, HCI_OP_LE_SET_SCAN_RSP_DATA
, sizeof(cp
), &cp
);
929 static void update_scan_rsp_data(struct hci_request
*req
)
931 update_inst_scan_rsp_data(req
, get_current_adv_instance(req
->hdev
));
934 static u8
get_adv_discov_flags(struct hci_dev
*hdev
)
936 struct mgmt_pending_cmd
*cmd
;
938 /* If there's a pending mgmt command the flags will not yet have
939 * their final values, so check for this first.
941 cmd
= pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
);
943 struct mgmt_mode
*cp
= cmd
->param
;
945 return LE_AD_GENERAL
;
946 else if (cp
->val
== 0x02)
947 return LE_AD_LIMITED
;
949 if (hci_dev_test_flag(hdev
, HCI_LIMITED_DISCOVERABLE
))
950 return LE_AD_LIMITED
;
951 else if (hci_dev_test_flag(hdev
, HCI_DISCOVERABLE
))
952 return LE_AD_GENERAL
;
958 static bool get_connectable(struct hci_dev
*hdev
)
960 struct mgmt_pending_cmd
*cmd
;
962 /* If there's a pending mgmt command the flag will not yet have
963 * it's final value, so check for this first.
965 cmd
= pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
);
967 struct mgmt_mode
*cp
= cmd
->param
;
972 return hci_dev_test_flag(hdev
, HCI_CONNECTABLE
);
975 static u32
get_adv_instance_flags(struct hci_dev
*hdev
, u8 instance
)
978 struct adv_info
*adv_instance
;
980 if (instance
== 0x00) {
981 /* Instance 0 always manages the "Tx Power" and "Flags"
984 flags
= MGMT_ADV_FLAG_TX_POWER
| MGMT_ADV_FLAG_MANAGED_FLAGS
;
986 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
987 * corresponds to the "connectable" instance flag.
989 if (hci_dev_test_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
))
990 flags
|= MGMT_ADV_FLAG_CONNECTABLE
;
995 adv_instance
= hci_find_adv_instance(hdev
, instance
);
997 /* Return 0 when we got an invalid instance identifier. */
1001 return adv_instance
->flags
;
1004 static u8
get_cur_adv_instance_scan_rsp_len(struct hci_dev
*hdev
)
1006 u8 instance
= get_current_adv_instance(hdev
);
1007 struct adv_info
*adv_instance
;
1009 /* Ignore instance 0 */
1010 if (instance
== 0x00)
1013 adv_instance
= hci_find_adv_instance(hdev
, instance
);
1017 /* TODO: Take into account the "appearance" and "local-name" flags here.
1018 * These are currently being ignored as they are not supported.
1020 return adv_instance
->scan_rsp_len
;
1023 static u8
create_instance_adv_data(struct hci_dev
*hdev
, u8 instance
, u8
*ptr
)
1025 struct adv_info
*adv_instance
= NULL
;
1026 u8 ad_len
= 0, flags
= 0;
1029 /* Return 0 when the current instance identifier is invalid. */
1031 adv_instance
= hci_find_adv_instance(hdev
, instance
);
1036 instance_flags
= get_adv_instance_flags(hdev
, instance
);
1038 /* The Add Advertising command allows userspace to set both the general
1039 * and limited discoverable flags.
1041 if (instance_flags
& MGMT_ADV_FLAG_DISCOV
)
1042 flags
|= LE_AD_GENERAL
;
1044 if (instance_flags
& MGMT_ADV_FLAG_LIMITED_DISCOV
)
1045 flags
|= LE_AD_LIMITED
;
1047 if (flags
|| (instance_flags
& MGMT_ADV_FLAG_MANAGED_FLAGS
)) {
1048 /* If a discovery flag wasn't provided, simply use the global
1052 flags
|= get_adv_discov_flags(hdev
);
1054 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
1055 flags
|= LE_AD_NO_BREDR
;
1057 /* If flags would still be empty, then there is no need to
1058 * include the "Flags" AD field".
1071 memcpy(ptr
, adv_instance
->adv_data
,
1072 adv_instance
->adv_data_len
);
1073 ad_len
+= adv_instance
->adv_data_len
;
1074 ptr
+= adv_instance
->adv_data_len
;
1077 /* Provide Tx Power only if we can provide a valid value for it */
1078 if (hdev
->adv_tx_power
!= HCI_TX_POWER_INVALID
&&
1079 (instance_flags
& MGMT_ADV_FLAG_TX_POWER
)) {
1081 ptr
[1] = EIR_TX_POWER
;
1082 ptr
[2] = (u8
)hdev
->adv_tx_power
;
1091 static void update_inst_adv_data(struct hci_request
*req
, u8 instance
)
1093 struct hci_dev
*hdev
= req
->hdev
;
1094 struct hci_cp_le_set_adv_data cp
;
1097 if (!hci_dev_test_flag(hdev
, HCI_LE_ENABLED
))
1100 memset(&cp
, 0, sizeof(cp
));
1102 len
= create_instance_adv_data(hdev
, instance
, cp
.data
);
1104 /* There's nothing to do if the data hasn't changed */
1105 if (hdev
->adv_data_len
== len
&&
1106 memcmp(cp
.data
, hdev
->adv_data
, len
) == 0)
1109 memcpy(hdev
->adv_data
, cp
.data
, sizeof(cp
.data
));
1110 hdev
->adv_data_len
= len
;
1114 hci_req_add(req
, HCI_OP_LE_SET_ADV_DATA
, sizeof(cp
), &cp
);
1117 static void update_adv_data(struct hci_request
*req
)
1119 update_inst_adv_data(req
, get_current_adv_instance(req
->hdev
));
1122 int mgmt_update_adv_data(struct hci_dev
*hdev
)
1124 struct hci_request req
;
1126 hci_req_init(&req
, hdev
);
1127 update_adv_data(&req
);
1129 return hci_req_run(&req
, NULL
);
1132 static void create_eir(struct hci_dev
*hdev
, u8
*data
)
1137 name_len
= strlen(hdev
->dev_name
);
1141 if (name_len
> 48) {
1143 ptr
[1] = EIR_NAME_SHORT
;
1145 ptr
[1] = EIR_NAME_COMPLETE
;
1147 /* EIR Data length */
1148 ptr
[0] = name_len
+ 1;
1150 memcpy(ptr
+ 2, hdev
->dev_name
, name_len
);
1152 ptr
+= (name_len
+ 2);
1155 if (hdev
->inq_tx_power
!= HCI_TX_POWER_INVALID
) {
1157 ptr
[1] = EIR_TX_POWER
;
1158 ptr
[2] = (u8
) hdev
->inq_tx_power
;
1163 if (hdev
->devid_source
> 0) {
1165 ptr
[1] = EIR_DEVICE_ID
;
1167 put_unaligned_le16(hdev
->devid_source
, ptr
+ 2);
1168 put_unaligned_le16(hdev
->devid_vendor
, ptr
+ 4);
1169 put_unaligned_le16(hdev
->devid_product
, ptr
+ 6);
1170 put_unaligned_le16(hdev
->devid_version
, ptr
+ 8);
1175 ptr
= create_uuid16_list(hdev
, ptr
, HCI_MAX_EIR_LENGTH
- (ptr
- data
));
1176 ptr
= create_uuid32_list(hdev
, ptr
, HCI_MAX_EIR_LENGTH
- (ptr
- data
));
1177 ptr
= create_uuid128_list(hdev
, ptr
, HCI_MAX_EIR_LENGTH
- (ptr
- data
));
1180 static void update_eir(struct hci_request
*req
)
1182 struct hci_dev
*hdev
= req
->hdev
;
1183 struct hci_cp_write_eir cp
;
1185 if (!hdev_is_powered(hdev
))
1188 if (!lmp_ext_inq_capable(hdev
))
1191 if (!hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
))
1194 if (hci_dev_test_flag(hdev
, HCI_SERVICE_CACHE
))
1197 memset(&cp
, 0, sizeof(cp
));
1199 create_eir(hdev
, cp
.data
);
1201 if (memcmp(cp
.data
, hdev
->eir
, sizeof(cp
.data
)) == 0)
1204 memcpy(hdev
->eir
, cp
.data
, sizeof(cp
.data
));
1206 hci_req_add(req
, HCI_OP_WRITE_EIR
, sizeof(cp
), &cp
);
1209 static u8
get_service_classes(struct hci_dev
*hdev
)
1211 struct bt_uuid
*uuid
;
1214 list_for_each_entry(uuid
, &hdev
->uuids
, list
)
1215 val
|= uuid
->svc_hint
;
1220 static void update_class(struct hci_request
*req
)
1222 struct hci_dev
*hdev
= req
->hdev
;
1225 BT_DBG("%s", hdev
->name
);
1227 if (!hdev_is_powered(hdev
))
1230 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
1233 if (hci_dev_test_flag(hdev
, HCI_SERVICE_CACHE
))
1236 cod
[0] = hdev
->minor_class
;
1237 cod
[1] = hdev
->major_class
;
1238 cod
[2] = get_service_classes(hdev
);
1240 if (hci_dev_test_flag(hdev
, HCI_LIMITED_DISCOVERABLE
))
1243 if (memcmp(cod
, hdev
->dev_class
, 3) == 0)
1246 hci_req_add(req
, HCI_OP_WRITE_CLASS_OF_DEV
, sizeof(cod
), cod
);
1249 static void disable_advertising(struct hci_request
*req
)
1253 hci_req_add(req
, HCI_OP_LE_SET_ADV_ENABLE
, sizeof(enable
), &enable
);
1256 static void enable_advertising(struct hci_request
*req
)
1258 struct hci_dev
*hdev
= req
->hdev
;
1259 struct hci_cp_le_set_adv_param cp
;
1260 u8 own_addr_type
, enable
= 0x01;
1265 if (hci_conn_num(hdev
, LE_LINK
) > 0)
1268 if (hci_dev_test_flag(hdev
, HCI_LE_ADV
))
1269 disable_advertising(req
);
1271 /* Clear the HCI_LE_ADV bit temporarily so that the
1272 * hci_update_random_address knows that it's safe to go ahead
1273 * and write a new random address. The flag will be set back on
1274 * as soon as the SET_ADV_ENABLE HCI command completes.
1276 hci_dev_clear_flag(hdev
, HCI_LE_ADV
);
1278 instance
= get_current_adv_instance(hdev
);
1279 flags
= get_adv_instance_flags(hdev
, instance
);
1281 /* If the "connectable" instance flag was not set, then choose between
1282 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1284 connectable
= (flags
& MGMT_ADV_FLAG_CONNECTABLE
) ||
1285 get_connectable(hdev
);
1287 /* Set require_privacy to true only when non-connectable
1288 * advertising is used. In that case it is fine to use a
1289 * non-resolvable private address.
1291 if (hci_update_random_address(req
, !connectable
, &own_addr_type
) < 0)
1294 memset(&cp
, 0, sizeof(cp
));
1295 cp
.min_interval
= cpu_to_le16(hdev
->le_adv_min_interval
);
1296 cp
.max_interval
= cpu_to_le16(hdev
->le_adv_max_interval
);
1299 cp
.type
= LE_ADV_IND
;
1300 else if (get_cur_adv_instance_scan_rsp_len(hdev
))
1301 cp
.type
= LE_ADV_SCAN_IND
;
1303 cp
.type
= LE_ADV_NONCONN_IND
;
1305 cp
.own_address_type
= own_addr_type
;
1306 cp
.channel_map
= hdev
->le_adv_channel_map
;
1308 hci_req_add(req
, HCI_OP_LE_SET_ADV_PARAM
, sizeof(cp
), &cp
);
1310 hci_req_add(req
, HCI_OP_LE_SET_ADV_ENABLE
, sizeof(enable
), &enable
);
1313 static void service_cache_off(struct work_struct
*work
)
1315 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
1316 service_cache
.work
);
1317 struct hci_request req
;
1319 if (!hci_dev_test_and_clear_flag(hdev
, HCI_SERVICE_CACHE
))
1322 hci_req_init(&req
, hdev
);
1329 hci_dev_unlock(hdev
);
1331 hci_req_run(&req
, NULL
);
1334 static void rpa_expired(struct work_struct
*work
)
1336 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
1338 struct hci_request req
;
1342 hci_dev_set_flag(hdev
, HCI_RPA_EXPIRED
);
1344 if (!hci_dev_test_flag(hdev
, HCI_ADVERTISING
))
1347 /* The generation of a new RPA and programming it into the
1348 * controller happens in the enable_advertising() function.
1350 hci_req_init(&req
, hdev
);
1351 enable_advertising(&req
);
1352 hci_req_run(&req
, NULL
);
1355 static void mgmt_init_hdev(struct sock
*sk
, struct hci_dev
*hdev
)
1357 if (hci_dev_test_and_set_flag(hdev
, HCI_MGMT
))
1360 INIT_DELAYED_WORK(&hdev
->service_cache
, service_cache_off
);
1361 INIT_DELAYED_WORK(&hdev
->rpa_expired
, rpa_expired
);
1363 /* Non-mgmt controlled devices get this bit set
1364 * implicitly so that pairing works for them, however
1365 * for mgmt we require user-space to explicitly enable
1368 hci_dev_clear_flag(hdev
, HCI_BONDABLE
);
1371 static int read_controller_info(struct sock
*sk
, struct hci_dev
*hdev
,
1372 void *data
, u16 data_len
)
1374 struct mgmt_rp_read_info rp
;
1376 BT_DBG("sock %p %s", sk
, hdev
->name
);
1380 memset(&rp
, 0, sizeof(rp
));
1382 bacpy(&rp
.bdaddr
, &hdev
->bdaddr
);
1384 rp
.version
= hdev
->hci_ver
;
1385 rp
.manufacturer
= cpu_to_le16(hdev
->manufacturer
);
1387 rp
.supported_settings
= cpu_to_le32(get_supported_settings(hdev
));
1388 rp
.current_settings
= cpu_to_le32(get_current_settings(hdev
));
1390 memcpy(rp
.dev_class
, hdev
->dev_class
, 3);
1392 memcpy(rp
.name
, hdev
->dev_name
, sizeof(hdev
->dev_name
));
1393 memcpy(rp
.short_name
, hdev
->short_name
, sizeof(hdev
->short_name
));
1395 hci_dev_unlock(hdev
);
1397 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_READ_INFO
, 0, &rp
,
1401 static int send_settings_rsp(struct sock
*sk
, u16 opcode
, struct hci_dev
*hdev
)
1403 __le32 settings
= cpu_to_le32(get_current_settings(hdev
));
1405 return mgmt_cmd_complete(sk
, hdev
->id
, opcode
, 0, &settings
,
1409 static void clean_up_hci_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
1411 BT_DBG("%s status 0x%02x", hdev
->name
, status
);
1413 if (hci_conn_count(hdev
) == 0) {
1414 cancel_delayed_work(&hdev
->power_off
);
1415 queue_work(hdev
->req_workqueue
, &hdev
->power_off
.work
);
1419 static void advertising_added(struct sock
*sk
, struct hci_dev
*hdev
,
1422 struct mgmt_ev_advertising_added ev
;
1424 ev
.instance
= instance
;
1426 mgmt_event(MGMT_EV_ADVERTISING_ADDED
, hdev
, &ev
, sizeof(ev
), sk
);
1429 static void advertising_removed(struct sock
*sk
, struct hci_dev
*hdev
,
1432 struct mgmt_ev_advertising_removed ev
;
1434 ev
.instance
= instance
;
1436 mgmt_event(MGMT_EV_ADVERTISING_REMOVED
, hdev
, &ev
, sizeof(ev
), sk
);
1439 static int schedule_adv_instance(struct hci_request
*req
, u8 instance
,
1441 struct hci_dev
*hdev
= req
->hdev
;
1442 struct adv_info
*adv_instance
= NULL
;
1445 if (hci_dev_test_flag(hdev
, HCI_ADVERTISING
) ||
1446 !hci_dev_test_flag(hdev
, HCI_ADVERTISING_INSTANCE
))
1449 if (hdev
->adv_instance_timeout
)
1452 adv_instance
= hci_find_adv_instance(hdev
, instance
);
1456 /* A zero timeout means unlimited advertising. As long as there is
1457 * only one instance, duration should be ignored. We still set a timeout
1458 * in case further instances are being added later on.
1460 * If the remaining lifetime of the instance is more than the duration
1461 * then the timeout corresponds to the duration, otherwise it will be
1462 * reduced to the remaining instance lifetime.
1464 if (adv_instance
->timeout
== 0 ||
1465 adv_instance
->duration
<= adv_instance
->remaining_time
)
1466 timeout
= adv_instance
->duration
;
1468 timeout
= adv_instance
->remaining_time
;
1470 /* The remaining time is being reduced unless the instance is being
1471 * advertised without time limit.
1473 if (adv_instance
->timeout
)
1474 adv_instance
->remaining_time
=
1475 adv_instance
->remaining_time
- timeout
;
1477 hdev
->adv_instance_timeout
= timeout
;
1478 queue_delayed_work(hdev
->workqueue
,
1479 &hdev
->adv_instance_expire
,
1480 msecs_to_jiffies(timeout
* 1000));
1482 /* If we're just re-scheduling the same instance again then do not
1483 * execute any HCI commands. This happens when a single instance is
1486 if (!force
&& hdev
->cur_adv_instance
== instance
&&
1487 hci_dev_test_flag(hdev
, HCI_LE_ADV
))
1490 hdev
->cur_adv_instance
= instance
;
1491 update_adv_data(req
);
1492 update_scan_rsp_data(req
);
1493 enable_advertising(req
);
1498 static void cancel_adv_timeout(struct hci_dev
*hdev
)
1500 if (hdev
->adv_instance_timeout
) {
1501 hdev
->adv_instance_timeout
= 0;
1502 cancel_delayed_work(&hdev
->adv_instance_expire
);
1506 /* For a single instance:
1507 * - force == true: The instance will be removed even when its remaining
1508 * lifetime is not zero.
1509 * - force == false: the instance will be deactivated but kept stored unless
1510 * the remaining lifetime is zero.
1512 * For instance == 0x00:
1513 * - force == true: All instances will be removed regardless of their timeout
1515 * - force == false: Only instances that have a timeout will be removed.
1517 static void clear_adv_instance(struct hci_dev
*hdev
, struct hci_request
*req
,
1518 u8 instance
, bool force
)
1520 struct adv_info
*adv_instance
, *n
, *next_instance
= NULL
;
1524 /* Cancel any timeout concerning the removed instance(s). */
1525 if (!instance
|| hdev
->cur_adv_instance
== instance
)
1526 cancel_adv_timeout(hdev
);
1528 /* Get the next instance to advertise BEFORE we remove
1529 * the current one. This can be the same instance again
1530 * if there is only one instance.
1532 if (instance
&& hdev
->cur_adv_instance
== instance
)
1533 next_instance
= hci_get_next_instance(hdev
, instance
);
1535 if (instance
== 0x00) {
1536 list_for_each_entry_safe(adv_instance
, n
, &hdev
->adv_instances
,
1538 if (!(force
|| adv_instance
->timeout
))
1541 rem_inst
= adv_instance
->instance
;
1542 err
= hci_remove_adv_instance(hdev
, rem_inst
);
1544 advertising_removed(NULL
, hdev
, rem_inst
);
1546 hdev
->cur_adv_instance
= 0x00;
1548 adv_instance
= hci_find_adv_instance(hdev
, instance
);
1550 if (force
|| (adv_instance
&& adv_instance
->timeout
&&
1551 !adv_instance
->remaining_time
)) {
1552 /* Don't advertise a removed instance. */
1553 if (next_instance
&&
1554 next_instance
->instance
== instance
)
1555 next_instance
= NULL
;
1557 err
= hci_remove_adv_instance(hdev
, instance
);
1559 advertising_removed(NULL
, hdev
, instance
);
1563 if (list_empty(&hdev
->adv_instances
)) {
1564 hdev
->cur_adv_instance
= 0x00;
1565 hci_dev_clear_flag(hdev
, HCI_ADVERTISING_INSTANCE
);
1568 if (!req
|| !hdev_is_powered(hdev
) ||
1569 hci_dev_test_flag(hdev
, HCI_ADVERTISING
))
1573 schedule_adv_instance(req
, next_instance
->instance
, false);
1576 static int clean_up_hci_state(struct hci_dev
*hdev
)
1578 struct hci_request req
;
1579 struct hci_conn
*conn
;
1580 bool discov_stopped
;
1583 hci_req_init(&req
, hdev
);
1585 if (test_bit(HCI_ISCAN
, &hdev
->flags
) ||
1586 test_bit(HCI_PSCAN
, &hdev
->flags
)) {
1588 hci_req_add(&req
, HCI_OP_WRITE_SCAN_ENABLE
, 1, &scan
);
1591 clear_adv_instance(hdev
, NULL
, 0x00, false);
1593 if (hci_dev_test_flag(hdev
, HCI_LE_ADV
))
1594 disable_advertising(&req
);
1596 discov_stopped
= hci_req_stop_discovery(&req
);
1598 list_for_each_entry(conn
, &hdev
->conn_hash
.list
, list
) {
1599 /* 0x15 == Terminated due to Power Off */
1600 __hci_abort_conn(&req
, conn
, 0x15);
1603 err
= hci_req_run(&req
, clean_up_hci_complete
);
1604 if (!err
&& discov_stopped
)
1605 hci_discovery_set_state(hdev
, DISCOVERY_STOPPING
);
1610 static int set_powered(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1613 struct mgmt_mode
*cp
= data
;
1614 struct mgmt_pending_cmd
*cmd
;
1617 BT_DBG("request for %s", hdev
->name
);
1619 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1620 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_POWERED
,
1621 MGMT_STATUS_INVALID_PARAMS
);
1625 if (pending_find(MGMT_OP_SET_POWERED
, hdev
)) {
1626 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_POWERED
,
1631 if (hci_dev_test_and_clear_flag(hdev
, HCI_AUTO_OFF
)) {
1632 cancel_delayed_work(&hdev
->power_off
);
1635 mgmt_pending_add(sk
, MGMT_OP_SET_POWERED
, hdev
,
1637 err
= mgmt_powered(hdev
, 1);
1642 if (!!cp
->val
== hdev_is_powered(hdev
)) {
1643 err
= send_settings_rsp(sk
, MGMT_OP_SET_POWERED
, hdev
);
1647 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_POWERED
, hdev
, data
, len
);
1654 queue_work(hdev
->req_workqueue
, &hdev
->power_on
);
1657 /* Disconnect connections, stop scans, etc */
1658 err
= clean_up_hci_state(hdev
);
1660 queue_delayed_work(hdev
->req_workqueue
, &hdev
->power_off
,
1661 HCI_POWER_OFF_TIMEOUT
);
1663 /* ENODATA means there were no HCI commands queued */
1664 if (err
== -ENODATA
) {
1665 cancel_delayed_work(&hdev
->power_off
);
1666 queue_work(hdev
->req_workqueue
, &hdev
->power_off
.work
);
1672 hci_dev_unlock(hdev
);
1676 static int new_settings(struct hci_dev
*hdev
, struct sock
*skip
)
1678 __le32 ev
= cpu_to_le32(get_current_settings(hdev
));
1680 return mgmt_generic_event(MGMT_EV_NEW_SETTINGS
, hdev
, &ev
,
1684 int mgmt_new_settings(struct hci_dev
*hdev
)
1686 return new_settings(hdev
, NULL
);
1691 struct hci_dev
*hdev
;
1695 static void settings_rsp(struct mgmt_pending_cmd
*cmd
, void *data
)
1697 struct cmd_lookup
*match
= data
;
1699 send_settings_rsp(cmd
->sk
, cmd
->opcode
, match
->hdev
);
1701 list_del(&cmd
->list
);
1703 if (match
->sk
== NULL
) {
1704 match
->sk
= cmd
->sk
;
1705 sock_hold(match
->sk
);
1708 mgmt_pending_free(cmd
);
1711 static void cmd_status_rsp(struct mgmt_pending_cmd
*cmd
, void *data
)
1715 mgmt_cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, *status
);
1716 mgmt_pending_remove(cmd
);
1719 static void cmd_complete_rsp(struct mgmt_pending_cmd
*cmd
, void *data
)
1721 if (cmd
->cmd_complete
) {
1724 cmd
->cmd_complete(cmd
, *status
);
1725 mgmt_pending_remove(cmd
);
1730 cmd_status_rsp(cmd
, data
);
1733 static int generic_cmd_complete(struct mgmt_pending_cmd
*cmd
, u8 status
)
1735 return mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, status
,
1736 cmd
->param
, cmd
->param_len
);
1739 static int addr_cmd_complete(struct mgmt_pending_cmd
*cmd
, u8 status
)
1741 return mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, status
,
1742 cmd
->param
, sizeof(struct mgmt_addr_info
));
1745 static u8
mgmt_bredr_support(struct hci_dev
*hdev
)
1747 if (!lmp_bredr_capable(hdev
))
1748 return MGMT_STATUS_NOT_SUPPORTED
;
1749 else if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
1750 return MGMT_STATUS_REJECTED
;
1752 return MGMT_STATUS_SUCCESS
;
1755 static u8
mgmt_le_support(struct hci_dev
*hdev
)
1757 if (!lmp_le_capable(hdev
))
1758 return MGMT_STATUS_NOT_SUPPORTED
;
1759 else if (!hci_dev_test_flag(hdev
, HCI_LE_ENABLED
))
1760 return MGMT_STATUS_REJECTED
;
1762 return MGMT_STATUS_SUCCESS
;
1765 static void set_discoverable_complete(struct hci_dev
*hdev
, u8 status
,
1768 struct mgmt_pending_cmd
*cmd
;
1769 struct mgmt_mode
*cp
;
1770 struct hci_request req
;
1773 BT_DBG("status 0x%02x", status
);
1777 cmd
= pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
);
1782 u8 mgmt_err
= mgmt_status(status
);
1783 mgmt_cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_err
);
1784 hci_dev_clear_flag(hdev
, HCI_LIMITED_DISCOVERABLE
);
1790 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_DISCOVERABLE
);
1792 if (hdev
->discov_timeout
> 0) {
1793 int to
= msecs_to_jiffies(hdev
->discov_timeout
* 1000);
1794 queue_delayed_work(hdev
->workqueue
, &hdev
->discov_off
,
1798 changed
= hci_dev_test_and_clear_flag(hdev
, HCI_DISCOVERABLE
);
1801 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
);
1804 new_settings(hdev
, cmd
->sk
);
1806 /* When the discoverable mode gets changed, make sure
1807 * that class of device has the limited discoverable
1808 * bit correctly set. Also update page scan based on whitelist
1811 hci_req_init(&req
, hdev
);
1812 __hci_update_page_scan(&req
);
1814 hci_req_run(&req
, NULL
);
1817 mgmt_pending_remove(cmd
);
1820 hci_dev_unlock(hdev
);
1823 static int set_discoverable(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1826 struct mgmt_cp_set_discoverable
*cp
= data
;
1827 struct mgmt_pending_cmd
*cmd
;
1828 struct hci_request req
;
1833 BT_DBG("request for %s", hdev
->name
);
1835 if (!hci_dev_test_flag(hdev
, HCI_LE_ENABLED
) &&
1836 !hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
1837 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1838 MGMT_STATUS_REJECTED
);
1840 if (cp
->val
!= 0x00 && cp
->val
!= 0x01 && cp
->val
!= 0x02)
1841 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1842 MGMT_STATUS_INVALID_PARAMS
);
1844 timeout
= __le16_to_cpu(cp
->timeout
);
1846 /* Disabling discoverable requires that no timeout is set,
1847 * and enabling limited discoverable requires a timeout.
1849 if ((cp
->val
== 0x00 && timeout
> 0) ||
1850 (cp
->val
== 0x02 && timeout
== 0))
1851 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1852 MGMT_STATUS_INVALID_PARAMS
);
1856 if (!hdev_is_powered(hdev
) && timeout
> 0) {
1857 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1858 MGMT_STATUS_NOT_POWERED
);
1862 if (pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
) ||
1863 pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
)) {
1864 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1869 if (!hci_dev_test_flag(hdev
, HCI_CONNECTABLE
)) {
1870 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1871 MGMT_STATUS_REJECTED
);
1875 if (!hdev_is_powered(hdev
)) {
1876 bool changed
= false;
1878 /* Setting limited discoverable when powered off is
1879 * not a valid operation since it requires a timeout
1880 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1882 if (!!cp
->val
!= hci_dev_test_flag(hdev
, HCI_DISCOVERABLE
)) {
1883 hci_dev_change_flag(hdev
, HCI_DISCOVERABLE
);
1887 err
= send_settings_rsp(sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
);
1892 err
= new_settings(hdev
, sk
);
1897 /* If the current mode is the same, then just update the timeout
1898 * value with the new value. And if only the timeout gets updated,
1899 * then no need for any HCI transactions.
1901 if (!!cp
->val
== hci_dev_test_flag(hdev
, HCI_DISCOVERABLE
) &&
1902 (cp
->val
== 0x02) == hci_dev_test_flag(hdev
,
1903 HCI_LIMITED_DISCOVERABLE
)) {
1904 cancel_delayed_work(&hdev
->discov_off
);
1905 hdev
->discov_timeout
= timeout
;
1907 if (cp
->val
&& hdev
->discov_timeout
> 0) {
1908 int to
= msecs_to_jiffies(hdev
->discov_timeout
* 1000);
1909 queue_delayed_work(hdev
->workqueue
, &hdev
->discov_off
,
1913 err
= send_settings_rsp(sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
);
1917 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
, data
, len
);
1923 /* Cancel any potential discoverable timeout that might be
1924 * still active and store new timeout value. The arming of
1925 * the timeout happens in the complete handler.
1927 cancel_delayed_work(&hdev
->discov_off
);
1928 hdev
->discov_timeout
= timeout
;
1930 /* Limited discoverable mode */
1931 if (cp
->val
== 0x02)
1932 hci_dev_set_flag(hdev
, HCI_LIMITED_DISCOVERABLE
);
1934 hci_dev_clear_flag(hdev
, HCI_LIMITED_DISCOVERABLE
);
1936 hci_req_init(&req
, hdev
);
1938 /* The procedure for LE-only controllers is much simpler - just
1939 * update the advertising data.
1941 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
1947 struct hci_cp_write_current_iac_lap hci_cp
;
1949 if (cp
->val
== 0x02) {
1950 /* Limited discoverable mode */
1951 hci_cp
.num_iac
= min_t(u8
, hdev
->num_iac
, 2);
1952 hci_cp
.iac_lap
[0] = 0x00; /* LIAC */
1953 hci_cp
.iac_lap
[1] = 0x8b;
1954 hci_cp
.iac_lap
[2] = 0x9e;
1955 hci_cp
.iac_lap
[3] = 0x33; /* GIAC */
1956 hci_cp
.iac_lap
[4] = 0x8b;
1957 hci_cp
.iac_lap
[5] = 0x9e;
1959 /* General discoverable mode */
1961 hci_cp
.iac_lap
[0] = 0x33; /* GIAC */
1962 hci_cp
.iac_lap
[1] = 0x8b;
1963 hci_cp
.iac_lap
[2] = 0x9e;
1966 hci_req_add(&req
, HCI_OP_WRITE_CURRENT_IAC_LAP
,
1967 (hci_cp
.num_iac
* 3) + 1, &hci_cp
);
1969 scan
|= SCAN_INQUIRY
;
1971 hci_dev_clear_flag(hdev
, HCI_LIMITED_DISCOVERABLE
);
1974 hci_req_add(&req
, HCI_OP_WRITE_SCAN_ENABLE
, sizeof(scan
), &scan
);
1977 update_adv_data(&req
);
1979 err
= hci_req_run(&req
, set_discoverable_complete
);
1981 mgmt_pending_remove(cmd
);
1984 hci_dev_unlock(hdev
);
1988 static void write_fast_connectable(struct hci_request
*req
, bool enable
)
1990 struct hci_dev
*hdev
= req
->hdev
;
1991 struct hci_cp_write_page_scan_activity acp
;
1994 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
1997 if (hdev
->hci_ver
< BLUETOOTH_VER_1_2
)
2001 type
= PAGE_SCAN_TYPE_INTERLACED
;
2003 /* 160 msec page scan interval */
2004 acp
.interval
= cpu_to_le16(0x0100);
2006 type
= PAGE_SCAN_TYPE_STANDARD
; /* default */
2008 /* default 1.28 sec page scan */
2009 acp
.interval
= cpu_to_le16(0x0800);
2012 acp
.window
= cpu_to_le16(0x0012);
2014 if (__cpu_to_le16(hdev
->page_scan_interval
) != acp
.interval
||
2015 __cpu_to_le16(hdev
->page_scan_window
) != acp
.window
)
2016 hci_req_add(req
, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY
,
2019 if (hdev
->page_scan_type
!= type
)
2020 hci_req_add(req
, HCI_OP_WRITE_PAGE_SCAN_TYPE
, 1, &type
);
2023 static void set_connectable_complete(struct hci_dev
*hdev
, u8 status
,
2026 struct mgmt_pending_cmd
*cmd
;
2027 struct mgmt_mode
*cp
;
2028 bool conn_changed
, discov_changed
;
2030 BT_DBG("status 0x%02x", status
);
2034 cmd
= pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
);
2039 u8 mgmt_err
= mgmt_status(status
);
2040 mgmt_cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_err
);
2046 conn_changed
= !hci_dev_test_and_set_flag(hdev
,
2048 discov_changed
= false;
2050 conn_changed
= hci_dev_test_and_clear_flag(hdev
,
2052 discov_changed
= hci_dev_test_and_clear_flag(hdev
,
2056 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_CONNECTABLE
, hdev
);
2058 if (conn_changed
|| discov_changed
) {
2059 new_settings(hdev
, cmd
->sk
);
2060 hci_update_page_scan(hdev
);
2062 mgmt_update_adv_data(hdev
);
2063 hci_update_background_scan(hdev
);
2067 mgmt_pending_remove(cmd
);
2070 hci_dev_unlock(hdev
);
2073 static int set_connectable_update_settings(struct hci_dev
*hdev
,
2074 struct sock
*sk
, u8 val
)
2076 bool changed
= false;
2079 if (!!val
!= hci_dev_test_flag(hdev
, HCI_CONNECTABLE
))
2083 hci_dev_set_flag(hdev
, HCI_CONNECTABLE
);
2085 hci_dev_clear_flag(hdev
, HCI_CONNECTABLE
);
2086 hci_dev_clear_flag(hdev
, HCI_DISCOVERABLE
);
2089 err
= send_settings_rsp(sk
, MGMT_OP_SET_CONNECTABLE
, hdev
);
2094 hci_update_page_scan(hdev
);
2095 hci_update_background_scan(hdev
);
2096 return new_settings(hdev
, sk
);
2102 static int set_connectable(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2105 struct mgmt_mode
*cp
= data
;
2106 struct mgmt_pending_cmd
*cmd
;
2107 struct hci_request req
;
2111 BT_DBG("request for %s", hdev
->name
);
2113 if (!hci_dev_test_flag(hdev
, HCI_LE_ENABLED
) &&
2114 !hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
2115 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_CONNECTABLE
,
2116 MGMT_STATUS_REJECTED
);
2118 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
2119 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_CONNECTABLE
,
2120 MGMT_STATUS_INVALID_PARAMS
);
2124 if (!hdev_is_powered(hdev
)) {
2125 err
= set_connectable_update_settings(hdev
, sk
, cp
->val
);
2129 if (pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
) ||
2130 pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
)) {
2131 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_CONNECTABLE
,
2136 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_CONNECTABLE
, hdev
, data
, len
);
2142 hci_req_init(&req
, hdev
);
2144 /* If BR/EDR is not enabled and we disable advertising as a
2145 * by-product of disabling connectable, we need to update the
2146 * advertising flags.
2148 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
)) {
2150 hci_dev_clear_flag(hdev
, HCI_LIMITED_DISCOVERABLE
);
2151 hci_dev_clear_flag(hdev
, HCI_DISCOVERABLE
);
2153 update_adv_data(&req
);
2154 } else if (cp
->val
!= test_bit(HCI_PSCAN
, &hdev
->flags
)) {
2158 /* If we don't have any whitelist entries just
2159 * disable all scanning. If there are entries
2160 * and we had both page and inquiry scanning
2161 * enabled then fall back to only page scanning.
2162 * Otherwise no changes are needed.
2164 if (list_empty(&hdev
->whitelist
))
2165 scan
= SCAN_DISABLED
;
2166 else if (test_bit(HCI_ISCAN
, &hdev
->flags
))
2169 goto no_scan_update
;
2171 if (test_bit(HCI_ISCAN
, &hdev
->flags
) &&
2172 hdev
->discov_timeout
> 0)
2173 cancel_delayed_work(&hdev
->discov_off
);
2176 hci_req_add(&req
, HCI_OP_WRITE_SCAN_ENABLE
, 1, &scan
);
2180 /* Update the advertising parameters if necessary */
2181 if (hci_dev_test_flag(hdev
, HCI_ADVERTISING
) ||
2182 hci_dev_test_flag(hdev
, HCI_ADVERTISING_INSTANCE
))
2183 enable_advertising(&req
);
2185 err
= hci_req_run(&req
, set_connectable_complete
);
2187 mgmt_pending_remove(cmd
);
2188 if (err
== -ENODATA
)
2189 err
= set_connectable_update_settings(hdev
, sk
,
2195 hci_dev_unlock(hdev
);
2199 static int set_bondable(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2202 struct mgmt_mode
*cp
= data
;
2206 BT_DBG("request for %s", hdev
->name
);
2208 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
2209 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BONDABLE
,
2210 MGMT_STATUS_INVALID_PARAMS
);
2215 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_BONDABLE
);
2217 changed
= hci_dev_test_and_clear_flag(hdev
, HCI_BONDABLE
);
2219 err
= send_settings_rsp(sk
, MGMT_OP_SET_BONDABLE
, hdev
);
2224 err
= new_settings(hdev
, sk
);
2227 hci_dev_unlock(hdev
);
2231 static int set_link_security(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2234 struct mgmt_mode
*cp
= data
;
2235 struct mgmt_pending_cmd
*cmd
;
2239 BT_DBG("request for %s", hdev
->name
);
2241 status
= mgmt_bredr_support(hdev
);
2243 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LINK_SECURITY
,
2246 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
2247 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LINK_SECURITY
,
2248 MGMT_STATUS_INVALID_PARAMS
);
2252 if (!hdev_is_powered(hdev
)) {
2253 bool changed
= false;
2255 if (!!cp
->val
!= hci_dev_test_flag(hdev
, HCI_LINK_SECURITY
)) {
2256 hci_dev_change_flag(hdev
, HCI_LINK_SECURITY
);
2260 err
= send_settings_rsp(sk
, MGMT_OP_SET_LINK_SECURITY
, hdev
);
2265 err
= new_settings(hdev
, sk
);
2270 if (pending_find(MGMT_OP_SET_LINK_SECURITY
, hdev
)) {
2271 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LINK_SECURITY
,
2278 if (test_bit(HCI_AUTH
, &hdev
->flags
) == val
) {
2279 err
= send_settings_rsp(sk
, MGMT_OP_SET_LINK_SECURITY
, hdev
);
2283 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_LINK_SECURITY
, hdev
, data
, len
);
2289 err
= hci_send_cmd(hdev
, HCI_OP_WRITE_AUTH_ENABLE
, sizeof(val
), &val
);
2291 mgmt_pending_remove(cmd
);
2296 hci_dev_unlock(hdev
);
2300 static int set_ssp(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
2302 struct mgmt_mode
*cp
= data
;
2303 struct mgmt_pending_cmd
*cmd
;
2307 BT_DBG("request for %s", hdev
->name
);
2309 status
= mgmt_bredr_support(hdev
);
2311 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
, status
);
2313 if (!lmp_ssp_capable(hdev
))
2314 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
,
2315 MGMT_STATUS_NOT_SUPPORTED
);
2317 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
2318 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
,
2319 MGMT_STATUS_INVALID_PARAMS
);
2323 if (!hdev_is_powered(hdev
)) {
2327 changed
= !hci_dev_test_and_set_flag(hdev
,
2330 changed
= hci_dev_test_and_clear_flag(hdev
,
2333 changed
= hci_dev_test_and_clear_flag(hdev
,
2336 hci_dev_clear_flag(hdev
, HCI_HS_ENABLED
);
2339 err
= send_settings_rsp(sk
, MGMT_OP_SET_SSP
, hdev
);
2344 err
= new_settings(hdev
, sk
);
2349 if (pending_find(MGMT_OP_SET_SSP
, hdev
)) {
2350 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
,
2355 if (!!cp
->val
== hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
)) {
2356 err
= send_settings_rsp(sk
, MGMT_OP_SET_SSP
, hdev
);
2360 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_SSP
, hdev
, data
, len
);
2366 if (!cp
->val
&& hci_dev_test_flag(hdev
, HCI_USE_DEBUG_KEYS
))
2367 hci_send_cmd(hdev
, HCI_OP_WRITE_SSP_DEBUG_MODE
,
2368 sizeof(cp
->val
), &cp
->val
);
2370 err
= hci_send_cmd(hdev
, HCI_OP_WRITE_SSP_MODE
, 1, &cp
->val
);
2372 mgmt_pending_remove(cmd
);
2377 hci_dev_unlock(hdev
);
2381 static int set_hs(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
2383 struct mgmt_mode
*cp
= data
;
2388 BT_DBG("request for %s", hdev
->name
);
2390 status
= mgmt_bredr_support(hdev
);
2392 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
, status
);
2394 if (!lmp_ssp_capable(hdev
))
2395 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
2396 MGMT_STATUS_NOT_SUPPORTED
);
2398 if (!hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
))
2399 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
2400 MGMT_STATUS_REJECTED
);
2402 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
2403 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
2404 MGMT_STATUS_INVALID_PARAMS
);
2408 if (pending_find(MGMT_OP_SET_SSP
, hdev
)) {
2409 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
2415 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_HS_ENABLED
);
2417 if (hdev_is_powered(hdev
)) {
2418 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
2419 MGMT_STATUS_REJECTED
);
2423 changed
= hci_dev_test_and_clear_flag(hdev
, HCI_HS_ENABLED
);
2426 err
= send_settings_rsp(sk
, MGMT_OP_SET_HS
, hdev
);
2431 err
= new_settings(hdev
, sk
);
2434 hci_dev_unlock(hdev
);
2438 static void le_enable_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
2440 struct cmd_lookup match
= { NULL
, hdev
};
2445 u8 mgmt_err
= mgmt_status(status
);
2447 mgmt_pending_foreach(MGMT_OP_SET_LE
, hdev
, cmd_status_rsp
,
2452 mgmt_pending_foreach(MGMT_OP_SET_LE
, hdev
, settings_rsp
, &match
);
2454 new_settings(hdev
, match
.sk
);
2459 /* Make sure the controller has a good default for
2460 * advertising data. Restrict the update to when LE
2461 * has actually been enabled. During power on, the
2462 * update in powered_update_hci will take care of it.
2464 if (hci_dev_test_flag(hdev
, HCI_LE_ENABLED
)) {
2465 struct hci_request req
;
2467 hci_req_init(&req
, hdev
);
2468 update_adv_data(&req
);
2469 update_scan_rsp_data(&req
);
2470 hci_req_run(&req
, NULL
);
2471 hci_update_background_scan(hdev
);
2475 hci_dev_unlock(hdev
);
2478 static int set_le(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
2480 struct mgmt_mode
*cp
= data
;
2481 struct hci_cp_write_le_host_supported hci_cp
;
2482 struct mgmt_pending_cmd
*cmd
;
2483 struct hci_request req
;
2487 BT_DBG("request for %s", hdev
->name
);
2489 if (!lmp_le_capable(hdev
))
2490 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
2491 MGMT_STATUS_NOT_SUPPORTED
);
2493 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
2494 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
2495 MGMT_STATUS_INVALID_PARAMS
);
2497 /* Bluetooth single mode LE only controllers or dual-mode
2498 * controllers configured as LE only devices, do not allow
2499 * switching LE off. These have either LE enabled explicitly
2500 * or BR/EDR has been previously switched off.
2502 * When trying to enable an already enabled LE, then gracefully
2503 * send a positive response. Trying to disable it however will
2504 * result into rejection.
2506 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
)) {
2507 if (cp
->val
== 0x01)
2508 return send_settings_rsp(sk
, MGMT_OP_SET_LE
, hdev
);
2510 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
2511 MGMT_STATUS_REJECTED
);
2517 enabled
= lmp_host_le_capable(hdev
);
2520 clear_adv_instance(hdev
, NULL
, 0x00, true);
2522 if (!hdev_is_powered(hdev
) || val
== enabled
) {
2523 bool changed
= false;
2525 if (val
!= hci_dev_test_flag(hdev
, HCI_LE_ENABLED
)) {
2526 hci_dev_change_flag(hdev
, HCI_LE_ENABLED
);
2530 if (!val
&& hci_dev_test_flag(hdev
, HCI_ADVERTISING
)) {
2531 hci_dev_clear_flag(hdev
, HCI_ADVERTISING
);
2535 err
= send_settings_rsp(sk
, MGMT_OP_SET_LE
, hdev
);
2540 err
= new_settings(hdev
, sk
);
2545 if (pending_find(MGMT_OP_SET_LE
, hdev
) ||
2546 pending_find(MGMT_OP_SET_ADVERTISING
, hdev
)) {
2547 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
2552 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_LE
, hdev
, data
, len
);
2558 hci_req_init(&req
, hdev
);
2560 memset(&hci_cp
, 0, sizeof(hci_cp
));
2564 hci_cp
.simul
= 0x00;
2566 if (hci_dev_test_flag(hdev
, HCI_LE_ADV
))
2567 disable_advertising(&req
);
2570 hci_req_add(&req
, HCI_OP_WRITE_LE_HOST_SUPPORTED
, sizeof(hci_cp
),
2573 err
= hci_req_run(&req
, le_enable_complete
);
2575 mgmt_pending_remove(cmd
);
2578 hci_dev_unlock(hdev
);
2582 /* This is a helper function to test for pending mgmt commands that can
2583 * cause CoD or EIR HCI commands. We can only allow one such pending
2584 * mgmt command at a time since otherwise we cannot easily track what
2585 * the current values are, will be, and based on that calculate if a new
2586 * HCI command needs to be sent and if yes with what value.
2588 static bool pending_eir_or_class(struct hci_dev
*hdev
)
2590 struct mgmt_pending_cmd
*cmd
;
2592 list_for_each_entry(cmd
, &hdev
->mgmt_pending
, list
) {
2593 switch (cmd
->opcode
) {
2594 case MGMT_OP_ADD_UUID
:
2595 case MGMT_OP_REMOVE_UUID
:
2596 case MGMT_OP_SET_DEV_CLASS
:
2597 case MGMT_OP_SET_POWERED
:
2605 static const u8 bluetooth_base_uuid
[] = {
2606 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2607 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2610 static u8
get_uuid_size(const u8
*uuid
)
2614 if (memcmp(uuid
, bluetooth_base_uuid
, 12))
2617 val
= get_unaligned_le32(&uuid
[12]);
2624 static void mgmt_class_complete(struct hci_dev
*hdev
, u16 mgmt_op
, u8 status
)
2626 struct mgmt_pending_cmd
*cmd
;
2630 cmd
= pending_find(mgmt_op
, hdev
);
2634 mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
,
2635 mgmt_status(status
), hdev
->dev_class
, 3);
2637 mgmt_pending_remove(cmd
);
2640 hci_dev_unlock(hdev
);
2643 static void add_uuid_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
2645 BT_DBG("status 0x%02x", status
);
2647 mgmt_class_complete(hdev
, MGMT_OP_ADD_UUID
, status
);
2650 static int add_uuid(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
2652 struct mgmt_cp_add_uuid
*cp
= data
;
2653 struct mgmt_pending_cmd
*cmd
;
2654 struct hci_request req
;
2655 struct bt_uuid
*uuid
;
2658 BT_DBG("request for %s", hdev
->name
);
2662 if (pending_eir_or_class(hdev
)) {
2663 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_UUID
,
2668 uuid
= kmalloc(sizeof(*uuid
), GFP_KERNEL
);
2674 memcpy(uuid
->uuid
, cp
->uuid
, 16);
2675 uuid
->svc_hint
= cp
->svc_hint
;
2676 uuid
->size
= get_uuid_size(cp
->uuid
);
2678 list_add_tail(&uuid
->list
, &hdev
->uuids
);
2680 hci_req_init(&req
, hdev
);
2685 err
= hci_req_run(&req
, add_uuid_complete
);
2687 if (err
!= -ENODATA
)
2690 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_UUID
, 0,
2691 hdev
->dev_class
, 3);
2695 cmd
= mgmt_pending_add(sk
, MGMT_OP_ADD_UUID
, hdev
, data
, len
);
2704 hci_dev_unlock(hdev
);
2708 static bool enable_service_cache(struct hci_dev
*hdev
)
2710 if (!hdev_is_powered(hdev
))
2713 if (!hci_dev_test_and_set_flag(hdev
, HCI_SERVICE_CACHE
)) {
2714 queue_delayed_work(hdev
->workqueue
, &hdev
->service_cache
,
2722 static void remove_uuid_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
2724 BT_DBG("status 0x%02x", status
);
2726 mgmt_class_complete(hdev
, MGMT_OP_REMOVE_UUID
, status
);
2729 static int remove_uuid(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2732 struct mgmt_cp_remove_uuid
*cp
= data
;
2733 struct mgmt_pending_cmd
*cmd
;
2734 struct bt_uuid
*match
, *tmp
;
2735 u8 bt_uuid_any
[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2736 struct hci_request req
;
2739 BT_DBG("request for %s", hdev
->name
);
2743 if (pending_eir_or_class(hdev
)) {
2744 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
,
2749 if (memcmp(cp
->uuid
, bt_uuid_any
, 16) == 0) {
2750 hci_uuids_clear(hdev
);
2752 if (enable_service_cache(hdev
)) {
2753 err
= mgmt_cmd_complete(sk
, hdev
->id
,
2754 MGMT_OP_REMOVE_UUID
,
2755 0, hdev
->dev_class
, 3);
2764 list_for_each_entry_safe(match
, tmp
, &hdev
->uuids
, list
) {
2765 if (memcmp(match
->uuid
, cp
->uuid
, 16) != 0)
2768 list_del(&match
->list
);
2774 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
,
2775 MGMT_STATUS_INVALID_PARAMS
);
2780 hci_req_init(&req
, hdev
);
2785 err
= hci_req_run(&req
, remove_uuid_complete
);
2787 if (err
!= -ENODATA
)
2790 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
, 0,
2791 hdev
->dev_class
, 3);
2795 cmd
= mgmt_pending_add(sk
, MGMT_OP_REMOVE_UUID
, hdev
, data
, len
);
2804 hci_dev_unlock(hdev
);
2808 static void set_class_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
2810 BT_DBG("status 0x%02x", status
);
2812 mgmt_class_complete(hdev
, MGMT_OP_SET_DEV_CLASS
, status
);
2815 static int set_dev_class(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2818 struct mgmt_cp_set_dev_class
*cp
= data
;
2819 struct mgmt_pending_cmd
*cmd
;
2820 struct hci_request req
;
2823 BT_DBG("request for %s", hdev
->name
);
2825 if (!lmp_bredr_capable(hdev
))
2826 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
,
2827 MGMT_STATUS_NOT_SUPPORTED
);
2831 if (pending_eir_or_class(hdev
)) {
2832 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
,
2837 if ((cp
->minor
& 0x03) != 0 || (cp
->major
& 0xe0) != 0) {
2838 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
,
2839 MGMT_STATUS_INVALID_PARAMS
);
2843 hdev
->major_class
= cp
->major
;
2844 hdev
->minor_class
= cp
->minor
;
2846 if (!hdev_is_powered(hdev
)) {
2847 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
, 0,
2848 hdev
->dev_class
, 3);
2852 hci_req_init(&req
, hdev
);
2854 if (hci_dev_test_and_clear_flag(hdev
, HCI_SERVICE_CACHE
)) {
2855 hci_dev_unlock(hdev
);
2856 cancel_delayed_work_sync(&hdev
->service_cache
);
2863 err
= hci_req_run(&req
, set_class_complete
);
2865 if (err
!= -ENODATA
)
2868 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
, 0,
2869 hdev
->dev_class
, 3);
2873 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_DEV_CLASS
, hdev
, data
, len
);
2882 hci_dev_unlock(hdev
);
2886 static int load_link_keys(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2889 struct mgmt_cp_load_link_keys
*cp
= data
;
2890 const u16 max_key_count
= ((U16_MAX
- sizeof(*cp
)) /
2891 sizeof(struct mgmt_link_key_info
));
2892 u16 key_count
, expected_len
;
2896 BT_DBG("request for %s", hdev
->name
);
2898 if (!lmp_bredr_capable(hdev
))
2899 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2900 MGMT_STATUS_NOT_SUPPORTED
);
2902 key_count
= __le16_to_cpu(cp
->key_count
);
2903 if (key_count
> max_key_count
) {
2904 BT_ERR("load_link_keys: too big key_count value %u",
2906 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2907 MGMT_STATUS_INVALID_PARAMS
);
2910 expected_len
= sizeof(*cp
) + key_count
*
2911 sizeof(struct mgmt_link_key_info
);
2912 if (expected_len
!= len
) {
2913 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2915 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2916 MGMT_STATUS_INVALID_PARAMS
);
2919 if (cp
->debug_keys
!= 0x00 && cp
->debug_keys
!= 0x01)
2920 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2921 MGMT_STATUS_INVALID_PARAMS
);
2923 BT_DBG("%s debug_keys %u key_count %u", hdev
->name
, cp
->debug_keys
,
2926 for (i
= 0; i
< key_count
; i
++) {
2927 struct mgmt_link_key_info
*key
= &cp
->keys
[i
];
2929 if (key
->addr
.type
!= BDADDR_BREDR
|| key
->type
> 0x08)
2930 return mgmt_cmd_status(sk
, hdev
->id
,
2931 MGMT_OP_LOAD_LINK_KEYS
,
2932 MGMT_STATUS_INVALID_PARAMS
);
2937 hci_link_keys_clear(hdev
);
2940 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_KEEP_DEBUG_KEYS
);
2942 changed
= hci_dev_test_and_clear_flag(hdev
,
2943 HCI_KEEP_DEBUG_KEYS
);
2946 new_settings(hdev
, NULL
);
2948 for (i
= 0; i
< key_count
; i
++) {
2949 struct mgmt_link_key_info
*key
= &cp
->keys
[i
];
2951 /* Always ignore debug keys and require a new pairing if
2952 * the user wants to use them.
2954 if (key
->type
== HCI_LK_DEBUG_COMBINATION
)
2957 hci_add_link_key(hdev
, NULL
, &key
->addr
.bdaddr
, key
->val
,
2958 key
->type
, key
->pin_len
, NULL
);
2961 mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
, 0, NULL
, 0);
2963 hci_dev_unlock(hdev
);
2968 static int device_unpaired(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
2969 u8 addr_type
, struct sock
*skip_sk
)
2971 struct mgmt_ev_device_unpaired ev
;
2973 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
2974 ev
.addr
.type
= addr_type
;
2976 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED
, hdev
, &ev
, sizeof(ev
),
2980 static int unpair_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2983 struct mgmt_cp_unpair_device
*cp
= data
;
2984 struct mgmt_rp_unpair_device rp
;
2985 struct hci_conn_params
*params
;
2986 struct mgmt_pending_cmd
*cmd
;
2987 struct hci_conn
*conn
;
2991 memset(&rp
, 0, sizeof(rp
));
2992 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
2993 rp
.addr
.type
= cp
->addr
.type
;
2995 if (!bdaddr_type_is_valid(cp
->addr
.type
))
2996 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
2997 MGMT_STATUS_INVALID_PARAMS
,
3000 if (cp
->disconnect
!= 0x00 && cp
->disconnect
!= 0x01)
3001 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
3002 MGMT_STATUS_INVALID_PARAMS
,
3007 if (!hdev_is_powered(hdev
)) {
3008 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
3009 MGMT_STATUS_NOT_POWERED
, &rp
,
3014 if (cp
->addr
.type
== BDADDR_BREDR
) {
3015 /* If disconnection is requested, then look up the
3016 * connection. If the remote device is connected, it
3017 * will be later used to terminate the link.
3019 * Setting it to NULL explicitly will cause no
3020 * termination of the link.
3023 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
3028 err
= hci_remove_link_key(hdev
, &cp
->addr
.bdaddr
);
3030 err
= mgmt_cmd_complete(sk
, hdev
->id
,
3031 MGMT_OP_UNPAIR_DEVICE
,
3032 MGMT_STATUS_NOT_PAIRED
, &rp
,
3040 /* LE address type */
3041 addr_type
= le_addr_type(cp
->addr
.type
);
3043 hci_remove_irk(hdev
, &cp
->addr
.bdaddr
, addr_type
);
3045 err
= hci_remove_ltk(hdev
, &cp
->addr
.bdaddr
, addr_type
);
3047 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
3048 MGMT_STATUS_NOT_PAIRED
, &rp
,
3053 conn
= hci_conn_hash_lookup_le(hdev
, &cp
->addr
.bdaddr
, addr_type
);
3055 hci_conn_params_del(hdev
, &cp
->addr
.bdaddr
, addr_type
);
3059 /* Abort any ongoing SMP pairing */
3060 smp_cancel_pairing(conn
);
3062 /* Defer clearing up the connection parameters until closing to
3063 * give a chance of keeping them if a repairing happens.
3065 set_bit(HCI_CONN_PARAM_REMOVAL_PEND
, &conn
->flags
);
3067 /* Disable auto-connection parameters if present */
3068 params
= hci_conn_params_lookup(hdev
, &cp
->addr
.bdaddr
, addr_type
);
3070 if (params
->explicit_connect
)
3071 params
->auto_connect
= HCI_AUTO_CONN_EXPLICIT
;
3073 params
->auto_connect
= HCI_AUTO_CONN_DISABLED
;
3076 /* If disconnection is not requested, then clear the connection
3077 * variable so that the link is not terminated.
3079 if (!cp
->disconnect
)
3083 /* If the connection variable is set, then termination of the
3084 * link is requested.
3087 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
, 0,
3089 device_unpaired(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
, sk
);
3093 cmd
= mgmt_pending_add(sk
, MGMT_OP_UNPAIR_DEVICE
, hdev
, cp
,
3100 cmd
->cmd_complete
= addr_cmd_complete
;
3102 err
= hci_abort_conn(conn
, HCI_ERROR_REMOTE_USER_TERM
);
3104 mgmt_pending_remove(cmd
);
3107 hci_dev_unlock(hdev
);
3111 static int disconnect(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3114 struct mgmt_cp_disconnect
*cp
= data
;
3115 struct mgmt_rp_disconnect rp
;
3116 struct mgmt_pending_cmd
*cmd
;
3117 struct hci_conn
*conn
;
3122 memset(&rp
, 0, sizeof(rp
));
3123 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
3124 rp
.addr
.type
= cp
->addr
.type
;
3126 if (!bdaddr_type_is_valid(cp
->addr
.type
))
3127 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
3128 MGMT_STATUS_INVALID_PARAMS
,
3133 if (!test_bit(HCI_UP
, &hdev
->flags
)) {
3134 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
3135 MGMT_STATUS_NOT_POWERED
, &rp
,
3140 if (pending_find(MGMT_OP_DISCONNECT
, hdev
)) {
3141 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
3142 MGMT_STATUS_BUSY
, &rp
, sizeof(rp
));
3146 if (cp
->addr
.type
== BDADDR_BREDR
)
3147 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
3150 conn
= hci_conn_hash_lookup_le(hdev
, &cp
->addr
.bdaddr
,
3151 le_addr_type(cp
->addr
.type
));
3153 if (!conn
|| conn
->state
== BT_OPEN
|| conn
->state
== BT_CLOSED
) {
3154 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
3155 MGMT_STATUS_NOT_CONNECTED
, &rp
,
3160 cmd
= mgmt_pending_add(sk
, MGMT_OP_DISCONNECT
, hdev
, data
, len
);
3166 cmd
->cmd_complete
= generic_cmd_complete
;
3168 err
= hci_disconnect(conn
, HCI_ERROR_REMOTE_USER_TERM
);
3170 mgmt_pending_remove(cmd
);
3173 hci_dev_unlock(hdev
);
3177 static u8
link_to_bdaddr(u8 link_type
, u8 addr_type
)
3179 switch (link_type
) {
3181 switch (addr_type
) {
3182 case ADDR_LE_DEV_PUBLIC
:
3183 return BDADDR_LE_PUBLIC
;
3186 /* Fallback to LE Random address type */
3187 return BDADDR_LE_RANDOM
;
3191 /* Fallback to BR/EDR type */
3192 return BDADDR_BREDR
;
3196 static int get_connections(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3199 struct mgmt_rp_get_connections
*rp
;
3209 if (!hdev_is_powered(hdev
)) {
3210 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_GET_CONNECTIONS
,
3211 MGMT_STATUS_NOT_POWERED
);
3216 list_for_each_entry(c
, &hdev
->conn_hash
.list
, list
) {
3217 if (test_bit(HCI_CONN_MGMT_CONNECTED
, &c
->flags
))
3221 rp_len
= sizeof(*rp
) + (i
* sizeof(struct mgmt_addr_info
));
3222 rp
= kmalloc(rp_len
, GFP_KERNEL
);
3229 list_for_each_entry(c
, &hdev
->conn_hash
.list
, list
) {
3230 if (!test_bit(HCI_CONN_MGMT_CONNECTED
, &c
->flags
))
3232 bacpy(&rp
->addr
[i
].bdaddr
, &c
->dst
);
3233 rp
->addr
[i
].type
= link_to_bdaddr(c
->type
, c
->dst_type
);
3234 if (c
->type
== SCO_LINK
|| c
->type
== ESCO_LINK
)
3239 rp
->conn_count
= cpu_to_le16(i
);
3241 /* Recalculate length in case of filtered SCO connections, etc */
3242 rp_len
= sizeof(*rp
) + (i
* sizeof(struct mgmt_addr_info
));
3244 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONNECTIONS
, 0, rp
,
3250 hci_dev_unlock(hdev
);
3254 static int send_pin_code_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
3255 struct mgmt_cp_pin_code_neg_reply
*cp
)
3257 struct mgmt_pending_cmd
*cmd
;
3260 cmd
= mgmt_pending_add(sk
, MGMT_OP_PIN_CODE_NEG_REPLY
, hdev
, cp
,
3265 err
= hci_send_cmd(hdev
, HCI_OP_PIN_CODE_NEG_REPLY
,
3266 sizeof(cp
->addr
.bdaddr
), &cp
->addr
.bdaddr
);
3268 mgmt_pending_remove(cmd
);
3273 static int pin_code_reply(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3276 struct hci_conn
*conn
;
3277 struct mgmt_cp_pin_code_reply
*cp
= data
;
3278 struct hci_cp_pin_code_reply reply
;
3279 struct mgmt_pending_cmd
*cmd
;
3286 if (!hdev_is_powered(hdev
)) {
3287 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_PIN_CODE_REPLY
,
3288 MGMT_STATUS_NOT_POWERED
);
3292 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
, &cp
->addr
.bdaddr
);
3294 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_PIN_CODE_REPLY
,
3295 MGMT_STATUS_NOT_CONNECTED
);
3299 if (conn
->pending_sec_level
== BT_SECURITY_HIGH
&& cp
->pin_len
!= 16) {
3300 struct mgmt_cp_pin_code_neg_reply ncp
;
3302 memcpy(&ncp
.addr
, &cp
->addr
, sizeof(ncp
.addr
));
3304 BT_ERR("PIN code is not 16 bytes long");
3306 err
= send_pin_code_neg_reply(sk
, hdev
, &ncp
);
3308 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_PIN_CODE_REPLY
,
3309 MGMT_STATUS_INVALID_PARAMS
);
3314 cmd
= mgmt_pending_add(sk
, MGMT_OP_PIN_CODE_REPLY
, hdev
, data
, len
);
3320 cmd
->cmd_complete
= addr_cmd_complete
;
3322 bacpy(&reply
.bdaddr
, &cp
->addr
.bdaddr
);
3323 reply
.pin_len
= cp
->pin_len
;
3324 memcpy(reply
.pin_code
, cp
->pin_code
, sizeof(reply
.pin_code
));
3326 err
= hci_send_cmd(hdev
, HCI_OP_PIN_CODE_REPLY
, sizeof(reply
), &reply
);
3328 mgmt_pending_remove(cmd
);
3331 hci_dev_unlock(hdev
);
3335 static int set_io_capability(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3338 struct mgmt_cp_set_io_capability
*cp
= data
;
3342 if (cp
->io_capability
> SMP_IO_KEYBOARD_DISPLAY
)
3343 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_IO_CAPABILITY
,
3344 MGMT_STATUS_INVALID_PARAMS
, NULL
, 0);
3348 hdev
->io_capability
= cp
->io_capability
;
3350 BT_DBG("%s IO capability set to 0x%02x", hdev
->name
,
3351 hdev
->io_capability
);
3353 hci_dev_unlock(hdev
);
3355 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_IO_CAPABILITY
, 0,
3359 static struct mgmt_pending_cmd
*find_pairing(struct hci_conn
*conn
)
3361 struct hci_dev
*hdev
= conn
->hdev
;
3362 struct mgmt_pending_cmd
*cmd
;
3364 list_for_each_entry(cmd
, &hdev
->mgmt_pending
, list
) {
3365 if (cmd
->opcode
!= MGMT_OP_PAIR_DEVICE
)
3368 if (cmd
->user_data
!= conn
)
3377 static int pairing_complete(struct mgmt_pending_cmd
*cmd
, u8 status
)
3379 struct mgmt_rp_pair_device rp
;
3380 struct hci_conn
*conn
= cmd
->user_data
;
3383 bacpy(&rp
.addr
.bdaddr
, &conn
->dst
);
3384 rp
.addr
.type
= link_to_bdaddr(conn
->type
, conn
->dst_type
);
3386 err
= mgmt_cmd_complete(cmd
->sk
, cmd
->index
, MGMT_OP_PAIR_DEVICE
,
3387 status
, &rp
, sizeof(rp
));
3389 /* So we don't get further callbacks for this connection */
3390 conn
->connect_cfm_cb
= NULL
;
3391 conn
->security_cfm_cb
= NULL
;
3392 conn
->disconn_cfm_cb
= NULL
;
3394 hci_conn_drop(conn
);
3396 /* The device is paired so there is no need to remove
3397 * its connection parameters anymore.
3399 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND
, &conn
->flags
);
3406 void mgmt_smp_complete(struct hci_conn
*conn
, bool complete
)
3408 u8 status
= complete
? MGMT_STATUS_SUCCESS
: MGMT_STATUS_FAILED
;
3409 struct mgmt_pending_cmd
*cmd
;
3411 cmd
= find_pairing(conn
);
3413 cmd
->cmd_complete(cmd
, status
);
3414 mgmt_pending_remove(cmd
);
3418 static void pairing_complete_cb(struct hci_conn
*conn
, u8 status
)
3420 struct mgmt_pending_cmd
*cmd
;
3422 BT_DBG("status %u", status
);
3424 cmd
= find_pairing(conn
);
3426 BT_DBG("Unable to find a pending command");
3430 cmd
->cmd_complete(cmd
, mgmt_status(status
));
3431 mgmt_pending_remove(cmd
);
3434 static void le_pairing_complete_cb(struct hci_conn
*conn
, u8 status
)
3436 struct mgmt_pending_cmd
*cmd
;
3438 BT_DBG("status %u", status
);
3443 cmd
= find_pairing(conn
);
3445 BT_DBG("Unable to find a pending command");
3449 cmd
->cmd_complete(cmd
, mgmt_status(status
));
3450 mgmt_pending_remove(cmd
);
3453 static int pair_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3456 struct mgmt_cp_pair_device
*cp
= data
;
3457 struct mgmt_rp_pair_device rp
;
3458 struct mgmt_pending_cmd
*cmd
;
3459 u8 sec_level
, auth_type
;
3460 struct hci_conn
*conn
;
3465 memset(&rp
, 0, sizeof(rp
));
3466 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
3467 rp
.addr
.type
= cp
->addr
.type
;
3469 if (!bdaddr_type_is_valid(cp
->addr
.type
))
3470 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
3471 MGMT_STATUS_INVALID_PARAMS
,
3474 if (cp
->io_cap
> SMP_IO_KEYBOARD_DISPLAY
)
3475 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
3476 MGMT_STATUS_INVALID_PARAMS
,
3481 if (!hdev_is_powered(hdev
)) {
3482 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
3483 MGMT_STATUS_NOT_POWERED
, &rp
,
3488 if (hci_bdaddr_is_paired(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
)) {
3489 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
3490 MGMT_STATUS_ALREADY_PAIRED
, &rp
,
3495 sec_level
= BT_SECURITY_MEDIUM
;
3496 auth_type
= HCI_AT_DEDICATED_BONDING
;
3498 if (cp
->addr
.type
== BDADDR_BREDR
) {
3499 conn
= hci_connect_acl(hdev
, &cp
->addr
.bdaddr
, sec_level
,
3502 u8 addr_type
= le_addr_type(cp
->addr
.type
);
3503 struct hci_conn_params
*p
;
3505 /* When pairing a new device, it is expected to remember
3506 * this device for future connections. Adding the connection
3507 * parameter information ahead of time allows tracking
3508 * of the slave preferred values and will speed up any
3509 * further connection establishment.
3511 * If connection parameters already exist, then they
3512 * will be kept and this function does nothing.
3514 p
= hci_conn_params_add(hdev
, &cp
->addr
.bdaddr
, addr_type
);
3516 if (p
->auto_connect
== HCI_AUTO_CONN_EXPLICIT
)
3517 p
->auto_connect
= HCI_AUTO_CONN_DISABLED
;
3519 conn
= hci_connect_le_scan(hdev
, &cp
->addr
.bdaddr
,
3520 addr_type
, sec_level
,
3521 HCI_LE_CONN_TIMEOUT
);
3527 if (PTR_ERR(conn
) == -EBUSY
)
3528 status
= MGMT_STATUS_BUSY
;
3529 else if (PTR_ERR(conn
) == -EOPNOTSUPP
)
3530 status
= MGMT_STATUS_NOT_SUPPORTED
;
3531 else if (PTR_ERR(conn
) == -ECONNREFUSED
)
3532 status
= MGMT_STATUS_REJECTED
;
3534 status
= MGMT_STATUS_CONNECT_FAILED
;
3536 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
3537 status
, &rp
, sizeof(rp
));
3541 if (conn
->connect_cfm_cb
) {
3542 hci_conn_drop(conn
);
3543 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
3544 MGMT_STATUS_BUSY
, &rp
, sizeof(rp
));
3548 cmd
= mgmt_pending_add(sk
, MGMT_OP_PAIR_DEVICE
, hdev
, data
, len
);
3551 hci_conn_drop(conn
);
3555 cmd
->cmd_complete
= pairing_complete
;
3557 /* For LE, just connecting isn't a proof that the pairing finished */
3558 if (cp
->addr
.type
== BDADDR_BREDR
) {
3559 conn
->connect_cfm_cb
= pairing_complete_cb
;
3560 conn
->security_cfm_cb
= pairing_complete_cb
;
3561 conn
->disconn_cfm_cb
= pairing_complete_cb
;
3563 conn
->connect_cfm_cb
= le_pairing_complete_cb
;
3564 conn
->security_cfm_cb
= le_pairing_complete_cb
;
3565 conn
->disconn_cfm_cb
= le_pairing_complete_cb
;
3568 conn
->io_capability
= cp
->io_cap
;
3569 cmd
->user_data
= hci_conn_get(conn
);
3571 if ((conn
->state
== BT_CONNECTED
|| conn
->state
== BT_CONFIG
) &&
3572 hci_conn_security(conn
, sec_level
, auth_type
, true)) {
3573 cmd
->cmd_complete(cmd
, 0);
3574 mgmt_pending_remove(cmd
);
3580 hci_dev_unlock(hdev
);
3584 static int cancel_pair_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3587 struct mgmt_addr_info
*addr
= data
;
3588 struct mgmt_pending_cmd
*cmd
;
3589 struct hci_conn
*conn
;
3596 if (!hdev_is_powered(hdev
)) {
3597 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
,
3598 MGMT_STATUS_NOT_POWERED
);
3602 cmd
= pending_find(MGMT_OP_PAIR_DEVICE
, hdev
);
3604 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
,
3605 MGMT_STATUS_INVALID_PARAMS
);
3609 conn
= cmd
->user_data
;
3611 if (bacmp(&addr
->bdaddr
, &conn
->dst
) != 0) {
3612 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
,
3613 MGMT_STATUS_INVALID_PARAMS
);
3617 cmd
->cmd_complete(cmd
, MGMT_STATUS_CANCELLED
);
3618 mgmt_pending_remove(cmd
);
3620 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
, 0,
3621 addr
, sizeof(*addr
));
3623 hci_dev_unlock(hdev
);
3627 static int user_pairing_resp(struct sock
*sk
, struct hci_dev
*hdev
,
3628 struct mgmt_addr_info
*addr
, u16 mgmt_op
,
3629 u16 hci_op
, __le32 passkey
)
3631 struct mgmt_pending_cmd
*cmd
;
3632 struct hci_conn
*conn
;
3637 if (!hdev_is_powered(hdev
)) {
3638 err
= mgmt_cmd_complete(sk
, hdev
->id
, mgmt_op
,
3639 MGMT_STATUS_NOT_POWERED
, addr
,
3644 if (addr
->type
== BDADDR_BREDR
)
3645 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
, &addr
->bdaddr
);
3647 conn
= hci_conn_hash_lookup_le(hdev
, &addr
->bdaddr
,
3648 le_addr_type(addr
->type
));
3651 err
= mgmt_cmd_complete(sk
, hdev
->id
, mgmt_op
,
3652 MGMT_STATUS_NOT_CONNECTED
, addr
,
3657 if (addr
->type
== BDADDR_LE_PUBLIC
|| addr
->type
== BDADDR_LE_RANDOM
) {
3658 err
= smp_user_confirm_reply(conn
, mgmt_op
, passkey
);
3660 err
= mgmt_cmd_complete(sk
, hdev
->id
, mgmt_op
,
3661 MGMT_STATUS_SUCCESS
, addr
,
3664 err
= mgmt_cmd_complete(sk
, hdev
->id
, mgmt_op
,
3665 MGMT_STATUS_FAILED
, addr
,
3671 cmd
= mgmt_pending_add(sk
, mgmt_op
, hdev
, addr
, sizeof(*addr
));
3677 cmd
->cmd_complete
= addr_cmd_complete
;
3679 /* Continue with pairing via HCI */
3680 if (hci_op
== HCI_OP_USER_PASSKEY_REPLY
) {
3681 struct hci_cp_user_passkey_reply cp
;
3683 bacpy(&cp
.bdaddr
, &addr
->bdaddr
);
3684 cp
.passkey
= passkey
;
3685 err
= hci_send_cmd(hdev
, hci_op
, sizeof(cp
), &cp
);
3687 err
= hci_send_cmd(hdev
, hci_op
, sizeof(addr
->bdaddr
),
3691 mgmt_pending_remove(cmd
);
3694 hci_dev_unlock(hdev
);
3698 static int pin_code_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
3699 void *data
, u16 len
)
3701 struct mgmt_cp_pin_code_neg_reply
*cp
= data
;
3705 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3706 MGMT_OP_PIN_CODE_NEG_REPLY
,
3707 HCI_OP_PIN_CODE_NEG_REPLY
, 0);
3710 static int user_confirm_reply(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3713 struct mgmt_cp_user_confirm_reply
*cp
= data
;
3717 if (len
!= sizeof(*cp
))
3718 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_USER_CONFIRM_REPLY
,
3719 MGMT_STATUS_INVALID_PARAMS
);
3721 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3722 MGMT_OP_USER_CONFIRM_REPLY
,
3723 HCI_OP_USER_CONFIRM_REPLY
, 0);
3726 static int user_confirm_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
3727 void *data
, u16 len
)
3729 struct mgmt_cp_user_confirm_neg_reply
*cp
= data
;
3733 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3734 MGMT_OP_USER_CONFIRM_NEG_REPLY
,
3735 HCI_OP_USER_CONFIRM_NEG_REPLY
, 0);
3738 static int user_passkey_reply(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3741 struct mgmt_cp_user_passkey_reply
*cp
= data
;
3745 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3746 MGMT_OP_USER_PASSKEY_REPLY
,
3747 HCI_OP_USER_PASSKEY_REPLY
, cp
->passkey
);
3750 static int user_passkey_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
3751 void *data
, u16 len
)
3753 struct mgmt_cp_user_passkey_neg_reply
*cp
= data
;
3757 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3758 MGMT_OP_USER_PASSKEY_NEG_REPLY
,
3759 HCI_OP_USER_PASSKEY_NEG_REPLY
, 0);
3762 static void update_name(struct hci_request
*req
)
3764 struct hci_dev
*hdev
= req
->hdev
;
3765 struct hci_cp_write_local_name cp
;
3767 memcpy(cp
.name
, hdev
->dev_name
, sizeof(cp
.name
));
3769 hci_req_add(req
, HCI_OP_WRITE_LOCAL_NAME
, sizeof(cp
), &cp
);
3772 static void set_name_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
3774 struct mgmt_cp_set_local_name
*cp
;
3775 struct mgmt_pending_cmd
*cmd
;
3777 BT_DBG("status 0x%02x", status
);
3781 cmd
= pending_find(MGMT_OP_SET_LOCAL_NAME
, hdev
);
3788 mgmt_cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
,
3789 mgmt_status(status
));
3791 mgmt_cmd_complete(cmd
->sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
, 0,
3794 mgmt_pending_remove(cmd
);
3797 hci_dev_unlock(hdev
);
3800 static int set_local_name(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3803 struct mgmt_cp_set_local_name
*cp
= data
;
3804 struct mgmt_pending_cmd
*cmd
;
3805 struct hci_request req
;
3812 /* If the old values are the same as the new ones just return a
3813 * direct command complete event.
3815 if (!memcmp(hdev
->dev_name
, cp
->name
, sizeof(hdev
->dev_name
)) &&
3816 !memcmp(hdev
->short_name
, cp
->short_name
,
3817 sizeof(hdev
->short_name
))) {
3818 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
, 0,
3823 memcpy(hdev
->short_name
, cp
->short_name
, sizeof(hdev
->short_name
));
3825 if (!hdev_is_powered(hdev
)) {
3826 memcpy(hdev
->dev_name
, cp
->name
, sizeof(hdev
->dev_name
));
3828 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
, 0,
3833 err
= mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED
, hdev
,
3839 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_LOCAL_NAME
, hdev
, data
, len
);
3845 memcpy(hdev
->dev_name
, cp
->name
, sizeof(hdev
->dev_name
));
3847 hci_req_init(&req
, hdev
);
3849 if (lmp_bredr_capable(hdev
)) {
3854 /* The name is stored in the scan response data and so
3855 * no need to udpate the advertising data here.
3857 if (lmp_le_capable(hdev
))
3858 update_scan_rsp_data(&req
);
3860 err
= hci_req_run(&req
, set_name_complete
);
3862 mgmt_pending_remove(cmd
);
3865 hci_dev_unlock(hdev
);
3869 static void read_local_oob_data_complete(struct hci_dev
*hdev
, u8 status
,
3870 u16 opcode
, struct sk_buff
*skb
)
3872 struct mgmt_rp_read_local_oob_data mgmt_rp
;
3873 size_t rp_size
= sizeof(mgmt_rp
);
3874 struct mgmt_pending_cmd
*cmd
;
3876 BT_DBG("%s status %u", hdev
->name
, status
);
3878 cmd
= pending_find(MGMT_OP_READ_LOCAL_OOB_DATA
, hdev
);
3882 if (status
|| !skb
) {
3883 mgmt_cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
3884 status
? mgmt_status(status
) : MGMT_STATUS_FAILED
);
3888 memset(&mgmt_rp
, 0, sizeof(mgmt_rp
));
3890 if (opcode
== HCI_OP_READ_LOCAL_OOB_DATA
) {
3891 struct hci_rp_read_local_oob_data
*rp
= (void *) skb
->data
;
3893 if (skb
->len
< sizeof(*rp
)) {
3894 mgmt_cmd_status(cmd
->sk
, hdev
->id
,
3895 MGMT_OP_READ_LOCAL_OOB_DATA
,
3896 MGMT_STATUS_FAILED
);
3900 memcpy(mgmt_rp
.hash192
, rp
->hash
, sizeof(rp
->hash
));
3901 memcpy(mgmt_rp
.rand192
, rp
->rand
, sizeof(rp
->rand
));
3903 rp_size
-= sizeof(mgmt_rp
.hash256
) + sizeof(mgmt_rp
.rand256
);
3905 struct hci_rp_read_local_oob_ext_data
*rp
= (void *) skb
->data
;
3907 if (skb
->len
< sizeof(*rp
)) {
3908 mgmt_cmd_status(cmd
->sk
, hdev
->id
,
3909 MGMT_OP_READ_LOCAL_OOB_DATA
,
3910 MGMT_STATUS_FAILED
);
3914 memcpy(mgmt_rp
.hash192
, rp
->hash192
, sizeof(rp
->hash192
));
3915 memcpy(mgmt_rp
.rand192
, rp
->rand192
, sizeof(rp
->rand192
));
3917 memcpy(mgmt_rp
.hash256
, rp
->hash256
, sizeof(rp
->hash256
));
3918 memcpy(mgmt_rp
.rand256
, rp
->rand256
, sizeof(rp
->rand256
));
3921 mgmt_cmd_complete(cmd
->sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
3922 MGMT_STATUS_SUCCESS
, &mgmt_rp
, rp_size
);
3925 mgmt_pending_remove(cmd
);
3928 static int read_local_oob_data(struct sock
*sk
, struct hci_dev
*hdev
,
3929 void *data
, u16 data_len
)
3931 struct mgmt_pending_cmd
*cmd
;
3932 struct hci_request req
;
3935 BT_DBG("%s", hdev
->name
);
3939 if (!hdev_is_powered(hdev
)) {
3940 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
3941 MGMT_STATUS_NOT_POWERED
);
3945 if (!lmp_ssp_capable(hdev
)) {
3946 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
3947 MGMT_STATUS_NOT_SUPPORTED
);
3951 if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA
, hdev
)) {
3952 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
3957 cmd
= mgmt_pending_add(sk
, MGMT_OP_READ_LOCAL_OOB_DATA
, hdev
, NULL
, 0);
3963 hci_req_init(&req
, hdev
);
3965 if (bredr_sc_enabled(hdev
))
3966 hci_req_add(&req
, HCI_OP_READ_LOCAL_OOB_EXT_DATA
, 0, NULL
);
3968 hci_req_add(&req
, HCI_OP_READ_LOCAL_OOB_DATA
, 0, NULL
);
3970 err
= hci_req_run_skb(&req
, read_local_oob_data_complete
);
3972 mgmt_pending_remove(cmd
);
3975 hci_dev_unlock(hdev
);
3979 static int add_remote_oob_data(struct sock
*sk
, struct hci_dev
*hdev
,
3980 void *data
, u16 len
)
3982 struct mgmt_addr_info
*addr
= data
;
3985 BT_DBG("%s ", hdev
->name
);
3987 if (!bdaddr_type_is_valid(addr
->type
))
3988 return mgmt_cmd_complete(sk
, hdev
->id
,
3989 MGMT_OP_ADD_REMOTE_OOB_DATA
,
3990 MGMT_STATUS_INVALID_PARAMS
,
3991 addr
, sizeof(*addr
));
3995 if (len
== MGMT_ADD_REMOTE_OOB_DATA_SIZE
) {
3996 struct mgmt_cp_add_remote_oob_data
*cp
= data
;
3999 if (cp
->addr
.type
!= BDADDR_BREDR
) {
4000 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4001 MGMT_OP_ADD_REMOTE_OOB_DATA
,
4002 MGMT_STATUS_INVALID_PARAMS
,
4003 &cp
->addr
, sizeof(cp
->addr
));
4007 err
= hci_add_remote_oob_data(hdev
, &cp
->addr
.bdaddr
,
4008 cp
->addr
.type
, cp
->hash
,
4009 cp
->rand
, NULL
, NULL
);
4011 status
= MGMT_STATUS_FAILED
;
4013 status
= MGMT_STATUS_SUCCESS
;
4015 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4016 MGMT_OP_ADD_REMOTE_OOB_DATA
, status
,
4017 &cp
->addr
, sizeof(cp
->addr
));
4018 } else if (len
== MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE
) {
4019 struct mgmt_cp_add_remote_oob_ext_data
*cp
= data
;
4020 u8
*rand192
, *hash192
, *rand256
, *hash256
;
4023 if (bdaddr_type_is_le(cp
->addr
.type
)) {
4024 /* Enforce zero-valued 192-bit parameters as
4025 * long as legacy SMP OOB isn't implemented.
4027 if (memcmp(cp
->rand192
, ZERO_KEY
, 16) ||
4028 memcmp(cp
->hash192
, ZERO_KEY
, 16)) {
4029 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4030 MGMT_OP_ADD_REMOTE_OOB_DATA
,
4031 MGMT_STATUS_INVALID_PARAMS
,
4032 addr
, sizeof(*addr
));
4039 /* In case one of the P-192 values is set to zero,
4040 * then just disable OOB data for P-192.
4042 if (!memcmp(cp
->rand192
, ZERO_KEY
, 16) ||
4043 !memcmp(cp
->hash192
, ZERO_KEY
, 16)) {
4047 rand192
= cp
->rand192
;
4048 hash192
= cp
->hash192
;
4052 /* In case one of the P-256 values is set to zero, then just
4053 * disable OOB data for P-256.
4055 if (!memcmp(cp
->rand256
, ZERO_KEY
, 16) ||
4056 !memcmp(cp
->hash256
, ZERO_KEY
, 16)) {
4060 rand256
= cp
->rand256
;
4061 hash256
= cp
->hash256
;
4064 err
= hci_add_remote_oob_data(hdev
, &cp
->addr
.bdaddr
,
4065 cp
->addr
.type
, hash192
, rand192
,
4068 status
= MGMT_STATUS_FAILED
;
4070 status
= MGMT_STATUS_SUCCESS
;
4072 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4073 MGMT_OP_ADD_REMOTE_OOB_DATA
,
4074 status
, &cp
->addr
, sizeof(cp
->addr
));
4076 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len
);
4077 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_REMOTE_OOB_DATA
,
4078 MGMT_STATUS_INVALID_PARAMS
);
4082 hci_dev_unlock(hdev
);
4086 static int remove_remote_oob_data(struct sock
*sk
, struct hci_dev
*hdev
,
4087 void *data
, u16 len
)
4089 struct mgmt_cp_remove_remote_oob_data
*cp
= data
;
4093 BT_DBG("%s", hdev
->name
);
4095 if (cp
->addr
.type
!= BDADDR_BREDR
)
4096 return mgmt_cmd_complete(sk
, hdev
->id
,
4097 MGMT_OP_REMOVE_REMOTE_OOB_DATA
,
4098 MGMT_STATUS_INVALID_PARAMS
,
4099 &cp
->addr
, sizeof(cp
->addr
));
4103 if (!bacmp(&cp
->addr
.bdaddr
, BDADDR_ANY
)) {
4104 hci_remote_oob_data_clear(hdev
);
4105 status
= MGMT_STATUS_SUCCESS
;
4109 err
= hci_remove_remote_oob_data(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
);
4111 status
= MGMT_STATUS_INVALID_PARAMS
;
4113 status
= MGMT_STATUS_SUCCESS
;
4116 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_REMOTE_OOB_DATA
,
4117 status
, &cp
->addr
, sizeof(cp
->addr
));
4119 hci_dev_unlock(hdev
);
4123 void mgmt_start_discovery_complete(struct hci_dev
*hdev
, u8 status
)
4125 struct mgmt_pending_cmd
*cmd
;
4127 BT_DBG("status %d", status
);
4131 cmd
= pending_find(MGMT_OP_START_DISCOVERY
, hdev
);
4133 cmd
= pending_find(MGMT_OP_START_SERVICE_DISCOVERY
, hdev
);
4136 cmd
->cmd_complete(cmd
, mgmt_status(status
));
4137 mgmt_pending_remove(cmd
);
4140 hci_dev_unlock(hdev
);
4143 static bool discovery_type_is_valid(struct hci_dev
*hdev
, uint8_t type
,
4144 uint8_t *mgmt_status
)
4147 case DISCOV_TYPE_LE
:
4148 *mgmt_status
= mgmt_le_support(hdev
);
4152 case DISCOV_TYPE_INTERLEAVED
:
4153 *mgmt_status
= mgmt_le_support(hdev
);
4156 /* Intentional fall-through */
4157 case DISCOV_TYPE_BREDR
:
4158 *mgmt_status
= mgmt_bredr_support(hdev
);
4163 *mgmt_status
= MGMT_STATUS_INVALID_PARAMS
;
4170 static int start_discovery(struct sock
*sk
, struct hci_dev
*hdev
,
4171 void *data
, u16 len
)
4173 struct mgmt_cp_start_discovery
*cp
= data
;
4174 struct mgmt_pending_cmd
*cmd
;
4178 BT_DBG("%s", hdev
->name
);
4182 if (!hdev_is_powered(hdev
)) {
4183 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
4184 MGMT_STATUS_NOT_POWERED
,
4185 &cp
->type
, sizeof(cp
->type
));
4189 if (hdev
->discovery
.state
!= DISCOVERY_STOPPED
||
4190 hci_dev_test_flag(hdev
, HCI_PERIODIC_INQ
)) {
4191 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
4192 MGMT_STATUS_BUSY
, &cp
->type
,
4197 if (!discovery_type_is_valid(hdev
, cp
->type
, &status
)) {
4198 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
4199 status
, &cp
->type
, sizeof(cp
->type
));
4203 /* Clear the discovery filter first to free any previously
4204 * allocated memory for the UUID list.
4206 hci_discovery_filter_clear(hdev
);
4208 hdev
->discovery
.type
= cp
->type
;
4209 hdev
->discovery
.report_invalid_rssi
= false;
4211 cmd
= mgmt_pending_add(sk
, MGMT_OP_START_DISCOVERY
, hdev
, data
, len
);
4217 cmd
->cmd_complete
= generic_cmd_complete
;
4219 hci_discovery_set_state(hdev
, DISCOVERY_STARTING
);
4220 queue_work(hdev
->req_workqueue
, &hdev
->discov_update
);
4224 hci_dev_unlock(hdev
);
4228 static int service_discovery_cmd_complete(struct mgmt_pending_cmd
*cmd
,
4231 return mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, status
,
4235 static int start_service_discovery(struct sock
*sk
, struct hci_dev
*hdev
,
4236 void *data
, u16 len
)
4238 struct mgmt_cp_start_service_discovery
*cp
= data
;
4239 struct mgmt_pending_cmd
*cmd
;
4240 const u16 max_uuid_count
= ((U16_MAX
- sizeof(*cp
)) / 16);
4241 u16 uuid_count
, expected_len
;
4245 BT_DBG("%s", hdev
->name
);
4249 if (!hdev_is_powered(hdev
)) {
4250 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4251 MGMT_OP_START_SERVICE_DISCOVERY
,
4252 MGMT_STATUS_NOT_POWERED
,
4253 &cp
->type
, sizeof(cp
->type
));
4257 if (hdev
->discovery
.state
!= DISCOVERY_STOPPED
||
4258 hci_dev_test_flag(hdev
, HCI_PERIODIC_INQ
)) {
4259 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4260 MGMT_OP_START_SERVICE_DISCOVERY
,
4261 MGMT_STATUS_BUSY
, &cp
->type
,
4266 uuid_count
= __le16_to_cpu(cp
->uuid_count
);
4267 if (uuid_count
> max_uuid_count
) {
4268 BT_ERR("service_discovery: too big uuid_count value %u",
4270 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4271 MGMT_OP_START_SERVICE_DISCOVERY
,
4272 MGMT_STATUS_INVALID_PARAMS
, &cp
->type
,
4277 expected_len
= sizeof(*cp
) + uuid_count
* 16;
4278 if (expected_len
!= len
) {
4279 BT_ERR("service_discovery: expected %u bytes, got %u bytes",
4281 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4282 MGMT_OP_START_SERVICE_DISCOVERY
,
4283 MGMT_STATUS_INVALID_PARAMS
, &cp
->type
,
4288 if (!discovery_type_is_valid(hdev
, cp
->type
, &status
)) {
4289 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4290 MGMT_OP_START_SERVICE_DISCOVERY
,
4291 status
, &cp
->type
, sizeof(cp
->type
));
4295 cmd
= mgmt_pending_add(sk
, MGMT_OP_START_SERVICE_DISCOVERY
,
4302 cmd
->cmd_complete
= service_discovery_cmd_complete
;
4304 /* Clear the discovery filter first to free any previously
4305 * allocated memory for the UUID list.
4307 hci_discovery_filter_clear(hdev
);
4309 hdev
->discovery
.result_filtering
= true;
4310 hdev
->discovery
.type
= cp
->type
;
4311 hdev
->discovery
.rssi
= cp
->rssi
;
4312 hdev
->discovery
.uuid_count
= uuid_count
;
4314 if (uuid_count
> 0) {
4315 hdev
->discovery
.uuids
= kmemdup(cp
->uuids
, uuid_count
* 16,
4317 if (!hdev
->discovery
.uuids
) {
4318 err
= mgmt_cmd_complete(sk
, hdev
->id
,
4319 MGMT_OP_START_SERVICE_DISCOVERY
,
4321 &cp
->type
, sizeof(cp
->type
));
4322 mgmt_pending_remove(cmd
);
4327 hci_discovery_set_state(hdev
, DISCOVERY_STARTING
);
4328 queue_work(hdev
->req_workqueue
, &hdev
->discov_update
);
4332 hci_dev_unlock(hdev
);
4336 void mgmt_stop_discovery_complete(struct hci_dev
*hdev
, u8 status
)
4338 struct mgmt_pending_cmd
*cmd
;
4340 BT_DBG("status %d", status
);
4344 cmd
= pending_find(MGMT_OP_STOP_DISCOVERY
, hdev
);
4346 cmd
->cmd_complete(cmd
, mgmt_status(status
));
4347 mgmt_pending_remove(cmd
);
4350 hci_dev_unlock(hdev
);
4353 static int stop_discovery(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4356 struct mgmt_cp_stop_discovery
*mgmt_cp
= data
;
4357 struct mgmt_pending_cmd
*cmd
;
4360 BT_DBG("%s", hdev
->name
);
4364 if (!hci_discovery_active(hdev
)) {
4365 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_STOP_DISCOVERY
,
4366 MGMT_STATUS_REJECTED
, &mgmt_cp
->type
,
4367 sizeof(mgmt_cp
->type
));
4371 if (hdev
->discovery
.type
!= mgmt_cp
->type
) {
4372 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_STOP_DISCOVERY
,
4373 MGMT_STATUS_INVALID_PARAMS
,
4374 &mgmt_cp
->type
, sizeof(mgmt_cp
->type
));
4378 cmd
= mgmt_pending_add(sk
, MGMT_OP_STOP_DISCOVERY
, hdev
, data
, len
);
4384 cmd
->cmd_complete
= generic_cmd_complete
;
4386 hci_discovery_set_state(hdev
, DISCOVERY_STOPPING
);
4387 queue_work(hdev
->req_workqueue
, &hdev
->discov_update
);
4391 hci_dev_unlock(hdev
);
4395 static int confirm_name(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4398 struct mgmt_cp_confirm_name
*cp
= data
;
4399 struct inquiry_entry
*e
;
4402 BT_DBG("%s", hdev
->name
);
4406 if (!hci_discovery_active(hdev
)) {
4407 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_CONFIRM_NAME
,
4408 MGMT_STATUS_FAILED
, &cp
->addr
,
4413 e
= hci_inquiry_cache_lookup_unknown(hdev
, &cp
->addr
.bdaddr
);
4415 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_CONFIRM_NAME
,
4416 MGMT_STATUS_INVALID_PARAMS
, &cp
->addr
,
4421 if (cp
->name_known
) {
4422 e
->name_state
= NAME_KNOWN
;
4425 e
->name_state
= NAME_NEEDED
;
4426 hci_inquiry_cache_update_resolve(hdev
, e
);
4429 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_CONFIRM_NAME
, 0,
4430 &cp
->addr
, sizeof(cp
->addr
));
4433 hci_dev_unlock(hdev
);
4437 static int block_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4440 struct mgmt_cp_block_device
*cp
= data
;
4444 BT_DBG("%s", hdev
->name
);
4446 if (!bdaddr_type_is_valid(cp
->addr
.type
))
4447 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_BLOCK_DEVICE
,
4448 MGMT_STATUS_INVALID_PARAMS
,
4449 &cp
->addr
, sizeof(cp
->addr
));
4453 err
= hci_bdaddr_list_add(&hdev
->blacklist
, &cp
->addr
.bdaddr
,
4456 status
= MGMT_STATUS_FAILED
;
4460 mgmt_event(MGMT_EV_DEVICE_BLOCKED
, hdev
, &cp
->addr
, sizeof(cp
->addr
),
4462 status
= MGMT_STATUS_SUCCESS
;
4465 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_BLOCK_DEVICE
, status
,
4466 &cp
->addr
, sizeof(cp
->addr
));
4468 hci_dev_unlock(hdev
);
4473 static int unblock_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4476 struct mgmt_cp_unblock_device
*cp
= data
;
4480 BT_DBG("%s", hdev
->name
);
4482 if (!bdaddr_type_is_valid(cp
->addr
.type
))
4483 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNBLOCK_DEVICE
,
4484 MGMT_STATUS_INVALID_PARAMS
,
4485 &cp
->addr
, sizeof(cp
->addr
));
4489 err
= hci_bdaddr_list_del(&hdev
->blacklist
, &cp
->addr
.bdaddr
,
4492 status
= MGMT_STATUS_INVALID_PARAMS
;
4496 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED
, hdev
, &cp
->addr
, sizeof(cp
->addr
),
4498 status
= MGMT_STATUS_SUCCESS
;
4501 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_UNBLOCK_DEVICE
, status
,
4502 &cp
->addr
, sizeof(cp
->addr
));
4504 hci_dev_unlock(hdev
);
4509 static int set_device_id(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4512 struct mgmt_cp_set_device_id
*cp
= data
;
4513 struct hci_request req
;
4517 BT_DBG("%s", hdev
->name
);
4519 source
= __le16_to_cpu(cp
->source
);
4521 if (source
> 0x0002)
4522 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEVICE_ID
,
4523 MGMT_STATUS_INVALID_PARAMS
);
4527 hdev
->devid_source
= source
;
4528 hdev
->devid_vendor
= __le16_to_cpu(cp
->vendor
);
4529 hdev
->devid_product
= __le16_to_cpu(cp
->product
);
4530 hdev
->devid_version
= __le16_to_cpu(cp
->version
);
4532 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_DEVICE_ID
, 0,
4535 hci_req_init(&req
, hdev
);
4537 hci_req_run(&req
, NULL
);
4539 hci_dev_unlock(hdev
);
4544 static void enable_advertising_instance(struct hci_dev
*hdev
, u8 status
,
4547 BT_DBG("status %d", status
);
4550 static void set_advertising_complete(struct hci_dev
*hdev
, u8 status
,
4553 struct cmd_lookup match
= { NULL
, hdev
};
4554 struct hci_request req
;
4556 struct adv_info
*adv_instance
;
4562 u8 mgmt_err
= mgmt_status(status
);
4564 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING
, hdev
,
4565 cmd_status_rsp
, &mgmt_err
);
4569 if (hci_dev_test_flag(hdev
, HCI_LE_ADV
))
4570 hci_dev_set_flag(hdev
, HCI_ADVERTISING
);
4572 hci_dev_clear_flag(hdev
, HCI_ADVERTISING
);
4574 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING
, hdev
, settings_rsp
,
4577 new_settings(hdev
, match
.sk
);
4582 /* If "Set Advertising" was just disabled and instance advertising was
4583 * set up earlier, then re-enable multi-instance advertising.
4585 if (hci_dev_test_flag(hdev
, HCI_ADVERTISING
) ||
4586 !hci_dev_test_flag(hdev
, HCI_ADVERTISING_INSTANCE
) ||
4587 list_empty(&hdev
->adv_instances
))
4590 instance
= hdev
->cur_adv_instance
;
4592 adv_instance
= list_first_entry_or_null(&hdev
->adv_instances
,
4593 struct adv_info
, list
);
4597 instance
= adv_instance
->instance
;
4600 hci_req_init(&req
, hdev
);
4602 err
= schedule_adv_instance(&req
, instance
, true);
4605 err
= hci_req_run(&req
, enable_advertising_instance
);
4608 BT_ERR("Failed to re-configure advertising");
4611 hci_dev_unlock(hdev
);
4614 static int set_advertising(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4617 struct mgmt_mode
*cp
= data
;
4618 struct mgmt_pending_cmd
*cmd
;
4619 struct hci_request req
;
4623 BT_DBG("request for %s", hdev
->name
);
4625 status
= mgmt_le_support(hdev
);
4627 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
4630 if (cp
->val
!= 0x00 && cp
->val
!= 0x01 && cp
->val
!= 0x02)
4631 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
4632 MGMT_STATUS_INVALID_PARAMS
);
4638 /* The following conditions are ones which mean that we should
4639 * not do any HCI communication but directly send a mgmt
4640 * response to user space (after toggling the flag if
4643 if (!hdev_is_powered(hdev
) ||
4644 (val
== hci_dev_test_flag(hdev
, HCI_ADVERTISING
) &&
4645 (cp
->val
== 0x02) == hci_dev_test_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
)) ||
4646 hci_conn_num(hdev
, LE_LINK
) > 0 ||
4647 (hci_dev_test_flag(hdev
, HCI_LE_SCAN
) &&
4648 hdev
->le_scan_type
== LE_SCAN_ACTIVE
)) {
4652 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_ADVERTISING
);
4653 if (cp
->val
== 0x02)
4654 hci_dev_set_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
);
4656 hci_dev_clear_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
);
4658 changed
= hci_dev_test_and_clear_flag(hdev
, HCI_ADVERTISING
);
4659 hci_dev_clear_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
);
4662 err
= send_settings_rsp(sk
, MGMT_OP_SET_ADVERTISING
, hdev
);
4667 err
= new_settings(hdev
, sk
);
4672 if (pending_find(MGMT_OP_SET_ADVERTISING
, hdev
) ||
4673 pending_find(MGMT_OP_SET_LE
, hdev
)) {
4674 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
4679 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_ADVERTISING
, hdev
, data
, len
);
4685 hci_req_init(&req
, hdev
);
4687 if (cp
->val
== 0x02)
4688 hci_dev_set_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
);
4690 hci_dev_clear_flag(hdev
, HCI_ADVERTISING_CONNECTABLE
);
4692 cancel_adv_timeout(hdev
);
4695 /* Switch to instance "0" for the Set Advertising setting.
4696 * We cannot use update_[adv|scan_rsp]_data() here as the
4697 * HCI_ADVERTISING flag is not yet set.
4699 update_inst_adv_data(&req
, 0x00);
4700 update_inst_scan_rsp_data(&req
, 0x00);
4701 enable_advertising(&req
);
4703 disable_advertising(&req
);
4706 err
= hci_req_run(&req
, set_advertising_complete
);
4708 mgmt_pending_remove(cmd
);
4711 hci_dev_unlock(hdev
);
4715 static int set_static_address(struct sock
*sk
, struct hci_dev
*hdev
,
4716 void *data
, u16 len
)
4718 struct mgmt_cp_set_static_address
*cp
= data
;
4721 BT_DBG("%s", hdev
->name
);
4723 if (!lmp_le_capable(hdev
))
4724 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_STATIC_ADDRESS
,
4725 MGMT_STATUS_NOT_SUPPORTED
);
4727 if (hdev_is_powered(hdev
))
4728 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_STATIC_ADDRESS
,
4729 MGMT_STATUS_REJECTED
);
4731 if (bacmp(&cp
->bdaddr
, BDADDR_ANY
)) {
4732 if (!bacmp(&cp
->bdaddr
, BDADDR_NONE
))
4733 return mgmt_cmd_status(sk
, hdev
->id
,
4734 MGMT_OP_SET_STATIC_ADDRESS
,
4735 MGMT_STATUS_INVALID_PARAMS
);
4737 /* Two most significant bits shall be set */
4738 if ((cp
->bdaddr
.b
[5] & 0xc0) != 0xc0)
4739 return mgmt_cmd_status(sk
, hdev
->id
,
4740 MGMT_OP_SET_STATIC_ADDRESS
,
4741 MGMT_STATUS_INVALID_PARAMS
);
4746 bacpy(&hdev
->static_addr
, &cp
->bdaddr
);
4748 err
= send_settings_rsp(sk
, MGMT_OP_SET_STATIC_ADDRESS
, hdev
);
4752 err
= new_settings(hdev
, sk
);
4755 hci_dev_unlock(hdev
);
4759 static int set_scan_params(struct sock
*sk
, struct hci_dev
*hdev
,
4760 void *data
, u16 len
)
4762 struct mgmt_cp_set_scan_params
*cp
= data
;
4763 __u16 interval
, window
;
4766 BT_DBG("%s", hdev
->name
);
4768 if (!lmp_le_capable(hdev
))
4769 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
4770 MGMT_STATUS_NOT_SUPPORTED
);
4772 interval
= __le16_to_cpu(cp
->interval
);
4774 if (interval
< 0x0004 || interval
> 0x4000)
4775 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
4776 MGMT_STATUS_INVALID_PARAMS
);
4778 window
= __le16_to_cpu(cp
->window
);
4780 if (window
< 0x0004 || window
> 0x4000)
4781 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
4782 MGMT_STATUS_INVALID_PARAMS
);
4784 if (window
> interval
)
4785 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
4786 MGMT_STATUS_INVALID_PARAMS
);
4790 hdev
->le_scan_interval
= interval
;
4791 hdev
->le_scan_window
= window
;
4793 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
, 0,
4796 /* If background scan is running, restart it so new parameters are
4799 if (hci_dev_test_flag(hdev
, HCI_LE_SCAN
) &&
4800 hdev
->discovery
.state
== DISCOVERY_STOPPED
) {
4801 struct hci_request req
;
4803 hci_req_init(&req
, hdev
);
4805 hci_req_add_le_scan_disable(&req
);
4806 hci_req_add_le_passive_scan(&req
);
4808 hci_req_run(&req
, NULL
);
4811 hci_dev_unlock(hdev
);
4816 static void fast_connectable_complete(struct hci_dev
*hdev
, u8 status
,
4819 struct mgmt_pending_cmd
*cmd
;
4821 BT_DBG("status 0x%02x", status
);
4825 cmd
= pending_find(MGMT_OP_SET_FAST_CONNECTABLE
, hdev
);
4830 mgmt_cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4831 mgmt_status(status
));
4833 struct mgmt_mode
*cp
= cmd
->param
;
4836 hci_dev_set_flag(hdev
, HCI_FAST_CONNECTABLE
);
4838 hci_dev_clear_flag(hdev
, HCI_FAST_CONNECTABLE
);
4840 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_FAST_CONNECTABLE
, hdev
);
4841 new_settings(hdev
, cmd
->sk
);
4844 mgmt_pending_remove(cmd
);
4847 hci_dev_unlock(hdev
);
4850 static int set_fast_connectable(struct sock
*sk
, struct hci_dev
*hdev
,
4851 void *data
, u16 len
)
4853 struct mgmt_mode
*cp
= data
;
4854 struct mgmt_pending_cmd
*cmd
;
4855 struct hci_request req
;
4858 BT_DBG("%s", hdev
->name
);
4860 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
) ||
4861 hdev
->hci_ver
< BLUETOOTH_VER_1_2
)
4862 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4863 MGMT_STATUS_NOT_SUPPORTED
);
4865 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
4866 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4867 MGMT_STATUS_INVALID_PARAMS
);
4871 if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE
, hdev
)) {
4872 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4877 if (!!cp
->val
== hci_dev_test_flag(hdev
, HCI_FAST_CONNECTABLE
)) {
4878 err
= send_settings_rsp(sk
, MGMT_OP_SET_FAST_CONNECTABLE
,
4883 if (!hdev_is_powered(hdev
)) {
4884 hci_dev_change_flag(hdev
, HCI_FAST_CONNECTABLE
);
4885 err
= send_settings_rsp(sk
, MGMT_OP_SET_FAST_CONNECTABLE
,
4887 new_settings(hdev
, sk
);
4891 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_FAST_CONNECTABLE
, hdev
,
4898 hci_req_init(&req
, hdev
);
4900 write_fast_connectable(&req
, cp
->val
);
4902 err
= hci_req_run(&req
, fast_connectable_complete
);
4904 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4905 MGMT_STATUS_FAILED
);
4906 mgmt_pending_remove(cmd
);
4910 hci_dev_unlock(hdev
);
4915 static void set_bredr_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
4917 struct mgmt_pending_cmd
*cmd
;
4919 BT_DBG("status 0x%02x", status
);
4923 cmd
= pending_find(MGMT_OP_SET_BREDR
, hdev
);
4928 u8 mgmt_err
= mgmt_status(status
);
4930 /* We need to restore the flag if related HCI commands
4933 hci_dev_clear_flag(hdev
, HCI_BREDR_ENABLED
);
4935 mgmt_cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_err
);
4937 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_BREDR
, hdev
);
4938 new_settings(hdev
, cmd
->sk
);
4941 mgmt_pending_remove(cmd
);
4944 hci_dev_unlock(hdev
);
4947 static int set_bredr(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
4949 struct mgmt_mode
*cp
= data
;
4950 struct mgmt_pending_cmd
*cmd
;
4951 struct hci_request req
;
4954 BT_DBG("request for %s", hdev
->name
);
4956 if (!lmp_bredr_capable(hdev
) || !lmp_le_capable(hdev
))
4957 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
4958 MGMT_STATUS_NOT_SUPPORTED
);
4960 if (!hci_dev_test_flag(hdev
, HCI_LE_ENABLED
))
4961 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
4962 MGMT_STATUS_REJECTED
);
4964 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
4965 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
4966 MGMT_STATUS_INVALID_PARAMS
);
4970 if (cp
->val
== hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
)) {
4971 err
= send_settings_rsp(sk
, MGMT_OP_SET_BREDR
, hdev
);
4975 if (!hdev_is_powered(hdev
)) {
4977 hci_dev_clear_flag(hdev
, HCI_DISCOVERABLE
);
4978 hci_dev_clear_flag(hdev
, HCI_SSP_ENABLED
);
4979 hci_dev_clear_flag(hdev
, HCI_LINK_SECURITY
);
4980 hci_dev_clear_flag(hdev
, HCI_FAST_CONNECTABLE
);
4981 hci_dev_clear_flag(hdev
, HCI_HS_ENABLED
);
4984 hci_dev_change_flag(hdev
, HCI_BREDR_ENABLED
);
4986 err
= send_settings_rsp(sk
, MGMT_OP_SET_BREDR
, hdev
);
4990 err
= new_settings(hdev
, sk
);
4994 /* Reject disabling when powered on */
4996 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
4997 MGMT_STATUS_REJECTED
);
5000 /* When configuring a dual-mode controller to operate
5001 * with LE only and using a static address, then switching
5002 * BR/EDR back on is not allowed.
5004 * Dual-mode controllers shall operate with the public
5005 * address as its identity address for BR/EDR and LE. So
5006 * reject the attempt to create an invalid configuration.
5008 * The same restrictions applies when secure connections
5009 * has been enabled. For BR/EDR this is a controller feature
5010 * while for LE it is a host stack feature. This means that
5011 * switching BR/EDR back on when secure connections has been
5012 * enabled is not a supported transaction.
5014 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
) &&
5015 (bacmp(&hdev
->static_addr
, BDADDR_ANY
) ||
5016 hci_dev_test_flag(hdev
, HCI_SC_ENABLED
))) {
5017 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
5018 MGMT_STATUS_REJECTED
);
5023 if (pending_find(MGMT_OP_SET_BREDR
, hdev
)) {
5024 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
5029 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_BREDR
, hdev
, data
, len
);
5035 /* We need to flip the bit already here so that update_adv_data
5036 * generates the correct flags.
5038 hci_dev_set_flag(hdev
, HCI_BREDR_ENABLED
);
5040 hci_req_init(&req
, hdev
);
5042 write_fast_connectable(&req
, false);
5043 __hci_update_page_scan(&req
);
5045 /* Since only the advertising data flags will change, there
5046 * is no need to update the scan response data.
5048 update_adv_data(&req
);
5050 err
= hci_req_run(&req
, set_bredr_complete
);
5052 mgmt_pending_remove(cmd
);
5055 hci_dev_unlock(hdev
);
5059 static void sc_enable_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
5061 struct mgmt_pending_cmd
*cmd
;
5062 struct mgmt_mode
*cp
;
5064 BT_DBG("%s status %u", hdev
->name
, status
);
5068 cmd
= pending_find(MGMT_OP_SET_SECURE_CONN
, hdev
);
5073 mgmt_cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
,
5074 mgmt_status(status
));
5082 hci_dev_clear_flag(hdev
, HCI_SC_ENABLED
);
5083 hci_dev_clear_flag(hdev
, HCI_SC_ONLY
);
5086 hci_dev_set_flag(hdev
, HCI_SC_ENABLED
);
5087 hci_dev_clear_flag(hdev
, HCI_SC_ONLY
);
5090 hci_dev_set_flag(hdev
, HCI_SC_ENABLED
);
5091 hci_dev_set_flag(hdev
, HCI_SC_ONLY
);
5095 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_SECURE_CONN
, hdev
);
5096 new_settings(hdev
, cmd
->sk
);
5099 mgmt_pending_remove(cmd
);
5101 hci_dev_unlock(hdev
);
5104 static int set_secure_conn(struct sock
*sk
, struct hci_dev
*hdev
,
5105 void *data
, u16 len
)
5107 struct mgmt_mode
*cp
= data
;
5108 struct mgmt_pending_cmd
*cmd
;
5109 struct hci_request req
;
5113 BT_DBG("request for %s", hdev
->name
);
5115 if (!lmp_sc_capable(hdev
) &&
5116 !hci_dev_test_flag(hdev
, HCI_LE_ENABLED
))
5117 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
5118 MGMT_STATUS_NOT_SUPPORTED
);
5120 if (hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
) &&
5121 lmp_sc_capable(hdev
) &&
5122 !hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
))
5123 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
5124 MGMT_STATUS_REJECTED
);
5126 if (cp
->val
!= 0x00 && cp
->val
!= 0x01 && cp
->val
!= 0x02)
5127 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
5128 MGMT_STATUS_INVALID_PARAMS
);
5132 if (!hdev_is_powered(hdev
) || !lmp_sc_capable(hdev
) ||
5133 !hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
)) {
5137 changed
= !hci_dev_test_and_set_flag(hdev
,
5139 if (cp
->val
== 0x02)
5140 hci_dev_set_flag(hdev
, HCI_SC_ONLY
);
5142 hci_dev_clear_flag(hdev
, HCI_SC_ONLY
);
5144 changed
= hci_dev_test_and_clear_flag(hdev
,
5146 hci_dev_clear_flag(hdev
, HCI_SC_ONLY
);
5149 err
= send_settings_rsp(sk
, MGMT_OP_SET_SECURE_CONN
, hdev
);
5154 err
= new_settings(hdev
, sk
);
5159 if (pending_find(MGMT_OP_SET_SECURE_CONN
, hdev
)) {
5160 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
5167 if (val
== hci_dev_test_flag(hdev
, HCI_SC_ENABLED
) &&
5168 (cp
->val
== 0x02) == hci_dev_test_flag(hdev
, HCI_SC_ONLY
)) {
5169 err
= send_settings_rsp(sk
, MGMT_OP_SET_SECURE_CONN
, hdev
);
5173 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_SECURE_CONN
, hdev
, data
, len
);
5179 hci_req_init(&req
, hdev
);
5180 hci_req_add(&req
, HCI_OP_WRITE_SC_SUPPORT
, 1, &val
);
5181 err
= hci_req_run(&req
, sc_enable_complete
);
5183 mgmt_pending_remove(cmd
);
5188 hci_dev_unlock(hdev
);
5192 static int set_debug_keys(struct sock
*sk
, struct hci_dev
*hdev
,
5193 void *data
, u16 len
)
5195 struct mgmt_mode
*cp
= data
;
5196 bool changed
, use_changed
;
5199 BT_DBG("request for %s", hdev
->name
);
5201 if (cp
->val
!= 0x00 && cp
->val
!= 0x01 && cp
->val
!= 0x02)
5202 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEBUG_KEYS
,
5203 MGMT_STATUS_INVALID_PARAMS
);
5208 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_KEEP_DEBUG_KEYS
);
5210 changed
= hci_dev_test_and_clear_flag(hdev
,
5211 HCI_KEEP_DEBUG_KEYS
);
5213 if (cp
->val
== 0x02)
5214 use_changed
= !hci_dev_test_and_set_flag(hdev
,
5215 HCI_USE_DEBUG_KEYS
);
5217 use_changed
= hci_dev_test_and_clear_flag(hdev
,
5218 HCI_USE_DEBUG_KEYS
);
5220 if (hdev_is_powered(hdev
) && use_changed
&&
5221 hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
)) {
5222 u8 mode
= (cp
->val
== 0x02) ? 0x01 : 0x00;
5223 hci_send_cmd(hdev
, HCI_OP_WRITE_SSP_DEBUG_MODE
,
5224 sizeof(mode
), &mode
);
5227 err
= send_settings_rsp(sk
, MGMT_OP_SET_DEBUG_KEYS
, hdev
);
5232 err
= new_settings(hdev
, sk
);
5235 hci_dev_unlock(hdev
);
5239 static int set_privacy(struct sock
*sk
, struct hci_dev
*hdev
, void *cp_data
,
5242 struct mgmt_cp_set_privacy
*cp
= cp_data
;
5246 BT_DBG("request for %s", hdev
->name
);
5248 if (!lmp_le_capable(hdev
))
5249 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PRIVACY
,
5250 MGMT_STATUS_NOT_SUPPORTED
);
5252 if (cp
->privacy
!= 0x00 && cp
->privacy
!= 0x01)
5253 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PRIVACY
,
5254 MGMT_STATUS_INVALID_PARAMS
);
5256 if (hdev_is_powered(hdev
))
5257 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PRIVACY
,
5258 MGMT_STATUS_REJECTED
);
5262 /* If user space supports this command it is also expected to
5263 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
5265 hci_dev_set_flag(hdev
, HCI_RPA_RESOLVING
);
5268 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_PRIVACY
);
5269 memcpy(hdev
->irk
, cp
->irk
, sizeof(hdev
->irk
));
5270 hci_dev_set_flag(hdev
, HCI_RPA_EXPIRED
);
5272 changed
= hci_dev_test_and_clear_flag(hdev
, HCI_PRIVACY
);
5273 memset(hdev
->irk
, 0, sizeof(hdev
->irk
));
5274 hci_dev_clear_flag(hdev
, HCI_RPA_EXPIRED
);
5277 err
= send_settings_rsp(sk
, MGMT_OP_SET_PRIVACY
, hdev
);
5282 err
= new_settings(hdev
, sk
);
5285 hci_dev_unlock(hdev
);
5289 static bool irk_is_valid(struct mgmt_irk_info
*irk
)
5291 switch (irk
->addr
.type
) {
5292 case BDADDR_LE_PUBLIC
:
5295 case BDADDR_LE_RANDOM
:
5296 /* Two most significant bits shall be set */
5297 if ((irk
->addr
.bdaddr
.b
[5] & 0xc0) != 0xc0)
5305 static int load_irks(struct sock
*sk
, struct hci_dev
*hdev
, void *cp_data
,
5308 struct mgmt_cp_load_irks
*cp
= cp_data
;
5309 const u16 max_irk_count
= ((U16_MAX
- sizeof(*cp
)) /
5310 sizeof(struct mgmt_irk_info
));
5311 u16 irk_count
, expected_len
;
5314 BT_DBG("request for %s", hdev
->name
);
5316 if (!lmp_le_capable(hdev
))
5317 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_IRKS
,
5318 MGMT_STATUS_NOT_SUPPORTED
);
5320 irk_count
= __le16_to_cpu(cp
->irk_count
);
5321 if (irk_count
> max_irk_count
) {
5322 BT_ERR("load_irks: too big irk_count value %u", irk_count
);
5323 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_IRKS
,
5324 MGMT_STATUS_INVALID_PARAMS
);
5327 expected_len
= sizeof(*cp
) + irk_count
* sizeof(struct mgmt_irk_info
);
5328 if (expected_len
!= len
) {
5329 BT_ERR("load_irks: expected %u bytes, got %u bytes",
5331 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_IRKS
,
5332 MGMT_STATUS_INVALID_PARAMS
);
5335 BT_DBG("%s irk_count %u", hdev
->name
, irk_count
);
5337 for (i
= 0; i
< irk_count
; i
++) {
5338 struct mgmt_irk_info
*key
= &cp
->irks
[i
];
5340 if (!irk_is_valid(key
))
5341 return mgmt_cmd_status(sk
, hdev
->id
,
5343 MGMT_STATUS_INVALID_PARAMS
);
5348 hci_smp_irks_clear(hdev
);
5350 for (i
= 0; i
< irk_count
; i
++) {
5351 struct mgmt_irk_info
*irk
= &cp
->irks
[i
];
5353 hci_add_irk(hdev
, &irk
->addr
.bdaddr
,
5354 le_addr_type(irk
->addr
.type
), irk
->val
,
5358 hci_dev_set_flag(hdev
, HCI_RPA_RESOLVING
);
5360 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_IRKS
, 0, NULL
, 0);
5362 hci_dev_unlock(hdev
);
5367 static bool ltk_is_valid(struct mgmt_ltk_info
*key
)
5369 if (key
->master
!= 0x00 && key
->master
!= 0x01)
5372 switch (key
->addr
.type
) {
5373 case BDADDR_LE_PUBLIC
:
5376 case BDADDR_LE_RANDOM
:
5377 /* Two most significant bits shall be set */
5378 if ((key
->addr
.bdaddr
.b
[5] & 0xc0) != 0xc0)
5386 static int load_long_term_keys(struct sock
*sk
, struct hci_dev
*hdev
,
5387 void *cp_data
, u16 len
)
5389 struct mgmt_cp_load_long_term_keys
*cp
= cp_data
;
5390 const u16 max_key_count
= ((U16_MAX
- sizeof(*cp
)) /
5391 sizeof(struct mgmt_ltk_info
));
5392 u16 key_count
, expected_len
;
5395 BT_DBG("request for %s", hdev
->name
);
5397 if (!lmp_le_capable(hdev
))
5398 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
,
5399 MGMT_STATUS_NOT_SUPPORTED
);
5401 key_count
= __le16_to_cpu(cp
->key_count
);
5402 if (key_count
> max_key_count
) {
5403 BT_ERR("load_ltks: too big key_count value %u", key_count
);
5404 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
,
5405 MGMT_STATUS_INVALID_PARAMS
);
5408 expected_len
= sizeof(*cp
) + key_count
*
5409 sizeof(struct mgmt_ltk_info
);
5410 if (expected_len
!= len
) {
5411 BT_ERR("load_keys: expected %u bytes, got %u bytes",
5413 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
,
5414 MGMT_STATUS_INVALID_PARAMS
);
5417 BT_DBG("%s key_count %u", hdev
->name
, key_count
);
5419 for (i
= 0; i
< key_count
; i
++) {
5420 struct mgmt_ltk_info
*key
= &cp
->keys
[i
];
5422 if (!ltk_is_valid(key
))
5423 return mgmt_cmd_status(sk
, hdev
->id
,
5424 MGMT_OP_LOAD_LONG_TERM_KEYS
,
5425 MGMT_STATUS_INVALID_PARAMS
);
5430 hci_smp_ltks_clear(hdev
);
5432 for (i
= 0; i
< key_count
; i
++) {
5433 struct mgmt_ltk_info
*key
= &cp
->keys
[i
];
5434 u8 type
, authenticated
;
5436 switch (key
->type
) {
5437 case MGMT_LTK_UNAUTHENTICATED
:
5438 authenticated
= 0x00;
5439 type
= key
->master
? SMP_LTK
: SMP_LTK_SLAVE
;
5441 case MGMT_LTK_AUTHENTICATED
:
5442 authenticated
= 0x01;
5443 type
= key
->master
? SMP_LTK
: SMP_LTK_SLAVE
;
5445 case MGMT_LTK_P256_UNAUTH
:
5446 authenticated
= 0x00;
5447 type
= SMP_LTK_P256
;
5449 case MGMT_LTK_P256_AUTH
:
5450 authenticated
= 0x01;
5451 type
= SMP_LTK_P256
;
5453 case MGMT_LTK_P256_DEBUG
:
5454 authenticated
= 0x00;
5455 type
= SMP_LTK_P256_DEBUG
;
5460 hci_add_ltk(hdev
, &key
->addr
.bdaddr
,
5461 le_addr_type(key
->addr
.type
), type
, authenticated
,
5462 key
->val
, key
->enc_size
, key
->ediv
, key
->rand
);
5465 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
, 0,
5468 hci_dev_unlock(hdev
);
5473 static int conn_info_cmd_complete(struct mgmt_pending_cmd
*cmd
, u8 status
)
5475 struct hci_conn
*conn
= cmd
->user_data
;
5476 struct mgmt_rp_get_conn_info rp
;
5479 memcpy(&rp
.addr
, cmd
->param
, sizeof(rp
.addr
));
5481 if (status
== MGMT_STATUS_SUCCESS
) {
5482 rp
.rssi
= conn
->rssi
;
5483 rp
.tx_power
= conn
->tx_power
;
5484 rp
.max_tx_power
= conn
->max_tx_power
;
5486 rp
.rssi
= HCI_RSSI_INVALID
;
5487 rp
.tx_power
= HCI_TX_POWER_INVALID
;
5488 rp
.max_tx_power
= HCI_TX_POWER_INVALID
;
5491 err
= mgmt_cmd_complete(cmd
->sk
, cmd
->index
, MGMT_OP_GET_CONN_INFO
,
5492 status
, &rp
, sizeof(rp
));
5494 hci_conn_drop(conn
);
5500 static void conn_info_refresh_complete(struct hci_dev
*hdev
, u8 hci_status
,
5503 struct hci_cp_read_rssi
*cp
;
5504 struct mgmt_pending_cmd
*cmd
;
5505 struct hci_conn
*conn
;
5509 BT_DBG("status 0x%02x", hci_status
);
5513 /* Commands sent in request are either Read RSSI or Read Transmit Power
5514 * Level so we check which one was last sent to retrieve connection
5515 * handle. Both commands have handle as first parameter so it's safe to
5516 * cast data on the same command struct.
5518 * First command sent is always Read RSSI and we fail only if it fails.
5519 * In other case we simply override error to indicate success as we
5520 * already remembered if TX power value is actually valid.
5522 cp
= hci_sent_cmd_data(hdev
, HCI_OP_READ_RSSI
);
5524 cp
= hci_sent_cmd_data(hdev
, HCI_OP_READ_TX_POWER
);
5525 status
= MGMT_STATUS_SUCCESS
;
5527 status
= mgmt_status(hci_status
);
5531 BT_ERR("invalid sent_cmd in conn_info response");
5535 handle
= __le16_to_cpu(cp
->handle
);
5536 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
5538 BT_ERR("unknown handle (%d) in conn_info response", handle
);
5542 cmd
= pending_find_data(MGMT_OP_GET_CONN_INFO
, hdev
, conn
);
5546 cmd
->cmd_complete(cmd
, status
);
5547 mgmt_pending_remove(cmd
);
5550 hci_dev_unlock(hdev
);
5553 static int get_conn_info(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
5556 struct mgmt_cp_get_conn_info
*cp
= data
;
5557 struct mgmt_rp_get_conn_info rp
;
5558 struct hci_conn
*conn
;
5559 unsigned long conn_info_age
;
5562 BT_DBG("%s", hdev
->name
);
5564 memset(&rp
, 0, sizeof(rp
));
5565 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
5566 rp
.addr
.type
= cp
->addr
.type
;
5568 if (!bdaddr_type_is_valid(cp
->addr
.type
))
5569 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
5570 MGMT_STATUS_INVALID_PARAMS
,
5575 if (!hdev_is_powered(hdev
)) {
5576 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
5577 MGMT_STATUS_NOT_POWERED
, &rp
,
5582 if (cp
->addr
.type
== BDADDR_BREDR
)
5583 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
5586 conn
= hci_conn_hash_lookup_ba(hdev
, LE_LINK
, &cp
->addr
.bdaddr
);
5588 if (!conn
|| conn
->state
!= BT_CONNECTED
) {
5589 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
5590 MGMT_STATUS_NOT_CONNECTED
, &rp
,
5595 if (pending_find_data(MGMT_OP_GET_CONN_INFO
, hdev
, conn
)) {
5596 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
5597 MGMT_STATUS_BUSY
, &rp
, sizeof(rp
));
5601 /* To avoid client trying to guess when to poll again for information we
5602 * calculate conn info age as random value between min/max set in hdev.
5604 conn_info_age
= hdev
->conn_info_min_age
+
5605 prandom_u32_max(hdev
->conn_info_max_age
-
5606 hdev
->conn_info_min_age
);
5608 /* Query controller to refresh cached values if they are too old or were
5611 if (time_after(jiffies
, conn
->conn_info_timestamp
+
5612 msecs_to_jiffies(conn_info_age
)) ||
5613 !conn
->conn_info_timestamp
) {
5614 struct hci_request req
;
5615 struct hci_cp_read_tx_power req_txp_cp
;
5616 struct hci_cp_read_rssi req_rssi_cp
;
5617 struct mgmt_pending_cmd
*cmd
;
5619 hci_req_init(&req
, hdev
);
5620 req_rssi_cp
.handle
= cpu_to_le16(conn
->handle
);
5621 hci_req_add(&req
, HCI_OP_READ_RSSI
, sizeof(req_rssi_cp
),
5624 /* For LE links TX power does not change thus we don't need to
5625 * query for it once value is known.
5627 if (!bdaddr_type_is_le(cp
->addr
.type
) ||
5628 conn
->tx_power
== HCI_TX_POWER_INVALID
) {
5629 req_txp_cp
.handle
= cpu_to_le16(conn
->handle
);
5630 req_txp_cp
.type
= 0x00;
5631 hci_req_add(&req
, HCI_OP_READ_TX_POWER
,
5632 sizeof(req_txp_cp
), &req_txp_cp
);
5635 /* Max TX power needs to be read only once per connection */
5636 if (conn
->max_tx_power
== HCI_TX_POWER_INVALID
) {
5637 req_txp_cp
.handle
= cpu_to_le16(conn
->handle
);
5638 req_txp_cp
.type
= 0x01;
5639 hci_req_add(&req
, HCI_OP_READ_TX_POWER
,
5640 sizeof(req_txp_cp
), &req_txp_cp
);
5643 err
= hci_req_run(&req
, conn_info_refresh_complete
);
5647 cmd
= mgmt_pending_add(sk
, MGMT_OP_GET_CONN_INFO
, hdev
,
5654 hci_conn_hold(conn
);
5655 cmd
->user_data
= hci_conn_get(conn
);
5656 cmd
->cmd_complete
= conn_info_cmd_complete
;
5658 conn
->conn_info_timestamp
= jiffies
;
5660 /* Cache is valid, just reply with values cached in hci_conn */
5661 rp
.rssi
= conn
->rssi
;
5662 rp
.tx_power
= conn
->tx_power
;
5663 rp
.max_tx_power
= conn
->max_tx_power
;
5665 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
5666 MGMT_STATUS_SUCCESS
, &rp
, sizeof(rp
));
5670 hci_dev_unlock(hdev
);
5674 static int clock_info_cmd_complete(struct mgmt_pending_cmd
*cmd
, u8 status
)
5676 struct hci_conn
*conn
= cmd
->user_data
;
5677 struct mgmt_rp_get_clock_info rp
;
5678 struct hci_dev
*hdev
;
5681 memset(&rp
, 0, sizeof(rp
));
5682 memcpy(&rp
.addr
, &cmd
->param
, sizeof(rp
.addr
));
5687 hdev
= hci_dev_get(cmd
->index
);
5689 rp
.local_clock
= cpu_to_le32(hdev
->clock
);
5694 rp
.piconet_clock
= cpu_to_le32(conn
->clock
);
5695 rp
.accuracy
= cpu_to_le16(conn
->clock_accuracy
);
5699 err
= mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, status
, &rp
,
5703 hci_conn_drop(conn
);
5710 static void get_clock_info_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
5712 struct hci_cp_read_clock
*hci_cp
;
5713 struct mgmt_pending_cmd
*cmd
;
5714 struct hci_conn
*conn
;
5716 BT_DBG("%s status %u", hdev
->name
, status
);
5720 hci_cp
= hci_sent_cmd_data(hdev
, HCI_OP_READ_CLOCK
);
5724 if (hci_cp
->which
) {
5725 u16 handle
= __le16_to_cpu(hci_cp
->handle
);
5726 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
5731 cmd
= pending_find_data(MGMT_OP_GET_CLOCK_INFO
, hdev
, conn
);
5735 cmd
->cmd_complete(cmd
, mgmt_status(status
));
5736 mgmt_pending_remove(cmd
);
5739 hci_dev_unlock(hdev
);
5742 static int get_clock_info(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
5745 struct mgmt_cp_get_clock_info
*cp
= data
;
5746 struct mgmt_rp_get_clock_info rp
;
5747 struct hci_cp_read_clock hci_cp
;
5748 struct mgmt_pending_cmd
*cmd
;
5749 struct hci_request req
;
5750 struct hci_conn
*conn
;
5753 BT_DBG("%s", hdev
->name
);
5755 memset(&rp
, 0, sizeof(rp
));
5756 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
5757 rp
.addr
.type
= cp
->addr
.type
;
5759 if (cp
->addr
.type
!= BDADDR_BREDR
)
5760 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CLOCK_INFO
,
5761 MGMT_STATUS_INVALID_PARAMS
,
5766 if (!hdev_is_powered(hdev
)) {
5767 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CLOCK_INFO
,
5768 MGMT_STATUS_NOT_POWERED
, &rp
,
5773 if (bacmp(&cp
->addr
.bdaddr
, BDADDR_ANY
)) {
5774 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
5776 if (!conn
|| conn
->state
!= BT_CONNECTED
) {
5777 err
= mgmt_cmd_complete(sk
, hdev
->id
,
5778 MGMT_OP_GET_CLOCK_INFO
,
5779 MGMT_STATUS_NOT_CONNECTED
,
5787 cmd
= mgmt_pending_add(sk
, MGMT_OP_GET_CLOCK_INFO
, hdev
, data
, len
);
5793 cmd
->cmd_complete
= clock_info_cmd_complete
;
5795 hci_req_init(&req
, hdev
);
5797 memset(&hci_cp
, 0, sizeof(hci_cp
));
5798 hci_req_add(&req
, HCI_OP_READ_CLOCK
, sizeof(hci_cp
), &hci_cp
);
5801 hci_conn_hold(conn
);
5802 cmd
->user_data
= hci_conn_get(conn
);
5804 hci_cp
.handle
= cpu_to_le16(conn
->handle
);
5805 hci_cp
.which
= 0x01; /* Piconet clock */
5806 hci_req_add(&req
, HCI_OP_READ_CLOCK
, sizeof(hci_cp
), &hci_cp
);
5809 err
= hci_req_run(&req
, get_clock_info_complete
);
5811 mgmt_pending_remove(cmd
);
5814 hci_dev_unlock(hdev
);
5818 static bool is_connected(struct hci_dev
*hdev
, bdaddr_t
*addr
, u8 type
)
5820 struct hci_conn
*conn
;
5822 conn
= hci_conn_hash_lookup_ba(hdev
, LE_LINK
, addr
);
5826 if (conn
->dst_type
!= type
)
5829 if (conn
->state
!= BT_CONNECTED
)
5835 /* This function requires the caller holds hdev->lock */
5836 static int hci_conn_params_set(struct hci_dev
*hdev
, bdaddr_t
*addr
,
5837 u8 addr_type
, u8 auto_connect
)
5839 struct hci_conn_params
*params
;
5841 params
= hci_conn_params_add(hdev
, addr
, addr_type
);
5845 if (params
->auto_connect
== auto_connect
)
5848 list_del_init(¶ms
->action
);
5850 switch (auto_connect
) {
5851 case HCI_AUTO_CONN_DISABLED
:
5852 case HCI_AUTO_CONN_LINK_LOSS
:
5853 /* If auto connect is being disabled when we're trying to
5854 * connect to device, keep connecting.
5856 if (params
->explicit_connect
)
5857 list_add(¶ms
->action
, &hdev
->pend_le_conns
);
5859 case HCI_AUTO_CONN_REPORT
:
5860 if (params
->explicit_connect
)
5861 list_add(¶ms
->action
, &hdev
->pend_le_conns
);
5863 list_add(¶ms
->action
, &hdev
->pend_le_reports
);
5865 case HCI_AUTO_CONN_DIRECT
:
5866 case HCI_AUTO_CONN_ALWAYS
:
5867 if (!is_connected(hdev
, addr
, addr_type
))
5868 list_add(¶ms
->action
, &hdev
->pend_le_conns
);
5872 params
->auto_connect
= auto_connect
;
5874 BT_DBG("addr %pMR (type %u) auto_connect %u", addr
, addr_type
,
5880 static void device_added(struct sock
*sk
, struct hci_dev
*hdev
,
5881 bdaddr_t
*bdaddr
, u8 type
, u8 action
)
5883 struct mgmt_ev_device_added ev
;
5885 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
5886 ev
.addr
.type
= type
;
5889 mgmt_event(MGMT_EV_DEVICE_ADDED
, hdev
, &ev
, sizeof(ev
), sk
);
5892 static int add_device(struct sock
*sk
, struct hci_dev
*hdev
,
5893 void *data
, u16 len
)
5895 struct mgmt_cp_add_device
*cp
= data
;
5896 u8 auto_conn
, addr_type
;
5899 BT_DBG("%s", hdev
->name
);
5901 if (!bdaddr_type_is_valid(cp
->addr
.type
) ||
5902 !bacmp(&cp
->addr
.bdaddr
, BDADDR_ANY
))
5903 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_DEVICE
,
5904 MGMT_STATUS_INVALID_PARAMS
,
5905 &cp
->addr
, sizeof(cp
->addr
));
5907 if (cp
->action
!= 0x00 && cp
->action
!= 0x01 && cp
->action
!= 0x02)
5908 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_DEVICE
,
5909 MGMT_STATUS_INVALID_PARAMS
,
5910 &cp
->addr
, sizeof(cp
->addr
));
5914 if (cp
->addr
.type
== BDADDR_BREDR
) {
5915 /* Only incoming connections action is supported for now */
5916 if (cp
->action
!= 0x01) {
5917 err
= mgmt_cmd_complete(sk
, hdev
->id
,
5919 MGMT_STATUS_INVALID_PARAMS
,
5920 &cp
->addr
, sizeof(cp
->addr
));
5924 err
= hci_bdaddr_list_add(&hdev
->whitelist
, &cp
->addr
.bdaddr
,
5929 hci_update_page_scan(hdev
);
5934 addr_type
= le_addr_type(cp
->addr
.type
);
5936 if (cp
->action
== 0x02)
5937 auto_conn
= HCI_AUTO_CONN_ALWAYS
;
5938 else if (cp
->action
== 0x01)
5939 auto_conn
= HCI_AUTO_CONN_DIRECT
;
5941 auto_conn
= HCI_AUTO_CONN_REPORT
;
5943 /* Kernel internally uses conn_params with resolvable private
5944 * address, but Add Device allows only identity addresses.
5945 * Make sure it is enforced before calling
5946 * hci_conn_params_lookup.
5948 if (!hci_is_identity_address(&cp
->addr
.bdaddr
, addr_type
)) {
5949 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_DEVICE
,
5950 MGMT_STATUS_INVALID_PARAMS
,
5951 &cp
->addr
, sizeof(cp
->addr
));
5955 /* If the connection parameters don't exist for this device,
5956 * they will be created and configured with defaults.
5958 if (hci_conn_params_set(hdev
, &cp
->addr
.bdaddr
, addr_type
,
5960 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_DEVICE
,
5961 MGMT_STATUS_FAILED
, &cp
->addr
,
5966 hci_update_background_scan(hdev
);
5969 device_added(sk
, hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
, cp
->action
);
5971 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_DEVICE
,
5972 MGMT_STATUS_SUCCESS
, &cp
->addr
,
5976 hci_dev_unlock(hdev
);
5980 static void device_removed(struct sock
*sk
, struct hci_dev
*hdev
,
5981 bdaddr_t
*bdaddr
, u8 type
)
5983 struct mgmt_ev_device_removed ev
;
5985 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
5986 ev
.addr
.type
= type
;
5988 mgmt_event(MGMT_EV_DEVICE_REMOVED
, hdev
, &ev
, sizeof(ev
), sk
);
5991 static int remove_device(struct sock
*sk
, struct hci_dev
*hdev
,
5992 void *data
, u16 len
)
5994 struct mgmt_cp_remove_device
*cp
= data
;
5997 BT_DBG("%s", hdev
->name
);
6001 if (bacmp(&cp
->addr
.bdaddr
, BDADDR_ANY
)) {
6002 struct hci_conn_params
*params
;
6005 if (!bdaddr_type_is_valid(cp
->addr
.type
)) {
6006 err
= mgmt_cmd_complete(sk
, hdev
->id
,
6007 MGMT_OP_REMOVE_DEVICE
,
6008 MGMT_STATUS_INVALID_PARAMS
,
6009 &cp
->addr
, sizeof(cp
->addr
));
6013 if (cp
->addr
.type
== BDADDR_BREDR
) {
6014 err
= hci_bdaddr_list_del(&hdev
->whitelist
,
6018 err
= mgmt_cmd_complete(sk
, hdev
->id
,
6019 MGMT_OP_REMOVE_DEVICE
,
6020 MGMT_STATUS_INVALID_PARAMS
,
6026 hci_update_page_scan(hdev
);
6028 device_removed(sk
, hdev
, &cp
->addr
.bdaddr
,
6033 addr_type
= le_addr_type(cp
->addr
.type
);
6035 /* Kernel internally uses conn_params with resolvable private
6036 * address, but Remove Device allows only identity addresses.
6037 * Make sure it is enforced before calling
6038 * hci_conn_params_lookup.
6040 if (!hci_is_identity_address(&cp
->addr
.bdaddr
, addr_type
)) {
6041 err
= mgmt_cmd_complete(sk
, hdev
->id
,
6042 MGMT_OP_REMOVE_DEVICE
,
6043 MGMT_STATUS_INVALID_PARAMS
,
6044 &cp
->addr
, sizeof(cp
->addr
));
6048 params
= hci_conn_params_lookup(hdev
, &cp
->addr
.bdaddr
,
6051 err
= mgmt_cmd_complete(sk
, hdev
->id
,
6052 MGMT_OP_REMOVE_DEVICE
,
6053 MGMT_STATUS_INVALID_PARAMS
,
6054 &cp
->addr
, sizeof(cp
->addr
));
6058 if (params
->auto_connect
== HCI_AUTO_CONN_DISABLED
||
6059 params
->auto_connect
== HCI_AUTO_CONN_EXPLICIT
) {
6060 err
= mgmt_cmd_complete(sk
, hdev
->id
,
6061 MGMT_OP_REMOVE_DEVICE
,
6062 MGMT_STATUS_INVALID_PARAMS
,
6063 &cp
->addr
, sizeof(cp
->addr
));
6067 list_del(¶ms
->action
);
6068 list_del(¶ms
->list
);
6070 hci_update_background_scan(hdev
);
6072 device_removed(sk
, hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
);
6074 struct hci_conn_params
*p
, *tmp
;
6075 struct bdaddr_list
*b
, *btmp
;
6077 if (cp
->addr
.type
) {
6078 err
= mgmt_cmd_complete(sk
, hdev
->id
,
6079 MGMT_OP_REMOVE_DEVICE
,
6080 MGMT_STATUS_INVALID_PARAMS
,
6081 &cp
->addr
, sizeof(cp
->addr
));
6085 list_for_each_entry_safe(b
, btmp
, &hdev
->whitelist
, list
) {
6086 device_removed(sk
, hdev
, &b
->bdaddr
, b
->bdaddr_type
);
6091 hci_update_page_scan(hdev
);
6093 list_for_each_entry_safe(p
, tmp
, &hdev
->le_conn_params
, list
) {
6094 if (p
->auto_connect
== HCI_AUTO_CONN_DISABLED
)
6096 device_removed(sk
, hdev
, &p
->addr
, p
->addr_type
);
6097 if (p
->explicit_connect
) {
6098 p
->auto_connect
= HCI_AUTO_CONN_EXPLICIT
;
6101 list_del(&p
->action
);
6106 BT_DBG("All LE connection parameters were removed");
6108 hci_update_background_scan(hdev
);
6112 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_DEVICE
,
6113 MGMT_STATUS_SUCCESS
, &cp
->addr
,
6116 hci_dev_unlock(hdev
);
6120 static int load_conn_param(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
6123 struct mgmt_cp_load_conn_param
*cp
= data
;
6124 const u16 max_param_count
= ((U16_MAX
- sizeof(*cp
)) /
6125 sizeof(struct mgmt_conn_param
));
6126 u16 param_count
, expected_len
;
6129 if (!lmp_le_capable(hdev
))
6130 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_CONN_PARAM
,
6131 MGMT_STATUS_NOT_SUPPORTED
);
6133 param_count
= __le16_to_cpu(cp
->param_count
);
6134 if (param_count
> max_param_count
) {
6135 BT_ERR("load_conn_param: too big param_count value %u",
6137 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_CONN_PARAM
,
6138 MGMT_STATUS_INVALID_PARAMS
);
6141 expected_len
= sizeof(*cp
) + param_count
*
6142 sizeof(struct mgmt_conn_param
);
6143 if (expected_len
!= len
) {
6144 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
6146 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_CONN_PARAM
,
6147 MGMT_STATUS_INVALID_PARAMS
);
6150 BT_DBG("%s param_count %u", hdev
->name
, param_count
);
6154 hci_conn_params_clear_disabled(hdev
);
6156 for (i
= 0; i
< param_count
; i
++) {
6157 struct mgmt_conn_param
*param
= &cp
->params
[i
];
6158 struct hci_conn_params
*hci_param
;
6159 u16 min
, max
, latency
, timeout
;
6162 BT_DBG("Adding %pMR (type %u)", ¶m
->addr
.bdaddr
,
6165 if (param
->addr
.type
== BDADDR_LE_PUBLIC
) {
6166 addr_type
= ADDR_LE_DEV_PUBLIC
;
6167 } else if (param
->addr
.type
== BDADDR_LE_RANDOM
) {
6168 addr_type
= ADDR_LE_DEV_RANDOM
;
6170 BT_ERR("Ignoring invalid connection parameters");
6174 min
= le16_to_cpu(param
->min_interval
);
6175 max
= le16_to_cpu(param
->max_interval
);
6176 latency
= le16_to_cpu(param
->latency
);
6177 timeout
= le16_to_cpu(param
->timeout
);
6179 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
6180 min
, max
, latency
, timeout
);
6182 if (hci_check_conn_params(min
, max
, latency
, timeout
) < 0) {
6183 BT_ERR("Ignoring invalid connection parameters");
6187 hci_param
= hci_conn_params_add(hdev
, ¶m
->addr
.bdaddr
,
6190 BT_ERR("Failed to add connection parameters");
6194 hci_param
->conn_min_interval
= min
;
6195 hci_param
->conn_max_interval
= max
;
6196 hci_param
->conn_latency
= latency
;
6197 hci_param
->supervision_timeout
= timeout
;
6200 hci_dev_unlock(hdev
);
6202 return mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_CONN_PARAM
, 0,
6206 static int set_external_config(struct sock
*sk
, struct hci_dev
*hdev
,
6207 void *data
, u16 len
)
6209 struct mgmt_cp_set_external_config
*cp
= data
;
6213 BT_DBG("%s", hdev
->name
);
6215 if (hdev_is_powered(hdev
))
6216 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_EXTERNAL_CONFIG
,
6217 MGMT_STATUS_REJECTED
);
6219 if (cp
->config
!= 0x00 && cp
->config
!= 0x01)
6220 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_EXTERNAL_CONFIG
,
6221 MGMT_STATUS_INVALID_PARAMS
);
6223 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
))
6224 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_EXTERNAL_CONFIG
,
6225 MGMT_STATUS_NOT_SUPPORTED
);
6230 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_EXT_CONFIGURED
);
6232 changed
= hci_dev_test_and_clear_flag(hdev
, HCI_EXT_CONFIGURED
);
6234 err
= send_options_rsp(sk
, MGMT_OP_SET_EXTERNAL_CONFIG
, hdev
);
6241 err
= new_options(hdev
, sk
);
6243 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
) == is_configured(hdev
)) {
6244 mgmt_index_removed(hdev
);
6246 if (hci_dev_test_and_change_flag(hdev
, HCI_UNCONFIGURED
)) {
6247 hci_dev_set_flag(hdev
, HCI_CONFIG
);
6248 hci_dev_set_flag(hdev
, HCI_AUTO_OFF
);
6250 queue_work(hdev
->req_workqueue
, &hdev
->power_on
);
6252 set_bit(HCI_RAW
, &hdev
->flags
);
6253 mgmt_index_added(hdev
);
6258 hci_dev_unlock(hdev
);
6262 static int set_public_address(struct sock
*sk
, struct hci_dev
*hdev
,
6263 void *data
, u16 len
)
6265 struct mgmt_cp_set_public_address
*cp
= data
;
6269 BT_DBG("%s", hdev
->name
);
6271 if (hdev_is_powered(hdev
))
6272 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PUBLIC_ADDRESS
,
6273 MGMT_STATUS_REJECTED
);
6275 if (!bacmp(&cp
->bdaddr
, BDADDR_ANY
))
6276 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PUBLIC_ADDRESS
,
6277 MGMT_STATUS_INVALID_PARAMS
);
6279 if (!hdev
->set_bdaddr
)
6280 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PUBLIC_ADDRESS
,
6281 MGMT_STATUS_NOT_SUPPORTED
);
6285 changed
= !!bacmp(&hdev
->public_addr
, &cp
->bdaddr
);
6286 bacpy(&hdev
->public_addr
, &cp
->bdaddr
);
6288 err
= send_options_rsp(sk
, MGMT_OP_SET_PUBLIC_ADDRESS
, hdev
);
6295 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
))
6296 err
= new_options(hdev
, sk
);
6298 if (is_configured(hdev
)) {
6299 mgmt_index_removed(hdev
);
6301 hci_dev_clear_flag(hdev
, HCI_UNCONFIGURED
);
6303 hci_dev_set_flag(hdev
, HCI_CONFIG
);
6304 hci_dev_set_flag(hdev
, HCI_AUTO_OFF
);
6306 queue_work(hdev
->req_workqueue
, &hdev
->power_on
);
6310 hci_dev_unlock(hdev
);
6314 static inline u16
eir_append_data(u8
*eir
, u16 eir_len
, u8 type
, u8
*data
,
6317 eir
[eir_len
++] = sizeof(type
) + data_len
;
6318 eir
[eir_len
++] = type
;
6319 memcpy(&eir
[eir_len
], data
, data_len
);
6320 eir_len
+= data_len
;
6325 static void read_local_oob_ext_data_complete(struct hci_dev
*hdev
, u8 status
,
6326 u16 opcode
, struct sk_buff
*skb
)
6328 const struct mgmt_cp_read_local_oob_ext_data
*mgmt_cp
;
6329 struct mgmt_rp_read_local_oob_ext_data
*mgmt_rp
;
6330 u8
*h192
, *r192
, *h256
, *r256
;
6331 struct mgmt_pending_cmd
*cmd
;
6335 BT_DBG("%s status %u", hdev
->name
, status
);
6337 cmd
= pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA
, hdev
);
6341 mgmt_cp
= cmd
->param
;
6344 status
= mgmt_status(status
);
6351 } else if (opcode
== HCI_OP_READ_LOCAL_OOB_DATA
) {
6352 struct hci_rp_read_local_oob_data
*rp
;
6354 if (skb
->len
!= sizeof(*rp
)) {
6355 status
= MGMT_STATUS_FAILED
;
6358 status
= MGMT_STATUS_SUCCESS
;
6359 rp
= (void *)skb
->data
;
6361 eir_len
= 5 + 18 + 18;
6368 struct hci_rp_read_local_oob_ext_data
*rp
;
6370 if (skb
->len
!= sizeof(*rp
)) {
6371 status
= MGMT_STATUS_FAILED
;
6374 status
= MGMT_STATUS_SUCCESS
;
6375 rp
= (void *)skb
->data
;
6377 if (hci_dev_test_flag(hdev
, HCI_SC_ONLY
)) {
6378 eir_len
= 5 + 18 + 18;
6382 eir_len
= 5 + 18 + 18 + 18 + 18;
6392 mgmt_rp
= kmalloc(sizeof(*mgmt_rp
) + eir_len
, GFP_KERNEL
);
6399 eir_len
= eir_append_data(mgmt_rp
->eir
, 0, EIR_CLASS_OF_DEV
,
6400 hdev
->dev_class
, 3);
6403 eir_len
= eir_append_data(mgmt_rp
->eir
, eir_len
,
6404 EIR_SSP_HASH_C192
, h192
, 16);
6405 eir_len
= eir_append_data(mgmt_rp
->eir
, eir_len
,
6406 EIR_SSP_RAND_R192
, r192
, 16);
6410 eir_len
= eir_append_data(mgmt_rp
->eir
, eir_len
,
6411 EIR_SSP_HASH_C256
, h256
, 16);
6412 eir_len
= eir_append_data(mgmt_rp
->eir
, eir_len
,
6413 EIR_SSP_RAND_R256
, r256
, 16);
6417 mgmt_rp
->type
= mgmt_cp
->type
;
6418 mgmt_rp
->eir_len
= cpu_to_le16(eir_len
);
6420 err
= mgmt_cmd_complete(cmd
->sk
, hdev
->id
,
6421 MGMT_OP_READ_LOCAL_OOB_EXT_DATA
, status
,
6422 mgmt_rp
, sizeof(*mgmt_rp
) + eir_len
);
6423 if (err
< 0 || status
)
6426 hci_sock_set_flag(cmd
->sk
, HCI_MGMT_OOB_DATA_EVENTS
);
6428 err
= mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED
, hdev
,
6429 mgmt_rp
, sizeof(*mgmt_rp
) + eir_len
,
6430 HCI_MGMT_OOB_DATA_EVENTS
, cmd
->sk
);
6433 mgmt_pending_remove(cmd
);
6436 static int read_local_ssp_oob_req(struct hci_dev
*hdev
, struct sock
*sk
,
6437 struct mgmt_cp_read_local_oob_ext_data
*cp
)
6439 struct mgmt_pending_cmd
*cmd
;
6440 struct hci_request req
;
6443 cmd
= mgmt_pending_add(sk
, MGMT_OP_READ_LOCAL_OOB_EXT_DATA
, hdev
,
6448 hci_req_init(&req
, hdev
);
6450 if (bredr_sc_enabled(hdev
))
6451 hci_req_add(&req
, HCI_OP_READ_LOCAL_OOB_EXT_DATA
, 0, NULL
);
6453 hci_req_add(&req
, HCI_OP_READ_LOCAL_OOB_DATA
, 0, NULL
);
6455 err
= hci_req_run_skb(&req
, read_local_oob_ext_data_complete
);
6457 mgmt_pending_remove(cmd
);
6464 static int read_local_oob_ext_data(struct sock
*sk
, struct hci_dev
*hdev
,
6465 void *data
, u16 data_len
)
6467 struct mgmt_cp_read_local_oob_ext_data
*cp
= data
;
6468 struct mgmt_rp_read_local_oob_ext_data
*rp
;
6471 u8 status
, flags
, role
, addr
[7], hash
[16], rand
[16];
6474 BT_DBG("%s", hdev
->name
);
6476 if (hdev_is_powered(hdev
)) {
6478 case BIT(BDADDR_BREDR
):
6479 status
= mgmt_bredr_support(hdev
);
6485 case (BIT(BDADDR_LE_PUBLIC
) | BIT(BDADDR_LE_RANDOM
)):
6486 status
= mgmt_le_support(hdev
);
6490 eir_len
= 9 + 3 + 18 + 18 + 3;
6493 status
= MGMT_STATUS_INVALID_PARAMS
;
6498 status
= MGMT_STATUS_NOT_POWERED
;
6502 rp_len
= sizeof(*rp
) + eir_len
;
6503 rp
= kmalloc(rp_len
, GFP_ATOMIC
);
6514 case BIT(BDADDR_BREDR
):
6515 if (hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
)) {
6516 err
= read_local_ssp_oob_req(hdev
, sk
, cp
);
6517 hci_dev_unlock(hdev
);
6521 status
= MGMT_STATUS_FAILED
;
6524 eir_len
= eir_append_data(rp
->eir
, eir_len
,
6526 hdev
->dev_class
, 3);
6529 case (BIT(BDADDR_LE_PUBLIC
) | BIT(BDADDR_LE_RANDOM
)):
6530 if (hci_dev_test_flag(hdev
, HCI_SC_ENABLED
) &&
6531 smp_generate_oob(hdev
, hash
, rand
) < 0) {
6532 hci_dev_unlock(hdev
);
6533 status
= MGMT_STATUS_FAILED
;
6537 /* This should return the active RPA, but since the RPA
6538 * is only programmed on demand, it is really hard to fill
6539 * this in at the moment. For now disallow retrieving
6540 * local out-of-band data when privacy is in use.
6542 * Returning the identity address will not help here since
6543 * pairing happens before the identity resolving key is
6544 * known and thus the connection establishment happens
6545 * based on the RPA and not the identity address.
6547 if (hci_dev_test_flag(hdev
, HCI_PRIVACY
)) {
6548 hci_dev_unlock(hdev
);
6549 status
= MGMT_STATUS_REJECTED
;
6553 if (hci_dev_test_flag(hdev
, HCI_FORCE_STATIC_ADDR
) ||
6554 !bacmp(&hdev
->bdaddr
, BDADDR_ANY
) ||
6555 (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
) &&
6556 bacmp(&hdev
->static_addr
, BDADDR_ANY
))) {
6557 memcpy(addr
, &hdev
->static_addr
, 6);
6560 memcpy(addr
, &hdev
->bdaddr
, 6);
6564 eir_len
= eir_append_data(rp
->eir
, eir_len
, EIR_LE_BDADDR
,
6565 addr
, sizeof(addr
));
6567 if (hci_dev_test_flag(hdev
, HCI_ADVERTISING
))
6572 eir_len
= eir_append_data(rp
->eir
, eir_len
, EIR_LE_ROLE
,
6573 &role
, sizeof(role
));
6575 if (hci_dev_test_flag(hdev
, HCI_SC_ENABLED
)) {
6576 eir_len
= eir_append_data(rp
->eir
, eir_len
,
6578 hash
, sizeof(hash
));
6580 eir_len
= eir_append_data(rp
->eir
, eir_len
,
6582 rand
, sizeof(rand
));
6585 flags
= get_adv_discov_flags(hdev
);
6587 if (!hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
))
6588 flags
|= LE_AD_NO_BREDR
;
6590 eir_len
= eir_append_data(rp
->eir
, eir_len
, EIR_FLAGS
,
6591 &flags
, sizeof(flags
));
6595 hci_dev_unlock(hdev
);
6597 hci_sock_set_flag(sk
, HCI_MGMT_OOB_DATA_EVENTS
);
6599 status
= MGMT_STATUS_SUCCESS
;
6602 rp
->type
= cp
->type
;
6603 rp
->eir_len
= cpu_to_le16(eir_len
);
6605 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_EXT_DATA
,
6606 status
, rp
, sizeof(*rp
) + eir_len
);
6607 if (err
< 0 || status
)
6610 err
= mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED
, hdev
,
6611 rp
, sizeof(*rp
) + eir_len
,
6612 HCI_MGMT_OOB_DATA_EVENTS
, sk
);
6620 static u32
get_supported_adv_flags(struct hci_dev
*hdev
)
6624 flags
|= MGMT_ADV_FLAG_CONNECTABLE
;
6625 flags
|= MGMT_ADV_FLAG_DISCOV
;
6626 flags
|= MGMT_ADV_FLAG_LIMITED_DISCOV
;
6627 flags
|= MGMT_ADV_FLAG_MANAGED_FLAGS
;
6629 if (hdev
->adv_tx_power
!= HCI_TX_POWER_INVALID
)
6630 flags
|= MGMT_ADV_FLAG_TX_POWER
;
6635 static int read_adv_features(struct sock
*sk
, struct hci_dev
*hdev
,
6636 void *data
, u16 data_len
)
6638 struct mgmt_rp_read_adv_features
*rp
;
6642 struct adv_info
*adv_instance
;
6643 u32 supported_flags
;
6645 BT_DBG("%s", hdev
->name
);
6647 if (!lmp_le_capable(hdev
))
6648 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_READ_ADV_FEATURES
,
6649 MGMT_STATUS_REJECTED
);
6653 rp_len
= sizeof(*rp
);
6655 instance
= hci_dev_test_flag(hdev
, HCI_ADVERTISING_INSTANCE
);
6657 rp_len
+= hdev
->adv_instance_cnt
;
6659 rp
= kmalloc(rp_len
, GFP_ATOMIC
);
6661 hci_dev_unlock(hdev
);
6665 supported_flags
= get_supported_adv_flags(hdev
);
6667 rp
->supported_flags
= cpu_to_le32(supported_flags
);
6668 rp
->max_adv_data_len
= HCI_MAX_AD_LENGTH
;
6669 rp
->max_scan_rsp_len
= HCI_MAX_AD_LENGTH
;
6670 rp
->max_instances
= HCI_MAX_ADV_INSTANCES
;
6674 list_for_each_entry(adv_instance
, &hdev
->adv_instances
, list
) {
6675 if (i
>= hdev
->adv_instance_cnt
)
6678 rp
->instance
[i
] = adv_instance
->instance
;
6681 rp
->num_instances
= hdev
->adv_instance_cnt
;
6683 rp
->num_instances
= 0;
6686 hci_dev_unlock(hdev
);
6688 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_READ_ADV_FEATURES
,
6689 MGMT_STATUS_SUCCESS
, rp
, rp_len
);
6696 static bool tlv_data_is_valid(struct hci_dev
*hdev
, u32 adv_flags
, u8
*data
,
6697 u8 len
, bool is_adv_data
)
6699 u8 max_len
= HCI_MAX_AD_LENGTH
;
6701 bool flags_managed
= false;
6702 bool tx_power_managed
= false;
6703 u32 flags_params
= MGMT_ADV_FLAG_DISCOV
| MGMT_ADV_FLAG_LIMITED_DISCOV
|
6704 MGMT_ADV_FLAG_MANAGED_FLAGS
;
6706 if (is_adv_data
&& (adv_flags
& flags_params
)) {
6707 flags_managed
= true;
6711 if (is_adv_data
&& (adv_flags
& MGMT_ADV_FLAG_TX_POWER
)) {
6712 tx_power_managed
= true;
6719 /* Make sure that the data is correctly formatted. */
6720 for (i
= 0, cur_len
= 0; i
< len
; i
+= (cur_len
+ 1)) {
6723 if (flags_managed
&& data
[i
+ 1] == EIR_FLAGS
)
6726 if (tx_power_managed
&& data
[i
+ 1] == EIR_TX_POWER
)
6729 /* If the current field length would exceed the total data
6730 * length, then it's invalid.
6732 if (i
+ cur_len
>= len
)
6739 static void add_advertising_complete(struct hci_dev
*hdev
, u8 status
,
6742 struct mgmt_pending_cmd
*cmd
;
6743 struct mgmt_cp_add_advertising
*cp
;
6744 struct mgmt_rp_add_advertising rp
;
6745 struct adv_info
*adv_instance
, *n
;
6748 BT_DBG("status %d", status
);
6752 cmd
= pending_find(MGMT_OP_ADD_ADVERTISING
, hdev
);
6755 hci_dev_clear_flag(hdev
, HCI_ADVERTISING_INSTANCE
);
6757 list_for_each_entry_safe(adv_instance
, n
, &hdev
->adv_instances
, list
) {
6758 if (!adv_instance
->pending
)
6762 adv_instance
->pending
= false;
6766 instance
= adv_instance
->instance
;
6768 if (hdev
->cur_adv_instance
== instance
)
6769 cancel_adv_timeout(hdev
);
6771 hci_remove_adv_instance(hdev
, instance
);
6772 advertising_removed(cmd
? cmd
->sk
: NULL
, hdev
, instance
);
6779 rp
.instance
= cp
->instance
;
6782 mgmt_cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
,
6783 mgmt_status(status
));
6785 mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
,
6786 mgmt_status(status
), &rp
, sizeof(rp
));
6788 mgmt_pending_remove(cmd
);
6791 hci_dev_unlock(hdev
);
6794 void mgmt_adv_timeout_expired(struct hci_dev
*hdev
)
6797 struct hci_request req
;
6799 hdev
->adv_instance_timeout
= 0;
6801 instance
= get_current_adv_instance(hdev
);
6802 if (instance
== 0x00)
6806 hci_req_init(&req
, hdev
);
6808 clear_adv_instance(hdev
, &req
, instance
, false);
6810 if (list_empty(&hdev
->adv_instances
))
6811 disable_advertising(&req
);
6813 if (!skb_queue_empty(&req
.cmd_q
))
6814 hci_req_run(&req
, NULL
);
6816 hci_dev_unlock(hdev
);
6819 static int add_advertising(struct sock
*sk
, struct hci_dev
*hdev
,
6820 void *data
, u16 data_len
)
6822 struct mgmt_cp_add_advertising
*cp
= data
;
6823 struct mgmt_rp_add_advertising rp
;
6825 u32 supported_flags
;
6827 u16 timeout
, duration
;
6828 unsigned int prev_instance_cnt
= hdev
->adv_instance_cnt
;
6829 u8 schedule_instance
= 0;
6830 struct adv_info
*next_instance
;
6832 struct mgmt_pending_cmd
*cmd
;
6833 struct hci_request req
;
6835 BT_DBG("%s", hdev
->name
);
6837 status
= mgmt_le_support(hdev
);
6839 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
6842 if (cp
->instance
< 1 || cp
->instance
> HCI_MAX_ADV_INSTANCES
)
6843 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
6844 MGMT_STATUS_INVALID_PARAMS
);
6846 flags
= __le32_to_cpu(cp
->flags
);
6847 timeout
= __le16_to_cpu(cp
->timeout
);
6848 duration
= __le16_to_cpu(cp
->duration
);
6850 /* The current implementation only supports a subset of the specified
6853 supported_flags
= get_supported_adv_flags(hdev
);
6854 if (flags
& ~supported_flags
)
6855 return mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
6856 MGMT_STATUS_INVALID_PARAMS
);
6860 if (timeout
&& !hdev_is_powered(hdev
)) {
6861 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
6862 MGMT_STATUS_REJECTED
);
6866 if (pending_find(MGMT_OP_ADD_ADVERTISING
, hdev
) ||
6867 pending_find(MGMT_OP_REMOVE_ADVERTISING
, hdev
) ||
6868 pending_find(MGMT_OP_SET_LE
, hdev
)) {
6869 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
6874 if (!tlv_data_is_valid(hdev
, flags
, cp
->data
, cp
->adv_data_len
, true) ||
6875 !tlv_data_is_valid(hdev
, flags
, cp
->data
+ cp
->adv_data_len
,
6876 cp
->scan_rsp_len
, false)) {
6877 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
6878 MGMT_STATUS_INVALID_PARAMS
);
6882 err
= hci_add_adv_instance(hdev
, cp
->instance
, flags
,
6883 cp
->adv_data_len
, cp
->data
,
6885 cp
->data
+ cp
->adv_data_len
,
6888 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
6889 MGMT_STATUS_FAILED
);
6893 /* Only trigger an advertising added event if a new instance was
6896 if (hdev
->adv_instance_cnt
> prev_instance_cnt
)
6897 advertising_added(sk
, hdev
, cp
->instance
);
6899 hci_dev_set_flag(hdev
, HCI_ADVERTISING_INSTANCE
);
6901 if (hdev
->cur_adv_instance
== cp
->instance
) {
6902 /* If the currently advertised instance is being changed then
6903 * cancel the current advertising and schedule the next
6904 * instance. If there is only one instance then the overridden
6905 * advertising data will be visible right away.
6907 cancel_adv_timeout(hdev
);
6909 next_instance
= hci_get_next_instance(hdev
, cp
->instance
);
6911 schedule_instance
= next_instance
->instance
;
6912 } else if (!hdev
->adv_instance_timeout
) {
6913 /* Immediately advertise the new instance if no other
6914 * instance is currently being advertised.
6916 schedule_instance
= cp
->instance
;
6919 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
6920 * there is no instance to be advertised then we have no HCI
6921 * communication to make. Simply return.
6923 if (!hdev_is_powered(hdev
) ||
6924 hci_dev_test_flag(hdev
, HCI_ADVERTISING
) ||
6925 !schedule_instance
) {
6926 rp
.instance
= cp
->instance
;
6927 err
= mgmt_cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_ADVERTISING
,
6928 MGMT_STATUS_SUCCESS
, &rp
, sizeof(rp
));
6932 /* We're good to go, update advertising data, parameters, and start
6935 cmd
= mgmt_pending_add(sk
, MGMT_OP_ADD_ADVERTISING
, hdev
, data
,
6942 hci_req_init(&req
, hdev
);
6944 err
= schedule_adv_instance(&req
, schedule_instance
, true);
6947 err
= hci_req_run(&req
, add_advertising_complete
);
6950 mgmt_pending_remove(cmd
);
6953 hci_dev_unlock(hdev
);
6958 static void remove_advertising_complete(struct hci_dev
*hdev
, u8 status
,
6961 struct mgmt_pending_cmd
*cmd
;
6962 struct mgmt_cp_remove_advertising
*cp
;
6963 struct mgmt_rp_remove_advertising rp
;
6965 BT_DBG("status %d", status
);
6969 /* A failure status here only means that we failed to disable
6970 * advertising. Otherwise, the advertising instance has been removed,
6971 * so report success.
6973 cmd
= pending_find(MGMT_OP_REMOVE_ADVERTISING
, hdev
);
6978 rp
.instance
= cp
->instance
;
6980 mgmt_cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, MGMT_STATUS_SUCCESS
,
6982 mgmt_pending_remove(cmd
);
6985 hci_dev_unlock(hdev
);
6988 static int remove_advertising(struct sock
*sk
, struct hci_dev
*hdev
,
6989 void *data
, u16 data_len
)
6991 struct mgmt_cp_remove_advertising
*cp
= data
;
6992 struct mgmt_rp_remove_advertising rp
;
6993 struct mgmt_pending_cmd
*cmd
;
6994 struct hci_request req
;
6997 BT_DBG("%s", hdev
->name
);
7001 if (cp
->instance
&& !hci_find_adv_instance(hdev
, cp
->instance
)) {
7002 err
= mgmt_cmd_status(sk
, hdev
->id
,
7003 MGMT_OP_REMOVE_ADVERTISING
,
7004 MGMT_STATUS_INVALID_PARAMS
);
7008 if (pending_find(MGMT_OP_ADD_ADVERTISING
, hdev
) ||
7009 pending_find(MGMT_OP_REMOVE_ADVERTISING
, hdev
) ||
7010 pending_find(MGMT_OP_SET_LE
, hdev
)) {
7011 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_ADVERTISING
,
7016 if (!hci_dev_test_flag(hdev
, HCI_ADVERTISING_INSTANCE
)) {
7017 err
= mgmt_cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_ADVERTISING
,
7018 MGMT_STATUS_INVALID_PARAMS
);
7022 hci_req_init(&req
, hdev
);
7024 clear_adv_instance(hdev
, &req
, cp
->instance
, true);
7026 if (list_empty(&hdev
->adv_instances
))
7027 disable_advertising(&req
);
7029 /* If no HCI commands have been collected so far or the HCI_ADVERTISING
7030 * flag is set or the device isn't powered then we have no HCI
7031 * communication to make. Simply return.
7033 if (skb_queue_empty(&req
.cmd_q
) ||
7034 !hdev_is_powered(hdev
) ||
7035 hci_dev_test_flag(hdev
, HCI_ADVERTISING
)) {
7036 rp
.instance
= cp
->instance
;
7037 err
= mgmt_cmd_complete(sk
, hdev
->id
,
7038 MGMT_OP_REMOVE_ADVERTISING
,
7039 MGMT_STATUS_SUCCESS
, &rp
, sizeof(rp
));
7043 cmd
= mgmt_pending_add(sk
, MGMT_OP_REMOVE_ADVERTISING
, hdev
, data
,
7050 err
= hci_req_run(&req
, remove_advertising_complete
);
7052 mgmt_pending_remove(cmd
);
7055 hci_dev_unlock(hdev
);
7060 static const struct hci_mgmt_handler mgmt_handlers
[] = {
7061 { NULL
}, /* 0x0000 (no command) */
7062 { read_version
, MGMT_READ_VERSION_SIZE
,
7064 HCI_MGMT_UNTRUSTED
},
7065 { read_commands
, MGMT_READ_COMMANDS_SIZE
,
7067 HCI_MGMT_UNTRUSTED
},
7068 { read_index_list
, MGMT_READ_INDEX_LIST_SIZE
,
7070 HCI_MGMT_UNTRUSTED
},
7071 { read_controller_info
, MGMT_READ_INFO_SIZE
,
7072 HCI_MGMT_UNTRUSTED
},
7073 { set_powered
, MGMT_SETTING_SIZE
},
7074 { set_discoverable
, MGMT_SET_DISCOVERABLE_SIZE
},
7075 { set_connectable
, MGMT_SETTING_SIZE
},
7076 { set_fast_connectable
, MGMT_SETTING_SIZE
},
7077 { set_bondable
, MGMT_SETTING_SIZE
},
7078 { set_link_security
, MGMT_SETTING_SIZE
},
7079 { set_ssp
, MGMT_SETTING_SIZE
},
7080 { set_hs
, MGMT_SETTING_SIZE
},
7081 { set_le
, MGMT_SETTING_SIZE
},
7082 { set_dev_class
, MGMT_SET_DEV_CLASS_SIZE
},
7083 { set_local_name
, MGMT_SET_LOCAL_NAME_SIZE
},
7084 { add_uuid
, MGMT_ADD_UUID_SIZE
},
7085 { remove_uuid
, MGMT_REMOVE_UUID_SIZE
},
7086 { load_link_keys
, MGMT_LOAD_LINK_KEYS_SIZE
,
7088 { load_long_term_keys
, MGMT_LOAD_LONG_TERM_KEYS_SIZE
,
7090 { disconnect
, MGMT_DISCONNECT_SIZE
},
7091 { get_connections
, MGMT_GET_CONNECTIONS_SIZE
},
7092 { pin_code_reply
, MGMT_PIN_CODE_REPLY_SIZE
},
7093 { pin_code_neg_reply
, MGMT_PIN_CODE_NEG_REPLY_SIZE
},
7094 { set_io_capability
, MGMT_SET_IO_CAPABILITY_SIZE
},
7095 { pair_device
, MGMT_PAIR_DEVICE_SIZE
},
7096 { cancel_pair_device
, MGMT_CANCEL_PAIR_DEVICE_SIZE
},
7097 { unpair_device
, MGMT_UNPAIR_DEVICE_SIZE
},
7098 { user_confirm_reply
, MGMT_USER_CONFIRM_REPLY_SIZE
},
7099 { user_confirm_neg_reply
, MGMT_USER_CONFIRM_NEG_REPLY_SIZE
},
7100 { user_passkey_reply
, MGMT_USER_PASSKEY_REPLY_SIZE
},
7101 { user_passkey_neg_reply
, MGMT_USER_PASSKEY_NEG_REPLY_SIZE
},
7102 { read_local_oob_data
, MGMT_READ_LOCAL_OOB_DATA_SIZE
},
7103 { add_remote_oob_data
, MGMT_ADD_REMOTE_OOB_DATA_SIZE
,
7105 { remove_remote_oob_data
, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE
},
7106 { start_discovery
, MGMT_START_DISCOVERY_SIZE
},
7107 { stop_discovery
, MGMT_STOP_DISCOVERY_SIZE
},
7108 { confirm_name
, MGMT_CONFIRM_NAME_SIZE
},
7109 { block_device
, MGMT_BLOCK_DEVICE_SIZE
},
7110 { unblock_device
, MGMT_UNBLOCK_DEVICE_SIZE
},
7111 { set_device_id
, MGMT_SET_DEVICE_ID_SIZE
},
7112 { set_advertising
, MGMT_SETTING_SIZE
},
7113 { set_bredr
, MGMT_SETTING_SIZE
},
7114 { set_static_address
, MGMT_SET_STATIC_ADDRESS_SIZE
},
7115 { set_scan_params
, MGMT_SET_SCAN_PARAMS_SIZE
},
7116 { set_secure_conn
, MGMT_SETTING_SIZE
},
7117 { set_debug_keys
, MGMT_SETTING_SIZE
},
7118 { set_privacy
, MGMT_SET_PRIVACY_SIZE
},
7119 { load_irks
, MGMT_LOAD_IRKS_SIZE
,
7121 { get_conn_info
, MGMT_GET_CONN_INFO_SIZE
},
7122 { get_clock_info
, MGMT_GET_CLOCK_INFO_SIZE
},
7123 { add_device
, MGMT_ADD_DEVICE_SIZE
},
7124 { remove_device
, MGMT_REMOVE_DEVICE_SIZE
},
7125 { load_conn_param
, MGMT_LOAD_CONN_PARAM_SIZE
,
7127 { read_unconf_index_list
, MGMT_READ_UNCONF_INDEX_LIST_SIZE
,
7129 HCI_MGMT_UNTRUSTED
},
7130 { read_config_info
, MGMT_READ_CONFIG_INFO_SIZE
,
7131 HCI_MGMT_UNCONFIGURED
|
7132 HCI_MGMT_UNTRUSTED
},
7133 { set_external_config
, MGMT_SET_EXTERNAL_CONFIG_SIZE
,
7134 HCI_MGMT_UNCONFIGURED
},
7135 { set_public_address
, MGMT_SET_PUBLIC_ADDRESS_SIZE
,
7136 HCI_MGMT_UNCONFIGURED
},
7137 { start_service_discovery
, MGMT_START_SERVICE_DISCOVERY_SIZE
,
7139 { read_local_oob_ext_data
, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE
},
7140 { read_ext_index_list
, MGMT_READ_EXT_INDEX_LIST_SIZE
,
7142 HCI_MGMT_UNTRUSTED
},
7143 { read_adv_features
, MGMT_READ_ADV_FEATURES_SIZE
},
7144 { add_advertising
, MGMT_ADD_ADVERTISING_SIZE
,
7146 { remove_advertising
, MGMT_REMOVE_ADVERTISING_SIZE
},
7149 void mgmt_index_added(struct hci_dev
*hdev
)
7151 struct mgmt_ev_ext_index ev
;
7153 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &hdev
->quirks
))
7156 switch (hdev
->dev_type
) {
7158 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
)) {
7159 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED
, hdev
,
7160 NULL
, 0, HCI_MGMT_UNCONF_INDEX_EVENTS
);
7163 mgmt_index_event(MGMT_EV_INDEX_ADDED
, hdev
, NULL
, 0,
7164 HCI_MGMT_INDEX_EVENTS
);
7177 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED
, hdev
, &ev
, sizeof(ev
),
7178 HCI_MGMT_EXT_INDEX_EVENTS
);
7181 void mgmt_index_removed(struct hci_dev
*hdev
)
7183 struct mgmt_ev_ext_index ev
;
7184 u8 status
= MGMT_STATUS_INVALID_INDEX
;
7186 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &hdev
->quirks
))
7189 switch (hdev
->dev_type
) {
7191 mgmt_pending_foreach(0, hdev
, cmd_complete_rsp
, &status
);
7193 if (hci_dev_test_flag(hdev
, HCI_UNCONFIGURED
)) {
7194 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED
, hdev
,
7195 NULL
, 0, HCI_MGMT_UNCONF_INDEX_EVENTS
);
7198 mgmt_index_event(MGMT_EV_INDEX_REMOVED
, hdev
, NULL
, 0,
7199 HCI_MGMT_INDEX_EVENTS
);
7212 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED
, hdev
, &ev
, sizeof(ev
),
7213 HCI_MGMT_EXT_INDEX_EVENTS
);
7216 /* This function requires the caller holds hdev->lock */
7217 static void restart_le_actions(struct hci_dev
*hdev
)
7219 struct hci_conn_params
*p
;
7221 list_for_each_entry(p
, &hdev
->le_conn_params
, list
) {
7222 /* Needed for AUTO_OFF case where might not "really"
7223 * have been powered off.
7225 list_del_init(&p
->action
);
7227 switch (p
->auto_connect
) {
7228 case HCI_AUTO_CONN_DIRECT
:
7229 case HCI_AUTO_CONN_ALWAYS
:
7230 list_add(&p
->action
, &hdev
->pend_le_conns
);
7232 case HCI_AUTO_CONN_REPORT
:
7233 list_add(&p
->action
, &hdev
->pend_le_reports
);
7241 static void powered_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
7243 struct cmd_lookup match
= { NULL
, hdev
};
7245 BT_DBG("status 0x%02x", status
);
7248 /* Register the available SMP channels (BR/EDR and LE) only
7249 * when successfully powering on the controller. This late
7250 * registration is required so that LE SMP can clearly
7251 * decide if the public address or static address is used.
7255 restart_le_actions(hdev
);
7256 hci_update_background_scan(hdev
);
7261 mgmt_pending_foreach(MGMT_OP_SET_POWERED
, hdev
, settings_rsp
, &match
);
7263 new_settings(hdev
, match
.sk
);
7265 hci_dev_unlock(hdev
);
7271 static int powered_update_hci(struct hci_dev
*hdev
)
7273 struct hci_request req
;
7274 struct adv_info
*adv_instance
;
7277 hci_req_init(&req
, hdev
);
7279 if (hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
) &&
7280 !lmp_host_ssp_capable(hdev
)) {
7283 hci_req_add(&req
, HCI_OP_WRITE_SSP_MODE
, sizeof(mode
), &mode
);
7285 if (bredr_sc_enabled(hdev
) && !lmp_host_sc_capable(hdev
)) {
7288 hci_req_add(&req
, HCI_OP_WRITE_SC_SUPPORT
,
7289 sizeof(support
), &support
);
7293 if (hci_dev_test_flag(hdev
, HCI_LE_ENABLED
) &&
7294 lmp_bredr_capable(hdev
)) {
7295 struct hci_cp_write_le_host_supported cp
;
7300 /* Check first if we already have the right
7301 * host state (host features set)
7303 if (cp
.le
!= lmp_host_le_capable(hdev
) ||
7304 cp
.simul
!= lmp_host_le_br_capable(hdev
))
7305 hci_req_add(&req
, HCI_OP_WRITE_LE_HOST_SUPPORTED
,
7309 if (lmp_le_capable(hdev
)) {
7310 /* Make sure the controller has a good default for
7311 * advertising data. This also applies to the case
7312 * where BR/EDR was toggled during the AUTO_OFF phase.
7314 if (hci_dev_test_flag(hdev
, HCI_LE_ENABLED
) &&
7315 (hci_dev_test_flag(hdev
, HCI_ADVERTISING
) ||
7316 !hci_dev_test_flag(hdev
, HCI_ADVERTISING_INSTANCE
))) {
7317 update_adv_data(&req
);
7318 update_scan_rsp_data(&req
);
7321 if (hci_dev_test_flag(hdev
, HCI_ADVERTISING_INSTANCE
) &&
7322 hdev
->cur_adv_instance
== 0x00 &&
7323 !list_empty(&hdev
->adv_instances
)) {
7324 adv_instance
= list_first_entry(&hdev
->adv_instances
,
7325 struct adv_info
, list
);
7326 hdev
->cur_adv_instance
= adv_instance
->instance
;
7329 if (hci_dev_test_flag(hdev
, HCI_ADVERTISING
))
7330 enable_advertising(&req
);
7331 else if (hci_dev_test_flag(hdev
, HCI_ADVERTISING_INSTANCE
) &&
7332 hdev
->cur_adv_instance
)
7333 schedule_adv_instance(&req
, hdev
->cur_adv_instance
,
7337 link_sec
= hci_dev_test_flag(hdev
, HCI_LINK_SECURITY
);
7338 if (link_sec
!= test_bit(HCI_AUTH
, &hdev
->flags
))
7339 hci_req_add(&req
, HCI_OP_WRITE_AUTH_ENABLE
,
7340 sizeof(link_sec
), &link_sec
);
7342 if (lmp_bredr_capable(hdev
)) {
7343 if (hci_dev_test_flag(hdev
, HCI_FAST_CONNECTABLE
))
7344 write_fast_connectable(&req
, true);
7346 write_fast_connectable(&req
, false);
7347 __hci_update_page_scan(&req
);
7353 return hci_req_run(&req
, powered_complete
);
7356 int mgmt_powered(struct hci_dev
*hdev
, u8 powered
)
7358 struct cmd_lookup match
= { NULL
, hdev
};
7359 u8 status
, zero_cod
[] = { 0, 0, 0 };
7362 if (!hci_dev_test_flag(hdev
, HCI_MGMT
))
7366 if (powered_update_hci(hdev
) == 0)
7369 mgmt_pending_foreach(MGMT_OP_SET_POWERED
, hdev
, settings_rsp
,
7374 mgmt_pending_foreach(MGMT_OP_SET_POWERED
, hdev
, settings_rsp
, &match
);
7376 /* If the power off is because of hdev unregistration let
7377 * use the appropriate INVALID_INDEX status. Otherwise use
7378 * NOT_POWERED. We cover both scenarios here since later in
7379 * mgmt_index_removed() any hci_conn callbacks will have already
7380 * been triggered, potentially causing misleading DISCONNECTED
7383 if (hci_dev_test_flag(hdev
, HCI_UNREGISTER
))
7384 status
= MGMT_STATUS_INVALID_INDEX
;
7386 status
= MGMT_STATUS_NOT_POWERED
;
7388 mgmt_pending_foreach(0, hdev
, cmd_complete_rsp
, &status
);
7390 if (memcmp(hdev
->dev_class
, zero_cod
, sizeof(zero_cod
)) != 0)
7391 mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED
, hdev
,
7392 zero_cod
, sizeof(zero_cod
), NULL
);
7395 err
= new_settings(hdev
, match
.sk
);
7403 void mgmt_set_powered_failed(struct hci_dev
*hdev
, int err
)
7405 struct mgmt_pending_cmd
*cmd
;
7408 cmd
= pending_find(MGMT_OP_SET_POWERED
, hdev
);
7412 if (err
== -ERFKILL
)
7413 status
= MGMT_STATUS_RFKILLED
;
7415 status
= MGMT_STATUS_FAILED
;
7417 mgmt_cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_SET_POWERED
, status
);
7419 mgmt_pending_remove(cmd
);
7422 void mgmt_discoverable_timeout(struct hci_dev
*hdev
)
7424 struct hci_request req
;
7428 /* When discoverable timeout triggers, then just make sure
7429 * the limited discoverable flag is cleared. Even in the case
7430 * of a timeout triggered from general discoverable, it is
7431 * safe to unconditionally clear the flag.
7433 hci_dev_clear_flag(hdev
, HCI_LIMITED_DISCOVERABLE
);
7434 hci_dev_clear_flag(hdev
, HCI_DISCOVERABLE
);
7436 hci_req_init(&req
, hdev
);
7437 if (hci_dev_test_flag(hdev
, HCI_BREDR_ENABLED
)) {
7438 u8 scan
= SCAN_PAGE
;
7439 hci_req_add(&req
, HCI_OP_WRITE_SCAN_ENABLE
,
7440 sizeof(scan
), &scan
);
7444 /* Advertising instances don't use the global discoverable setting, so
7445 * only update AD if advertising was enabled using Set Advertising.
7447 if (hci_dev_test_flag(hdev
, HCI_ADVERTISING
))
7448 update_adv_data(&req
);
7450 hci_req_run(&req
, NULL
);
7452 hdev
->discov_timeout
= 0;
7454 new_settings(hdev
, NULL
);
7456 hci_dev_unlock(hdev
);
7459 void mgmt_new_link_key(struct hci_dev
*hdev
, struct link_key
*key
,
7462 struct mgmt_ev_new_link_key ev
;
7464 memset(&ev
, 0, sizeof(ev
));
7466 ev
.store_hint
= persistent
;
7467 bacpy(&ev
.key
.addr
.bdaddr
, &key
->bdaddr
);
7468 ev
.key
.addr
.type
= BDADDR_BREDR
;
7469 ev
.key
.type
= key
->type
;
7470 memcpy(ev
.key
.val
, key
->val
, HCI_LINK_KEY_SIZE
);
7471 ev
.key
.pin_len
= key
->pin_len
;
7473 mgmt_event(MGMT_EV_NEW_LINK_KEY
, hdev
, &ev
, sizeof(ev
), NULL
);
7476 static u8
mgmt_ltk_type(struct smp_ltk
*ltk
)
7478 switch (ltk
->type
) {
7481 if (ltk
->authenticated
)
7482 return MGMT_LTK_AUTHENTICATED
;
7483 return MGMT_LTK_UNAUTHENTICATED
;
7485 if (ltk
->authenticated
)
7486 return MGMT_LTK_P256_AUTH
;
7487 return MGMT_LTK_P256_UNAUTH
;
7488 case SMP_LTK_P256_DEBUG
:
7489 return MGMT_LTK_P256_DEBUG
;
7492 return MGMT_LTK_UNAUTHENTICATED
;
7495 void mgmt_new_ltk(struct hci_dev
*hdev
, struct smp_ltk
*key
, bool persistent
)
7497 struct mgmt_ev_new_long_term_key ev
;
7499 memset(&ev
, 0, sizeof(ev
));
7501 /* Devices using resolvable or non-resolvable random addresses
7502 * without providing an identity resolving key don't require
7503 * to store long term keys. Their addresses will change the
7506 * Only when a remote device provides an identity address
7507 * make sure the long term key is stored. If the remote
7508 * identity is known, the long term keys are internally
7509 * mapped to the identity address. So allow static random
7510 * and public addresses here.
7512 if (key
->bdaddr_type
== ADDR_LE_DEV_RANDOM
&&
7513 (key
->bdaddr
.b
[5] & 0xc0) != 0xc0)
7514 ev
.store_hint
= 0x00;
7516 ev
.store_hint
= persistent
;
7518 bacpy(&ev
.key
.addr
.bdaddr
, &key
->bdaddr
);
7519 ev
.key
.addr
.type
= link_to_bdaddr(LE_LINK
, key
->bdaddr_type
);
7520 ev
.key
.type
= mgmt_ltk_type(key
);
7521 ev
.key
.enc_size
= key
->enc_size
;
7522 ev
.key
.ediv
= key
->ediv
;
7523 ev
.key
.rand
= key
->rand
;
7525 if (key
->type
== SMP_LTK
)
7528 /* Make sure we copy only the significant bytes based on the
7529 * encryption key size, and set the rest of the value to zeroes.
7531 memcpy(ev
.key
.val
, key
->val
, key
->enc_size
);
7532 memset(ev
.key
.val
+ key
->enc_size
, 0,
7533 sizeof(ev
.key
.val
) - key
->enc_size
);
7535 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY
, hdev
, &ev
, sizeof(ev
), NULL
);
7538 void mgmt_new_irk(struct hci_dev
*hdev
, struct smp_irk
*irk
, bool persistent
)
7540 struct mgmt_ev_new_irk ev
;
7542 memset(&ev
, 0, sizeof(ev
));
7544 ev
.store_hint
= persistent
;
7546 bacpy(&ev
.rpa
, &irk
->rpa
);
7547 bacpy(&ev
.irk
.addr
.bdaddr
, &irk
->bdaddr
);
7548 ev
.irk
.addr
.type
= link_to_bdaddr(LE_LINK
, irk
->addr_type
);
7549 memcpy(ev
.irk
.val
, irk
->val
, sizeof(irk
->val
));
7551 mgmt_event(MGMT_EV_NEW_IRK
, hdev
, &ev
, sizeof(ev
), NULL
);
7554 void mgmt_new_csrk(struct hci_dev
*hdev
, struct smp_csrk
*csrk
,
7557 struct mgmt_ev_new_csrk ev
;
7559 memset(&ev
, 0, sizeof(ev
));
7561 /* Devices using resolvable or non-resolvable random addresses
7562 * without providing an identity resolving key don't require
7563 * to store signature resolving keys. Their addresses will change
7564 * the next time around.
7566 * Only when a remote device provides an identity address
7567 * make sure the signature resolving key is stored. So allow
7568 * static random and public addresses here.
7570 if (csrk
->bdaddr_type
== ADDR_LE_DEV_RANDOM
&&
7571 (csrk
->bdaddr
.b
[5] & 0xc0) != 0xc0)
7572 ev
.store_hint
= 0x00;
7574 ev
.store_hint
= persistent
;
7576 bacpy(&ev
.key
.addr
.bdaddr
, &csrk
->bdaddr
);
7577 ev
.key
.addr
.type
= link_to_bdaddr(LE_LINK
, csrk
->bdaddr_type
);
7578 ev
.key
.type
= csrk
->type
;
7579 memcpy(ev
.key
.val
, csrk
->val
, sizeof(csrk
->val
));
7581 mgmt_event(MGMT_EV_NEW_CSRK
, hdev
, &ev
, sizeof(ev
), NULL
);
7584 void mgmt_new_conn_param(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
7585 u8 bdaddr_type
, u8 store_hint
, u16 min_interval
,
7586 u16 max_interval
, u16 latency
, u16 timeout
)
7588 struct mgmt_ev_new_conn_param ev
;
7590 if (!hci_is_identity_address(bdaddr
, bdaddr_type
))
7593 memset(&ev
, 0, sizeof(ev
));
7594 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
7595 ev
.addr
.type
= link_to_bdaddr(LE_LINK
, bdaddr_type
);
7596 ev
.store_hint
= store_hint
;
7597 ev
.min_interval
= cpu_to_le16(min_interval
);
7598 ev
.max_interval
= cpu_to_le16(max_interval
);
7599 ev
.latency
= cpu_to_le16(latency
);
7600 ev
.timeout
= cpu_to_le16(timeout
);
7602 mgmt_event(MGMT_EV_NEW_CONN_PARAM
, hdev
, &ev
, sizeof(ev
), NULL
);
7605 void mgmt_device_connected(struct hci_dev
*hdev
, struct hci_conn
*conn
,
7606 u32 flags
, u8
*name
, u8 name_len
)
7609 struct mgmt_ev_device_connected
*ev
= (void *) buf
;
7612 bacpy(&ev
->addr
.bdaddr
, &conn
->dst
);
7613 ev
->addr
.type
= link_to_bdaddr(conn
->type
, conn
->dst_type
);
7615 ev
->flags
= __cpu_to_le32(flags
);
7617 /* We must ensure that the EIR Data fields are ordered and
7618 * unique. Keep it simple for now and avoid the problem by not
7619 * adding any BR/EDR data to the LE adv.
7621 if (conn
->le_adv_data_len
> 0) {
7622 memcpy(&ev
->eir
[eir_len
],
7623 conn
->le_adv_data
, conn
->le_adv_data_len
);
7624 eir_len
= conn
->le_adv_data_len
;
7627 eir_len
= eir_append_data(ev
->eir
, 0, EIR_NAME_COMPLETE
,
7630 if (memcmp(conn
->dev_class
, "\0\0\0", 3) != 0)
7631 eir_len
= eir_append_data(ev
->eir
, eir_len
,
7633 conn
->dev_class
, 3);
7636 ev
->eir_len
= cpu_to_le16(eir_len
);
7638 mgmt_event(MGMT_EV_DEVICE_CONNECTED
, hdev
, buf
,
7639 sizeof(*ev
) + eir_len
, NULL
);
7642 static void disconnect_rsp(struct mgmt_pending_cmd
*cmd
, void *data
)
7644 struct sock
**sk
= data
;
7646 cmd
->cmd_complete(cmd
, 0);
7651 mgmt_pending_remove(cmd
);
7654 static void unpair_device_rsp(struct mgmt_pending_cmd
*cmd
, void *data
)
7656 struct hci_dev
*hdev
= data
;
7657 struct mgmt_cp_unpair_device
*cp
= cmd
->param
;
7659 device_unpaired(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
, cmd
->sk
);
7661 cmd
->cmd_complete(cmd
, 0);
7662 mgmt_pending_remove(cmd
);
7665 bool mgmt_powering_down(struct hci_dev
*hdev
)
7667 struct mgmt_pending_cmd
*cmd
;
7668 struct mgmt_mode
*cp
;
7670 cmd
= pending_find(MGMT_OP_SET_POWERED
, hdev
);
7681 void mgmt_device_disconnected(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
7682 u8 link_type
, u8 addr_type
, u8 reason
,
7683 bool mgmt_connected
)
7685 struct mgmt_ev_device_disconnected ev
;
7686 struct sock
*sk
= NULL
;
7688 /* The connection is still in hci_conn_hash so test for 1
7689 * instead of 0 to know if this is the last one.
7691 if (mgmt_powering_down(hdev
) && hci_conn_count(hdev
) == 1) {
7692 cancel_delayed_work(&hdev
->power_off
);
7693 queue_work(hdev
->req_workqueue
, &hdev
->power_off
.work
);
7696 if (!mgmt_connected
)
7699 if (link_type
!= ACL_LINK
&& link_type
!= LE_LINK
)
7702 mgmt_pending_foreach(MGMT_OP_DISCONNECT
, hdev
, disconnect_rsp
, &sk
);
7704 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
7705 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
7708 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED
, hdev
, &ev
, sizeof(ev
), sk
);
7713 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE
, hdev
, unpair_device_rsp
,
7717 void mgmt_disconnect_failed(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
7718 u8 link_type
, u8 addr_type
, u8 status
)
7720 u8 bdaddr_type
= link_to_bdaddr(link_type
, addr_type
);
7721 struct mgmt_cp_disconnect
*cp
;
7722 struct mgmt_pending_cmd
*cmd
;
7724 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE
, hdev
, unpair_device_rsp
,
7727 cmd
= pending_find(MGMT_OP_DISCONNECT
, hdev
);
7733 if (bacmp(bdaddr
, &cp
->addr
.bdaddr
))
7736 if (cp
->addr
.type
!= bdaddr_type
)
7739 cmd
->cmd_complete(cmd
, mgmt_status(status
));
7740 mgmt_pending_remove(cmd
);
7743 void mgmt_connect_failed(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 link_type
,
7744 u8 addr_type
, u8 status
)
7746 struct mgmt_ev_connect_failed ev
;
7748 /* The connection is still in hci_conn_hash so test for 1
7749 * instead of 0 to know if this is the last one.
7751 if (mgmt_powering_down(hdev
) && hci_conn_count(hdev
) == 1) {
7752 cancel_delayed_work(&hdev
->power_off
);
7753 queue_work(hdev
->req_workqueue
, &hdev
->power_off
.work
);
7756 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
7757 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
7758 ev
.status
= mgmt_status(status
);
7760 mgmt_event(MGMT_EV_CONNECT_FAILED
, hdev
, &ev
, sizeof(ev
), NULL
);
7763 void mgmt_pin_code_request(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 secure
)
7765 struct mgmt_ev_pin_code_request ev
;
7767 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
7768 ev
.addr
.type
= BDADDR_BREDR
;
7771 mgmt_event(MGMT_EV_PIN_CODE_REQUEST
, hdev
, &ev
, sizeof(ev
), NULL
);
7774 void mgmt_pin_code_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
7777 struct mgmt_pending_cmd
*cmd
;
7779 cmd
= pending_find(MGMT_OP_PIN_CODE_REPLY
, hdev
);
7783 cmd
->cmd_complete(cmd
, mgmt_status(status
));
7784 mgmt_pending_remove(cmd
);
7787 void mgmt_pin_code_neg_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
7790 struct mgmt_pending_cmd
*cmd
;
7792 cmd
= pending_find(MGMT_OP_PIN_CODE_NEG_REPLY
, hdev
);
7796 cmd
->cmd_complete(cmd
, mgmt_status(status
));
7797 mgmt_pending_remove(cmd
);
7800 int mgmt_user_confirm_request(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
7801 u8 link_type
, u8 addr_type
, u32 value
,
7804 struct mgmt_ev_user_confirm_request ev
;
7806 BT_DBG("%s", hdev
->name
);
7808 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
7809 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
7810 ev
.confirm_hint
= confirm_hint
;
7811 ev
.value
= cpu_to_le32(value
);
7813 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST
, hdev
, &ev
, sizeof(ev
),
7817 int mgmt_user_passkey_request(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
7818 u8 link_type
, u8 addr_type
)
7820 struct mgmt_ev_user_passkey_request ev
;
7822 BT_DBG("%s", hdev
->name
);
7824 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
7825 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
7827 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST
, hdev
, &ev
, sizeof(ev
),
7831 static int user_pairing_resp_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
7832 u8 link_type
, u8 addr_type
, u8 status
,
7835 struct mgmt_pending_cmd
*cmd
;
7837 cmd
= pending_find(opcode
, hdev
);
7841 cmd
->cmd_complete(cmd
, mgmt_status(status
));
7842 mgmt_pending_remove(cmd
);
7847 int mgmt_user_confirm_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
7848 u8 link_type
, u8 addr_type
, u8 status
)
7850 return user_pairing_resp_complete(hdev
, bdaddr
, link_type
, addr_type
,
7851 status
, MGMT_OP_USER_CONFIRM_REPLY
);
7854 int mgmt_user_confirm_neg_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
7855 u8 link_type
, u8 addr_type
, u8 status
)
7857 return user_pairing_resp_complete(hdev
, bdaddr
, link_type
, addr_type
,
7859 MGMT_OP_USER_CONFIRM_NEG_REPLY
);
7862 int mgmt_user_passkey_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
7863 u8 link_type
, u8 addr_type
, u8 status
)
7865 return user_pairing_resp_complete(hdev
, bdaddr
, link_type
, addr_type
,
7866 status
, MGMT_OP_USER_PASSKEY_REPLY
);
7869 int mgmt_user_passkey_neg_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
7870 u8 link_type
, u8 addr_type
, u8 status
)
7872 return user_pairing_resp_complete(hdev
, bdaddr
, link_type
, addr_type
,
7874 MGMT_OP_USER_PASSKEY_NEG_REPLY
);
7877 int mgmt_user_passkey_notify(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
7878 u8 link_type
, u8 addr_type
, u32 passkey
,
7881 struct mgmt_ev_passkey_notify ev
;
7883 BT_DBG("%s", hdev
->name
);
7885 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
7886 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
7887 ev
.passkey
= __cpu_to_le32(passkey
);
7888 ev
.entered
= entered
;
7890 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY
, hdev
, &ev
, sizeof(ev
), NULL
);
7893 void mgmt_auth_failed(struct hci_conn
*conn
, u8 hci_status
)
7895 struct mgmt_ev_auth_failed ev
;
7896 struct mgmt_pending_cmd
*cmd
;
7897 u8 status
= mgmt_status(hci_status
);
7899 bacpy(&ev
.addr
.bdaddr
, &conn
->dst
);
7900 ev
.addr
.type
= link_to_bdaddr(conn
->type
, conn
->dst_type
);
7903 cmd
= find_pairing(conn
);
7905 mgmt_event(MGMT_EV_AUTH_FAILED
, conn
->hdev
, &ev
, sizeof(ev
),
7906 cmd
? cmd
->sk
: NULL
);
7909 cmd
->cmd_complete(cmd
, status
);
7910 mgmt_pending_remove(cmd
);
7914 void mgmt_auth_enable_complete(struct hci_dev
*hdev
, u8 status
)
7916 struct cmd_lookup match
= { NULL
, hdev
};
7920 u8 mgmt_err
= mgmt_status(status
);
7921 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY
, hdev
,
7922 cmd_status_rsp
, &mgmt_err
);
7926 if (test_bit(HCI_AUTH
, &hdev
->flags
))
7927 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_LINK_SECURITY
);
7929 changed
= hci_dev_test_and_clear_flag(hdev
, HCI_LINK_SECURITY
);
7931 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY
, hdev
, settings_rsp
,
7935 new_settings(hdev
, match
.sk
);
7941 static void clear_eir(struct hci_request
*req
)
7943 struct hci_dev
*hdev
= req
->hdev
;
7944 struct hci_cp_write_eir cp
;
7946 if (!lmp_ext_inq_capable(hdev
))
7949 memset(hdev
->eir
, 0, sizeof(hdev
->eir
));
7951 memset(&cp
, 0, sizeof(cp
));
7953 hci_req_add(req
, HCI_OP_WRITE_EIR
, sizeof(cp
), &cp
);
7956 void mgmt_ssp_enable_complete(struct hci_dev
*hdev
, u8 enable
, u8 status
)
7958 struct cmd_lookup match
= { NULL
, hdev
};
7959 struct hci_request req
;
7960 bool changed
= false;
7963 u8 mgmt_err
= mgmt_status(status
);
7965 if (enable
&& hci_dev_test_and_clear_flag(hdev
,
7967 hci_dev_clear_flag(hdev
, HCI_HS_ENABLED
);
7968 new_settings(hdev
, NULL
);
7971 mgmt_pending_foreach(MGMT_OP_SET_SSP
, hdev
, cmd_status_rsp
,
7977 changed
= !hci_dev_test_and_set_flag(hdev
, HCI_SSP_ENABLED
);
7979 changed
= hci_dev_test_and_clear_flag(hdev
, HCI_SSP_ENABLED
);
7981 changed
= hci_dev_test_and_clear_flag(hdev
,
7984 hci_dev_clear_flag(hdev
, HCI_HS_ENABLED
);
7987 mgmt_pending_foreach(MGMT_OP_SET_SSP
, hdev
, settings_rsp
, &match
);
7990 new_settings(hdev
, match
.sk
);
7995 hci_req_init(&req
, hdev
);
7997 if (hci_dev_test_flag(hdev
, HCI_SSP_ENABLED
)) {
7998 if (hci_dev_test_flag(hdev
, HCI_USE_DEBUG_KEYS
))
7999 hci_req_add(&req
, HCI_OP_WRITE_SSP_DEBUG_MODE
,
8000 sizeof(enable
), &enable
);
8006 hci_req_run(&req
, NULL
);
8009 static void sk_lookup(struct mgmt_pending_cmd
*cmd
, void *data
)
8011 struct cmd_lookup
*match
= data
;
8013 if (match
->sk
== NULL
) {
8014 match
->sk
= cmd
->sk
;
8015 sock_hold(match
->sk
);
8019 void mgmt_set_class_of_dev_complete(struct hci_dev
*hdev
, u8
*dev_class
,
8022 struct cmd_lookup match
= { NULL
, hdev
, mgmt_status(status
) };
8024 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS
, hdev
, sk_lookup
, &match
);
8025 mgmt_pending_foreach(MGMT_OP_ADD_UUID
, hdev
, sk_lookup
, &match
);
8026 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID
, hdev
, sk_lookup
, &match
);
8029 mgmt_generic_event(MGMT_EV_CLASS_OF_DEV_CHANGED
, hdev
,
8030 dev_class
, 3, NULL
);
8036 void mgmt_set_local_name_complete(struct hci_dev
*hdev
, u8
*name
, u8 status
)
8038 struct mgmt_cp_set_local_name ev
;
8039 struct mgmt_pending_cmd
*cmd
;
8044 memset(&ev
, 0, sizeof(ev
));
8045 memcpy(ev
.name
, name
, HCI_MAX_NAME_LENGTH
);
8046 memcpy(ev
.short_name
, hdev
->short_name
, HCI_MAX_SHORT_NAME_LENGTH
);
8048 cmd
= pending_find(MGMT_OP_SET_LOCAL_NAME
, hdev
);
8050 memcpy(hdev
->dev_name
, name
, sizeof(hdev
->dev_name
));
8052 /* If this is a HCI command related to powering on the
8053 * HCI dev don't send any mgmt signals.
8055 if (pending_find(MGMT_OP_SET_POWERED
, hdev
))
8059 mgmt_generic_event(MGMT_EV_LOCAL_NAME_CHANGED
, hdev
, &ev
, sizeof(ev
),
8060 cmd
? cmd
->sk
: NULL
);
8063 static inline bool has_uuid(u8
*uuid
, u16 uuid_count
, u8 (*uuids
)[16])
8067 for (i
= 0; i
< uuid_count
; i
++) {
8068 if (!memcmp(uuid
, uuids
[i
], 16))
8075 static bool eir_has_uuids(u8
*eir
, u16 eir_len
, u16 uuid_count
, u8 (*uuids
)[16])
8079 while (parsed
< eir_len
) {
8080 u8 field_len
= eir
[0];
8087 if (eir_len
- parsed
< field_len
+ 1)
8091 case EIR_UUID16_ALL
:
8092 case EIR_UUID16_SOME
:
8093 for (i
= 0; i
+ 3 <= field_len
; i
+= 2) {
8094 memcpy(uuid
, bluetooth_base_uuid
, 16);
8095 uuid
[13] = eir
[i
+ 3];
8096 uuid
[12] = eir
[i
+ 2];
8097 if (has_uuid(uuid
, uuid_count
, uuids
))
8101 case EIR_UUID32_ALL
:
8102 case EIR_UUID32_SOME
:
8103 for (i
= 0; i
+ 5 <= field_len
; i
+= 4) {
8104 memcpy(uuid
, bluetooth_base_uuid
, 16);
8105 uuid
[15] = eir
[i
+ 5];
8106 uuid
[14] = eir
[i
+ 4];
8107 uuid
[13] = eir
[i
+ 3];
8108 uuid
[12] = eir
[i
+ 2];
8109 if (has_uuid(uuid
, uuid_count
, uuids
))
8113 case EIR_UUID128_ALL
:
8114 case EIR_UUID128_SOME
:
8115 for (i
= 0; i
+ 17 <= field_len
; i
+= 16) {
8116 memcpy(uuid
, eir
+ i
+ 2, 16);
8117 if (has_uuid(uuid
, uuid_count
, uuids
))
8123 parsed
+= field_len
+ 1;
8124 eir
+= field_len
+ 1;
8130 static void restart_le_scan(struct hci_dev
*hdev
)
8132 /* If controller is not scanning we are done. */
8133 if (!hci_dev_test_flag(hdev
, HCI_LE_SCAN
))
8136 if (time_after(jiffies
+ DISCOV_LE_RESTART_DELAY
,
8137 hdev
->discovery
.scan_start
+
8138 hdev
->discovery
.scan_duration
))
8141 queue_delayed_work(hdev
->req_workqueue
, &hdev
->le_scan_restart
,
8142 DISCOV_LE_RESTART_DELAY
);
8145 static bool is_filter_match(struct hci_dev
*hdev
, s8 rssi
, u8
*eir
,
8146 u16 eir_len
, u8
*scan_rsp
, u8 scan_rsp_len
)
8148 /* If a RSSI threshold has been specified, and
8149 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
8150 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
8151 * is set, let it through for further processing, as we might need to
8154 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
8155 * the results are also dropped.
8157 if (hdev
->discovery
.rssi
!= HCI_RSSI_INVALID
&&
8158 (rssi
== HCI_RSSI_INVALID
||
8159 (rssi
< hdev
->discovery
.rssi
&&
8160 !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER
, &hdev
->quirks
))))
8163 if (hdev
->discovery
.uuid_count
!= 0) {
8164 /* If a list of UUIDs is provided in filter, results with no
8165 * matching UUID should be dropped.
8167 if (!eir_has_uuids(eir
, eir_len
, hdev
->discovery
.uuid_count
,
8168 hdev
->discovery
.uuids
) &&
8169 !eir_has_uuids(scan_rsp
, scan_rsp_len
,
8170 hdev
->discovery
.uuid_count
,
8171 hdev
->discovery
.uuids
))
8175 /* If duplicate filtering does not report RSSI changes, then restart
8176 * scanning to ensure updated result with updated RSSI values.
8178 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER
, &hdev
->quirks
)) {
8179 restart_le_scan(hdev
);
8181 /* Validate RSSI value against the RSSI threshold once more. */
8182 if (hdev
->discovery
.rssi
!= HCI_RSSI_INVALID
&&
8183 rssi
< hdev
->discovery
.rssi
)
8190 void mgmt_device_found(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 link_type
,
8191 u8 addr_type
, u8
*dev_class
, s8 rssi
, u32 flags
,
8192 u8
*eir
, u16 eir_len
, u8
*scan_rsp
, u8 scan_rsp_len
)
8195 struct mgmt_ev_device_found
*ev
= (void *)buf
;
8198 /* Don't send events for a non-kernel initiated discovery. With
8199 * LE one exception is if we have pend_le_reports > 0 in which
8200 * case we're doing passive scanning and want these events.
8202 if (!hci_discovery_active(hdev
)) {
8203 if (link_type
== ACL_LINK
)
8205 if (link_type
== LE_LINK
&& list_empty(&hdev
->pend_le_reports
))
8209 if (hdev
->discovery
.result_filtering
) {
8210 /* We are using service discovery */
8211 if (!is_filter_match(hdev
, rssi
, eir
, eir_len
, scan_rsp
,
8216 /* Make sure that the buffer is big enough. The 5 extra bytes
8217 * are for the potential CoD field.
8219 if (sizeof(*ev
) + eir_len
+ scan_rsp_len
+ 5 > sizeof(buf
))
8222 memset(buf
, 0, sizeof(buf
));
8224 /* In case of device discovery with BR/EDR devices (pre 1.2), the
8225 * RSSI value was reported as 0 when not available. This behavior
8226 * is kept when using device discovery. This is required for full
8227 * backwards compatibility with the API.
8229 * However when using service discovery, the value 127 will be
8230 * returned when the RSSI is not available.
8232 if (rssi
== HCI_RSSI_INVALID
&& !hdev
->discovery
.report_invalid_rssi
&&
8233 link_type
== ACL_LINK
)
8236 bacpy(&ev
->addr
.bdaddr
, bdaddr
);
8237 ev
->addr
.type
= link_to_bdaddr(link_type
, addr_type
);
8239 ev
->flags
= cpu_to_le32(flags
);
8242 /* Copy EIR or advertising data into event */
8243 memcpy(ev
->eir
, eir
, eir_len
);
8245 if (dev_class
&& !eir_has_data_type(ev
->eir
, eir_len
, EIR_CLASS_OF_DEV
))
8246 eir_len
= eir_append_data(ev
->eir
, eir_len
, EIR_CLASS_OF_DEV
,
8249 if (scan_rsp_len
> 0)
8250 /* Append scan response data to event */
8251 memcpy(ev
->eir
+ eir_len
, scan_rsp
, scan_rsp_len
);
8253 ev
->eir_len
= cpu_to_le16(eir_len
+ scan_rsp_len
);
8254 ev_size
= sizeof(*ev
) + eir_len
+ scan_rsp_len
;
8256 mgmt_event(MGMT_EV_DEVICE_FOUND
, hdev
, ev
, ev_size
, NULL
);
8259 void mgmt_remote_name(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 link_type
,
8260 u8 addr_type
, s8 rssi
, u8
*name
, u8 name_len
)
8262 struct mgmt_ev_device_found
*ev
;
8263 char buf
[sizeof(*ev
) + HCI_MAX_NAME_LENGTH
+ 2];
8266 ev
= (struct mgmt_ev_device_found
*) buf
;
8268 memset(buf
, 0, sizeof(buf
));
8270 bacpy(&ev
->addr
.bdaddr
, bdaddr
);
8271 ev
->addr
.type
= link_to_bdaddr(link_type
, addr_type
);
8274 eir_len
= eir_append_data(ev
->eir
, 0, EIR_NAME_COMPLETE
, name
,
8277 ev
->eir_len
= cpu_to_le16(eir_len
);
8279 mgmt_event(MGMT_EV_DEVICE_FOUND
, hdev
, ev
, sizeof(*ev
) + eir_len
, NULL
);
8282 void mgmt_discovering(struct hci_dev
*hdev
, u8 discovering
)
8284 struct mgmt_ev_discovering ev
;
8286 BT_DBG("%s discovering %u", hdev
->name
, discovering
);
8288 memset(&ev
, 0, sizeof(ev
));
8289 ev
.type
= hdev
->discovery
.type
;
8290 ev
.discovering
= discovering
;
8292 mgmt_event(MGMT_EV_DISCOVERING
, hdev
, &ev
, sizeof(ev
), NULL
);
8295 static void adv_enable_complete(struct hci_dev
*hdev
, u8 status
, u16 opcode
)
8297 BT_DBG("%s status %u", hdev
->name
, status
);
8300 void mgmt_reenable_advertising(struct hci_dev
*hdev
)
8302 struct hci_request req
;
8305 if (!hci_dev_test_flag(hdev
, HCI_ADVERTISING
) &&
8306 !hci_dev_test_flag(hdev
, HCI_ADVERTISING_INSTANCE
))
8309 instance
= get_current_adv_instance(hdev
);
8311 hci_req_init(&req
, hdev
);
8314 schedule_adv_instance(&req
, instance
, true);
8316 update_adv_data(&req
);
8317 update_scan_rsp_data(&req
);
8318 enable_advertising(&req
);
8321 hci_req_run(&req
, adv_enable_complete
);
8324 static struct hci_mgmt_chan chan
= {
8325 .channel
= HCI_CHANNEL_CONTROL
,
8326 .handler_count
= ARRAY_SIZE(mgmt_handlers
),
8327 .handlers
= mgmt_handlers
,
8328 .hdev_init
= mgmt_init_hdev
,
8333 return hci_mgmt_chan_register(&chan
);
8336 void mgmt_exit(void)
8338 hci_mgmt_chan_unregister(&chan
);