2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 7
40 static const u16 mgmt_commands
[] = {
41 MGMT_OP_READ_INDEX_LIST
,
44 MGMT_OP_SET_DISCOVERABLE
,
45 MGMT_OP_SET_CONNECTABLE
,
46 MGMT_OP_SET_FAST_CONNECTABLE
,
48 MGMT_OP_SET_LINK_SECURITY
,
52 MGMT_OP_SET_DEV_CLASS
,
53 MGMT_OP_SET_LOCAL_NAME
,
56 MGMT_OP_LOAD_LINK_KEYS
,
57 MGMT_OP_LOAD_LONG_TERM_KEYS
,
59 MGMT_OP_GET_CONNECTIONS
,
60 MGMT_OP_PIN_CODE_REPLY
,
61 MGMT_OP_PIN_CODE_NEG_REPLY
,
62 MGMT_OP_SET_IO_CAPABILITY
,
64 MGMT_OP_CANCEL_PAIR_DEVICE
,
65 MGMT_OP_UNPAIR_DEVICE
,
66 MGMT_OP_USER_CONFIRM_REPLY
,
67 MGMT_OP_USER_CONFIRM_NEG_REPLY
,
68 MGMT_OP_USER_PASSKEY_REPLY
,
69 MGMT_OP_USER_PASSKEY_NEG_REPLY
,
70 MGMT_OP_READ_LOCAL_OOB_DATA
,
71 MGMT_OP_ADD_REMOTE_OOB_DATA
,
72 MGMT_OP_REMOVE_REMOTE_OOB_DATA
,
73 MGMT_OP_START_DISCOVERY
,
74 MGMT_OP_STOP_DISCOVERY
,
77 MGMT_OP_UNBLOCK_DEVICE
,
78 MGMT_OP_SET_DEVICE_ID
,
79 MGMT_OP_SET_ADVERTISING
,
81 MGMT_OP_SET_STATIC_ADDRESS
,
82 MGMT_OP_SET_SCAN_PARAMS
,
83 MGMT_OP_SET_SECURE_CONN
,
84 MGMT_OP_SET_DEBUG_KEYS
,
87 MGMT_OP_GET_CONN_INFO
,
88 MGMT_OP_GET_CLOCK_INFO
,
90 MGMT_OP_REMOVE_DEVICE
,
91 MGMT_OP_LOAD_CONN_PARAM
,
92 MGMT_OP_READ_UNCONF_INDEX_LIST
,
93 MGMT_OP_READ_CONFIG_INFO
,
94 MGMT_OP_SET_EXTERNAL_CONFIG
,
97 static const u16 mgmt_events
[] = {
98 MGMT_EV_CONTROLLER_ERROR
,
100 MGMT_EV_INDEX_REMOVED
,
101 MGMT_EV_NEW_SETTINGS
,
102 MGMT_EV_CLASS_OF_DEV_CHANGED
,
103 MGMT_EV_LOCAL_NAME_CHANGED
,
104 MGMT_EV_NEW_LINK_KEY
,
105 MGMT_EV_NEW_LONG_TERM_KEY
,
106 MGMT_EV_DEVICE_CONNECTED
,
107 MGMT_EV_DEVICE_DISCONNECTED
,
108 MGMT_EV_CONNECT_FAILED
,
109 MGMT_EV_PIN_CODE_REQUEST
,
110 MGMT_EV_USER_CONFIRM_REQUEST
,
111 MGMT_EV_USER_PASSKEY_REQUEST
,
113 MGMT_EV_DEVICE_FOUND
,
115 MGMT_EV_DEVICE_BLOCKED
,
116 MGMT_EV_DEVICE_UNBLOCKED
,
117 MGMT_EV_DEVICE_UNPAIRED
,
118 MGMT_EV_PASSKEY_NOTIFY
,
121 MGMT_EV_DEVICE_ADDED
,
122 MGMT_EV_DEVICE_REMOVED
,
123 MGMT_EV_NEW_CONN_PARAM
,
124 MGMT_EV_UNCONF_INDEX_ADDED
,
125 MGMT_EV_UNCONF_INDEX_REMOVED
,
128 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
130 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
131 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
134 struct list_head list
;
142 /* HCI to MGMT error code conversion table */
143 static u8 mgmt_status_table
[] = {
145 MGMT_STATUS_UNKNOWN_COMMAND
, /* Unknown Command */
146 MGMT_STATUS_NOT_CONNECTED
, /* No Connection */
147 MGMT_STATUS_FAILED
, /* Hardware Failure */
148 MGMT_STATUS_CONNECT_FAILED
, /* Page Timeout */
149 MGMT_STATUS_AUTH_FAILED
, /* Authentication Failed */
150 MGMT_STATUS_AUTH_FAILED
, /* PIN or Key Missing */
151 MGMT_STATUS_NO_RESOURCES
, /* Memory Full */
152 MGMT_STATUS_TIMEOUT
, /* Connection Timeout */
153 MGMT_STATUS_NO_RESOURCES
, /* Max Number of Connections */
154 MGMT_STATUS_NO_RESOURCES
, /* Max Number of SCO Connections */
155 MGMT_STATUS_ALREADY_CONNECTED
, /* ACL Connection Exists */
156 MGMT_STATUS_BUSY
, /* Command Disallowed */
157 MGMT_STATUS_NO_RESOURCES
, /* Rejected Limited Resources */
158 MGMT_STATUS_REJECTED
, /* Rejected Security */
159 MGMT_STATUS_REJECTED
, /* Rejected Personal */
160 MGMT_STATUS_TIMEOUT
, /* Host Timeout */
161 MGMT_STATUS_NOT_SUPPORTED
, /* Unsupported Feature */
162 MGMT_STATUS_INVALID_PARAMS
, /* Invalid Parameters */
163 MGMT_STATUS_DISCONNECTED
, /* OE User Ended Connection */
164 MGMT_STATUS_NO_RESOURCES
, /* OE Low Resources */
165 MGMT_STATUS_DISCONNECTED
, /* OE Power Off */
166 MGMT_STATUS_DISCONNECTED
, /* Connection Terminated */
167 MGMT_STATUS_BUSY
, /* Repeated Attempts */
168 MGMT_STATUS_REJECTED
, /* Pairing Not Allowed */
169 MGMT_STATUS_FAILED
, /* Unknown LMP PDU */
170 MGMT_STATUS_NOT_SUPPORTED
, /* Unsupported Remote Feature */
171 MGMT_STATUS_REJECTED
, /* SCO Offset Rejected */
172 MGMT_STATUS_REJECTED
, /* SCO Interval Rejected */
173 MGMT_STATUS_REJECTED
, /* Air Mode Rejected */
174 MGMT_STATUS_INVALID_PARAMS
, /* Invalid LMP Parameters */
175 MGMT_STATUS_FAILED
, /* Unspecified Error */
176 MGMT_STATUS_NOT_SUPPORTED
, /* Unsupported LMP Parameter Value */
177 MGMT_STATUS_FAILED
, /* Role Change Not Allowed */
178 MGMT_STATUS_TIMEOUT
, /* LMP Response Timeout */
179 MGMT_STATUS_FAILED
, /* LMP Error Transaction Collision */
180 MGMT_STATUS_FAILED
, /* LMP PDU Not Allowed */
181 MGMT_STATUS_REJECTED
, /* Encryption Mode Not Accepted */
182 MGMT_STATUS_FAILED
, /* Unit Link Key Used */
183 MGMT_STATUS_NOT_SUPPORTED
, /* QoS Not Supported */
184 MGMT_STATUS_TIMEOUT
, /* Instant Passed */
185 MGMT_STATUS_NOT_SUPPORTED
, /* Pairing Not Supported */
186 MGMT_STATUS_FAILED
, /* Transaction Collision */
187 MGMT_STATUS_INVALID_PARAMS
, /* Unacceptable Parameter */
188 MGMT_STATUS_REJECTED
, /* QoS Rejected */
189 MGMT_STATUS_NOT_SUPPORTED
, /* Classification Not Supported */
190 MGMT_STATUS_REJECTED
, /* Insufficient Security */
191 MGMT_STATUS_INVALID_PARAMS
, /* Parameter Out Of Range */
192 MGMT_STATUS_BUSY
, /* Role Switch Pending */
193 MGMT_STATUS_FAILED
, /* Slot Violation */
194 MGMT_STATUS_FAILED
, /* Role Switch Failed */
195 MGMT_STATUS_INVALID_PARAMS
, /* EIR Too Large */
196 MGMT_STATUS_NOT_SUPPORTED
, /* Simple Pairing Not Supported */
197 MGMT_STATUS_BUSY
, /* Host Busy Pairing */
198 MGMT_STATUS_REJECTED
, /* Rejected, No Suitable Channel */
199 MGMT_STATUS_BUSY
, /* Controller Busy */
200 MGMT_STATUS_INVALID_PARAMS
, /* Unsuitable Connection Interval */
201 MGMT_STATUS_TIMEOUT
, /* Directed Advertising Timeout */
202 MGMT_STATUS_AUTH_FAILED
, /* Terminated Due to MIC Failure */
203 MGMT_STATUS_CONNECT_FAILED
, /* Connection Establishment Failed */
204 MGMT_STATUS_CONNECT_FAILED
, /* MAC Connection Failed */
207 static u8
mgmt_status(u8 hci_status
)
209 if (hci_status
< ARRAY_SIZE(mgmt_status_table
))
210 return mgmt_status_table
[hci_status
];
212 return MGMT_STATUS_FAILED
;
215 static int cmd_status(struct sock
*sk
, u16 index
, u16 cmd
, u8 status
)
218 struct mgmt_hdr
*hdr
;
219 struct mgmt_ev_cmd_status
*ev
;
222 BT_DBG("sock %p, index %u, cmd %u, status %u", sk
, index
, cmd
, status
);
224 skb
= alloc_skb(sizeof(*hdr
) + sizeof(*ev
), GFP_KERNEL
);
228 hdr
= (void *) skb_put(skb
, sizeof(*hdr
));
230 hdr
->opcode
= cpu_to_le16(MGMT_EV_CMD_STATUS
);
231 hdr
->index
= cpu_to_le16(index
);
232 hdr
->len
= cpu_to_le16(sizeof(*ev
));
234 ev
= (void *) skb_put(skb
, sizeof(*ev
));
236 ev
->opcode
= cpu_to_le16(cmd
);
238 err
= sock_queue_rcv_skb(sk
, skb
);
245 static int cmd_complete(struct sock
*sk
, u16 index
, u16 cmd
, u8 status
,
246 void *rp
, size_t rp_len
)
249 struct mgmt_hdr
*hdr
;
250 struct mgmt_ev_cmd_complete
*ev
;
253 BT_DBG("sock %p", sk
);
255 skb
= alloc_skb(sizeof(*hdr
) + sizeof(*ev
) + rp_len
, GFP_KERNEL
);
259 hdr
= (void *) skb_put(skb
, sizeof(*hdr
));
261 hdr
->opcode
= cpu_to_le16(MGMT_EV_CMD_COMPLETE
);
262 hdr
->index
= cpu_to_le16(index
);
263 hdr
->len
= cpu_to_le16(sizeof(*ev
) + rp_len
);
265 ev
= (void *) skb_put(skb
, sizeof(*ev
) + rp_len
);
266 ev
->opcode
= cpu_to_le16(cmd
);
270 memcpy(ev
->data
, rp
, rp_len
);
272 err
= sock_queue_rcv_skb(sk
, skb
);
279 static int read_version(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
282 struct mgmt_rp_read_version rp
;
284 BT_DBG("sock %p", sk
);
286 rp
.version
= MGMT_VERSION
;
287 rp
.revision
= cpu_to_le16(MGMT_REVISION
);
289 return cmd_complete(sk
, MGMT_INDEX_NONE
, MGMT_OP_READ_VERSION
, 0, &rp
,
293 static int read_commands(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
296 struct mgmt_rp_read_commands
*rp
;
297 const u16 num_commands
= ARRAY_SIZE(mgmt_commands
);
298 const u16 num_events
= ARRAY_SIZE(mgmt_events
);
303 BT_DBG("sock %p", sk
);
305 rp_size
= sizeof(*rp
) + ((num_commands
+ num_events
) * sizeof(u16
));
307 rp
= kmalloc(rp_size
, GFP_KERNEL
);
311 rp
->num_commands
= cpu_to_le16(num_commands
);
312 rp
->num_events
= cpu_to_le16(num_events
);
314 for (i
= 0, opcode
= rp
->opcodes
; i
< num_commands
; i
++, opcode
++)
315 put_unaligned_le16(mgmt_commands
[i
], opcode
);
317 for (i
= 0; i
< num_events
; i
++, opcode
++)
318 put_unaligned_le16(mgmt_events
[i
], opcode
);
320 err
= cmd_complete(sk
, MGMT_INDEX_NONE
, MGMT_OP_READ_COMMANDS
, 0, rp
,
327 static int read_index_list(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
330 struct mgmt_rp_read_index_list
*rp
;
336 BT_DBG("sock %p", sk
);
338 read_lock(&hci_dev_list_lock
);
341 list_for_each_entry(d
, &hci_dev_list
, list
) {
342 if (d
->dev_type
== HCI_BREDR
&&
343 !test_bit(HCI_UNCONFIGURED
, &d
->dev_flags
))
347 rp_len
= sizeof(*rp
) + (2 * count
);
348 rp
= kmalloc(rp_len
, GFP_ATOMIC
);
350 read_unlock(&hci_dev_list_lock
);
355 list_for_each_entry(d
, &hci_dev_list
, list
) {
356 if (test_bit(HCI_SETUP
, &d
->dev_flags
) ||
357 test_bit(HCI_USER_CHANNEL
, &d
->dev_flags
))
360 /* Devices marked as raw-only are neither configured
361 * nor unconfigured controllers.
363 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &d
->quirks
))
366 if (d
->dev_type
== HCI_BREDR
&&
367 !test_bit(HCI_UNCONFIGURED
, &d
->dev_flags
)) {
368 rp
->index
[count
++] = cpu_to_le16(d
->id
);
369 BT_DBG("Added hci%u", d
->id
);
373 rp
->num_controllers
= cpu_to_le16(count
);
374 rp_len
= sizeof(*rp
) + (2 * count
);
376 read_unlock(&hci_dev_list_lock
);
378 err
= cmd_complete(sk
, MGMT_INDEX_NONE
, MGMT_OP_READ_INDEX_LIST
, 0, rp
,
386 static int read_unconf_index_list(struct sock
*sk
, struct hci_dev
*hdev
,
387 void *data
, u16 data_len
)
389 struct mgmt_rp_read_unconf_index_list
*rp
;
395 BT_DBG("sock %p", sk
);
397 read_lock(&hci_dev_list_lock
);
400 list_for_each_entry(d
, &hci_dev_list
, list
) {
401 if (d
->dev_type
== HCI_BREDR
&&
402 test_bit(HCI_UNCONFIGURED
, &d
->dev_flags
))
406 rp_len
= sizeof(*rp
) + (2 * count
);
407 rp
= kmalloc(rp_len
, GFP_ATOMIC
);
409 read_unlock(&hci_dev_list_lock
);
414 list_for_each_entry(d
, &hci_dev_list
, list
) {
415 if (test_bit(HCI_SETUP
, &d
->dev_flags
) ||
416 test_bit(HCI_USER_CHANNEL
, &d
->dev_flags
))
419 /* Devices marked as raw-only are neither configured
420 * nor unconfigured controllers.
422 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &d
->quirks
))
425 if (d
->dev_type
== HCI_BREDR
&&
426 test_bit(HCI_UNCONFIGURED
, &d
->dev_flags
)) {
427 rp
->index
[count
++] = cpu_to_le16(d
->id
);
428 BT_DBG("Added hci%u", d
->id
);
432 rp
->num_controllers
= cpu_to_le16(count
);
433 rp_len
= sizeof(*rp
) + (2 * count
);
435 read_unlock(&hci_dev_list_lock
);
437 err
= cmd_complete(sk
, MGMT_INDEX_NONE
, MGMT_OP_READ_UNCONF_INDEX_LIST
,
445 static bool is_configured(struct hci_dev
*hdev
)
447 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
) &&
448 !test_bit(HCI_EXT_CONFIGURED
, &hdev
->dev_flags
))
451 if (test_bit(HCI_QUIRK_INVALID_BDADDR
, &hdev
->quirks
) &&
452 !bacmp(&hdev
->public_addr
, BDADDR_ANY
))
458 static __le32
get_missing_options(struct hci_dev
*hdev
)
462 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
) &&
463 !test_bit(HCI_EXT_CONFIGURED
, &hdev
->dev_flags
))
464 options
|= MGMT_OPTION_EXTERNAL_CONFIG
;
466 if (test_bit(HCI_QUIRK_INVALID_BDADDR
, &hdev
->quirks
) &&
467 !bacmp(&hdev
->public_addr
, BDADDR_ANY
))
468 options
|= MGMT_OPTION_PUBLIC_ADDRESS
;
470 return cpu_to_le32(options
);
473 static int send_options_rsp(struct sock
*sk
, u16 opcode
, struct hci_dev
*hdev
)
475 __le32 options
= get_missing_options(hdev
);
477 return cmd_complete(sk
, hdev
->id
, opcode
, 0, &options
,
481 static int read_config_info(struct sock
*sk
, struct hci_dev
*hdev
,
482 void *data
, u16 data_len
)
484 struct mgmt_rp_read_config_info rp
;
487 BT_DBG("sock %p %s", sk
, hdev
->name
);
491 memset(&rp
, 0, sizeof(rp
));
492 rp
.manufacturer
= cpu_to_le16(hdev
->manufacturer
);
494 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
))
495 options
|= MGMT_OPTION_EXTERNAL_CONFIG
;
497 if (hdev
->set_bdaddr
)
498 options
|= MGMT_OPTION_PUBLIC_ADDRESS
;
500 rp
.supported_options
= cpu_to_le32(options
);
501 rp
.missing_options
= get_missing_options(hdev
);
503 hci_dev_unlock(hdev
);
505 return cmd_complete(sk
, hdev
->id
, MGMT_OP_READ_CONFIG_INFO
, 0, &rp
,
509 static u32
get_supported_settings(struct hci_dev
*hdev
)
513 settings
|= MGMT_SETTING_POWERED
;
514 settings
|= MGMT_SETTING_PAIRABLE
;
515 settings
|= MGMT_SETTING_DEBUG_KEYS
;
517 if (lmp_bredr_capable(hdev
)) {
518 settings
|= MGMT_SETTING_CONNECTABLE
;
519 if (hdev
->hci_ver
>= BLUETOOTH_VER_1_2
)
520 settings
|= MGMT_SETTING_FAST_CONNECTABLE
;
521 settings
|= MGMT_SETTING_DISCOVERABLE
;
522 settings
|= MGMT_SETTING_BREDR
;
523 settings
|= MGMT_SETTING_LINK_SECURITY
;
525 if (lmp_ssp_capable(hdev
)) {
526 settings
|= MGMT_SETTING_SSP
;
527 settings
|= MGMT_SETTING_HS
;
530 if (lmp_sc_capable(hdev
) ||
531 test_bit(HCI_FORCE_SC
, &hdev
->dbg_flags
))
532 settings
|= MGMT_SETTING_SECURE_CONN
;
535 if (lmp_le_capable(hdev
)) {
536 settings
|= MGMT_SETTING_LE
;
537 settings
|= MGMT_SETTING_ADVERTISING
;
538 settings
|= MGMT_SETTING_PRIVACY
;
541 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
) ||
543 settings
|= MGMT_SETTING_CONFIGURATION
;
548 static u32
get_current_settings(struct hci_dev
*hdev
)
552 if (hdev_is_powered(hdev
))
553 settings
|= MGMT_SETTING_POWERED
;
555 if (test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
))
556 settings
|= MGMT_SETTING_CONNECTABLE
;
558 if (test_bit(HCI_FAST_CONNECTABLE
, &hdev
->dev_flags
))
559 settings
|= MGMT_SETTING_FAST_CONNECTABLE
;
561 if (test_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
))
562 settings
|= MGMT_SETTING_DISCOVERABLE
;
564 if (test_bit(HCI_PAIRABLE
, &hdev
->dev_flags
))
565 settings
|= MGMT_SETTING_PAIRABLE
;
567 if (test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
568 settings
|= MGMT_SETTING_BREDR
;
570 if (test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
))
571 settings
|= MGMT_SETTING_LE
;
573 if (test_bit(HCI_LINK_SECURITY
, &hdev
->dev_flags
))
574 settings
|= MGMT_SETTING_LINK_SECURITY
;
576 if (test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
))
577 settings
|= MGMT_SETTING_SSP
;
579 if (test_bit(HCI_HS_ENABLED
, &hdev
->dev_flags
))
580 settings
|= MGMT_SETTING_HS
;
582 if (test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
))
583 settings
|= MGMT_SETTING_ADVERTISING
;
585 if (test_bit(HCI_SC_ENABLED
, &hdev
->dev_flags
))
586 settings
|= MGMT_SETTING_SECURE_CONN
;
588 if (test_bit(HCI_KEEP_DEBUG_KEYS
, &hdev
->dev_flags
))
589 settings
|= MGMT_SETTING_DEBUG_KEYS
;
591 if (test_bit(HCI_PRIVACY
, &hdev
->dev_flags
))
592 settings
|= MGMT_SETTING_PRIVACY
;
597 #define PNP_INFO_SVCLASS_ID 0x1200
599 static u8
*create_uuid16_list(struct hci_dev
*hdev
, u8
*data
, ptrdiff_t len
)
601 u8
*ptr
= data
, *uuids_start
= NULL
;
602 struct bt_uuid
*uuid
;
607 list_for_each_entry(uuid
, &hdev
->uuids
, list
) {
610 if (uuid
->size
!= 16)
613 uuid16
= get_unaligned_le16(&uuid
->uuid
[12]);
617 if (uuid16
== PNP_INFO_SVCLASS_ID
)
623 uuids_start
[1] = EIR_UUID16_ALL
;
627 /* Stop if not enough space to put next UUID */
628 if ((ptr
- data
) + sizeof(u16
) > len
) {
629 uuids_start
[1] = EIR_UUID16_SOME
;
633 *ptr
++ = (uuid16
& 0x00ff);
634 *ptr
++ = (uuid16
& 0xff00) >> 8;
635 uuids_start
[0] += sizeof(uuid16
);
641 static u8
*create_uuid32_list(struct hci_dev
*hdev
, u8
*data
, ptrdiff_t len
)
643 u8
*ptr
= data
, *uuids_start
= NULL
;
644 struct bt_uuid
*uuid
;
649 list_for_each_entry(uuid
, &hdev
->uuids
, list
) {
650 if (uuid
->size
!= 32)
656 uuids_start
[1] = EIR_UUID32_ALL
;
660 /* Stop if not enough space to put next UUID */
661 if ((ptr
- data
) + sizeof(u32
) > len
) {
662 uuids_start
[1] = EIR_UUID32_SOME
;
666 memcpy(ptr
, &uuid
->uuid
[12], sizeof(u32
));
668 uuids_start
[0] += sizeof(u32
);
674 static u8
*create_uuid128_list(struct hci_dev
*hdev
, u8
*data
, ptrdiff_t len
)
676 u8
*ptr
= data
, *uuids_start
= NULL
;
677 struct bt_uuid
*uuid
;
682 list_for_each_entry(uuid
, &hdev
->uuids
, list
) {
683 if (uuid
->size
!= 128)
689 uuids_start
[1] = EIR_UUID128_ALL
;
693 /* Stop if not enough space to put next UUID */
694 if ((ptr
- data
) + 16 > len
) {
695 uuids_start
[1] = EIR_UUID128_SOME
;
699 memcpy(ptr
, uuid
->uuid
, 16);
701 uuids_start
[0] += 16;
707 static struct pending_cmd
*mgmt_pending_find(u16 opcode
, struct hci_dev
*hdev
)
709 struct pending_cmd
*cmd
;
711 list_for_each_entry(cmd
, &hdev
->mgmt_pending
, list
) {
712 if (cmd
->opcode
== opcode
)
719 static struct pending_cmd
*mgmt_pending_find_data(u16 opcode
,
720 struct hci_dev
*hdev
,
723 struct pending_cmd
*cmd
;
725 list_for_each_entry(cmd
, &hdev
->mgmt_pending
, list
) {
726 if (cmd
->user_data
!= data
)
728 if (cmd
->opcode
== opcode
)
735 static u8
create_scan_rsp_data(struct hci_dev
*hdev
, u8
*ptr
)
740 name_len
= strlen(hdev
->dev_name
);
742 size_t max_len
= HCI_MAX_AD_LENGTH
- ad_len
- 2;
744 if (name_len
> max_len
) {
746 ptr
[1] = EIR_NAME_SHORT
;
748 ptr
[1] = EIR_NAME_COMPLETE
;
750 ptr
[0] = name_len
+ 1;
752 memcpy(ptr
+ 2, hdev
->dev_name
, name_len
);
754 ad_len
+= (name_len
+ 2);
755 ptr
+= (name_len
+ 2);
761 static void update_scan_rsp_data(struct hci_request
*req
)
763 struct hci_dev
*hdev
= req
->hdev
;
764 struct hci_cp_le_set_scan_rsp_data cp
;
767 if (!test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
))
770 memset(&cp
, 0, sizeof(cp
));
772 len
= create_scan_rsp_data(hdev
, cp
.data
);
774 if (hdev
->scan_rsp_data_len
== len
&&
775 memcmp(cp
.data
, hdev
->scan_rsp_data
, len
) == 0)
778 memcpy(hdev
->scan_rsp_data
, cp
.data
, sizeof(cp
.data
));
779 hdev
->scan_rsp_data_len
= len
;
783 hci_req_add(req
, HCI_OP_LE_SET_SCAN_RSP_DATA
, sizeof(cp
), &cp
);
786 static u8
get_adv_discov_flags(struct hci_dev
*hdev
)
788 struct pending_cmd
*cmd
;
790 /* If there's a pending mgmt command the flags will not yet have
791 * their final values, so check for this first.
793 cmd
= mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
);
795 struct mgmt_mode
*cp
= cmd
->param
;
797 return LE_AD_GENERAL
;
798 else if (cp
->val
== 0x02)
799 return LE_AD_LIMITED
;
801 if (test_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
))
802 return LE_AD_LIMITED
;
803 else if (test_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
))
804 return LE_AD_GENERAL
;
810 static u8
create_adv_data(struct hci_dev
*hdev
, u8
*ptr
)
812 u8 ad_len
= 0, flags
= 0;
814 flags
|= get_adv_discov_flags(hdev
);
816 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
817 flags
|= LE_AD_NO_BREDR
;
820 BT_DBG("adv flags 0x%02x", flags
);
830 if (hdev
->adv_tx_power
!= HCI_TX_POWER_INVALID
) {
832 ptr
[1] = EIR_TX_POWER
;
833 ptr
[2] = (u8
) hdev
->adv_tx_power
;
842 static void update_adv_data(struct hci_request
*req
)
844 struct hci_dev
*hdev
= req
->hdev
;
845 struct hci_cp_le_set_adv_data cp
;
848 if (!test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
))
851 memset(&cp
, 0, sizeof(cp
));
853 len
= create_adv_data(hdev
, cp
.data
);
855 if (hdev
->adv_data_len
== len
&&
856 memcmp(cp
.data
, hdev
->adv_data
, len
) == 0)
859 memcpy(hdev
->adv_data
, cp
.data
, sizeof(cp
.data
));
860 hdev
->adv_data_len
= len
;
864 hci_req_add(req
, HCI_OP_LE_SET_ADV_DATA
, sizeof(cp
), &cp
);
867 static void create_eir(struct hci_dev
*hdev
, u8
*data
)
872 name_len
= strlen(hdev
->dev_name
);
878 ptr
[1] = EIR_NAME_SHORT
;
880 ptr
[1] = EIR_NAME_COMPLETE
;
882 /* EIR Data length */
883 ptr
[0] = name_len
+ 1;
885 memcpy(ptr
+ 2, hdev
->dev_name
, name_len
);
887 ptr
+= (name_len
+ 2);
890 if (hdev
->inq_tx_power
!= HCI_TX_POWER_INVALID
) {
892 ptr
[1] = EIR_TX_POWER
;
893 ptr
[2] = (u8
) hdev
->inq_tx_power
;
898 if (hdev
->devid_source
> 0) {
900 ptr
[1] = EIR_DEVICE_ID
;
902 put_unaligned_le16(hdev
->devid_source
, ptr
+ 2);
903 put_unaligned_le16(hdev
->devid_vendor
, ptr
+ 4);
904 put_unaligned_le16(hdev
->devid_product
, ptr
+ 6);
905 put_unaligned_le16(hdev
->devid_version
, ptr
+ 8);
910 ptr
= create_uuid16_list(hdev
, ptr
, HCI_MAX_EIR_LENGTH
- (ptr
- data
));
911 ptr
= create_uuid32_list(hdev
, ptr
, HCI_MAX_EIR_LENGTH
- (ptr
- data
));
912 ptr
= create_uuid128_list(hdev
, ptr
, HCI_MAX_EIR_LENGTH
- (ptr
- data
));
915 static void update_eir(struct hci_request
*req
)
917 struct hci_dev
*hdev
= req
->hdev
;
918 struct hci_cp_write_eir cp
;
920 if (!hdev_is_powered(hdev
))
923 if (!lmp_ext_inq_capable(hdev
))
926 if (!test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
))
929 if (test_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
))
932 memset(&cp
, 0, sizeof(cp
));
934 create_eir(hdev
, cp
.data
);
936 if (memcmp(cp
.data
, hdev
->eir
, sizeof(cp
.data
)) == 0)
939 memcpy(hdev
->eir
, cp
.data
, sizeof(cp
.data
));
941 hci_req_add(req
, HCI_OP_WRITE_EIR
, sizeof(cp
), &cp
);
944 static u8
get_service_classes(struct hci_dev
*hdev
)
946 struct bt_uuid
*uuid
;
949 list_for_each_entry(uuid
, &hdev
->uuids
, list
)
950 val
|= uuid
->svc_hint
;
955 static void update_class(struct hci_request
*req
)
957 struct hci_dev
*hdev
= req
->hdev
;
960 BT_DBG("%s", hdev
->name
);
962 if (!hdev_is_powered(hdev
))
965 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
968 if (test_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
))
971 cod
[0] = hdev
->minor_class
;
972 cod
[1] = hdev
->major_class
;
973 cod
[2] = get_service_classes(hdev
);
975 if (test_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
))
978 if (memcmp(cod
, hdev
->dev_class
, 3) == 0)
981 hci_req_add(req
, HCI_OP_WRITE_CLASS_OF_DEV
, sizeof(cod
), cod
);
984 static bool get_connectable(struct hci_dev
*hdev
)
986 struct pending_cmd
*cmd
;
988 /* If there's a pending mgmt command the flag will not yet have
989 * it's final value, so check for this first.
991 cmd
= mgmt_pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
);
993 struct mgmt_mode
*cp
= cmd
->param
;
997 return test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
);
1000 static void enable_advertising(struct hci_request
*req
)
1002 struct hci_dev
*hdev
= req
->hdev
;
1003 struct hci_cp_le_set_adv_param cp
;
1004 u8 own_addr_type
, enable
= 0x01;
1007 /* Clear the HCI_ADVERTISING bit temporarily so that the
1008 * hci_update_random_address knows that it's safe to go ahead
1009 * and write a new random address. The flag will be set back on
1010 * as soon as the SET_ADV_ENABLE HCI command completes.
1012 clear_bit(HCI_ADVERTISING
, &hdev
->dev_flags
);
1014 connectable
= get_connectable(hdev
);
1016 /* Set require_privacy to true only when non-connectable
1017 * advertising is used. In that case it is fine to use a
1018 * non-resolvable private address.
1020 if (hci_update_random_address(req
, !connectable
, &own_addr_type
) < 0)
1023 memset(&cp
, 0, sizeof(cp
));
1024 cp
.min_interval
= cpu_to_le16(0x0800);
1025 cp
.max_interval
= cpu_to_le16(0x0800);
1026 cp
.type
= connectable
? LE_ADV_IND
: LE_ADV_NONCONN_IND
;
1027 cp
.own_address_type
= own_addr_type
;
1028 cp
.channel_map
= hdev
->le_adv_channel_map
;
1030 hci_req_add(req
, HCI_OP_LE_SET_ADV_PARAM
, sizeof(cp
), &cp
);
1032 hci_req_add(req
, HCI_OP_LE_SET_ADV_ENABLE
, sizeof(enable
), &enable
);
1035 static void disable_advertising(struct hci_request
*req
)
1039 hci_req_add(req
, HCI_OP_LE_SET_ADV_ENABLE
, sizeof(enable
), &enable
);
1042 static void service_cache_off(struct work_struct
*work
)
1044 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
1045 service_cache
.work
);
1046 struct hci_request req
;
1048 if (!test_and_clear_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
))
1051 hci_req_init(&req
, hdev
);
1058 hci_dev_unlock(hdev
);
1060 hci_req_run(&req
, NULL
);
1063 static void rpa_expired(struct work_struct
*work
)
1065 struct hci_dev
*hdev
= container_of(work
, struct hci_dev
,
1067 struct hci_request req
;
1071 set_bit(HCI_RPA_EXPIRED
, &hdev
->dev_flags
);
1073 if (!test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
) ||
1074 hci_conn_num(hdev
, LE_LINK
) > 0)
1077 /* The generation of a new RPA and programming it into the
1078 * controller happens in the enable_advertising() function.
1081 hci_req_init(&req
, hdev
);
1083 disable_advertising(&req
);
1084 enable_advertising(&req
);
1086 hci_req_run(&req
, NULL
);
1089 static void mgmt_init_hdev(struct sock
*sk
, struct hci_dev
*hdev
)
1091 if (test_and_set_bit(HCI_MGMT
, &hdev
->dev_flags
))
1094 INIT_DELAYED_WORK(&hdev
->service_cache
, service_cache_off
);
1095 INIT_DELAYED_WORK(&hdev
->rpa_expired
, rpa_expired
);
1097 /* Non-mgmt controlled devices get this bit set
1098 * implicitly so that pairing works for them, however
1099 * for mgmt we require user-space to explicitly enable
1102 clear_bit(HCI_PAIRABLE
, &hdev
->dev_flags
);
1105 static int read_controller_info(struct sock
*sk
, struct hci_dev
*hdev
,
1106 void *data
, u16 data_len
)
1108 struct mgmt_rp_read_info rp
;
1110 BT_DBG("sock %p %s", sk
, hdev
->name
);
1114 memset(&rp
, 0, sizeof(rp
));
1116 bacpy(&rp
.bdaddr
, &hdev
->bdaddr
);
1118 rp
.version
= hdev
->hci_ver
;
1119 rp
.manufacturer
= cpu_to_le16(hdev
->manufacturer
);
1121 rp
.supported_settings
= cpu_to_le32(get_supported_settings(hdev
));
1122 rp
.current_settings
= cpu_to_le32(get_current_settings(hdev
));
1124 memcpy(rp
.dev_class
, hdev
->dev_class
, 3);
1126 memcpy(rp
.name
, hdev
->dev_name
, sizeof(hdev
->dev_name
));
1127 memcpy(rp
.short_name
, hdev
->short_name
, sizeof(hdev
->short_name
));
1129 hci_dev_unlock(hdev
);
1131 return cmd_complete(sk
, hdev
->id
, MGMT_OP_READ_INFO
, 0, &rp
,
1135 static void mgmt_pending_free(struct pending_cmd
*cmd
)
1142 static struct pending_cmd
*mgmt_pending_add(struct sock
*sk
, u16 opcode
,
1143 struct hci_dev
*hdev
, void *data
,
1146 struct pending_cmd
*cmd
;
1148 cmd
= kzalloc(sizeof(*cmd
), GFP_KERNEL
);
1152 cmd
->opcode
= opcode
;
1153 cmd
->index
= hdev
->id
;
1155 cmd
->param
= kmalloc(len
, GFP_KERNEL
);
1162 memcpy(cmd
->param
, data
, len
);
1167 list_add(&cmd
->list
, &hdev
->mgmt_pending
);
1172 static void mgmt_pending_foreach(u16 opcode
, struct hci_dev
*hdev
,
1173 void (*cb
)(struct pending_cmd
*cmd
,
1177 struct pending_cmd
*cmd
, *tmp
;
1179 list_for_each_entry_safe(cmd
, tmp
, &hdev
->mgmt_pending
, list
) {
1180 if (opcode
> 0 && cmd
->opcode
!= opcode
)
1187 static void mgmt_pending_remove(struct pending_cmd
*cmd
)
1189 list_del(&cmd
->list
);
1190 mgmt_pending_free(cmd
);
1193 static int send_settings_rsp(struct sock
*sk
, u16 opcode
, struct hci_dev
*hdev
)
1195 __le32 settings
= cpu_to_le32(get_current_settings(hdev
));
1197 return cmd_complete(sk
, hdev
->id
, opcode
, 0, &settings
,
1201 static void clean_up_hci_complete(struct hci_dev
*hdev
, u8 status
)
1203 BT_DBG("%s status 0x%02x", hdev
->name
, status
);
1205 if (hci_conn_count(hdev
) == 0) {
1206 cancel_delayed_work(&hdev
->power_off
);
1207 queue_work(hdev
->req_workqueue
, &hdev
->power_off
.work
);
1211 static void hci_stop_discovery(struct hci_request
*req
)
1213 struct hci_dev
*hdev
= req
->hdev
;
1214 struct hci_cp_remote_name_req_cancel cp
;
1215 struct inquiry_entry
*e
;
1217 switch (hdev
->discovery
.state
) {
1218 case DISCOVERY_FINDING
:
1219 if (test_bit(HCI_INQUIRY
, &hdev
->flags
)) {
1220 hci_req_add(req
, HCI_OP_INQUIRY_CANCEL
, 0, NULL
);
1222 cancel_delayed_work(&hdev
->le_scan_disable
);
1223 hci_req_add_le_scan_disable(req
);
1228 case DISCOVERY_RESOLVING
:
1229 e
= hci_inquiry_cache_lookup_resolve(hdev
, BDADDR_ANY
,
1234 bacpy(&cp
.bdaddr
, &e
->data
.bdaddr
);
1235 hci_req_add(req
, HCI_OP_REMOTE_NAME_REQ_CANCEL
, sizeof(cp
),
1241 /* Passive scanning */
1242 if (test_bit(HCI_LE_SCAN
, &hdev
->dev_flags
))
1243 hci_req_add_le_scan_disable(req
);
1248 static int clean_up_hci_state(struct hci_dev
*hdev
)
1250 struct hci_request req
;
1251 struct hci_conn
*conn
;
1253 hci_req_init(&req
, hdev
);
1255 if (test_bit(HCI_ISCAN
, &hdev
->flags
) ||
1256 test_bit(HCI_PSCAN
, &hdev
->flags
)) {
1258 hci_req_add(&req
, HCI_OP_WRITE_SCAN_ENABLE
, 1, &scan
);
1261 if (test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
))
1262 disable_advertising(&req
);
1264 hci_stop_discovery(&req
);
1266 list_for_each_entry(conn
, &hdev
->conn_hash
.list
, list
) {
1267 struct hci_cp_disconnect dc
;
1268 struct hci_cp_reject_conn_req rej
;
1270 switch (conn
->state
) {
1273 dc
.handle
= cpu_to_le16(conn
->handle
);
1274 dc
.reason
= 0x15; /* Terminated due to Power Off */
1275 hci_req_add(&req
, HCI_OP_DISCONNECT
, sizeof(dc
), &dc
);
1278 if (conn
->type
== LE_LINK
)
1279 hci_req_add(&req
, HCI_OP_LE_CREATE_CONN_CANCEL
,
1281 else if (conn
->type
== ACL_LINK
)
1282 hci_req_add(&req
, HCI_OP_CREATE_CONN_CANCEL
,
1286 bacpy(&rej
.bdaddr
, &conn
->dst
);
1287 rej
.reason
= 0x15; /* Terminated due to Power Off */
1288 if (conn
->type
== ACL_LINK
)
1289 hci_req_add(&req
, HCI_OP_REJECT_CONN_REQ
,
1291 else if (conn
->type
== SCO_LINK
)
1292 hci_req_add(&req
, HCI_OP_REJECT_SYNC_CONN_REQ
,
1298 return hci_req_run(&req
, clean_up_hci_complete
);
1301 static int set_powered(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1304 struct mgmt_mode
*cp
= data
;
1305 struct pending_cmd
*cmd
;
1308 BT_DBG("request for %s", hdev
->name
);
1310 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1311 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_POWERED
,
1312 MGMT_STATUS_INVALID_PARAMS
);
1316 if (mgmt_pending_find(MGMT_OP_SET_POWERED
, hdev
)) {
1317 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_POWERED
,
1322 if (test_and_clear_bit(HCI_AUTO_OFF
, &hdev
->dev_flags
)) {
1323 cancel_delayed_work(&hdev
->power_off
);
1326 mgmt_pending_add(sk
, MGMT_OP_SET_POWERED
, hdev
,
1328 err
= mgmt_powered(hdev
, 1);
1333 if (!!cp
->val
== hdev_is_powered(hdev
)) {
1334 err
= send_settings_rsp(sk
, MGMT_OP_SET_POWERED
, hdev
);
1338 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_POWERED
, hdev
, data
, len
);
1345 queue_work(hdev
->req_workqueue
, &hdev
->power_on
);
1348 /* Disconnect connections, stop scans, etc */
1349 err
= clean_up_hci_state(hdev
);
1351 queue_delayed_work(hdev
->req_workqueue
, &hdev
->power_off
,
1352 HCI_POWER_OFF_TIMEOUT
);
1354 /* ENODATA means there were no HCI commands queued */
1355 if (err
== -ENODATA
) {
1356 cancel_delayed_work(&hdev
->power_off
);
1357 queue_work(hdev
->req_workqueue
, &hdev
->power_off
.work
);
1363 hci_dev_unlock(hdev
);
1367 static int mgmt_event(u16 event
, struct hci_dev
*hdev
, void *data
, u16 data_len
,
1368 struct sock
*skip_sk
)
1370 struct sk_buff
*skb
;
1371 struct mgmt_hdr
*hdr
;
1373 skb
= alloc_skb(sizeof(*hdr
) + data_len
, GFP_KERNEL
);
1377 hdr
= (void *) skb_put(skb
, sizeof(*hdr
));
1378 hdr
->opcode
= cpu_to_le16(event
);
1380 hdr
->index
= cpu_to_le16(hdev
->id
);
1382 hdr
->index
= cpu_to_le16(MGMT_INDEX_NONE
);
1383 hdr
->len
= cpu_to_le16(data_len
);
1386 memcpy(skb_put(skb
, data_len
), data
, data_len
);
1389 __net_timestamp(skb
);
1391 hci_send_to_control(skb
, skip_sk
);
1397 static int new_settings(struct hci_dev
*hdev
, struct sock
*skip
)
1401 ev
= cpu_to_le32(get_current_settings(hdev
));
1403 return mgmt_event(MGMT_EV_NEW_SETTINGS
, hdev
, &ev
, sizeof(ev
), skip
);
1408 struct hci_dev
*hdev
;
1412 static void settings_rsp(struct pending_cmd
*cmd
, void *data
)
1414 struct cmd_lookup
*match
= data
;
1416 send_settings_rsp(cmd
->sk
, cmd
->opcode
, match
->hdev
);
1418 list_del(&cmd
->list
);
1420 if (match
->sk
== NULL
) {
1421 match
->sk
= cmd
->sk
;
1422 sock_hold(match
->sk
);
1425 mgmt_pending_free(cmd
);
1428 static void cmd_status_rsp(struct pending_cmd
*cmd
, void *data
)
1432 cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, *status
);
1433 mgmt_pending_remove(cmd
);
1436 static u8
mgmt_bredr_support(struct hci_dev
*hdev
)
1438 if (!lmp_bredr_capable(hdev
))
1439 return MGMT_STATUS_NOT_SUPPORTED
;
1440 else if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
1441 return MGMT_STATUS_REJECTED
;
1443 return MGMT_STATUS_SUCCESS
;
1446 static u8
mgmt_le_support(struct hci_dev
*hdev
)
1448 if (!lmp_le_capable(hdev
))
1449 return MGMT_STATUS_NOT_SUPPORTED
;
1450 else if (!test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
))
1451 return MGMT_STATUS_REJECTED
;
1453 return MGMT_STATUS_SUCCESS
;
1456 static void set_discoverable_complete(struct hci_dev
*hdev
, u8 status
)
1458 struct pending_cmd
*cmd
;
1459 struct mgmt_mode
*cp
;
1460 struct hci_request req
;
1463 BT_DBG("status 0x%02x", status
);
1467 cmd
= mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
);
1472 u8 mgmt_err
= mgmt_status(status
);
1473 cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_err
);
1474 clear_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
1480 changed
= !test_and_set_bit(HCI_DISCOVERABLE
,
1483 if (hdev
->discov_timeout
> 0) {
1484 int to
= msecs_to_jiffies(hdev
->discov_timeout
* 1000);
1485 queue_delayed_work(hdev
->workqueue
, &hdev
->discov_off
,
1489 changed
= test_and_clear_bit(HCI_DISCOVERABLE
,
1493 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
);
1496 new_settings(hdev
, cmd
->sk
);
1498 /* When the discoverable mode gets changed, make sure
1499 * that class of device has the limited discoverable
1500 * bit correctly set.
1502 hci_req_init(&req
, hdev
);
1504 hci_req_run(&req
, NULL
);
1507 mgmt_pending_remove(cmd
);
1510 hci_dev_unlock(hdev
);
1513 static int set_discoverable(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1516 struct mgmt_cp_set_discoverable
*cp
= data
;
1517 struct pending_cmd
*cmd
;
1518 struct hci_request req
;
1523 BT_DBG("request for %s", hdev
->name
);
1525 if (!test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
) &&
1526 !test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
1527 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1528 MGMT_STATUS_REJECTED
);
1530 if (cp
->val
!= 0x00 && cp
->val
!= 0x01 && cp
->val
!= 0x02)
1531 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1532 MGMT_STATUS_INVALID_PARAMS
);
1534 timeout
= __le16_to_cpu(cp
->timeout
);
1536 /* Disabling discoverable requires that no timeout is set,
1537 * and enabling limited discoverable requires a timeout.
1539 if ((cp
->val
== 0x00 && timeout
> 0) ||
1540 (cp
->val
== 0x02 && timeout
== 0))
1541 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1542 MGMT_STATUS_INVALID_PARAMS
);
1546 if (!hdev_is_powered(hdev
) && timeout
> 0) {
1547 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1548 MGMT_STATUS_NOT_POWERED
);
1552 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
) ||
1553 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
)) {
1554 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1559 if (!test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
)) {
1560 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DISCOVERABLE
,
1561 MGMT_STATUS_REJECTED
);
1565 if (!hdev_is_powered(hdev
)) {
1566 bool changed
= false;
1568 /* Setting limited discoverable when powered off is
1569 * not a valid operation since it requires a timeout
1570 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1572 if (!!cp
->val
!= test_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
)) {
1573 change_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
1577 err
= send_settings_rsp(sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
);
1582 err
= new_settings(hdev
, sk
);
1587 /* If the current mode is the same, then just update the timeout
1588 * value with the new value. And if only the timeout gets updated,
1589 * then no need for any HCI transactions.
1591 if (!!cp
->val
== test_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
) &&
1592 (cp
->val
== 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE
,
1593 &hdev
->dev_flags
)) {
1594 cancel_delayed_work(&hdev
->discov_off
);
1595 hdev
->discov_timeout
= timeout
;
1597 if (cp
->val
&& hdev
->discov_timeout
> 0) {
1598 int to
= msecs_to_jiffies(hdev
->discov_timeout
* 1000);
1599 queue_delayed_work(hdev
->workqueue
, &hdev
->discov_off
,
1603 err
= send_settings_rsp(sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
);
1607 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_DISCOVERABLE
, hdev
, data
, len
);
1613 /* Cancel any potential discoverable timeout that might be
1614 * still active and store new timeout value. The arming of
1615 * the timeout happens in the complete handler.
1617 cancel_delayed_work(&hdev
->discov_off
);
1618 hdev
->discov_timeout
= timeout
;
1620 /* Limited discoverable mode */
1621 if (cp
->val
== 0x02)
1622 set_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
1624 clear_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
1626 hci_req_init(&req
, hdev
);
1628 /* The procedure for LE-only controllers is much simpler - just
1629 * update the advertising data.
1631 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
1637 struct hci_cp_write_current_iac_lap hci_cp
;
1639 if (cp
->val
== 0x02) {
1640 /* Limited discoverable mode */
1641 hci_cp
.num_iac
= min_t(u8
, hdev
->num_iac
, 2);
1642 hci_cp
.iac_lap
[0] = 0x00; /* LIAC */
1643 hci_cp
.iac_lap
[1] = 0x8b;
1644 hci_cp
.iac_lap
[2] = 0x9e;
1645 hci_cp
.iac_lap
[3] = 0x33; /* GIAC */
1646 hci_cp
.iac_lap
[4] = 0x8b;
1647 hci_cp
.iac_lap
[5] = 0x9e;
1649 /* General discoverable mode */
1651 hci_cp
.iac_lap
[0] = 0x33; /* GIAC */
1652 hci_cp
.iac_lap
[1] = 0x8b;
1653 hci_cp
.iac_lap
[2] = 0x9e;
1656 hci_req_add(&req
, HCI_OP_WRITE_CURRENT_IAC_LAP
,
1657 (hci_cp
.num_iac
* 3) + 1, &hci_cp
);
1659 scan
|= SCAN_INQUIRY
;
1661 clear_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
1664 hci_req_add(&req
, HCI_OP_WRITE_SCAN_ENABLE
, sizeof(scan
), &scan
);
1667 update_adv_data(&req
);
1669 err
= hci_req_run(&req
, set_discoverable_complete
);
1671 mgmt_pending_remove(cmd
);
1674 hci_dev_unlock(hdev
);
1678 static void write_fast_connectable(struct hci_request
*req
, bool enable
)
1680 struct hci_dev
*hdev
= req
->hdev
;
1681 struct hci_cp_write_page_scan_activity acp
;
1684 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
1687 if (hdev
->hci_ver
< BLUETOOTH_VER_1_2
)
1691 type
= PAGE_SCAN_TYPE_INTERLACED
;
1693 /* 160 msec page scan interval */
1694 acp
.interval
= cpu_to_le16(0x0100);
1696 type
= PAGE_SCAN_TYPE_STANDARD
; /* default */
1698 /* default 1.28 sec page scan */
1699 acp
.interval
= cpu_to_le16(0x0800);
1702 acp
.window
= cpu_to_le16(0x0012);
1704 if (__cpu_to_le16(hdev
->page_scan_interval
) != acp
.interval
||
1705 __cpu_to_le16(hdev
->page_scan_window
) != acp
.window
)
1706 hci_req_add(req
, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY
,
1709 if (hdev
->page_scan_type
!= type
)
1710 hci_req_add(req
, HCI_OP_WRITE_PAGE_SCAN_TYPE
, 1, &type
);
1713 static void set_connectable_complete(struct hci_dev
*hdev
, u8 status
)
1715 struct pending_cmd
*cmd
;
1716 struct mgmt_mode
*cp
;
1719 BT_DBG("status 0x%02x", status
);
1723 cmd
= mgmt_pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
);
1728 u8 mgmt_err
= mgmt_status(status
);
1729 cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_err
);
1735 changed
= !test_and_set_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
);
1737 changed
= test_and_clear_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
);
1739 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_CONNECTABLE
, hdev
);
1742 new_settings(hdev
, cmd
->sk
);
1745 mgmt_pending_remove(cmd
);
1748 hci_dev_unlock(hdev
);
1751 static int set_connectable_update_settings(struct hci_dev
*hdev
,
1752 struct sock
*sk
, u8 val
)
1754 bool changed
= false;
1757 if (!!val
!= test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
))
1761 set_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
);
1763 clear_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
);
1764 clear_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
1767 err
= send_settings_rsp(sk
, MGMT_OP_SET_CONNECTABLE
, hdev
);
1772 return new_settings(hdev
, sk
);
1777 static int set_connectable(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1780 struct mgmt_mode
*cp
= data
;
1781 struct pending_cmd
*cmd
;
1782 struct hci_request req
;
1786 BT_DBG("request for %s", hdev
->name
);
1788 if (!test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
) &&
1789 !test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
1790 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_CONNECTABLE
,
1791 MGMT_STATUS_REJECTED
);
1793 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1794 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_CONNECTABLE
,
1795 MGMT_STATUS_INVALID_PARAMS
);
1799 if (!hdev_is_powered(hdev
)) {
1800 err
= set_connectable_update_settings(hdev
, sk
, cp
->val
);
1804 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
) ||
1805 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
)) {
1806 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_CONNECTABLE
,
1811 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_CONNECTABLE
, hdev
, data
, len
);
1817 hci_req_init(&req
, hdev
);
1819 /* If BR/EDR is not enabled and we disable advertising as a
1820 * by-product of disabling connectable, we need to update the
1821 * advertising flags.
1823 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
)) {
1825 clear_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
1826 clear_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
1828 update_adv_data(&req
);
1829 } else if (cp
->val
!= test_bit(HCI_PSCAN
, &hdev
->flags
)) {
1835 if (test_bit(HCI_ISCAN
, &hdev
->flags
) &&
1836 hdev
->discov_timeout
> 0)
1837 cancel_delayed_work(&hdev
->discov_off
);
1840 hci_req_add(&req
, HCI_OP_WRITE_SCAN_ENABLE
, 1, &scan
);
1843 /* If we're going from non-connectable to connectable or
1844 * vice-versa when fast connectable is enabled ensure that fast
1845 * connectable gets disabled. write_fast_connectable won't do
1846 * anything if the page scan parameters are already what they
1849 if (cp
->val
|| test_bit(HCI_FAST_CONNECTABLE
, &hdev
->dev_flags
))
1850 write_fast_connectable(&req
, false);
1852 if (test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
) &&
1853 hci_conn_num(hdev
, LE_LINK
) == 0) {
1854 disable_advertising(&req
);
1855 enable_advertising(&req
);
1858 err
= hci_req_run(&req
, set_connectable_complete
);
1860 mgmt_pending_remove(cmd
);
1861 if (err
== -ENODATA
)
1862 err
= set_connectable_update_settings(hdev
, sk
,
1868 hci_dev_unlock(hdev
);
1872 static int set_pairable(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1875 struct mgmt_mode
*cp
= data
;
1879 BT_DBG("request for %s", hdev
->name
);
1881 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1882 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PAIRABLE
,
1883 MGMT_STATUS_INVALID_PARAMS
);
1888 changed
= !test_and_set_bit(HCI_PAIRABLE
, &hdev
->dev_flags
);
1890 changed
= test_and_clear_bit(HCI_PAIRABLE
, &hdev
->dev_flags
);
1892 err
= send_settings_rsp(sk
, MGMT_OP_SET_PAIRABLE
, hdev
);
1897 err
= new_settings(hdev
, sk
);
1900 hci_dev_unlock(hdev
);
1904 static int set_link_security(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
1907 struct mgmt_mode
*cp
= data
;
1908 struct pending_cmd
*cmd
;
1912 BT_DBG("request for %s", hdev
->name
);
1914 status
= mgmt_bredr_support(hdev
);
1916 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LINK_SECURITY
,
1919 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1920 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LINK_SECURITY
,
1921 MGMT_STATUS_INVALID_PARAMS
);
1925 if (!hdev_is_powered(hdev
)) {
1926 bool changed
= false;
1928 if (!!cp
->val
!= test_bit(HCI_LINK_SECURITY
,
1929 &hdev
->dev_flags
)) {
1930 change_bit(HCI_LINK_SECURITY
, &hdev
->dev_flags
);
1934 err
= send_settings_rsp(sk
, MGMT_OP_SET_LINK_SECURITY
, hdev
);
1939 err
= new_settings(hdev
, sk
);
1944 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY
, hdev
)) {
1945 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LINK_SECURITY
,
1952 if (test_bit(HCI_AUTH
, &hdev
->flags
) == val
) {
1953 err
= send_settings_rsp(sk
, MGMT_OP_SET_LINK_SECURITY
, hdev
);
1957 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_LINK_SECURITY
, hdev
, data
, len
);
1963 err
= hci_send_cmd(hdev
, HCI_OP_WRITE_AUTH_ENABLE
, sizeof(val
), &val
);
1965 mgmt_pending_remove(cmd
);
1970 hci_dev_unlock(hdev
);
1974 static int set_ssp(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
1976 struct mgmt_mode
*cp
= data
;
1977 struct pending_cmd
*cmd
;
1981 BT_DBG("request for %s", hdev
->name
);
1983 status
= mgmt_bredr_support(hdev
);
1985 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
, status
);
1987 if (!lmp_ssp_capable(hdev
))
1988 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
,
1989 MGMT_STATUS_NOT_SUPPORTED
);
1991 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
1992 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
,
1993 MGMT_STATUS_INVALID_PARAMS
);
1997 if (!hdev_is_powered(hdev
)) {
2001 changed
= !test_and_set_bit(HCI_SSP_ENABLED
,
2004 changed
= test_and_clear_bit(HCI_SSP_ENABLED
,
2007 changed
= test_and_clear_bit(HCI_HS_ENABLED
,
2010 clear_bit(HCI_HS_ENABLED
, &hdev
->dev_flags
);
2013 err
= send_settings_rsp(sk
, MGMT_OP_SET_SSP
, hdev
);
2018 err
= new_settings(hdev
, sk
);
2023 if (mgmt_pending_find(MGMT_OP_SET_SSP
, hdev
) ||
2024 mgmt_pending_find(MGMT_OP_SET_HS
, hdev
)) {
2025 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SSP
,
2030 if (!!cp
->val
== test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
)) {
2031 err
= send_settings_rsp(sk
, MGMT_OP_SET_SSP
, hdev
);
2035 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_SSP
, hdev
, data
, len
);
2041 if (!cp
->val
&& test_bit(HCI_USE_DEBUG_KEYS
, &hdev
->dev_flags
))
2042 hci_send_cmd(hdev
, HCI_OP_WRITE_SSP_DEBUG_MODE
,
2043 sizeof(cp
->val
), &cp
->val
);
2045 err
= hci_send_cmd(hdev
, HCI_OP_WRITE_SSP_MODE
, 1, &cp
->val
);
2047 mgmt_pending_remove(cmd
);
2052 hci_dev_unlock(hdev
);
2056 static int set_hs(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
2058 struct mgmt_mode
*cp
= data
;
2063 BT_DBG("request for %s", hdev
->name
);
2065 status
= mgmt_bredr_support(hdev
);
2067 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
, status
);
2069 if (!lmp_ssp_capable(hdev
))
2070 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
2071 MGMT_STATUS_NOT_SUPPORTED
);
2073 if (!test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
))
2074 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
2075 MGMT_STATUS_REJECTED
);
2077 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
2078 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
2079 MGMT_STATUS_INVALID_PARAMS
);
2084 changed
= !test_and_set_bit(HCI_HS_ENABLED
, &hdev
->dev_flags
);
2086 if (hdev_is_powered(hdev
)) {
2087 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_HS
,
2088 MGMT_STATUS_REJECTED
);
2092 changed
= test_and_clear_bit(HCI_HS_ENABLED
, &hdev
->dev_flags
);
2095 err
= send_settings_rsp(sk
, MGMT_OP_SET_HS
, hdev
);
2100 err
= new_settings(hdev
, sk
);
2103 hci_dev_unlock(hdev
);
2107 static void le_enable_complete(struct hci_dev
*hdev
, u8 status
)
2109 struct cmd_lookup match
= { NULL
, hdev
};
2112 u8 mgmt_err
= mgmt_status(status
);
2114 mgmt_pending_foreach(MGMT_OP_SET_LE
, hdev
, cmd_status_rsp
,
2119 mgmt_pending_foreach(MGMT_OP_SET_LE
, hdev
, settings_rsp
, &match
);
2121 new_settings(hdev
, match
.sk
);
2126 /* Make sure the controller has a good default for
2127 * advertising data. Restrict the update to when LE
2128 * has actually been enabled. During power on, the
2129 * update in powered_update_hci will take care of it.
2131 if (test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
)) {
2132 struct hci_request req
;
2136 hci_req_init(&req
, hdev
);
2137 update_adv_data(&req
);
2138 update_scan_rsp_data(&req
);
2139 hci_req_run(&req
, NULL
);
2141 hci_dev_unlock(hdev
);
2145 static int set_le(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
2147 struct mgmt_mode
*cp
= data
;
2148 struct hci_cp_write_le_host_supported hci_cp
;
2149 struct pending_cmd
*cmd
;
2150 struct hci_request req
;
2154 BT_DBG("request for %s", hdev
->name
);
2156 if (!lmp_le_capable(hdev
))
2157 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
2158 MGMT_STATUS_NOT_SUPPORTED
);
2160 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
2161 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
2162 MGMT_STATUS_INVALID_PARAMS
);
2164 /* LE-only devices do not allow toggling LE on/off */
2165 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
2166 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
2167 MGMT_STATUS_REJECTED
);
2172 enabled
= lmp_host_le_capable(hdev
);
2174 if (!hdev_is_powered(hdev
) || val
== enabled
) {
2175 bool changed
= false;
2177 if (val
!= test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
)) {
2178 change_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
);
2182 if (!val
&& test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
)) {
2183 clear_bit(HCI_ADVERTISING
, &hdev
->dev_flags
);
2187 err
= send_settings_rsp(sk
, MGMT_OP_SET_LE
, hdev
);
2192 err
= new_settings(hdev
, sk
);
2197 if (mgmt_pending_find(MGMT_OP_SET_LE
, hdev
) ||
2198 mgmt_pending_find(MGMT_OP_SET_ADVERTISING
, hdev
)) {
2199 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_LE
,
2204 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_LE
, hdev
, data
, len
);
2210 hci_req_init(&req
, hdev
);
2212 memset(&hci_cp
, 0, sizeof(hci_cp
));
2216 hci_cp
.simul
= lmp_le_br_capable(hdev
);
2218 if (test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
))
2219 disable_advertising(&req
);
2222 hci_req_add(&req
, HCI_OP_WRITE_LE_HOST_SUPPORTED
, sizeof(hci_cp
),
2225 err
= hci_req_run(&req
, le_enable_complete
);
2227 mgmt_pending_remove(cmd
);
2230 hci_dev_unlock(hdev
);
2234 /* This is a helper function to test for pending mgmt commands that can
2235 * cause CoD or EIR HCI commands. We can only allow one such pending
2236 * mgmt command at a time since otherwise we cannot easily track what
2237 * the current values are, will be, and based on that calculate if a new
2238 * HCI command needs to be sent and if yes with what value.
2240 static bool pending_eir_or_class(struct hci_dev
*hdev
)
2242 struct pending_cmd
*cmd
;
2244 list_for_each_entry(cmd
, &hdev
->mgmt_pending
, list
) {
2245 switch (cmd
->opcode
) {
2246 case MGMT_OP_ADD_UUID
:
2247 case MGMT_OP_REMOVE_UUID
:
2248 case MGMT_OP_SET_DEV_CLASS
:
2249 case MGMT_OP_SET_POWERED
:
2257 static const u8 bluetooth_base_uuid
[] = {
2258 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2259 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
2262 static u8
get_uuid_size(const u8
*uuid
)
2266 if (memcmp(uuid
, bluetooth_base_uuid
, 12))
2269 val
= get_unaligned_le32(&uuid
[12]);
2276 static void mgmt_class_complete(struct hci_dev
*hdev
, u16 mgmt_op
, u8 status
)
2278 struct pending_cmd
*cmd
;
2282 cmd
= mgmt_pending_find(mgmt_op
, hdev
);
2286 cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_status(status
),
2287 hdev
->dev_class
, 3);
2289 mgmt_pending_remove(cmd
);
2292 hci_dev_unlock(hdev
);
2295 static void add_uuid_complete(struct hci_dev
*hdev
, u8 status
)
2297 BT_DBG("status 0x%02x", status
);
2299 mgmt_class_complete(hdev
, MGMT_OP_ADD_UUID
, status
);
2302 static int add_uuid(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
2304 struct mgmt_cp_add_uuid
*cp
= data
;
2305 struct pending_cmd
*cmd
;
2306 struct hci_request req
;
2307 struct bt_uuid
*uuid
;
2310 BT_DBG("request for %s", hdev
->name
);
2314 if (pending_eir_or_class(hdev
)) {
2315 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_UUID
,
2320 uuid
= kmalloc(sizeof(*uuid
), GFP_KERNEL
);
2326 memcpy(uuid
->uuid
, cp
->uuid
, 16);
2327 uuid
->svc_hint
= cp
->svc_hint
;
2328 uuid
->size
= get_uuid_size(cp
->uuid
);
2330 list_add_tail(&uuid
->list
, &hdev
->uuids
);
2332 hci_req_init(&req
, hdev
);
2337 err
= hci_req_run(&req
, add_uuid_complete
);
2339 if (err
!= -ENODATA
)
2342 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_UUID
, 0,
2343 hdev
->dev_class
, 3);
2347 cmd
= mgmt_pending_add(sk
, MGMT_OP_ADD_UUID
, hdev
, data
, len
);
2356 hci_dev_unlock(hdev
);
2360 static bool enable_service_cache(struct hci_dev
*hdev
)
2362 if (!hdev_is_powered(hdev
))
2365 if (!test_and_set_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
)) {
2366 queue_delayed_work(hdev
->workqueue
, &hdev
->service_cache
,
2374 static void remove_uuid_complete(struct hci_dev
*hdev
, u8 status
)
2376 BT_DBG("status 0x%02x", status
);
2378 mgmt_class_complete(hdev
, MGMT_OP_REMOVE_UUID
, status
);
2381 static int remove_uuid(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2384 struct mgmt_cp_remove_uuid
*cp
= data
;
2385 struct pending_cmd
*cmd
;
2386 struct bt_uuid
*match
, *tmp
;
2387 u8 bt_uuid_any
[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2388 struct hci_request req
;
2391 BT_DBG("request for %s", hdev
->name
);
2395 if (pending_eir_or_class(hdev
)) {
2396 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
,
2401 if (memcmp(cp
->uuid
, bt_uuid_any
, 16) == 0) {
2402 hci_uuids_clear(hdev
);
2404 if (enable_service_cache(hdev
)) {
2405 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
,
2406 0, hdev
->dev_class
, 3);
2415 list_for_each_entry_safe(match
, tmp
, &hdev
->uuids
, list
) {
2416 if (memcmp(match
->uuid
, cp
->uuid
, 16) != 0)
2419 list_del(&match
->list
);
2425 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
,
2426 MGMT_STATUS_INVALID_PARAMS
);
2431 hci_req_init(&req
, hdev
);
2436 err
= hci_req_run(&req
, remove_uuid_complete
);
2438 if (err
!= -ENODATA
)
2441 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_UUID
, 0,
2442 hdev
->dev_class
, 3);
2446 cmd
= mgmt_pending_add(sk
, MGMT_OP_REMOVE_UUID
, hdev
, data
, len
);
2455 hci_dev_unlock(hdev
);
2459 static void set_class_complete(struct hci_dev
*hdev
, u8 status
)
2461 BT_DBG("status 0x%02x", status
);
2463 mgmt_class_complete(hdev
, MGMT_OP_SET_DEV_CLASS
, status
);
2466 static int set_dev_class(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2469 struct mgmt_cp_set_dev_class
*cp
= data
;
2470 struct pending_cmd
*cmd
;
2471 struct hci_request req
;
2474 BT_DBG("request for %s", hdev
->name
);
2476 if (!lmp_bredr_capable(hdev
))
2477 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
,
2478 MGMT_STATUS_NOT_SUPPORTED
);
2482 if (pending_eir_or_class(hdev
)) {
2483 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
,
2488 if ((cp
->minor
& 0x03) != 0 || (cp
->major
& 0xe0) != 0) {
2489 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
,
2490 MGMT_STATUS_INVALID_PARAMS
);
2494 hdev
->major_class
= cp
->major
;
2495 hdev
->minor_class
= cp
->minor
;
2497 if (!hdev_is_powered(hdev
)) {
2498 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
, 0,
2499 hdev
->dev_class
, 3);
2503 hci_req_init(&req
, hdev
);
2505 if (test_and_clear_bit(HCI_SERVICE_CACHE
, &hdev
->dev_flags
)) {
2506 hci_dev_unlock(hdev
);
2507 cancel_delayed_work_sync(&hdev
->service_cache
);
2514 err
= hci_req_run(&req
, set_class_complete
);
2516 if (err
!= -ENODATA
)
2519 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_DEV_CLASS
, 0,
2520 hdev
->dev_class
, 3);
2524 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_DEV_CLASS
, hdev
, data
, len
);
2533 hci_dev_unlock(hdev
);
2537 static int load_link_keys(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2540 struct mgmt_cp_load_link_keys
*cp
= data
;
2541 const u16 max_key_count
= ((U16_MAX
- sizeof(*cp
)) /
2542 sizeof(struct mgmt_link_key_info
));
2543 u16 key_count
, expected_len
;
2547 BT_DBG("request for %s", hdev
->name
);
2549 if (!lmp_bredr_capable(hdev
))
2550 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2551 MGMT_STATUS_NOT_SUPPORTED
);
2553 key_count
= __le16_to_cpu(cp
->key_count
);
2554 if (key_count
> max_key_count
) {
2555 BT_ERR("load_link_keys: too big key_count value %u",
2557 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2558 MGMT_STATUS_INVALID_PARAMS
);
2561 expected_len
= sizeof(*cp
) + key_count
*
2562 sizeof(struct mgmt_link_key_info
);
2563 if (expected_len
!= len
) {
2564 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2566 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2567 MGMT_STATUS_INVALID_PARAMS
);
2570 if (cp
->debug_keys
!= 0x00 && cp
->debug_keys
!= 0x01)
2571 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2572 MGMT_STATUS_INVALID_PARAMS
);
2574 BT_DBG("%s debug_keys %u key_count %u", hdev
->name
, cp
->debug_keys
,
2577 for (i
= 0; i
< key_count
; i
++) {
2578 struct mgmt_link_key_info
*key
= &cp
->keys
[i
];
2580 if (key
->addr
.type
!= BDADDR_BREDR
|| key
->type
> 0x08)
2581 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
,
2582 MGMT_STATUS_INVALID_PARAMS
);
2587 hci_link_keys_clear(hdev
);
2590 changed
= !test_and_set_bit(HCI_KEEP_DEBUG_KEYS
,
2593 changed
= test_and_clear_bit(HCI_KEEP_DEBUG_KEYS
,
2597 new_settings(hdev
, NULL
);
2599 for (i
= 0; i
< key_count
; i
++) {
2600 struct mgmt_link_key_info
*key
= &cp
->keys
[i
];
2602 /* Always ignore debug keys and require a new pairing if
2603 * the user wants to use them.
2605 if (key
->type
== HCI_LK_DEBUG_COMBINATION
)
2608 hci_add_link_key(hdev
, NULL
, &key
->addr
.bdaddr
, key
->val
,
2609 key
->type
, key
->pin_len
, NULL
);
2612 cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_LINK_KEYS
, 0, NULL
, 0);
2614 hci_dev_unlock(hdev
);
2619 static int device_unpaired(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
2620 u8 addr_type
, struct sock
*skip_sk
)
2622 struct mgmt_ev_device_unpaired ev
;
2624 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
2625 ev
.addr
.type
= addr_type
;
2627 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED
, hdev
, &ev
, sizeof(ev
),
2631 static int unpair_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2634 struct mgmt_cp_unpair_device
*cp
= data
;
2635 struct mgmt_rp_unpair_device rp
;
2636 struct hci_cp_disconnect dc
;
2637 struct pending_cmd
*cmd
;
2638 struct hci_conn
*conn
;
2641 memset(&rp
, 0, sizeof(rp
));
2642 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
2643 rp
.addr
.type
= cp
->addr
.type
;
2645 if (!bdaddr_type_is_valid(cp
->addr
.type
))
2646 return cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
2647 MGMT_STATUS_INVALID_PARAMS
,
2650 if (cp
->disconnect
!= 0x00 && cp
->disconnect
!= 0x01)
2651 return cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
2652 MGMT_STATUS_INVALID_PARAMS
,
2657 if (!hdev_is_powered(hdev
)) {
2658 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
2659 MGMT_STATUS_NOT_POWERED
, &rp
, sizeof(rp
));
2663 if (cp
->addr
.type
== BDADDR_BREDR
) {
2664 err
= hci_remove_link_key(hdev
, &cp
->addr
.bdaddr
);
2668 if (cp
->addr
.type
== BDADDR_LE_PUBLIC
)
2669 addr_type
= ADDR_LE_DEV_PUBLIC
;
2671 addr_type
= ADDR_LE_DEV_RANDOM
;
2673 hci_remove_irk(hdev
, &cp
->addr
.bdaddr
, addr_type
);
2675 hci_conn_params_del(hdev
, &cp
->addr
.bdaddr
, addr_type
);
2677 err
= hci_remove_ltk(hdev
, &cp
->addr
.bdaddr
, addr_type
);
2681 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
,
2682 MGMT_STATUS_NOT_PAIRED
, &rp
, sizeof(rp
));
2686 if (cp
->disconnect
) {
2687 if (cp
->addr
.type
== BDADDR_BREDR
)
2688 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
2691 conn
= hci_conn_hash_lookup_ba(hdev
, LE_LINK
,
2698 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_UNPAIR_DEVICE
, 0,
2700 device_unpaired(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
, sk
);
2704 cmd
= mgmt_pending_add(sk
, MGMT_OP_UNPAIR_DEVICE
, hdev
, cp
,
2711 dc
.handle
= cpu_to_le16(conn
->handle
);
2712 dc
.reason
= 0x13; /* Remote User Terminated Connection */
2713 err
= hci_send_cmd(hdev
, HCI_OP_DISCONNECT
, sizeof(dc
), &dc
);
2715 mgmt_pending_remove(cmd
);
2718 hci_dev_unlock(hdev
);
2722 static int disconnect(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2725 struct mgmt_cp_disconnect
*cp
= data
;
2726 struct mgmt_rp_disconnect rp
;
2727 struct hci_cp_disconnect dc
;
2728 struct pending_cmd
*cmd
;
2729 struct hci_conn
*conn
;
2734 memset(&rp
, 0, sizeof(rp
));
2735 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
2736 rp
.addr
.type
= cp
->addr
.type
;
2738 if (!bdaddr_type_is_valid(cp
->addr
.type
))
2739 return cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
2740 MGMT_STATUS_INVALID_PARAMS
,
2745 if (!test_bit(HCI_UP
, &hdev
->flags
)) {
2746 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
2747 MGMT_STATUS_NOT_POWERED
, &rp
, sizeof(rp
));
2751 if (mgmt_pending_find(MGMT_OP_DISCONNECT
, hdev
)) {
2752 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
2753 MGMT_STATUS_BUSY
, &rp
, sizeof(rp
));
2757 if (cp
->addr
.type
== BDADDR_BREDR
)
2758 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
2761 conn
= hci_conn_hash_lookup_ba(hdev
, LE_LINK
, &cp
->addr
.bdaddr
);
2763 if (!conn
|| conn
->state
== BT_OPEN
|| conn
->state
== BT_CLOSED
) {
2764 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_DISCONNECT
,
2765 MGMT_STATUS_NOT_CONNECTED
, &rp
, sizeof(rp
));
2769 cmd
= mgmt_pending_add(sk
, MGMT_OP_DISCONNECT
, hdev
, data
, len
);
2775 dc
.handle
= cpu_to_le16(conn
->handle
);
2776 dc
.reason
= HCI_ERROR_REMOTE_USER_TERM
;
2778 err
= hci_send_cmd(hdev
, HCI_OP_DISCONNECT
, sizeof(dc
), &dc
);
2780 mgmt_pending_remove(cmd
);
2783 hci_dev_unlock(hdev
);
2787 static u8
link_to_bdaddr(u8 link_type
, u8 addr_type
)
2789 switch (link_type
) {
2791 switch (addr_type
) {
2792 case ADDR_LE_DEV_PUBLIC
:
2793 return BDADDR_LE_PUBLIC
;
2796 /* Fallback to LE Random address type */
2797 return BDADDR_LE_RANDOM
;
2801 /* Fallback to BR/EDR type */
2802 return BDADDR_BREDR
;
2806 static int get_connections(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2809 struct mgmt_rp_get_connections
*rp
;
2819 if (!hdev_is_powered(hdev
)) {
2820 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_GET_CONNECTIONS
,
2821 MGMT_STATUS_NOT_POWERED
);
2826 list_for_each_entry(c
, &hdev
->conn_hash
.list
, list
) {
2827 if (test_bit(HCI_CONN_MGMT_CONNECTED
, &c
->flags
))
2831 rp_len
= sizeof(*rp
) + (i
* sizeof(struct mgmt_addr_info
));
2832 rp
= kmalloc(rp_len
, GFP_KERNEL
);
2839 list_for_each_entry(c
, &hdev
->conn_hash
.list
, list
) {
2840 if (!test_bit(HCI_CONN_MGMT_CONNECTED
, &c
->flags
))
2842 bacpy(&rp
->addr
[i
].bdaddr
, &c
->dst
);
2843 rp
->addr
[i
].type
= link_to_bdaddr(c
->type
, c
->dst_type
);
2844 if (c
->type
== SCO_LINK
|| c
->type
== ESCO_LINK
)
2849 rp
->conn_count
= cpu_to_le16(i
);
2851 /* Recalculate length in case of filtered SCO connections, etc */
2852 rp_len
= sizeof(*rp
) + (i
* sizeof(struct mgmt_addr_info
));
2854 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONNECTIONS
, 0, rp
,
2860 hci_dev_unlock(hdev
);
2864 static int send_pin_code_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
2865 struct mgmt_cp_pin_code_neg_reply
*cp
)
2867 struct pending_cmd
*cmd
;
2870 cmd
= mgmt_pending_add(sk
, MGMT_OP_PIN_CODE_NEG_REPLY
, hdev
, cp
,
2875 err
= hci_send_cmd(hdev
, HCI_OP_PIN_CODE_NEG_REPLY
,
2876 sizeof(cp
->addr
.bdaddr
), &cp
->addr
.bdaddr
);
2878 mgmt_pending_remove(cmd
);
2883 static int pin_code_reply(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2886 struct hci_conn
*conn
;
2887 struct mgmt_cp_pin_code_reply
*cp
= data
;
2888 struct hci_cp_pin_code_reply reply
;
2889 struct pending_cmd
*cmd
;
2896 if (!hdev_is_powered(hdev
)) {
2897 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_PIN_CODE_REPLY
,
2898 MGMT_STATUS_NOT_POWERED
);
2902 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
, &cp
->addr
.bdaddr
);
2904 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_PIN_CODE_REPLY
,
2905 MGMT_STATUS_NOT_CONNECTED
);
2909 if (conn
->pending_sec_level
== BT_SECURITY_HIGH
&& cp
->pin_len
!= 16) {
2910 struct mgmt_cp_pin_code_neg_reply ncp
;
2912 memcpy(&ncp
.addr
, &cp
->addr
, sizeof(ncp
.addr
));
2914 BT_ERR("PIN code is not 16 bytes long");
2916 err
= send_pin_code_neg_reply(sk
, hdev
, &ncp
);
2918 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_PIN_CODE_REPLY
,
2919 MGMT_STATUS_INVALID_PARAMS
);
2924 cmd
= mgmt_pending_add(sk
, MGMT_OP_PIN_CODE_REPLY
, hdev
, data
, len
);
2930 bacpy(&reply
.bdaddr
, &cp
->addr
.bdaddr
);
2931 reply
.pin_len
= cp
->pin_len
;
2932 memcpy(reply
.pin_code
, cp
->pin_code
, sizeof(reply
.pin_code
));
2934 err
= hci_send_cmd(hdev
, HCI_OP_PIN_CODE_REPLY
, sizeof(reply
), &reply
);
2936 mgmt_pending_remove(cmd
);
2939 hci_dev_unlock(hdev
);
2943 static int set_io_capability(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
2946 struct mgmt_cp_set_io_capability
*cp
= data
;
2950 if (cp
->io_capability
> SMP_IO_KEYBOARD_DISPLAY
)
2951 return cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_IO_CAPABILITY
,
2952 MGMT_STATUS_INVALID_PARAMS
, NULL
, 0);
2956 hdev
->io_capability
= cp
->io_capability
;
2958 BT_DBG("%s IO capability set to 0x%02x", hdev
->name
,
2959 hdev
->io_capability
);
2961 hci_dev_unlock(hdev
);
2963 return cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_IO_CAPABILITY
, 0, NULL
,
/* find_pairing() - locate the pending MGMT_OP_PAIR_DEVICE command that
 * owns the given connection, by walking hdev->mgmt_pending and matching
 * cmd->user_data against @conn.
 * NOTE(review): the extraction dropped the skip/return lines of this
 * function; presumably non-matching entries are skipped and the matching
 * cmd (or NULL when none is found) is returned — confirm against the
 * original mgmt.c.
 */
2967 static struct pending_cmd
*find_pairing(struct hci_conn
*conn
)
2969 struct hci_dev
*hdev
= conn
->hdev
;
2970 struct pending_cmd
*cmd
;
/* Scan every pending mgmt command registered on this controller. */
2972 list_for_each_entry(cmd
, &hdev
->mgmt_pending
, list
) {
/* Only pair-device commands are of interest here. */
2973 if (cmd
->opcode
!= MGMT_OP_PAIR_DEVICE
)
/* user_data of a pair-device command holds the hci_conn it targets. */
2976 if (cmd
->user_data
!= conn
)
/* pairing_complete() - finish a pending MGMT_OP_PAIR_DEVICE command:
 * build the mgmt_rp_pair_device response from the connection's address,
 * send the command-complete event with @status, detach the pairing
 * callbacks from the connection, drop the connection reference and
 * remove the pending command.
 */
2985 static void pairing_complete(struct pending_cmd
*cmd
, u8 status
)
2987 struct mgmt_rp_pair_device rp
;
/* The pending command stored the target connection in user_data. */
2988 struct hci_conn
*conn
= cmd
->user_data
;
/* Fill the response with the peer's address and mgmt address type. */
2990 bacpy(&rp
.addr
.bdaddr
, &conn
->dst
);
2991 rp
.addr
.type
= link_to_bdaddr(conn
->type
, conn
->dst_type
);
/* Report the pairing result back to the mgmt socket that requested it. */
2993 cmd_complete(cmd
->sk
, cmd
->index
, MGMT_OP_PAIR_DEVICE
, status
,
2996 /* So we don't get further callbacks for this connection */
2997 conn
->connect_cfm_cb
= NULL
;
2998 conn
->security_cfm_cb
= NULL
;
2999 conn
->disconn_cfm_cb
= NULL
;
/* Release the reference taken when pairing started. */
3001 hci_conn_drop(conn
);
/* The command is fully answered; unlink and free it. */
3003 mgmt_pending_remove(cmd
);
/* mgmt_smp_complete() - SMP layer notification that pairing over LE has
 * finished. Maps the boolean result onto a mgmt status code and, if a
 * pending pair-device command exists for this connection, completes it.
 * NOTE(review): the NULL check on the find_pairing() result was dropped
 * by the extraction; presumably pairing_complete() is only called when a
 * pending command was found — confirm against the original mgmt.c.
 */
3006 void mgmt_smp_complete(struct hci_conn
*conn
, bool complete
)
3008 u8 status
= complete
? MGMT_STATUS_SUCCESS
: MGMT_STATUS_FAILED
;
3009 struct pending_cmd
*cmd
;
3011 cmd
= find_pairing(conn
);
3013 pairing_complete(cmd
, status
);
/* pairing_complete_cb() - BR/EDR connection/security callback installed
 * by pair_device(). Looks up the pending pair-device command for @conn
 * and completes it with the HCI status translated to a mgmt status.
 * NOTE(review): the branch structure around the "Unable to find" debug
 * message was dropped by the extraction; presumably it is logged when no
 * pending command exists and pairing_complete() runs otherwise.
 */
3016 static void pairing_complete_cb(struct hci_conn
*conn
, u8 status
)
3018 struct pending_cmd
*cmd
;
3020 BT_DBG("status %u", status
);
3022 cmd
= find_pairing(conn
);
3024 BT_DBG("Unable to find a pending command");
/* Translate the raw HCI status into the mgmt status space. */
3026 pairing_complete(cmd
, mgmt_status(status
));
/* le_pairing_complete_cb() - LE counterpart of pairing_complete_cb(),
 * installed on LE connections by pair_device(). Finds the pending
 * pair-device command and completes it with the translated status.
 * NOTE(review): interior lines (early-return/guard and NULL-cmd branch)
 * were dropped by the extraction — confirm against the original mgmt.c.
 */
3029 static void le_pairing_complete_cb(struct hci_conn
*conn
, u8 status
)
3031 struct pending_cmd
*cmd
;
3033 BT_DBG("status %u", status
);
3038 cmd
= find_pairing(conn
);
3040 BT_DBG("Unable to find a pending command");
3042 pairing_complete(cmd
, mgmt_status(status
));
3045 static int pair_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3048 struct mgmt_cp_pair_device
*cp
= data
;
3049 struct mgmt_rp_pair_device rp
;
3050 struct pending_cmd
*cmd
;
3051 u8 sec_level
, auth_type
;
3052 struct hci_conn
*conn
;
3057 memset(&rp
, 0, sizeof(rp
));
3058 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
3059 rp
.addr
.type
= cp
->addr
.type
;
3061 if (!bdaddr_type_is_valid(cp
->addr
.type
))
3062 return cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
3063 MGMT_STATUS_INVALID_PARAMS
,
3066 if (cp
->io_cap
> SMP_IO_KEYBOARD_DISPLAY
)
3067 return cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
3068 MGMT_STATUS_INVALID_PARAMS
,
3073 if (!hdev_is_powered(hdev
)) {
3074 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
3075 MGMT_STATUS_NOT_POWERED
, &rp
, sizeof(rp
));
3079 sec_level
= BT_SECURITY_MEDIUM
;
3080 auth_type
= HCI_AT_DEDICATED_BONDING
;
3082 if (cp
->addr
.type
== BDADDR_BREDR
) {
3083 conn
= hci_connect_acl(hdev
, &cp
->addr
.bdaddr
, sec_level
,
3088 /* Convert from L2CAP channel address type to HCI address type
3090 if (cp
->addr
.type
== BDADDR_LE_PUBLIC
)
3091 addr_type
= ADDR_LE_DEV_PUBLIC
;
3093 addr_type
= ADDR_LE_DEV_RANDOM
;
3095 /* When pairing a new device, it is expected to remember
3096 * this device for future connections. Adding the connection
3097 * parameter information ahead of time allows tracking
3098 * of the slave preferred values and will speed up any
3099 * further connection establishment.
3101 * If connection parameters already exist, then they
3102 * will be kept and this function does nothing.
3104 hci_conn_params_add(hdev
, &cp
->addr
.bdaddr
, addr_type
);
3106 conn
= hci_connect_le(hdev
, &cp
->addr
.bdaddr
, addr_type
,
3107 sec_level
, auth_type
);
3113 if (PTR_ERR(conn
) == -EBUSY
)
3114 status
= MGMT_STATUS_BUSY
;
3116 status
= MGMT_STATUS_CONNECT_FAILED
;
3118 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
3124 if (conn
->connect_cfm_cb
) {
3125 hci_conn_drop(conn
);
3126 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_PAIR_DEVICE
,
3127 MGMT_STATUS_BUSY
, &rp
, sizeof(rp
));
3131 cmd
= mgmt_pending_add(sk
, MGMT_OP_PAIR_DEVICE
, hdev
, data
, len
);
3134 hci_conn_drop(conn
);
3138 /* For LE, just connecting isn't a proof that the pairing finished */
3139 if (cp
->addr
.type
== BDADDR_BREDR
) {
3140 conn
->connect_cfm_cb
= pairing_complete_cb
;
3141 conn
->security_cfm_cb
= pairing_complete_cb
;
3142 conn
->disconn_cfm_cb
= pairing_complete_cb
;
3144 conn
->connect_cfm_cb
= le_pairing_complete_cb
;
3145 conn
->security_cfm_cb
= le_pairing_complete_cb
;
3146 conn
->disconn_cfm_cb
= le_pairing_complete_cb
;
3149 conn
->io_capability
= cp
->io_cap
;
3150 cmd
->user_data
= conn
;
3152 if (conn
->state
== BT_CONNECTED
&&
3153 hci_conn_security(conn
, sec_level
, auth_type
))
3154 pairing_complete(cmd
, 0);
3159 hci_dev_unlock(hdev
);
3163 static int cancel_pair_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3166 struct mgmt_addr_info
*addr
= data
;
3167 struct pending_cmd
*cmd
;
3168 struct hci_conn
*conn
;
3175 if (!hdev_is_powered(hdev
)) {
3176 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
,
3177 MGMT_STATUS_NOT_POWERED
);
3181 cmd
= mgmt_pending_find(MGMT_OP_PAIR_DEVICE
, hdev
);
3183 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
,
3184 MGMT_STATUS_INVALID_PARAMS
);
3188 conn
= cmd
->user_data
;
3190 if (bacmp(&addr
->bdaddr
, &conn
->dst
) != 0) {
3191 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
,
3192 MGMT_STATUS_INVALID_PARAMS
);
3196 pairing_complete(cmd
, MGMT_STATUS_CANCELLED
);
3198 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_CANCEL_PAIR_DEVICE
, 0,
3199 addr
, sizeof(*addr
));
3201 hci_dev_unlock(hdev
);
3205 static int user_pairing_resp(struct sock
*sk
, struct hci_dev
*hdev
,
3206 struct mgmt_addr_info
*addr
, u16 mgmt_op
,
3207 u16 hci_op
, __le32 passkey
)
3209 struct pending_cmd
*cmd
;
3210 struct hci_conn
*conn
;
3215 if (!hdev_is_powered(hdev
)) {
3216 err
= cmd_complete(sk
, hdev
->id
, mgmt_op
,
3217 MGMT_STATUS_NOT_POWERED
, addr
,
3222 if (addr
->type
== BDADDR_BREDR
)
3223 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
, &addr
->bdaddr
);
3225 conn
= hci_conn_hash_lookup_ba(hdev
, LE_LINK
, &addr
->bdaddr
);
3228 err
= cmd_complete(sk
, hdev
->id
, mgmt_op
,
3229 MGMT_STATUS_NOT_CONNECTED
, addr
,
3234 if (addr
->type
== BDADDR_LE_PUBLIC
|| addr
->type
== BDADDR_LE_RANDOM
) {
3235 err
= smp_user_confirm_reply(conn
, mgmt_op
, passkey
);
3237 err
= cmd_complete(sk
, hdev
->id
, mgmt_op
,
3238 MGMT_STATUS_SUCCESS
, addr
,
3241 err
= cmd_complete(sk
, hdev
->id
, mgmt_op
,
3242 MGMT_STATUS_FAILED
, addr
,
3248 cmd
= mgmt_pending_add(sk
, mgmt_op
, hdev
, addr
, sizeof(*addr
));
3254 /* Continue with pairing via HCI */
3255 if (hci_op
== HCI_OP_USER_PASSKEY_REPLY
) {
3256 struct hci_cp_user_passkey_reply cp
;
3258 bacpy(&cp
.bdaddr
, &addr
->bdaddr
);
3259 cp
.passkey
= passkey
;
3260 err
= hci_send_cmd(hdev
, hci_op
, sizeof(cp
), &cp
);
3262 err
= hci_send_cmd(hdev
, hci_op
, sizeof(addr
->bdaddr
),
3266 mgmt_pending_remove(cmd
);
3269 hci_dev_unlock(hdev
);
/* pin_code_neg_reply() - mgmt handler for MGMT_OP_PIN_CODE_NEG_REPLY.
 * Thin wrapper that forwards the peer address to the common
 * user_pairing_resp() helper, which issues HCI_OP_PIN_CODE_NEG_REPLY.
 * The trailing 0 is the (unused) passkey argument.
 */
3273 static int pin_code_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
3274 void *data
, u16 len
)
3276 struct mgmt_cp_pin_code_neg_reply
*cp
= data
;
3280 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3281 MGMT_OP_PIN_CODE_NEG_REPLY
,
3282 HCI_OP_PIN_CODE_NEG_REPLY
, 0);
/* user_confirm_reply() - mgmt handler for MGMT_OP_USER_CONFIRM_REPLY.
 * Validates that the request payload is exactly one
 * mgmt_cp_user_confirm_reply, then forwards to user_pairing_resp(),
 * which issues HCI_OP_USER_CONFIRM_REPLY. The trailing 0 is the
 * (unused) passkey argument.
 */
3285 static int user_confirm_reply(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3288 struct mgmt_cp_user_confirm_reply
*cp
= data
;
/* Reject malformed requests whose length does not match the struct. */
3292 if (len
!= sizeof(*cp
))
3293 return cmd_status(sk
, hdev
->id
, MGMT_OP_USER_CONFIRM_REPLY
,
3294 MGMT_STATUS_INVALID_PARAMS
);
3296 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3297 MGMT_OP_USER_CONFIRM_REPLY
,
3298 HCI_OP_USER_CONFIRM_REPLY
, 0);
/* user_confirm_neg_reply() - mgmt handler for
 * MGMT_OP_USER_CONFIRM_NEG_REPLY. Thin wrapper around
 * user_pairing_resp(), which issues HCI_OP_USER_CONFIRM_NEG_REPLY.
 * The trailing 0 is the (unused) passkey argument.
 */
3301 static int user_confirm_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
3302 void *data
, u16 len
)
3304 struct mgmt_cp_user_confirm_neg_reply
*cp
= data
;
3308 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3309 MGMT_OP_USER_CONFIRM_NEG_REPLY
,
3310 HCI_OP_USER_CONFIRM_NEG_REPLY
, 0);
/* user_passkey_reply() - mgmt handler for MGMT_OP_USER_PASSKEY_REPLY.
 * Thin wrapper around user_pairing_resp(); unlike the other reply
 * handlers it forwards the user-supplied passkey from the request so
 * HCI_OP_USER_PASSKEY_REPLY can carry it.
 */
3313 static int user_passkey_reply(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3316 struct mgmt_cp_user_passkey_reply
*cp
= data
;
3320 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3321 MGMT_OP_USER_PASSKEY_REPLY
,
3322 HCI_OP_USER_PASSKEY_REPLY
, cp
->passkey
);
/* user_passkey_neg_reply() - mgmt handler for
 * MGMT_OP_USER_PASSKEY_NEG_REPLY. Thin wrapper around
 * user_pairing_resp(), which issues HCI_OP_USER_PASSKEY_NEG_REPLY.
 * The trailing 0 is the (unused) passkey argument.
 */
3325 static int user_passkey_neg_reply(struct sock
*sk
, struct hci_dev
*hdev
,
3326 void *data
, u16 len
)
3328 struct mgmt_cp_user_passkey_neg_reply
*cp
= data
;
3332 return user_pairing_resp(sk
, hdev
, &cp
->addr
,
3333 MGMT_OP_USER_PASSKEY_NEG_REPLY
,
3334 HCI_OP_USER_PASSKEY_NEG_REPLY
, 0);
/* update_name() - queue an HCI Write Local Name command on @req that
 * pushes the controller's current dev_name to the hardware. The copy is
 * bounded by sizeof(cp.name), so an over-long dev_name cannot overflow
 * the HCI command parameter.
 */
3337 static void update_name(struct hci_request
*req
)
3339 struct hci_dev
*hdev
= req
->hdev
;
3340 struct hci_cp_write_local_name cp
;
3342 memcpy(cp
.name
, hdev
->dev_name
, sizeof(cp
.name
));
3344 hci_req_add(req
, HCI_OP_WRITE_LOCAL_NAME
, sizeof(cp
), &cp
);
3347 static void set_name_complete(struct hci_dev
*hdev
, u8 status
)
3349 struct mgmt_cp_set_local_name
*cp
;
3350 struct pending_cmd
*cmd
;
3352 BT_DBG("status 0x%02x", status
);
3356 cmd
= mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME
, hdev
);
3363 cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
,
3364 mgmt_status(status
));
3366 cmd_complete(cmd
->sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
, 0,
3369 mgmt_pending_remove(cmd
);
3372 hci_dev_unlock(hdev
);
3375 static int set_local_name(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3378 struct mgmt_cp_set_local_name
*cp
= data
;
3379 struct pending_cmd
*cmd
;
3380 struct hci_request req
;
3387 /* If the old values are the same as the new ones just return a
3388 * direct command complete event.
3390 if (!memcmp(hdev
->dev_name
, cp
->name
, sizeof(hdev
->dev_name
)) &&
3391 !memcmp(hdev
->short_name
, cp
->short_name
,
3392 sizeof(hdev
->short_name
))) {
3393 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
, 0,
3398 memcpy(hdev
->short_name
, cp
->short_name
, sizeof(hdev
->short_name
));
3400 if (!hdev_is_powered(hdev
)) {
3401 memcpy(hdev
->dev_name
, cp
->name
, sizeof(hdev
->dev_name
));
3403 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_LOCAL_NAME
, 0,
3408 err
= mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED
, hdev
, data
, len
,
3414 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_LOCAL_NAME
, hdev
, data
, len
);
3420 memcpy(hdev
->dev_name
, cp
->name
, sizeof(hdev
->dev_name
));
3422 hci_req_init(&req
, hdev
);
3424 if (lmp_bredr_capable(hdev
)) {
3429 /* The name is stored in the scan response data and so
3430 * no need to update the advertising data here.
3432 if (lmp_le_capable(hdev
))
3433 update_scan_rsp_data(&req
);
3435 err
= hci_req_run(&req
, set_name_complete
);
3437 mgmt_pending_remove(cmd
);
3440 hci_dev_unlock(hdev
);
3444 static int read_local_oob_data(struct sock
*sk
, struct hci_dev
*hdev
,
3445 void *data
, u16 data_len
)
3447 struct pending_cmd
*cmd
;
3450 BT_DBG("%s", hdev
->name
);
3454 if (!hdev_is_powered(hdev
)) {
3455 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
3456 MGMT_STATUS_NOT_POWERED
);
3460 if (!lmp_ssp_capable(hdev
)) {
3461 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
3462 MGMT_STATUS_NOT_SUPPORTED
);
3466 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA
, hdev
)) {
3467 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
3472 cmd
= mgmt_pending_add(sk
, MGMT_OP_READ_LOCAL_OOB_DATA
, hdev
, NULL
, 0);
3478 if (test_bit(HCI_SC_ENABLED
, &hdev
->dev_flags
))
3479 err
= hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_OOB_EXT_DATA
,
3482 err
= hci_send_cmd(hdev
, HCI_OP_READ_LOCAL_OOB_DATA
, 0, NULL
);
3485 mgmt_pending_remove(cmd
);
3488 hci_dev_unlock(hdev
);
3492 static int add_remote_oob_data(struct sock
*sk
, struct hci_dev
*hdev
,
3493 void *data
, u16 len
)
3497 BT_DBG("%s ", hdev
->name
);
3501 if (len
== MGMT_ADD_REMOTE_OOB_DATA_SIZE
) {
3502 struct mgmt_cp_add_remote_oob_data
*cp
= data
;
3505 err
= hci_add_remote_oob_data(hdev
, &cp
->addr
.bdaddr
,
3506 cp
->hash
, cp
->randomizer
);
3508 status
= MGMT_STATUS_FAILED
;
3510 status
= MGMT_STATUS_SUCCESS
;
3512 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_REMOTE_OOB_DATA
,
3513 status
, &cp
->addr
, sizeof(cp
->addr
));
3514 } else if (len
== MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE
) {
3515 struct mgmt_cp_add_remote_oob_ext_data
*cp
= data
;
3518 err
= hci_add_remote_oob_ext_data(hdev
, &cp
->addr
.bdaddr
,
3524 status
= MGMT_STATUS_FAILED
;
3526 status
= MGMT_STATUS_SUCCESS
;
3528 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_REMOTE_OOB_DATA
,
3529 status
, &cp
->addr
, sizeof(cp
->addr
));
3531 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len
);
3532 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_ADD_REMOTE_OOB_DATA
,
3533 MGMT_STATUS_INVALID_PARAMS
);
3536 hci_dev_unlock(hdev
);
3540 static int remove_remote_oob_data(struct sock
*sk
, struct hci_dev
*hdev
,
3541 void *data
, u16 len
)
3543 struct mgmt_cp_remove_remote_oob_data
*cp
= data
;
3547 BT_DBG("%s", hdev
->name
);
3551 err
= hci_remove_remote_oob_data(hdev
, &cp
->addr
.bdaddr
);
3553 status
= MGMT_STATUS_INVALID_PARAMS
;
3555 status
= MGMT_STATUS_SUCCESS
;
3557 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_REMOTE_OOB_DATA
,
3558 status
, &cp
->addr
, sizeof(cp
->addr
));
3560 hci_dev_unlock(hdev
);
3564 static int mgmt_start_discovery_failed(struct hci_dev
*hdev
, u8 status
)
3566 struct pending_cmd
*cmd
;
3570 hci_discovery_set_state(hdev
, DISCOVERY_STOPPED
);
3572 cmd
= mgmt_pending_find(MGMT_OP_START_DISCOVERY
, hdev
);
3576 type
= hdev
->discovery
.type
;
3578 err
= cmd_complete(cmd
->sk
, hdev
->id
, cmd
->opcode
, mgmt_status(status
),
3579 &type
, sizeof(type
));
3580 mgmt_pending_remove(cmd
);
3585 static void start_discovery_complete(struct hci_dev
*hdev
, u8 status
)
3587 unsigned long timeout
= 0;
3589 BT_DBG("status %d", status
);
3593 mgmt_start_discovery_failed(hdev
, status
);
3594 hci_dev_unlock(hdev
);
3599 hci_discovery_set_state(hdev
, DISCOVERY_FINDING
);
3600 hci_dev_unlock(hdev
);
3602 switch (hdev
->discovery
.type
) {
3603 case DISCOV_TYPE_LE
:
3604 timeout
= msecs_to_jiffies(DISCOV_LE_TIMEOUT
);
3607 case DISCOV_TYPE_INTERLEAVED
:
3608 timeout
= msecs_to_jiffies(hdev
->discov_interleaved_timeout
);
3611 case DISCOV_TYPE_BREDR
:
3615 BT_ERR("Invalid discovery type %d", hdev
->discovery
.type
);
3621 queue_delayed_work(hdev
->workqueue
, &hdev
->le_scan_disable
, timeout
);
3624 static int start_discovery(struct sock
*sk
, struct hci_dev
*hdev
,
3625 void *data
, u16 len
)
3627 struct mgmt_cp_start_discovery
*cp
= data
;
3628 struct pending_cmd
*cmd
;
3629 struct hci_cp_le_set_scan_param param_cp
;
3630 struct hci_cp_le_set_scan_enable enable_cp
;
3631 struct hci_cp_inquiry inq_cp
;
3632 struct hci_request req
;
3633 /* General inquiry access code (GIAC) */
3634 u8 lap
[3] = { 0x33, 0x8b, 0x9e };
3635 u8 status
, own_addr_type
;
3638 BT_DBG("%s", hdev
->name
);
3642 if (!hdev_is_powered(hdev
)) {
3643 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3644 MGMT_STATUS_NOT_POWERED
);
3648 if (test_bit(HCI_PERIODIC_INQ
, &hdev
->dev_flags
)) {
3649 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3654 if (hdev
->discovery
.state
!= DISCOVERY_STOPPED
) {
3655 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3660 cmd
= mgmt_pending_add(sk
, MGMT_OP_START_DISCOVERY
, hdev
, NULL
, 0);
3666 hdev
->discovery
.type
= cp
->type
;
3668 hci_req_init(&req
, hdev
);
3670 switch (hdev
->discovery
.type
) {
3671 case DISCOV_TYPE_BREDR
:
3672 status
= mgmt_bredr_support(hdev
);
3674 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3676 mgmt_pending_remove(cmd
);
3680 if (test_bit(HCI_INQUIRY
, &hdev
->flags
)) {
3681 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3683 mgmt_pending_remove(cmd
);
3687 hci_inquiry_cache_flush(hdev
);
3689 memset(&inq_cp
, 0, sizeof(inq_cp
));
3690 memcpy(&inq_cp
.lap
, lap
, sizeof(inq_cp
.lap
));
3691 inq_cp
.length
= DISCOV_BREDR_INQUIRY_LEN
;
3692 hci_req_add(&req
, HCI_OP_INQUIRY
, sizeof(inq_cp
), &inq_cp
);
3695 case DISCOV_TYPE_LE
:
3696 case DISCOV_TYPE_INTERLEAVED
:
3697 status
= mgmt_le_support(hdev
);
3699 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3701 mgmt_pending_remove(cmd
);
3705 if (hdev
->discovery
.type
== DISCOV_TYPE_INTERLEAVED
&&
3706 !test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
)) {
3707 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3708 MGMT_STATUS_NOT_SUPPORTED
);
3709 mgmt_pending_remove(cmd
);
3713 if (test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
)) {
3714 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3715 MGMT_STATUS_REJECTED
);
3716 mgmt_pending_remove(cmd
);
3720 /* If controller is scanning, it means the background scanning
3721 * is running. Thus, we should temporarily stop it in order to
3722 * set the discovery scanning parameters.
3724 if (test_bit(HCI_LE_SCAN
, &hdev
->dev_flags
))
3725 hci_req_add_le_scan_disable(&req
);
3727 memset(¶m_cp
, 0, sizeof(param_cp
));
3729 /* All active scans will be done with either a resolvable
3730 * private address (when privacy feature has been enabled)
3731 * or unresolvable private address.
3733 err
= hci_update_random_address(&req
, true, &own_addr_type
);
3735 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3736 MGMT_STATUS_FAILED
);
3737 mgmt_pending_remove(cmd
);
3741 param_cp
.type
= LE_SCAN_ACTIVE
;
3742 param_cp
.interval
= cpu_to_le16(DISCOV_LE_SCAN_INT
);
3743 param_cp
.window
= cpu_to_le16(DISCOV_LE_SCAN_WIN
);
3744 param_cp
.own_address_type
= own_addr_type
;
3745 hci_req_add(&req
, HCI_OP_LE_SET_SCAN_PARAM
, sizeof(param_cp
),
3748 memset(&enable_cp
, 0, sizeof(enable_cp
));
3749 enable_cp
.enable
= LE_SCAN_ENABLE
;
3750 enable_cp
.filter_dup
= LE_SCAN_FILTER_DUP_ENABLE
;
3751 hci_req_add(&req
, HCI_OP_LE_SET_SCAN_ENABLE
, sizeof(enable_cp
),
3756 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_START_DISCOVERY
,
3757 MGMT_STATUS_INVALID_PARAMS
);
3758 mgmt_pending_remove(cmd
);
3762 err
= hci_req_run(&req
, start_discovery_complete
);
3764 mgmt_pending_remove(cmd
);
3766 hci_discovery_set_state(hdev
, DISCOVERY_STARTING
);
3769 hci_dev_unlock(hdev
);
3773 static int mgmt_stop_discovery_failed(struct hci_dev
*hdev
, u8 status
)
3775 struct pending_cmd
*cmd
;
3778 cmd
= mgmt_pending_find(MGMT_OP_STOP_DISCOVERY
, hdev
);
3782 err
= cmd_complete(cmd
->sk
, hdev
->id
, cmd
->opcode
, mgmt_status(status
),
3783 &hdev
->discovery
.type
, sizeof(hdev
->discovery
.type
));
3784 mgmt_pending_remove(cmd
);
3789 static void stop_discovery_complete(struct hci_dev
*hdev
, u8 status
)
3791 BT_DBG("status %d", status
);
3796 mgmt_stop_discovery_failed(hdev
, status
);
3800 hci_discovery_set_state(hdev
, DISCOVERY_STOPPED
);
3803 hci_dev_unlock(hdev
);
3806 static int stop_discovery(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3809 struct mgmt_cp_stop_discovery
*mgmt_cp
= data
;
3810 struct pending_cmd
*cmd
;
3811 struct hci_request req
;
3814 BT_DBG("%s", hdev
->name
);
3818 if (!hci_discovery_active(hdev
)) {
3819 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_STOP_DISCOVERY
,
3820 MGMT_STATUS_REJECTED
, &mgmt_cp
->type
,
3821 sizeof(mgmt_cp
->type
));
3825 if (hdev
->discovery
.type
!= mgmt_cp
->type
) {
3826 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_STOP_DISCOVERY
,
3827 MGMT_STATUS_INVALID_PARAMS
, &mgmt_cp
->type
,
3828 sizeof(mgmt_cp
->type
));
3832 cmd
= mgmt_pending_add(sk
, MGMT_OP_STOP_DISCOVERY
, hdev
, NULL
, 0);
3838 hci_req_init(&req
, hdev
);
3840 hci_stop_discovery(&req
);
3842 err
= hci_req_run(&req
, stop_discovery_complete
);
3844 hci_discovery_set_state(hdev
, DISCOVERY_STOPPING
);
3848 mgmt_pending_remove(cmd
);
3850 /* If no HCI commands were sent we're done */
3851 if (err
== -ENODATA
) {
3852 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_STOP_DISCOVERY
, 0,
3853 &mgmt_cp
->type
, sizeof(mgmt_cp
->type
));
3854 hci_discovery_set_state(hdev
, DISCOVERY_STOPPED
);
3858 hci_dev_unlock(hdev
);
3862 static int confirm_name(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3865 struct mgmt_cp_confirm_name
*cp
= data
;
3866 struct inquiry_entry
*e
;
3869 BT_DBG("%s", hdev
->name
);
3873 if (!hci_discovery_active(hdev
)) {
3874 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_CONFIRM_NAME
,
3875 MGMT_STATUS_FAILED
, &cp
->addr
,
3880 e
= hci_inquiry_cache_lookup_unknown(hdev
, &cp
->addr
.bdaddr
);
3882 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_CONFIRM_NAME
,
3883 MGMT_STATUS_INVALID_PARAMS
, &cp
->addr
,
3888 if (cp
->name_known
) {
3889 e
->name_state
= NAME_KNOWN
;
3892 e
->name_state
= NAME_NEEDED
;
3893 hci_inquiry_cache_update_resolve(hdev
, e
);
3896 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_CONFIRM_NAME
, 0, &cp
->addr
,
3900 hci_dev_unlock(hdev
);
3904 static int block_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3907 struct mgmt_cp_block_device
*cp
= data
;
3911 BT_DBG("%s", hdev
->name
);
3913 if (!bdaddr_type_is_valid(cp
->addr
.type
))
3914 return cmd_complete(sk
, hdev
->id
, MGMT_OP_BLOCK_DEVICE
,
3915 MGMT_STATUS_INVALID_PARAMS
,
3916 &cp
->addr
, sizeof(cp
->addr
));
3920 err
= hci_blacklist_add(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
);
3922 status
= MGMT_STATUS_FAILED
;
3926 mgmt_event(MGMT_EV_DEVICE_BLOCKED
, hdev
, &cp
->addr
, sizeof(cp
->addr
),
3928 status
= MGMT_STATUS_SUCCESS
;
3931 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_BLOCK_DEVICE
, status
,
3932 &cp
->addr
, sizeof(cp
->addr
));
3934 hci_dev_unlock(hdev
);
3939 static int unblock_device(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3942 struct mgmt_cp_unblock_device
*cp
= data
;
3946 BT_DBG("%s", hdev
->name
);
3948 if (!bdaddr_type_is_valid(cp
->addr
.type
))
3949 return cmd_complete(sk
, hdev
->id
, MGMT_OP_UNBLOCK_DEVICE
,
3950 MGMT_STATUS_INVALID_PARAMS
,
3951 &cp
->addr
, sizeof(cp
->addr
));
3955 err
= hci_blacklist_del(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
);
3957 status
= MGMT_STATUS_INVALID_PARAMS
;
3961 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED
, hdev
, &cp
->addr
, sizeof(cp
->addr
),
3963 status
= MGMT_STATUS_SUCCESS
;
3966 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_UNBLOCK_DEVICE
, status
,
3967 &cp
->addr
, sizeof(cp
->addr
));
3969 hci_dev_unlock(hdev
);
3974 static int set_device_id(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
3977 struct mgmt_cp_set_device_id
*cp
= data
;
3978 struct hci_request req
;
3982 BT_DBG("%s", hdev
->name
);
3984 source
= __le16_to_cpu(cp
->source
);
3986 if (source
> 0x0002)
3987 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEVICE_ID
,
3988 MGMT_STATUS_INVALID_PARAMS
);
3992 hdev
->devid_source
= source
;
3993 hdev
->devid_vendor
= __le16_to_cpu(cp
->vendor
);
3994 hdev
->devid_product
= __le16_to_cpu(cp
->product
);
3995 hdev
->devid_version
= __le16_to_cpu(cp
->version
);
3997 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_DEVICE_ID
, 0, NULL
, 0);
3999 hci_req_init(&req
, hdev
);
4001 hci_req_run(&req
, NULL
);
4003 hci_dev_unlock(hdev
);
4008 static void set_advertising_complete(struct hci_dev
*hdev
, u8 status
)
4010 struct cmd_lookup match
= { NULL
, hdev
};
4013 u8 mgmt_err
= mgmt_status(status
);
4015 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING
, hdev
,
4016 cmd_status_rsp
, &mgmt_err
);
4020 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING
, hdev
, settings_rsp
,
4023 new_settings(hdev
, match
.sk
);
4029 static int set_advertising(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4032 struct mgmt_mode
*cp
= data
;
4033 struct pending_cmd
*cmd
;
4034 struct hci_request req
;
4035 u8 val
, enabled
, status
;
4038 BT_DBG("request for %s", hdev
->name
);
4040 status
= mgmt_le_support(hdev
);
4042 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
4045 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
4046 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
4047 MGMT_STATUS_INVALID_PARAMS
);
4052 enabled
= test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
);
4054 /* The following conditions are ones which mean that we should
4055 * not do any HCI communication but directly send a mgmt
4056 * response to user space (after toggling the flag if
4059 if (!hdev_is_powered(hdev
) || val
== enabled
||
4060 hci_conn_num(hdev
, LE_LINK
) > 0) {
4061 bool changed
= false;
4063 if (val
!= test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
)) {
4064 change_bit(HCI_ADVERTISING
, &hdev
->dev_flags
);
4068 err
= send_settings_rsp(sk
, MGMT_OP_SET_ADVERTISING
, hdev
);
4073 err
= new_settings(hdev
, sk
);
4078 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING
, hdev
) ||
4079 mgmt_pending_find(MGMT_OP_SET_LE
, hdev
)) {
4080 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_ADVERTISING
,
4085 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_ADVERTISING
, hdev
, data
, len
);
4091 hci_req_init(&req
, hdev
);
4094 enable_advertising(&req
);
4096 disable_advertising(&req
);
4098 err
= hci_req_run(&req
, set_advertising_complete
);
4100 mgmt_pending_remove(cmd
);
4103 hci_dev_unlock(hdev
);
4107 static int set_static_address(struct sock
*sk
, struct hci_dev
*hdev
,
4108 void *data
, u16 len
)
4110 struct mgmt_cp_set_static_address
*cp
= data
;
4113 BT_DBG("%s", hdev
->name
);
4115 if (!lmp_le_capable(hdev
))
4116 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_STATIC_ADDRESS
,
4117 MGMT_STATUS_NOT_SUPPORTED
);
4119 if (hdev_is_powered(hdev
))
4120 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_STATIC_ADDRESS
,
4121 MGMT_STATUS_REJECTED
);
4123 if (bacmp(&cp
->bdaddr
, BDADDR_ANY
)) {
4124 if (!bacmp(&cp
->bdaddr
, BDADDR_NONE
))
4125 return cmd_status(sk
, hdev
->id
,
4126 MGMT_OP_SET_STATIC_ADDRESS
,
4127 MGMT_STATUS_INVALID_PARAMS
);
4129 /* Two most significant bits shall be set */
4130 if ((cp
->bdaddr
.b
[5] & 0xc0) != 0xc0)
4131 return cmd_status(sk
, hdev
->id
,
4132 MGMT_OP_SET_STATIC_ADDRESS
,
4133 MGMT_STATUS_INVALID_PARAMS
);
4138 bacpy(&hdev
->static_addr
, &cp
->bdaddr
);
4140 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_STATIC_ADDRESS
, 0, NULL
, 0);
4142 hci_dev_unlock(hdev
);
4147 static int set_scan_params(struct sock
*sk
, struct hci_dev
*hdev
,
4148 void *data
, u16 len
)
4150 struct mgmt_cp_set_scan_params
*cp
= data
;
4151 __u16 interval
, window
;
4154 BT_DBG("%s", hdev
->name
);
4156 if (!lmp_le_capable(hdev
))
4157 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
4158 MGMT_STATUS_NOT_SUPPORTED
);
4160 interval
= __le16_to_cpu(cp
->interval
);
4162 if (interval
< 0x0004 || interval
> 0x4000)
4163 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
4164 MGMT_STATUS_INVALID_PARAMS
);
4166 window
= __le16_to_cpu(cp
->window
);
4168 if (window
< 0x0004 || window
> 0x4000)
4169 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
4170 MGMT_STATUS_INVALID_PARAMS
);
4172 if (window
> interval
)
4173 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
,
4174 MGMT_STATUS_INVALID_PARAMS
);
4178 hdev
->le_scan_interval
= interval
;
4179 hdev
->le_scan_window
= window
;
4181 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_SET_SCAN_PARAMS
, 0, NULL
, 0);
4183 /* If background scan is running, restart it so new parameters are
4186 if (test_bit(HCI_LE_SCAN
, &hdev
->dev_flags
) &&
4187 hdev
->discovery
.state
== DISCOVERY_STOPPED
) {
4188 struct hci_request req
;
4190 hci_req_init(&req
, hdev
);
4192 hci_req_add_le_scan_disable(&req
);
4193 hci_req_add_le_passive_scan(&req
);
4195 hci_req_run(&req
, NULL
);
4198 hci_dev_unlock(hdev
);
4203 static void fast_connectable_complete(struct hci_dev
*hdev
, u8 status
)
4205 struct pending_cmd
*cmd
;
4207 BT_DBG("status 0x%02x", status
);
4211 cmd
= mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE
, hdev
);
4216 cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4217 mgmt_status(status
));
4219 struct mgmt_mode
*cp
= cmd
->param
;
4222 set_bit(HCI_FAST_CONNECTABLE
, &hdev
->dev_flags
);
4224 clear_bit(HCI_FAST_CONNECTABLE
, &hdev
->dev_flags
);
4226 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_FAST_CONNECTABLE
, hdev
);
4227 new_settings(hdev
, cmd
->sk
);
4230 mgmt_pending_remove(cmd
);
4233 hci_dev_unlock(hdev
);
4236 static int set_fast_connectable(struct sock
*sk
, struct hci_dev
*hdev
,
4237 void *data
, u16 len
)
4239 struct mgmt_mode
*cp
= data
;
4240 struct pending_cmd
*cmd
;
4241 struct hci_request req
;
4244 BT_DBG("%s", hdev
->name
);
4246 if (!test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
) ||
4247 hdev
->hci_ver
< BLUETOOTH_VER_1_2
)
4248 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4249 MGMT_STATUS_NOT_SUPPORTED
);
4251 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
4252 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4253 MGMT_STATUS_INVALID_PARAMS
);
4255 if (!hdev_is_powered(hdev
))
4256 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4257 MGMT_STATUS_NOT_POWERED
);
4259 if (!test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
))
4260 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4261 MGMT_STATUS_REJECTED
);
4265 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE
, hdev
)) {
4266 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4271 if (!!cp
->val
== test_bit(HCI_FAST_CONNECTABLE
, &hdev
->dev_flags
)) {
4272 err
= send_settings_rsp(sk
, MGMT_OP_SET_FAST_CONNECTABLE
,
4277 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_FAST_CONNECTABLE
, hdev
,
4284 hci_req_init(&req
, hdev
);
4286 write_fast_connectable(&req
, cp
->val
);
4288 err
= hci_req_run(&req
, fast_connectable_complete
);
4290 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_FAST_CONNECTABLE
,
4291 MGMT_STATUS_FAILED
);
4292 mgmt_pending_remove(cmd
);
4296 hci_dev_unlock(hdev
);
4301 static void set_bredr_scan(struct hci_request
*req
)
4303 struct hci_dev
*hdev
= req
->hdev
;
4306 /* Ensure that fast connectable is disabled. This function will
4307 * not do anything if the page scan parameters are already what
4310 write_fast_connectable(req
, false);
4312 if (test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
))
4314 if (test_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
))
4315 scan
|= SCAN_INQUIRY
;
4318 hci_req_add(req
, HCI_OP_WRITE_SCAN_ENABLE
, 1, &scan
);
4321 static void set_bredr_complete(struct hci_dev
*hdev
, u8 status
)
4323 struct pending_cmd
*cmd
;
4325 BT_DBG("status 0x%02x", status
);
4329 cmd
= mgmt_pending_find(MGMT_OP_SET_BREDR
, hdev
);
4334 u8 mgmt_err
= mgmt_status(status
);
4336 /* We need to restore the flag if related HCI commands
4339 clear_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
);
4341 cmd_status(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_err
);
4343 send_settings_rsp(cmd
->sk
, MGMT_OP_SET_BREDR
, hdev
);
4344 new_settings(hdev
, cmd
->sk
);
4347 mgmt_pending_remove(cmd
);
4350 hci_dev_unlock(hdev
);
4353 static int set_bredr(struct sock
*sk
, struct hci_dev
*hdev
, void *data
, u16 len
)
4355 struct mgmt_mode
*cp
= data
;
4356 struct pending_cmd
*cmd
;
4357 struct hci_request req
;
4360 BT_DBG("request for %s", hdev
->name
);
4362 if (!lmp_bredr_capable(hdev
) || !lmp_le_capable(hdev
))
4363 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
4364 MGMT_STATUS_NOT_SUPPORTED
);
4366 if (!test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
))
4367 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
4368 MGMT_STATUS_REJECTED
);
4370 if (cp
->val
!= 0x00 && cp
->val
!= 0x01)
4371 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
4372 MGMT_STATUS_INVALID_PARAMS
);
4376 if (cp
->val
== test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
)) {
4377 err
= send_settings_rsp(sk
, MGMT_OP_SET_BREDR
, hdev
);
4381 if (!hdev_is_powered(hdev
)) {
4383 clear_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
4384 clear_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
);
4385 clear_bit(HCI_LINK_SECURITY
, &hdev
->dev_flags
);
4386 clear_bit(HCI_FAST_CONNECTABLE
, &hdev
->dev_flags
);
4387 clear_bit(HCI_HS_ENABLED
, &hdev
->dev_flags
);
4390 change_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
);
4392 err
= send_settings_rsp(sk
, MGMT_OP_SET_BREDR
, hdev
);
4396 err
= new_settings(hdev
, sk
);
4400 /* Reject disabling when powered on */
4402 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
4403 MGMT_STATUS_REJECTED
);
4407 if (mgmt_pending_find(MGMT_OP_SET_BREDR
, hdev
)) {
4408 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_BREDR
,
4413 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_BREDR
, hdev
, data
, len
);
4419 /* We need to flip the bit already here so that update_adv_data
4420 * generates the correct flags.
4422 set_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
);
4424 hci_req_init(&req
, hdev
);
4426 if (test_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
))
4427 set_bredr_scan(&req
);
4429 /* Since only the advertising data flags will change, there
4430 * is no need to update the scan response data.
4432 update_adv_data(&req
);
4434 err
= hci_req_run(&req
, set_bredr_complete
);
4436 mgmt_pending_remove(cmd
);
4439 hci_dev_unlock(hdev
);
4443 static int set_secure_conn(struct sock
*sk
, struct hci_dev
*hdev
,
4444 void *data
, u16 len
)
4446 struct mgmt_mode
*cp
= data
;
4447 struct pending_cmd
*cmd
;
4451 BT_DBG("request for %s", hdev
->name
);
4453 status
= mgmt_bredr_support(hdev
);
4455 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
4458 if (!lmp_sc_capable(hdev
) &&
4459 !test_bit(HCI_FORCE_SC
, &hdev
->dbg_flags
))
4460 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
4461 MGMT_STATUS_NOT_SUPPORTED
);
4463 if (cp
->val
!= 0x00 && cp
->val
!= 0x01 && cp
->val
!= 0x02)
4464 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
4465 MGMT_STATUS_INVALID_PARAMS
);
4469 if (!hdev_is_powered(hdev
)) {
4473 changed
= !test_and_set_bit(HCI_SC_ENABLED
,
4475 if (cp
->val
== 0x02)
4476 set_bit(HCI_SC_ONLY
, &hdev
->dev_flags
);
4478 clear_bit(HCI_SC_ONLY
, &hdev
->dev_flags
);
4480 changed
= test_and_clear_bit(HCI_SC_ENABLED
,
4482 clear_bit(HCI_SC_ONLY
, &hdev
->dev_flags
);
4485 err
= send_settings_rsp(sk
, MGMT_OP_SET_SECURE_CONN
, hdev
);
4490 err
= new_settings(hdev
, sk
);
4495 if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN
, hdev
)) {
4496 err
= cmd_status(sk
, hdev
->id
, MGMT_OP_SET_SECURE_CONN
,
4503 if (val
== test_bit(HCI_SC_ENABLED
, &hdev
->dev_flags
) &&
4504 (cp
->val
== 0x02) == test_bit(HCI_SC_ONLY
, &hdev
->dev_flags
)) {
4505 err
= send_settings_rsp(sk
, MGMT_OP_SET_SECURE_CONN
, hdev
);
4509 cmd
= mgmt_pending_add(sk
, MGMT_OP_SET_SECURE_CONN
, hdev
, data
, len
);
4515 err
= hci_send_cmd(hdev
, HCI_OP_WRITE_SC_SUPPORT
, 1, &val
);
4517 mgmt_pending_remove(cmd
);
4521 if (cp
->val
== 0x02)
4522 set_bit(HCI_SC_ONLY
, &hdev
->dev_flags
);
4524 clear_bit(HCI_SC_ONLY
, &hdev
->dev_flags
);
4527 hci_dev_unlock(hdev
);
4531 static int set_debug_keys(struct sock
*sk
, struct hci_dev
*hdev
,
4532 void *data
, u16 len
)
4534 struct mgmt_mode
*cp
= data
;
4535 bool changed
, use_changed
;
4538 BT_DBG("request for %s", hdev
->name
);
4540 if (cp
->val
!= 0x00 && cp
->val
!= 0x01 && cp
->val
!= 0x02)
4541 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_DEBUG_KEYS
,
4542 MGMT_STATUS_INVALID_PARAMS
);
4547 changed
= !test_and_set_bit(HCI_KEEP_DEBUG_KEYS
,
4550 changed
= test_and_clear_bit(HCI_KEEP_DEBUG_KEYS
,
4553 if (cp
->val
== 0x02)
4554 use_changed
= !test_and_set_bit(HCI_USE_DEBUG_KEYS
,
4557 use_changed
= test_and_clear_bit(HCI_USE_DEBUG_KEYS
,
4560 if (hdev_is_powered(hdev
) && use_changed
&&
4561 test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
)) {
4562 u8 mode
= (cp
->val
== 0x02) ? 0x01 : 0x00;
4563 hci_send_cmd(hdev
, HCI_OP_WRITE_SSP_DEBUG_MODE
,
4564 sizeof(mode
), &mode
);
4567 err
= send_settings_rsp(sk
, MGMT_OP_SET_DEBUG_KEYS
, hdev
);
4572 err
= new_settings(hdev
, sk
);
4575 hci_dev_unlock(hdev
);
4579 static int set_privacy(struct sock
*sk
, struct hci_dev
*hdev
, void *cp_data
,
4582 struct mgmt_cp_set_privacy
*cp
= cp_data
;
4586 BT_DBG("request for %s", hdev
->name
);
4588 if (!lmp_le_capable(hdev
))
4589 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PRIVACY
,
4590 MGMT_STATUS_NOT_SUPPORTED
);
4592 if (cp
->privacy
!= 0x00 && cp
->privacy
!= 0x01)
4593 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PRIVACY
,
4594 MGMT_STATUS_INVALID_PARAMS
);
4596 if (hdev_is_powered(hdev
))
4597 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_PRIVACY
,
4598 MGMT_STATUS_REJECTED
);
4602 /* If user space supports this command it is also expected to
4603 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4605 set_bit(HCI_RPA_RESOLVING
, &hdev
->dev_flags
);
4608 changed
= !test_and_set_bit(HCI_PRIVACY
, &hdev
->dev_flags
);
4609 memcpy(hdev
->irk
, cp
->irk
, sizeof(hdev
->irk
));
4610 set_bit(HCI_RPA_EXPIRED
, &hdev
->dev_flags
);
4612 changed
= test_and_clear_bit(HCI_PRIVACY
, &hdev
->dev_flags
);
4613 memset(hdev
->irk
, 0, sizeof(hdev
->irk
));
4614 clear_bit(HCI_RPA_EXPIRED
, &hdev
->dev_flags
);
4617 err
= send_settings_rsp(sk
, MGMT_OP_SET_PRIVACY
, hdev
);
4622 err
= new_settings(hdev
, sk
);
4625 hci_dev_unlock(hdev
);
4629 static bool irk_is_valid(struct mgmt_irk_info
*irk
)
4631 switch (irk
->addr
.type
) {
4632 case BDADDR_LE_PUBLIC
:
4635 case BDADDR_LE_RANDOM
:
4636 /* Two most significant bits shall be set */
4637 if ((irk
->addr
.bdaddr
.b
[5] & 0xc0) != 0xc0)
4645 static int load_irks(struct sock
*sk
, struct hci_dev
*hdev
, void *cp_data
,
4648 struct mgmt_cp_load_irks
*cp
= cp_data
;
4649 const u16 max_irk_count
= ((U16_MAX
- sizeof(*cp
)) /
4650 sizeof(struct mgmt_irk_info
));
4651 u16 irk_count
, expected_len
;
4654 BT_DBG("request for %s", hdev
->name
);
4656 if (!lmp_le_capable(hdev
))
4657 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_IRKS
,
4658 MGMT_STATUS_NOT_SUPPORTED
);
4660 irk_count
= __le16_to_cpu(cp
->irk_count
);
4661 if (irk_count
> max_irk_count
) {
4662 BT_ERR("load_irks: too big irk_count value %u", irk_count
);
4663 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_IRKS
,
4664 MGMT_STATUS_INVALID_PARAMS
);
4667 expected_len
= sizeof(*cp
) + irk_count
* sizeof(struct mgmt_irk_info
);
4668 if (expected_len
!= len
) {
4669 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4671 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_IRKS
,
4672 MGMT_STATUS_INVALID_PARAMS
);
4675 BT_DBG("%s irk_count %u", hdev
->name
, irk_count
);
4677 for (i
= 0; i
< irk_count
; i
++) {
4678 struct mgmt_irk_info
*key
= &cp
->irks
[i
];
4680 if (!irk_is_valid(key
))
4681 return cmd_status(sk
, hdev
->id
,
4683 MGMT_STATUS_INVALID_PARAMS
);
4688 hci_smp_irks_clear(hdev
);
4690 for (i
= 0; i
< irk_count
; i
++) {
4691 struct mgmt_irk_info
*irk
= &cp
->irks
[i
];
4694 if (irk
->addr
.type
== BDADDR_LE_PUBLIC
)
4695 addr_type
= ADDR_LE_DEV_PUBLIC
;
4697 addr_type
= ADDR_LE_DEV_RANDOM
;
4699 hci_add_irk(hdev
, &irk
->addr
.bdaddr
, addr_type
, irk
->val
,
4703 set_bit(HCI_RPA_RESOLVING
, &hdev
->dev_flags
);
4705 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_IRKS
, 0, NULL
, 0);
4707 hci_dev_unlock(hdev
);
4712 static bool ltk_is_valid(struct mgmt_ltk_info
*key
)
4714 if (key
->master
!= 0x00 && key
->master
!= 0x01)
4717 switch (key
->addr
.type
) {
4718 case BDADDR_LE_PUBLIC
:
4721 case BDADDR_LE_RANDOM
:
4722 /* Two most significant bits shall be set */
4723 if ((key
->addr
.bdaddr
.b
[5] & 0xc0) != 0xc0)
4731 static int load_long_term_keys(struct sock
*sk
, struct hci_dev
*hdev
,
4732 void *cp_data
, u16 len
)
4734 struct mgmt_cp_load_long_term_keys
*cp
= cp_data
;
4735 const u16 max_key_count
= ((U16_MAX
- sizeof(*cp
)) /
4736 sizeof(struct mgmt_ltk_info
));
4737 u16 key_count
, expected_len
;
4740 BT_DBG("request for %s", hdev
->name
);
4742 if (!lmp_le_capable(hdev
))
4743 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
,
4744 MGMT_STATUS_NOT_SUPPORTED
);
4746 key_count
= __le16_to_cpu(cp
->key_count
);
4747 if (key_count
> max_key_count
) {
4748 BT_ERR("load_ltks: too big key_count value %u", key_count
);
4749 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
,
4750 MGMT_STATUS_INVALID_PARAMS
);
4753 expected_len
= sizeof(*cp
) + key_count
*
4754 sizeof(struct mgmt_ltk_info
);
4755 if (expected_len
!= len
) {
4756 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4758 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
,
4759 MGMT_STATUS_INVALID_PARAMS
);
4762 BT_DBG("%s key_count %u", hdev
->name
, key_count
);
4764 for (i
= 0; i
< key_count
; i
++) {
4765 struct mgmt_ltk_info
*key
= &cp
->keys
[i
];
4767 if (!ltk_is_valid(key
))
4768 return cmd_status(sk
, hdev
->id
,
4769 MGMT_OP_LOAD_LONG_TERM_KEYS
,
4770 MGMT_STATUS_INVALID_PARAMS
);
4775 hci_smp_ltks_clear(hdev
);
4777 for (i
= 0; i
< key_count
; i
++) {
4778 struct mgmt_ltk_info
*key
= &cp
->keys
[i
];
4779 u8 type
, addr_type
, authenticated
;
4781 if (key
->addr
.type
== BDADDR_LE_PUBLIC
)
4782 addr_type
= ADDR_LE_DEV_PUBLIC
;
4784 addr_type
= ADDR_LE_DEV_RANDOM
;
4789 type
= SMP_LTK_SLAVE
;
4791 switch (key
->type
) {
4792 case MGMT_LTK_UNAUTHENTICATED
:
4793 authenticated
= 0x00;
4795 case MGMT_LTK_AUTHENTICATED
:
4796 authenticated
= 0x01;
4802 hci_add_ltk(hdev
, &key
->addr
.bdaddr
, addr_type
, type
,
4803 authenticated
, key
->val
, key
->enc_size
, key
->ediv
,
4807 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_LONG_TERM_KEYS
, 0,
4810 hci_dev_unlock(hdev
);
4815 struct cmd_conn_lookup
{
4816 struct hci_conn
*conn
;
4817 bool valid_tx_power
;
4821 static void get_conn_info_complete(struct pending_cmd
*cmd
, void *data
)
4823 struct cmd_conn_lookup
*match
= data
;
4824 struct mgmt_cp_get_conn_info
*cp
;
4825 struct mgmt_rp_get_conn_info rp
;
4826 struct hci_conn
*conn
= cmd
->user_data
;
4828 if (conn
!= match
->conn
)
4831 cp
= (struct mgmt_cp_get_conn_info
*) cmd
->param
;
4833 memset(&rp
, 0, sizeof(rp
));
4834 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
4835 rp
.addr
.type
= cp
->addr
.type
;
4837 if (!match
->mgmt_status
) {
4838 rp
.rssi
= conn
->rssi
;
4840 if (match
->valid_tx_power
) {
4841 rp
.tx_power
= conn
->tx_power
;
4842 rp
.max_tx_power
= conn
->max_tx_power
;
4844 rp
.tx_power
= HCI_TX_POWER_INVALID
;
4845 rp
.max_tx_power
= HCI_TX_POWER_INVALID
;
4849 cmd_complete(cmd
->sk
, cmd
->index
, MGMT_OP_GET_CONN_INFO
,
4850 match
->mgmt_status
, &rp
, sizeof(rp
));
4852 hci_conn_drop(conn
);
4854 mgmt_pending_remove(cmd
);
4857 static void conn_info_refresh_complete(struct hci_dev
*hdev
, u8 status
)
4859 struct hci_cp_read_rssi
*cp
;
4860 struct hci_conn
*conn
;
4861 struct cmd_conn_lookup match
;
4864 BT_DBG("status 0x%02x", status
);
4868 /* TX power data is valid in case request completed successfully,
4869 * otherwise we assume it's not valid. At the moment we assume that
4870 * either both or none of current and max values are valid to keep code
4873 match
.valid_tx_power
= !status
;
4875 /* Commands sent in request are either Read RSSI or Read Transmit Power
4876 * Level so we check which one was last sent to retrieve connection
4877 * handle. Both commands have handle as first parameter so it's safe to
4878 * cast data on the same command struct.
4880 * First command sent is always Read RSSI and we fail only if it fails.
4881 * In other case we simply override error to indicate success as we
4882 * already remembered if TX power value is actually valid.
4884 cp
= hci_sent_cmd_data(hdev
, HCI_OP_READ_RSSI
);
4886 cp
= hci_sent_cmd_data(hdev
, HCI_OP_READ_TX_POWER
);
4891 BT_ERR("invalid sent_cmd in response");
4895 handle
= __le16_to_cpu(cp
->handle
);
4896 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
4898 BT_ERR("unknown handle (%d) in response", handle
);
4903 match
.mgmt_status
= mgmt_status(status
);
4905 /* Cache refresh is complete, now reply for mgmt request for given
4908 mgmt_pending_foreach(MGMT_OP_GET_CONN_INFO
, hdev
,
4909 get_conn_info_complete
, &match
);
4912 hci_dev_unlock(hdev
);
4915 static int get_conn_info(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
4918 struct mgmt_cp_get_conn_info
*cp
= data
;
4919 struct mgmt_rp_get_conn_info rp
;
4920 struct hci_conn
*conn
;
4921 unsigned long conn_info_age
;
4924 BT_DBG("%s", hdev
->name
);
4926 memset(&rp
, 0, sizeof(rp
));
4927 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
4928 rp
.addr
.type
= cp
->addr
.type
;
4930 if (!bdaddr_type_is_valid(cp
->addr
.type
))
4931 return cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
4932 MGMT_STATUS_INVALID_PARAMS
,
4937 if (!hdev_is_powered(hdev
)) {
4938 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
4939 MGMT_STATUS_NOT_POWERED
, &rp
, sizeof(rp
));
4943 if (cp
->addr
.type
== BDADDR_BREDR
)
4944 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
4947 conn
= hci_conn_hash_lookup_ba(hdev
, LE_LINK
, &cp
->addr
.bdaddr
);
4949 if (!conn
|| conn
->state
!= BT_CONNECTED
) {
4950 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
4951 MGMT_STATUS_NOT_CONNECTED
, &rp
, sizeof(rp
));
4955 /* To avoid client trying to guess when to poll again for information we
4956 * calculate conn info age as random value between min/max set in hdev.
4958 conn_info_age
= hdev
->conn_info_min_age
+
4959 prandom_u32_max(hdev
->conn_info_max_age
-
4960 hdev
->conn_info_min_age
);
4962 /* Query controller to refresh cached values if they are too old or were
4965 if (time_after(jiffies
, conn
->conn_info_timestamp
+
4966 msecs_to_jiffies(conn_info_age
)) ||
4967 !conn
->conn_info_timestamp
) {
4968 struct hci_request req
;
4969 struct hci_cp_read_tx_power req_txp_cp
;
4970 struct hci_cp_read_rssi req_rssi_cp
;
4971 struct pending_cmd
*cmd
;
4973 hci_req_init(&req
, hdev
);
4974 req_rssi_cp
.handle
= cpu_to_le16(conn
->handle
);
4975 hci_req_add(&req
, HCI_OP_READ_RSSI
, sizeof(req_rssi_cp
),
4978 /* For LE links TX power does not change thus we don't need to
4979 * query for it once value is known.
4981 if (!bdaddr_type_is_le(cp
->addr
.type
) ||
4982 conn
->tx_power
== HCI_TX_POWER_INVALID
) {
4983 req_txp_cp
.handle
= cpu_to_le16(conn
->handle
);
4984 req_txp_cp
.type
= 0x00;
4985 hci_req_add(&req
, HCI_OP_READ_TX_POWER
,
4986 sizeof(req_txp_cp
), &req_txp_cp
);
4989 /* Max TX power needs to be read only once per connection */
4990 if (conn
->max_tx_power
== HCI_TX_POWER_INVALID
) {
4991 req_txp_cp
.handle
= cpu_to_le16(conn
->handle
);
4992 req_txp_cp
.type
= 0x01;
4993 hci_req_add(&req
, HCI_OP_READ_TX_POWER
,
4994 sizeof(req_txp_cp
), &req_txp_cp
);
4997 err
= hci_req_run(&req
, conn_info_refresh_complete
);
5001 cmd
= mgmt_pending_add(sk
, MGMT_OP_GET_CONN_INFO
, hdev
,
5008 hci_conn_hold(conn
);
5009 cmd
->user_data
= conn
;
5011 conn
->conn_info_timestamp
= jiffies
;
5013 /* Cache is valid, just reply with values cached in hci_conn */
5014 rp
.rssi
= conn
->rssi
;
5015 rp
.tx_power
= conn
->tx_power
;
5016 rp
.max_tx_power
= conn
->max_tx_power
;
5018 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CONN_INFO
,
5019 MGMT_STATUS_SUCCESS
, &rp
, sizeof(rp
));
5023 hci_dev_unlock(hdev
);
5027 static void get_clock_info_complete(struct hci_dev
*hdev
, u8 status
)
5029 struct mgmt_cp_get_clock_info
*cp
;
5030 struct mgmt_rp_get_clock_info rp
;
5031 struct hci_cp_read_clock
*hci_cp
;
5032 struct pending_cmd
*cmd
;
5033 struct hci_conn
*conn
;
5035 BT_DBG("%s status %u", hdev
->name
, status
);
5039 hci_cp
= hci_sent_cmd_data(hdev
, HCI_OP_READ_CLOCK
);
5043 if (hci_cp
->which
) {
5044 u16 handle
= __le16_to_cpu(hci_cp
->handle
);
5045 conn
= hci_conn_hash_lookup_handle(hdev
, handle
);
5050 cmd
= mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO
, hdev
, conn
);
5056 memset(&rp
, 0, sizeof(rp
));
5057 memcpy(&rp
.addr
, &cp
->addr
, sizeof(rp
.addr
));
5062 rp
.local_clock
= cpu_to_le32(hdev
->clock
);
5065 rp
.piconet_clock
= cpu_to_le32(conn
->clock
);
5066 rp
.accuracy
= cpu_to_le16(conn
->clock_accuracy
);
5070 cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, mgmt_status(status
),
5072 mgmt_pending_remove(cmd
);
5074 hci_conn_drop(conn
);
5077 hci_dev_unlock(hdev
);
5080 static int get_clock_info(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
5083 struct mgmt_cp_get_clock_info
*cp
= data
;
5084 struct mgmt_rp_get_clock_info rp
;
5085 struct hci_cp_read_clock hci_cp
;
5086 struct pending_cmd
*cmd
;
5087 struct hci_request req
;
5088 struct hci_conn
*conn
;
5091 BT_DBG("%s", hdev
->name
);
5093 memset(&rp
, 0, sizeof(rp
));
5094 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
5095 rp
.addr
.type
= cp
->addr
.type
;
5097 if (cp
->addr
.type
!= BDADDR_BREDR
)
5098 return cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CLOCK_INFO
,
5099 MGMT_STATUS_INVALID_PARAMS
,
5104 if (!hdev_is_powered(hdev
)) {
5105 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_GET_CLOCK_INFO
,
5106 MGMT_STATUS_NOT_POWERED
, &rp
, sizeof(rp
));
5110 if (bacmp(&cp
->addr
.bdaddr
, BDADDR_ANY
)) {
5111 conn
= hci_conn_hash_lookup_ba(hdev
, ACL_LINK
,
5113 if (!conn
|| conn
->state
!= BT_CONNECTED
) {
5114 err
= cmd_complete(sk
, hdev
->id
,
5115 MGMT_OP_GET_CLOCK_INFO
,
5116 MGMT_STATUS_NOT_CONNECTED
,
5124 cmd
= mgmt_pending_add(sk
, MGMT_OP_GET_CLOCK_INFO
, hdev
, data
, len
);
5130 hci_req_init(&req
, hdev
);
5132 memset(&hci_cp
, 0, sizeof(hci_cp
));
5133 hci_req_add(&req
, HCI_OP_READ_CLOCK
, sizeof(hci_cp
), &hci_cp
);
5136 hci_conn_hold(conn
);
5137 cmd
->user_data
= conn
;
5139 hci_cp
.handle
= cpu_to_le16(conn
->handle
);
5140 hci_cp
.which
= 0x01; /* Piconet clock */
5141 hci_req_add(&req
, HCI_OP_READ_CLOCK
, sizeof(hci_cp
), &hci_cp
);
5144 err
= hci_req_run(&req
, get_clock_info_complete
);
5146 mgmt_pending_remove(cmd
);
5149 hci_dev_unlock(hdev
);
5153 static void device_added(struct sock
*sk
, struct hci_dev
*hdev
,
5154 bdaddr_t
*bdaddr
, u8 type
, u8 action
)
5156 struct mgmt_ev_device_added ev
;
5158 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
5159 ev
.addr
.type
= type
;
5162 mgmt_event(MGMT_EV_DEVICE_ADDED
, hdev
, &ev
, sizeof(ev
), sk
);
5165 static int add_device(struct sock
*sk
, struct hci_dev
*hdev
,
5166 void *data
, u16 len
)
5168 struct mgmt_cp_add_device
*cp
= data
;
5169 u8 auto_conn
, addr_type
;
5172 BT_DBG("%s", hdev
->name
);
5174 if (!bdaddr_type_is_le(cp
->addr
.type
) ||
5175 !bacmp(&cp
->addr
.bdaddr
, BDADDR_ANY
))
5176 return cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_DEVICE
,
5177 MGMT_STATUS_INVALID_PARAMS
,
5178 &cp
->addr
, sizeof(cp
->addr
));
5180 if (cp
->action
!= 0x00 && cp
->action
!= 0x01)
5181 return cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_DEVICE
,
5182 MGMT_STATUS_INVALID_PARAMS
,
5183 &cp
->addr
, sizeof(cp
->addr
));
5187 if (cp
->addr
.type
== BDADDR_LE_PUBLIC
)
5188 addr_type
= ADDR_LE_DEV_PUBLIC
;
5190 addr_type
= ADDR_LE_DEV_RANDOM
;
5193 auto_conn
= HCI_AUTO_CONN_ALWAYS
;
5195 auto_conn
= HCI_AUTO_CONN_REPORT
;
5197 /* If the connection parameters don't exist for this device,
5198 * they will be created and configured with defaults.
5200 if (hci_conn_params_set(hdev
, &cp
->addr
.bdaddr
, addr_type
,
5202 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_DEVICE
,
5204 &cp
->addr
, sizeof(cp
->addr
));
5208 device_added(sk
, hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
, cp
->action
);
5210 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_ADD_DEVICE
,
5211 MGMT_STATUS_SUCCESS
, &cp
->addr
, sizeof(cp
->addr
));
5214 hci_dev_unlock(hdev
);
5218 static void device_removed(struct sock
*sk
, struct hci_dev
*hdev
,
5219 bdaddr_t
*bdaddr
, u8 type
)
5221 struct mgmt_ev_device_removed ev
;
5223 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
5224 ev
.addr
.type
= type
;
5226 mgmt_event(MGMT_EV_DEVICE_REMOVED
, hdev
, &ev
, sizeof(ev
), sk
);
5229 static int remove_device(struct sock
*sk
, struct hci_dev
*hdev
,
5230 void *data
, u16 len
)
5232 struct mgmt_cp_remove_device
*cp
= data
;
5235 BT_DBG("%s", hdev
->name
);
5239 if (bacmp(&cp
->addr
.bdaddr
, BDADDR_ANY
)) {
5240 struct hci_conn_params
*params
;
5243 if (!bdaddr_type_is_le(cp
->addr
.type
)) {
5244 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_DEVICE
,
5245 MGMT_STATUS_INVALID_PARAMS
,
5246 &cp
->addr
, sizeof(cp
->addr
));
5250 if (cp
->addr
.type
== BDADDR_LE_PUBLIC
)
5251 addr_type
= ADDR_LE_DEV_PUBLIC
;
5253 addr_type
= ADDR_LE_DEV_RANDOM
;
5255 params
= hci_conn_params_lookup(hdev
, &cp
->addr
.bdaddr
,
5258 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_DEVICE
,
5259 MGMT_STATUS_INVALID_PARAMS
,
5260 &cp
->addr
, sizeof(cp
->addr
));
5264 if (params
->auto_connect
== HCI_AUTO_CONN_DISABLED
) {
5265 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_DEVICE
,
5266 MGMT_STATUS_INVALID_PARAMS
,
5267 &cp
->addr
, sizeof(cp
->addr
));
5271 list_del(¶ms
->action
);
5272 list_del(¶ms
->list
);
5274 hci_update_background_scan(hdev
);
5276 device_removed(sk
, hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
);
5278 if (cp
->addr
.type
) {
5279 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_DEVICE
,
5280 MGMT_STATUS_INVALID_PARAMS
,
5281 &cp
->addr
, sizeof(cp
->addr
));
5285 hci_conn_params_clear_enabled(hdev
);
5288 err
= cmd_complete(sk
, hdev
->id
, MGMT_OP_REMOVE_DEVICE
,
5289 MGMT_STATUS_SUCCESS
, &cp
->addr
, sizeof(cp
->addr
));
5292 hci_dev_unlock(hdev
);
5296 static int load_conn_param(struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
5299 struct mgmt_cp_load_conn_param
*cp
= data
;
5300 const u16 max_param_count
= ((U16_MAX
- sizeof(*cp
)) /
5301 sizeof(struct mgmt_conn_param
));
5302 u16 param_count
, expected_len
;
5305 if (!lmp_le_capable(hdev
))
5306 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_CONN_PARAM
,
5307 MGMT_STATUS_NOT_SUPPORTED
);
5309 param_count
= __le16_to_cpu(cp
->param_count
);
5310 if (param_count
> max_param_count
) {
5311 BT_ERR("load_conn_param: too big param_count value %u",
5313 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_CONN_PARAM
,
5314 MGMT_STATUS_INVALID_PARAMS
);
5317 expected_len
= sizeof(*cp
) + param_count
*
5318 sizeof(struct mgmt_conn_param
);
5319 if (expected_len
!= len
) {
5320 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
5322 return cmd_status(sk
, hdev
->id
, MGMT_OP_LOAD_CONN_PARAM
,
5323 MGMT_STATUS_INVALID_PARAMS
);
5326 BT_DBG("%s param_count %u", hdev
->name
, param_count
);
5330 hci_conn_params_clear_disabled(hdev
);
5332 for (i
= 0; i
< param_count
; i
++) {
5333 struct mgmt_conn_param
*param
= &cp
->params
[i
];
5334 struct hci_conn_params
*hci_param
;
5335 u16 min
, max
, latency
, timeout
;
5338 BT_DBG("Adding %pMR (type %u)", ¶m
->addr
.bdaddr
,
5341 if (param
->addr
.type
== BDADDR_LE_PUBLIC
) {
5342 addr_type
= ADDR_LE_DEV_PUBLIC
;
5343 } else if (param
->addr
.type
== BDADDR_LE_RANDOM
) {
5344 addr_type
= ADDR_LE_DEV_RANDOM
;
5346 BT_ERR("Ignoring invalid connection parameters");
5350 min
= le16_to_cpu(param
->min_interval
);
5351 max
= le16_to_cpu(param
->max_interval
);
5352 latency
= le16_to_cpu(param
->latency
);
5353 timeout
= le16_to_cpu(param
->timeout
);
5355 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
5356 min
, max
, latency
, timeout
);
5358 if (hci_check_conn_params(min
, max
, latency
, timeout
) < 0) {
5359 BT_ERR("Ignoring invalid connection parameters");
5363 hci_param
= hci_conn_params_add(hdev
, ¶m
->addr
.bdaddr
,
5366 BT_ERR("Failed to add connection parameters");
5370 hci_param
->conn_min_interval
= min
;
5371 hci_param
->conn_max_interval
= max
;
5372 hci_param
->conn_latency
= latency
;
5373 hci_param
->supervision_timeout
= timeout
;
5376 hci_dev_unlock(hdev
);
5378 return cmd_complete(sk
, hdev
->id
, MGMT_OP_LOAD_CONN_PARAM
, 0, NULL
, 0);
5381 static int set_external_config(struct sock
*sk
, struct hci_dev
*hdev
,
5382 void *data
, u16 len
)
5384 struct mgmt_cp_set_external_config
*cp
= data
;
5388 BT_DBG("%s", hdev
->name
);
5390 if (hdev_is_powered(hdev
))
5391 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_EXTERNAL_CONFIG
,
5392 MGMT_STATUS_REJECTED
);
5394 if (cp
->config
!= 0x00 && cp
->config
!= 0x01)
5395 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_EXTERNAL_CONFIG
,
5396 MGMT_STATUS_INVALID_PARAMS
);
5398 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG
, &hdev
->quirks
))
5399 return cmd_status(sk
, hdev
->id
, MGMT_OP_SET_EXTERNAL_CONFIG
,
5400 MGMT_STATUS_NOT_SUPPORTED
);
5405 changed
= !test_and_set_bit(HCI_EXT_CONFIGURED
,
5408 changed
= test_and_clear_bit(HCI_EXT_CONFIGURED
,
5411 err
= send_options_rsp(sk
, MGMT_OP_SET_EXTERNAL_CONFIG
, hdev
);
5418 if (test_bit(HCI_UNCONFIGURED
, &hdev
->dev_flags
) == is_configured(hdev
)) {
5419 mgmt_index_removed(hdev
);
5420 change_bit(HCI_UNCONFIGURED
, &hdev
->dev_flags
);
5421 mgmt_index_added(hdev
);
5425 hci_dev_unlock(hdev
);
5429 static const struct mgmt_handler
{
5430 int (*func
) (struct sock
*sk
, struct hci_dev
*hdev
, void *data
,
5434 } mgmt_handlers
[] = {
5435 { NULL
}, /* 0x0000 (no command) */
5436 { read_version
, false, MGMT_READ_VERSION_SIZE
},
5437 { read_commands
, false, MGMT_READ_COMMANDS_SIZE
},
5438 { read_index_list
, false, MGMT_READ_INDEX_LIST_SIZE
},
5439 { read_controller_info
, false, MGMT_READ_INFO_SIZE
},
5440 { set_powered
, false, MGMT_SETTING_SIZE
},
5441 { set_discoverable
, false, MGMT_SET_DISCOVERABLE_SIZE
},
5442 { set_connectable
, false, MGMT_SETTING_SIZE
},
5443 { set_fast_connectable
, false, MGMT_SETTING_SIZE
},
5444 { set_pairable
, false, MGMT_SETTING_SIZE
},
5445 { set_link_security
, false, MGMT_SETTING_SIZE
},
5446 { set_ssp
, false, MGMT_SETTING_SIZE
},
5447 { set_hs
, false, MGMT_SETTING_SIZE
},
5448 { set_le
, false, MGMT_SETTING_SIZE
},
5449 { set_dev_class
, false, MGMT_SET_DEV_CLASS_SIZE
},
5450 { set_local_name
, false, MGMT_SET_LOCAL_NAME_SIZE
},
5451 { add_uuid
, false, MGMT_ADD_UUID_SIZE
},
5452 { remove_uuid
, false, MGMT_REMOVE_UUID_SIZE
},
5453 { load_link_keys
, true, MGMT_LOAD_LINK_KEYS_SIZE
},
5454 { load_long_term_keys
, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE
},
5455 { disconnect
, false, MGMT_DISCONNECT_SIZE
},
5456 { get_connections
, false, MGMT_GET_CONNECTIONS_SIZE
},
5457 { pin_code_reply
, false, MGMT_PIN_CODE_REPLY_SIZE
},
5458 { pin_code_neg_reply
, false, MGMT_PIN_CODE_NEG_REPLY_SIZE
},
5459 { set_io_capability
, false, MGMT_SET_IO_CAPABILITY_SIZE
},
5460 { pair_device
, false, MGMT_PAIR_DEVICE_SIZE
},
5461 { cancel_pair_device
, false, MGMT_CANCEL_PAIR_DEVICE_SIZE
},
5462 { unpair_device
, false, MGMT_UNPAIR_DEVICE_SIZE
},
5463 { user_confirm_reply
, false, MGMT_USER_CONFIRM_REPLY_SIZE
},
5464 { user_confirm_neg_reply
, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE
},
5465 { user_passkey_reply
, false, MGMT_USER_PASSKEY_REPLY_SIZE
},
5466 { user_passkey_neg_reply
, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE
},
5467 { read_local_oob_data
, false, MGMT_READ_LOCAL_OOB_DATA_SIZE
},
5468 { add_remote_oob_data
, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE
},
5469 { remove_remote_oob_data
, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE
},
5470 { start_discovery
, false, MGMT_START_DISCOVERY_SIZE
},
5471 { stop_discovery
, false, MGMT_STOP_DISCOVERY_SIZE
},
5472 { confirm_name
, false, MGMT_CONFIRM_NAME_SIZE
},
5473 { block_device
, false, MGMT_BLOCK_DEVICE_SIZE
},
5474 { unblock_device
, false, MGMT_UNBLOCK_DEVICE_SIZE
},
5475 { set_device_id
, false, MGMT_SET_DEVICE_ID_SIZE
},
5476 { set_advertising
, false, MGMT_SETTING_SIZE
},
5477 { set_bredr
, false, MGMT_SETTING_SIZE
},
5478 { set_static_address
, false, MGMT_SET_STATIC_ADDRESS_SIZE
},
5479 { set_scan_params
, false, MGMT_SET_SCAN_PARAMS_SIZE
},
5480 { set_secure_conn
, false, MGMT_SETTING_SIZE
},
5481 { set_debug_keys
, false, MGMT_SETTING_SIZE
},
5482 { set_privacy
, false, MGMT_SET_PRIVACY_SIZE
},
5483 { load_irks
, true, MGMT_LOAD_IRKS_SIZE
},
5484 { get_conn_info
, false, MGMT_GET_CONN_INFO_SIZE
},
5485 { get_clock_info
, false, MGMT_GET_CLOCK_INFO_SIZE
},
5486 { add_device
, false, MGMT_ADD_DEVICE_SIZE
},
5487 { remove_device
, false, MGMT_REMOVE_DEVICE_SIZE
},
5488 { load_conn_param
, true, MGMT_LOAD_CONN_PARAM_SIZE
},
5489 { read_unconf_index_list
, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE
},
5490 { read_config_info
, false, MGMT_READ_CONFIG_INFO_SIZE
},
5491 { set_external_config
, false, MGMT_SET_EXTERNAL_CONFIG_SIZE
},
5494 int mgmt_control(struct sock
*sk
, struct msghdr
*msg
, size_t msglen
)
5498 struct mgmt_hdr
*hdr
;
5499 u16 opcode
, index
, len
;
5500 struct hci_dev
*hdev
= NULL
;
5501 const struct mgmt_handler
*handler
;
5504 BT_DBG("got %zu bytes", msglen
);
5506 if (msglen
< sizeof(*hdr
))
5509 buf
= kmalloc(msglen
, GFP_KERNEL
);
5513 if (memcpy_fromiovec(buf
, msg
->msg_iov
, msglen
)) {
5519 opcode
= __le16_to_cpu(hdr
->opcode
);
5520 index
= __le16_to_cpu(hdr
->index
);
5521 len
= __le16_to_cpu(hdr
->len
);
5523 if (len
!= msglen
- sizeof(*hdr
)) {
5528 if (index
!= MGMT_INDEX_NONE
) {
5529 hdev
= hci_dev_get(index
);
5531 err
= cmd_status(sk
, index
, opcode
,
5532 MGMT_STATUS_INVALID_INDEX
);
5536 if (test_bit(HCI_SETUP
, &hdev
->dev_flags
) ||
5537 test_bit(HCI_USER_CHANNEL
, &hdev
->dev_flags
)) {
5538 err
= cmd_status(sk
, index
, opcode
,
5539 MGMT_STATUS_INVALID_INDEX
);
5543 if (test_bit(HCI_UNCONFIGURED
, &hdev
->dev_flags
) &&
5544 opcode
!= MGMT_OP_READ_CONFIG_INFO
&&
5545 opcode
!= MGMT_OP_SET_EXTERNAL_CONFIG
) {
5546 err
= cmd_status(sk
, index
, opcode
,
5547 MGMT_STATUS_INVALID_INDEX
);
5552 if (opcode
>= ARRAY_SIZE(mgmt_handlers
) ||
5553 mgmt_handlers
[opcode
].func
== NULL
) {
5554 BT_DBG("Unknown op %u", opcode
);
5555 err
= cmd_status(sk
, index
, opcode
,
5556 MGMT_STATUS_UNKNOWN_COMMAND
);
5560 if (hdev
&& (opcode
<= MGMT_OP_READ_INDEX_LIST
||
5561 opcode
== MGMT_OP_READ_UNCONF_INDEX_LIST
)) {
5562 err
= cmd_status(sk
, index
, opcode
,
5563 MGMT_STATUS_INVALID_INDEX
);
5567 if (!hdev
&& (opcode
> MGMT_OP_READ_INDEX_LIST
&&
5568 opcode
!= MGMT_OP_READ_UNCONF_INDEX_LIST
)) {
5569 err
= cmd_status(sk
, index
, opcode
,
5570 MGMT_STATUS_INVALID_INDEX
);
5574 handler
= &mgmt_handlers
[opcode
];
5576 if ((handler
->var_len
&& len
< handler
->data_len
) ||
5577 (!handler
->var_len
&& len
!= handler
->data_len
)) {
5578 err
= cmd_status(sk
, index
, opcode
,
5579 MGMT_STATUS_INVALID_PARAMS
);
5584 mgmt_init_hdev(sk
, hdev
);
5586 cp
= buf
+ sizeof(*hdr
);
5588 err
= handler
->func(sk
, hdev
, cp
, len
);
5602 void mgmt_index_added(struct hci_dev
*hdev
)
5604 if (hdev
->dev_type
!= HCI_BREDR
)
5607 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &hdev
->quirks
))
5610 if (test_bit(HCI_UNCONFIGURED
, &hdev
->dev_flags
))
5611 mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED
, hdev
, NULL
, 0, NULL
);
5613 mgmt_event(MGMT_EV_INDEX_ADDED
, hdev
, NULL
, 0, NULL
);
5616 void mgmt_index_removed(struct hci_dev
*hdev
)
5618 u8 status
= MGMT_STATUS_INVALID_INDEX
;
5620 if (hdev
->dev_type
!= HCI_BREDR
)
5623 if (test_bit(HCI_QUIRK_RAW_DEVICE
, &hdev
->quirks
))
5626 mgmt_pending_foreach(0, hdev
, cmd_status_rsp
, &status
);
5628 if (test_bit(HCI_UNCONFIGURED
, &hdev
->dev_flags
))
5629 mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED
, hdev
, NULL
, 0, NULL
);
5631 mgmt_event(MGMT_EV_INDEX_REMOVED
, hdev
, NULL
, 0, NULL
);
5634 /* This function requires the caller holds hdev->lock */
5635 static void restart_le_actions(struct hci_dev
*hdev
)
5637 struct hci_conn_params
*p
;
5639 list_for_each_entry(p
, &hdev
->le_conn_params
, list
) {
5640 /* Needed for AUTO_OFF case where might not "really"
5641 * have been powered off.
5643 list_del_init(&p
->action
);
5645 switch (p
->auto_connect
) {
5646 case HCI_AUTO_CONN_ALWAYS
:
5647 list_add(&p
->action
, &hdev
->pend_le_conns
);
5649 case HCI_AUTO_CONN_REPORT
:
5650 list_add(&p
->action
, &hdev
->pend_le_reports
);
5657 hci_update_background_scan(hdev
);
5660 static void powered_complete(struct hci_dev
*hdev
, u8 status
)
5662 struct cmd_lookup match
= { NULL
, hdev
};
5664 BT_DBG("status 0x%02x", status
);
5668 restart_le_actions(hdev
);
5670 mgmt_pending_foreach(MGMT_OP_SET_POWERED
, hdev
, settings_rsp
, &match
);
5672 new_settings(hdev
, match
.sk
);
5674 hci_dev_unlock(hdev
);
5680 static int powered_update_hci(struct hci_dev
*hdev
)
5682 struct hci_request req
;
5685 hci_req_init(&req
, hdev
);
5687 if (test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
) &&
5688 !lmp_host_ssp_capable(hdev
)) {
5691 hci_req_add(&req
, HCI_OP_WRITE_SSP_MODE
, 1, &ssp
);
5694 if (test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
) &&
5695 lmp_bredr_capable(hdev
)) {
5696 struct hci_cp_write_le_host_supported cp
;
5699 cp
.simul
= lmp_le_br_capable(hdev
);
5701 /* Check first if we already have the right
5702 * host state (host features set)
5704 if (cp
.le
!= lmp_host_le_capable(hdev
) ||
5705 cp
.simul
!= lmp_host_le_br_capable(hdev
))
5706 hci_req_add(&req
, HCI_OP_WRITE_LE_HOST_SUPPORTED
,
5710 if (lmp_le_capable(hdev
)) {
5711 /* Make sure the controller has a good default for
5712 * advertising data. This also applies to the case
5713 * where BR/EDR was toggled during the AUTO_OFF phase.
5715 if (test_bit(HCI_LE_ENABLED
, &hdev
->dev_flags
)) {
5716 update_adv_data(&req
);
5717 update_scan_rsp_data(&req
);
5720 if (test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
))
5721 enable_advertising(&req
);
5724 link_sec
= test_bit(HCI_LINK_SECURITY
, &hdev
->dev_flags
);
5725 if (link_sec
!= test_bit(HCI_AUTH
, &hdev
->flags
))
5726 hci_req_add(&req
, HCI_OP_WRITE_AUTH_ENABLE
,
5727 sizeof(link_sec
), &link_sec
);
5729 if (lmp_bredr_capable(hdev
)) {
5730 if (test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
))
5731 set_bredr_scan(&req
);
5737 return hci_req_run(&req
, powered_complete
);
5740 int mgmt_powered(struct hci_dev
*hdev
, u8 powered
)
5742 struct cmd_lookup match
= { NULL
, hdev
};
5743 u8 status_not_powered
= MGMT_STATUS_NOT_POWERED
;
5744 u8 zero_cod
[] = { 0, 0, 0 };
5747 if (!test_bit(HCI_MGMT
, &hdev
->dev_flags
))
5751 if (powered_update_hci(hdev
) == 0)
5754 mgmt_pending_foreach(MGMT_OP_SET_POWERED
, hdev
, settings_rsp
,
5759 mgmt_pending_foreach(MGMT_OP_SET_POWERED
, hdev
, settings_rsp
, &match
);
5760 mgmt_pending_foreach(0, hdev
, cmd_status_rsp
, &status_not_powered
);
5762 if (memcmp(hdev
->dev_class
, zero_cod
, sizeof(zero_cod
)) != 0)
5763 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED
, hdev
,
5764 zero_cod
, sizeof(zero_cod
), NULL
);
5767 err
= new_settings(hdev
, match
.sk
);
5775 void mgmt_set_powered_failed(struct hci_dev
*hdev
, int err
)
5777 struct pending_cmd
*cmd
;
5780 cmd
= mgmt_pending_find(MGMT_OP_SET_POWERED
, hdev
);
5784 if (err
== -ERFKILL
)
5785 status
= MGMT_STATUS_RFKILLED
;
5787 status
= MGMT_STATUS_FAILED
;
5789 cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_SET_POWERED
, status
);
5791 mgmt_pending_remove(cmd
);
5794 void mgmt_discoverable_timeout(struct hci_dev
*hdev
)
5796 struct hci_request req
;
5800 /* When discoverable timeout triggers, then just make sure
5801 * the limited discoverable flag is cleared. Even in the case
5802 * of a timeout triggered from general discoverable, it is
5803 * safe to unconditionally clear the flag.
5805 clear_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
5806 clear_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
5808 hci_req_init(&req
, hdev
);
5809 if (test_bit(HCI_BREDR_ENABLED
, &hdev
->dev_flags
)) {
5810 u8 scan
= SCAN_PAGE
;
5811 hci_req_add(&req
, HCI_OP_WRITE_SCAN_ENABLE
,
5812 sizeof(scan
), &scan
);
5815 update_adv_data(&req
);
5816 hci_req_run(&req
, NULL
);
5818 hdev
->discov_timeout
= 0;
5820 new_settings(hdev
, NULL
);
5822 hci_dev_unlock(hdev
);
5825 void mgmt_discoverable(struct hci_dev
*hdev
, u8 discoverable
)
5829 /* Nothing needed here if there's a pending command since that
5830 * commands request completion callback takes care of everything
5833 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE
, hdev
))
5836 /* Powering off may clear the scan mode - don't let that interfere */
5837 if (!discoverable
&& mgmt_pending_find(MGMT_OP_SET_POWERED
, hdev
))
5841 changed
= !test_and_set_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
5843 clear_bit(HCI_LIMITED_DISCOVERABLE
, &hdev
->dev_flags
);
5844 changed
= test_and_clear_bit(HCI_DISCOVERABLE
, &hdev
->dev_flags
);
5848 struct hci_request req
;
5850 /* In case this change in discoverable was triggered by
5851 * a disabling of connectable there could be a need to
5852 * update the advertising flags.
5854 hci_req_init(&req
, hdev
);
5855 update_adv_data(&req
);
5856 hci_req_run(&req
, NULL
);
5858 new_settings(hdev
, NULL
);
5862 void mgmt_connectable(struct hci_dev
*hdev
, u8 connectable
)
5866 /* Nothing needed here if there's a pending command since that
5867 * commands request completion callback takes care of everything
5870 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE
, hdev
))
5873 /* Powering off may clear the scan mode - don't let that interfere */
5874 if (!connectable
&& mgmt_pending_find(MGMT_OP_SET_POWERED
, hdev
))
5878 changed
= !test_and_set_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
);
5880 changed
= test_and_clear_bit(HCI_CONNECTABLE
, &hdev
->dev_flags
);
5883 new_settings(hdev
, NULL
);
5886 void mgmt_advertising(struct hci_dev
*hdev
, u8 advertising
)
5888 /* Powering off may stop advertising - don't let that interfere */
5889 if (!advertising
&& mgmt_pending_find(MGMT_OP_SET_POWERED
, hdev
))
5893 set_bit(HCI_ADVERTISING
, &hdev
->dev_flags
);
5895 clear_bit(HCI_ADVERTISING
, &hdev
->dev_flags
);
5898 void mgmt_write_scan_failed(struct hci_dev
*hdev
, u8 scan
, u8 status
)
5900 u8 mgmt_err
= mgmt_status(status
);
5902 if (scan
& SCAN_PAGE
)
5903 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE
, hdev
,
5904 cmd_status_rsp
, &mgmt_err
);
5906 if (scan
& SCAN_INQUIRY
)
5907 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE
, hdev
,
5908 cmd_status_rsp
, &mgmt_err
);
5911 void mgmt_new_link_key(struct hci_dev
*hdev
, struct link_key
*key
,
5914 struct mgmt_ev_new_link_key ev
;
5916 memset(&ev
, 0, sizeof(ev
));
5918 ev
.store_hint
= persistent
;
5919 bacpy(&ev
.key
.addr
.bdaddr
, &key
->bdaddr
);
5920 ev
.key
.addr
.type
= BDADDR_BREDR
;
5921 ev
.key
.type
= key
->type
;
5922 memcpy(ev
.key
.val
, key
->val
, HCI_LINK_KEY_SIZE
);
5923 ev
.key
.pin_len
= key
->pin_len
;
5925 mgmt_event(MGMT_EV_NEW_LINK_KEY
, hdev
, &ev
, sizeof(ev
), NULL
);
5928 static u8
mgmt_ltk_type(struct smp_ltk
*ltk
)
5930 if (ltk
->authenticated
)
5931 return MGMT_LTK_AUTHENTICATED
;
5933 return MGMT_LTK_UNAUTHENTICATED
;
5936 void mgmt_new_ltk(struct hci_dev
*hdev
, struct smp_ltk
*key
, bool persistent
)
5938 struct mgmt_ev_new_long_term_key ev
;
5940 memset(&ev
, 0, sizeof(ev
));
5942 /* Devices using resolvable or non-resolvable random addresses
5943 * without providing an indentity resolving key don't require
5944 * to store long term keys. Their addresses will change the
5947 * Only when a remote device provides an identity address
5948 * make sure the long term key is stored. If the remote
5949 * identity is known, the long term keys are internally
5950 * mapped to the identity address. So allow static random
5951 * and public addresses here.
5953 if (key
->bdaddr_type
== ADDR_LE_DEV_RANDOM
&&
5954 (key
->bdaddr
.b
[5] & 0xc0) != 0xc0)
5955 ev
.store_hint
= 0x00;
5957 ev
.store_hint
= persistent
;
5959 bacpy(&ev
.key
.addr
.bdaddr
, &key
->bdaddr
);
5960 ev
.key
.addr
.type
= link_to_bdaddr(LE_LINK
, key
->bdaddr_type
);
5961 ev
.key
.type
= mgmt_ltk_type(key
);
5962 ev
.key
.enc_size
= key
->enc_size
;
5963 ev
.key
.ediv
= key
->ediv
;
5964 ev
.key
.rand
= key
->rand
;
5966 if (key
->type
== SMP_LTK
)
5969 memcpy(ev
.key
.val
, key
->val
, sizeof(key
->val
));
5971 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY
, hdev
, &ev
, sizeof(ev
), NULL
);
5974 void mgmt_new_irk(struct hci_dev
*hdev
, struct smp_irk
*irk
)
5976 struct mgmt_ev_new_irk ev
;
5978 memset(&ev
, 0, sizeof(ev
));
5980 /* For identity resolving keys from devices that are already
5981 * using a public address or static random address, do not
5982 * ask for storing this key. The identity resolving key really
5983 * is only mandatory for devices using resovlable random
5986 * Storing all identity resolving keys has the downside that
5987 * they will be also loaded on next boot of they system. More
5988 * identity resolving keys, means more time during scanning is
5989 * needed to actually resolve these addresses.
5991 if (bacmp(&irk
->rpa
, BDADDR_ANY
))
5992 ev
.store_hint
= 0x01;
5994 ev
.store_hint
= 0x00;
5996 bacpy(&ev
.rpa
, &irk
->rpa
);
5997 bacpy(&ev
.irk
.addr
.bdaddr
, &irk
->bdaddr
);
5998 ev
.irk
.addr
.type
= link_to_bdaddr(LE_LINK
, irk
->addr_type
);
5999 memcpy(ev
.irk
.val
, irk
->val
, sizeof(irk
->val
));
6001 mgmt_event(MGMT_EV_NEW_IRK
, hdev
, &ev
, sizeof(ev
), NULL
);
6004 void mgmt_new_csrk(struct hci_dev
*hdev
, struct smp_csrk
*csrk
,
6007 struct mgmt_ev_new_csrk ev
;
6009 memset(&ev
, 0, sizeof(ev
));
6011 /* Devices using resolvable or non-resolvable random addresses
6012 * without providing an indentity resolving key don't require
6013 * to store signature resolving keys. Their addresses will change
6014 * the next time around.
6016 * Only when a remote device provides an identity address
6017 * make sure the signature resolving key is stored. So allow
6018 * static random and public addresses here.
6020 if (csrk
->bdaddr_type
== ADDR_LE_DEV_RANDOM
&&
6021 (csrk
->bdaddr
.b
[5] & 0xc0) != 0xc0)
6022 ev
.store_hint
= 0x00;
6024 ev
.store_hint
= persistent
;
6026 bacpy(&ev
.key
.addr
.bdaddr
, &csrk
->bdaddr
);
6027 ev
.key
.addr
.type
= link_to_bdaddr(LE_LINK
, csrk
->bdaddr_type
);
6028 ev
.key
.master
= csrk
->master
;
6029 memcpy(ev
.key
.val
, csrk
->val
, sizeof(csrk
->val
));
6031 mgmt_event(MGMT_EV_NEW_CSRK
, hdev
, &ev
, sizeof(ev
), NULL
);
6034 void mgmt_new_conn_param(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
6035 u8 bdaddr_type
, u8 store_hint
, u16 min_interval
,
6036 u16 max_interval
, u16 latency
, u16 timeout
)
6038 struct mgmt_ev_new_conn_param ev
;
6040 if (!hci_is_identity_address(bdaddr
, bdaddr_type
))
6043 memset(&ev
, 0, sizeof(ev
));
6044 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
6045 ev
.addr
.type
= link_to_bdaddr(LE_LINK
, bdaddr_type
);
6046 ev
.store_hint
= store_hint
;
6047 ev
.min_interval
= cpu_to_le16(min_interval
);
6048 ev
.max_interval
= cpu_to_le16(max_interval
);
6049 ev
.latency
= cpu_to_le16(latency
);
6050 ev
.timeout
= cpu_to_le16(timeout
);
6052 mgmt_event(MGMT_EV_NEW_CONN_PARAM
, hdev
, &ev
, sizeof(ev
), NULL
);
6055 static inline u16
eir_append_data(u8
*eir
, u16 eir_len
, u8 type
, u8
*data
,
6058 eir
[eir_len
++] = sizeof(type
) + data_len
;
6059 eir
[eir_len
++] = type
;
6060 memcpy(&eir
[eir_len
], data
, data_len
);
6061 eir_len
+= data_len
;
6066 void mgmt_device_connected(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 link_type
,
6067 u8 addr_type
, u32 flags
, u8
*name
, u8 name_len
,
6071 struct mgmt_ev_device_connected
*ev
= (void *) buf
;
6074 bacpy(&ev
->addr
.bdaddr
, bdaddr
);
6075 ev
->addr
.type
= link_to_bdaddr(link_type
, addr_type
);
6077 ev
->flags
= __cpu_to_le32(flags
);
6080 eir_len
= eir_append_data(ev
->eir
, 0, EIR_NAME_COMPLETE
,
6083 if (dev_class
&& memcmp(dev_class
, "\0\0\0", 3) != 0)
6084 eir_len
= eir_append_data(ev
->eir
, eir_len
,
6085 EIR_CLASS_OF_DEV
, dev_class
, 3);
6087 ev
->eir_len
= cpu_to_le16(eir_len
);
6089 mgmt_event(MGMT_EV_DEVICE_CONNECTED
, hdev
, buf
,
6090 sizeof(*ev
) + eir_len
, NULL
);
6093 static void disconnect_rsp(struct pending_cmd
*cmd
, void *data
)
6095 struct mgmt_cp_disconnect
*cp
= cmd
->param
;
6096 struct sock
**sk
= data
;
6097 struct mgmt_rp_disconnect rp
;
6099 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
6100 rp
.addr
.type
= cp
->addr
.type
;
6102 cmd_complete(cmd
->sk
, cmd
->index
, MGMT_OP_DISCONNECT
, 0, &rp
,
6108 mgmt_pending_remove(cmd
);
6111 static void unpair_device_rsp(struct pending_cmd
*cmd
, void *data
)
6113 struct hci_dev
*hdev
= data
;
6114 struct mgmt_cp_unpair_device
*cp
= cmd
->param
;
6115 struct mgmt_rp_unpair_device rp
;
6117 memset(&rp
, 0, sizeof(rp
));
6118 bacpy(&rp
.addr
.bdaddr
, &cp
->addr
.bdaddr
);
6119 rp
.addr
.type
= cp
->addr
.type
;
6121 device_unpaired(hdev
, &cp
->addr
.bdaddr
, cp
->addr
.type
, cmd
->sk
);
6123 cmd_complete(cmd
->sk
, cmd
->index
, cmd
->opcode
, 0, &rp
, sizeof(rp
));
6125 mgmt_pending_remove(cmd
);
6128 void mgmt_device_disconnected(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
6129 u8 link_type
, u8 addr_type
, u8 reason
,
6130 bool mgmt_connected
)
6132 struct mgmt_ev_device_disconnected ev
;
6133 struct pending_cmd
*power_off
;
6134 struct sock
*sk
= NULL
;
6136 power_off
= mgmt_pending_find(MGMT_OP_SET_POWERED
, hdev
);
6138 struct mgmt_mode
*cp
= power_off
->param
;
6140 /* The connection is still in hci_conn_hash so test for 1
6141 * instead of 0 to know if this is the last one.
6143 if (!cp
->val
&& hci_conn_count(hdev
) == 1) {
6144 cancel_delayed_work(&hdev
->power_off
);
6145 queue_work(hdev
->req_workqueue
, &hdev
->power_off
.work
);
6149 if (!mgmt_connected
)
6152 if (link_type
!= ACL_LINK
&& link_type
!= LE_LINK
)
6155 mgmt_pending_foreach(MGMT_OP_DISCONNECT
, hdev
, disconnect_rsp
, &sk
);
6157 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
6158 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
6161 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED
, hdev
, &ev
, sizeof(ev
), sk
);
6166 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE
, hdev
, unpair_device_rsp
,
6170 void mgmt_disconnect_failed(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
6171 u8 link_type
, u8 addr_type
, u8 status
)
6173 u8 bdaddr_type
= link_to_bdaddr(link_type
, addr_type
);
6174 struct mgmt_cp_disconnect
*cp
;
6175 struct mgmt_rp_disconnect rp
;
6176 struct pending_cmd
*cmd
;
6178 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE
, hdev
, unpair_device_rsp
,
6181 cmd
= mgmt_pending_find(MGMT_OP_DISCONNECT
, hdev
);
6187 if (bacmp(bdaddr
, &cp
->addr
.bdaddr
))
6190 if (cp
->addr
.type
!= bdaddr_type
)
6193 bacpy(&rp
.addr
.bdaddr
, bdaddr
);
6194 rp
.addr
.type
= bdaddr_type
;
6196 cmd_complete(cmd
->sk
, cmd
->index
, MGMT_OP_DISCONNECT
,
6197 mgmt_status(status
), &rp
, sizeof(rp
));
6199 mgmt_pending_remove(cmd
);
6202 void mgmt_connect_failed(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 link_type
,
6203 u8 addr_type
, u8 status
)
6205 struct mgmt_ev_connect_failed ev
;
6206 struct pending_cmd
*power_off
;
6208 power_off
= mgmt_pending_find(MGMT_OP_SET_POWERED
, hdev
);
6210 struct mgmt_mode
*cp
= power_off
->param
;
6212 /* The connection is still in hci_conn_hash so test for 1
6213 * instead of 0 to know if this is the last one.
6215 if (!cp
->val
&& hci_conn_count(hdev
) == 1) {
6216 cancel_delayed_work(&hdev
->power_off
);
6217 queue_work(hdev
->req_workqueue
, &hdev
->power_off
.work
);
6221 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
6222 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
6223 ev
.status
= mgmt_status(status
);
6225 mgmt_event(MGMT_EV_CONNECT_FAILED
, hdev
, &ev
, sizeof(ev
), NULL
);
6228 void mgmt_pin_code_request(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 secure
)
6230 struct mgmt_ev_pin_code_request ev
;
6232 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
6233 ev
.addr
.type
= BDADDR_BREDR
;
6236 mgmt_event(MGMT_EV_PIN_CODE_REQUEST
, hdev
, &ev
, sizeof(ev
), NULL
);
6239 void mgmt_pin_code_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
6242 struct pending_cmd
*cmd
;
6243 struct mgmt_rp_pin_code_reply rp
;
6245 cmd
= mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY
, hdev
);
6249 bacpy(&rp
.addr
.bdaddr
, bdaddr
);
6250 rp
.addr
.type
= BDADDR_BREDR
;
6252 cmd_complete(cmd
->sk
, hdev
->id
, MGMT_OP_PIN_CODE_REPLY
,
6253 mgmt_status(status
), &rp
, sizeof(rp
));
6255 mgmt_pending_remove(cmd
);
6258 void mgmt_pin_code_neg_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
6261 struct pending_cmd
*cmd
;
6262 struct mgmt_rp_pin_code_reply rp
;
6264 cmd
= mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY
, hdev
);
6268 bacpy(&rp
.addr
.bdaddr
, bdaddr
);
6269 rp
.addr
.type
= BDADDR_BREDR
;
6271 cmd_complete(cmd
->sk
, hdev
->id
, MGMT_OP_PIN_CODE_NEG_REPLY
,
6272 mgmt_status(status
), &rp
, sizeof(rp
));
6274 mgmt_pending_remove(cmd
);
6277 int mgmt_user_confirm_request(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
6278 u8 link_type
, u8 addr_type
, u32 value
,
6281 struct mgmt_ev_user_confirm_request ev
;
6283 BT_DBG("%s", hdev
->name
);
6285 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
6286 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
6287 ev
.confirm_hint
= confirm_hint
;
6288 ev
.value
= cpu_to_le32(value
);
6290 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST
, hdev
, &ev
, sizeof(ev
),
6294 int mgmt_user_passkey_request(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
6295 u8 link_type
, u8 addr_type
)
6297 struct mgmt_ev_user_passkey_request ev
;
6299 BT_DBG("%s", hdev
->name
);
6301 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
6302 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
6304 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST
, hdev
, &ev
, sizeof(ev
),
6308 static int user_pairing_resp_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
6309 u8 link_type
, u8 addr_type
, u8 status
,
6312 struct pending_cmd
*cmd
;
6313 struct mgmt_rp_user_confirm_reply rp
;
6316 cmd
= mgmt_pending_find(opcode
, hdev
);
6320 bacpy(&rp
.addr
.bdaddr
, bdaddr
);
6321 rp
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
6322 err
= cmd_complete(cmd
->sk
, hdev
->id
, opcode
, mgmt_status(status
),
6325 mgmt_pending_remove(cmd
);
6330 int mgmt_user_confirm_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
6331 u8 link_type
, u8 addr_type
, u8 status
)
6333 return user_pairing_resp_complete(hdev
, bdaddr
, link_type
, addr_type
,
6334 status
, MGMT_OP_USER_CONFIRM_REPLY
);
6337 int mgmt_user_confirm_neg_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
6338 u8 link_type
, u8 addr_type
, u8 status
)
6340 return user_pairing_resp_complete(hdev
, bdaddr
, link_type
, addr_type
,
6342 MGMT_OP_USER_CONFIRM_NEG_REPLY
);
6345 int mgmt_user_passkey_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
6346 u8 link_type
, u8 addr_type
, u8 status
)
6348 return user_pairing_resp_complete(hdev
, bdaddr
, link_type
, addr_type
,
6349 status
, MGMT_OP_USER_PASSKEY_REPLY
);
6352 int mgmt_user_passkey_neg_reply_complete(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
6353 u8 link_type
, u8 addr_type
, u8 status
)
6355 return user_pairing_resp_complete(hdev
, bdaddr
, link_type
, addr_type
,
6357 MGMT_OP_USER_PASSKEY_NEG_REPLY
);
6360 int mgmt_user_passkey_notify(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
,
6361 u8 link_type
, u8 addr_type
, u32 passkey
,
6364 struct mgmt_ev_passkey_notify ev
;
6366 BT_DBG("%s", hdev
->name
);
6368 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
6369 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
6370 ev
.passkey
= __cpu_to_le32(passkey
);
6371 ev
.entered
= entered
;
6373 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY
, hdev
, &ev
, sizeof(ev
), NULL
);
6376 void mgmt_auth_failed(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 link_type
,
6377 u8 addr_type
, u8 status
)
6379 struct mgmt_ev_auth_failed ev
;
6381 bacpy(&ev
.addr
.bdaddr
, bdaddr
);
6382 ev
.addr
.type
= link_to_bdaddr(link_type
, addr_type
);
6383 ev
.status
= mgmt_status(status
);
6385 mgmt_event(MGMT_EV_AUTH_FAILED
, hdev
, &ev
, sizeof(ev
), NULL
);
6388 void mgmt_auth_enable_complete(struct hci_dev
*hdev
, u8 status
)
6390 struct cmd_lookup match
= { NULL
, hdev
};
6394 u8 mgmt_err
= mgmt_status(status
);
6395 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY
, hdev
,
6396 cmd_status_rsp
, &mgmt_err
);
6400 if (test_bit(HCI_AUTH
, &hdev
->flags
))
6401 changed
= !test_and_set_bit(HCI_LINK_SECURITY
,
6404 changed
= test_and_clear_bit(HCI_LINK_SECURITY
,
6407 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY
, hdev
, settings_rsp
,
6411 new_settings(hdev
, match
.sk
);
6417 static void clear_eir(struct hci_request
*req
)
6419 struct hci_dev
*hdev
= req
->hdev
;
6420 struct hci_cp_write_eir cp
;
6422 if (!lmp_ext_inq_capable(hdev
))
6425 memset(hdev
->eir
, 0, sizeof(hdev
->eir
));
6427 memset(&cp
, 0, sizeof(cp
));
6429 hci_req_add(req
, HCI_OP_WRITE_EIR
, sizeof(cp
), &cp
);
6432 void mgmt_ssp_enable_complete(struct hci_dev
*hdev
, u8 enable
, u8 status
)
6434 struct cmd_lookup match
= { NULL
, hdev
};
6435 struct hci_request req
;
6436 bool changed
= false;
6439 u8 mgmt_err
= mgmt_status(status
);
6441 if (enable
&& test_and_clear_bit(HCI_SSP_ENABLED
,
6442 &hdev
->dev_flags
)) {
6443 clear_bit(HCI_HS_ENABLED
, &hdev
->dev_flags
);
6444 new_settings(hdev
, NULL
);
6447 mgmt_pending_foreach(MGMT_OP_SET_SSP
, hdev
, cmd_status_rsp
,
6453 changed
= !test_and_set_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
);
6455 changed
= test_and_clear_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
);
6457 changed
= test_and_clear_bit(HCI_HS_ENABLED
,
6460 clear_bit(HCI_HS_ENABLED
, &hdev
->dev_flags
);
6463 mgmt_pending_foreach(MGMT_OP_SET_SSP
, hdev
, settings_rsp
, &match
);
6466 new_settings(hdev
, match
.sk
);
6471 hci_req_init(&req
, hdev
);
6473 if (test_bit(HCI_SSP_ENABLED
, &hdev
->dev_flags
)) {
6474 if (test_bit(HCI_USE_DEBUG_KEYS
, &hdev
->dev_flags
))
6475 hci_req_add(&req
, HCI_OP_WRITE_SSP_DEBUG_MODE
,
6476 sizeof(enable
), &enable
);
6482 hci_req_run(&req
, NULL
);
6485 void mgmt_sc_enable_complete(struct hci_dev
*hdev
, u8 enable
, u8 status
)
6487 struct cmd_lookup match
= { NULL
, hdev
};
6488 bool changed
= false;
6491 u8 mgmt_err
= mgmt_status(status
);
6494 if (test_and_clear_bit(HCI_SC_ENABLED
,
6496 new_settings(hdev
, NULL
);
6497 clear_bit(HCI_SC_ONLY
, &hdev
->dev_flags
);
6500 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN
, hdev
,
6501 cmd_status_rsp
, &mgmt_err
);
6506 changed
= !test_and_set_bit(HCI_SC_ENABLED
, &hdev
->dev_flags
);
6508 changed
= test_and_clear_bit(HCI_SC_ENABLED
, &hdev
->dev_flags
);
6509 clear_bit(HCI_SC_ONLY
, &hdev
->dev_flags
);
6512 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN
, hdev
,
6513 settings_rsp
, &match
);
6516 new_settings(hdev
, match
.sk
);
6522 static void sk_lookup(struct pending_cmd
*cmd
, void *data
)
6524 struct cmd_lookup
*match
= data
;
6526 if (match
->sk
== NULL
) {
6527 match
->sk
= cmd
->sk
;
6528 sock_hold(match
->sk
);
6532 void mgmt_set_class_of_dev_complete(struct hci_dev
*hdev
, u8
*dev_class
,
6535 struct cmd_lookup match
= { NULL
, hdev
, mgmt_status(status
) };
6537 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS
, hdev
, sk_lookup
, &match
);
6538 mgmt_pending_foreach(MGMT_OP_ADD_UUID
, hdev
, sk_lookup
, &match
);
6539 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID
, hdev
, sk_lookup
, &match
);
6542 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED
, hdev
, dev_class
, 3,
6549 void mgmt_set_local_name_complete(struct hci_dev
*hdev
, u8
*name
, u8 status
)
6551 struct mgmt_cp_set_local_name ev
;
6552 struct pending_cmd
*cmd
;
6557 memset(&ev
, 0, sizeof(ev
));
6558 memcpy(ev
.name
, name
, HCI_MAX_NAME_LENGTH
);
6559 memcpy(ev
.short_name
, hdev
->short_name
, HCI_MAX_SHORT_NAME_LENGTH
);
6561 cmd
= mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME
, hdev
);
6563 memcpy(hdev
->dev_name
, name
, sizeof(hdev
->dev_name
));
6565 /* If this is a HCI command related to powering on the
6566 * HCI dev don't send any mgmt signals.
6568 if (mgmt_pending_find(MGMT_OP_SET_POWERED
, hdev
))
6572 mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED
, hdev
, &ev
, sizeof(ev
),
6573 cmd
? cmd
->sk
: NULL
);
6576 void mgmt_read_local_oob_data_complete(struct hci_dev
*hdev
, u8
*hash192
,
6577 u8
*randomizer192
, u8
*hash256
,
6578 u8
*randomizer256
, u8 status
)
6580 struct pending_cmd
*cmd
;
6582 BT_DBG("%s status %u", hdev
->name
, status
);
6584 cmd
= mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA
, hdev
);
6589 cmd_status(cmd
->sk
, hdev
->id
, MGMT_OP_READ_LOCAL_OOB_DATA
,
6590 mgmt_status(status
));
6592 if (test_bit(HCI_SC_ENABLED
, &hdev
->dev_flags
) &&
6593 hash256
&& randomizer256
) {
6594 struct mgmt_rp_read_local_oob_ext_data rp
;
6596 memcpy(rp
.hash192
, hash192
, sizeof(rp
.hash192
));
6597 memcpy(rp
.randomizer192
, randomizer192
,
6598 sizeof(rp
.randomizer192
));
6600 memcpy(rp
.hash256
, hash256
, sizeof(rp
.hash256
));
6601 memcpy(rp
.randomizer256
, randomizer256
,
6602 sizeof(rp
.randomizer256
));
6604 cmd_complete(cmd
->sk
, hdev
->id
,
6605 MGMT_OP_READ_LOCAL_OOB_DATA
, 0,
6608 struct mgmt_rp_read_local_oob_data rp
;
6610 memcpy(rp
.hash
, hash192
, sizeof(rp
.hash
));
6611 memcpy(rp
.randomizer
, randomizer192
,
6612 sizeof(rp
.randomizer
));
6614 cmd_complete(cmd
->sk
, hdev
->id
,
6615 MGMT_OP_READ_LOCAL_OOB_DATA
, 0,
6620 mgmt_pending_remove(cmd
);
6623 void mgmt_device_found(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 link_type
,
6624 u8 addr_type
, u8
*dev_class
, s8 rssi
, u32 flags
,
6625 u8
*eir
, u16 eir_len
, u8
*scan_rsp
, u8 scan_rsp_len
)
6628 struct mgmt_ev_device_found
*ev
= (void *) buf
;
6629 struct smp_irk
*irk
;
6632 /* Don't send events for a non-kernel initiated discovery. With
6633 * LE one exception is if we have pend_le_reports > 0 in which
6634 * case we're doing passive scanning and want these events.
6636 if (!hci_discovery_active(hdev
)) {
6637 if (link_type
== ACL_LINK
)
6639 if (link_type
== LE_LINK
&& list_empty(&hdev
->pend_le_reports
))
6643 /* Make sure that the buffer is big enough. The 5 extra bytes
6644 * are for the potential CoD field.
6646 if (sizeof(*ev
) + eir_len
+ scan_rsp_len
+ 5 > sizeof(buf
))
6649 memset(buf
, 0, sizeof(buf
));
6651 irk
= hci_get_irk(hdev
, bdaddr
, addr_type
);
6653 bacpy(&ev
->addr
.bdaddr
, &irk
->bdaddr
);
6654 ev
->addr
.type
= link_to_bdaddr(link_type
, irk
->addr_type
);
6656 bacpy(&ev
->addr
.bdaddr
, bdaddr
);
6657 ev
->addr
.type
= link_to_bdaddr(link_type
, addr_type
);
6661 ev
->flags
= cpu_to_le32(flags
);
6664 memcpy(ev
->eir
, eir
, eir_len
);
6666 if (dev_class
&& !eir_has_data_type(ev
->eir
, eir_len
, EIR_CLASS_OF_DEV
))
6667 eir_len
= eir_append_data(ev
->eir
, eir_len
, EIR_CLASS_OF_DEV
,
6670 if (scan_rsp_len
> 0)
6671 memcpy(ev
->eir
+ eir_len
, scan_rsp
, scan_rsp_len
);
6673 ev
->eir_len
= cpu_to_le16(eir_len
+ scan_rsp_len
);
6674 ev_size
= sizeof(*ev
) + eir_len
+ scan_rsp_len
;
6676 mgmt_event(MGMT_EV_DEVICE_FOUND
, hdev
, ev
, ev_size
, NULL
);
6679 void mgmt_remote_name(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 link_type
,
6680 u8 addr_type
, s8 rssi
, u8
*name
, u8 name_len
)
6682 struct mgmt_ev_device_found
*ev
;
6683 char buf
[sizeof(*ev
) + HCI_MAX_NAME_LENGTH
+ 2];
6686 ev
= (struct mgmt_ev_device_found
*) buf
;
6688 memset(buf
, 0, sizeof(buf
));
6690 bacpy(&ev
->addr
.bdaddr
, bdaddr
);
6691 ev
->addr
.type
= link_to_bdaddr(link_type
, addr_type
);
6694 eir_len
= eir_append_data(ev
->eir
, 0, EIR_NAME_COMPLETE
, name
,
6697 ev
->eir_len
= cpu_to_le16(eir_len
);
6699 mgmt_event(MGMT_EV_DEVICE_FOUND
, hdev
, ev
, sizeof(*ev
) + eir_len
, NULL
);
6702 void mgmt_discovering(struct hci_dev
*hdev
, u8 discovering
)
6704 struct mgmt_ev_discovering ev
;
6705 struct pending_cmd
*cmd
;
6707 BT_DBG("%s discovering %u", hdev
->name
, discovering
);
6710 cmd
= mgmt_pending_find(MGMT_OP_START_DISCOVERY
, hdev
);
6712 cmd
= mgmt_pending_find(MGMT_OP_STOP_DISCOVERY
, hdev
);
6715 u8 type
= hdev
->discovery
.type
;
6717 cmd_complete(cmd
->sk
, hdev
->id
, cmd
->opcode
, 0, &type
,
6719 mgmt_pending_remove(cmd
);
6722 memset(&ev
, 0, sizeof(ev
));
6723 ev
.type
= hdev
->discovery
.type
;
6724 ev
.discovering
= discovering
;
6726 mgmt_event(MGMT_EV_DISCOVERING
, hdev
, &ev
, sizeof(ev
), NULL
);
6729 static void adv_enable_complete(struct hci_dev
*hdev
, u8 status
)
6731 BT_DBG("%s status %u", hdev
->name
, status
);
6733 /* Clear the advertising mgmt setting if we failed to re-enable it */
6735 clear_bit(HCI_ADVERTISING
, &hdev
->dev_flags
);
6736 new_settings(hdev
, NULL
);
6740 void mgmt_reenable_advertising(struct hci_dev
*hdev
)
6742 struct hci_request req
;
6744 if (hci_conn_num(hdev
, LE_LINK
) > 0)
6747 if (!test_bit(HCI_ADVERTISING
, &hdev
->dev_flags
))
6750 hci_req_init(&req
, hdev
);
6751 enable_advertising(&req
);
6753 /* If this fails we have no option but to let user space know
6754 * that we've disabled advertising.
6756 if (hci_req_run(&req
, adv_enable_complete
) < 0) {
6757 clear_bit(HCI_ADVERTISING
, &hdev
->dev_flags
);
6758 new_settings(hdev
, NULL
);