]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blob - net/bluetooth/mgmt.c
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dtor/input
[mirror_ubuntu-zesty-kernel.git] / net / bluetooth / mgmt.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
34
35 #include "smp.h"
36
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 7
39
/* Management opcodes this kernel supports; reported to user space
 * through the MGMT_OP_READ_COMMANDS reply (see read_commands()).
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
};
97
/* Management events this kernel may emit; reported to user space
 * alongside mgmt_commands in the MGMT_OP_READ_COMMANDS reply.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
};
129
130 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
131
132 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
133 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
134
/* A management command whose reply is deferred until some HCI
 * activity completes; queued on hdev->mgmt_pending.
 */
struct pending_cmd {
	struct list_head list;	/* link in hdev->mgmt_pending */
	u16 opcode;		/* MGMT_OP_* that is pending */
	int index;		/* controller index (hdev->id) */
	void *param;		/* copy of the command parameters */
	struct sock *sk;	/* originating socket (holds a reference) */
	void *user_data;	/* opaque per-command context */
};
143
/* HCI to MGMT error code conversion table, indexed directly by the
 * HCI status code (see mgmt_status()).
 */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
208
209 static u8 mgmt_status(u8 hci_status)
210 {
211 if (hci_status < ARRAY_SIZE(mgmt_status_table))
212 return mgmt_status_table[hci_status];
213
214 return MGMT_STATUS_FAILED;
215 }
216
/* Broadcast a management event to all control sockets except
 * @skip_sk. A NULL @hdev addresses the event to MGMT_INDEX_NONE.
 * Returns 0 on success or -ENOMEM if the skb allocation fails.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
		      struct sock *skip_sk)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;

	skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));
	hdr->opcode = cpu_to_le16(event);
	if (hdev)
		hdr->index = cpu_to_le16(hdev->id);
	else
		hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
	hdr->len = cpu_to_le16(data_len);

	/* Event parameters are optional */
	if (data)
		memcpy(skb_put(skb, data_len), data, data_len);

	/* Time stamp */
	__net_timestamp(skb);

	/* hci_send_to_control() clones the skb per receiver, so our
	 * reference must still be dropped here.
	 */
	hci_send_to_control(skb, skip_sk);
	kfree_skb(skb);

	return 0;
}
246
/* Queue a MGMT_EV_CMD_STATUS reply for command @cmd on socket @sk.
 * Used for failures and for commands that complete asynchronously.
 * Returns 0 on success or a negative errno.
 */
static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_status *ev;
	int err;

	BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev));

	ev = (void *) skb_put(skb, sizeof(*ev));
	ev->status = status;
	ev->opcode = cpu_to_le16(cmd);

	/* On success the socket owns the skb; free it only on error */
	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}
276
/* Queue a MGMT_EV_CMD_COMPLETE reply for command @cmd on socket @sk,
 * with @rp_len bytes of return parameters copied from @rp (which may
 * be NULL when rp_len is 0). Returns 0 on success or a negative errno.
 */
static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
			void *rp, size_t rp_len)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_complete *ev;
	int err;

	BT_DBG("sock %p", sk);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);

	ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
	ev->opcode = cpu_to_le16(cmd);
	ev->status = status;

	if (rp)
		memcpy(ev->data, rp, rp_len);

	/* On success the socket owns the skb; free it only on error */
	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}
310
311 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
312 u16 data_len)
313 {
314 struct mgmt_rp_read_version rp;
315
316 BT_DBG("sock %p", sk);
317
318 rp.version = MGMT_VERSION;
319 rp.revision = cpu_to_le16(MGMT_REVISION);
320
321 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
322 sizeof(rp));
323 }
324
/* Handle MGMT_OP_READ_COMMANDS: report the supported command and
 * event opcodes. The reply carries num_commands followed by
 * num_events little-endian u16 opcodes in one flat array.
 */
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	const u16 num_commands = ARRAY_SIZE(mgmt_commands);
	const u16 num_events = ARRAY_SIZE(mgmt_events);
	__le16 *opcode;
	size_t rp_size;
	int i, err;

	BT_DBG("sock %p", sk);

	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	/* Commands first, then events; opcode keeps advancing across
	 * both loops.
	 */
	for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
		put_unaligned_le16(mgmt_commands[i], opcode);

	for (i = 0; i < num_events; i++, opcode++)
		put_unaligned_le16(mgmt_events[i], opcode);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
			   rp_size);
	kfree(rp);

	return err;
}
358
/* Handle MGMT_OP_READ_INDEX_LIST: report the indexes of all
 * configured BR/EDR controllers. Two passes over hci_dev_list are
 * made under hci_dev_list_lock: one to size the reply, one to fill
 * it (the second pass applies extra filters, so the final count may
 * be smaller than the allocation).
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocation happens with the read lock held */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		/* Skip controllers still being set up or claimed by a
		 * user channel.
		 */
		if (test_bit(HCI_SETUP, &d->dev_flags) ||
		    test_bit(HCI_CONFIG, &d->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute length from the (possibly smaller) final count */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
			   rp_len);

	kfree(rp);

	return err;
}
418
/* Handle MGMT_OP_READ_UNCONF_INDEX_LIST: like read_index_list() but
 * reports only controllers with HCI_UNCONFIGURED set (i.e. awaiting
 * external configuration or a public address).
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    test_bit(HCI_UNCONFIGURED, &d->dev_flags))
			count++;
	}

	rp_len = sizeof(*rp) + (2 * count);
	/* GFP_ATOMIC: allocation happens with the read lock held */
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		/* Skip controllers still being set up or claimed by a
		 * user channel.
		 */
		if (test_bit(HCI_SETUP, &d->dev_flags) ||
		    test_bit(HCI_CONFIG, &d->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Recompute length from the (possibly smaller) final count */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
			   0, rp, rp_len);

	kfree(rp);

	return err;
}
478
479 static bool is_configured(struct hci_dev *hdev)
480 {
481 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
482 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
483 return false;
484
485 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
486 !bacmp(&hdev->public_addr, BDADDR_ANY))
487 return false;
488
489 return true;
490 }
491
/* Return the MGMT_OPTION_* bits still required before the controller
 * counts as configured (mirrors the checks in is_configured()),
 * already converted to little endian for the wire.
 */
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}
506
/* Emit MGMT_EV_NEW_CONFIG_OPTIONS with the currently missing options
 * to every control socket except @skip.
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
			  sizeof(options), skip);
}
514
/* Complete @opcode on @sk with the currently missing configuration
 * options as the return parameters.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return cmd_complete(sk, hdev->id, opcode, 0, &options,
			    sizeof(options));
}
522
/* Handle MGMT_OP_READ_CONFIG_INFO: report the manufacturer plus the
 * configuration options the controller supports and those still
 * missing.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* Changing the public address is only possible when the
	 * driver provides a set_bdaddr callback.
	 */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
			    sizeof(rp));
}
550
/* Compute the MGMT_SETTING_* bits this controller can support, based
 * on its LMP features, HCI version and quirks.
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Always-available settings */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		/* Fast connectable needs at least Bluetooth 1.2 */
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			settings |= MGMT_SETTING_HS;
		}

		/* HCI_FORCE_SC is a debugfs override for testing */
		if (lmp_sc_capable(hdev) ||
		    test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
			settings |= MGMT_SETTING_SECURE_CONN;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_ADVERTISING;
		settings |= MGMT_SETTING_PRIVACY;
	}

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	return settings;
}
589
/* Compute the MGMT_SETTING_* bits that are currently active, derived
 * from the device flags and power state.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (test_bit(HCI_BONDABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_BONDABLE;

	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_BREDR;

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_LE;

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_SSP;

	if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_HS;

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		settings |= MGMT_SETTING_ADVERTISING;

	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
		settings |= MGMT_SETTING_PRIVACY;

	return settings;
}
638
639 #define PNP_INFO_SVCLASS_ID 0x1200
640
/* Append an EIR field listing the registered 16-bit service UUIDs to
 * @data, using at most @len bytes. Emits EIR_UUID16_ALL, downgraded
 * to EIR_UUID16_SOME if space runs out. Returns the new write
 * position (== @data if nothing was written).
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for at least the 2-byte header plus one UUID */
	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		/* The 16-bit alias lives at bytes 12-13 of the
		 * 128-bit form.
		 */
		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		/* Lazily emit the field header on the first match;
		 * length byte starts at 1 to cover the type byte.
		 */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
682
/* Append an EIR field listing the registered 32-bit service UUIDs to
 * @data, using at most @len bytes. Same scheme as
 * create_uuid16_list(); returns the new write position.
 */
static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for at least the 2-byte header plus one UUID */
	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		/* The 32-bit alias lives at bytes 12-15 of the
		 * 128-bit form.
		 */
		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}
715
/* Append an EIR field listing the registered 128-bit service UUIDs to
 * @data, using at most @len bytes. Same scheme as
 * create_uuid16_list(); returns the new write position.
 */
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for at least the 2-byte header plus one UUID */
	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
748
749 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
750 {
751 struct pending_cmd *cmd;
752
753 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
754 if (cmd->opcode == opcode)
755 return cmd;
756 }
757
758 return NULL;
759 }
760
761 static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
762 struct hci_dev *hdev,
763 const void *data)
764 {
765 struct pending_cmd *cmd;
766
767 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
768 if (cmd->user_data != data)
769 continue;
770 if (cmd->opcode == opcode)
771 return cmd;
772 }
773
774 return NULL;
775 }
776
/* Build LE scan response data in @ptr: a single name field carrying
 * the local device name, shortened if it does not fit. Returns the
 * number of bytes written.
 */
static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		/* Reserve 2 bytes for the field length and type */
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* Field length covers the type byte plus the name */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
802
/* Queue an HCI command updating the LE scan response data, but only
 * if LE is enabled and the data actually changed since the last
 * update (the controller copy is cached in hdev->scan_rsp_data).
 */
static void update_scan_rsp_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_scan_rsp_data(hdev, cp.data);

	/* Skip the HCI round trip if nothing changed */
	if (hdev->scan_rsp_data_len == len &&
	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
		return;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
827
/* Return the LE advertising discoverability flag (LE_AD_GENERAL,
 * LE_AD_LIMITED or 0) matching the current — or about-to-be-set —
 * discoverable mode.
 */
static u8 get_adv_discov_flags(struct hci_dev *hdev)
{
	struct pending_cmd *cmd;

	/* If there's a pending mgmt command the flags will not yet have
	 * their final values, so check for this first.
	 */
	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
		if (cp->val == 0x01)
			return LE_AD_GENERAL;
		else if (cp->val == 0x02)
			return LE_AD_LIMITED;
	} else {
		if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
			return LE_AD_LIMITED;
		else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
			return LE_AD_GENERAL;
	}

	return 0;
}
851
/* Build LE advertising data in @ptr: an optional flags field
 * (discoverability and no-BR/EDR bits) and an optional TX power
 * field. Returns the number of bytes written.
 */
static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;

	flags |= get_adv_discov_flags(hdev);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		flags |= LE_AD_NO_BREDR;

	/* Only emit the flags field when at least one bit is set */
	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}
883
/* Queue an HCI command updating the LE advertising data, but only if
 * LE is enabled and the data actually changed since the last update
 * (the controller copy is cached in hdev->adv_data).
 */
static void update_adv_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_adv_data(hdev, cp.data);

	/* Skip the HCI round trip if nothing changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
908
/* Refresh the controller's LE advertising data as a stand-alone HCI
 * request. Exported entry point for callers outside mgmt.c.
 */
int mgmt_update_adv_data(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	update_adv_data(&req);

	return hci_req_run(&req, NULL);
}
918
/* Build the Extended Inquiry Response payload in @data: local name
 * (truncated to 48 bytes), TX power, device ID, and the 16/32/128-bit
 * service UUID lists as space allows.
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	/* Device ID field is only valid when a source is configured */
	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	/* UUID lists fill whatever space remains, shortest first */
	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
966
/* Queue an HCI Write EIR command when the controller is powered,
 * EIR-capable, SSP is on, the service cache is not active, and the
 * generated EIR data differs from the cached copy in hdev->eir.
 */
static void update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	/* EIR is only transmitted when Simple Pairing is enabled */
	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return;

	/* Defer while the service cache is active; service_cache_off()
	 * will trigger the update later.
	 */
	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	/* Skip the HCI round trip if nothing changed */
	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
995
996 static u8 get_service_classes(struct hci_dev *hdev)
997 {
998 struct bt_uuid *uuid;
999 u8 val = 0;
1000
1001 list_for_each_entry(uuid, &hdev->uuids, list)
1002 val |= uuid->svc_hint;
1003
1004 return val;
1005 }
1006
/* Queue an HCI Write Class of Device command when the computed class
 * (minor, major, service bits, plus the limited-discoverable bit)
 * differs from the cached hdev->dev_class.
 */
static void update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	/* Defer while the service cache is active; service_cache_off()
	 * will trigger the update later.
	 */
	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	/* Limited discoverable mode sets bit 5 of the major class */
	if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
		cod[1] |= 0x20;

	/* Skip the HCI round trip if nothing changed */
	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
1035
/* Return the effective connectable state, preferring the value of an
 * in-flight MGMT_OP_SET_CONNECTABLE over the current flag.
 */
static bool get_connectable(struct hci_dev *hdev)
{
	struct pending_cmd *cmd;

	/* If there's a pending mgmt command the flag will not yet have
	 * it's final value, so check for this first.
	 */
	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (cmd) {
		struct mgmt_mode *cp = cmd->param;
		return cp->val;
	}

	return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
}
1051
/* Queue an HCI command turning LE advertising off. */
static void disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
1058
/* Queue the HCI commands to (re)start LE advertising: set the
 * advertising parameters, then enable advertising. Bails out when an
 * LE connection exists or a suitable own address cannot be set up.
 */
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;

	/* Advertising parameters cannot be changed while an LE
	 * connection is up.
	 */
	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	/* Parameters may only be written while advertising is off */
	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	clear_bit(HCI_LE_ADV, &hdev->dev_flags);

	connectable = get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
	cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
1099
/* Delayed work that ends the service-cache period: clears
 * HCI_SERVICE_CACHE and pushes the deferred EIR and class-of-device
 * updates to the controller.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* Nothing to do if the cache period already ended */
	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}
1120
/* Delayed work that marks the resolvable private address as expired
 * and, if advertising is on, restarts it so a fresh RPA gets
 * generated and programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	/* Without advertising there is nothing to refresh right now */
	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */
	hci_req_init(&req, hdev);
	enable_advertising(&req);
	hci_req_run(&req, NULL);
}
1141
/* One-time switch of a controller to mgmt control: runs on the first
 * mgmt command for @hdev (guarded by the HCI_MGMT flag) and sets up
 * the delayed work items plus the mgmt pairing policy.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	/* Only initialize once per controller */
	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	clear_bit(HCI_BONDABLE, &hdev->dev_flags);
}
1157
/* Handle MGMT_OP_READ_INFO: report address, HCI version,
 * manufacturer, supported and current settings, class of device and
 * names for controller @hdev.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
			    sizeof(rp));
}
1187
1188 static void mgmt_pending_free(struct pending_cmd *cmd)
1189 {
1190 sock_put(cmd->sk);
1191 kfree(cmd->param);
1192 kfree(cmd);
1193 }
1194
1195 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1196 struct hci_dev *hdev, void *data,
1197 u16 len)
1198 {
1199 struct pending_cmd *cmd;
1200
1201 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1202 if (!cmd)
1203 return NULL;
1204
1205 cmd->opcode = opcode;
1206 cmd->index = hdev->id;
1207
1208 cmd->param = kmalloc(len, GFP_KERNEL);
1209 if (!cmd->param) {
1210 kfree(cmd);
1211 return NULL;
1212 }
1213
1214 if (data)
1215 memcpy(cmd->param, data, len);
1216
1217 cmd->sk = sk;
1218 sock_hold(sk);
1219
1220 list_add(&cmd->list, &hdev->mgmt_pending);
1221
1222 return cmd;
1223 }
1224
1225 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1226 void (*cb)(struct pending_cmd *cmd,
1227 void *data),
1228 void *data)
1229 {
1230 struct pending_cmd *cmd, *tmp;
1231
1232 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1233 if (opcode > 0 && cmd->opcode != opcode)
1234 continue;
1235
1236 cb(cmd, data);
1237 }
1238 }
1239
/* Unlink a pending command from hdev->mgmt_pending and free it. */
static void mgmt_pending_remove(struct pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}
1245
1246 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1247 {
1248 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1249
1250 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1251 sizeof(settings));
1252 }
1253
/* Completion callback for clean_up_hci_state(): once the shutdown HCI
 * commands finished and no connections remain, run the power-off work
 * immediately instead of waiting for the delayed timeout.
 */
static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (hci_conn_count(hdev) == 0) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}
}
1263
/* Queue the HCI commands needed to abort whatever discovery activity is
 * currently running (inquiry, LE scan, name resolution or passive
 * scanning).
 *
 * Returns true if any stop command was added to @req, false if there
 * was nothing to stop.
 */
static bool hci_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			/* Classic inquiry in progress */
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			/* LE scan in progress */
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		return true;

	case DISCOVERY_RESOLVING:
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			break;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		return true;

	default:
		/* Passive scanning */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
			hci_req_add_le_scan_disable(req);
			return true;
		}

		break;
	}

	return false;
}
1305
/* Build and run the HCI request that brings the controller into a quiet
 * state before power-off: disable page/inquiry scan, stop advertising
 * and discovery, and disconnect/cancel/reject every known connection.
 *
 * Returns the result of hci_req_run() (-ENODATA when no HCI commands
 * were needed at all).
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		disable_advertising(&req);

	discov_stopped = hci_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;
		struct hci_cp_reject_conn_req rej;

		switch (conn->state) {
		case BT_CONNECTED:
		case BT_CONFIG:
			/* Established link: plain disconnect */
			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = 0x15; /* Terminated due to Power Off */
			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
			break;
		case BT_CONNECT:
			/* Outgoing connection attempt: cancel it */
			if (conn->type == LE_LINK)
				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
					    0, NULL);
			else if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
					    6, &conn->dst);
			break;
		case BT_CONNECT2:
			/* Incoming connection request: reject it */
			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = 0x15; /* Terminated due to Power Off */
			if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
					    sizeof(rej), &rej);
			else if (conn->type == SCO_LINK)
				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
					    sizeof(rej), &rej);
			break;
		}
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
1364
1365 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1366 u16 len)
1367 {
1368 struct mgmt_mode *cp = data;
1369 struct pending_cmd *cmd;
1370 int err;
1371
1372 BT_DBG("request for %s", hdev->name);
1373
1374 if (cp->val != 0x00 && cp->val != 0x01)
1375 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1376 MGMT_STATUS_INVALID_PARAMS);
1377
1378 hci_dev_lock(hdev);
1379
1380 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
1381 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1382 MGMT_STATUS_BUSY);
1383 goto failed;
1384 }
1385
1386 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1387 cancel_delayed_work(&hdev->power_off);
1388
1389 if (cp->val) {
1390 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1391 data, len);
1392 err = mgmt_powered(hdev, 1);
1393 goto failed;
1394 }
1395 }
1396
1397 if (!!cp->val == hdev_is_powered(hdev)) {
1398 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1399 goto failed;
1400 }
1401
1402 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1403 if (!cmd) {
1404 err = -ENOMEM;
1405 goto failed;
1406 }
1407
1408 if (cp->val) {
1409 queue_work(hdev->req_workqueue, &hdev->power_on);
1410 err = 0;
1411 } else {
1412 /* Disconnect connections, stop scans, etc */
1413 err = clean_up_hci_state(hdev);
1414 if (!err)
1415 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1416 HCI_POWER_OFF_TIMEOUT);
1417
1418 /* ENODATA means there were no HCI commands queued */
1419 if (err == -ENODATA) {
1420 cancel_delayed_work(&hdev->power_off);
1421 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1422 err = 0;
1423 }
1424 }
1425
1426 failed:
1427 hci_dev_unlock(hdev);
1428 return err;
1429 }
1430
1431 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1432 {
1433 __le32 ev;
1434
1435 ev = cpu_to_le32(get_current_settings(hdev));
1436
1437 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1438 }
1439
/* Public wrapper: broadcast a New Settings event to every mgmt socket. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1444
/* Context passed through mgmt_pending_foreach() callbacks: collects the
 * first responding socket (so it can be skipped when broadcasting) and
 * an aggregate mgmt status.
 */
struct cmd_lookup {
	struct sock *sk;
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1450
/* mgmt_pending_foreach() callback: answer a pending settings command,
 * remember the first socket seen in the cmd_lookup context (a reference
 * is taken; the caller drops it), and free the pending entry.
 */
static void settings_rsp(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1466
1467 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1468 {
1469 u8 *status = data;
1470
1471 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1472 mgmt_pending_remove(cmd);
1473 }
1474
1475 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1476 {
1477 if (!lmp_bredr_capable(hdev))
1478 return MGMT_STATUS_NOT_SUPPORTED;
1479 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1480 return MGMT_STATUS_REJECTED;
1481 else
1482 return MGMT_STATUS_SUCCESS;
1483 }
1484
1485 static u8 mgmt_le_support(struct hci_dev *hdev)
1486 {
1487 if (!lmp_le_capable(hdev))
1488 return MGMT_STATUS_NOT_SUPPORTED;
1489 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1490 return MGMT_STATUS_REJECTED;
1491 else
1492 return MGMT_STATUS_SUCCESS;
1493 }
1494
/* HCI completion handler for MGMT_OP_SET_DISCOVERABLE: update dev_flags,
 * arm the discoverable timeout, reply to the pending command and emit
 * New Settings if anything changed.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Undo the limited-discoverable bit set optimistically in
		 * set_discoverable() */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		/* Arm the auto-disable timer if a timeout was requested */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set.
	 */
	hci_req_init(&req, hdev);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1551
/* MGMT_OP_SET_DISCOVERABLE handler.
 *
 * cp->val: 0x00 off, 0x01 general discoverable, 0x02 limited
 * discoverable; cp->timeout is the auto-disable period in seconds
 * (required for limited mode, forbidden when disabling).
 *
 * Powered-off controllers only get their flags updated; powered ones
 * get IAC/scan-enable (BR/EDR) and advertising-data (LE) HCI commands,
 * completed in set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires connectable to be enabled first */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1716
/* Queue page-scan parameter updates implementing "fast connectable":
 * interlaced scanning with a short interval when @enable, otherwise the
 * standard defaults. Only issues commands when the values would
 * actually change, and only on BR/EDR controllers of version 1.2+.
 */
static void write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	/* Page scan type/activity commands require HCI 1.2 or later */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	/* Skip the HCI round-trips when nothing would change */
	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
1751
/* HCI completion handler for MGMT_OP_SET_CONNECTABLE: update the
 * connectable (and, when disabling, discoverable) flags, reply to the
 * pending command, and refresh advertising data and background scan if
 * anything changed.
 */
static void set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool conn_changed, discov_changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
						 &hdev->dev_flags);
		discov_changed = false;
	} else {
		/* Turning connectable off also clears discoverable */
		conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
						  &hdev->dev_flags);
		discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
						    &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (conn_changed || discov_changed) {
		new_settings(hdev, cmd->sk);
		if (discov_changed)
			mgmt_update_adv_data(hdev);
		hci_update_background_scan(hdev);
	}

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1799
1800 static int set_connectable_update_settings(struct hci_dev *hdev,
1801 struct sock *sk, u8 val)
1802 {
1803 bool changed = false;
1804 int err;
1805
1806 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1807 changed = true;
1808
1809 if (val) {
1810 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1811 } else {
1812 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1813 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1814 }
1815
1816 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1817 if (err < 0)
1818 return err;
1819
1820 if (changed) {
1821 hci_update_background_scan(hdev);
1822 return new_settings(hdev, sk);
1823 }
1824
1825 return 0;
1826 }
1827
/* MGMT_OP_SET_CONNECTABLE handler.
 *
 * Powered-off controllers take the flag-only path
 * (set_connectable_update_settings). Powered ones get scan-enable
 * and/or advertising HCI commands, completed in
 * set_connectable_complete().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (!cp->val) {
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			/* If we don't have any whitelist entries just
			 * disable all scanning. If there are entries
			 * and we had both page and inquiry scanning
			 * enabled then fall back to only page scanning.
			 * Otherwise no changes are needed.
			 */
			if (list_empty(&hdev->whitelist))
				scan = SCAN_DISABLED;
			else if (test_bit(HCI_ISCAN, &hdev->flags))
				scan = SCAN_PAGE;
			else
				goto no_scan_update;

			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

no_scan_update:
	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	/* Update the advertising parameters if necessary */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		enable_advertising(&req);

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* -ENODATA: no HCI work needed, fall back to flags only */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1932
1933 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1934 u16 len)
1935 {
1936 struct mgmt_mode *cp = data;
1937 bool changed;
1938 int err;
1939
1940 BT_DBG("request for %s", hdev->name);
1941
1942 if (cp->val != 0x00 && cp->val != 0x01)
1943 return cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1944 MGMT_STATUS_INVALID_PARAMS);
1945
1946 hci_dev_lock(hdev);
1947
1948 if (cp->val)
1949 changed = !test_and_set_bit(HCI_BONDABLE, &hdev->dev_flags);
1950 else
1951 changed = test_and_clear_bit(HCI_BONDABLE, &hdev->dev_flags);
1952
1953 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1954 if (err < 0)
1955 goto unlock;
1956
1957 if (changed)
1958 err = new_settings(hdev, sk);
1959
1960 unlock:
1961 hci_dev_unlock(hdev);
1962 return err;
1963 }
1964
/* MGMT_OP_SET_LINK_SECURITY handler: toggle BR/EDR link-level security
 * (authentication). Powered-off controllers get a flag-only update;
 * powered ones get a Write Auth Enable HCI command whose completion
 * answers the pending command registered here.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state: nothing to do */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2034
/* MGMT_OP_SET_SSP handler: toggle Secure Simple Pairing. Disabling SSP
 * also disables High Speed (HS depends on SSP). Powered-off controllers
 * get a flag-only update; powered ones get a Write SSP Mode HCI command
 * whose completion answers the pending command registered here.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			/* SSP off drags HS down with it; report "changed"
			 * if either flag flipped */
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Disabling SSP also turns off SSP debug mode if it was in use */
	if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2116
/* MGMT_OP_SET_HS handler: toggle the High Speed setting. Pure flag
 * operation (no HCI traffic); requires SSP to be enabled, and disabling
 * HS is rejected while the controller is powered.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* HS depends on SSP being enabled */
	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2167
/* HCI completion handler for MGMT_OP_SET_LE: resolve all pending SET_LE
 * commands (with status on failure, settings on success), broadcast
 * New Settings, and refresh advertising data when LE just got enabled.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	/* match.sk (set by settings_rsp) is skipped: that socket already
	 * got its settings response */
	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_dev_lock(hdev);

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		hci_req_run(&req, NULL);

		hci_update_background_scan(hdev);

		hci_dev_unlock(hdev);
	}
}
2207
/* MGMT_OP_SET_LE handler: toggle LE host support.
 *
 * Rejected on LE-only controllers (LE cannot be switched off there).
 * When powered off, or when the host-LE state already matches, only the
 * flags are updated; otherwise a Write LE Host Supported command is
 * issued, completed in le_enable_complete().
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		/* Turning LE off also clears the advertising setting */
		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = 0x00;
	} else {
		/* Stop advertising before switching LE support off */
		if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2296
2297 /* This is a helper function to test for pending mgmt commands that can
2298 * cause CoD or EIR HCI commands. We can only allow one such pending
2299 * mgmt command at a time since otherwise we cannot easily track what
2300 * the current values are, will be, and based on that calculate if a new
2301 * HCI command needs to be sent and if yes with what value.
2302 */
2303 static bool pending_eir_or_class(struct hci_dev *hdev)
2304 {
2305 struct pending_cmd *cmd;
2306
2307 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2308 switch (cmd->opcode) {
2309 case MGMT_OP_ADD_UUID:
2310 case MGMT_OP_REMOVE_UUID:
2311 case MGMT_OP_SET_DEV_CLASS:
2312 case MGMT_OP_SET_POWERED:
2313 return true;
2314 }
2315 }
2316
2317 return false;
2318 }
2319
/* The Bluetooth base UUID, stored least-significant byte first, as it
 * appears on the wire (compared against in get_uuid_size()).
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2324
2325 static u8 get_uuid_size(const u8 *uuid)
2326 {
2327 u32 val;
2328
2329 if (memcmp(uuid, bluetooth_base_uuid, 12))
2330 return 128;
2331
2332 val = get_unaligned_le32(&uuid[12]);
2333 if (val > 0xffff)
2334 return 32;
2335
2336 return 16;
2337 }
2338
/* Shared completion helper for class-of-device related mgmt operations
 * (@mgmt_op): reply to the pending command with the mapped status and
 * the current 3-byte class of device, then drop the entry.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
2357
/* HCI completion handler for MGMT_OP_ADD_UUID. */
static void add_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2364
/* Handle the Add UUID mgmt command: record the UUID on hdev->uuids and
 * refresh the Class of Device and EIR data accordingly. Replies
 * immediately when no HCI traffic is needed (hci_req_run() returning
 * -ENODATA), otherwise completion is deferred to add_uuid_complete().
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* -ENODATA: no HCI commands were queued (e.g. powered off
		 * or values unchanged) so complete the command right away.
		 */
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	/* Request is running; reply comes from add_uuid_complete() */
	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2422
2423 static bool enable_service_cache(struct hci_dev *hdev)
2424 {
2425 if (!hdev_is_powered(hdev))
2426 return false;
2427
2428 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2429 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2430 CACHE_TIMEOUT);
2431 return true;
2432 }
2433
2434 return false;
2435 }
2436
/* HCI request completion callback for remove_uuid(). */
static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
}
2443
/* Handle the Remove UUID mgmt command. An all-zero UUID clears the
 * whole list (optionally deferring the CoD/EIR refresh to the service
 * cache); otherwise all entries matching the given UUID are removed.
 * Like add_uuid(), completion is deferred when HCI traffic is needed.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The all-zero UUID acts as a wildcard: clear every stored UUID */
	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache took over the deferred update we
		 * can complete immediately without touching CoD/EIR.
		 */
		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* -ENODATA: nothing was queued, complete right away */
		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	/* Reply comes later from remove_uuid_complete() */
	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2521
/* HCI request completion callback for set_dev_class(). */
static void set_class_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
}
2528
/* Handle the Set Device Class mgmt command. Validates that the two
 * reserved low bits of the minor class and the three reserved high
 * bits of the major class are zero, stores the new values, and, if the
 * adapter is powered, pushes the new Class of Device (and possibly
 * refreshed EIR data) to the controller.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	/* Class of Device only exists for BR/EDR controllers */
	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR-affecting command may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Reserved bits must be zero per the mgmt API */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* When powered off just remember the values; they are written to
	 * the controller on power on.
	 */
	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* Flush the service cache first; the lock must be dropped while
	 * waiting for the cache work item to finish, since that work
	 * itself takes hci_dev_lock().
	 */
	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* -ENODATA: nothing was queued, complete right away */
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	/* Reply comes later from set_class_complete() */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2599
/* Handle the Load Link Keys mgmt command: replace the adapter's entire
 * BR/EDR link-key store with the supplied list. All parameters are
 * validated (count bound, exact payload length, key/address types)
 * before any state is touched, so a bad request leaves the store
 * untouched.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound on key_count such that expected_len below cannot
	 * overflow the u16 message length.
	 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_link_keys: too big key_count value %u",
		       key_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload must match the advertised key count exactly */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
	if (expected_len != len) {
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	/* Validate every entry before clearing the existing store */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
					     &hdev->dev_flags);

	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2681
2682 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2683 u8 addr_type, struct sock *skip_sk)
2684 {
2685 struct mgmt_ev_device_unpaired ev;
2686
2687 bacpy(&ev.addr.bdaddr, bdaddr);
2688 ev.addr.type = addr_type;
2689
2690 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2691 skip_sk);
2692 }
2693
2694 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2695 u16 len)
2696 {
2697 struct mgmt_cp_unpair_device *cp = data;
2698 struct mgmt_rp_unpair_device rp;
2699 struct hci_cp_disconnect dc;
2700 struct pending_cmd *cmd;
2701 struct hci_conn *conn;
2702 int err;
2703
2704 memset(&rp, 0, sizeof(rp));
2705 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2706 rp.addr.type = cp->addr.type;
2707
2708 if (!bdaddr_type_is_valid(cp->addr.type))
2709 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2710 MGMT_STATUS_INVALID_PARAMS,
2711 &rp, sizeof(rp));
2712
2713 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2714 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2715 MGMT_STATUS_INVALID_PARAMS,
2716 &rp, sizeof(rp));
2717
2718 hci_dev_lock(hdev);
2719
2720 if (!hdev_is_powered(hdev)) {
2721 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2722 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2723 goto unlock;
2724 }
2725
2726 if (cp->addr.type == BDADDR_BREDR) {
2727 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2728 } else {
2729 u8 addr_type;
2730
2731 if (cp->addr.type == BDADDR_LE_PUBLIC)
2732 addr_type = ADDR_LE_DEV_PUBLIC;
2733 else
2734 addr_type = ADDR_LE_DEV_RANDOM;
2735
2736 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2737
2738 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2739
2740 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
2741 }
2742
2743 if (err < 0) {
2744 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2745 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2746 goto unlock;
2747 }
2748
2749 if (cp->disconnect) {
2750 if (cp->addr.type == BDADDR_BREDR)
2751 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2752 &cp->addr.bdaddr);
2753 else
2754 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2755 &cp->addr.bdaddr);
2756 } else {
2757 conn = NULL;
2758 }
2759
2760 if (!conn) {
2761 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2762 &rp, sizeof(rp));
2763 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2764 goto unlock;
2765 }
2766
2767 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2768 sizeof(*cp));
2769 if (!cmd) {
2770 err = -ENOMEM;
2771 goto unlock;
2772 }
2773
2774 dc.handle = cpu_to_le16(conn->handle);
2775 dc.reason = 0x13; /* Remote User Terminated Connection */
2776 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2777 if (err < 0)
2778 mgmt_pending_remove(cmd);
2779
2780 unlock:
2781 hci_dev_unlock(hdev);
2782 return err;
2783 }
2784
/* Handle the Disconnect mgmt command: request termination of an
 * existing ACL or LE connection. The reply is deferred until the HCI
 * Disconnect command completes, via the pending command added here.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto failed;
	}

	/* Only one disconnect may be pending per adapter */
	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	/* BT_OPEN/BT_CLOSED connections have no link to tear down */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = HCI_ERROR_REMOTE_USER_TERM;

	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2849
2850 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2851 {
2852 switch (link_type) {
2853 case LE_LINK:
2854 switch (addr_type) {
2855 case ADDR_LE_DEV_PUBLIC:
2856 return BDADDR_LE_PUBLIC;
2857
2858 default:
2859 /* Fallback to LE Random address type */
2860 return BDADDR_LE_RANDOM;
2861 }
2862
2863 default:
2864 /* Fallback to BR/EDR type */
2865 return BDADDR_BREDR;
2866 }
2867 }
2868
/* Handle the Get Connections mgmt command: reply with the address and
 * type of every mgmt-visible connection, excluding SCO/eSCO links.
 */
static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_get_connections *rp;
	struct hci_conn *c;
	size_t rp_len;
	int err;
	u16 i;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* First pass: upper bound on entry count for the allocation */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			i++;
	}

	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
	rp = kmalloc(rp_len, GFP_KERNEL);
	if (!rp) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Second pass: fill in entries. Note that SCO/eSCO links are
	 * skipped after the copy by not advancing i, so the next entry
	 * simply overwrites the slot.
	 */
	i = 0;
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
			continue;
		bacpy(&rp->addr[i].bdaddr, &c->dst);
		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
		if (c->type == SCO_LINK || c->type == ESCO_LINK)
			continue;
		i++;
	}

	rp->conn_count = cpu_to_le16(i);

	/* Recalculate length in case of filtered SCO connections, etc */
	rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));

	err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
			   rp_len);

	kfree(rp);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2926
/* Queue an HCI PIN Code Negative Reply for the given address and track
 * it as a pending mgmt command. The caller must hold hci_dev_lock.
 */
static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_pin_code_neg_reply *cp)
{
	struct pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
			       sizeof(*cp));
	if (!cmd)
		return -ENOMEM;

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
	if (err < 0)
		mgmt_pending_remove(cmd);

	return err;
}
2945
/* Handle the PIN Code Reply mgmt command. If the connection requires
 * high security a full 16-byte PIN is mandatory; otherwise a negative
 * reply is sent to the controller and INVALID_PARAMS returned to the
 * caller. The final reply is deferred until the HCI command completes.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* PIN codes only exist for BR/EDR (ACL) links */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a 16 digit PIN; reject anything less
	 * by sending a negative reply to the remote side.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					 MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3005
3006 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
3007 u16 len)
3008 {
3009 struct mgmt_cp_set_io_capability *cp = data;
3010
3011 BT_DBG("");
3012
3013 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
3014 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
3015 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
3016
3017 hci_dev_lock(hdev);
3018
3019 hdev->io_capability = cp->io_capability;
3020
3021 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
3022 hdev->io_capability);
3023
3024 hci_dev_unlock(hdev);
3025
3026 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
3027 0);
3028 }
3029
3030 static struct pending_cmd *find_pairing(struct hci_conn *conn)
3031 {
3032 struct hci_dev *hdev = conn->hdev;
3033 struct pending_cmd *cmd;
3034
3035 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3036 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3037 continue;
3038
3039 if (cmd->user_data != conn)
3040 continue;
3041
3042 return cmd;
3043 }
3044
3045 return NULL;
3046 }
3047
/* Finish a Pair Device command: reply to the caller with the remote
 * address and the given status, detach all pairing callbacks from the
 * connection, drop the reference taken when pairing started, and
 * remove the pending command.
 */
static void pairing_complete(struct pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
		     &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}
3068
3069 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3070 {
3071 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3072 struct pending_cmd *cmd;
3073
3074 cmd = find_pairing(conn);
3075 if (cmd)
3076 pairing_complete(cmd, status);
3077 }
3078
3079 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3080 {
3081 struct pending_cmd *cmd;
3082
3083 BT_DBG("status %u", status);
3084
3085 cmd = find_pairing(conn);
3086 if (!cmd)
3087 BT_DBG("Unable to find a pending command");
3088 else
3089 pairing_complete(cmd, mgmt_status(status));
3090 }
3091
3092 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3093 {
3094 struct pending_cmd *cmd;
3095
3096 BT_DBG("status %u", status);
3097
3098 if (!status)
3099 return;
3100
3101 cmd = find_pairing(conn);
3102 if (!cmd)
3103 BT_DBG("Unable to find a pending command");
3104 else
3105 pairing_complete(cmd, mgmt_status(status));
3106 }
3107
/* Handle the Pair Device mgmt command: initiate a connection to the
 * remote device (ACL for BR/EDR, LE otherwise) and hook the pairing
 * callbacks onto it. The command stays pending until pairing finishes
 * and is then completed via pairing_complete().
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
	} else {
		u8 addr_type;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
				      sec_level, HCI_LE_CONN_TIMEOUT,
				      HCI_ROLE_MASTER);
	}

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   status, &rp,
				   sizeof(rp));
		goto unlock;
	}

	/* A set connect callback means another pairing is in progress */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = conn;

	/* Already connected and secure enough - complete immediately */
	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
	    hci_conn_security(conn, sec_level, auth_type, true))
		pairing_complete(cmd, 0);

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3226
/* Handle the Cancel Pair Device mgmt command: abort an in-progress
 * Pair Device command for the given address, completing it with
 * MGMT_STATUS_CANCELLED. The address must match the pending pairing.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The address must refer to the connection being paired */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	pairing_complete(cmd, MGMT_STATUS_CANCELLED);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
			   addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
3268
/* Common backend for the user pairing response commands (PIN code
 * negative reply, user confirm/passkey positive and negative replies).
 * LE responses are routed to the SMP layer and completed immediately;
 * BR/EDR responses are forwarded as the given HCI command with the
 * mgmt reply deferred until it completes.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_CONNECTED, addr,
				   sizeof(*addr));
		goto done;
	}

	/* LE pairing responses are handled by SMP, not by HCI commands */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_SUCCESS, addr,
					   sizeof(*addr));
		else
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_FAILED, addr,
					   sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3336
/* Handle the PIN Code Negative Reply mgmt command (no passkey). */
static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_pin_code_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_PIN_CODE_NEG_REPLY,
				 HCI_OP_PIN_CODE_NEG_REPLY, 0);
}
3348
/* Handle the User Confirmation Reply mgmt command. This is the only
 * user-pairing wrapper with an explicit length check, since the
 * command table marks it as variable length (presumably for historical
 * reasons - TODO confirm against the mgmt command table).
 */
static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_confirm_reply *cp = data;

	BT_DBG("");

	if (len != sizeof(*cp))
		return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
				  MGMT_STATUS_INVALID_PARAMS);

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_REPLY,
				 HCI_OP_USER_CONFIRM_REPLY, 0);
}
3364
/* Handle the User Confirmation Negative Reply mgmt command. */
static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_confirm_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
}
3376
/* Handle the User Passkey Reply mgmt command (carries the passkey). */
static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_cp_user_passkey_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_REPLY,
				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
}
3388
/* Handle the User Passkey Negative Reply mgmt command. */
static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 len)
{
	struct mgmt_cp_user_passkey_neg_reply *cp = data;

	BT_DBG("");

	return user_pairing_resp(sk, hdev, &cp->addr,
				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
}
3400
/* Queue an HCI Write Local Name command with the current device name. */
static void update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}
3410
/* HCI request completion callback for set_local_name(): reply to the
 * pending command with either an error status or the name parameters
 * originally supplied by the caller.
 */
static void set_name_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_set_local_name *cp;
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	if (status)
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
			   mgmt_status(status));
	else
		cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
			     cp, sizeof(*cp));

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
3438
/* Handle the Set Local Name mgmt command: store the new long and short
 * names and, if powered, push them to the controller (local name + EIR
 * for BR/EDR, scan response data for LE). When powered off only the
 * stored values change and a Local Name Changed event is emitted.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		if (err < 0)
			goto failed;

		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
				 sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to udpate the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3507
/* Handle the Read Local OOB Data mgmt command: ask the controller for
 * its out-of-band pairing data (the extended variant when Secure
 * Connections is enabled). The reply is deferred until the HCI command
 * completes.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one OOB data read may be in flight at a time */
	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* With Secure Connections the controller returns both the P-192
	 * and P-256 values via the extended command.
	 */
	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
				   0, NULL);
	else
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3555
3556 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3557 void *data, u16 len)
3558 {
3559 int err;
3560
3561 BT_DBG("%s ", hdev->name);
3562
3563 hci_dev_lock(hdev);
3564
3565 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3566 struct mgmt_cp_add_remote_oob_data *cp = data;
3567 u8 status;
3568
3569 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3570 cp->hash, cp->randomizer);
3571 if (err < 0)
3572 status = MGMT_STATUS_FAILED;
3573 else
3574 status = MGMT_STATUS_SUCCESS;
3575
3576 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3577 status, &cp->addr, sizeof(cp->addr));
3578 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3579 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3580 u8 status;
3581
3582 err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
3583 cp->hash192,
3584 cp->randomizer192,
3585 cp->hash256,
3586 cp->randomizer256);
3587 if (err < 0)
3588 status = MGMT_STATUS_FAILED;
3589 else
3590 status = MGMT_STATUS_SUCCESS;
3591
3592 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3593 status, &cp->addr, sizeof(cp->addr));
3594 } else {
3595 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3596 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3597 MGMT_STATUS_INVALID_PARAMS);
3598 }
3599
3600 hci_dev_unlock(hdev);
3601 return err;
3602 }
3603
3604 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3605 void *data, u16 len)
3606 {
3607 struct mgmt_cp_remove_remote_oob_data *cp = data;
3608 u8 status;
3609 int err;
3610
3611 BT_DBG("%s", hdev->name);
3612
3613 hci_dev_lock(hdev);
3614
3615 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3616 if (err < 0)
3617 status = MGMT_STATUS_INVALID_PARAMS;
3618 else
3619 status = MGMT_STATUS_SUCCESS;
3620
3621 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3622 status, &cp->addr, sizeof(cp->addr));
3623
3624 hci_dev_unlock(hdev);
3625 return err;
3626 }
3627
/* Fail an in-flight Start Discovery request.
 *
 * Resets the discovery state machine, then completes the pending
 * MGMT_OP_START_DISCOVERY command (if any) with the mapped HCI status
 * and the discovery type as response payload.
 *
 * Called with hdev locked.  Returns -ENOENT when no command is pending,
 * otherwise the result of cmd_complete().
 */
static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	u8 type;
	int err;

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	if (!cmd)
		return -ENOENT;

	type = hdev->discovery.type;

	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
			   &type, sizeof(type));
	mgmt_pending_remove(cmd);

	return err;
}
3648
/* HCI request completion callback for Start Discovery.
 *
 * On failure, fails the pending mgmt command and resets discovery
 * state.  On success, moves the state machine to DISCOVERY_FINDING and,
 * for LE-involved discovery types, schedules the LE scan disable work
 * so the scan ends after the discovery timeout.
 */
static void start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	unsigned long timeout = 0;

	BT_DBG("status %d", status);

	if (status) {
		hci_dev_lock(hdev);
		mgmt_start_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
	hci_dev_unlock(hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		break;

	case DISCOV_TYPE_BREDR:
		/* BR/EDR inquiry terminates on its own; no LE timeout. */
		break;

	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
	}

	if (!timeout)
		return;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
}
3687
/* Handle the Start Discovery mgmt command.
 *
 * Validates power/busy state, registers a pending command, then builds
 * and runs an HCI request appropriate for the requested discovery type:
 * a BR/EDR inquiry, an LE active scan, or both (interleaved).  The
 * final mgmt reply is sent from start_discovery_complete().
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	struct hci_request req;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 status, own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Periodic inquiry owns the inquiry hardware; reject discovery. */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Only one discovery session at a time. */
	if (hdev->discovery.state != DISCOVERY_STOPPED) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hdev->discovery.type = cp->type;

	hci_req_init(&req, hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		status = mgmt_bredr_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_BUSY);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		status = mgmt_le_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Interleaved discovery also needs BR/EDR enabled. */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_NOT_SUPPORTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
			/* Don't let discovery abort an outgoing
			 * connection attempt that's using directed
			 * advertising.
			 */
			if (hci_conn_hash_lookup_state(hdev, LE_LINK,
						       BT_CONNECT)) {
				err = cmd_status(sk, hdev->id,
						 MGMT_OP_START_DISCOVERY,
						 MGMT_STATUS_REJECTED);
				mgmt_pending_remove(cmd);
				goto failed;
			}

			disable_advertising(&req);
		}

		/* If controller is scanning, it means the background scanning
		 * is running. Thus, we should temporarily stop it in order to
		 * set the discovery scanning parameters.
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		memset(&param_cp, 0, sizeof(param_cp));

		/* All active scans will be done with either a resolvable
		 * private address (when privacy feature has been enabled)
		 * or unresolvable private address.
		 */
		err = hci_update_random_address(&req, true, &own_addr_type);
		if (err < 0) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_FAILED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = own_addr_type;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_INVALID_PARAMS);
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3846
/* Fail an in-flight Stop Discovery request.
 *
 * Completes the pending MGMT_OP_STOP_DISCOVERY command (if any) with
 * the mapped HCI status, echoing the current discovery type.
 *
 * Called with hdev locked.  Returns -ENOENT when no command is pending,
 * otherwise the result of cmd_complete().
 */
static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	int err;

	cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
	if (!cmd)
		return -ENOENT;

	err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
			   &hdev->discovery.type, sizeof(hdev->discovery.type));
	mgmt_pending_remove(cmd);

	return err;
}
3862
3863 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3864 {
3865 BT_DBG("status %d", status);
3866
3867 hci_dev_lock(hdev);
3868
3869 if (status) {
3870 mgmt_stop_discovery_failed(hdev, status);
3871 goto unlock;
3872 }
3873
3874 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3875
3876 unlock:
3877 hci_dev_unlock(hdev);
3878 }
3879
/* Handle the Stop Discovery mgmt command.
 *
 * Rejects the request when no discovery is active or the type does not
 * match the running session.  Otherwise registers a pending command and
 * runs an HCI request that tears down the active inquiry/scan; the
 * final reply comes from stop_discovery_complete().
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* Requested type must match the session being stopped. */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	hci_stop_discovery(&req);

	err = hci_req_run(&req, stop_discovery_complete);
	if (!err) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
		goto unlock;
	}

	mgmt_pending_remove(cmd);

	/* If no HCI commands were sent we're done */
	if (err == -ENODATA) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
				   &mgmt_cp->type, sizeof(mgmt_cp->type));
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3935
/* Handle the Confirm Name mgmt command.
 *
 * During discovery, user space indicates whether the name of a found
 * device is already known.  Known names are removed from the
 * name-resolve list; unknown ones are marked as needing resolution.
 */
static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_cp_confirm_name *cp = data;
	struct inquiry_entry *e;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* Name confirmation only makes sense while discovering. */
	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
				   MGMT_STATUS_FAILED, &cp->addr,
				   sizeof(cp->addr));
		goto failed;
	}

	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
	if (!e) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
				   MGMT_STATUS_INVALID_PARAMS, &cp->addr,
				   sizeof(cp->addr));
		goto failed;
	}

	if (cp->name_known) {
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
	} else {
		e->name_state = NAME_NEEDED;
		hci_inquiry_cache_update_resolve(hdev, e);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
			   sizeof(cp->addr));

failed:
	hci_dev_unlock(hdev);
	return err;
}
3977
3978 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3979 u16 len)
3980 {
3981 struct mgmt_cp_block_device *cp = data;
3982 u8 status;
3983 int err;
3984
3985 BT_DBG("%s", hdev->name);
3986
3987 if (!bdaddr_type_is_valid(cp->addr.type))
3988 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3989 MGMT_STATUS_INVALID_PARAMS,
3990 &cp->addr, sizeof(cp->addr));
3991
3992 hci_dev_lock(hdev);
3993
3994 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
3995 cp->addr.type);
3996 if (err < 0) {
3997 status = MGMT_STATUS_FAILED;
3998 goto done;
3999 }
4000
4001 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4002 sk);
4003 status = MGMT_STATUS_SUCCESS;
4004
4005 done:
4006 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4007 &cp->addr, sizeof(cp->addr));
4008
4009 hci_dev_unlock(hdev);
4010
4011 return err;
4012 }
4013
4014 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4015 u16 len)
4016 {
4017 struct mgmt_cp_unblock_device *cp = data;
4018 u8 status;
4019 int err;
4020
4021 BT_DBG("%s", hdev->name);
4022
4023 if (!bdaddr_type_is_valid(cp->addr.type))
4024 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4025 MGMT_STATUS_INVALID_PARAMS,
4026 &cp->addr, sizeof(cp->addr));
4027
4028 hci_dev_lock(hdev);
4029
4030 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4031 cp->addr.type);
4032 if (err < 0) {
4033 status = MGMT_STATUS_INVALID_PARAMS;
4034 goto done;
4035 }
4036
4037 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4038 sk);
4039 status = MGMT_STATUS_SUCCESS;
4040
4041 done:
4042 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4043 &cp->addr, sizeof(cp->addr));
4044
4045 hci_dev_unlock(hdev);
4046
4047 return err;
4048 }
4049
/* Handle the Set Device ID mgmt command.
 *
 * Stores the Device ID (source, vendor, product, version) and refreshes
 * the extended inquiry response so the new DID record is advertised.
 * Source values above 0x0002 are rejected (0x0001 = Bluetooth SIG,
 * 0x0002 = USB IF; 0x0000 disables the record).
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	struct hci_request req;
	int err;
	__u16 source;

	BT_DBG("%s", hdev->name);

	source = __le16_to_cpu(cp->source);

	if (source > 0x0002)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);

	/* Push the updated EIR data; the request result is best-effort. */
	hci_req_init(&req, hdev);
	update_eir(&req);
	hci_req_run(&req, NULL);

	hci_dev_unlock(hdev);

	return err;
}
4083
/* HCI request completion callback for Set Advertising.
 *
 * On failure, answers all pending Set Advertising commands with the
 * mapped error.  On success, synchronizes the mgmt-visible
 * HCI_ADVERTISING flag with the actual controller state (HCI_LE_ADV),
 * replies with the new settings, and broadcasts a New Settings event.
 */
static void set_advertising_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		set_bit(HCI_ADVERTISING, &hdev->dev_flags);
	else
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the last responding socket. */
	if (match.sk)
		sock_put(match.sk);
}
4109
/* Handle the Set Advertising mgmt command.
 *
 * Toggles LE advertising.  When no HCI traffic is needed (powered off,
 * no change, an LE connection exists, or an active scan is running)
 * only the flag is flipped and settings are returned directly;
 * otherwise an HCI request is queued and set_advertising_complete()
 * finishes the job.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Serialize against other in-flight advertising/LE changes. */
	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4189
/* Handle the Set Static Address mgmt command.
 *
 * Stores the LE static random address to use when the controller has
 * no public address.  Only allowed while powered off.  BDADDR_ANY
 * clears the address; any other value must be a valid static random
 * address (not BDADDR_NONE, top two bits set per the Core Spec).
 */
static int set_static_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_static_address *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
				  MGMT_STATUS_REJECTED);

	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_SET_STATIC_ADDRESS,
					  MGMT_STATUS_INVALID_PARAMS);

		/* Two most significant bits shall be set */
		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
			return cmd_status(sk, hdev->id,
					  MGMT_OP_SET_STATIC_ADDRESS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	bacpy(&hdev->static_addr, &cp->bdaddr);

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
4229
/* Handle the Set Scan Parameters mgmt command.
 *
 * Validates and stores the LE scan interval and window (both in the
 * HCI-defined range 0x0004-0x4000, window <= interval) and, when a
 * background scan is running outside of discovery, restarts it so the
 * new parameters take effect.
 */
static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_set_scan_params *cp = data;
	__u16 interval, window;
	int err;

	BT_DBG("%s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_NOT_SUPPORTED);

	interval = __le16_to_cpu(cp->interval);

	if (interval < 0x0004 || interval > 0x4000)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	window = __le16_to_cpu(cp->window);

	if (window < 0x0004 || window > 0x4000)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	/* The scan window must fit inside the scan interval. */
	if (window > interval)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->le_scan_interval = interval;
	hdev->le_scan_window = window;

	err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);

	/* If background scan is running, restart it so new parameters are
	 * loaded.
	 */
	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
	    hdev->discovery.state == DISCOVERY_STOPPED) {
		struct hci_request req;

		hci_req_init(&req, hdev);

		hci_req_add_le_scan_disable(&req);
		hci_req_add_le_passive_scan(&req);

		hci_req_run(&req, NULL);
	}

	hci_dev_unlock(hdev);

	return err;
}
4285
/* HCI request completion callback for Set Fast Connectable.
 *
 * On failure, answers the pending command with the mapped error.  On
 * success, updates the HCI_FAST_CONNECTABLE flag to match the value
 * the caller requested, replies with the new settings and broadcasts
 * a New Settings event.
 */
static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
			   mgmt_status(status));
	} else {
		/* The requested mode was stored with the pending command. */
		struct mgmt_mode *cp = cmd->param;

		if (cp->val)
			set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
		else
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4318
/* Handle the Set Fast Connectable mgmt command.
 *
 * Adjusts the BR/EDR page scan parameters for faster connection
 * establishment.  Requires BR/EDR enabled, controller version >= 1.2,
 * power on and connectable mode.  The flag itself is flipped in
 * fast_connectable_complete() once the HCI request succeeds.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing to do when the requested mode is already active. */
	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4383
4384 static void set_bredr_scan(struct hci_request *req)
4385 {
4386 struct hci_dev *hdev = req->hdev;
4387 u8 scan = 0;
4388
4389 /* Ensure that fast connectable is disabled. This function will
4390 * not do anything if the page scan parameters are already what
4391 * they should be.
4392 */
4393 write_fast_connectable(req, false);
4394
4395 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
4396 !list_empty(&hdev->whitelist))
4397 scan |= SCAN_PAGE;
4398 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
4399 scan |= SCAN_INQUIRY;
4400
4401 if (scan)
4402 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
4403 }
4404
/* HCI request completion callback for Set BR/EDR.
 *
 * On failure, reverts the optimistically-set HCI_BREDR_ENABLED flag
 * and answers the pending command with the mapped error.  On success,
 * replies with the new settings and broadcasts a New Settings event.
 */
static void set_bredr_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
4436
/* Handle the Set BR/EDR mgmt command.
 *
 * Enables or disables BR/EDR on a dual-mode controller.  Requires LE
 * to stay enabled (LE-only operation is configured the other way
 * around).  While powered off the flag is simply toggled; while
 * powered on only enabling is allowed, and an HCI request updates
 * scan mode and advertising data accordingly.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR clears all BR/EDR-only settings. */
		if (!cp->val) {
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
	    !list_empty(&hdev->whitelist))
		set_bredr_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4527
/* Handle the Set Secure Connections mgmt command.
 *
 * val may be 0x00 (off), 0x01 (on) or 0x02 (secure-connections-only
 * mode).  While powered off only the flags are toggled; while powered
 * on a Write Secure Connections Host Support HCI command is sent and
 * the final mgmt reply comes from the corresponding event handler.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  status);

	/* SC support can be forced via debugfs for testing. */
	if (!lmp_sc_capable(hdev) &&
	    !test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SC_ENABLED,
						    &hdev->dev_flags);
			if (cp->val == 0x02)
				set_bit(HCI_SC_ONLY, &hdev->dev_flags);
			else
				clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SC_ENABLED,
						     &hdev->dev_flags);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Nothing to do when both flags already match the request. */
	if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	if (cp->val == 0x02)
		set_bit(HCI_SC_ONLY, &hdev->dev_flags);
	else
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4615
/* Handle the Set Debug Keys mgmt command.
 *
 * val may be 0x00 (discard debug keys), 0x01 (keep debug keys) or
 * 0x02 (keep and also actively use SSP debug mode).  When the use-mode
 * changes on a powered controller with SSP enabled, a Write SSP Debug
 * Mode HCI command is sent (best-effort, result not checked).
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
					     &hdev->dev_flags);

	if (cp->val == 0x02)
		use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
						&hdev->dev_flags);
	else
		use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
						 &hdev->dev_flags);

	if (hdev_is_powered(hdev) && use_changed &&
	    test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4663
/* Handle the Set Privacy mgmt command.
 *
 * Enables or disables LE privacy (resolvable private addresses) and
 * stores the local IRK.  Only allowed while powered off, since the
 * address generation strategy cannot change on a live controller.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

	if (cp->privacy) {
		changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Force generation of a fresh RPA on next use. */
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4713
4714 static bool irk_is_valid(struct mgmt_irk_info *irk)
4715 {
4716 switch (irk->addr.type) {
4717 case BDADDR_LE_PUBLIC:
4718 return true;
4719
4720 case BDADDR_LE_RANDOM:
4721 /* Two most significant bits shall be set */
4722 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4723 return false;
4724 return true;
4725 }
4726
4727 return false;
4728 }
4729
/* Handle the Load IRKs mgmt command.
 *
 * Replaces the entire SMP IRK store with the list supplied by user
 * space (typically at daemon startup).  The command length must match
 * the declared IRK count exactly and every entry must pass
 * irk_is_valid() before any change is made.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound on irk_count that keeps expected_len within u16. */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		BT_ERR("load_irks: too big irk_count value %u", irk_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
	if (expected_len != len) {
		BT_ERR("load_irks: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s irk_count %u", hdev->name, irk_count);

	/* Validate all entries up front so the store is replaced
	 * atomically from the caller's point of view.
	 */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_LOAD_IRKS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];
		u8 addr_type;

		if (irk->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
			    BDADDR_ANY);
	}

	/* User space that loads IRKs is expected to resolve RPAs. */
	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
4796
4797 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4798 {
4799 if (key->master != 0x00 && key->master != 0x01)
4800 return false;
4801
4802 switch (key->addr.type) {
4803 case BDADDR_LE_PUBLIC:
4804 return true;
4805
4806 case BDADDR_LE_RANDOM:
4807 /* Two most significant bits shall be set */
4808 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4809 return false;
4810 return true;
4811 }
4812
4813 return false;
4814 }
4815
/* MGMT_OP_LOAD_LONG_TERM_KEYS: replace the stored SMP Long Term Keys
 * with the list supplied by userspace.
 *
 * As with load_irks(), the payload is validated in full before the
 * existing keys are cleared, so an invalid request has no effect.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound keeping expected_len below from overflowing u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_ltks: too big key_count value %u", key_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* Declared key count must match the payload length exactly */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_ltk_info);
	if (expected_len != len) {
		BT_ERR("load_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s key_count %u", hdev->name, key_count);

	/* Validate every key first so the operation is all-or-nothing */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_LOAD_LONG_TERM_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	/* The supplied list fully replaces the stored LTKs */
	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, addr_type, authenticated;

		if (key->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		/* Master flag selects which connection role the key is for */
		if (key->master)
			type = SMP_LTK;
		else
			type = SMP_LTK_SLAVE;

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			break;
		default:
			/* Unknown key types are silently skipped, not
			 * treated as an error, for forward compatibility.
			 */
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
			    authenticated, key->val, key->enc_size, key->ediv,
			    key->rand);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
4899
/* Context handed to get_conn_info_complete() while iterating pending
 * MGMT_OP_GET_CONN_INFO commands: identifies which connection the just
 * finished HCI request refreshed, and carries its outcome.
 */
struct cmd_conn_lookup {
	struct hci_conn *conn;		/* connection the refresh was for */
	bool valid_tx_power;		/* conn->tx_power fields are fresh */
	u8 mgmt_status;			/* mgmt status to report to caller */
};
4905
/* mgmt_pending_foreach() callback: reply to one pending
 * MGMT_OP_GET_CONN_INFO command once the RSSI/TX power refresh for its
 * connection has completed.
 */
static void get_conn_info_complete(struct pending_cmd *cmd, void *data)
{
	struct cmd_conn_lookup *match = data;
	struct mgmt_cp_get_conn_info *cp;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn = cmd->user_data;

	/* Only answer commands waiting on the refreshed connection */
	if (conn != match->conn)
		return;

	cp = (struct mgmt_cp_get_conn_info *) cmd->param;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!match->mgmt_status) {
		rp.rssi = conn->rssi;

		/* TX power values are only reported when the refresh
		 * request actually succeeded in reading them.
		 */
		if (match->valid_tx_power) {
			rp.tx_power = conn->tx_power;
			rp.max_tx_power = conn->max_tx_power;
		} else {
			rp.tx_power = HCI_TX_POWER_INVALID;
			rp.max_tx_power = HCI_TX_POWER_INVALID;
		}
	}

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
		     match->mgmt_status, &rp, sizeof(rp));

	/* Drop the reference taken when the command was queued */
	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}
4941
/* Completion handler for the HCI request built in get_conn_info():
 * identifies which connection the request refreshed and replies to the
 * matching pending mgmt command(s).
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_read_rssi *cp;
	struct hci_conn *conn;
	struct cmd_conn_lookup match;
	u16 handle;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* TX power data is valid in case request completed successfully,
	 * otherwise we assume it's not valid. At the moment we assume that
	 * either both or none of current and max values are valid to keep code
	 * simple.
	 */
	match.valid_tx_power = !status;

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle. Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = 0;
	}

	if (!cp) {
		BT_ERR("invalid sent_cmd in response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		BT_ERR("unknown handle (%d) in response", handle);
		goto unlock;
	}

	match.conn = conn;
	match.mgmt_status = mgmt_status(status);

	/* Cache refresh is complete, now reply for mgmt request for given
	 * connection only.
	 */
	mgmt_pending_foreach(MGMT_OP_GET_CONN_INFO, hdev,
			     get_conn_info_complete, &match);

unlock:
	hci_dev_unlock(hdev);
}
4999
/* MGMT_OP_GET_CONN_INFO: report RSSI and TX power for a connection.
 *
 * If the cached values in hci_conn are recent enough they are returned
 * immediately; otherwise an HCI request is issued to refresh them and
 * the reply is deferred to conn_info_refresh_complete() via a pending
 * command that holds a reference on the connection.
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	/* Echo the requested address back in every reply, even errors */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01;
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		/* Defer the reply: conn_info_refresh_complete() will
		 * find this pending command and answer it.
		 */
		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* Keep the connection alive until the reply is sent;
		 * dropped in get_conn_info_complete().
		 */
		hci_conn_hold(conn);
		cmd->user_data = conn;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5111
/* Completion handler for the Read Clock request issued from
 * get_clock_info(): locates the pending mgmt command for the right
 * connection (or for the local clock) and sends the reply.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_get_clock_info *cp;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock *hci_cp;
	struct pending_cmd *cmd;
	struct hci_conn *conn;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	/* which != 0 means the piconet clock of a specific connection
	 * was requested; otherwise the local clock (conn == NULL).
	 */
	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	/* Pending command was queued with conn as its user_data */
	cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	memset(&rp, 0, sizeof(rp));
	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));

	/* On failure reply with zeroed clock values and the error status */
	if (status)
		goto send_rsp;

	rp.local_clock = cpu_to_le32(hdev->clock);

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

send_rsp:
	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     &rp, sizeof(rp));
	mgmt_pending_remove(cmd);
	/* Release the reference taken in get_clock_info() */
	if (conn)
		hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
5164
/* MGMT_OP_GET_CLOCK_INFO: read the local Bluetooth clock, and the
 * piconet clock of a specific BR/EDR connection if a non-zero address
 * was given. The reply is always deferred to get_clock_info_complete().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	BT_DBG("%s", hdev->name);

	/* Echo the requested address back in every reply, even errors */
	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information only exists for BR/EDR links */
	if (cp->addr.type != BDADDR_BREDR)
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	/* BDADDR_ANY means only the local clock is requested */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = cmd_complete(sk, hdev->id,
					   MGMT_OP_GET_CLOCK_INFO,
					   MGMT_STATUS_NOT_CONNECTED,
					   &rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	/* First read: local clock (which = 0x00 via memset) */
	memset(&hci_cp, 0, sizeof(hci_cp));
	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);

	if (conn) {
		/* Hold the connection until the completion handler has
		 * replied; dropped in get_clock_info_complete().
		 */
		hci_conn_hold(conn);
		cmd->user_data = conn;

		/* Second read: piconet clock of the given connection */
		hci_cp.handle = cpu_to_le16(conn->handle);
		hci_cp.which = 0x01; /* Piconet clock */
		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
	}

	err = hci_req_run(&req, get_clock_info_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5237
5238 /* Helper for Add/Remove Device commands */
5239 static void update_page_scan(struct hci_dev *hdev, u8 scan)
5240 {
5241 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5242 return;
5243
5244 if (!hdev_is_powered(hdev))
5245 return;
5246
5247 /* If HCI_CONNECTABLE is set then Add/Remove Device should not
5248 * make any changes to page scanning.
5249 */
5250 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
5251 return;
5252
5253 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
5254 scan |= SCAN_INQUIRY;
5255
5256 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5257 }
5258
5259 static void device_added(struct sock *sk, struct hci_dev *hdev,
5260 bdaddr_t *bdaddr, u8 type, u8 action)
5261 {
5262 struct mgmt_ev_device_added ev;
5263
5264 bacpy(&ev.addr.bdaddr, bdaddr);
5265 ev.addr.type = type;
5266 ev.action = action;
5267
5268 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
5269 }
5270
/* MGMT_OP_ADD_DEVICE: whitelist a BR/EDR device for incoming
 * connections (action 0x01 only), or set up LE connection parameters
 * with the requested auto-connect policy.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	/* 0x00 background scan, 0x01 allow incoming, 0x02 auto-connect */
	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		bool update_scan;

		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Page scan only needs enabling for the first entry */
		update_scan = list_empty(&hdev->whitelist);

		err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
					  cp->addr.type);
		if (err)
			goto unlock;

		if (update_scan)
			update_page_scan(hdev, SCAN_PAGE);

		goto added;
	}

	if (cp->addr.type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				   MGMT_STATUS_FAILED,
				   &cp->addr, sizeof(cp->addr));
		goto unlock;
	}

added:
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);

	err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
			   MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5350
5351 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5352 bdaddr_t *bdaddr, u8 type)
5353 {
5354 struct mgmt_ev_device_removed ev;
5355
5356 bacpy(&ev.addr.bdaddr, bdaddr);
5357 ev.addr.type = type;
5358
5359 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
5360 }
5361
/* MGMT_OP_REMOVE_DEVICE: undo a previous Add Device. A specific
 * address removes that one entry (BR/EDR whitelist entry or LE
 * connection parameters); BDADDR_ANY with type 0 removes everything
 * that was added via Add Device, leaving disabled LE params alone.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->whitelist,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = cmd_complete(sk, hdev->id,
						   MGMT_OP_REMOVE_DEVICE,
						   MGMT_STATUS_INVALID_PARAMS,
						   &cp->addr, sizeof(cp->addr));
				goto unlock;
			}

			/* Last whitelist entry gone: stop page scanning */
			if (list_empty(&hdev->whitelist))
				update_page_scan(hdev, SCAN_DISABLED);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Disabled params were not created via Add Device and
		 * hence cannot be removed through this command.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Unlink from the pend_le_* action list before freeing */
		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		hci_update_background_scan(hdev);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* Wildcard removal requires address type 0 */
		if (cp->addr.type) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		update_page_scan(hdev, SCAN_DISABLED);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			/* Disabled params are kept: they were created by
			 * other means than Add Device.
			 */
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		BT_DBG("All LE connection parameters were removed");

		hci_update_background_scan(hdev);
	}

complete:
	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
			   MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5471
/* MGMT_OP_LOAD_CONN_PARAM: bulk-load LE connection parameters.
 *
 * Unlike load_irks()/load_long_term_keys(), invalid individual entries
 * are logged and skipped rather than failing the whole command; only
 * malformed packet framing is rejected up front.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound keeping expected_len below from overflowing u16 */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		BT_ERR("load_conn_param: too big param_count value %u",
		       param_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* Declared entry count must match the payload length exactly */
	expected_len = sizeof(*cp) + param_count *
					sizeof(struct mgmt_conn_param);
	if (expected_len != len) {
		BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s param_count %u", hdev->name, param_count);

	hci_dev_lock(hdev);

	/* Drop stale disabled entries before loading the new set */
	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
		       param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
		       min, max, latency, timeout);

		/* Range/consistency check per the LE connection spec */
		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			BT_ERR("Failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
}
5556
/* MGMT_OP_SET_EXTERNAL_CONFIG: mark a controller that needs external
 * configuration (HCI_QUIRK_EXTERNAL_CONFIG) as configured/unconfigured.
 * Only permitted while the controller is powered off; may cause the
 * controller to switch between the configured and unconfigured index
 * lists.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
					     &hdev->dev_flags);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* If the configured state of the controller no longer matches
	 * its unconfigured flag, move it between the configured and
	 * unconfigured index lists.
	 */
	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
			/* Became configured: run power-on setup */
			set_bit(HCI_CONFIG, &hdev->dev_flags);
			set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			/* Became unconfigured: expose as raw device */
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5614
/* MGMT_OP_SET_PUBLIC_ADDRESS: configure the public Bluetooth address
 * for controllers whose driver provides a set_bdaddr hook. Only valid
 * while powered off; setting the address may complete the controller's
 * configuration and trigger its power-on setup.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_REJECTED);

	/* The all-zero address is not a valid public address */
	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev->set_bdaddr)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	/* Unconfigured controllers advertise missing-option changes */
	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
		err = new_options(hdev, sk);

	/* Address was the last missing piece: promote the controller to
	 * the configured index list and power it on for setup.
	 */
	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

		set_bit(HCI_CONFIG, &hdev->dev_flags);
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5666
/* Dispatch table indexed by mgmt opcode. For fixed-size commands
 * (var_len == false) the payload length must equal data_len exactly;
 * variable-length commands must carry at least data_len bytes. Both
 * checks are enforced in mgmt_control() before func is called.
 */
static const struct mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	bool var_len;
	size_t data_len;
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,           false, MGMT_READ_VERSION_SIZE },
	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
	{ set_powered,            false, MGMT_SETTING_SIZE },
	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,        false, MGMT_SETTING_SIZE },
	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
	{ set_bondable,           false, MGMT_SETTING_SIZE },
	{ set_link_security,      false, MGMT_SETTING_SIZE },
	{ set_ssp,                false, MGMT_SETTING_SIZE },
	{ set_hs,                 false, MGMT_SETTING_SIZE },
	{ set_le,                 false, MGMT_SETTING_SIZE },
	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,    true,  MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,        false, MGMT_SETTING_SIZE },
	{ set_bredr,              false, MGMT_SETTING_SIZE },
	{ set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,        false, MGMT_SETTING_SIZE },
	{ set_debug_keys,         false, MGMT_SETTING_SIZE },
	{ set_privacy,            false, MGMT_SET_PRIVACY_SIZE },
	{ load_irks,              true,  MGMT_LOAD_IRKS_SIZE },
	{ get_conn_info,          false, MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,         false, MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,             false, MGMT_ADD_DEVICE_SIZE },
	{ remove_device,          false, MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,        true,  MGMT_LOAD_CONN_PARAM_SIZE },
	{ read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
	{ read_config_info,       false, MGMT_READ_CONFIG_INFO_SIZE },
	{ set_external_config,    false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
	{ set_public_address,     false, MGMT_SET_PUBLIC_ADDRESS_SIZE },
};
5732
/* Entry point for all mgmt commands received on the control socket.
 *
 * Copies the whole message to kernel space, validates the header, the
 * controller index, the opcode and the payload size, and dispatches to
 * the matching mgmt_handlers[] entry. Returns the number of consumed
 * bytes on success, a negative errno on transport-level failure;
 * command-level failures are reported to the socket via cmd_status().
 */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* Header length field must exactly cover the payload */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Controllers still in setup/config, or claimed by a
		 * user channel, are not visible through mgmt.
		 */
		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_CONFIG, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Unconfigured controllers accept only the commands
		 * needed to finish their configuration.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    opcode != MGMT_OP_READ_CONFIG_INFO &&
		    opcode != MGMT_OP_SET_EXTERNAL_CONFIG &&
		    opcode != MGMT_OP_SET_PUBLIC_ADDRESS) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	/* Global commands must not name an index ... */
	if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
		     opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	/* ... and per-controller commands must name one */
	if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
		      opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	/* Enforce the payload size contract of the dispatch table */
	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
5842
5843 void mgmt_index_added(struct hci_dev *hdev)
5844 {
5845 if (hdev->dev_type != HCI_BREDR)
5846 return;
5847
5848 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
5849 return;
5850
5851 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5852 mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
5853 else
5854 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
5855 }
5856
5857 void mgmt_index_removed(struct hci_dev *hdev)
5858 {
5859 u8 status = MGMT_STATUS_INVALID_INDEX;
5860
5861 if (hdev->dev_type != HCI_BREDR)
5862 return;
5863
5864 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
5865 return;
5866
5867 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
5868
5869 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5870 mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
5871 else
5872 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
5873 }
5874
5875 /* This function requires the caller holds hdev->lock */
5876 static void restart_le_actions(struct hci_dev *hdev)
5877 {
5878 struct hci_conn_params *p;
5879
5880 list_for_each_entry(p, &hdev->le_conn_params, list) {
5881 /* Needed for AUTO_OFF case where might not "really"
5882 * have been powered off.
5883 */
5884 list_del_init(&p->action);
5885
5886 switch (p->auto_connect) {
5887 case HCI_AUTO_CONN_DIRECT:
5888 case HCI_AUTO_CONN_ALWAYS:
5889 list_add(&p->action, &hdev->pend_le_conns);
5890 break;
5891 case HCI_AUTO_CONN_REPORT:
5892 list_add(&p->action, &hdev->pend_le_reports);
5893 break;
5894 default:
5895 break;
5896 }
5897 }
5898
5899 hci_update_background_scan(hdev);
5900 }
5901
5902 static void powered_complete(struct hci_dev *hdev, u8 status)
5903 {
5904 struct cmd_lookup match = { NULL, hdev };
5905
5906 BT_DBG("status 0x%02x", status);
5907
5908 hci_dev_lock(hdev);
5909
5910 restart_le_actions(hdev);
5911
5912 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
5913
5914 new_settings(hdev, match.sk);
5915
5916 hci_dev_unlock(hdev);
5917
5918 if (match.sk)
5919 sock_put(match.sk);
5920 }
5921
/* Bring the controller's HCI state in sync with the current mgmt
 * settings after power-on. All needed HCI commands are queued into a
 * single request which runs with powered_complete() as its callback.
 *
 * Returns the result of hci_req_run(); 0 means the request was queued.
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	/* Enable SSP in the controller if mgmt has it enabled but the
	 * controller's host feature bit does not reflect that yet.
	 */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 ssp = 1;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
	}

	/* Enable LE host support (without simultaneous LE+BR/EDR) on
	 * dual-mode controllers where it is not already set.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);
	}

	/* Sync link-level authentication with the HCI_AUTH state. */
	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	/* Restore page scan, class of device, local name and EIR data
	 * for BR/EDR capable controllers.
	 */
	if (lmp_bredr_capable(hdev)) {
		if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
			set_bredr_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
5981
5982 int mgmt_powered(struct hci_dev *hdev, u8 powered)
5983 {
5984 struct cmd_lookup match = { NULL, hdev };
5985 u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
5986 u8 zero_cod[] = { 0, 0, 0 };
5987 int err;
5988
5989 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
5990 return 0;
5991
5992 if (powered) {
5993 if (powered_update_hci(hdev) == 0)
5994 return 0;
5995
5996 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
5997 &match);
5998 goto new_settings;
5999 }
6000
6001 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
6002 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
6003
6004 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
6005 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
6006 zero_cod, sizeof(zero_cod), NULL);
6007
6008 new_settings:
6009 err = new_settings(hdev, match.sk);
6010
6011 if (match.sk)
6012 sock_put(match.sk);
6013
6014 return err;
6015 }
6016
6017 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
6018 {
6019 struct pending_cmd *cmd;
6020 u8 status;
6021
6022 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6023 if (!cmd)
6024 return;
6025
6026 if (err == -ERFKILL)
6027 status = MGMT_STATUS_RFKILLED;
6028 else
6029 status = MGMT_STATUS_FAILED;
6030
6031 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
6032
6033 mgmt_pending_remove(cmd);
6034 }
6035
/* Called when the discoverable timeout fires. Clears the discoverable
 * flags, drops inquiry scan (keeping page scan on BR/EDR enabled
 * controllers), refreshes class and advertising data, and emits a New
 * Settings event.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		/* SCAN_PAGE alone disables inquiry scan but keeps the
		 * device connectable.
		 */
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	update_class(&req);
	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
6066
6067 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
6068 bool persistent)
6069 {
6070 struct mgmt_ev_new_link_key ev;
6071
6072 memset(&ev, 0, sizeof(ev));
6073
6074 ev.store_hint = persistent;
6075 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6076 ev.key.addr.type = BDADDR_BREDR;
6077 ev.key.type = key->type;
6078 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
6079 ev.key.pin_len = key->pin_len;
6080
6081 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
6082 }
6083
6084 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
6085 {
6086 if (ltk->authenticated)
6087 return MGMT_LTK_AUTHENTICATED;
6088
6089 return MGMT_LTK_UNAUTHENTICATED;
6090 }
6091
6092 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
6093 {
6094 struct mgmt_ev_new_long_term_key ev;
6095
6096 memset(&ev, 0, sizeof(ev));
6097
6098 /* Devices using resolvable or non-resolvable random addresses
6099 * without providing an indentity resolving key don't require
6100 * to store long term keys. Their addresses will change the
6101 * next time around.
6102 *
6103 * Only when a remote device provides an identity address
6104 * make sure the long term key is stored. If the remote
6105 * identity is known, the long term keys are internally
6106 * mapped to the identity address. So allow static random
6107 * and public addresses here.
6108 */
6109 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6110 (key->bdaddr.b[5] & 0xc0) != 0xc0)
6111 ev.store_hint = 0x00;
6112 else
6113 ev.store_hint = persistent;
6114
6115 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6116 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
6117 ev.key.type = mgmt_ltk_type(key);
6118 ev.key.enc_size = key->enc_size;
6119 ev.key.ediv = key->ediv;
6120 ev.key.rand = key->rand;
6121
6122 if (key->type == SMP_LTK)
6123 ev.key.master = 1;
6124
6125 memcpy(ev.key.val, key->val, sizeof(key->val));
6126
6127 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
6128 }
6129
6130 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
6131 {
6132 struct mgmt_ev_new_irk ev;
6133
6134 memset(&ev, 0, sizeof(ev));
6135
6136 /* For identity resolving keys from devices that are already
6137 * using a public address or static random address, do not
6138 * ask for storing this key. The identity resolving key really
6139 * is only mandatory for devices using resovlable random
6140 * addresses.
6141 *
6142 * Storing all identity resolving keys has the downside that
6143 * they will be also loaded on next boot of they system. More
6144 * identity resolving keys, means more time during scanning is
6145 * needed to actually resolve these addresses.
6146 */
6147 if (bacmp(&irk->rpa, BDADDR_ANY))
6148 ev.store_hint = 0x01;
6149 else
6150 ev.store_hint = 0x00;
6151
6152 bacpy(&ev.rpa, &irk->rpa);
6153 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
6154 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
6155 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
6156
6157 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
6158 }
6159
6160 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
6161 bool persistent)
6162 {
6163 struct mgmt_ev_new_csrk ev;
6164
6165 memset(&ev, 0, sizeof(ev));
6166
6167 /* Devices using resolvable or non-resolvable random addresses
6168 * without providing an indentity resolving key don't require
6169 * to store signature resolving keys. Their addresses will change
6170 * the next time around.
6171 *
6172 * Only when a remote device provides an identity address
6173 * make sure the signature resolving key is stored. So allow
6174 * static random and public addresses here.
6175 */
6176 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6177 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
6178 ev.store_hint = 0x00;
6179 else
6180 ev.store_hint = persistent;
6181
6182 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
6183 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
6184 ev.key.master = csrk->master;
6185 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
6186
6187 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
6188 }
6189
6190 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
6191 u8 bdaddr_type, u8 store_hint, u16 min_interval,
6192 u16 max_interval, u16 latency, u16 timeout)
6193 {
6194 struct mgmt_ev_new_conn_param ev;
6195
6196 if (!hci_is_identity_address(bdaddr, bdaddr_type))
6197 return;
6198
6199 memset(&ev, 0, sizeof(ev));
6200 bacpy(&ev.addr.bdaddr, bdaddr);
6201 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
6202 ev.store_hint = store_hint;
6203 ev.min_interval = cpu_to_le16(min_interval);
6204 ev.max_interval = cpu_to_le16(max_interval);
6205 ev.latency = cpu_to_le16(latency);
6206 ev.timeout = cpu_to_le16(timeout);
6207
6208 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
6209 }
6210
6211 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
6212 u8 data_len)
6213 {
6214 eir[eir_len++] = sizeof(type) + data_len;
6215 eir[eir_len++] = type;
6216 memcpy(&eir[eir_len], data, data_len);
6217 eir_len += data_len;
6218
6219 return eir_len;
6220 }
6221
6222 void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6223 u8 addr_type, u32 flags, u8 *name, u8 name_len,
6224 u8 *dev_class)
6225 {
6226 char buf[512];
6227 struct mgmt_ev_device_connected *ev = (void *) buf;
6228 u16 eir_len = 0;
6229
6230 bacpy(&ev->addr.bdaddr, bdaddr);
6231 ev->addr.type = link_to_bdaddr(link_type, addr_type);
6232
6233 ev->flags = __cpu_to_le32(flags);
6234
6235 if (name_len > 0)
6236 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
6237 name, name_len);
6238
6239 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
6240 eir_len = eir_append_data(ev->eir, eir_len,
6241 EIR_CLASS_OF_DEV, dev_class, 3);
6242
6243 ev->eir_len = cpu_to_le16(eir_len);
6244
6245 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
6246 sizeof(*ev) + eir_len, NULL);
6247 }
6248
6249 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
6250 {
6251 struct mgmt_cp_disconnect *cp = cmd->param;
6252 struct sock **sk = data;
6253 struct mgmt_rp_disconnect rp;
6254
6255 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6256 rp.addr.type = cp->addr.type;
6257
6258 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
6259 sizeof(rp));
6260
6261 *sk = cmd->sk;
6262 sock_hold(*sk);
6263
6264 mgmt_pending_remove(cmd);
6265 }
6266
6267 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
6268 {
6269 struct hci_dev *hdev = data;
6270 struct mgmt_cp_unpair_device *cp = cmd->param;
6271 struct mgmt_rp_unpair_device rp;
6272
6273 memset(&rp, 0, sizeof(rp));
6274 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6275 rp.addr.type = cp->addr.type;
6276
6277 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
6278
6279 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
6280
6281 mgmt_pending_remove(cmd);
6282 }
6283
/* Handle a remote device disconnecting: possibly expedite a pending
 * power-off, complete pending Disconnect/Unpair commands, and emit a
 * Device Disconnected event (skipping the disconnect initiator).
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct pending_cmd *power_off;
	struct sock *sk = NULL;

	/* If a power-off is pending and this was the last connection,
	 * run the power-off work immediately instead of waiting.
	 */
	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (power_off) {
		struct mgmt_mode *cp = power_off->param;

		/* The connection is still in hci_conn_hash so test for 1
		 * instead of 0 to know if this is the last one.
		 */
		if (!cp->val && hci_conn_count(hdev) == 1) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
		}
	}

	/* Only report disconnects for connections mgmt knew about. */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp() fills in sk (with a held reference) so the
	 * event below skips the socket that requested the disconnect.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
6325
6326 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
6327 u8 link_type, u8 addr_type, u8 status)
6328 {
6329 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
6330 struct mgmt_cp_disconnect *cp;
6331 struct mgmt_rp_disconnect rp;
6332 struct pending_cmd *cmd;
6333
6334 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6335 hdev);
6336
6337 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
6338 if (!cmd)
6339 return;
6340
6341 cp = cmd->param;
6342
6343 if (bacmp(bdaddr, &cp->addr.bdaddr))
6344 return;
6345
6346 if (cp->addr.type != bdaddr_type)
6347 return;
6348
6349 bacpy(&rp.addr.bdaddr, bdaddr);
6350 rp.addr.type = bdaddr_type;
6351
6352 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
6353 mgmt_status(status), &rp, sizeof(rp));
6354
6355 mgmt_pending_remove(cmd);
6356 }
6357
6358 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6359 u8 addr_type, u8 status)
6360 {
6361 struct mgmt_ev_connect_failed ev;
6362 struct pending_cmd *power_off;
6363
6364 power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6365 if (power_off) {
6366 struct mgmt_mode *cp = power_off->param;
6367
6368 /* The connection is still in hci_conn_hash so test for 1
6369 * instead of 0 to know if this is the last one.
6370 */
6371 if (!cp->val && hci_conn_count(hdev) == 1) {
6372 cancel_delayed_work(&hdev->power_off);
6373 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6374 }
6375 }
6376
6377 bacpy(&ev.addr.bdaddr, bdaddr);
6378 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6379 ev.status = mgmt_status(status);
6380
6381 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
6382 }
6383
6384 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
6385 {
6386 struct mgmt_ev_pin_code_request ev;
6387
6388 bacpy(&ev.addr.bdaddr, bdaddr);
6389 ev.addr.type = BDADDR_BREDR;
6390 ev.secure = secure;
6391
6392 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
6393 }
6394
6395 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6396 u8 status)
6397 {
6398 struct pending_cmd *cmd;
6399 struct mgmt_rp_pin_code_reply rp;
6400
6401 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
6402 if (!cmd)
6403 return;
6404
6405 bacpy(&rp.addr.bdaddr, bdaddr);
6406 rp.addr.type = BDADDR_BREDR;
6407
6408 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
6409 mgmt_status(status), &rp, sizeof(rp));
6410
6411 mgmt_pending_remove(cmd);
6412 }
6413
6414 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6415 u8 status)
6416 {
6417 struct pending_cmd *cmd;
6418 struct mgmt_rp_pin_code_reply rp;
6419
6420 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
6421 if (!cmd)
6422 return;
6423
6424 bacpy(&rp.addr.bdaddr, bdaddr);
6425 rp.addr.type = BDADDR_BREDR;
6426
6427 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
6428 mgmt_status(status), &rp, sizeof(rp));
6429
6430 mgmt_pending_remove(cmd);
6431 }
6432
6433 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6434 u8 link_type, u8 addr_type, u32 value,
6435 u8 confirm_hint)
6436 {
6437 struct mgmt_ev_user_confirm_request ev;
6438
6439 BT_DBG("%s", hdev->name);
6440
6441 bacpy(&ev.addr.bdaddr, bdaddr);
6442 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6443 ev.confirm_hint = confirm_hint;
6444 ev.value = cpu_to_le32(value);
6445
6446 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
6447 NULL);
6448 }
6449
6450 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6451 u8 link_type, u8 addr_type)
6452 {
6453 struct mgmt_ev_user_passkey_request ev;
6454
6455 BT_DBG("%s", hdev->name);
6456
6457 bacpy(&ev.addr.bdaddr, bdaddr);
6458 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6459
6460 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
6461 NULL);
6462 }
6463
6464 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6465 u8 link_type, u8 addr_type, u8 status,
6466 u8 opcode)
6467 {
6468 struct pending_cmd *cmd;
6469 struct mgmt_rp_user_confirm_reply rp;
6470 int err;
6471
6472 cmd = mgmt_pending_find(opcode, hdev);
6473 if (!cmd)
6474 return -ENOENT;
6475
6476 bacpy(&rp.addr.bdaddr, bdaddr);
6477 rp.addr.type = link_to_bdaddr(link_type, addr_type);
6478 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
6479 &rp, sizeof(rp));
6480
6481 mgmt_pending_remove(cmd);
6482
6483 return err;
6484 }
6485
/* Complete a pending User Confirm Reply command with the given status. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
6492
/* Complete a pending User Confirm Negative Reply command with the
 * given status.
 */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
6500
/* Complete a pending User Passkey Reply command with the given status. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
6507
/* Complete a pending User Passkey Negative Reply command with the
 * given status.
 */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
6515
6516 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
6517 u8 link_type, u8 addr_type, u32 passkey,
6518 u8 entered)
6519 {
6520 struct mgmt_ev_passkey_notify ev;
6521
6522 BT_DBG("%s", hdev->name);
6523
6524 bacpy(&ev.addr.bdaddr, bdaddr);
6525 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6526 ev.passkey = __cpu_to_le32(passkey);
6527 ev.entered = entered;
6528
6529 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
6530 }
6531
6532 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6533 u8 addr_type, u8 status)
6534 {
6535 struct mgmt_ev_auth_failed ev;
6536
6537 bacpy(&ev.addr.bdaddr, bdaddr);
6538 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6539 ev.status = mgmt_status(status);
6540
6541 mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
6542 }
6543
/* Completion handler for Write Authentication Enable: sync the
 * HCI_LINK_SECURITY mgmt flag with the controller's HCI_AUTH state,
 * answer pending Set Link Security commands and emit New Settings if
 * anything actually changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		/* Fail all pending Set Link Security commands. */
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller's auth state into the mgmt flag;
	 * "changed" is true only if the flag actually flipped.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !test_and_set_bit(HCI_LINK_SECURITY,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_LINK_SECURITY,
					     &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
6572
6573 static void clear_eir(struct hci_request *req)
6574 {
6575 struct hci_dev *hdev = req->hdev;
6576 struct hci_cp_write_eir cp;
6577
6578 if (!lmp_ext_inq_capable(hdev))
6579 return;
6580
6581 memset(hdev->eir, 0, sizeof(hdev->eir));
6582
6583 memset(&cp, 0, sizeof(cp));
6584
6585 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
6586 }
6587
/* Completion handler for Write Simple Pairing Mode: sync the
 * HCI_SSP_ENABLED (and dependent HCI_HS_ENABLED) flags, respond to
 * pending Set SSP commands and update or clear the EIR data to match
 * the new state.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Roll back the flag if enabling failed; HS depends on
		 * SSP so it gets cleared too.
		 */
		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		/* Disabling SSP also disables High Speed; report a
		 * change if either flag flipped.
		 */
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	/* EIR data is only valid while SSP is enabled. */
	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
6640
/* Completion handler for enabling/disabling Secure Connections: sync
 * the HCI_SC_ENABLED (and HCI_SC_ONLY) flags, answer pending Set
 * Secure Connections commands and emit New Settings on change.
 */
void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* Roll back the flags if enabling failed. */
		if (enable) {
			if (test_and_clear_bit(HCI_SC_ENABLED,
					       &hdev->dev_flags))
				new_settings(hdev, NULL);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		/* SC-only mode cannot survive SC being disabled. */
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
6677
6678 static void sk_lookup(struct pending_cmd *cmd, void *data)
6679 {
6680 struct cmd_lookup *match = data;
6681
6682 if (match->sk == NULL) {
6683 match->sk = cmd->sk;
6684 sock_hold(match->sk);
6685 }
6686 }
6687
6688 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
6689 u8 status)
6690 {
6691 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
6692
6693 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
6694 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
6695 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
6696
6697 if (!status)
6698 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
6699 NULL);
6700
6701 if (match.sk)
6702 sock_put(match.sk);
6703 }
6704
/* Completion handler for a local name update: emit a Local Name
 * Changed event. If the change was host-initiated (no pending Set
 * Local Name command) the stored name is refreshed, and no event is
 * sent while a power-on sequence is in progress.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* No mgmt command pending, so the change came from the
		 * controller/host side; update the cached name.
		 */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	/* Skip the sender's socket when the command is still pending. */
	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}
6731
/* Completion handler for Read Local OOB Data: respond to the pending
 * mgmt command with either the extended (P-192 + P-256, when Secure
 * Connections is enabled and 256-bit values are available) or the
 * legacy (P-192 only) response format.
 */
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *randomizer192, u8 *hash256,
				       u8 *randomizer256, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			   mgmt_status(status));
	} else {
		/* Extended format only when SC is on and the controller
		 * delivered the 256-bit values.
		 */
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
		    hash256 && randomizer256) {
			struct mgmt_rp_read_local_oob_ext_data rp;

			memcpy(rp.hash192, hash192, sizeof(rp.hash192));
			memcpy(rp.randomizer192, randomizer192,
			       sizeof(rp.randomizer192));

			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.randomizer256, randomizer256,
			       sizeof(rp.randomizer256));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		} else {
			struct mgmt_rp_read_local_oob_data rp;

			memcpy(rp.hash, hash192, sizeof(rp.hash));
			memcpy(rp.randomizer, randomizer192,
			       sizeof(rp.randomizer));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		}
	}

	mgmt_pending_remove(cmd);
}
6778
/* Emit a Device Found event for an inquiry/advertising result,
 * carrying the EIR data, an appended class-of-device field when not
 * already present, and any scan response data.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		memcpy(ev->eir, eir, eir_len);

	/* Append the class of device only if the EIR data doesn't
	 * already contain one.
	 */
	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	/* Scan response data follows directly after the EIR data. */
	if (scan_rsp_len > 0)
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
6826
6827 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6828 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
6829 {
6830 struct mgmt_ev_device_found *ev;
6831 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
6832 u16 eir_len;
6833
6834 ev = (struct mgmt_ev_device_found *) buf;
6835
6836 memset(buf, 0, sizeof(buf));
6837
6838 bacpy(&ev->addr.bdaddr, bdaddr);
6839 ev->addr.type = link_to_bdaddr(link_type, addr_type);
6840 ev->rssi = rssi;
6841
6842 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
6843 name_len);
6844
6845 ev->eir_len = cpu_to_le16(eir_len);
6846
6847 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
6848 }
6849
6850 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
6851 {
6852 struct mgmt_ev_discovering ev;
6853 struct pending_cmd *cmd;
6854
6855 BT_DBG("%s discovering %u", hdev->name, discovering);
6856
6857 if (discovering)
6858 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
6859 else
6860 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6861
6862 if (cmd != NULL) {
6863 u8 type = hdev->discovery.type;
6864
6865 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
6866 sizeof(type));
6867 mgmt_pending_remove(cmd);
6868 }
6869
6870 memset(&ev, 0, sizeof(ev));
6871 ev.type = hdev->discovery.type;
6872 ev.discovering = discovering;
6873
6874 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
6875 }
6876
/* Request callback for mgmt_reenable_advertising(); only logs status. */
static void adv_enable_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status %u", hdev->name, status);
}
6881
6882 void mgmt_reenable_advertising(struct hci_dev *hdev)
6883 {
6884 struct hci_request req;
6885
6886 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
6887 return;
6888
6889 hci_req_init(&req, hdev);
6890 enable_advertising(&req);
6891 hci_req_run(&req, adv_enable_complete);
6892 }