]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - net/bluetooth/mgmt.c
Bluetooth: Unify helpers for bdaddr_list manipulations
[mirror_ubuntu-bionic-kernel.git] / net / bluetooth / mgmt.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
34
35 #include "smp.h"
36
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 7
39
/* Management opcodes implemented by this interface. Reported back to
 * user space by read_commands() (MGMT_OP_READ_COMMANDS).
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_PAIRABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
};
97
/* Management events this interface can emit. Reported back to user
 * space together with mgmt_commands[] by read_commands().
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
};
129
/* Lifetime of the service cache (see service_cache_off()) */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* Powered means the transport is up and not scheduled for auto power-off */
#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
				!test_bit(HCI_AUTO_OFF, &hdev->dev_flags))

/* One mgmt command that is still waiting for its completion */
struct pending_cmd {
	struct list_head list;	/* linked into hdev->mgmt_pending */
	u16 opcode;		/* pending MGMT_OP_* opcode */
	int index;		/* controller index (hdev->id) */
	void *param;		/* private copy of the command parameters */
	struct sock *sk;	/* originating socket; holds a reference */
	void *user_data;	/* opaque per-command context */
};
143
/* HCI to MGMT error code conversion table.
 *
 * Indexed directly by the HCI status code; codes beyond the end of the
 * table are mapped to MGMT_STATUS_FAILED by mgmt_status().
 */
static u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
208
209 static u8 mgmt_status(u8 hci_status)
210 {
211 if (hci_status < ARRAY_SIZE(mgmt_status_table))
212 return mgmt_status_table[hci_status];
213
214 return MGMT_STATUS_FAILED;
215 }
216
217 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
218 struct sock *skip_sk)
219 {
220 struct sk_buff *skb;
221 struct mgmt_hdr *hdr;
222
223 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
224 if (!skb)
225 return -ENOMEM;
226
227 hdr = (void *) skb_put(skb, sizeof(*hdr));
228 hdr->opcode = cpu_to_le16(event);
229 if (hdev)
230 hdr->index = cpu_to_le16(hdev->id);
231 else
232 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
233 hdr->len = cpu_to_le16(data_len);
234
235 if (data)
236 memcpy(skb_put(skb, data_len), data, data_len);
237
238 /* Time stamp */
239 __net_timestamp(skb);
240
241 hci_send_to_control(skb, skip_sk);
242 kfree_skb(skb);
243
244 return 0;
245 }
246
247 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
248 {
249 struct sk_buff *skb;
250 struct mgmt_hdr *hdr;
251 struct mgmt_ev_cmd_status *ev;
252 int err;
253
254 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
255
256 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
257 if (!skb)
258 return -ENOMEM;
259
260 hdr = (void *) skb_put(skb, sizeof(*hdr));
261
262 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
263 hdr->index = cpu_to_le16(index);
264 hdr->len = cpu_to_le16(sizeof(*ev));
265
266 ev = (void *) skb_put(skb, sizeof(*ev));
267 ev->status = status;
268 ev->opcode = cpu_to_le16(cmd);
269
270 err = sock_queue_rcv_skb(sk, skb);
271 if (err < 0)
272 kfree_skb(skb);
273
274 return err;
275 }
276
/* Queue a Command Complete event for a single socket.
 *
 * rp/rp_len describe the command-specific return parameters appended
 * after the generic cmd_complete header; rp may be NULL when rp_len
 * is 0. Returns 0 on success or a negative errno.
 */
static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
			void *rp, size_t rp_len)
{
	struct sk_buff *skb;
	struct mgmt_hdr *hdr;
	struct mgmt_ev_cmd_complete *ev;
	int err;

	BT_DBG("sock %p", sk);

	skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (void *) skb_put(skb, sizeof(*hdr));

	hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
	hdr->index = cpu_to_le16(index);
	hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);

	ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
	ev->opcode = cpu_to_le16(cmd);
	ev->status = status;

	if (rp)
		memcpy(ev->data, rp, rp_len);

	/* On queueing failure the skb is still ours to free */
	err = sock_queue_rcv_skb(sk, skb);
	if (err < 0)
		kfree_skb(skb);

	return err;
}
310
311 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
312 u16 data_len)
313 {
314 struct mgmt_rp_read_version rp;
315
316 BT_DBG("sock %p", sk);
317
318 rp.version = MGMT_VERSION;
319 rp.revision = cpu_to_le16(MGMT_REVISION);
320
321 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
322 sizeof(rp));
323 }
324
/* MGMT_OP_READ_COMMANDS handler: report the supported command and
 * event opcode lists. Global command, so hdev/data/data_len are unused.
 */
static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 data_len)
{
	struct mgmt_rp_read_commands *rp;
	const u16 num_commands = ARRAY_SIZE(mgmt_commands);
	const u16 num_events = ARRAY_SIZE(mgmt_events);
	__le16 *opcode;
	size_t rp_size;
	int i, err;

	BT_DBG("sock %p", sk);

	/* Both opcode lists are packed back to back after the header */
	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));

	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	rp->num_commands = cpu_to_le16(num_commands);
	rp->num_events = cpu_to_le16(num_events);

	/* Commands first; opcode cursor advances through rp->opcodes */
	for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
		put_unaligned_le16(mgmt_commands[i], opcode);

	/* ... then events, continuing from the same cursor position */
	for (i = 0; i < num_events; i++, opcode++)
		put_unaligned_le16(mgmt_events[i], opcode);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
			   rp_size);
	kfree(rp);

	return err;
}
358
/* MGMT_OP_READ_INDEX_LIST handler: report the indexes of all
 * configured BR/EDR controllers. Global command, so hdev/data/data_len
 * are unused.
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of reported indexes */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
			count++;
	}

	/* GFP_ATOMIC: the dev list read lock is held across the alloc */
	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in indexes, applying the stricter filters;
	 * the final count can only be <= the first-pass count.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags) ||
		    test_bit(HCI_CONFIG, &d->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Shrink the reply length to the indexes actually stored */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
			   rp_len);

	kfree(rp);

	return err;
}
418
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: like read_index_list() but
 * reports the indexes of *unconfigured* BR/EDR controllers instead.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	BT_DBG("sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: upper bound on the number of reported indexes */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_BREDR &&
		    test_bit(HCI_UNCONFIGURED, &d->dev_flags))
			count++;
	}

	/* GFP_ATOMIC: the dev list read lock is held across the alloc */
	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in indexes, applying the stricter filters */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (test_bit(HCI_SETUP, &d->dev_flags) ||
		    test_bit(HCI_CONFIG, &d->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &d->dev_flags))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_BREDR &&
		    test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
			rp->index[count++] = cpu_to_le16(d->id);
			BT_DBG("Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	/* Shrink the reply length to the indexes actually stored */
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
			   0, rp, rp_len);

	kfree(rp);

	return err;
}
478
479 static bool is_configured(struct hci_dev *hdev)
480 {
481 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
482 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
483 return false;
484
485 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
486 !bacmp(&hdev->public_addr, BDADDR_ANY))
487 return false;
488
489 return true;
490 }
491
/* Build the bitmask of configuration options that are still missing
 * before the controller counts as configured (see is_configured()).
 * Returned in little-endian wire format.
 */
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	/* External config required by quirk but not done yet */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* Default address invalid and no public address set yet */
	if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}
506
507 static int new_options(struct hci_dev *hdev, struct sock *skip)
508 {
509 __le32 options = get_missing_options(hdev);
510
511 return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
512 sizeof(options), skip);
513 }
514
515 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
516 {
517 __le32 options = get_missing_options(hdev);
518
519 return cmd_complete(sk, hdev->id, opcode, 0, &options,
520 sizeof(options));
521 }
522
/* MGMT_OP_READ_CONFIG_INFO handler: report the manufacturer plus the
 * supported and still-missing configuration options for hdev.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	/* External config is supported when the quirk declares it */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* Setting a public address needs a driver set_bdaddr callback */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
			    sizeof(rp));
}
550
/* Build the MGMT_SETTING_* bitmask of everything this controller could
 * support, derived from its LMP features and quirks (contrast with
 * get_current_settings(), which reports what is enabled right now).
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Always available regardless of controller features */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_PAIRABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		/* Fast connectable (interlaced page scan) needs >= 1.2 */
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			settings |= MGMT_SETTING_HS;
		}

		/* HCI_FORCE_SC is a debugfs override for testing */
		if (lmp_sc_capable(hdev) ||
		    test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
			settings |= MGMT_SETTING_SECURE_CONN;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_ADVERTISING;
		settings |= MGMT_SETTING_PRIVACY;
	}

	/* Configuration needs either the external-config quirk or a
	 * driver hook to change the public address.
	 */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	return settings;
}
589
/* Build the MGMT_SETTING_* bitmask of what is currently enabled on
 * hdev, translated one-to-one from the HCI_* dev_flags bits.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
		settings |= MGMT_SETTING_PAIRABLE;

	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_BREDR;

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_LE;

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_SSP;

	if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_HS;

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		settings |= MGMT_SETTING_ADVERTISING;

	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
		settings |= MGMT_SETTING_PRIVACY;

	return settings;
}
638
/* PnP Information service class - always present, so never advertised */
#define PNP_INFO_SVCLASS_ID		0x1200

/* Append an EIR entry listing the registered 16-bit service UUIDs.
 *
 * Writes at most len bytes starting at data and returns the advanced
 * write pointer. The entry's length byte (first byte) is grown as
 * UUIDs are appended; if space runs out the type is downgraded from
 * EIR_UUID16_ALL to EIR_UUID16_SOME.
 */
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for the 2-byte header plus at least one UUID */
	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		/* 16-bit alias lives at offset 12 of the 128-bit form */
		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		/* Emit the EIR header lazily on the first match */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}
682
683 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
684 {
685 u8 *ptr = data, *uuids_start = NULL;
686 struct bt_uuid *uuid;
687
688 if (len < 6)
689 return ptr;
690
691 list_for_each_entry(uuid, &hdev->uuids, list) {
692 if (uuid->size != 32)
693 continue;
694
695 if (!uuids_start) {
696 uuids_start = ptr;
697 uuids_start[0] = 1;
698 uuids_start[1] = EIR_UUID32_ALL;
699 ptr += 2;
700 }
701
702 /* Stop if not enough space to put next UUID */
703 if ((ptr - data) + sizeof(u32) > len) {
704 uuids_start[1] = EIR_UUID32_SOME;
705 break;
706 }
707
708 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
709 ptr += sizeof(u32);
710 uuids_start[0] += sizeof(u32);
711 }
712
713 return ptr;
714 }
715
/* Append an EIR entry listing the registered 128-bit service UUIDs.
 * Same contract as create_uuid16_list(): writes at most len bytes at
 * data, returns the advanced write pointer, and downgrades the entry
 * type to EIR_UUID128_SOME when it runs out of room.
 */
static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	/* Need room for the 2-byte header plus one 128-bit UUID */
	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		/* Emit the EIR header lazily on the first match */
		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}
748
749 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
750 {
751 struct pending_cmd *cmd;
752
753 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
754 if (cmd->opcode == opcode)
755 return cmd;
756 }
757
758 return NULL;
759 }
760
761 static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
762 struct hci_dev *hdev,
763 const void *data)
764 {
765 struct pending_cmd *cmd;
766
767 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
768 if (cmd->user_data != data)
769 continue;
770 if (cmd->opcode == opcode)
771 return cmd;
772 }
773
774 return NULL;
775 }
776
/* Fill ptr with LE scan response data (currently just the local name,
 * shortened if necessary) and return the number of bytes written.
 * ptr must have room for HCI_MAX_AD_LENGTH bytes.
 */
static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		/* Room left after reserving 2 bytes for this entry's
		 * length/type header.
		 */
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;	/* truncated */
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR length byte covers the type byte plus the name */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);	/* kept in sync with ad_len for
					 * any fields appended later */
	}

	return ad_len;
}
802
/* Queue an HCI command updating the LE scan response data, but only
 * if LE is enabled and the data actually changed since last time.
 */
static void update_scan_rsp_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_scan_rsp_data(hdev, cp.data);

	/* Skip the HCI round trip when nothing changed */
	if (hdev->scan_rsp_data_len == len &&
	    memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
		return;

	/* Cache the full (zero-padded) buffer for the next comparison */
	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}
827
828 static u8 get_adv_discov_flags(struct hci_dev *hdev)
829 {
830 struct pending_cmd *cmd;
831
832 /* If there's a pending mgmt command the flags will not yet have
833 * their final values, so check for this first.
834 */
835 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
836 if (cmd) {
837 struct mgmt_mode *cp = cmd->param;
838 if (cp->val == 0x01)
839 return LE_AD_GENERAL;
840 else if (cp->val == 0x02)
841 return LE_AD_LIMITED;
842 } else {
843 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
844 return LE_AD_LIMITED;
845 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
846 return LE_AD_GENERAL;
847 }
848
849 return 0;
850 }
851
/* Fill ptr with LE advertising data (flags entry plus TX power, when
 * known) and return the number of bytes written.
 */
static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;

	flags |= get_adv_discov_flags(hdev);

	/* LE-only controllers must advertise "BR/EDR Not Supported" */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		flags |= LE_AD_NO_BREDR;

	/* Only emit the flags entry when at least one flag is set */
	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}
883
/* Queue an HCI command updating the LE advertising data, but only if
 * LE is enabled and the data actually changed since last time.
 */
static void update_adv_data(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_adv_data(hdev, cp.data);

	/* Skip the HCI round trip when nothing changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	/* Cache the full (zero-padded) buffer for the next comparison */
	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
908
/* Fill data with the BR/EDR extended inquiry response: local name,
 * inquiry TX power, Device ID record and the three UUID lists.
 * data must have room for HCI_MAX_EIR_LENGTH bytes.
 */
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type: names longer than 48 bytes are
		 * truncated and marked as shortened.
		 */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length (type byte plus name) */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	/* Device ID record (source 0 means none has been set) */
	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	/* UUID lists fill whatever space remains */
	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
956
/* Queue a Write Extended Inquiry Response command if the EIR data
 * changed and the controller is in a state where EIR applies.
 */
static void update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	/* EIR needs the Extended Inquiry Response LMP feature */
	if (!lmp_ext_inq_capable(hdev))
		return;

	/* EIR is only transmitted while SSP is enabled */
	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return;

	/* Defer while the service cache is active; service_cache_off()
	 * will call back in here once it expires.
	 */
	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	/* Skip the HCI round trip when nothing changed */
	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
985
986 static u8 get_service_classes(struct hci_dev *hdev)
987 {
988 struct bt_uuid *uuid;
989 u8 val = 0;
990
991 list_for_each_entry(uuid, &hdev->uuids, list)
992 val |= uuid->svc_hint;
993
994 return val;
995 }
996
/* Queue a Write Class of Device command when the computed class
 * differs from what the controller currently has.
 */
static void update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	/* Class of Device is a BR/EDR-only concept */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	/* Defer while the service cache is active (see
	 * service_cache_off()).
	 */
	if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	/* Limited discoverable sets bit 13 of the major class field */
	if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
		cod[1] |= 0x20;

	/* Skip the HCI round trip when nothing changed */
	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
1025
1026 static bool get_connectable(struct hci_dev *hdev)
1027 {
1028 struct pending_cmd *cmd;
1029
1030 /* If there's a pending mgmt command the flag will not yet have
1031 * it's final value, so check for this first.
1032 */
1033 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1034 if (cmd) {
1035 struct mgmt_mode *cp = cmd->param;
1036 return cp->val;
1037 }
1038
1039 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1040 }
1041
1042 static void disable_advertising(struct hci_request *req)
1043 {
1044 u8 enable = 0x00;
1045
1046 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1047 }
1048
/* Queue the HCI commands needed to (re)start LE advertising: set the
 * advertising parameters and then enable advertising. The statement
 * order matters - see the comments below.
 */
static void enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;

	/* Don't touch advertising while an LE connection exists */
	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	/* Parameters can only be changed while advertising is off */
	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	clear_bit(HCI_LE_ADV, &hdev->dev_flags);

	connectable = get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(0x0800);
	cp.max_interval = cpu_to_le16(0x0800);
	cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
1089
/* Delayed work run when the service cache expires (CACHE_TIMEOUT):
 * pushes the deferred EIR and Class of Device updates to the
 * controller in a single request.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);
	struct hci_request req;

	/* test_and_clear so the updates below no longer see the
	 * cache flag and actually queue their commands.
	 */
	if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	hci_dev_lock(hdev);

	update_eir(&req);
	update_class(&req);

	hci_dev_unlock(hdev);

	hci_req_run(&req, NULL);
}
1110
/* Delayed work run when the resolvable private address times out:
 * marks the RPA as expired and, if advertising, re-enables it so a
 * fresh RPA gets generated and programmed.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);
	struct hci_request req;

	BT_DBG("");

	set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	/* Nothing more to do unless we are currently advertising */
	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		return;

	/* The generation of a new RPA and programming it into the
	 * controller happens in the enable_advertising() function.
	 */
	hci_req_init(&req, hdev);
	enable_advertising(&req);
	hci_req_run(&req, NULL);
}
1131
/* One-time per-controller mgmt initialization, performed the first
 * time user space touches the device through the mgmt interface.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	/* test_and_set guarantees the body runs exactly once */
	if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
}
1147
/* MGMT_OP_READ_INFO handler: report address, version, manufacturer,
 * settings, class of device and names for the given controller.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	BT_DBG("sock %p %s", sk, hdev->name);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
			    sizeof(rp));
}
1177
1178 static void mgmt_pending_free(struct pending_cmd *cmd)
1179 {
1180 sock_put(cmd->sk);
1181 kfree(cmd->param);
1182 kfree(cmd);
1183 }
1184
1185 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1186 struct hci_dev *hdev, void *data,
1187 u16 len)
1188 {
1189 struct pending_cmd *cmd;
1190
1191 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1192 if (!cmd)
1193 return NULL;
1194
1195 cmd->opcode = opcode;
1196 cmd->index = hdev->id;
1197
1198 cmd->param = kmalloc(len, GFP_KERNEL);
1199 if (!cmd->param) {
1200 kfree(cmd);
1201 return NULL;
1202 }
1203
1204 if (data)
1205 memcpy(cmd->param, data, len);
1206
1207 cmd->sk = sk;
1208 sock_hold(sk);
1209
1210 list_add(&cmd->list, &hdev->mgmt_pending);
1211
1212 return cmd;
1213 }
1214
1215 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1216 void (*cb)(struct pending_cmd *cmd,
1217 void *data),
1218 void *data)
1219 {
1220 struct pending_cmd *cmd, *tmp;
1221
1222 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1223 if (opcode > 0 && cmd->opcode != opcode)
1224 continue;
1225
1226 cb(cmd, data);
1227 }
1228 }
1229
/* Unlink a pending mgmt command from its controller's list and free it */
static void mgmt_pending_remove(struct pending_cmd *cmd)
{
	list_del(&cmd->list);
	mgmt_pending_free(cmd);
}
1235
/* Send a command-complete response for @opcode back to @sk carrying
 * the controller's current settings bitmask (little-endian).
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return cmd_complete(sk, hdev->id, opcode, 0, &settings,
			    sizeof(settings));
}
1243
1244 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
1245 {
1246 BT_DBG("%s status 0x%02x", hdev->name, status);
1247
1248 if (hci_conn_count(hdev) == 0) {
1249 cancel_delayed_work(&hdev->power_off);
1250 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1251 }
1252 }
1253
/* Queue onto @req the HCI commands needed to stop whatever form of
 * device discovery is currently in progress. Returns true if at least
 * one command was queued, false if discovery was not active.
 */
static bool hci_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;

	switch (hdev->discovery.state) {
	case DISCOVERY_FINDING:
		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			/* BR/EDR inquiry in progress */
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
		} else {
			/* LE scan in progress; also cancel the timer
			 * that would otherwise disable the scan later.
			 */
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		return true;

	case DISCOVERY_RESOLVING:
		/* Name resolution phase: cancel the outstanding remote
		 * name request if any entry is still pending.
		 */
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			break;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);

		return true;

	default:
		/* Passive scanning */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
			hci_req_add_le_scan_disable(req);
			return true;
		}

		break;
	}

	return false;
}
1295
/* Build and run an HCI request that quiesces the controller before
 * power-off: disable page/inquiry scan, stop advertising and
 * discovery, and disconnect, cancel or reject every link depending on
 * its state. Returns the hci_req_run() result (-ENODATA when nothing
 * needed to be queued).
 */
static int clean_up_hci_state(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	bool discov_stopped;
	int err;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_ISCAN, &hdev->flags) ||
	    test_bit(HCI_PSCAN, &hdev->flags)) {
		u8 scan = 0x00;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
		disable_advertising(&req);

	discov_stopped = hci_stop_discovery(&req);

	list_for_each_entry(conn, &hdev->conn_hash.list, list) {
		struct hci_cp_disconnect dc;
		struct hci_cp_reject_conn_req rej;

		switch (conn->state) {
		case BT_CONNECTED:
		case BT_CONFIG:
			/* Established links are disconnected */
			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = 0x15; /* Terminated due to Power Off */
			hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
			break;
		case BT_CONNECT:
			/* Outgoing connection attempts are cancelled */
			if (conn->type == LE_LINK)
				hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
					    0, NULL);
			else if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
					    6, &conn->dst);
			break;
		case BT_CONNECT2:
			/* Incoming connection requests are rejected */
			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = 0x15; /* Terminated due to Power Off */
			if (conn->type == ACL_LINK)
				hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
					    sizeof(rej), &rej);
			else if (conn->type == SCO_LINK)
				hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
					    sizeof(rej), &rej);
			break;
		}
	}

	err = hci_req_run(&req, clean_up_hci_complete);
	if (!err && discov_stopped)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

	return err;
}
1354
/* Handler for the Set Powered mgmt command. Powering on is deferred to
 * the power_on work item; powering off first quiesces the controller
 * via clean_up_hci_state() and then schedules the delayed power-off.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Powered operation may be in flight at a time */
	if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->power_off);

		if (cp->val) {
			/* Controller is already up from auto-power-on;
			 * just take it over for mgmt.
			 */
			mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
					 data, len);
			err = mgmt_powered(hdev, 1);
			goto failed;
		}
	}

	/* Already in the requested state: respond immediately */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		queue_work(hdev->req_workqueue, &hdev->power_on);
		err = 0;
	} else {
		/* Disconnect connections, stop scans, etc */
		err = clean_up_hci_state(hdev);
		if (!err)
			queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
					   HCI_POWER_OFF_TIMEOUT);

		/* ENODATA means there were no HCI commands queued */
		if (err == -ENODATA) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
			err = 0;
		}
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1420
1421 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1422 {
1423 __le32 ev;
1424
1425 ev = cpu_to_le32(get_current_settings(hdev));
1426
1427 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1428 }
1429
/* Context passed to mgmt_pending_foreach() callbacks that need to
 * remember a responding socket and/or carry a status code.
 */
struct cmd_lookup {
	struct sock *sk;	/* first responder; held via sock_hold() */
	struct hci_dev *hdev;
	u8 mgmt_status;
};
1435
/* mgmt_pending_foreach() callback: send a settings response for the
 * pending command, remember the first socket seen in the cmd_lookup
 * context and free the command.
 */
static void settings_rsp(struct pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	/* Unlink manually since mgmt_pending_free() (not _remove) is
	 * called below.
	 */
	list_del(&cmd->list);

	if (match->sk == NULL) {
		/* Hold a reference so the caller can still use the
		 * socket (e.g. for New Settings) after the free.
		 */
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1451
/* mgmt_pending_foreach() callback: fail the pending command with the
 * status code pointed to by @data and remove it from the list.
 */
static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
{
	u8 *status = data;

	cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1459
1460 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1461 {
1462 if (!lmp_bredr_capable(hdev))
1463 return MGMT_STATUS_NOT_SUPPORTED;
1464 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1465 return MGMT_STATUS_REJECTED;
1466 else
1467 return MGMT_STATUS_SUCCESS;
1468 }
1469
1470 static u8 mgmt_le_support(struct hci_dev *hdev)
1471 {
1472 if (!lmp_le_capable(hdev))
1473 return MGMT_STATUS_NOT_SUPPORTED;
1474 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1475 return MGMT_STATUS_REJECTED;
1476 else
1477 return MGMT_STATUS_SUCCESS;
1478 }
1479
/* HCI request completion callback for set_discoverable(). Syncs the
 * HCI_DISCOVERABLE flag with the result, responds to the pending mgmt
 * command, arms the discoverable timeout and refreshes the class of
 * device.
 */
static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	struct hci_request req;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		/* Roll back the flag set optimistically by the request */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE,
					    &hdev->dev_flags);

		/* Arm the timeout stored by set_discoverable() */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else {
		changed = test_and_clear_bit(HCI_DISCOVERABLE,
					     &hdev->dev_flags);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);

	if (changed)
		new_settings(hdev, cmd->sk);

	/* When the discoverable mode gets changed, make sure
	 * that class of device has the limited discoverable
	 * bit correctly set.
	 */
	hci_req_init(&req, hdev);
	update_class(&req);
	hci_req_run(&req, NULL);

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1536
/* Handler for the Set Discoverable mgmt command. Validates mode (0x00
 * off, 0x01 general, 0x02 limited) and timeout, handles the powered-off
 * and no-change cases inline, and otherwise builds an HCI request
 * (IAC LAP, scan enable, advertising data) completed by
 * set_discoverable_complete().
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u16 timeout;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable requires connectable to be enabled first */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				 MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
			change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
					  &hdev->dev_flags)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	else
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	/* The procedure for LE-only controllers is much simpler - just
	 * update the advertising data.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		goto update_ad;

	scan = SCAN_PAGE;

	if (cp->val) {
		struct hci_cp_write_current_iac_lap hci_cp;

		if (cp->val == 0x02) {
			/* Limited discoverable mode */
			hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
			hci_cp.iac_lap[0] = 0x00;	/* LIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
			hci_cp.iac_lap[3] = 0x33;	/* GIAC */
			hci_cp.iac_lap[4] = 0x8b;
			hci_cp.iac_lap[5] = 0x9e;
		} else {
			/* General discoverable mode */
			hci_cp.num_iac = 1;
			hci_cp.iac_lap[0] = 0x33;	/* GIAC */
			hci_cp.iac_lap[1] = 0x8b;
			hci_cp.iac_lap[2] = 0x9e;
		}

		hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
			    (hci_cp.num_iac * 3) + 1, &hci_cp);

		scan |= SCAN_INQUIRY;
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

update_ad:
	update_adv_data(&req);

	err = hci_req_run(&req, set_discoverable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1701
1702 static void write_fast_connectable(struct hci_request *req, bool enable)
1703 {
1704 struct hci_dev *hdev = req->hdev;
1705 struct hci_cp_write_page_scan_activity acp;
1706 u8 type;
1707
1708 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1709 return;
1710
1711 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1712 return;
1713
1714 if (enable) {
1715 type = PAGE_SCAN_TYPE_INTERLACED;
1716
1717 /* 160 msec page scan interval */
1718 acp.interval = cpu_to_le16(0x0100);
1719 } else {
1720 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1721
1722 /* default 1.28 sec page scan */
1723 acp.interval = cpu_to_le16(0x0800);
1724 }
1725
1726 acp.window = cpu_to_le16(0x0012);
1727
1728 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1729 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1730 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1731 sizeof(acp), &acp);
1732
1733 if (hdev->page_scan_type != type)
1734 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1735 }
1736
/* HCI request completion callback for set_connectable(). Syncs the
 * HCI_CONNECTABLE flag with the result, responds to the pending mgmt
 * command and, on an actual change, broadcasts New Settings and
 * refreshes the background scan.
 */
static void set_connectable_complete(struct hci_dev *hdev, u8 status)
{
	struct pending_cmd *cmd;
	struct mgmt_mode *cp;
	bool changed;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
	if (!cmd)
		goto unlock;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto remove_cmd;
	}

	cp = cmd->param;
	if (cp->val)
		changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);

	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);

	if (changed) {
		new_settings(hdev, cmd->sk);
		hci_update_background_scan(hdev);
	}

remove_cmd:
	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
1776
1777 static int set_connectable_update_settings(struct hci_dev *hdev,
1778 struct sock *sk, u8 val)
1779 {
1780 bool changed = false;
1781 int err;
1782
1783 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1784 changed = true;
1785
1786 if (val) {
1787 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1788 } else {
1789 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1790 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1791 }
1792
1793 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1794 if (err < 0)
1795 return err;
1796
1797 if (changed) {
1798 hci_update_background_scan(hdev);
1799 return new_settings(hdev, sk);
1800 }
1801
1802 return 0;
1803 }
1804
/* Handler for the Set Connectable mgmt command. When powered off only
 * the settings flags are touched; otherwise an HCI request is built
 * (scan enable / advertising data / fast-connectable rollback) and
 * completed by set_connectable_complete().
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 scan;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hci_req_init(&req, hdev);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (!cp->val) {
			clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		}
		update_adv_data(&req);
	} else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
		if (cp->val) {
			scan = SCAN_PAGE;
		} else {
			scan = 0;

			/* Disabling page scan would make the device
			 * unreachable, so stop any pending discoverable
			 * timeout as well.
			 */
			if (test_bit(HCI_ISCAN, &hdev->flags) &&
			    hdev->discov_timeout > 0)
				cancel_delayed_work(&hdev->discov_off);
		}

		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	}

	/* If we're going from non-connectable to connectable or
	 * vice-versa when fast connectable is enabled ensure that fast
	 * connectable gets disabled. write_fast_connectable won't do
	 * anything if the page scan parameters are already what they
	 * should be.
	 */
	if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
		write_fast_connectable(&req, false);

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
	    !test_bit(HCI_LE_ADV, &hdev->dev_flags))
		enable_advertising(&req);

	err = hci_req_run(&req, set_connectable_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* -ENODATA: nothing queued, fall back to settings-only */
		if (err == -ENODATA)
			err = set_connectable_update_settings(hdev, sk,
							      cp->val);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1897
/* Handler for the Set Pairable mgmt command. This is a host-side flag
 * only, so no HCI commands are sent; toggle HCI_PAIRABLE, respond and
 * broadcast New Settings on change.
 */
static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);

	err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1929
/* Handler for the Set Link Security mgmt command. Maps onto the HCI
 * Write Authentication Enable command (BR/EDR only). When the
 * controller is powered off only the HCI_LINK_SECURITY flag is
 * toggled.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != test_bit(HCI_LINK_SECURITY,
					  &hdev->dev_flags)) {
			change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state: no HCI needed */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1999
/* Handler for the Set Secure Simple Pairing mgmt command. Maps onto
 * the HCI Write Simple Pairing Mode command; when powered off only the
 * HCI_SSP_ENABLED flag (and, on disable, HCI_HS_ENABLED) is toggled.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SSP_ENABLED,
						    &hdev->dev_flags);
		} else {
			/* Disabling SSP implies disabling High Speed
			 * as well; report "changed" if either flag was
			 * actually cleared.
			 */
			changed = test_and_clear_bit(HCI_SSP_ENABLED,
						     &hdev->dev_flags);
			if (!changed)
				changed = test_and_clear_bit(HCI_HS_ENABLED,
							     &hdev->dev_flags);
			else
				clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Turning SSP off also turns off the use of debug keys */
	if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(cp->val), &cp->val);

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
2081
/* Handler for the Set High Speed mgmt command. HS is a host-side
 * setting layered on top of SSP, so no HCI commands are sent here;
 * only the HCI_HS_ENABLED flag is toggled. Disabling while powered
 * is rejected.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* High Speed requires Secure Simple Pairing to be enabled */
	if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val) {
		changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	} else {
		if (hdev_is_powered(hdev)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					 MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2132
/* HCI request completion callback for set_le(). Responds to all
 * pending Set LE commands, broadcasts New Settings and, when LE ended
 * up enabled, refreshes advertising/scan response data and the
 * background scan.
 */
static void le_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp() took a reference on the first socket it saw */
	if (match.sk)
		sock_put(match.sk);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		struct hci_request req;

		hci_dev_lock(hdev);

		hci_req_init(&req, hdev);
		update_adv_data(&req);
		update_scan_rsp_data(&req);
		hci_req_run(&req, NULL);

		hci_update_background_scan(hdev);

		hci_dev_unlock(hdev);
	}
}
2172
/* Handler for the Set Low Energy mgmt command. Maps onto the HCI
 * Write LE Host Supported command; when powered off (or the host LE
 * support already matches) only the HCI_LE_ENABLED flag is toggled.
 * LE-only controllers reject the command since LE cannot be turned
 * off on them.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct hci_cp_write_le_host_supported hci_cp;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;
	u8 val, enabled;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_INVALID_PARAMS);

	/* LE-only devices do not allow toggling LE on/off */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
			changed = true;
		}

		/* Disabling LE implies disabling advertising too */
		if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	memset(&hci_cp, 0, sizeof(hci_cp));

	if (val) {
		hci_cp.le = val;
		hci_cp.simul = lmp_le_br_capable(hdev);
	} else {
		/* Stop any active advertising before disabling LE */
		if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
			disable_advertising(&req);
	}

	hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
		    &hci_cp);

	err = hci_req_run(&req, le_enable_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2261
2262 /* This is a helper function to test for pending mgmt commands that can
2263 * cause CoD or EIR HCI commands. We can only allow one such pending
2264 * mgmt command at a time since otherwise we cannot easily track what
2265 * the current values are, will be, and based on that calculate if a new
2266 * HCI command needs to be sent and if yes with what value.
2267 */
2268 static bool pending_eir_or_class(struct hci_dev *hdev)
2269 {
2270 struct pending_cmd *cmd;
2271
2272 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2273 switch (cmd->opcode) {
2274 case MGMT_OP_ADD_UUID:
2275 case MGMT_OP_REMOVE_UUID:
2276 case MGMT_OP_SET_DEV_CLASS:
2277 case MGMT_OP_SET_POWERED:
2278 return true;
2279 }
2280 }
2281
2282 return false;
2283 }
2284
/* The Bluetooth Base UUID in little-endian byte order. 16- and 32-bit
 * UUIDs only differ from it in bytes 12-15 (see get_uuid_size()).
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2289
2290 static u8 get_uuid_size(const u8 *uuid)
2291 {
2292 u32 val;
2293
2294 if (memcmp(uuid, bluetooth_base_uuid, 12))
2295 return 128;
2296
2297 val = get_unaligned_le32(&uuid[12]);
2298 if (val > 0xffff)
2299 return 32;
2300
2301 return 16;
2302 }
2303
/* Shared completion helper for class/EIR related mgmt commands:
 * respond to the pending command @mgmt_op with the (possibly updated)
 * class of device and remove it from the pending list.
 */
static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
{
	struct pending_cmd *cmd;

	hci_dev_lock(hdev);

	cmd = mgmt_pending_find(mgmt_op, hdev);
	if (!cmd)
		goto unlock;

	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     hdev->dev_class, 3);

	mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
}
2322
/* HCI request completion callback for add_uuid() */
static void add_uuid_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("status 0x%02x", status);

	mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
}
2329
/* Handler for the Add UUID mgmt command. Appends the UUID to the
 * controller's list and refreshes the class of device and EIR data.
 * The reply is normally deferred to add_uuid_complete(); when no HCI
 * commands were needed (-ENODATA) it is sent immediately.
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	struct bt_uuid *uuid;
	int err;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one class/EIR changing command may be pending at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, add_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto failed;

		/* Nothing needed updating; reply right away */
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
				   hdev->dev_class, 3);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = 0;

failed:
	hci_dev_unlock(hdev);
	return err;
}
2387
2388 static bool enable_service_cache(struct hci_dev *hdev)
2389 {
2390 if (!hdev_is_powered(hdev))
2391 return false;
2392
2393 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2394 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2395 CACHE_TIMEOUT);
2396 return true;
2397 }
2398
2399 return false;
2400 }
2401
2402 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
2403 {
2404 BT_DBG("status 0x%02x", status);
2405
2406 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
2407 }
2408
/* Remove UUID mgmt command handler: delete one UUID (or all, when the
 * all-zero wildcard UUID is given) and refresh class/EIR via an HCI
 * request. Response is deferred to remove_uuid_complete() unless no
 * HCI traffic is needed.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* All-zero UUID acts as a wildcard meaning "remove everything" */
	u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct hci_request req;
	int err, found;

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	/* Only one EIR/class update may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache timer got armed, the class/EIR
		 * update is postponed until it fires; reply right away.
		 */
		if (enable_service_cache(hdev)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
					   0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	/* _safe variant required since matching entries are deleted */
	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	hci_req_init(&req, hdev);

	update_class(&req);
	update_eir(&req);

	err = hci_req_run(&req, remove_uuid_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* No HCI commands were needed; complete immediately */
		err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	/* Defer the mgmt response until the HCI request finishes */
	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2486
2487 static void set_class_complete(struct hci_dev *hdev, u8 status)
2488 {
2489 BT_DBG("status 0x%02x", status);
2490
2491 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
2492 }
2493
/* Set Device Class mgmt command handler: update major/minor class and
 * push the new class (and possibly EIR) to the controller. Completion
 * is deferred to set_class_complete() when HCI traffic is generated.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one EIR/class update may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Low 2 bits of minor and high 3 bits of major are reserved and
	 * must be zero (they overlap the format/service-class fields).
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* Powered off: remember the values but send nothing */
	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
		/* Drop the lock while synchronously cancelling the
		 * service-cache work, since that work itself takes the
		 * dev lock and waiting with it held would deadlock.
		 */
		hci_dev_unlock(hdev);
		cancel_delayed_work_sync(&hdev->service_cache);
		hci_dev_lock(hdev);
		update_eir(&req);
	}

	update_class(&req);

	err = hci_req_run(&req, set_class_complete);
	if (err < 0) {
		if (err != -ENODATA)
			goto unlock;

		/* No HCI commands were needed; complete immediately */
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
				   hdev->dev_class, 3);
		goto unlock;
	}

	/* Defer the mgmt response until the HCI request finishes */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2564
/* Load Link Keys mgmt command handler: replace the adapter's stored
 * BR/EDR link keys with the supplied list and update the debug-keys
 * policy. All parameters are validated before any state is touched.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound that keeps expected_len below U16_MAX, so the
	 * length arithmetic further down cannot overflow.
	 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_link_keys: too big key_count value %u",
		       key_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The declared key count must match the actual payload size */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_link_key_info);
	if (expected_len != len) {
		BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
	       key_count);

	/* Validate every entry up front so the operation is all-or-nothing */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
					     &hdev->dev_flags);

	/* Notify other mgmt sockets if the debug-keys setting flipped */
	if (changed)
		new_settings(hdev, NULL);

	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2646
2647 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2648 u8 addr_type, struct sock *skip_sk)
2649 {
2650 struct mgmt_ev_device_unpaired ev;
2651
2652 bacpy(&ev.addr.bdaddr, bdaddr);
2653 ev.addr.type = addr_type;
2654
2655 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2656 skip_sk);
2657 }
2658
2659 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2660 u16 len)
2661 {
2662 struct mgmt_cp_unpair_device *cp = data;
2663 struct mgmt_rp_unpair_device rp;
2664 struct hci_cp_disconnect dc;
2665 struct pending_cmd *cmd;
2666 struct hci_conn *conn;
2667 int err;
2668
2669 memset(&rp, 0, sizeof(rp));
2670 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2671 rp.addr.type = cp->addr.type;
2672
2673 if (!bdaddr_type_is_valid(cp->addr.type))
2674 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2675 MGMT_STATUS_INVALID_PARAMS,
2676 &rp, sizeof(rp));
2677
2678 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2679 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2680 MGMT_STATUS_INVALID_PARAMS,
2681 &rp, sizeof(rp));
2682
2683 hci_dev_lock(hdev);
2684
2685 if (!hdev_is_powered(hdev)) {
2686 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2687 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2688 goto unlock;
2689 }
2690
2691 if (cp->addr.type == BDADDR_BREDR) {
2692 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2693 } else {
2694 u8 addr_type;
2695
2696 if (cp->addr.type == BDADDR_LE_PUBLIC)
2697 addr_type = ADDR_LE_DEV_PUBLIC;
2698 else
2699 addr_type = ADDR_LE_DEV_RANDOM;
2700
2701 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2702
2703 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2704
2705 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
2706 }
2707
2708 if (err < 0) {
2709 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2710 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2711 goto unlock;
2712 }
2713
2714 if (cp->disconnect) {
2715 if (cp->addr.type == BDADDR_BREDR)
2716 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2717 &cp->addr.bdaddr);
2718 else
2719 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2720 &cp->addr.bdaddr);
2721 } else {
2722 conn = NULL;
2723 }
2724
2725 if (!conn) {
2726 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2727 &rp, sizeof(rp));
2728 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2729 goto unlock;
2730 }
2731
2732 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2733 sizeof(*cp));
2734 if (!cmd) {
2735 err = -ENOMEM;
2736 goto unlock;
2737 }
2738
2739 dc.handle = cpu_to_le16(conn->handle);
2740 dc.reason = 0x13; /* Remote User Terminated Connection */
2741 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2742 if (err < 0)
2743 mgmt_pending_remove(cmd);
2744
2745 unlock:
2746 hci_dev_unlock(hdev);
2747 return err;
2748 }
2749
/* Disconnect mgmt command handler: tear down the ACL or LE link to a
 * remote device. The mgmt response is deferred until the disconnect
 * complete event arrives.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct hci_cp_disconnect dc;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto failed;
	}

	/* Only one Disconnect command may be pending at a time */
	if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	/* BT_OPEN/BT_CLOSED connections have no link to disconnect */
	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto failed;
	}

	/* Defer the response until the disconnect completes */
	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	dc.handle = cpu_to_le16(conn->handle);
	dc.reason = HCI_ERROR_REMOTE_USER_TERM;

	err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2814
2815 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2816 {
2817 switch (link_type) {
2818 case LE_LINK:
2819 switch (addr_type) {
2820 case ADDR_LE_DEV_PUBLIC:
2821 return BDADDR_LE_PUBLIC;
2822
2823 default:
2824 /* Fallback to LE Random address type */
2825 return BDADDR_LE_RANDOM;
2826 }
2827
2828 default:
2829 /* Fallback to BR/EDR type */
2830 return BDADDR_BREDR;
2831 }
2832 }
2833
2834 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2835 u16 data_len)
2836 {
2837 struct mgmt_rp_get_connections *rp;
2838 struct hci_conn *c;
2839 size_t rp_len;
2840 int err;
2841 u16 i;
2842
2843 BT_DBG("");
2844
2845 hci_dev_lock(hdev);
2846
2847 if (!hdev_is_powered(hdev)) {
2848 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2849 MGMT_STATUS_NOT_POWERED);
2850 goto unlock;
2851 }
2852
2853 i = 0;
2854 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2855 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2856 i++;
2857 }
2858
2859 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2860 rp = kmalloc(rp_len, GFP_KERNEL);
2861 if (!rp) {
2862 err = -ENOMEM;
2863 goto unlock;
2864 }
2865
2866 i = 0;
2867 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2868 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2869 continue;
2870 bacpy(&rp->addr[i].bdaddr, &c->dst);
2871 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2872 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2873 continue;
2874 i++;
2875 }
2876
2877 rp->conn_count = cpu_to_le16(i);
2878
2879 /* Recalculate length in case of filtered SCO connections, etc */
2880 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2881
2882 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2883 rp_len);
2884
2885 kfree(rp);
2886
2887 unlock:
2888 hci_dev_unlock(hdev);
2889 return err;
2890 }
2891
2892 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2893 struct mgmt_cp_pin_code_neg_reply *cp)
2894 {
2895 struct pending_cmd *cmd;
2896 int err;
2897
2898 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2899 sizeof(*cp));
2900 if (!cmd)
2901 return -ENOMEM;
2902
2903 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2904 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2905 if (err < 0)
2906 mgmt_pending_remove(cmd);
2907
2908 return err;
2909 }
2910
/* PIN Code Reply mgmt command handler: forward a user-supplied PIN to
 * the controller. A high-security connection requires a full 16-byte
 * PIN; otherwise a negative reply is sent automatically.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct pending_cmd *cmd;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				 MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security demands a 16-digit PIN; reject anything shorter
	 * by sending a negative reply on the user's behalf.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		BT_ERR("PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					 MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	/* Defer the response until the HCI command status/event arrives */
	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2970
2971 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2972 u16 len)
2973 {
2974 struct mgmt_cp_set_io_capability *cp = data;
2975
2976 BT_DBG("");
2977
2978 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2979 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2980 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
2981
2982 hci_dev_lock(hdev);
2983
2984 hdev->io_capability = cp->io_capability;
2985
2986 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2987 hdev->io_capability);
2988
2989 hci_dev_unlock(hdev);
2990
2991 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
2992 0);
2993 }
2994
2995 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2996 {
2997 struct hci_dev *hdev = conn->hdev;
2998 struct pending_cmd *cmd;
2999
3000 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3001 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3002 continue;
3003
3004 if (cmd->user_data != conn)
3005 continue;
3006
3007 return cmd;
3008 }
3009
3010 return NULL;
3011 }
3012
/* Finish a pending Pair Device command: send the mgmt response, detach
 * the connection callbacks, drop the connection reference and free the
 * pending command.
 */
static void pairing_complete(struct pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
		     &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	/* Release the reference taken when pairing was initiated */
	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}
3033
3034 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3035 {
3036 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3037 struct pending_cmd *cmd;
3038
3039 cmd = find_pairing(conn);
3040 if (cmd)
3041 pairing_complete(cmd, status);
3042 }
3043
3044 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3045 {
3046 struct pending_cmd *cmd;
3047
3048 BT_DBG("status %u", status);
3049
3050 cmd = find_pairing(conn);
3051 if (!cmd)
3052 BT_DBG("Unable to find a pending command");
3053 else
3054 pairing_complete(cmd, mgmt_status(status));
3055 }
3056
3057 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3058 {
3059 struct pending_cmd *cmd;
3060
3061 BT_DBG("status %u", status);
3062
3063 if (!status)
3064 return;
3065
3066 cmd = find_pairing(conn);
3067 if (!cmd)
3068 BT_DBG("Unable to find a pending command");
3069 else
3070 pairing_complete(cmd, mgmt_status(status));
3071 }
3072
/* Pair Device mgmt command handler: initiate BR/EDR or LE pairing with
 * a remote device. The response is deferred and delivered through
 * pairing_complete() when the pairing callbacks fire.
 */
static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_pair_device *cp = data;
	struct mgmt_rp_pair_device rp;
	struct pending_cmd *cmd;
	u8 sec_level, auth_type;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
		return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	sec_level = BT_SECURITY_MEDIUM;
	auth_type = HCI_AT_DEDICATED_BONDING;

	if (cp->addr.type == BDADDR_BREDR) {
		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
				       auth_type);
	} else {
		u8 addr_type;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		/* When pairing a new device, it is expected to remember
		 * this device for future connections. Adding the connection
		 * parameter information ahead of time allows tracking
		 * of the slave preferred values and will speed up any
		 * further connection establishment.
		 *
		 * If connection parameters already exist, then they
		 * will be kept and this function does nothing.
		 */
		hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);

		/* Request a connection with master = true role */
		conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
				      sec_level, HCI_LE_CONN_TIMEOUT, true);
	}

	if (IS_ERR(conn)) {
		int status;

		if (PTR_ERR(conn) == -EBUSY)
			status = MGMT_STATUS_BUSY;
		else
			status = MGMT_STATUS_CONNECT_FAILED;

		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   status, &rp,
				   sizeof(rp));
		goto unlock;
	}

	/* A connect callback already being set means another pairing is
	 * in progress on this connection.
	 */
	if (conn->connect_cfm_cb) {
		hci_conn_drop(conn);
		err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
				   MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		hci_conn_drop(conn);
		goto unlock;
	}

	/* For LE, just connecting isn't a proof that the pairing finished */
	if (cp->addr.type == BDADDR_BREDR) {
		conn->connect_cfm_cb = pairing_complete_cb;
		conn->security_cfm_cb = pairing_complete_cb;
		conn->disconn_cfm_cb = pairing_complete_cb;
	} else {
		conn->connect_cfm_cb = le_pairing_complete_cb;
		conn->security_cfm_cb = le_pairing_complete_cb;
		conn->disconn_cfm_cb = le_pairing_complete_cb;
	}

	conn->io_capability = cp->io_cap;
	cmd->user_data = conn;

	/* Already connected and secure enough: complete immediately */
	if (conn->state == BT_CONNECTED &&
	    hci_conn_security(conn, sec_level, auth_type))
		pairing_complete(cmd, 0);

	err = 0;

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3191
/* Cancel Pair Device mgmt command handler: abort an in-progress Pair
 * Device operation for the given address, completing it with a
 * Cancelled status.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	/* There must be an outstanding Pair Device command to cancel */
	cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The supplied address must match the pairing in progress */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				 MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	pairing_complete(cmd, MGMT_STATUS_CANCELLED);

	err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
			   addr, sizeof(*addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
3233
/* Common handler for all user pairing responses (PIN neg reply, user
 * confirm/passkey pos+neg replies). LE responses are routed to the SMP
 * layer; BR/EDR responses are forwarded to the controller as the given
 * HCI command, with the mgmt response deferred to the event handler.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_POWERED, addr,
				   sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);

	if (!conn) {
		err = cmd_complete(sk, hdev->id, mgmt_op,
				   MGMT_STATUS_NOT_CONNECTED, addr,
				   sizeof(*addr));
		goto done;
	}

	/* LE pairing responses are handled entirely by SMP; the mgmt
	 * response can be sent synchronously.
	 */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_SUCCESS, addr,
					   sizeof(*addr));
		else
			err = cmd_complete(sk, hdev->id, mgmt_op,
					   MGMT_STATUS_FAILED, addr,
					   sizeof(*addr));

		goto done;
	}

	/* Defer the response until the HCI command completes */
	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3301
3302 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3303 void *data, u16 len)
3304 {
3305 struct mgmt_cp_pin_code_neg_reply *cp = data;
3306
3307 BT_DBG("");
3308
3309 return user_pairing_resp(sk, hdev, &cp->addr,
3310 MGMT_OP_PIN_CODE_NEG_REPLY,
3311 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3312 }
3313
3314 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3315 u16 len)
3316 {
3317 struct mgmt_cp_user_confirm_reply *cp = data;
3318
3319 BT_DBG("");
3320
3321 if (len != sizeof(*cp))
3322 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3323 MGMT_STATUS_INVALID_PARAMS);
3324
3325 return user_pairing_resp(sk, hdev, &cp->addr,
3326 MGMT_OP_USER_CONFIRM_REPLY,
3327 HCI_OP_USER_CONFIRM_REPLY, 0);
3328 }
3329
3330 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3331 void *data, u16 len)
3332 {
3333 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3334
3335 BT_DBG("");
3336
3337 return user_pairing_resp(sk, hdev, &cp->addr,
3338 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3339 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3340 }
3341
3342 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3343 u16 len)
3344 {
3345 struct mgmt_cp_user_passkey_reply *cp = data;
3346
3347 BT_DBG("");
3348
3349 return user_pairing_resp(sk, hdev, &cp->addr,
3350 MGMT_OP_USER_PASSKEY_REPLY,
3351 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3352 }
3353
3354 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3355 void *data, u16 len)
3356 {
3357 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3358
3359 BT_DBG("");
3360
3361 return user_pairing_resp(sk, hdev, &cp->addr,
3362 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3363 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3364 }
3365
3366 static void update_name(struct hci_request *req)
3367 {
3368 struct hci_dev *hdev = req->hdev;
3369 struct hci_cp_write_local_name cp;
3370
3371 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3372
3373 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
3374 }
3375
3376 static void set_name_complete(struct hci_dev *hdev, u8 status)
3377 {
3378 struct mgmt_cp_set_local_name *cp;
3379 struct pending_cmd *cmd;
3380
3381 BT_DBG("status 0x%02x", status);
3382
3383 hci_dev_lock(hdev);
3384
3385 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3386 if (!cmd)
3387 goto unlock;
3388
3389 cp = cmd->param;
3390
3391 if (status)
3392 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3393 mgmt_status(status));
3394 else
3395 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3396 cp, sizeof(*cp));
3397
3398 mgmt_pending_remove(cmd);
3399
3400 unlock:
3401 hci_dev_unlock(hdev);
3402 }
3403
/* Set Local Name mgmt command handler: update the friendly and short
 * names, pushing them to the controller (name + EIR for BR/EDR, scan
 * response data for LE) when powered.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("");

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* Powered off: store the name and notify listeners, no HCI */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				   data, len);
		if (err < 0)
			goto failed;

		err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
				 sk);

		goto failed;
	}

	/* Defer the mgmt response until the HCI request finishes */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

	hci_req_init(&req, hdev);

	if (lmp_bredr_capable(hdev)) {
		update_name(&req);
		update_eir(&req);
	}

	/* The name is stored in the scan response data and so
	 * no need to update the advertising data here.
	 */
	if (lmp_le_capable(hdev))
		update_scan_rsp_data(&req);

	err = hci_req_run(&req, set_name_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:	/* NOTE: reached on success as well; just drops the lock */
	hci_dev_unlock(hdev);
	return err;
}
3472
/* Read Local OOB Data mgmt command handler: request the controller's
 * out-of-band pairing data (extended variant when Secure Connections
 * is enabled). Response deferred to the HCI event handler.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct pending_cmd *cmd;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	/* Only one Read Local OOB Data may be pending at a time */
	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* With Secure Connections both P-192 and P-256 values are needed */
	if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
				   0, NULL);
	else
		err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);

	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3520
/* Add Remote OOB Data mgmt command handler: store out-of-band pairing
 * data received for a remote device. The payload size selects between
 * the legacy (P-192 only) and extended (P-192 + P-256) formats.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	int err;

	BT_DBG("%s ", hdev->name);

	hci_dev_lock(hdev);

	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->hash, cp->randomizer);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 status;

		err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
						  cp->hash192,
						  cp->randomizer192,
						  cp->hash256,
						  cp->randomizer256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				   status, &cp->addr, sizeof(cp->addr));
	} else {
		/* Neither known payload size: reject */
		BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
		err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				 MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_unlock(hdev);
	return err;
}
3568
3569 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3570 void *data, u16 len)
3571 {
3572 struct mgmt_cp_remove_remote_oob_data *cp = data;
3573 u8 status;
3574 int err;
3575
3576 BT_DBG("%s", hdev->name);
3577
3578 hci_dev_lock(hdev);
3579
3580 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3581 if (err < 0)
3582 status = MGMT_STATUS_INVALID_PARAMS;
3583 else
3584 status = MGMT_STATUS_SUCCESS;
3585
3586 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3587 status, &cp->addr, sizeof(cp->addr));
3588
3589 hci_dev_unlock(hdev);
3590 return err;
3591 }
3592
3593 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3594 {
3595 struct pending_cmd *cmd;
3596 u8 type;
3597 int err;
3598
3599 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3600
3601 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3602 if (!cmd)
3603 return -ENOENT;
3604
3605 type = hdev->discovery.type;
3606
3607 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3608 &type, sizeof(type));
3609 mgmt_pending_remove(cmd);
3610
3611 return err;
3612 }
3613
/* Request callback for the HCI commands issued by start_discovery().
 * On failure, fail the pending mgmt command; on success, move to the
 * FINDING state and, for LE-based discovery types, arm the delayed
 * work that disables the LE scan when the discovery period expires.
 */
static void start_discovery_complete(struct hci_dev *hdev, u8 status)
{
	unsigned long timeout = 0;

	BT_DBG("status %d", status);

	if (status) {
		hci_dev_lock(hdev);
		mgmt_start_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
	hci_dev_unlock(hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		break;

	case DISCOV_TYPE_BREDR:
		/* BR/EDR inquiry is bounded by the controller itself,
		 * so no LE scan-disable timer is needed.
		 */
		break;

	default:
		BT_ERR("Invalid discovery type %d", hdev->discovery.type);
	}

	/* Zero timeout means nothing to schedule (pure BR/EDR case) */
	if (!timeout)
		return;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
}
3652
/* Start Discovery command handler. Validates the device and discovery
 * state, then builds and runs an HCI request appropriate for the
 * requested discovery type: a BR/EDR inquiry, an LE active scan, or
 * both (interleaved). Completion is handled asynchronously in
 * start_discovery_complete().
 */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct pending_cmd *cmd;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_cp_inquiry inq_cp;
	struct hci_request req;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	u8 status, own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	/* Periodic inquiry and an already-active discovery both
	 * conflict with starting a new discovery session.
	 */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	if (hdev->discovery.state != DISCOVERY_STOPPED) {
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	hdev->discovery.type = cp->type;

	hci_req_init(&req, hdev);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		status = mgmt_bredr_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_BUSY);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Drop stale results before starting a new inquiry */
		hci_inquiry_cache_flush(hdev);

		memset(&inq_cp, 0, sizeof(inq_cp));
		memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
		inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
		break;

	case DISCOV_TYPE_LE:
	case DISCOV_TYPE_INTERLEAVED:
		status = mgmt_le_support(hdev);
		if (status) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 status);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		/* Interleaved discovery also needs BR/EDR enabled */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_NOT_SUPPORTED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
			/* Don't let discovery abort an outgoing
			 * connection attempt that's using directed
			 * advertising.
			 */
			if (hci_conn_hash_lookup_state(hdev, LE_LINK,
						       BT_CONNECT)) {
				err = cmd_status(sk, hdev->id,
						 MGMT_OP_START_DISCOVERY,
						 MGMT_STATUS_REJECTED);
				mgmt_pending_remove(cmd);
				goto failed;
			}

			disable_advertising(&req);
		}

		/* If controller is scanning, it means the background scanning
		 * is running. Thus, we should temporarily stop it in order to
		 * set the discovery scanning parameters.
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		memset(&param_cp, 0, sizeof(param_cp));

		/* All active scans will be done with either a resolvable
		 * private address (when privacy feature has been enabled)
		 * or unresolvable private address.
		 */
		err = hci_update_random_address(&req, true, &own_addr_type);
		if (err < 0) {
			err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
					 MGMT_STATUS_FAILED);
			mgmt_pending_remove(cmd);
			goto failed;
		}

		param_cp.type = LE_SCAN_ACTIVE;
		param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
		param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
		param_cp.own_address_type = own_addr_type;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
		break;

	default:
		err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
				 MGMT_STATUS_INVALID_PARAMS);
		mgmt_pending_remove(cmd);
		goto failed;
	}

	err = hci_req_run(&req, start_discovery_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);
	else
		hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
3811
3812 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3813 {
3814 struct pending_cmd *cmd;
3815 int err;
3816
3817 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3818 if (!cmd)
3819 return -ENOENT;
3820
3821 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3822 &hdev->discovery.type, sizeof(hdev->discovery.type));
3823 mgmt_pending_remove(cmd);
3824
3825 return err;
3826 }
3827
3828 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3829 {
3830 BT_DBG("status %d", status);
3831
3832 hci_dev_lock(hdev);
3833
3834 if (status) {
3835 mgmt_stop_discovery_failed(hdev, status);
3836 goto unlock;
3837 }
3838
3839 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3840
3841 unlock:
3842 hci_dev_unlock(hdev);
3843 }
3844
/* Stop Discovery command handler. The supplied type must match the
 * discovery type currently in progress; the actual teardown of
 * inquiry/name resolution/LE scanning is delegated to
 * hci_stop_discovery() and completed in stop_discovery_complete().
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_REJECTED, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The requested type must match what was started */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
				   MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
				   sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	hci_stop_discovery(&req);

	err = hci_req_run(&req, stop_discovery_complete);
	if (!err) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
		goto unlock;
	}

	mgmt_pending_remove(cmd);

	/* If no HCI commands were sent we're done */
	if (err == -ENODATA) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
				   &mgmt_cp->type, sizeof(mgmt_cp->type));
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3900
3901 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3902 u16 len)
3903 {
3904 struct mgmt_cp_confirm_name *cp = data;
3905 struct inquiry_entry *e;
3906 int err;
3907
3908 BT_DBG("%s", hdev->name);
3909
3910 hci_dev_lock(hdev);
3911
3912 if (!hci_discovery_active(hdev)) {
3913 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3914 MGMT_STATUS_FAILED, &cp->addr,
3915 sizeof(cp->addr));
3916 goto failed;
3917 }
3918
3919 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3920 if (!e) {
3921 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3922 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
3923 sizeof(cp->addr));
3924 goto failed;
3925 }
3926
3927 if (cp->name_known) {
3928 e->name_state = NAME_KNOWN;
3929 list_del(&e->list);
3930 } else {
3931 e->name_state = NAME_NEEDED;
3932 hci_inquiry_cache_update_resolve(hdev, e);
3933 }
3934
3935 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3936 sizeof(cp->addr));
3937
3938 failed:
3939 hci_dev_unlock(hdev);
3940 return err;
3941 }
3942
3943 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3944 u16 len)
3945 {
3946 struct mgmt_cp_block_device *cp = data;
3947 u8 status;
3948 int err;
3949
3950 BT_DBG("%s", hdev->name);
3951
3952 if (!bdaddr_type_is_valid(cp->addr.type))
3953 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3954 MGMT_STATUS_INVALID_PARAMS,
3955 &cp->addr, sizeof(cp->addr));
3956
3957 hci_dev_lock(hdev);
3958
3959 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
3960 cp->addr.type);
3961 if (err < 0) {
3962 status = MGMT_STATUS_FAILED;
3963 goto done;
3964 }
3965
3966 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
3967 sk);
3968 status = MGMT_STATUS_SUCCESS;
3969
3970 done:
3971 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3972 &cp->addr, sizeof(cp->addr));
3973
3974 hci_dev_unlock(hdev);
3975
3976 return err;
3977 }
3978
3979 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3980 u16 len)
3981 {
3982 struct mgmt_cp_unblock_device *cp = data;
3983 u8 status;
3984 int err;
3985
3986 BT_DBG("%s", hdev->name);
3987
3988 if (!bdaddr_type_is_valid(cp->addr.type))
3989 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3990 MGMT_STATUS_INVALID_PARAMS,
3991 &cp->addr, sizeof(cp->addr));
3992
3993 hci_dev_lock(hdev);
3994
3995 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
3996 cp->addr.type);
3997 if (err < 0) {
3998 status = MGMT_STATUS_INVALID_PARAMS;
3999 goto done;
4000 }
4001
4002 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4003 sk);
4004 status = MGMT_STATUS_SUCCESS;
4005
4006 done:
4007 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4008 &cp->addr, sizeof(cp->addr));
4009
4010 hci_dev_unlock(hdev);
4011
4012 return err;
4013 }
4014
4015 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4016 u16 len)
4017 {
4018 struct mgmt_cp_set_device_id *cp = data;
4019 struct hci_request req;
4020 int err;
4021 __u16 source;
4022
4023 BT_DBG("%s", hdev->name);
4024
4025 source = __le16_to_cpu(cp->source);
4026
4027 if (source > 0x0002)
4028 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4029 MGMT_STATUS_INVALID_PARAMS);
4030
4031 hci_dev_lock(hdev);
4032
4033 hdev->devid_source = source;
4034 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4035 hdev->devid_product = __le16_to_cpu(cp->product);
4036 hdev->devid_version = __le16_to_cpu(cp->version);
4037
4038 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
4039
4040 hci_req_init(&req, hdev);
4041 update_eir(&req);
4042 hci_req_run(&req, NULL);
4043
4044 hci_dev_unlock(hdev);
4045
4046 return err;
4047 }
4048
4049 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
4050 {
4051 struct cmd_lookup match = { NULL, hdev };
4052
4053 if (status) {
4054 u8 mgmt_err = mgmt_status(status);
4055
4056 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
4057 cmd_status_rsp, &mgmt_err);
4058 return;
4059 }
4060
4061 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
4062 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
4063 else
4064 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
4065
4066 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
4067 &match);
4068
4069 new_settings(hdev, match.sk);
4070
4071 if (match.sk)
4072 sock_put(match.sk);
4073 }
4074
/* Set Advertising command handler. Toggles LE advertising. When no
 * HCI communication is possible or needed (powered off, no change,
 * LE connections present, or an active LE scan in progress) the flag
 * is flipped directly and a settings response is sent; otherwise the
 * change is driven through an HCI request completed in
 * set_advertising_complete().
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	u8 val, enabled, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_le_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) || val == enabled ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed = false;

		if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
			change_bit(HCI_ADVERTISING, &hdev->dev_flags);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Only one advertising/LE state change may be in flight */
	if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	if (val)
		enable_advertising(&req);
	else
		disable_advertising(&req);

	err = hci_req_run(&req, set_advertising_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4154
4155 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4156 void *data, u16 len)
4157 {
4158 struct mgmt_cp_set_static_address *cp = data;
4159 int err;
4160
4161 BT_DBG("%s", hdev->name);
4162
4163 if (!lmp_le_capable(hdev))
4164 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4165 MGMT_STATUS_NOT_SUPPORTED);
4166
4167 if (hdev_is_powered(hdev))
4168 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4169 MGMT_STATUS_REJECTED);
4170
4171 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4172 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4173 return cmd_status(sk, hdev->id,
4174 MGMT_OP_SET_STATIC_ADDRESS,
4175 MGMT_STATUS_INVALID_PARAMS);
4176
4177 /* Two most significant bits shall be set */
4178 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4179 return cmd_status(sk, hdev->id,
4180 MGMT_OP_SET_STATIC_ADDRESS,
4181 MGMT_STATUS_INVALID_PARAMS);
4182 }
4183
4184 hci_dev_lock(hdev);
4185
4186 bacpy(&hdev->static_addr, &cp->bdaddr);
4187
4188 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
4189
4190 hci_dev_unlock(hdev);
4191
4192 return err;
4193 }
4194
4195 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4196 void *data, u16 len)
4197 {
4198 struct mgmt_cp_set_scan_params *cp = data;
4199 __u16 interval, window;
4200 int err;
4201
4202 BT_DBG("%s", hdev->name);
4203
4204 if (!lmp_le_capable(hdev))
4205 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4206 MGMT_STATUS_NOT_SUPPORTED);
4207
4208 interval = __le16_to_cpu(cp->interval);
4209
4210 if (interval < 0x0004 || interval > 0x4000)
4211 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4212 MGMT_STATUS_INVALID_PARAMS);
4213
4214 window = __le16_to_cpu(cp->window);
4215
4216 if (window < 0x0004 || window > 0x4000)
4217 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4218 MGMT_STATUS_INVALID_PARAMS);
4219
4220 if (window > interval)
4221 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4222 MGMT_STATUS_INVALID_PARAMS);
4223
4224 hci_dev_lock(hdev);
4225
4226 hdev->le_scan_interval = interval;
4227 hdev->le_scan_window = window;
4228
4229 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
4230
4231 /* If background scan is running, restart it so new parameters are
4232 * loaded.
4233 */
4234 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4235 hdev->discovery.state == DISCOVERY_STOPPED) {
4236 struct hci_request req;
4237
4238 hci_req_init(&req, hdev);
4239
4240 hci_req_add_le_scan_disable(&req);
4241 hci_req_add_le_passive_scan(&req);
4242
4243 hci_req_run(&req, NULL);
4244 }
4245
4246 hci_dev_unlock(hdev);
4247
4248 return err;
4249 }
4250
4251 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
4252 {
4253 struct pending_cmd *cmd;
4254
4255 BT_DBG("status 0x%02x", status);
4256
4257 hci_dev_lock(hdev);
4258
4259 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4260 if (!cmd)
4261 goto unlock;
4262
4263 if (status) {
4264 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4265 mgmt_status(status));
4266 } else {
4267 struct mgmt_mode *cp = cmd->param;
4268
4269 if (cp->val)
4270 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4271 else
4272 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4273
4274 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4275 new_settings(hdev, cmd->sk);
4276 }
4277
4278 mgmt_pending_remove(cmd);
4279
4280 unlock:
4281 hci_dev_unlock(hdev);
4282 }
4283
/* Set Fast Connectable command handler. Adjusts the BR/EDR page scan
 * parameters for quicker incoming connections. Requires a powered,
 * connectable controller with BR/EDR enabled and HCI version >= 1.2;
 * completion is handled in fast_connectable_complete().
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_NOT_POWERED);

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* No change requested: just acknowledge the current setting */
	if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
					hdev);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
			       data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	hci_req_init(&req, hdev);

	write_fast_connectable(&req, cp->val);

	err = hci_req_run(&req, fast_connectable_complete);
	if (err < 0) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				 MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
4348
4349 static void set_bredr_scan(struct hci_request *req)
4350 {
4351 struct hci_dev *hdev = req->hdev;
4352 u8 scan = 0;
4353
4354 /* Ensure that fast connectable is disabled. This function will
4355 * not do anything if the page scan parameters are already what
4356 * they should be.
4357 */
4358 write_fast_connectable(req, false);
4359
4360 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4361 scan |= SCAN_PAGE;
4362 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
4363 scan |= SCAN_INQUIRY;
4364
4365 if (scan)
4366 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
4367 }
4368
4369 static void set_bredr_complete(struct hci_dev *hdev, u8 status)
4370 {
4371 struct pending_cmd *cmd;
4372
4373 BT_DBG("status 0x%02x", status);
4374
4375 hci_dev_lock(hdev);
4376
4377 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
4378 if (!cmd)
4379 goto unlock;
4380
4381 if (status) {
4382 u8 mgmt_err = mgmt_status(status);
4383
4384 /* We need to restore the flag if related HCI commands
4385 * failed.
4386 */
4387 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4388
4389 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4390 } else {
4391 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4392 new_settings(hdev, cmd->sk);
4393 }
4394
4395 mgmt_pending_remove(cmd);
4396
4397 unlock:
4398 hci_dev_unlock(hdev);
4399 }
4400
/* Set BR/EDR command handler. Enables or disables BR/EDR support on
 * a dual-mode controller. Disabling is only allowed while powered
 * off; enabling while powered triggers an HCI request that updates
 * the scan mode and advertising data, completed in
 * set_bredr_complete().
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	struct hci_request req;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_NOT_SUPPORTED);

	/* BR/EDR can only be toggled while LE stays enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	if (!hdev_is_powered(hdev)) {
		/* Disabling BR/EDR also clears all BR/EDR-only settings */
		if (!cp->val) {
			clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
			clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
			clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
		}

		change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				 MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* We need to flip the bit already here so that update_adv_data
	 * generates the correct flags.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
		set_bredr_scan(&req);

	/* Since only the advertising data flags will change, there
	 * is no need to update the scan response data.
	 */
	update_adv_data(&req);

	err = hci_req_run(&req, set_bredr_complete);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4490
/* Set Secure Connections command handler. Values: 0x00 disable,
 * 0x01 enable, 0x02 enable SC-only mode. While powered off the flags
 * are toggled directly; while powered the change is sent to the
 * controller via HCI_OP_WRITE_SC_SUPPORT.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct pending_cmd *cmd;
	u8 val, status;
	int err;

	BT_DBG("request for %s", hdev->name);

	status = mgmt_bredr_support(hdev);
	if (status)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  status);

	/* The debug flag allows forcing SC support for testing */
	if (!lmp_sc_capable(hdev) &&
	    !test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !test_and_set_bit(HCI_SC_ENABLED,
						    &hdev->dev_flags);
			if (cp->val == 0x02)
				set_bit(HCI_SC_ONLY, &hdev->dev_flags);
			else
				clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		} else {
			changed = test_and_clear_bit(HCI_SC_ENABLED,
						     &hdev->dev_flags);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
		err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				 MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* No change in either flag: just acknowledge */
	if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
	    (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	/* SC-only mode is a host-side flag, updated immediately */
	if (cp->val == 0x02)
		set_bit(HCI_SC_ONLY, &hdev->dev_flags);
	else
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);

failed:
	hci_dev_unlock(hdev);
	return err;
}
4578
/* Set Debug Keys command handler. Values: 0x00 discard debug keys,
 * 0x01 keep them, 0x02 keep them and also make the controller use
 * SSP debug mode. The controller is only reconfigured when powered
 * with SSP enabled and the usage mode actually changed.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* 0x01 and 0x02 both mean "keep debug keys" */
	if (cp->val)
		changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
					     &hdev->dev_flags);

	/* Only 0x02 turns on actual debug-key usage */
	if (cp->val == 0x02)
		use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
						&hdev->dev_flags);
	else
		use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
						 &hdev->dev_flags);

	if (hdev_is_powered(hdev) && use_changed &&
	    test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4626
/* Set Privacy command handler. Enables or disables LE privacy (use
 * of resolvable private addresses) and stores the supplied Identity
 * Resolving Key. Only allowed while the controller is powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_INVALID_PARAMS);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				  MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

	if (cp->privacy) {
		changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		/* Mark the current RPA as expired so a fresh one is
		 * generated with the new IRK.
		 */
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
		memset(hdev->irk, 0, sizeof(hdev->irk));
		clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
4676
4677 static bool irk_is_valid(struct mgmt_irk_info *irk)
4678 {
4679 switch (irk->addr.type) {
4680 case BDADDR_LE_PUBLIC:
4681 return true;
4682
4683 case BDADDR_LE_RANDOM:
4684 /* Two most significant bits shall be set */
4685 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4686 return false;
4687 return true;
4688 }
4689
4690 return false;
4691 }
4692
/* Load IRKs command handler. Replaces the kernel's Identity
 * Resolving Key list with the one supplied by user space. The count
 * and total length are validated, and every entry's address is
 * checked, before the existing list is cleared.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound that keeps the expected length within u16 */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		BT_ERR("load_irks: too big irk_count value %u", irk_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must exactly match the declared count */
	expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
	if (expected_len != len) {
		BT_ERR("load_irks: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s irk_count %u", hdev->name, irk_count);

	/* Validate all entries before touching the existing list */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_LOAD_IRKS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];
		u8 addr_type;

		if (irk->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
			    BDADDR_ANY);
	}

	set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);

	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
4759
4760 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4761 {
4762 if (key->master != 0x00 && key->master != 0x01)
4763 return false;
4764
4765 switch (key->addr.type) {
4766 case BDADDR_LE_PUBLIC:
4767 return true;
4768
4769 case BDADDR_LE_RANDOM:
4770 /* Two most significant bits shall be set */
4771 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
4772 return false;
4773 return true;
4774 }
4775
4776 return false;
4777 }
4778
/* Handle MGMT_OP_LOAD_LONG_TERM_KEYS: replace the device's whole set of
 * SMP Long Term Keys with the list supplied by userspace. The payload
 * is fully validated before any existing key is cleared, so a malformed
 * request leaves the current key store untouched.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound that keeps expected_len below from overflowing u16 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	BT_DBG("request for %s", hdev->name);

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		BT_ERR("load_ltks: too big key_count value %u", key_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* Payload length must exactly match the advertised key count */
	expected_len = sizeof(*cp) + key_count *
					sizeof(struct mgmt_ltk_info);
	if (expected_len != len) {
		BT_ERR("load_keys: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s key_count %u", hdev->name, key_count);

	/* Validate every entry up front so the request is rejected as a
	 * whole before hci_smp_ltks_clear() destroys the existing keys.
	 */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return cmd_status(sk, hdev->id,
					  MGMT_OP_LOAD_LONG_TERM_KEYS,
					  MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, addr_type, authenticated;

		if (key->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		if (key->master)
			type = SMP_LTK;
		else
			type = SMP_LTK_SLAVE;

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			break;
		default:
			/* Skip keys with an unknown security level rather
			 * than failing the whole load.
			 */
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
			    authenticated, key->val, key->enc_size, key->ediv,
			    key->rand);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
4862
/* Context handed to get_conn_info_complete() while iterating pending
 * MGMT_OP_GET_CONN_INFO commands after the HCI refresh request finished.
 */
struct cmd_conn_lookup {
	struct hci_conn *conn;	/* connection the refresh request was for */
	bool valid_tx_power;	/* conn's tx_power/max_tx_power are usable */
	u8 mgmt_status;		/* status to report back to userspace */
};
4868
/* mgmt_pending_foreach() callback: answer one pending Get Conn Info
 * command once the RSSI/TX power refresh completed. Only commands for
 * the connection carried in @data (struct cmd_conn_lookup) are replied
 * to; others are left pending.
 */
static void get_conn_info_complete(struct pending_cmd *cmd, void *data)
{
	struct cmd_conn_lookup *match = data;
	struct mgmt_cp_get_conn_info *cp;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn = cmd->user_data;

	/* Skip pending commands that target a different connection */
	if (conn != match->conn)
		return;

	cp = (struct mgmt_cp_get_conn_info *) cmd->param;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!match->mgmt_status) {
		rp.rssi = conn->rssi;

		if (match->valid_tx_power) {
			rp.tx_power = conn->tx_power;
			rp.max_tx_power = conn->max_tx_power;
		} else {
			rp.tx_power = HCI_TX_POWER_INVALID;
			rp.max_tx_power = HCI_TX_POWER_INVALID;
		}
	}

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
		     match->mgmt_status, &rp, sizeof(rp));

	/* Drop the reference taken in get_conn_info() */
	hci_conn_drop(conn);

	mgmt_pending_remove(cmd);
}
4904
/* HCI request completion handler for the Read RSSI / Read TX Power
 * refresh issued from get_conn_info(). Recovers the connection handle
 * from the last sent command and replies to the matching pending mgmt
 * command(s) via get_conn_info_complete().
 */
static void conn_info_refresh_complete(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_read_rssi *cp;
	struct hci_conn *conn;
	struct cmd_conn_lookup match;
	u16 handle;

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	/* TX power data is valid in case request completed successfully,
	 * otherwise we assume it's not valid. At the moment we assume that
	 * either both or none of current and max values are valid to keep code
	 * simple.
	 */
	match.valid_tx_power = !status;

	/* Commands sent in request are either Read RSSI or Read Transmit Power
	 * Level so we check which one was last sent to retrieve connection
	 * handle. Both commands have handle as first parameter so it's safe to
	 * cast data on the same command struct.
	 *
	 * First command sent is always Read RSSI and we fail only if it fails.
	 * In other case we simply override error to indicate success as we
	 * already remembered if TX power value is actually valid.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
	if (!cp) {
		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
		status = 0;
	}

	if (!cp) {
		BT_ERR("invalid sent_cmd in response");
		goto unlock;
	}

	handle = __le16_to_cpu(cp->handle);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn) {
		BT_ERR("unknown handle (%d) in response", handle);
		goto unlock;
	}

	match.conn = conn;
	match.mgmt_status = mgmt_status(status);

	/* Cache refresh is complete, now reply for mgmt request for given
	 * connection only.
	 */
	mgmt_pending_foreach(MGMT_OP_GET_CONN_INFO, hdev,
			     get_conn_info_complete, &match);

unlock:
	hci_dev_unlock(hdev);
}
4962
/* Handle MGMT_OP_GET_CONN_INFO: report RSSI and TX power for an active
 * connection. Cached values are returned directly when fresh enough;
 * otherwise an HCI request is issued and the reply is sent later from
 * conn_info_refresh_complete() / get_conn_info_complete().
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	BT_DBG("%s", hdev->name);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				    MGMT_STATUS_INVALID_PARAMS,
				    &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct hci_request req;
		struct hci_cp_read_tx_power req_txp_cp;
		struct hci_cp_read_rssi req_rssi_cp;
		struct pending_cmd *cmd;

		hci_req_init(&req, hdev);
		req_rssi_cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
			    &req_rssi_cp);

		/* For LE links TX power does not change thus we don't need to
		 * query for it once value is known.
		 */
		if (!bdaddr_type_is_le(cp->addr.type) ||
		    conn->tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x00; /* current TX power */
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		/* Max TX power needs to be read only once per connection */
		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
			req_txp_cp.handle = cpu_to_le16(conn->handle);
			req_txp_cp.type = 0x01; /* maximum TX power */
			hci_req_add(&req, HCI_OP_READ_TX_POWER,
				    sizeof(req_txp_cp), &req_txp_cp);
		}

		err = hci_req_run(&req, conn_info_refresh_complete);
		if (err < 0)
			goto unlock;

		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
				       data, len);
		if (!cmd) {
			err = -ENOMEM;
			goto unlock;
		}

		/* Reference is dropped in get_conn_info_complete() */
		hci_conn_hold(conn);
		cmd->user_data = conn;

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
				   MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5074
/* HCI request completion handler for MGMT_OP_GET_CLOCK_INFO. Looks up
 * the pending command that matches the connection the Read Clock
 * request was issued for and sends the Get Clock Info reply.
 */
static void get_clock_info_complete(struct hci_dev *hdev, u8 status)
{
	struct mgmt_cp_get_clock_info *cp;
	struct mgmt_rp_get_clock_info rp;
	struct hci_cp_read_clock *hci_cp;
	struct pending_cmd *cmd;
	struct hci_conn *conn;

	BT_DBG("%s status %u", hdev->name, status);

	hci_dev_lock(hdev);

	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!hci_cp)
		goto unlock;

	/* which != 0x00 means the piconet clock of a connection was read */
	if (hci_cp->which) {
		u16 handle = __le16_to_cpu(hci_cp->handle);
		conn = hci_conn_hash_lookup_handle(hdev, handle);
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
	if (!cmd)
		goto unlock;

	cp = cmd->param;

	memset(&rp, 0, sizeof(rp));
	memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));

	/* On failure reply with zeroed clock values and the error status */
	if (status)
		goto send_rsp;

	rp.local_clock = cpu_to_le32(hdev->clock);

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
	}

send_rsp:
	cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
		     &rp, sizeof(rp));
	mgmt_pending_remove(cmd);
	/* Drop the reference taken in get_clock_info() */
	if (conn)
		hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
5127
5128 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5129 u16 len)
5130 {
5131 struct mgmt_cp_get_clock_info *cp = data;
5132 struct mgmt_rp_get_clock_info rp;
5133 struct hci_cp_read_clock hci_cp;
5134 struct pending_cmd *cmd;
5135 struct hci_request req;
5136 struct hci_conn *conn;
5137 int err;
5138
5139 BT_DBG("%s", hdev->name);
5140
5141 memset(&rp, 0, sizeof(rp));
5142 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5143 rp.addr.type = cp->addr.type;
5144
5145 if (cp->addr.type != BDADDR_BREDR)
5146 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5147 MGMT_STATUS_INVALID_PARAMS,
5148 &rp, sizeof(rp));
5149
5150 hci_dev_lock(hdev);
5151
5152 if (!hdev_is_powered(hdev)) {
5153 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5154 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
5155 goto unlock;
5156 }
5157
5158 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5159 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5160 &cp->addr.bdaddr);
5161 if (!conn || conn->state != BT_CONNECTED) {
5162 err = cmd_complete(sk, hdev->id,
5163 MGMT_OP_GET_CLOCK_INFO,
5164 MGMT_STATUS_NOT_CONNECTED,
5165 &rp, sizeof(rp));
5166 goto unlock;
5167 }
5168 } else {
5169 conn = NULL;
5170 }
5171
5172 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5173 if (!cmd) {
5174 err = -ENOMEM;
5175 goto unlock;
5176 }
5177
5178 hci_req_init(&req, hdev);
5179
5180 memset(&hci_cp, 0, sizeof(hci_cp));
5181 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5182
5183 if (conn) {
5184 hci_conn_hold(conn);
5185 cmd->user_data = conn;
5186
5187 hci_cp.handle = cpu_to_le16(conn->handle);
5188 hci_cp.which = 0x01; /* Piconet clock */
5189 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5190 }
5191
5192 err = hci_req_run(&req, get_clock_info_complete);
5193 if (err < 0)
5194 mgmt_pending_remove(cmd);
5195
5196 unlock:
5197 hci_dev_unlock(hdev);
5198 return err;
5199 }
5200
5201 static void device_added(struct sock *sk, struct hci_dev *hdev,
5202 bdaddr_t *bdaddr, u8 type, u8 action)
5203 {
5204 struct mgmt_ev_device_added ev;
5205
5206 bacpy(&ev.addr.bdaddr, bdaddr);
5207 ev.addr.type = type;
5208 ev.action = action;
5209
5210 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
5211 }
5212
/* Handle MGMT_OP_ADD_DEVICE: put an LE device either on the
 * auto-connect list (action 0x01) or on the passive-scan report list
 * (action 0x00) by configuring its connection parameters.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	/* Only specific (non-ANY) LE addresses are accepted here */
	if (!bdaddr_type_is_le(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01)
		return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				    MGMT_STATUS_INVALID_PARAMS,
				    &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	if (cp->action)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				   MGMT_STATUS_FAILED,
				   &cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);

	err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
			   MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5265
5266 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5267 bdaddr_t *bdaddr, u8 type)
5268 {
5269 struct mgmt_ev_device_removed ev;
5270
5271 bacpy(&ev.addr.bdaddr, bdaddr);
5272 ev.addr.type = type;
5273
5274 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
5275 }
5276
/* Handle MGMT_OP_REMOVE_DEVICE: remove one LE device (specific address)
 * or, with BDADDR_ANY and type 0, all non-disabled LE connection
 * parameter entries.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_le(cp->addr.type)) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_LE_PUBLIC)
			addr_type = ADDR_LE_DEV_PUBLIC;
		else
			addr_type = ADDR_LE_DEV_RANDOM;

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Disabled entries were not added via Add Device, so they
		 * cannot be removed through this command either.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Unlink from both the action list and the params list
		 * before freeing, then refresh the background scan.
		 */
		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
		hci_update_background_scan(hdev);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;

		/* BDADDR_ANY requires address type 0 */
		if (cp->addr.type) {
			err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
					   MGMT_STATUS_INVALID_PARAMS,
					   &cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		BT_DBG("All LE connection parameters were removed");

		hci_update_background_scan(hdev);
	}

	err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
			   MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5356
/* Handle MGMT_OP_LOAD_CONN_PARAM: replace the stored LE connection
 * parameters with the list from userspace. Individual invalid entries
 * are skipped (with an error log) rather than failing the request.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound that keeps expected_len below from overflowing u16 */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		BT_ERR("load_conn_param: too big param_count value %u",
		       param_count);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	/* Payload length must exactly match the advertised entry count */
	expected_len = sizeof(*cp) + param_count *
					sizeof(struct mgmt_conn_param);
	if (expected_len != len) {
		BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
		       expected_len, len);
		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				  MGMT_STATUS_INVALID_PARAMS);
	}

	BT_DBG("%s param_count %u", hdev->name, param_count);

	hci_dev_lock(hdev);

	/* Drop existing disabled entries before loading the new set */
	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
		       param->addr.type);

		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
		       min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			BT_ERR("Ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			BT_ERR("Failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
}
5441
/* Handle MGMT_OP_SET_EXTERNAL_CONFIG: toggle whether the controller's
 * configuration is provided externally. Only allowed while powered off
 * and when the controller has the EXTERNAL_CONFIG quirk.
 */
static int set_external_config(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_cp_set_external_config *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_REJECTED);

	if (cp->config != 0x00 && cp->config != 0x01)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	if (cp->config)
		changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
					     &hdev->dev_flags);

	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	err = new_options(hdev, sk);

	/* The UNCONFIGURED flag now disagreeing with is_configured() means
	 * the index must switch between the configured and unconfigured
	 * lists: remove it, flip the flag, and re-announce it accordingly.
	 */
	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
		mgmt_index_removed(hdev);

		if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
			set_bit(HCI_CONFIG, &hdev->dev_flags);
			set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

			/* Power-on runs the setup phase and re-adds the
			 * index as a configured controller.
			 */
			queue_work(hdev->req_workqueue, &hdev->power_on);
		} else {
			set_bit(HCI_RAW, &hdev->flags);
			mgmt_index_added(hdev);
		}
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5499
/* Handle MGMT_OP_SET_PUBLIC_ADDRESS: store a public address for a
 * controller that lacks one. Only allowed while powered off and when
 * the driver provides a set_bdaddr callback. If this completes the
 * configuration, the index transitions to the configured list via a
 * power-on cycle.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	BT_DBG("%s", hdev->name);

	if (hdev_is_powered(hdev))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_REJECTED);

	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_INVALID_PARAMS);

	if (!hdev->set_bdaddr)
		return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				  MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

		set_bit(HCI_CONFIG, &hdev->dev_flags);
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

		/* Power-on runs setup and re-adds the configured index */
		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5551
/* Dispatch table for mgmt commands, indexed by opcode. For fixed-size
 * commands the payload must equal data_len exactly; for var_len
 * commands it must be at least data_len (both checked in
 * mgmt_control() before func is invoked).
 */
static const struct mgmt_handler {
	int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
		     u16 data_len);
	bool var_len;
	size_t data_len;
} mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,           false, MGMT_READ_VERSION_SIZE },
	{ read_commands,          false, MGMT_READ_COMMANDS_SIZE },
	{ read_index_list,        false, MGMT_READ_INDEX_LIST_SIZE },
	{ read_controller_info,   false, MGMT_READ_INFO_SIZE },
	{ set_powered,            false, MGMT_SETTING_SIZE },
	{ set_discoverable,       false, MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,        false, MGMT_SETTING_SIZE },
	{ set_fast_connectable,   false, MGMT_SETTING_SIZE },
	{ set_pairable,           false, MGMT_SETTING_SIZE },
	{ set_link_security,      false, MGMT_SETTING_SIZE },
	{ set_ssp,                false, MGMT_SETTING_SIZE },
	{ set_hs,                 false, MGMT_SETTING_SIZE },
	{ set_le,                 false, MGMT_SETTING_SIZE },
	{ set_dev_class,          false, MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,         false, MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,               false, MGMT_ADD_UUID_SIZE },
	{ remove_uuid,            false, MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,         true,  MGMT_LOAD_LINK_KEYS_SIZE },
	{ load_long_term_keys,    true,  MGMT_LOAD_LONG_TERM_KEYS_SIZE },
	{ disconnect,             false, MGMT_DISCONNECT_SIZE },
	{ get_connections,        false, MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,         false, MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,     false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,      false, MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,            false, MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,     false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,          false, MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,     false, MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,     false, MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,    false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,    true,  MGMT_ADD_REMOTE_OOB_DATA_SIZE },
	{ remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,        false, MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,         false, MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,           false, MGMT_CONFIRM_NAME_SIZE },
	{ block_device,           false, MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,         false, MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,          false, MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,        false, MGMT_SETTING_SIZE },
	{ set_bredr,              false, MGMT_SETTING_SIZE },
	{ set_static_address,     false, MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,        false, MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,        false, MGMT_SETTING_SIZE },
	{ set_debug_keys,         false, MGMT_SETTING_SIZE },
	{ set_privacy,            false, MGMT_SET_PRIVACY_SIZE },
	{ load_irks,              true,  MGMT_LOAD_IRKS_SIZE },
	{ get_conn_info,          false, MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,         false, MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,             false, MGMT_ADD_DEVICE_SIZE },
	{ remove_device,          false, MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,        true,  MGMT_LOAD_CONN_PARAM_SIZE },
	{ read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
	{ read_config_info,       false, MGMT_READ_CONFIG_INFO_SIZE },
	{ set_external_config,    false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
	{ set_public_address,     false, MGMT_SET_PUBLIC_ADDRESS_SIZE },
};
5617
/* Entry point for mgmt commands arriving on an HCI control socket.
 * Copies the message from userspace, validates the header, index and
 * payload length, then dispatches to the handler in mgmt_handlers[].
 * Returns the consumed length on success or a negative errno.
 */
int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct mgmt_handler *handler;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	/* Header length field must match the actual payload size */
	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Controllers in setup/config or claimed by a user channel
		 * are not visible to the mgmt interface.
		 */
		if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
		    test_bit(HCI_CONFIG, &hdev->dev_flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		/* Unconfigured controllers only accept the small set of
		 * commands needed to finish their configuration.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    opcode != MGMT_OP_READ_CONFIG_INFO &&
		    opcode != MGMT_OP_SET_EXTERNAL_CONFIG &&
		    opcode != MGMT_OP_SET_PUBLIC_ADDRESS) {
			err = cmd_status(sk, index, opcode,
					 MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
	    mgmt_handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	/* Global (index-less) commands must not carry a controller index */
	if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
		     opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	/* Per-controller commands require one */
	if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
		      opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	handler = &mgmt_handlers[opcode];

	if ((handler->var_len && len < handler->data_len) ||
	    (!handler->var_len && len != handler->data_len)) {
		err = cmd_status(sk, index, opcode,
				 MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev)
		mgmt_init_hdev(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}
5727
5728 void mgmt_index_added(struct hci_dev *hdev)
5729 {
5730 if (hdev->dev_type != HCI_BREDR)
5731 return;
5732
5733 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
5734 return;
5735
5736 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5737 mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
5738 else
5739 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
5740 }
5741
5742 void mgmt_index_removed(struct hci_dev *hdev)
5743 {
5744 u8 status = MGMT_STATUS_INVALID_INDEX;
5745
5746 if (hdev->dev_type != HCI_BREDR)
5747 return;
5748
5749 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
5750 return;
5751
5752 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
5753
5754 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5755 mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
5756 else
5757 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
5758 }
5759
/* Re-sort all LE connection parameter entries onto the pending
 * auto-connect / passive-report lists according to their auto_connect
 * mode, then refresh the background scan.
 *
 * This function requires the caller holds hdev->lock */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		list_del_init(&p->action);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_ALWAYS:
			list_add(&p->action, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			list_add(&p->action, &hdev->pend_le_reports);
			break;
		default:
			/* Other modes need no action-list entry */
			break;
		}
	}

	hci_update_background_scan(hdev);
}
5785
/* HCI request completion handler for powered_update_hci(): restart LE
 * auto-connect actions, answer pending Set Powered commands and emit a
 * New Settings event.
 */
static void powered_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };

	BT_DBG("status 0x%02x", status);

	hci_dev_lock(hdev);

	restart_le_actions(hdev);

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	hci_dev_unlock(hdev);

	/* settings_rsp stored a reference to the last responder's socket */
	if (match.sk)
		sock_put(match.sk);
}
5805
/* Build and run the HCI request that brings the controller's state in
 * line with the mgmt settings after power on: SSP, LE host support,
 * advertising data, link security, scan mode, class, name and EIR.
 * Returns the hci_req_run() result (negative if nothing was queued).
 */
static int powered_update_hci(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 link_sec;

	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 ssp = 1;

		hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
	}

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 1;
		cp.simul = lmp_le_br_capable(hdev);

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
			update_adv_data(&req);
			update_scan_rsp_data(&req);
		}

		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
			enable_advertising(&req);
	}

	/* Sync the controller's authentication setting with the flag */
	link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
			set_bredr_scan(&req);
		update_class(&req);
		update_name(&req);
		update_eir(&req);
	}

	return hci_req_run(&req, powered_complete);
}
5865
/* Notify the mgmt layer of a power state change. On power up, kick off
 * powered_update_hci() (the completion callback then sends the events);
 * on power down, fail all pending commands and announce the new
 * settings immediately.
 */
int mgmt_powered(struct hci_dev *hdev, u8 powered)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
	u8 zero_cod[] = { 0, 0, 0 };
	int err;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return 0;

	if (powered) {
		/* If the update request was queued, powered_complete()
		 * handles the pending commands and settings events.
		 */
		if (powered_update_hci(hdev) == 0)
			return 0;

		mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
				     &match);
		goto new_settings;
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
	mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);

	/* Report the class of device as cleared if it was non-zero */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
			   zero_cod, sizeof(zero_cod), NULL);

new_settings:
	err = new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	return err;
}
5900
5901 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
5902 {
5903 struct pending_cmd *cmd;
5904 u8 status;
5905
5906 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
5907 if (!cmd)
5908 return;
5909
5910 if (err == -ERFKILL)
5911 status = MGMT_STATUS_RFKILLED;
5912 else
5913 status = MGMT_STATUS_FAILED;
5914
5915 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
5916
5917 mgmt_pending_remove(cmd);
5918 }
5919
/* Handle expiry of the discoverable timeout: clear the discoverable
 * flags, restore page-scan-only mode, refresh class/advertising data
 * and announce the new settings.
 */
void mgmt_discoverable_timeout(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);

	hci_req_init(&req, hdev);
	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		/* Drop inquiry scan but keep page scan enabled */
		u8 scan = SCAN_PAGE;
		hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
	}
	update_class(&req);
	update_adv_data(&req);
	hci_req_run(&req, NULL);

	hdev->discov_timeout = 0;

	new_settings(hdev, NULL);

	hci_dev_unlock(hdev);
}
5950
/* Synchronize the HCI_DISCOVERABLE flag with a controller-driven scan
 * mode change and emit New Settings if the flag actually toggled.
 */
void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
{
	bool changed;

	/* Nothing needed here if there's a pending command since that
	 * commands request completion callback takes care of everything
	 * necessary.
	 */
	if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	/* Powering off may clear the scan mode - don't let that interfere */
	if (!discoverable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	if (discoverable) {
		changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	} else {
		/* Limited discoverable can never survive without general
		 * discoverable, so clear it unconditionally.
		 */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (changed) {
		struct hci_request req;

		/* In case this change in discoverable was triggered by
		 * a disabling of connectable there could be a need to
		 * update the advertising flags.
		 */
		hci_req_init(&req, hdev);
		update_adv_data(&req);
		hci_req_run(&req, NULL);

		new_settings(hdev, NULL);
	}
}
5987
5988 void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
5989 {
5990 bool changed;
5991
5992 /* Nothing needed here if there's a pending command since that
5993 * commands request completion callback takes care of everything
5994 * necessary.
5995 */
5996 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
5997 return;
5998
5999 /* Powering off may clear the scan mode - don't let that interfere */
6000 if (!connectable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
6001 return;
6002
6003 if (connectable)
6004 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
6005 else
6006 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
6007
6008 if (changed)
6009 new_settings(hdev, NULL);
6010 }
6011
6012 void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
6013 {
6014 u8 mgmt_err = mgmt_status(status);
6015
6016 if (scan & SCAN_PAGE)
6017 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
6018 cmd_status_rsp, &mgmt_err);
6019
6020 if (scan & SCAN_INQUIRY)
6021 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
6022 cmd_status_rsp, &mgmt_err);
6023 }
6024
6025 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
6026 bool persistent)
6027 {
6028 struct mgmt_ev_new_link_key ev;
6029
6030 memset(&ev, 0, sizeof(ev));
6031
6032 ev.store_hint = persistent;
6033 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6034 ev.key.addr.type = BDADDR_BREDR;
6035 ev.key.type = key->type;
6036 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
6037 ev.key.pin_len = key->pin_len;
6038
6039 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
6040 }
6041
6042 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
6043 {
6044 if (ltk->authenticated)
6045 return MGMT_LTK_AUTHENTICATED;
6046
6047 return MGMT_LTK_UNAUTHENTICATED;
6048 }
6049
/* Send a New Long Term Key event, suppressing the store hint for keys
 * tied to non-identity (resolvable/non-resolvable random) addresses.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* Only the master (initiator) LTK sets the master flag */
	if (key->type == SMP_LTK)
		ev.key.master = 1;

	memcpy(ev.key.val, key->val, sizeof(key->val));

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
6087
/* Send a New IRK event; only ask user space to store the key when the
 * device actually uses a resolvable private address (irk->rpa set).
 */
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
{
	struct mgmt_ev_new_irk ev;

	memset(&ev, 0, sizeof(ev));

	/* For identity resolving keys from devices that are already
	 * using a public address or static random address, do not
	 * ask for storing this key. The identity resolving key really
	 * is only mandatory for devices using resolvable random
	 * addresses.
	 *
	 * Storing all identity resolving keys has the downside that
	 * they will be also loaded on next boot of the system. More
	 * identity resolving keys, means more time during scanning is
	 * needed to actually resolve these addresses.
	 */
	if (bacmp(&irk->rpa, BDADDR_ANY))
		ev.store_hint = 0x01;
	else
		ev.store_hint = 0x00;

	bacpy(&ev.rpa, &irk->rpa);
	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
	memcpy(ev.irk.val, irk->val, sizeof(irk->val));

	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
}
6117
/* Send a New CSRK event, suppressing the store hint for keys tied to
 * non-identity (resolvable/non-resolvable random) addresses.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.master = csrk->master;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
6147
6148 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
6149 u8 bdaddr_type, u8 store_hint, u16 min_interval,
6150 u16 max_interval, u16 latency, u16 timeout)
6151 {
6152 struct mgmt_ev_new_conn_param ev;
6153
6154 if (!hci_is_identity_address(bdaddr, bdaddr_type))
6155 return;
6156
6157 memset(&ev, 0, sizeof(ev));
6158 bacpy(&ev.addr.bdaddr, bdaddr);
6159 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
6160 ev.store_hint = store_hint;
6161 ev.min_interval = cpu_to_le16(min_interval);
6162 ev.max_interval = cpu_to_le16(max_interval);
6163 ev.latency = cpu_to_le16(latency);
6164 ev.timeout = cpu_to_le16(timeout);
6165
6166 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
6167 }
6168
6169 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
6170 u8 data_len)
6171 {
6172 eir[eir_len++] = sizeof(type) + data_len;
6173 eir[eir_len++] = type;
6174 memcpy(&eir[eir_len], data, data_len);
6175 eir_len += data_len;
6176
6177 return eir_len;
6178 }
6179
/* Send a Device Connected event, appending the remote name (when
 * known) and a non-zero class of device as EIR fields.
 *
 * NOTE(review): buf is 512 bytes with no explicit bound check on
 * name_len; callers are presumably limited to HCI_MAX_NAME_LENGTH
 * (248) so the event fits - confirm at the call sites.
 */
void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			   u8 addr_type, u32 flags, u8 *name, u8 name_len,
			   u8 *dev_class)
{
	char buf[512];
	struct mgmt_ev_device_connected *ev = (void *) buf;
	u16 eir_len = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);

	ev->flags = __cpu_to_le32(flags);

	if (name_len > 0)
		eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
					  name, name_len);

	/* Only include the class of device when it is non-zero */
	if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
		eir_len = eir_append_data(ev->eir, eir_len,
					  EIR_CLASS_OF_DEV, dev_class, 3);

	ev->eir_len = cpu_to_le16(eir_len);

	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
		   sizeof(*ev) + eir_len, NULL);
}
6206
6207 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
6208 {
6209 struct mgmt_cp_disconnect *cp = cmd->param;
6210 struct sock **sk = data;
6211 struct mgmt_rp_disconnect rp;
6212
6213 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6214 rp.addr.type = cp->addr.type;
6215
6216 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
6217 sizeof(rp));
6218
6219 *sk = cmd->sk;
6220 sock_hold(*sk);
6221
6222 mgmt_pending_remove(cmd);
6223 }
6224
6225 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
6226 {
6227 struct hci_dev *hdev = data;
6228 struct mgmt_cp_unpair_device *cp = cmd->param;
6229 struct mgmt_rp_unpair_device rp;
6230
6231 memset(&rp, 0, sizeof(rp));
6232 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6233 rp.addr.type = cp->addr.type;
6234
6235 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
6236
6237 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
6238
6239 mgmt_pending_remove(cmd);
6240 }
6241
/* Handle a link disconnection: expedite a pending power off when this
 * was the last connection, complete pending Disconnect/Unpair Device
 * commands and (for mgmt-visible ACL/LE links) send the Device
 * Disconnected event to everyone except the disconnect initiator.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct pending_cmd *power_off;
	struct sock *sk = NULL;

	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (power_off) {
		struct mgmt_mode *cp = power_off->param;

		/* The connection is still in hci_conn_hash so test for 1
		 * instead of 0 to know if this is the last one.
		 */
		if (!cp->val && hci_conn_count(hdev) == 1) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
		}
	}

	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp() hands back the initiator's socket in sk */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Skip the socket that initiated the disconnect (sk) */
	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
6283
/* Report a failed disconnect attempt: complete pending Unpair Device
 * commands and, if a pending Disconnect command matches this address
 * and type, finish it with the translated HCI status.
 */
void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 link_type, u8 addr_type, u8 status)
{
	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
	struct mgmt_cp_disconnect *cp;
	struct mgmt_rp_disconnect rp;
	struct pending_cmd *cmd;

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);

	cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
	if (!cmd)
		return;

	cp = cmd->param;

	/* Only respond if the failure is for the pending command's peer */
	if (bacmp(bdaddr, &cp->addr.bdaddr))
		return;

	if (cp->addr.type != bdaddr_type)
		return;

	bacpy(&rp.addr.bdaddr, bdaddr);
	rp.addr.type = bdaddr_type;

	cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
		     mgmt_status(status), &rp, sizeof(rp));

	mgmt_pending_remove(cmd);
}
6315
/* Report a failed connection attempt: expedite a pending power off if
 * this was the last connection, then send a Connect Failed event.
 */
void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
			 u8 addr_type, u8 status)
{
	struct mgmt_ev_connect_failed ev;
	struct pending_cmd *power_off;

	power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
	if (power_off) {
		struct mgmt_mode *cp = power_off->param;

		/* The connection is still in hci_conn_hash so test for 1
		 * instead of 0 to know if this is the last one.
		 */
		if (!cp->val && hci_conn_count(hdev) == 1) {
			cancel_delayed_work(&hdev->power_off);
			queue_work(hdev->req_workqueue, &hdev->power_off.work);
		}
	}

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.status = mgmt_status(status);

	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
}
6341
6342 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
6343 {
6344 struct mgmt_ev_pin_code_request ev;
6345
6346 bacpy(&ev.addr.bdaddr, bdaddr);
6347 ev.addr.type = BDADDR_BREDR;
6348 ev.secure = secure;
6349
6350 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
6351 }
6352
6353 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6354 u8 status)
6355 {
6356 struct pending_cmd *cmd;
6357 struct mgmt_rp_pin_code_reply rp;
6358
6359 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
6360 if (!cmd)
6361 return;
6362
6363 bacpy(&rp.addr.bdaddr, bdaddr);
6364 rp.addr.type = BDADDR_BREDR;
6365
6366 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
6367 mgmt_status(status), &rp, sizeof(rp));
6368
6369 mgmt_pending_remove(cmd);
6370 }
6371
6372 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6373 u8 status)
6374 {
6375 struct pending_cmd *cmd;
6376 struct mgmt_rp_pin_code_reply rp;
6377
6378 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
6379 if (!cmd)
6380 return;
6381
6382 bacpy(&rp.addr.bdaddr, bdaddr);
6383 rp.addr.type = BDADDR_BREDR;
6384
6385 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
6386 mgmt_status(status), &rp, sizeof(rp));
6387
6388 mgmt_pending_remove(cmd);
6389 }
6390
6391 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6392 u8 link_type, u8 addr_type, u32 value,
6393 u8 confirm_hint)
6394 {
6395 struct mgmt_ev_user_confirm_request ev;
6396
6397 BT_DBG("%s", hdev->name);
6398
6399 bacpy(&ev.addr.bdaddr, bdaddr);
6400 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6401 ev.confirm_hint = confirm_hint;
6402 ev.value = cpu_to_le32(value);
6403
6404 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
6405 NULL);
6406 }
6407
6408 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6409 u8 link_type, u8 addr_type)
6410 {
6411 struct mgmt_ev_user_passkey_request ev;
6412
6413 BT_DBG("%s", hdev->name);
6414
6415 bacpy(&ev.addr.bdaddr, bdaddr);
6416 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6417
6418 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
6419 NULL);
6420 }
6421
6422 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6423 u8 link_type, u8 addr_type, u8 status,
6424 u8 opcode)
6425 {
6426 struct pending_cmd *cmd;
6427 struct mgmt_rp_user_confirm_reply rp;
6428 int err;
6429
6430 cmd = mgmt_pending_find(opcode, hdev);
6431 if (!cmd)
6432 return -ENOENT;
6433
6434 bacpy(&rp.addr.bdaddr, bdaddr);
6435 rp.addr.type = link_to_bdaddr(link_type, addr_type);
6436 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
6437 &rp, sizeof(rp));
6438
6439 mgmt_pending_remove(cmd);
6440
6441 return err;
6442 }
6443
/* Complete a pending User Confirm Reply command. */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}
6450
/* Complete a pending User Confirm Neg Reply command. */
int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}
6458
/* Complete a pending User Passkey Reply command. */
int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}
6465
/* Complete a pending User Passkey Neg Reply command. */
int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
6473
6474 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
6475 u8 link_type, u8 addr_type, u32 passkey,
6476 u8 entered)
6477 {
6478 struct mgmt_ev_passkey_notify ev;
6479
6480 BT_DBG("%s", hdev->name);
6481
6482 bacpy(&ev.addr.bdaddr, bdaddr);
6483 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6484 ev.passkey = __cpu_to_le32(passkey);
6485 ev.entered = entered;
6486
6487 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
6488 }
6489
6490 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6491 u8 addr_type, u8 status)
6492 {
6493 struct mgmt_ev_auth_failed ev;
6494
6495 bacpy(&ev.addr.bdaddr, bdaddr);
6496 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6497 ev.status = mgmt_status(status);
6498
6499 mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
6500 }
6501
/* Completion handler for Write Auth Enable: on failure, fail pending
 * Set Link Security commands; on success, sync HCI_LINK_SECURITY with
 * the controller's HCI_AUTH state, respond to pending commands and
 * emit New Settings if the flag changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !test_and_set_bit(HCI_LINK_SECURITY,
					    &hdev->dev_flags);
	else
		changed = test_and_clear_bit(HCI_LINK_SECURITY,
					     &hdev->dev_flags);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	/* settings_rsp stored a held socket reference in match.sk */
	if (match.sk)
		sock_put(match.sk);
}
6530
/* Queue a Write EIR command with all-zero data, clearing both the
 * cached copy in hdev->eir and the controller's EIR. No-op when the
 * controller lacks extended inquiry response support.
 */
static void clear_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!lmp_ext_inq_capable(hdev))
		return;

	memset(hdev->eir, 0, sizeof(hdev->eir));

	memset(&cp, 0, sizeof(cp));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}
6545
/* Completion handler for Write SSP Mode: reconcile HCI_SSP_ENABLED
 * (and the dependent HCI_HS_ENABLED) flags with the outcome, respond
 * to pending Set SSP commands and refresh or clear the EIR data.
 */
void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	struct hci_request req;
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* A failed enable rolls the flags back; announce the
		 * reverted settings if the SSP flag had been set.
		 */
		if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
						 &hdev->dev_flags)) {
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		/* HS depends on SSP: count an HS clear as a change when
		 * SSP was already off, otherwise just clear it.
		 */
		if (!changed)
			changed = test_and_clear_bit(HCI_HS_ENABLED,
						     &hdev->dev_flags);
		else
			clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_req_init(&req, hdev);

	if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
		if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
				    sizeof(enable), &enable);
		update_eir(&req);
	} else {
		clear_eir(&req);
	}

	hci_req_run(&req, NULL);
}
6598
/* Completion handler for enabling/disabling Secure Connections:
 * reconcile HCI_SC_ENABLED/HCI_SC_ONLY with the outcome, respond to
 * pending Set Secure Connections commands and emit New Settings if
 * anything changed.
 */
void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed = false;

	if (status) {
		u8 mgmt_err = mgmt_status(status);

		/* A failed enable rolls the flags back; announce the
		 * reverted settings if the SC flag had been set.
		 */
		if (enable) {
			if (test_and_clear_bit(HCI_SC_ENABLED,
					       &hdev->dev_flags))
				new_settings(hdev, NULL);
			clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	if (enable) {
		changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	} else {
		changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
			     settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
6635
6636 static void sk_lookup(struct pending_cmd *cmd, void *data)
6637 {
6638 struct cmd_lookup *match = data;
6639
6640 if (match->sk == NULL) {
6641 match->sk = cmd->sk;
6642 sock_hold(match->sk);
6643 }
6644 }
6645
/* Completion handler for class-of-device updates: pick a requesting
 * socket from any pending Set Dev Class / Add UUID / Remove UUID
 * command and, on success, broadcast the new class to everyone else.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status)
		mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
			   NULL);

	/* sk_lookup took a reference on the first socket it found */
	if (match.sk)
		sock_put(match.sk);
}
6662
/* Completion handler for local name changes: cache the name when the
 * change was not mgmt-initiated and send a Local Name Changed event,
 * except while a power-on sequence is in progress.
 */
void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
{
	struct mgmt_cp_set_local_name ev;
	struct pending_cmd *cmd;

	if (status)
		return;

	memset(&ev, 0, sizeof(ev));
	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);

	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
	if (!cmd) {
		/* Not mgmt-initiated: update the cached name here */
		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));

		/* If this is a HCI command related to powering on the
		 * HCI dev don't send any mgmt signals.
		 */
		if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
			return;
	}

	/* Skip the requester's socket when the change was mgmt-initiated */
	mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
		   cmd ? cmd->sk : NULL);
}
6689
/* Completion handler for Read Local OOB Data: respond to the pending
 * command with either the extended (192+256 bit, when Secure
 * Connections is enabled and 256-bit data is available) or the legacy
 * (192 bit only) reply format.
 */
void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
				       u8 *randomizer192, u8 *hash256,
				       u8 *randomizer256, u8 status)
{
	struct pending_cmd *cmd;

	BT_DBG("%s status %u", hdev->name, status);

	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
	if (!cmd)
		return;

	if (status) {
		cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			   mgmt_status(status));
	} else {
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
		    hash256 && randomizer256) {
			/* Extended reply with both P-192 and P-256 data */
			struct mgmt_rp_read_local_oob_ext_data rp;

			memcpy(rp.hash192, hash192, sizeof(rp.hash192));
			memcpy(rp.randomizer192, randomizer192,
			       sizeof(rp.randomizer192));

			memcpy(rp.hash256, hash256, sizeof(rp.hash256));
			memcpy(rp.randomizer256, randomizer256,
			       sizeof(rp.randomizer256));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		} else {
			/* Legacy reply with P-192 data only */
			struct mgmt_rp_read_local_oob_data rp;

			memcpy(rp.hash, hash192, sizeof(rp.hash));
			memcpy(rp.randomizer, randomizer192,
			       sizeof(rp.randomizer));

			cmd_complete(cmd->sk, hdev->id,
				     MGMT_OP_READ_LOCAL_OOB_DATA, 0,
				     &rp, sizeof(rp));
		}
	}

	mgmt_pending_remove(cmd);
}
6736
/* Send a Device Found event for a discovered remote device, combining
 * EIR data, an optional class-of-device field and scan response data
 * into a single event buffer.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	char buf[512];
	struct mgmt_ev_device_found *ev = (void *) buf;
	size_t ev_size;

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
			return;
	}

	/* Make sure that the buffer is big enough. The 5 extra bytes
	 * are for the potential CoD field.
	 */
	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
		return;

	memset(buf, 0, sizeof(buf));

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		memcpy(ev->eir, eir, eir_len);

	/* Append the class of device unless the EIR already carries one */
	if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
					  dev_class, 3);

	if (scan_rsp_len > 0)
		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);

	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;

	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
}
6784
6785 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6786 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
6787 {
6788 struct mgmt_ev_device_found *ev;
6789 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
6790 u16 eir_len;
6791
6792 ev = (struct mgmt_ev_device_found *) buf;
6793
6794 memset(buf, 0, sizeof(buf));
6795
6796 bacpy(&ev->addr.bdaddr, bdaddr);
6797 ev->addr.type = link_to_bdaddr(link_type, addr_type);
6798 ev->rssi = rssi;
6799
6800 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
6801 name_len);
6802
6803 ev->eir_len = cpu_to_le16(eir_len);
6804
6805 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
6806 }
6807
/* Handle a discovery state change: complete the matching pending
 * Start/Stop Discovery command (if any) and broadcast a Discovering
 * event with the current discovery type.
 */
void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
{
	struct mgmt_ev_discovering ev;
	struct pending_cmd *cmd;

	BT_DBG("%s discovering %u", hdev->name, discovering);

	if (discovering)
		cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
	else
		cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);

	if (cmd != NULL) {
		/* Both commands reply with the discovery type */
		u8 type = hdev->discovery.type;

		cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
			     sizeof(type));
		mgmt_pending_remove(cmd);
	}

	memset(&ev, 0, sizeof(ev));
	ev.type = hdev->discovery.type;
	ev.discovering = discovering;

	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
6834
/* Request completion callback for mgmt_reenable_advertising(); it only
 * logs the outcome.
 */
static void adv_enable_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status %u", hdev->name, status);
}
6839
/* Re-arm LE advertising, but only when it is still enabled in the
 * mgmt settings (HCI_ADVERTISING flag set).
 */
void mgmt_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);
	enable_advertising(&req);
	hci_req_run(&req, adv_enable_complete);
}