net/bluetooth/mgmt.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI Management interface */
26
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
33 #include <net/bluetooth/smp.h>
34
35 bool enable_hs;
36
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 3
39
40 static const u16 mgmt_commands[] = {
41 MGMT_OP_READ_INDEX_LIST,
42 MGMT_OP_READ_INFO,
43 MGMT_OP_SET_POWERED,
44 MGMT_OP_SET_DISCOVERABLE,
45 MGMT_OP_SET_CONNECTABLE,
46 MGMT_OP_SET_FAST_CONNECTABLE,
47 MGMT_OP_SET_PAIRABLE,
48 MGMT_OP_SET_LINK_SECURITY,
49 MGMT_OP_SET_SSP,
50 MGMT_OP_SET_HS,
51 MGMT_OP_SET_LE,
52 MGMT_OP_SET_DEV_CLASS,
53 MGMT_OP_SET_LOCAL_NAME,
54 MGMT_OP_ADD_UUID,
55 MGMT_OP_REMOVE_UUID,
56 MGMT_OP_LOAD_LINK_KEYS,
57 MGMT_OP_LOAD_LONG_TERM_KEYS,
58 MGMT_OP_DISCONNECT,
59 MGMT_OP_GET_CONNECTIONS,
60 MGMT_OP_PIN_CODE_REPLY,
61 MGMT_OP_PIN_CODE_NEG_REPLY,
62 MGMT_OP_SET_IO_CAPABILITY,
63 MGMT_OP_PAIR_DEVICE,
64 MGMT_OP_CANCEL_PAIR_DEVICE,
65 MGMT_OP_UNPAIR_DEVICE,
66 MGMT_OP_USER_CONFIRM_REPLY,
67 MGMT_OP_USER_CONFIRM_NEG_REPLY,
68 MGMT_OP_USER_PASSKEY_REPLY,
69 MGMT_OP_USER_PASSKEY_NEG_REPLY,
70 MGMT_OP_READ_LOCAL_OOB_DATA,
71 MGMT_OP_ADD_REMOTE_OOB_DATA,
72 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
73 MGMT_OP_START_DISCOVERY,
74 MGMT_OP_STOP_DISCOVERY,
75 MGMT_OP_CONFIRM_NAME,
76 MGMT_OP_BLOCK_DEVICE,
77 MGMT_OP_UNBLOCK_DEVICE,
78 MGMT_OP_SET_DEVICE_ID,
79 };
80
81 static const u16 mgmt_events[] = {
82 MGMT_EV_CONTROLLER_ERROR,
83 MGMT_EV_INDEX_ADDED,
84 MGMT_EV_INDEX_REMOVED,
85 MGMT_EV_NEW_SETTINGS,
86 MGMT_EV_CLASS_OF_DEV_CHANGED,
87 MGMT_EV_LOCAL_NAME_CHANGED,
88 MGMT_EV_NEW_LINK_KEY,
89 MGMT_EV_NEW_LONG_TERM_KEY,
90 MGMT_EV_DEVICE_CONNECTED,
91 MGMT_EV_DEVICE_DISCONNECTED,
92 MGMT_EV_CONNECT_FAILED,
93 MGMT_EV_PIN_CODE_REQUEST,
94 MGMT_EV_USER_CONFIRM_REQUEST,
95 MGMT_EV_USER_PASSKEY_REQUEST,
96 MGMT_EV_AUTH_FAILED,
97 MGMT_EV_DEVICE_FOUND,
98 MGMT_EV_DISCOVERING,
99 MGMT_EV_DEVICE_BLOCKED,
100 MGMT_EV_DEVICE_UNBLOCKED,
101 MGMT_EV_DEVICE_UNPAIRED,
102 MGMT_EV_PASSKEY_NOTIFY,
103 };
104
105 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
106
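/* From mgmt's point of view a controller is powered only when it is up and
 * not merely being kept on temporarily by the auto-power-off logic.
 */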
107 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
108 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
109
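/* A mgmt command that has been received but not yet answered. Entries are
 * kept on hdev->mgmt_pending until the matching HCI activity completes and
 * the response is delivered to the originating socket.
 */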
110 struct pending_cmd {
111 struct list_head list;
112 u16 opcode;
113 int index;
114 void *param;
115 struct sock *sk;
116 void *user_data;
117 };
118
119 /* HCI to MGMT error code conversion table */
120 static u8 mgmt_status_table[] = {
121 MGMT_STATUS_SUCCESS,
122 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
123 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
124 MGMT_STATUS_FAILED, /* Hardware Failure */
125 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
126 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
127 MGMT_STATUS_NOT_PAIRED, /* PIN or Key Missing */
128 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
129 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
130 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
131 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
132 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
133 MGMT_STATUS_BUSY, /* Command Disallowed */
134 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
135 MGMT_STATUS_REJECTED, /* Rejected Security */
136 MGMT_STATUS_REJECTED, /* Rejected Personal */
137 MGMT_STATUS_TIMEOUT, /* Host Timeout */
138 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
139 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
140 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
141 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
142 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
143 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
144 MGMT_STATUS_BUSY, /* Repeated Attempts */
145 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
146 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
147 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
148 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
149 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
150 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
151 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
152 MGMT_STATUS_FAILED, /* Unspecified Error */
153 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
154 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
155 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
156 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
157 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
158 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
159 MGMT_STATUS_FAILED, /* Unit Link Key Used */
160 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
161 MGMT_STATUS_TIMEOUT, /* Instant Passed */
162 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
163 MGMT_STATUS_FAILED, /* Transaction Collision */
164 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
165 MGMT_STATUS_REJECTED, /* QoS Rejected */
166 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
167 MGMT_STATUS_REJECTED, /* Insufficient Security */
168 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
169 MGMT_STATUS_BUSY, /* Role Switch Pending */
170 MGMT_STATUS_FAILED, /* Slot Violation */
171 MGMT_STATUS_FAILED, /* Role Switch Failed */
172 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
173 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
174 MGMT_STATUS_BUSY, /* Host Busy Pairing */
175 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
176 MGMT_STATUS_BUSY, /* Controller Busy */
177 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
178 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
179 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
180 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
181 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
182 };
183
184 bool mgmt_valid_hdev(struct hci_dev *hdev)
185 {
186 return hdev->dev_type == HCI_BREDR;
187 }
188
189 static u8 mgmt_status(u8 hci_status)
190 {
191 if (hci_status < ARRAY_SIZE(mgmt_status_table))
192 return mgmt_status_table[hci_status];
193
194 return MGMT_STATUS_FAILED;
195 }
196
197 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
198 {
199 struct sk_buff *skb;
200 struct mgmt_hdr *hdr;
201 struct mgmt_ev_cmd_status *ev;
202 int err;
203
204 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
205
206 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
207 if (!skb)
208 return -ENOMEM;
209
210 hdr = (void *) skb_put(skb, sizeof(*hdr));
211
212 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
213 hdr->index = cpu_to_le16(index);
214 hdr->len = cpu_to_le16(sizeof(*ev));
215
216 ev = (void *) skb_put(skb, sizeof(*ev));
217 ev->status = status;
218 ev->opcode = cpu_to_le16(cmd);
219
220 err = sock_queue_rcv_skb(sk, skb);
221 if (err < 0)
222 kfree_skb(skb);
223
224 return err;
225 }
226
227 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
228 void *rp, size_t rp_len)
229 {
230 struct sk_buff *skb;
231 struct mgmt_hdr *hdr;
232 struct mgmt_ev_cmd_complete *ev;
233 int err;
234
235 BT_DBG("sock %p", sk);
236
237 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
238 if (!skb)
239 return -ENOMEM;
240
241 hdr = (void *) skb_put(skb, sizeof(*hdr));
242
243 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
244 hdr->index = cpu_to_le16(index);
245 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
246
247 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
248 ev->opcode = cpu_to_le16(cmd);
249 ev->status = status;
250
251 if (rp)
252 memcpy(ev->data, rp, rp_len);
253
254 err = sock_queue_rcv_skb(sk, skb);
255 if (err < 0)
256 kfree_skb(skb);
257
258 return err;
259 }
260
261 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
262 u16 data_len)
263 {
264 struct mgmt_rp_read_version rp;
265
266 BT_DBG("sock %p", sk);
267
268 rp.version = MGMT_VERSION;
269 rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
270
271 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
272 sizeof(rp));
273 }
274
275 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
276 u16 data_len)
277 {
278 struct mgmt_rp_read_commands *rp;
279 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
280 const u16 num_events = ARRAY_SIZE(mgmt_events);
281 __le16 *opcode;
282 size_t rp_size;
283 int i, err;
284
285 BT_DBG("sock %p", sk);
286
287 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
288
289 rp = kmalloc(rp_size, GFP_KERNEL);
290 if (!rp)
291 return -ENOMEM;
292
293 rp->num_commands = __constant_cpu_to_le16(num_commands);
294 rp->num_events = __constant_cpu_to_le16(num_events);
295
296 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
297 put_unaligned_le16(mgmt_commands[i], opcode);
298
299 for (i = 0; i < num_events; i++, opcode++)
300 put_unaligned_le16(mgmt_events[i], opcode);
301
302 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
303 rp_size);
304 kfree(rp);
305
306 return err;
307 }
308
309 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
310 u16 data_len)
311 {
312 struct mgmt_rp_read_index_list *rp;
313 struct hci_dev *d;
314 size_t rp_len;
315 u16 count;
316 int err;
317
318 BT_DBG("sock %p", sk);
319
320 read_lock(&hci_dev_list_lock);
321
322 count = 0;
323 list_for_each_entry(d, &hci_dev_list, list) {
324 if (!mgmt_valid_hdev(d))
325 continue;
326
327 count++;
328 }
329
330 rp_len = sizeof(*rp) + (2 * count);
331 rp = kmalloc(rp_len, GFP_ATOMIC);
332 if (!rp) {
333 read_unlock(&hci_dev_list_lock);
334 return -ENOMEM;
335 }
336
337 count = 0;
338 list_for_each_entry(d, &hci_dev_list, list) {
339 if (test_bit(HCI_SETUP, &d->dev_flags))
340 continue;
341
342 if (!mgmt_valid_hdev(d))
343 continue;
344
345 rp->index[count++] = cpu_to_le16(d->id);
346 BT_DBG("Added hci%u", d->id);
347 }
348
349 rp->num_controllers = cpu_to_le16(count);
350 rp_len = sizeof(*rp) + (2 * count);
351
352 read_unlock(&hci_dev_list_lock);
353
354 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
355 rp_len);
356
357 kfree(rp);
358
359 return err;
360 }
361
362 static u32 get_supported_settings(struct hci_dev *hdev)
363 {
364 u32 settings = 0;
365
366 settings |= MGMT_SETTING_POWERED;
367 settings |= MGMT_SETTING_PAIRABLE;
368
369 if (lmp_ssp_capable(hdev))
370 settings |= MGMT_SETTING_SSP;
371
372 if (lmp_bredr_capable(hdev)) {
373 settings |= MGMT_SETTING_CONNECTABLE;
374 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
375 settings |= MGMT_SETTING_FAST_CONNECTABLE;
376 settings |= MGMT_SETTING_DISCOVERABLE;
377 settings |= MGMT_SETTING_BREDR;
378 settings |= MGMT_SETTING_LINK_SECURITY;
379 }
380
381 if (enable_hs)
382 settings |= MGMT_SETTING_HS;
383
384 if (lmp_le_capable(hdev))
385 settings |= MGMT_SETTING_LE;
386
387 return settings;
388 }
389
390 static u32 get_current_settings(struct hci_dev *hdev)
391 {
392 u32 settings = 0;
393
394 if (hdev_is_powered(hdev))
395 settings |= MGMT_SETTING_POWERED;
396
397 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
398 settings |= MGMT_SETTING_CONNECTABLE;
399
400 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
401 settings |= MGMT_SETTING_FAST_CONNECTABLE;
402
403 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
404 settings |= MGMT_SETTING_DISCOVERABLE;
405
406 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
407 settings |= MGMT_SETTING_PAIRABLE;
408
409 if (lmp_bredr_capable(hdev))
410 settings |= MGMT_SETTING_BREDR;
411
412 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
413 settings |= MGMT_SETTING_LE;
414
415 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
416 settings |= MGMT_SETTING_LINK_SECURITY;
417
418 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
419 settings |= MGMT_SETTING_SSP;
420
421 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
422 settings |= MGMT_SETTING_HS;
423
424 return settings;
425 }
426
427 #define PNP_INFO_SVCLASS_ID 0x1200
428
429 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
430 {
431 u8 *ptr = data, *uuids_start = NULL;
432 struct bt_uuid *uuid;
433
434 if (len < 4)
435 return ptr;
436
437 list_for_each_entry(uuid, &hdev->uuids, list) {
438 u16 uuid16;
439
440 if (uuid->size != 16)
441 continue;
442
443 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
444 if (uuid16 < 0x1100)
445 continue;
446
447 if (uuid16 == PNP_INFO_SVCLASS_ID)
448 continue;
449
450 if (!uuids_start) {
451 uuids_start = ptr;
452 uuids_start[0] = 1;
453 uuids_start[1] = EIR_UUID16_ALL;
454 ptr += 2;
455 }
456
457 /* Stop if not enough space to put next UUID */
458 if ((ptr - data) + sizeof(u16) > len) {
459 uuids_start[1] = EIR_UUID16_SOME;
460 break;
461 }
462
463 *ptr++ = (uuid16 & 0x00ff);
464 *ptr++ = (uuid16 & 0xff00) >> 8;
465 uuids_start[0] += sizeof(uuid16);
466 }
467
468 return ptr;
469 }
470
471 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
472 {
473 u8 *ptr = data, *uuids_start = NULL;
474 struct bt_uuid *uuid;
475
476 if (len < 6)
477 return ptr;
478
479 list_for_each_entry(uuid, &hdev->uuids, list) {
480 if (uuid->size != 32)
481 continue;
482
483 if (!uuids_start) {
484 uuids_start = ptr;
485 uuids_start[0] = 1;
486 uuids_start[1] = EIR_UUID32_ALL;
487 ptr += 2;
488 }
489
490 /* Stop if not enough space to put next UUID */
491 if ((ptr - data) + sizeof(u32) > len) {
492 uuids_start[1] = EIR_UUID32_SOME;
493 break;
494 }
495
496 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
497 ptr += sizeof(u32);
498 uuids_start[0] += sizeof(u32);
499 }
500
501 return ptr;
502 }
503
504 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
505 {
506 u8 *ptr = data, *uuids_start = NULL;
507 struct bt_uuid *uuid;
508
509 if (len < 18)
510 return ptr;
511
512 list_for_each_entry(uuid, &hdev->uuids, list) {
513 if (uuid->size != 128)
514 continue;
515
516 if (!uuids_start) {
517 uuids_start = ptr;
518 uuids_start[0] = 1;
519 uuids_start[1] = EIR_UUID128_ALL;
520 ptr += 2;
521 }
522
523 /* Stop if not enough space to put next UUID */
524 if ((ptr - data) + 16 > len) {
525 uuids_start[1] = EIR_UUID128_SOME;
526 break;
527 }
528
529 memcpy(ptr, uuid->uuid, 16);
530 ptr += 16;
531 uuids_start[0] += 16;
532 }
533
534 return ptr;
535 }
536
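/* Assemble the extended inquiry response payload: local name, inquiry TX
 * power, Device ID record and the 16/32/128-bit service UUID lists, within
 * HCI_MAX_EIR_LENGTH bytes.
 */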
537 static void create_eir(struct hci_dev *hdev, u8 *data)
538 {
539 u8 *ptr = data;
540 size_t name_len;
541
542 name_len = strlen(hdev->dev_name);
543
544 if (name_len > 0) {
545 /* EIR Data type */
546 if (name_len > 48) {
547 name_len = 48;
548 ptr[1] = EIR_NAME_SHORT;
549 } else
550 ptr[1] = EIR_NAME_COMPLETE;
551
552 /* EIR Data length */
553 ptr[0] = name_len + 1;
554
555 memcpy(ptr + 2, hdev->dev_name, name_len);
556
557 ptr += (name_len + 2);
558 }
559
560 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
561 ptr[0] = 2;
562 ptr[1] = EIR_TX_POWER;
563 ptr[2] = (u8) hdev->inq_tx_power;
564
565 ptr += 3;
566 }
567
568 if (hdev->devid_source > 0) {
569 ptr[0] = 9;
570 ptr[1] = EIR_DEVICE_ID;
571
572 put_unaligned_le16(hdev->devid_source, ptr + 2);
573 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
574 put_unaligned_le16(hdev->devid_product, ptr + 6);
575 put_unaligned_le16(hdev->devid_version, ptr + 8);
576
577 ptr += 10;
578 }
579
580 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
581 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
582 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
583 }
584
585 static void update_eir(struct hci_request *req)
586 {
587 struct hci_dev *hdev = req->hdev;
588 struct hci_cp_write_eir cp;
589
590 if (!hdev_is_powered(hdev))
591 return;
592
593 if (!lmp_ext_inq_capable(hdev))
594 return;
595
596 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
597 return;
598
599 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
600 return;
601
602 memset(&cp, 0, sizeof(cp));
603
604 create_eir(hdev, cp.data);
605
606 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
607 return;
608
609 memcpy(hdev->eir, cp.data, sizeof(cp.data));
610
611 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
612 }
613
614 static u8 get_service_classes(struct hci_dev *hdev)
615 {
616 struct bt_uuid *uuid;
617 u8 val = 0;
618
619 list_for_each_entry(uuid, &hdev->uuids, list)
620 val |= uuid->svc_hint;
621
622 return val;
623 }
624
625 static void update_class(struct hci_request *req)
626 {
627 struct hci_dev *hdev = req->hdev;
628 u8 cod[3];
629
630 BT_DBG("%s", hdev->name);
631
632 if (!hdev_is_powered(hdev))
633 return;
634
635 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
636 return;
637
638 cod[0] = hdev->minor_class;
639 cod[1] = hdev->major_class;
640 cod[2] = get_service_classes(hdev);
641
642 if (memcmp(cod, hdev->dev_class, 3) == 0)
643 return;
644
645 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
646 }
647
648 static void service_cache_off(struct work_struct *work)
649 {
650 struct hci_dev *hdev = container_of(work, struct hci_dev,
651 service_cache.work);
652 struct hci_request req;
653
654 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
655 return;
656
657 hci_req_init(&req, hdev);
658
659 hci_dev_lock(hdev);
660
661 update_eir(&req);
662 update_class(&req);
663
664 hci_dev_unlock(hdev);
665
666 hci_req_run(&req, NULL);
667 }
668
669 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
670 {
671 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
672 return;
673
674 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
675
676 	/* Non-mgmt controlled devices get this bit set
677 	 * implicitly so that pairing works for them. For mgmt,
678 	 * however, we require user-space to enable it
679 	 * explicitly.
680 	 */
681 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
682 }
683
684 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
685 void *data, u16 data_len)
686 {
687 struct mgmt_rp_read_info rp;
688
689 BT_DBG("sock %p %s", sk, hdev->name);
690
691 hci_dev_lock(hdev);
692
693 memset(&rp, 0, sizeof(rp));
694
695 bacpy(&rp.bdaddr, &hdev->bdaddr);
696
697 rp.version = hdev->hci_ver;
698 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
699
700 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
701 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
702
703 memcpy(rp.dev_class, hdev->dev_class, 3);
704
705 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
706 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
707
708 hci_dev_unlock(hdev);
709
710 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
711 sizeof(rp));
712 }
713
714 static void mgmt_pending_free(struct pending_cmd *cmd)
715 {
716 sock_put(cmd->sk);
717 kfree(cmd->param);
718 kfree(cmd);
719 }
720
721 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
722 struct hci_dev *hdev, void *data,
723 u16 len)
724 {
725 struct pending_cmd *cmd;
726
727 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
728 if (!cmd)
729 return NULL;
730
731 cmd->opcode = opcode;
732 cmd->index = hdev->id;
733
734 cmd->param = kmalloc(len, GFP_KERNEL);
735 if (!cmd->param) {
736 kfree(cmd);
737 return NULL;
738 }
739
740 if (data)
741 memcpy(cmd->param, data, len);
742
743 cmd->sk = sk;
744 sock_hold(sk);
745
746 list_add(&cmd->list, &hdev->mgmt_pending);
747
748 return cmd;
749 }
750
751 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
752 void (*cb)(struct pending_cmd *cmd,
753 void *data),
754 void *data)
755 {
756 struct pending_cmd *cmd, *tmp;
757
758 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
759 if (opcode > 0 && cmd->opcode != opcode)
760 continue;
761
762 cb(cmd, data);
763 }
764 }
765
766 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
767 {
768 struct pending_cmd *cmd;
769
770 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
771 if (cmd->opcode == opcode)
772 return cmd;
773 }
774
775 return NULL;
776 }
777
778 static void mgmt_pending_remove(struct pending_cmd *cmd)
779 {
780 list_del(&cmd->list);
781 mgmt_pending_free(cmd);
782 }
783
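/* Answer a settings-related command with a Command Complete event carrying
 * the controller's current settings bitmask.
 */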
784 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
785 {
786 __le32 settings = cpu_to_le32(get_current_settings(hdev));
787
788 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
789 sizeof(settings));
790 }
791
792 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
793 u16 len)
794 {
795 struct mgmt_mode *cp = data;
796 struct pending_cmd *cmd;
797 int err;
798
799 BT_DBG("request for %s", hdev->name);
800
801 if (cp->val != 0x00 && cp->val != 0x01)
802 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
803 MGMT_STATUS_INVALID_PARAMS);
804
805 hci_dev_lock(hdev);
806
807 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
808 cancel_delayed_work(&hdev->power_off);
809
810 if (cp->val) {
811 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
812 data, len);
813 err = mgmt_powered(hdev, 1);
814 goto failed;
815 }
816 }
817
818 if (!!cp->val == hdev_is_powered(hdev)) {
819 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
820 goto failed;
821 }
822
823 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
824 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
825 MGMT_STATUS_BUSY);
826 goto failed;
827 }
828
829 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
830 if (!cmd) {
831 err = -ENOMEM;
832 goto failed;
833 }
834
835 if (cp->val)
836 queue_work(hdev->req_workqueue, &hdev->power_on);
837 else
838 queue_work(hdev->req_workqueue, &hdev->power_off.work);
839
840 err = 0;
841
842 failed:
843 hci_dev_unlock(hdev);
844 return err;
845 }
846
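/* Build a mgmt event with the given opcode and payload and broadcast it to
 * every open HCI control socket except skip_sk.
 */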
847 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
848 struct sock *skip_sk)
849 {
850 struct sk_buff *skb;
851 struct mgmt_hdr *hdr;
852
853 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
854 if (!skb)
855 return -ENOMEM;
856
857 hdr = (void *) skb_put(skb, sizeof(*hdr));
858 hdr->opcode = cpu_to_le16(event);
859 if (hdev)
860 hdr->index = cpu_to_le16(hdev->id);
861 else
862 hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
863 hdr->len = cpu_to_le16(data_len);
864
865 if (data)
866 memcpy(skb_put(skb, data_len), data, data_len);
867
868 /* Time stamp */
869 __net_timestamp(skb);
870
871 hci_send_to_control(skb, skip_sk);
872 kfree_skb(skb);
873
874 return 0;
875 }
876
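/* Broadcast the controller's current settings bitmask as a New Settings
 * event to every mgmt socket except skip.
 */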
877 static int new_settings(struct hci_dev *hdev, struct sock *skip)
878 {
879 __le32 ev;
880
881 ev = cpu_to_le32(get_current_settings(hdev));
882
883 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
884 }
885
886 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
887 u16 len)
888 {
889 struct mgmt_cp_set_discoverable *cp = data;
890 struct pending_cmd *cmd;
891 u16 timeout;
892 u8 scan;
893 int err;
894
895 BT_DBG("request for %s", hdev->name);
896
897 if (!lmp_bredr_capable(hdev))
898 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
899 MGMT_STATUS_NOT_SUPPORTED);
900
901 if (cp->val != 0x00 && cp->val != 0x01)
902 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
903 MGMT_STATUS_INVALID_PARAMS);
904
905 timeout = __le16_to_cpu(cp->timeout);
906 if (!cp->val && timeout > 0)
907 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
908 MGMT_STATUS_INVALID_PARAMS);
909
910 hci_dev_lock(hdev);
911
912 if (!hdev_is_powered(hdev) && timeout > 0) {
913 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
914 MGMT_STATUS_NOT_POWERED);
915 goto failed;
916 }
917
918 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
919 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
920 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
921 MGMT_STATUS_BUSY);
922 goto failed;
923 }
924
925 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
926 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
927 MGMT_STATUS_REJECTED);
928 goto failed;
929 }
930
931 if (!hdev_is_powered(hdev)) {
932 bool changed = false;
933
934 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
935 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
936 changed = true;
937 }
938
939 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
940 if (err < 0)
941 goto failed;
942
943 if (changed)
944 err = new_settings(hdev, sk);
945
946 goto failed;
947 }
948
949 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
950 if (hdev->discov_timeout > 0) {
951 cancel_delayed_work(&hdev->discov_off);
952 hdev->discov_timeout = 0;
953 }
954
955 if (cp->val && timeout > 0) {
956 hdev->discov_timeout = timeout;
957 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
958 msecs_to_jiffies(hdev->discov_timeout * 1000));
959 }
960
961 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
962 goto failed;
963 }
964
965 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
966 if (!cmd) {
967 err = -ENOMEM;
968 goto failed;
969 }
970
971 scan = SCAN_PAGE;
972
973 if (cp->val)
974 scan |= SCAN_INQUIRY;
975 else
976 cancel_delayed_work(&hdev->discov_off);
977
978 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
979 if (err < 0)
980 mgmt_pending_remove(cmd);
981
982 if (cp->val)
983 hdev->discov_timeout = timeout;
984
985 failed:
986 hci_dev_unlock(hdev);
987 return err;
988 }
989
990 static void write_fast_connectable(struct hci_request *req, bool enable)
991 {
992 struct hci_dev *hdev = req->hdev;
993 struct hci_cp_write_page_scan_activity acp;
994 u8 type;
995
996 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
997 return;
998
999 if (enable) {
1000 type = PAGE_SCAN_TYPE_INTERLACED;
1001
1002 /* 160 msec page scan interval */
1003 acp.interval = __constant_cpu_to_le16(0x0100);
1004 } else {
1005 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1006
1007 /* default 1.28 sec page scan */
1008 acp.interval = __constant_cpu_to_le16(0x0800);
1009 }
1010
1011 acp.window = __constant_cpu_to_le16(0x0012);
1012
1013 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1014 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1015 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1016 sizeof(acp), &acp);
1017
1018 if (hdev->page_scan_type != type)
1019 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
1020 }
1021
1022 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1023 {
1024 struct pending_cmd *cmd;
1025
1026 BT_DBG("status 0x%02x", status);
1027
1028 hci_dev_lock(hdev);
1029
1030 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1031 if (!cmd)
1032 goto unlock;
1033
1034 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1035
1036 mgmt_pending_remove(cmd);
1037
1038 unlock:
1039 hci_dev_unlock(hdev);
1040 }
1041
1042 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1043 u16 len)
1044 {
1045 struct mgmt_mode *cp = data;
1046 struct pending_cmd *cmd;
1047 struct hci_request req;
1048 u8 scan;
1049 int err;
1050
1051 BT_DBG("request for %s", hdev->name);
1052
1053 if (!lmp_bredr_capable(hdev))
1054 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1055 MGMT_STATUS_NOT_SUPPORTED);
1056
1057 if (cp->val != 0x00 && cp->val != 0x01)
1058 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1059 MGMT_STATUS_INVALID_PARAMS);
1060
1061 hci_dev_lock(hdev);
1062
1063 if (!hdev_is_powered(hdev)) {
1064 bool changed = false;
1065
1066 if (!!cp->val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1067 changed = true;
1068
1069 if (cp->val) {
1070 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1071 } else {
1072 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1073 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1074 }
1075
1076 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1077 if (err < 0)
1078 goto failed;
1079
1080 if (changed)
1081 err = new_settings(hdev, sk);
1082
1083 goto failed;
1084 }
1085
1086 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1087 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1088 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1089 MGMT_STATUS_BUSY);
1090 goto failed;
1091 }
1092
1093 if (!!cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
1094 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1095 goto failed;
1096 }
1097
1098 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1099 if (!cmd) {
1100 err = -ENOMEM;
1101 goto failed;
1102 }
1103
1104 if (cp->val) {
1105 scan = SCAN_PAGE;
1106 } else {
1107 scan = 0;
1108
1109 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1110 hdev->discov_timeout > 0)
1111 cancel_delayed_work(&hdev->discov_off);
1112 }
1113
1114 hci_req_init(&req, hdev);
1115
1116 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1117
1118 /* If we're going from non-connectable to connectable or
1119 * vice-versa when fast connectable is enabled ensure that fast
1120 * connectable gets disabled. write_fast_connectable won't do
1121 * anything if the page scan parameters are already what they
1122 * should be.
1123 */
1124 if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1125 write_fast_connectable(&req, false);
1126
1127 err = hci_req_run(&req, set_connectable_complete);
1128 if (err < 0)
1129 mgmt_pending_remove(cmd);
1130
1131 failed:
1132 hci_dev_unlock(hdev);
1133 return err;
1134 }
1135
1136 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1137 u16 len)
1138 {
1139 struct mgmt_mode *cp = data;
1140 int err;
1141
1142 BT_DBG("request for %s", hdev->name);
1143
1144 if (cp->val != 0x00 && cp->val != 0x01)
1145 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1146 MGMT_STATUS_INVALID_PARAMS);
1147
1148 hci_dev_lock(hdev);
1149
1150 if (cp->val)
1151 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1152 else
1153 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1154
1155 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1156 if (err < 0)
1157 goto failed;
1158
1159 err = new_settings(hdev, sk);
1160
1161 failed:
1162 hci_dev_unlock(hdev);
1163 return err;
1164 }
1165
1166 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1167 u16 len)
1168 {
1169 struct mgmt_mode *cp = data;
1170 struct pending_cmd *cmd;
1171 u8 val;
1172 int err;
1173
1174 BT_DBG("request for %s", hdev->name);
1175
1176 if (!lmp_bredr_capable(hdev))
1177 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1178 MGMT_STATUS_NOT_SUPPORTED);
1179
1180 if (cp->val != 0x00 && cp->val != 0x01)
1181 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1182 MGMT_STATUS_INVALID_PARAMS);
1183
1184 hci_dev_lock(hdev);
1185
1186 if (!hdev_is_powered(hdev)) {
1187 bool changed = false;
1188
1189 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1190 &hdev->dev_flags)) {
1191 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1192 changed = true;
1193 }
1194
1195 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1196 if (err < 0)
1197 goto failed;
1198
1199 if (changed)
1200 err = new_settings(hdev, sk);
1201
1202 goto failed;
1203 }
1204
1205 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1206 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1207 MGMT_STATUS_BUSY);
1208 goto failed;
1209 }
1210
1211 val = !!cp->val;
1212
1213 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1214 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1215 goto failed;
1216 }
1217
1218 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1219 if (!cmd) {
1220 err = -ENOMEM;
1221 goto failed;
1222 }
1223
1224 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1225 if (err < 0) {
1226 mgmt_pending_remove(cmd);
1227 goto failed;
1228 }
1229
1230 failed:
1231 hci_dev_unlock(hdev);
1232 return err;
1233 }
1234
1235 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1236 {
1237 struct mgmt_mode *cp = data;
1238 struct pending_cmd *cmd;
1239 u8 val;
1240 int err;
1241
1242 BT_DBG("request for %s", hdev->name);
1243
1244 if (!lmp_ssp_capable(hdev))
1245 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1246 MGMT_STATUS_NOT_SUPPORTED);
1247
1248 if (cp->val != 0x00 && cp->val != 0x01)
1249 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1250 MGMT_STATUS_INVALID_PARAMS);
1251
1252 hci_dev_lock(hdev);
1253
1254 val = !!cp->val;
1255
1256 if (!hdev_is_powered(hdev)) {
1257 bool changed = false;
1258
1259 if (val != test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1260 change_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
1261 changed = true;
1262 }
1263
1264 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1265 if (err < 0)
1266 goto failed;
1267
1268 if (changed)
1269 err = new_settings(hdev, sk);
1270
1271 goto failed;
1272 }
1273
1274 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
1275 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1276 MGMT_STATUS_BUSY);
1277 goto failed;
1278 }
1279
1280 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) == val) {
1281 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1282 goto failed;
1283 }
1284
1285 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1286 if (!cmd) {
1287 err = -ENOMEM;
1288 goto failed;
1289 }
1290
1291 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(val), &val);
1292 if (err < 0) {
1293 mgmt_pending_remove(cmd);
1294 goto failed;
1295 }
1296
1297 failed:
1298 hci_dev_unlock(hdev);
1299 return err;
1300 }
1301
1302 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1303 {
1304 struct mgmt_mode *cp = data;
1305
1306 BT_DBG("request for %s", hdev->name);
1307
1308 if (!enable_hs)
1309 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1310 MGMT_STATUS_NOT_SUPPORTED);
1311
1312 if (cp->val != 0x00 && cp->val != 0x01)
1313 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1314 MGMT_STATUS_INVALID_PARAMS);
1315
1316 if (cp->val)
1317 set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1318 else
1319 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1320
1321 return send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1322 }
1323
1324 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1325 {
1326 struct mgmt_mode *cp = data;
1327 struct hci_cp_write_le_host_supported hci_cp;
1328 struct pending_cmd *cmd;
1329 int err;
1330 u8 val, enabled;
1331
1332 BT_DBG("request for %s", hdev->name);
1333
1334 if (!lmp_le_capable(hdev))
1335 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1336 MGMT_STATUS_NOT_SUPPORTED);
1337
1338 if (cp->val != 0x00 && cp->val != 0x01)
1339 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1340 MGMT_STATUS_INVALID_PARAMS);
1341
1342 /* LE-only devices do not allow toggling LE on/off */
1343 if (!lmp_bredr_capable(hdev))
1344 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1345 MGMT_STATUS_REJECTED);
1346
1347 hci_dev_lock(hdev);
1348
1349 val = !!cp->val;
1350 enabled = lmp_host_le_capable(hdev);
1351
1352 if (!hdev_is_powered(hdev) || val == enabled) {
1353 bool changed = false;
1354
1355 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1356 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1357 changed = true;
1358 }
1359
1360 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1361 if (err < 0)
1362 goto unlock;
1363
1364 if (changed)
1365 err = new_settings(hdev, sk);
1366
1367 goto unlock;
1368 }
1369
1370 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
1371 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1372 MGMT_STATUS_BUSY);
1373 goto unlock;
1374 }
1375
1376 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
1377 if (!cmd) {
1378 err = -ENOMEM;
1379 goto unlock;
1380 }
1381
1382 memset(&hci_cp, 0, sizeof(hci_cp));
1383
1384 if (val) {
1385 hci_cp.le = val;
1386 hci_cp.simul = lmp_le_br_capable(hdev);
1387 }
1388
1389 err = hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
1390 &hci_cp);
1391 if (err < 0)
1392 mgmt_pending_remove(cmd);
1393
1394 unlock:
1395 hci_dev_unlock(hdev);
1396 return err;
1397 }
1398
1399 /* This is a helper function to test for pending mgmt commands that can
1400  * cause CoD or EIR HCI commands. We can only allow one such pending
1401  * mgmt command at a time since otherwise we cannot easily track what
1402  * the current values are and will be, and based on that decide whether
1403  * a new HCI command needs to be sent and, if so, with what value.
1404  */
1405 static bool pending_eir_or_class(struct hci_dev *hdev)
1406 {
1407 struct pending_cmd *cmd;
1408
1409 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1410 switch (cmd->opcode) {
1411 case MGMT_OP_ADD_UUID:
1412 case MGMT_OP_REMOVE_UUID:
1413 case MGMT_OP_SET_DEV_CLASS:
1414 case MGMT_OP_SET_POWERED:
1415 return true;
1416 }
1417 }
1418
1419 return false;
1420 }
1421
1422 static const u8 bluetooth_base_uuid[] = {
1423 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
1424 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1425 };
1426
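/* Return the effective size of a UUID: 16 or 32 bits when it is an alias of
 * the Bluetooth Base UUID (only the top 16 or 32 bits differ), otherwise a
 * full 128 bits.
 */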
1427 static u8 get_uuid_size(const u8 *uuid)
1428 {
1429 u32 val;
1430
1431 if (memcmp(uuid, bluetooth_base_uuid, 12))
1432 return 128;
1433
1434 val = get_unaligned_le32(&uuid[12]);
1435 if (val > 0xffff)
1436 return 32;
1437
1438 return 16;
1439 }
1440
1441 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
1442 {
1443 struct pending_cmd *cmd;
1444
1445 hci_dev_lock(hdev);
1446
1447 cmd = mgmt_pending_find(mgmt_op, hdev);
1448 if (!cmd)
1449 goto unlock;
1450
1451 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
1452 hdev->dev_class, 3);
1453
1454 mgmt_pending_remove(cmd);
1455
1456 unlock:
1457 hci_dev_unlock(hdev);
1458 }
1459
1460 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
1461 {
1462 BT_DBG("status 0x%02x", status);
1463
1464 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
1465 }
1466
1467 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1468 {
1469 struct mgmt_cp_add_uuid *cp = data;
1470 struct pending_cmd *cmd;
1471 struct hci_request req;
1472 struct bt_uuid *uuid;
1473 int err;
1474
1475 BT_DBG("request for %s", hdev->name);
1476
1477 hci_dev_lock(hdev);
1478
1479 if (pending_eir_or_class(hdev)) {
1480 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
1481 MGMT_STATUS_BUSY);
1482 goto failed;
1483 }
1484
1485 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
1486 if (!uuid) {
1487 err = -ENOMEM;
1488 goto failed;
1489 }
1490
1491 memcpy(uuid->uuid, cp->uuid, 16);
1492 uuid->svc_hint = cp->svc_hint;
1493 uuid->size = get_uuid_size(cp->uuid);
1494
1495 list_add_tail(&uuid->list, &hdev->uuids);
1496
1497 hci_req_init(&req, hdev);
1498
1499 update_class(&req);
1500 update_eir(&req);
1501
1502 err = hci_req_run(&req, add_uuid_complete);
1503 if (err < 0) {
1504 if (err != -ENODATA)
1505 goto failed;
1506
1507 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
1508 hdev->dev_class, 3);
1509 goto failed;
1510 }
1511
1512 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
1513 if (!cmd) {
1514 err = -ENOMEM;
1515 goto failed;
1516 }
1517
1518 err = 0;
1519
1520 failed:
1521 hci_dev_unlock(hdev);
1522 return err;
1523 }
1524
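/* Arm the service cache if the controller is powered and the cache is not
 * already active; service_cache_off will flush the pending EIR and class of
 * device updates once CACHE_TIMEOUT expires. Returns true only if the cache
 * was newly enabled.
 */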
1525 static bool enable_service_cache(struct hci_dev *hdev)
1526 {
1527 if (!hdev_is_powered(hdev))
1528 return false;
1529
1530 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1531 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
1532 CACHE_TIMEOUT);
1533 return true;
1534 }
1535
1536 return false;
1537 }
1538
1539 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
1540 {
1541 BT_DBG("status 0x%02x", status);
1542
1543 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
1544 }
1545
1546 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
1547 u16 len)
1548 {
1549 struct mgmt_cp_remove_uuid *cp = data;
1550 struct pending_cmd *cmd;
1551 struct bt_uuid *match, *tmp;
1552 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1553 struct hci_request req;
1554 int err, found;
1555
1556 BT_DBG("request for %s", hdev->name);
1557
1558 hci_dev_lock(hdev);
1559
1560 if (pending_eir_or_class(hdev)) {
1561 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1562 MGMT_STATUS_BUSY);
1563 goto unlock;
1564 }
1565
1566 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
1567 err = hci_uuids_clear(hdev);
1568
1569 if (enable_service_cache(hdev)) {
1570 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1571 0, hdev->dev_class, 3);
1572 goto unlock;
1573 }
1574
1575 goto update_class;
1576 }
1577
1578 found = 0;
1579
1580 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
1581 if (memcmp(match->uuid, cp->uuid, 16) != 0)
1582 continue;
1583
1584 list_del(&match->list);
1585 kfree(match);
1586 found++;
1587 }
1588
1589 if (found == 0) {
1590 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1591 MGMT_STATUS_INVALID_PARAMS);
1592 goto unlock;
1593 }
1594
1595 update_class:
1596 hci_req_init(&req, hdev);
1597
1598 update_class(&req);
1599 update_eir(&req);
1600
1601 err = hci_req_run(&req, remove_uuid_complete);
1602 if (err < 0) {
1603 if (err != -ENODATA)
1604 goto unlock;
1605
1606 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
1607 hdev->dev_class, 3);
1608 goto unlock;
1609 }
1610
1611 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
1612 if (!cmd) {
1613 err = -ENOMEM;
1614 goto unlock;
1615 }
1616
1617 err = 0;
1618
1619 unlock:
1620 hci_dev_unlock(hdev);
1621 return err;
1622 }
1623
1624 static void set_class_complete(struct hci_dev *hdev, u8 status)
1625 {
1626 BT_DBG("status 0x%02x", status);
1627
1628 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
1629 }
1630
1631 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
1632 u16 len)
1633 {
1634 struct mgmt_cp_set_dev_class *cp = data;
1635 struct pending_cmd *cmd;
1636 struct hci_request req;
1637 int err;
1638
1639 BT_DBG("request for %s", hdev->name);
1640
1641 if (!lmp_bredr_capable(hdev))
1642 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1643 MGMT_STATUS_NOT_SUPPORTED);
1644
1645 hci_dev_lock(hdev);
1646
1647 if (pending_eir_or_class(hdev)) {
1648 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1649 MGMT_STATUS_BUSY);
1650 goto unlock;
1651 }
1652
1653 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
1654 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1655 MGMT_STATUS_INVALID_PARAMS);
1656 goto unlock;
1657 }
1658
1659 hdev->major_class = cp->major;
1660 hdev->minor_class = cp->minor;
1661
1662 if (!hdev_is_powered(hdev)) {
1663 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1664 hdev->dev_class, 3);
1665 goto unlock;
1666 }
1667
1668 hci_req_init(&req, hdev);
1669
1670 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1671 hci_dev_unlock(hdev);
1672 cancel_delayed_work_sync(&hdev->service_cache);
1673 hci_dev_lock(hdev);
1674 update_eir(&req);
1675 }
1676
1677 update_class(&req);
1678
1679 err = hci_req_run(&req, set_class_complete);
1680 if (err < 0) {
1681 if (err != -ENODATA)
1682 goto unlock;
1683
1684 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1685 hdev->dev_class, 3);
1686 goto unlock;
1687 }
1688
1689 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
1690 if (!cmd) {
1691 err = -ENOMEM;
1692 goto unlock;
1693 }
1694
1695 err = 0;
1696
1697 unlock:
1698 hci_dev_unlock(hdev);
1699 return err;
1700 }
1701
1702 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
1703 u16 len)
1704 {
1705 struct mgmt_cp_load_link_keys *cp = data;
1706 u16 key_count, expected_len;
1707 int i;
1708
1709 key_count = __le16_to_cpu(cp->key_count);
1710
1711 expected_len = sizeof(*cp) + key_count *
1712 sizeof(struct mgmt_link_key_info);
1713 if (expected_len != len) {
1714 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
1715 len, expected_len);
1716 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1717 MGMT_STATUS_INVALID_PARAMS);
1718 }
1719
1720 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
1721 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1722 MGMT_STATUS_INVALID_PARAMS);
1723
1724 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
1725 key_count);
1726
1727 for (i = 0; i < key_count; i++) {
1728 struct mgmt_link_key_info *key = &cp->keys[i];
1729
1730 if (key->addr.type != BDADDR_BREDR)
1731 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1732 MGMT_STATUS_INVALID_PARAMS);
1733 }
1734
1735 hci_dev_lock(hdev);
1736
1737 hci_link_keys_clear(hdev);
1738
1739 if (cp->debug_keys)
1740 set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
1741 else
1742 clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
1743
1744 for (i = 0; i < key_count; i++) {
1745 struct mgmt_link_key_info *key = &cp->keys[i];
1746
1747 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
1748 key->type, key->pin_len);
1749 }
1750
1751 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
1752
1753 hci_dev_unlock(hdev);
1754
1755 return 0;
1756 }
1757
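/* Emit a Device Unpaired event for the given address to every mgmt socket
 * except the one that requested the unpairing.
 */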
1758 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
1759 u8 addr_type, struct sock *skip_sk)
1760 {
1761 struct mgmt_ev_device_unpaired ev;
1762
1763 bacpy(&ev.addr.bdaddr, bdaddr);
1764 ev.addr.type = addr_type;
1765
1766 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
1767 skip_sk);
1768 }
1769
1770 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1771 u16 len)
1772 {
1773 struct mgmt_cp_unpair_device *cp = data;
1774 struct mgmt_rp_unpair_device rp;
1775 struct hci_cp_disconnect dc;
1776 struct pending_cmd *cmd;
1777 struct hci_conn *conn;
1778 int err;
1779
1780 memset(&rp, 0, sizeof(rp));
1781 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1782 rp.addr.type = cp->addr.type;
1783
1784 if (!bdaddr_type_is_valid(cp->addr.type))
1785 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1786 MGMT_STATUS_INVALID_PARAMS,
1787 &rp, sizeof(rp));
1788
1789 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
1790 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1791 MGMT_STATUS_INVALID_PARAMS,
1792 &rp, sizeof(rp));
1793
1794 hci_dev_lock(hdev);
1795
1796 if (!hdev_is_powered(hdev)) {
1797 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1798 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
1799 goto unlock;
1800 }
1801
1802 if (cp->addr.type == BDADDR_BREDR)
1803 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
1804 else
1805 err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
1806
1807 if (err < 0) {
1808 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1809 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
1810 goto unlock;
1811 }
1812
1813 if (cp->disconnect) {
1814 if (cp->addr.type == BDADDR_BREDR)
1815 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1816 &cp->addr.bdaddr);
1817 else
1818 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
1819 &cp->addr.bdaddr);
1820 } else {
1821 conn = NULL;
1822 }
1823
1824 if (!conn) {
1825 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
1826 &rp, sizeof(rp));
1827 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
1828 goto unlock;
1829 }
1830
1831 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
1832 sizeof(*cp));
1833 if (!cmd) {
1834 err = -ENOMEM;
1835 goto unlock;
1836 }
1837
1838 dc.handle = cpu_to_le16(conn->handle);
1839 dc.reason = 0x13; /* Remote User Terminated Connection */
1840 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1841 if (err < 0)
1842 mgmt_pending_remove(cmd);
1843
1844 unlock:
1845 hci_dev_unlock(hdev);
1846 return err;
1847 }
1848
1849 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
1850 u16 len)
1851 {
1852 struct mgmt_cp_disconnect *cp = data;
1853 struct mgmt_rp_disconnect rp;
1854 struct hci_cp_disconnect dc;
1855 struct pending_cmd *cmd;
1856 struct hci_conn *conn;
1857 int err;
1858
1859 BT_DBG("");
1860
1861 memset(&rp, 0, sizeof(rp));
1862 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1863 rp.addr.type = cp->addr.type;
1864
1865 if (!bdaddr_type_is_valid(cp->addr.type))
1866 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1867 MGMT_STATUS_INVALID_PARAMS,
1868 &rp, sizeof(rp));
1869
1870 hci_dev_lock(hdev);
1871
1872 if (!test_bit(HCI_UP, &hdev->flags)) {
1873 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1874 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
1875 goto failed;
1876 }
1877
1878 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
1879 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1880 MGMT_STATUS_BUSY, &rp, sizeof(rp));
1881 goto failed;
1882 }
1883
1884 if (cp->addr.type == BDADDR_BREDR)
1885 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1886 &cp->addr.bdaddr);
1887 else
1888 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
1889
1890 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
1891 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1892 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
1893 goto failed;
1894 }
1895
1896 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
1897 if (!cmd) {
1898 err = -ENOMEM;
1899 goto failed;
1900 }
1901
1902 dc.handle = cpu_to_le16(conn->handle);
1903 dc.reason = HCI_ERROR_REMOTE_USER_TERM;
1904
1905 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1906 if (err < 0)
1907 mgmt_pending_remove(cmd);
1908
1909 failed:
1910 hci_dev_unlock(hdev);
1911 return err;
1912 }
1913
1914 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
1915 {
1916 switch (link_type) {
1917 case LE_LINK:
1918 switch (addr_type) {
1919 case ADDR_LE_DEV_PUBLIC:
1920 return BDADDR_LE_PUBLIC;
1921
1922 default:
1923 /* Fallback to LE Random address type */
1924 return BDADDR_LE_RANDOM;
1925 }
1926
1927 default:
1928 /* Fallback to BR/EDR type */
1929 return BDADDR_BREDR;
1930 }
1931 }
1932
1933 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
1934 u16 data_len)
1935 {
1936 struct mgmt_rp_get_connections *rp;
1937 struct hci_conn *c;
1938 size_t rp_len;
1939 int err;
1940 u16 i;
1941
1942 BT_DBG("");
1943
1944 hci_dev_lock(hdev);
1945
1946 if (!hdev_is_powered(hdev)) {
1947 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
1948 MGMT_STATUS_NOT_POWERED);
1949 goto unlock;
1950 }
1951
1952 i = 0;
1953 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1954 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
1955 i++;
1956 }
1957
1958 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
1959 rp = kmalloc(rp_len, GFP_KERNEL);
1960 if (!rp) {
1961 err = -ENOMEM;
1962 goto unlock;
1963 }
1964
1965 i = 0;
1966 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1967 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
1968 continue;
1969 bacpy(&rp->addr[i].bdaddr, &c->dst);
1970 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
1971 if (c->type == SCO_LINK || c->type == ESCO_LINK)
1972 continue;
1973 i++;
1974 }
1975
1976 rp->conn_count = cpu_to_le16(i);
1977
1978 /* Recalculate length in case of filtered SCO connections, etc */
1979 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
1980
1981 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
1982 rp_len);
1983
1984 kfree(rp);
1985
1986 unlock:
1987 hci_dev_unlock(hdev);
1988 return err;
1989 }
1990
1991 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
1992 struct mgmt_cp_pin_code_neg_reply *cp)
1993 {
1994 struct pending_cmd *cmd;
1995 int err;
1996
1997 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
1998 sizeof(*cp));
1999 if (!cmd)
2000 return -ENOMEM;
2001
2002 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2003 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2004 if (err < 0)
2005 mgmt_pending_remove(cmd);
2006
2007 return err;
2008 }
2009
2010 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2011 u16 len)
2012 {
2013 struct hci_conn *conn;
2014 struct mgmt_cp_pin_code_reply *cp = data;
2015 struct hci_cp_pin_code_reply reply;
2016 struct pending_cmd *cmd;
2017 int err;
2018
2019 BT_DBG("");
2020
2021 hci_dev_lock(hdev);
2022
2023 if (!hdev_is_powered(hdev)) {
2024 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2025 MGMT_STATUS_NOT_POWERED);
2026 goto failed;
2027 }
2028
2029 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2030 if (!conn) {
2031 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2032 MGMT_STATUS_NOT_CONNECTED);
2033 goto failed;
2034 }
2035
2036 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2037 struct mgmt_cp_pin_code_neg_reply ncp;
2038
2039 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2040
2041 BT_ERR("PIN code is not 16 bytes long");
2042
2043 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2044 if (err >= 0)
2045 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2046 MGMT_STATUS_INVALID_PARAMS);
2047
2048 goto failed;
2049 }
2050
2051 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2052 if (!cmd) {
2053 err = -ENOMEM;
2054 goto failed;
2055 }
2056
2057 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2058 reply.pin_len = cp->pin_len;
2059 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2060
2061 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2062 if (err < 0)
2063 mgmt_pending_remove(cmd);
2064
2065 failed:
2066 hci_dev_unlock(hdev);
2067 return err;
2068 }
2069
2070 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2071 u16 len)
2072 {
2073 struct mgmt_cp_set_io_capability *cp = data;
2074
2075 BT_DBG("");
2076
2077 hci_dev_lock(hdev);
2078
2079 hdev->io_capability = cp->io_capability;
2080
2081 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2082 hdev->io_capability);
2083
2084 hci_dev_unlock(hdev);
2085
2086 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
2087 0);
2088 }
2089
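/* Look up the pending Pair Device command, if any, associated with this
 * connection.
 */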
2090 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2091 {
2092 struct hci_dev *hdev = conn->hdev;
2093 struct pending_cmd *cmd;
2094
2095 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2096 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2097 continue;
2098
2099 if (cmd->user_data != conn)
2100 continue;
2101
2102 return cmd;
2103 }
2104
2105 return NULL;
2106 }
2107
2108 static void pairing_complete(struct pending_cmd *cmd, u8 status)
2109 {
2110 struct mgmt_rp_pair_device rp;
2111 struct hci_conn *conn = cmd->user_data;
2112
2113 bacpy(&rp.addr.bdaddr, &conn->dst);
2114 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2115
2116 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
2117 &rp, sizeof(rp));
2118
2119 /* So we don't get further callbacks for this connection */
2120 conn->connect_cfm_cb = NULL;
2121 conn->security_cfm_cb = NULL;
2122 conn->disconn_cfm_cb = NULL;
2123
2124 hci_conn_drop(conn);
2125
2126 mgmt_pending_remove(cmd);
2127 }
2128
2129 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2130 {
2131 struct pending_cmd *cmd;
2132
2133 BT_DBG("status %u", status);
2134
2135 cmd = find_pairing(conn);
2136 if (!cmd)
2137 BT_DBG("Unable to find a pending command");
2138 else
2139 pairing_complete(cmd, mgmt_status(status));
2140 }
2141
2142 static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
2143 {
2144 struct pending_cmd *cmd;
2145
2146 BT_DBG("status %u", status);
2147
2148 if (!status)
2149 return;
2150
2151 cmd = find_pairing(conn);
2152 if (!cmd)
2153 BT_DBG("Unable to find a pending command");
2154 else
2155 pairing_complete(cmd, mgmt_status(status));
2156 }
2157
2158 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2159 u16 len)
2160 {
2161 struct mgmt_cp_pair_device *cp = data;
2162 struct mgmt_rp_pair_device rp;
2163 struct pending_cmd *cmd;
2164 u8 sec_level, auth_type;
2165 struct hci_conn *conn;
2166 int err;
2167
2168 BT_DBG("");
2169
2170 memset(&rp, 0, sizeof(rp));
2171 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2172 rp.addr.type = cp->addr.type;
2173
2174 if (!bdaddr_type_is_valid(cp->addr.type))
2175 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2176 MGMT_STATUS_INVALID_PARAMS,
2177 &rp, sizeof(rp));
2178
2179 hci_dev_lock(hdev);
2180
2181 if (!hdev_is_powered(hdev)) {
2182 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2183 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2184 goto unlock;
2185 }
2186
2187 sec_level = BT_SECURITY_MEDIUM;
2188 if (cp->io_cap == 0x03)
2189 auth_type = HCI_AT_DEDICATED_BONDING;
2190 else
2191 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
2192
2193 if (cp->addr.type == BDADDR_BREDR)
2194 conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
2195 cp->addr.type, sec_level, auth_type);
2196 else
2197 conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
2198 cp->addr.type, sec_level, auth_type);
2199
2200 if (IS_ERR(conn)) {
2201 int status;
2202
2203 if (PTR_ERR(conn) == -EBUSY)
2204 status = MGMT_STATUS_BUSY;
2205 else
2206 status = MGMT_STATUS_CONNECT_FAILED;
2207
2208 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2209 status, &rp,
2210 sizeof(rp));
2211 goto unlock;
2212 }
2213
2214 if (conn->connect_cfm_cb) {
2215 hci_conn_drop(conn);
2216 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2217 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2218 goto unlock;
2219 }
2220
2221 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2222 if (!cmd) {
2223 err = -ENOMEM;
2224 hci_conn_drop(conn);
2225 goto unlock;
2226 }
2227
2228 /* For LE, just connecting isn't a proof that the pairing finished */
2229 if (cp->addr.type == BDADDR_BREDR)
2230 conn->connect_cfm_cb = pairing_complete_cb;
2231 else
2232 conn->connect_cfm_cb = le_connect_complete_cb;
2233
2234 conn->security_cfm_cb = pairing_complete_cb;
2235 conn->disconn_cfm_cb = pairing_complete_cb;
2236 conn->io_capability = cp->io_cap;
2237 cmd->user_data = conn;
2238
2239 if (conn->state == BT_CONNECTED &&
2240 hci_conn_security(conn, sec_level, auth_type))
2241 pairing_complete(cmd, 0);
2242
2243 err = 0;
2244
2245 unlock:
2246 hci_dev_unlock(hdev);
2247 return err;
2248 }
2249
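/* Handle the Cancel Pair Device command: find the pending Pair Device
 * command, check that the given address matches its connection and
 * complete it with MGMT_STATUS_CANCELLED.
 */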
2250 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2251 u16 len)
2252 {
2253 struct mgmt_addr_info *addr = data;
2254 struct pending_cmd *cmd;
2255 struct hci_conn *conn;
2256 int err;
2257
2258 BT_DBG("");
2259
2260 hci_dev_lock(hdev);
2261
2262 if (!hdev_is_powered(hdev)) {
2263 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2264 MGMT_STATUS_NOT_POWERED);
2265 goto unlock;
2266 }
2267
2268 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2269 if (!cmd) {
2270 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2271 MGMT_STATUS_INVALID_PARAMS);
2272 goto unlock;
2273 }
2274
2275 conn = cmd->user_data;
2276
2277 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
2278 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2279 MGMT_STATUS_INVALID_PARAMS);
2280 goto unlock;
2281 }
2282
2283 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
2284
2285 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
2286 addr, sizeof(*addr));
2287 unlock:
2288 hci_dev_unlock(hdev);
2289 return err;
2290 }
2291
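/* Common helper behind the PIN code negative reply, user confirmation
 * and passkey reply commands. LE addresses are answered through SMP
 * directly; BR/EDR addresses queue a pending command and forward the
 * reply to the controller as the given HCI command.
 */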
2292 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2293 struct mgmt_addr_info *addr, u16 mgmt_op,
2294 u16 hci_op, __le32 passkey)
2295 {
2296 struct pending_cmd *cmd;
2297 struct hci_conn *conn;
2298 int err;
2299
2300 hci_dev_lock(hdev);
2301
2302 if (!hdev_is_powered(hdev)) {
2303 err = cmd_complete(sk, hdev->id, mgmt_op,
2304 MGMT_STATUS_NOT_POWERED, addr,
2305 sizeof(*addr));
2306 goto done;
2307 }
2308
2309 if (addr->type == BDADDR_BREDR)
2310 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
2311 else
2312 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
2313
2314 if (!conn) {
2315 err = cmd_complete(sk, hdev->id, mgmt_op,
2316 MGMT_STATUS_NOT_CONNECTED, addr,
2317 sizeof(*addr));
2318 goto done;
2319 }
2320
2321 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
2322 /* Continue with pairing via SMP */
2323 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
2324
2325 if (!err)
2326 err = cmd_complete(sk, hdev->id, mgmt_op,
2327 MGMT_STATUS_SUCCESS, addr,
2328 sizeof(*addr));
2329 else
2330 err = cmd_complete(sk, hdev->id, mgmt_op,
2331 MGMT_STATUS_FAILED, addr,
2332 sizeof(*addr));
2333
2334 goto done;
2335 }
2336
2337 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
2338 if (!cmd) {
2339 err = -ENOMEM;
2340 goto done;
2341 }
2342
2343 /* Continue with pairing via HCI */
2344 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
2345 struct hci_cp_user_passkey_reply cp;
2346
2347 bacpy(&cp.bdaddr, &addr->bdaddr);
2348 cp.passkey = passkey;
2349 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
2350 } else
2351 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
2352 &addr->bdaddr);
2353
2354 if (err < 0)
2355 mgmt_pending_remove(cmd);
2356
2357 done:
2358 hci_dev_unlock(hdev);
2359 return err;
2360 }
2361
2362 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2363 void *data, u16 len)
2364 {
2365 struct mgmt_cp_pin_code_neg_reply *cp = data;
2366
2367 BT_DBG("");
2368
2369 return user_pairing_resp(sk, hdev, &cp->addr,
2370 MGMT_OP_PIN_CODE_NEG_REPLY,
2371 HCI_OP_PIN_CODE_NEG_REPLY, 0);
2372 }
2373
2374 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2375 u16 len)
2376 {
2377 struct mgmt_cp_user_confirm_reply *cp = data;
2378
2379 BT_DBG("");
2380
2381 if (len != sizeof(*cp))
2382 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
2383 MGMT_STATUS_INVALID_PARAMS);
2384
2385 return user_pairing_resp(sk, hdev, &cp->addr,
2386 MGMT_OP_USER_CONFIRM_REPLY,
2387 HCI_OP_USER_CONFIRM_REPLY, 0);
2388 }
2389
2390 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
2391 void *data, u16 len)
2392 {
2393 struct mgmt_cp_user_confirm_neg_reply *cp = data;
2394
2395 BT_DBG("");
2396
2397 return user_pairing_resp(sk, hdev, &cp->addr,
2398 MGMT_OP_USER_CONFIRM_NEG_REPLY,
2399 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
2400 }
2401
2402 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2403 u16 len)
2404 {
2405 struct mgmt_cp_user_passkey_reply *cp = data;
2406
2407 BT_DBG("");
2408
2409 return user_pairing_resp(sk, hdev, &cp->addr,
2410 MGMT_OP_USER_PASSKEY_REPLY,
2411 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
2412 }
2413
2414 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
2415 void *data, u16 len)
2416 {
2417 struct mgmt_cp_user_passkey_neg_reply *cp = data;
2418
2419 BT_DBG("");
2420
2421 return user_pairing_resp(sk, hdev, &cp->addr,
2422 MGMT_OP_USER_PASSKEY_NEG_REPLY,
2423 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
2424 }
2425
2426 static void update_name(struct hci_request *req)
2427 {
2428 struct hci_dev *hdev = req->hdev;
2429 struct hci_cp_write_local_name cp;
2430
2431 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
2432
2433 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
2434 }
2435
2436 static void set_name_complete(struct hci_dev *hdev, u8 status)
2437 {
2438 struct mgmt_cp_set_local_name *cp;
2439 struct pending_cmd *cmd;
2440
2441 BT_DBG("status 0x%02x", status);
2442
2443 hci_dev_lock(hdev);
2444
2445 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
2446 if (!cmd)
2447 goto unlock;
2448
2449 cp = cmd->param;
2450
2451 if (status)
2452 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
2453 mgmt_status(status));
2454 else
2455 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2456 cp, sizeof(*cp));
2457
2458 mgmt_pending_remove(cmd);
2459
2460 unlock:
2461 hci_dev_unlock(hdev);
2462 }
2463
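/* Handle the Set Local Name command. An unchanged name completes
 * immediately; with the controller powered off only the stored name is
 * updated and a Local Name Changed event is sent. Otherwise a pending
 * command is queued and HCI requests update the name, the EIR data and
 * the advertising data.
 */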
2464 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
2465 u16 len)
2466 {
2467 struct mgmt_cp_set_local_name *cp = data;
2468 struct pending_cmd *cmd;
2469 struct hci_request req;
2470 int err;
2471
2472 BT_DBG("");
2473
2474 hci_dev_lock(hdev);
2475
2476 /* If the old values are the same as the new ones just return a
2477 * direct command complete event.
2478 */
2479 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
2480 !memcmp(hdev->short_name, cp->short_name,
2481 sizeof(hdev->short_name))) {
2482 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2483 data, len);
2484 goto failed;
2485 }
2486
2487 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
2488
2489 if (!hdev_is_powered(hdev)) {
2490 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2491
2492 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2493 data, len);
2494 if (err < 0)
2495 goto failed;
2496
2497 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
2498 sk);
2499
2500 goto failed;
2501 }
2502
2503 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
2504 if (!cmd) {
2505 err = -ENOMEM;
2506 goto failed;
2507 }
2508
2509 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2510
2511 hci_req_init(&req, hdev);
2512
2513 if (lmp_bredr_capable(hdev)) {
2514 update_name(&req);
2515 update_eir(&req);
2516 }
2517
2518 if (lmp_le_capable(hdev))
2519 hci_update_ad(&req);
2520
2521 err = hci_req_run(&req, set_name_complete);
2522 if (err < 0)
2523 mgmt_pending_remove(cmd);
2524
2525 failed:
2526 hci_dev_unlock(hdev);
2527 return err;
2528 }
2529
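/* Handle the Read Local OOB Data command: require a powered, SSP
 * capable controller with no such command already pending, then issue
 * HCI_OP_READ_LOCAL_OOB_DATA. The result is delivered later through
 * mgmt_read_local_oob_data_reply_complete().
 */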
2530 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
2531 void *data, u16 data_len)
2532 {
2533 struct pending_cmd *cmd;
2534 int err;
2535
2536 BT_DBG("%s", hdev->name);
2537
2538 hci_dev_lock(hdev);
2539
2540 if (!hdev_is_powered(hdev)) {
2541 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2542 MGMT_STATUS_NOT_POWERED);
2543 goto unlock;
2544 }
2545
2546 if (!lmp_ssp_capable(hdev)) {
2547 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2548 MGMT_STATUS_NOT_SUPPORTED);
2549 goto unlock;
2550 }
2551
2552 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
2553 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2554 MGMT_STATUS_BUSY);
2555 goto unlock;
2556 }
2557
2558 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
2559 if (!cmd) {
2560 err = -ENOMEM;
2561 goto unlock;
2562 }
2563
2564 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
2565 if (err < 0)
2566 mgmt_pending_remove(cmd);
2567
2568 unlock:
2569 hci_dev_unlock(hdev);
2570 return err;
2571 }
2572
2573 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2574 void *data, u16 len)
2575 {
2576 struct mgmt_cp_add_remote_oob_data *cp = data;
2577 u8 status;
2578 int err;
2579
2580 BT_DBG("%s ", hdev->name);
2581
2582 hci_dev_lock(hdev);
2583
2584 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
2585 cp->randomizer);
2586 if (err < 0)
2587 status = MGMT_STATUS_FAILED;
2588 else
2589 status = MGMT_STATUS_SUCCESS;
2590
2591 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
2592 &cp->addr, sizeof(cp->addr));
2593
2594 hci_dev_unlock(hdev);
2595 return err;
2596 }
2597
2598 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2599 void *data, u16 len)
2600 {
2601 struct mgmt_cp_remove_remote_oob_data *cp = data;
2602 u8 status;
2603 int err;
2604
2605 BT_DBG("%s", hdev->name);
2606
2607 hci_dev_lock(hdev);
2608
2609 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
2610 if (err < 0)
2611 status = MGMT_STATUS_INVALID_PARAMS;
2612 else
2613 status = MGMT_STATUS_SUCCESS;
2614
2615 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
2616 status, &cp->addr, sizeof(cp->addr));
2617
2618 hci_dev_unlock(hdev);
2619 return err;
2620 }
2621
2622 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
2623 {
2624 struct pending_cmd *cmd;
2625 u8 type;
2626 int err;
2627
2628 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2629
2630 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
2631 if (!cmd)
2632 return -ENOENT;
2633
2634 type = hdev->discovery.type;
2635
2636 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
2637 &type, sizeof(type));
2638 mgmt_pending_remove(cmd);
2639
2640 return err;
2641 }
2642
2643 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
2644 {
2645 BT_DBG("status %d", status);
2646
2647 if (status) {
2648 hci_dev_lock(hdev);
2649 mgmt_start_discovery_failed(hdev, status);
2650 hci_dev_unlock(hdev);
2651 return;
2652 }
2653
2654 hci_dev_lock(hdev);
2655 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2656 hci_dev_unlock(hdev);
2657
2658 switch (hdev->discovery.type) {
2659 case DISCOV_TYPE_LE:
2660 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
2661 DISCOV_LE_TIMEOUT);
2662 break;
2663
2664 case DISCOV_TYPE_INTERLEAVED:
2665 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
2666 DISCOV_INTERLEAVED_TIMEOUT);
2667 break;
2668
2669 case DISCOV_TYPE_BREDR:
2670 break;
2671
2672 default:
2673 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
2674 }
2675 }
2676
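/* Handle the Start Discovery command. BR/EDR discovery issues an
 * inquiry with the general inquiry access code; LE and interleaved
 * discovery program and enable an active LE scan. On success the
 * discovery state moves to STARTING until start_discovery_complete()
 * runs.
 */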
2677 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
2678 void *data, u16 len)
2679 {
2680 struct mgmt_cp_start_discovery *cp = data;
2681 struct pending_cmd *cmd;
2682 struct hci_cp_le_set_scan_param param_cp;
2683 struct hci_cp_le_set_scan_enable enable_cp;
2684 struct hci_cp_inquiry inq_cp;
2685 struct hci_request req;
2686 /* General inquiry access code (GIAC) */
2687 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2688 int err;
2689
2690 BT_DBG("%s", hdev->name);
2691
2692 hci_dev_lock(hdev);
2693
2694 if (!hdev_is_powered(hdev)) {
2695 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2696 MGMT_STATUS_NOT_POWERED);
2697 goto failed;
2698 }
2699
2700 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
2701 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2702 MGMT_STATUS_BUSY);
2703 goto failed;
2704 }
2705
2706 if (hdev->discovery.state != DISCOVERY_STOPPED) {
2707 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2708 MGMT_STATUS_BUSY);
2709 goto failed;
2710 }
2711
2712 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
2713 if (!cmd) {
2714 err = -ENOMEM;
2715 goto failed;
2716 }
2717
2718 hdev->discovery.type = cp->type;
2719
2720 hci_req_init(&req, hdev);
2721
2722 switch (hdev->discovery.type) {
2723 case DISCOV_TYPE_BREDR:
2724 if (!lmp_bredr_capable(hdev)) {
2725 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2726 MGMT_STATUS_NOT_SUPPORTED);
2727 mgmt_pending_remove(cmd);
2728 goto failed;
2729 }
2730
2731 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
2732 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2733 MGMT_STATUS_BUSY);
2734 mgmt_pending_remove(cmd);
2735 goto failed;
2736 }
2737
2738 hci_inquiry_cache_flush(hdev);
2739
2740 memset(&inq_cp, 0, sizeof(inq_cp));
2741 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
2742 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
2743 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
2744 break;
2745
2746 case DISCOV_TYPE_LE:
2747 case DISCOV_TYPE_INTERLEAVED:
2748 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2749 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2750 MGMT_STATUS_NOT_SUPPORTED);
2751 mgmt_pending_remove(cmd);
2752 goto failed;
2753 }
2754
2755 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
2756 !lmp_bredr_capable(hdev)) {
2757 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2758 MGMT_STATUS_NOT_SUPPORTED);
2759 mgmt_pending_remove(cmd);
2760 goto failed;
2761 }
2762
2763 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags)) {
2764 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2765 MGMT_STATUS_REJECTED);
2766 mgmt_pending_remove(cmd);
2767 goto failed;
2768 }
2769
2770 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
2771 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2772 MGMT_STATUS_BUSY);
2773 mgmt_pending_remove(cmd);
2774 goto failed;
2775 }
2776
2777 memset(&param_cp, 0, sizeof(param_cp));
2778 param_cp.type = LE_SCAN_ACTIVE;
2779 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
2780 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
2781 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
2782 &param_cp);
2783
2784 memset(&enable_cp, 0, sizeof(enable_cp));
2785 enable_cp.enable = LE_SCAN_ENABLE;
2786 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2787 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
2788 &enable_cp);
2789 break;
2790
2791 default:
2792 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2793 MGMT_STATUS_INVALID_PARAMS);
2794 mgmt_pending_remove(cmd);
2795 goto failed;
2796 }
2797
2798 err = hci_req_run(&req, start_discovery_complete);
2799 if (err < 0)
2800 mgmt_pending_remove(cmd);
2801 else
2802 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
2803
2804 failed:
2805 hci_dev_unlock(hdev);
2806 return err;
2807 }
2808
2809 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
2810 {
2811 struct pending_cmd *cmd;
2812 int err;
2813
2814 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
2815 if (!cmd)
2816 return -ENOENT;
2817
2818 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
2819 &hdev->discovery.type, sizeof(hdev->discovery.type));
2820 mgmt_pending_remove(cmd);
2821
2822 return err;
2823 }
2824
2825 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
2826 {
2827 BT_DBG("status %d", status);
2828
2829 hci_dev_lock(hdev);
2830
2831 if (status) {
2832 mgmt_stop_discovery_failed(hdev, status);
2833 goto unlock;
2834 }
2835
2836 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2837
2838 unlock:
2839 hci_dev_unlock(hdev);
2840 }
2841
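/* Handle the Stop Discovery command: depending on the current state
 * either cancel the ongoing inquiry, disable the LE scan or cancel a
 * pending remote name request, then move to STOPPING until
 * stop_discovery_complete() runs.
 */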
2842 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
2843 u16 len)
2844 {
2845 struct mgmt_cp_stop_discovery *mgmt_cp = data;
2846 struct pending_cmd *cmd;
2847 struct hci_cp_remote_name_req_cancel cp;
2848 struct inquiry_entry *e;
2849 struct hci_request req;
2850 struct hci_cp_le_set_scan_enable enable_cp;
2851 int err;
2852
2853 BT_DBG("%s", hdev->name);
2854
2855 hci_dev_lock(hdev);
2856
2857 if (!hci_discovery_active(hdev)) {
2858 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2859 MGMT_STATUS_REJECTED, &mgmt_cp->type,
2860 sizeof(mgmt_cp->type));
2861 goto unlock;
2862 }
2863
2864 if (hdev->discovery.type != mgmt_cp->type) {
2865 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2866 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
2867 sizeof(mgmt_cp->type));
2868 goto unlock;
2869 }
2870
2871 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
2872 if (!cmd) {
2873 err = -ENOMEM;
2874 goto unlock;
2875 }
2876
2877 hci_req_init(&req, hdev);
2878
2879 switch (hdev->discovery.state) {
2880 case DISCOVERY_FINDING:
2881 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
2882 hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2883 } else {
2884 cancel_delayed_work(&hdev->le_scan_disable);
2885
2886 memset(&enable_cp, 0, sizeof(enable_cp));
2887 enable_cp.enable = LE_SCAN_DISABLE;
2888 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE,
2889 sizeof(enable_cp), &enable_cp);
2890 }
2891
2892 break;
2893
2894 case DISCOVERY_RESOLVING:
2895 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2896 NAME_PENDING);
2897 if (!e) {
2898 mgmt_pending_remove(cmd);
2899 err = cmd_complete(sk, hdev->id,
2900 MGMT_OP_STOP_DISCOVERY, 0,
2901 &mgmt_cp->type,
2902 sizeof(mgmt_cp->type));
2903 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2904 goto unlock;
2905 }
2906
2907 bacpy(&cp.bdaddr, &e->data.bdaddr);
2908 hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2909 &cp);
2910
2911 break;
2912
2913 default:
2914 BT_DBG("unknown discovery state %u", hdev->discovery.state);
2915
2916 mgmt_pending_remove(cmd);
2917 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2918 MGMT_STATUS_FAILED, &mgmt_cp->type,
2919 sizeof(mgmt_cp->type));
2920 goto unlock;
2921 }
2922
2923 err = hci_req_run(&req, stop_discovery_complete);
2924 if (err < 0)
2925 mgmt_pending_remove(cmd);
2926 else
2927 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
2928
2929 unlock:
2930 hci_dev_unlock(hdev);
2931 return err;
2932 }
2933
2934 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
2935 u16 len)
2936 {
2937 struct mgmt_cp_confirm_name *cp = data;
2938 struct inquiry_entry *e;
2939 int err;
2940
2941 BT_DBG("%s", hdev->name);
2942
2943 hci_dev_lock(hdev);
2944
2945 if (!hci_discovery_active(hdev)) {
2946 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
2947 MGMT_STATUS_FAILED);
2948 goto failed;
2949 }
2950
2951 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
2952 if (!e) {
2953 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
2954 MGMT_STATUS_INVALID_PARAMS);
2955 goto failed;
2956 }
2957
2958 if (cp->name_known) {
2959 e->name_state = NAME_KNOWN;
2960 list_del(&e->list);
2961 } else {
2962 e->name_state = NAME_NEEDED;
2963 hci_inquiry_cache_update_resolve(hdev, e);
2964 }
2965
2966 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
2967 sizeof(cp->addr));
2968
2969 failed:
2970 hci_dev_unlock(hdev);
2971 return err;
2972 }
2973
2974 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
2975 u16 len)
2976 {
2977 struct mgmt_cp_block_device *cp = data;
2978 u8 status;
2979 int err;
2980
2981 BT_DBG("%s", hdev->name);
2982
2983 if (!bdaddr_type_is_valid(cp->addr.type))
2984 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
2985 MGMT_STATUS_INVALID_PARAMS,
2986 &cp->addr, sizeof(cp->addr));
2987
2988 hci_dev_lock(hdev);
2989
2990 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
2991 if (err < 0)
2992 status = MGMT_STATUS_FAILED;
2993 else
2994 status = MGMT_STATUS_SUCCESS;
2995
2996 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
2997 &cp->addr, sizeof(cp->addr));
2998
2999 hci_dev_unlock(hdev);
3000
3001 return err;
3002 }
3003
3004 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3005 u16 len)
3006 {
3007 struct mgmt_cp_unblock_device *cp = data;
3008 u8 status;
3009 int err;
3010
3011 BT_DBG("%s", hdev->name);
3012
3013 if (!bdaddr_type_is_valid(cp->addr.type))
3014 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3015 MGMT_STATUS_INVALID_PARAMS,
3016 &cp->addr, sizeof(cp->addr));
3017
3018 hci_dev_lock(hdev);
3019
3020 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3021 if (err < 0)
3022 status = MGMT_STATUS_INVALID_PARAMS;
3023 else
3024 status = MGMT_STATUS_SUCCESS;
3025
3026 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3027 &cp->addr, sizeof(cp->addr));
3028
3029 hci_dev_unlock(hdev);
3030
3031 return err;
3032 }
3033
3034 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3035 u16 len)
3036 {
3037 struct mgmt_cp_set_device_id *cp = data;
3038 struct hci_request req;
3039 int err;
3040 __u16 source;
3041
3042 BT_DBG("%s", hdev->name);
3043
3044 source = __le16_to_cpu(cp->source);
3045
3046 if (source > 0x0002)
3047 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3048 MGMT_STATUS_INVALID_PARAMS);
3049
3050 hci_dev_lock(hdev);
3051
3052 hdev->devid_source = source;
3053 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3054 hdev->devid_product = __le16_to_cpu(cp->product);
3055 hdev->devid_version = __le16_to_cpu(cp->version);
3056
3057 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
3058
3059 hci_req_init(&req, hdev);
3060 update_eir(&req);
3061 hci_req_run(&req, NULL);
3062
3063 hci_dev_unlock(hdev);
3064
3065 return err;
3066 }
3067
3068 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
3069 {
3070 struct pending_cmd *cmd;
3071
3072 BT_DBG("status 0x%02x", status);
3073
3074 hci_dev_lock(hdev);
3075
3076 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
3077 if (!cmd)
3078 goto unlock;
3079
3080 if (status) {
3081 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3082 mgmt_status(status));
3083 } else {
3084 struct mgmt_mode *cp = cmd->param;
3085
3086 if (cp->val)
3087 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3088 else
3089 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3090
3091 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
3092 new_settings(hdev, cmd->sk);
3093 }
3094
3095 mgmt_pending_remove(cmd);
3096
3097 unlock:
3098 hci_dev_unlock(hdev);
3099 }
3100
3101 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
3102 void *data, u16 len)
3103 {
3104 struct mgmt_mode *cp = data;
3105 struct pending_cmd *cmd;
3106 struct hci_request req;
3107 int err;
3108
3109 BT_DBG("%s", hdev->name);
3110
3111 if (!lmp_bredr_capable(hdev) || hdev->hci_ver < BLUETOOTH_VER_1_2)
3112 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3113 MGMT_STATUS_NOT_SUPPORTED);
3114
3115 if (cp->val != 0x00 && cp->val != 0x01)
3116 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3117 MGMT_STATUS_INVALID_PARAMS);
3118
3119 if (!hdev_is_powered(hdev))
3120 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3121 MGMT_STATUS_NOT_POWERED);
3122
3123 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3124 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3125 MGMT_STATUS_REJECTED);
3126
3127 hci_dev_lock(hdev);
3128
3129 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
3130 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3131 MGMT_STATUS_BUSY);
3132 goto unlock;
3133 }
3134
3135 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
3136 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
3137 hdev);
3138 goto unlock;
3139 }
3140
3141 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
3142 data, len);
3143 if (!cmd) {
3144 err = -ENOMEM;
3145 goto unlock;
3146 }
3147
3148 hci_req_init(&req, hdev);
3149
3150 write_fast_connectable(&req, cp->val);
3151
3152 err = hci_req_run(&req, fast_connectable_complete);
3153 if (err < 0) {
3154 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3155 MGMT_STATUS_FAILED);
3156 mgmt_pending_remove(cmd);
3157 }
3158
3159 unlock:
3160 hci_dev_unlock(hdev);
3161
3162 return err;
3163 }
3164
3165 static bool ltk_is_valid(struct mgmt_ltk_info *key)
3166 {
3167 if (key->authenticated != 0x00 && key->authenticated != 0x01)
3168 return false;
3169 if (key->master != 0x00 && key->master != 0x01)
3170 return false;
3171 if (!bdaddr_type_is_le(key->addr.type))
3172 return false;
3173 return true;
3174 }
3175
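/* Handle the Load Long Term Keys command: validate the length against
 * the announced key count, reject malformed keys, then replace the
 * stored SMP LTKs with the supplied list.
 */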
3176 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
3177 void *cp_data, u16 len)
3178 {
3179 struct mgmt_cp_load_long_term_keys *cp = cp_data;
3180 u16 key_count, expected_len;
3181 int i, err;
3182
3183 key_count = __le16_to_cpu(cp->key_count);
3184
3185 expected_len = sizeof(*cp) + key_count *
3186 sizeof(struct mgmt_ltk_info);
3187 if (expected_len != len) {
3188 BT_ERR("load_keys: expected %u bytes, got %u bytes",
3189 len, expected_len);
3190 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
3191 MGMT_STATUS_INVALID_PARAMS);
3192 }
3193
3194 BT_DBG("%s key_count %u", hdev->name, key_count);
3195
3196 for (i = 0; i < key_count; i++) {
3197 struct mgmt_ltk_info *key = &cp->keys[i];
3198
3199 if (!ltk_is_valid(key))
3200 return cmd_status(sk, hdev->id,
3201 MGMT_OP_LOAD_LONG_TERM_KEYS,
3202 MGMT_STATUS_INVALID_PARAMS);
3203 }
3204
3205 hci_dev_lock(hdev);
3206
3207 hci_smp_ltks_clear(hdev);
3208
3209 for (i = 0; i < key_count; i++) {
3210 struct mgmt_ltk_info *key = &cp->keys[i];
3211 u8 type;
3212
3213 if (key->master)
3214 type = HCI_SMP_LTK;
3215 else
3216 type = HCI_SMP_LTK_SLAVE;
3217
3218 hci_add_ltk(hdev, &key->addr.bdaddr,
3219 bdaddr_to_le(key->addr.type),
3220 type, 0, key->authenticated, key->val,
3221 key->enc_size, key->ediv, key->rand);
3222 }
3223
3224 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
3225 NULL, 0);
3226
3227 hci_dev_unlock(hdev);
3228
3229 return err;
3230 }
3231
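/* Dispatch table for the management opcodes, indexed by opcode. Each
 * entry names the handler, whether the command carries variable length
 * parameters and the (minimum) expected parameter size.
 */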
3232 static const struct mgmt_handler {
3233 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
3234 u16 data_len);
3235 bool var_len;
3236 size_t data_len;
3237 } mgmt_handlers[] = {
3238 { NULL }, /* 0x0000 (no command) */
3239 { read_version, false, MGMT_READ_VERSION_SIZE },
3240 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
3241 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
3242 { read_controller_info, false, MGMT_READ_INFO_SIZE },
3243 { set_powered, false, MGMT_SETTING_SIZE },
3244 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
3245 { set_connectable, false, MGMT_SETTING_SIZE },
3246 { set_fast_connectable, false, MGMT_SETTING_SIZE },
3247 { set_pairable, false, MGMT_SETTING_SIZE },
3248 { set_link_security, false, MGMT_SETTING_SIZE },
3249 { set_ssp, false, MGMT_SETTING_SIZE },
3250 { set_hs, false, MGMT_SETTING_SIZE },
3251 { set_le, false, MGMT_SETTING_SIZE },
3252 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
3253 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
3254 { add_uuid, false, MGMT_ADD_UUID_SIZE },
3255 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
3256 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
3257 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
3258 { disconnect, false, MGMT_DISCONNECT_SIZE },
3259 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
3260 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
3261 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
3262 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
3263 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
3264 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
3265 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
3266 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
3267 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
3268 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
3269 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
3270 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
3271 { add_remote_oob_data, false, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
3272 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
3273 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
3274 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
3275 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
3276 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
3277 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
3278 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
3279 };
3280
3281
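/* Entry point for commands written to a management control socket.
 * Each message starts with a little-endian mgmt_hdr:
 *
 *     opcode (2 bytes) | controller index (2 bytes) | param len (2 bytes)
 *
 * The header is validated against msglen, a controller reference is
 * taken when an index is given, and the parameters are passed to the
 * matching handler from mgmt_handlers[] after a length check.
 */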
3282 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
3283 {
3284 void *buf;
3285 u8 *cp;
3286 struct mgmt_hdr *hdr;
3287 u16 opcode, index, len;
3288 struct hci_dev *hdev = NULL;
3289 const struct mgmt_handler *handler;
3290 int err;
3291
3292 BT_DBG("got %zu bytes", msglen);
3293
3294 if (msglen < sizeof(*hdr))
3295 return -EINVAL;
3296
3297 buf = kmalloc(msglen, GFP_KERNEL);
3298 if (!buf)
3299 return -ENOMEM;
3300
3301 if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
3302 err = -EFAULT;
3303 goto done;
3304 }
3305
3306 hdr = buf;
3307 opcode = __le16_to_cpu(hdr->opcode);
3308 index = __le16_to_cpu(hdr->index);
3309 len = __le16_to_cpu(hdr->len);
3310
3311 if (len != msglen - sizeof(*hdr)) {
3312 err = -EINVAL;
3313 goto done;
3314 }
3315
3316 if (index != MGMT_INDEX_NONE) {
3317 hdev = hci_dev_get(index);
3318 if (!hdev) {
3319 err = cmd_status(sk, index, opcode,
3320 MGMT_STATUS_INVALID_INDEX);
3321 goto done;
3322 }
3323 }
3324
3325 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
3326 mgmt_handlers[opcode].func == NULL) {
3327 BT_DBG("Unknown op %u", opcode);
3328 err = cmd_status(sk, index, opcode,
3329 MGMT_STATUS_UNKNOWN_COMMAND);
3330 goto done;
3331 }
3332
3333 if ((hdev && opcode < MGMT_OP_READ_INFO) ||
3334 (!hdev && opcode >= MGMT_OP_READ_INFO)) {
3335 err = cmd_status(sk, index, opcode,
3336 MGMT_STATUS_INVALID_INDEX);
3337 goto done;
3338 }
3339
3340 handler = &mgmt_handlers[opcode];
3341
3342 if ((handler->var_len && len < handler->data_len) ||
3343 (!handler->var_len && len != handler->data_len)) {
3344 err = cmd_status(sk, index, opcode,
3345 MGMT_STATUS_INVALID_PARAMS);
3346 goto done;
3347 }
3348
3349 if (hdev)
3350 mgmt_init_hdev(sk, hdev);
3351
3352 cp = buf + sizeof(*hdr);
3353
3354 err = handler->func(sk, hdev, cp, len);
3355 if (err < 0)
3356 goto done;
3357
3358 err = msglen;
3359
3360 done:
3361 if (hdev)
3362 hci_dev_put(hdev);
3363
3364 kfree(buf);
3365 return err;
3366 }
3367
3368 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
3369 {
3370 u8 *status = data;
3371
3372 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
3373 mgmt_pending_remove(cmd);
3374 }
3375
3376 int mgmt_index_added(struct hci_dev *hdev)
3377 {
3378 if (!mgmt_valid_hdev(hdev))
3379 return -ENOTSUPP;
3380
3381 return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
3382 }
3383
3384 int mgmt_index_removed(struct hci_dev *hdev)
3385 {
3386 u8 status = MGMT_STATUS_INVALID_INDEX;
3387
3388 if (!mgmt_valid_hdev(hdev))
3389 return -ENOTSUPP;
3390
3391 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
3392
3393 return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
3394 }
3395
3396 struct cmd_lookup {
3397 struct sock *sk;
3398 struct hci_dev *hdev;
3399 u8 mgmt_status;
3400 };
3401
3402 static void settings_rsp(struct pending_cmd *cmd, void *data)
3403 {
3404 struct cmd_lookup *match = data;
3405
3406 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
3407
3408 list_del(&cmd->list);
3409
3410 if (match->sk == NULL) {
3411 match->sk = cmd->sk;
3412 sock_hold(match->sk);
3413 }
3414
3415 mgmt_pending_free(cmd);
3416 }
3417
3418 static void set_bredr_scan(struct hci_request *req)
3419 {
3420 struct hci_dev *hdev = req->hdev;
3421 u8 scan = 0;
3422
3423 /* Ensure that fast connectable is disabled. This function will
3424 * not do anything if the page scan parameters are already what
3425 * they should be.
3426 */
3427 write_fast_connectable(req, false);
3428
3429 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3430 scan |= SCAN_PAGE;
3431 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3432 scan |= SCAN_INQUIRY;
3433
3434 if (scan)
3435 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
3436 }
3437
3438 static void powered_complete(struct hci_dev *hdev, u8 status)
3439 {
3440 struct cmd_lookup match = { NULL, hdev };
3441
3442 BT_DBG("status 0x%02x", status);
3443
3444 hci_dev_lock(hdev);
3445
3446 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
3447
3448 new_settings(hdev, match.sk);
3449
3450 hci_dev_unlock(hdev);
3451
3452 if (match.sk)
3453 sock_put(match.sk);
3454 }
3455
3456 static int powered_update_hci(struct hci_dev *hdev)
3457 {
3458 struct hci_request req;
3459 u8 link_sec;
3460
3461 hci_req_init(&req, hdev);
3462
3463 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
3464 !lmp_host_ssp_capable(hdev)) {
3465 u8 ssp = 1;
3466
3467 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
3468 }
3469
3470 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
3471 lmp_bredr_capable(hdev)) {
3472 struct hci_cp_write_le_host_supported cp;
3473
3474 cp.le = 1;
3475 cp.simul = lmp_le_br_capable(hdev);
3476
3477 /* Check first if we already have the right
3478 * host state (host features set)
3479 */
3480 if (cp.le != lmp_host_le_capable(hdev) ||
3481 cp.simul != lmp_host_le_br_capable(hdev))
3482 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3483 sizeof(cp), &cp);
3484 }
3485
3486 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
3487 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3488 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
3489 sizeof(link_sec), &link_sec);
3490
3491 if (lmp_bredr_capable(hdev)) {
3492 set_bredr_scan(&req);
3493 update_class(&req);
3494 update_name(&req);
3495 update_eir(&req);
3496 }
3497
3498 return hci_req_run(&req, powered_complete);
3499 }
3500
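/* Called when the power state of a controller changes. On power-on
 * powered_update_hci() re-programs the HCI settings; on power-off all
 * pending commands are failed with MGMT_STATUS_NOT_POWERED and a zero
 * class of device is reported before the New Settings event is sent.
 */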
3501 int mgmt_powered(struct hci_dev *hdev, u8 powered)
3502 {
3503 struct cmd_lookup match = { NULL, hdev };
3504 u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
3505 u8 zero_cod[] = { 0, 0, 0 };
3506 int err;
3507
3508 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3509 return 0;
3510
3511 if (powered) {
3512 if (powered_update_hci(hdev) == 0)
3513 return 0;
3514
3515 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
3516 &match);
3517 goto new_settings;
3518 }
3519
3520 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
3521 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
3522
3523 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
3524 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
3525 zero_cod, sizeof(zero_cod), NULL);
3526
3527 new_settings:
3528 err = new_settings(hdev, match.sk);
3529
3530 if (match.sk)
3531 sock_put(match.sk);
3532
3533 return err;
3534 }
3535
3536 int mgmt_set_powered_failed(struct hci_dev *hdev, int err)
3537 {
3538 struct pending_cmd *cmd;
3539 u8 status;
3540
3541 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
3542 if (!cmd)
3543 return -ENOENT;
3544
3545 if (err == -ERFKILL)
3546 status = MGMT_STATUS_RFKILLED;
3547 else
3548 status = MGMT_STATUS_FAILED;
3549
3550 err = cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
3551
3552 mgmt_pending_remove(cmd);
3553
3554 return err;
3555 }
3556
3557 int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
3558 {
3559 struct cmd_lookup match = { NULL, hdev };
3560 bool changed = false;
3561 int err = 0;
3562
3563 if (discoverable) {
3564 if (!test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3565 changed = true;
3566 } else {
3567 if (test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3568 changed = true;
3569 }
3570
3571 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, settings_rsp,
3572 &match);
3573
3574 if (changed)
3575 err = new_settings(hdev, match.sk);
3576
3577 if (match.sk)
3578 sock_put(match.sk);
3579
3580 return err;
3581 }
3582
3583 int mgmt_connectable(struct hci_dev *hdev, u8 connectable)
3584 {
3585 struct pending_cmd *cmd;
3586 bool changed = false;
3587 int err = 0;
3588
3589 if (connectable) {
3590 if (!test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3591 changed = true;
3592 } else {
3593 if (test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3594 changed = true;
3595 }
3596
3597 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
3598
3599 if (changed)
3600 err = new_settings(hdev, cmd ? cmd->sk : NULL);
3601
3602 return err;
3603 }
3604
3605 int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
3606 {
3607 u8 mgmt_err = mgmt_status(status);
3608
3609 if (scan & SCAN_PAGE)
3610 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
3611 cmd_status_rsp, &mgmt_err);
3612
3613 if (scan & SCAN_INQUIRY)
3614 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
3615 cmd_status_rsp, &mgmt_err);
3616
3617 return 0;
3618 }
3619
3620 int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
3621 bool persistent)
3622 {
3623 struct mgmt_ev_new_link_key ev;
3624
3625 memset(&ev, 0, sizeof(ev));
3626
3627 ev.store_hint = persistent;
3628 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
3629 ev.key.addr.type = BDADDR_BREDR;
3630 ev.key.type = key->type;
3631 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
3632 ev.key.pin_len = key->pin_len;
3633
3634 return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
3635 }
3636
3637 int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
3638 {
3639 struct mgmt_ev_new_long_term_key ev;
3640
3641 memset(&ev, 0, sizeof(ev));
3642
3643 ev.store_hint = persistent;
3644 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
3645 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
3646 ev.key.authenticated = key->authenticated;
3647 ev.key.enc_size = key->enc_size;
3648 ev.key.ediv = key->ediv;
3649
3650 if (key->type == HCI_SMP_LTK)
3651 ev.key.master = 1;
3652
3653 memcpy(ev.key.rand, key->rand, sizeof(key->rand));
3654 memcpy(ev.key.val, key->val, sizeof(key->val));
3655
3656 return mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev),
3657 NULL);
3658 }
3659
3660 int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3661 u8 addr_type, u32 flags, u8 *name, u8 name_len,
3662 u8 *dev_class)
3663 {
3664 char buf[512];
3665 struct mgmt_ev_device_connected *ev = (void *) buf;
3666 u16 eir_len = 0;
3667
3668 bacpy(&ev->addr.bdaddr, bdaddr);
3669 ev->addr.type = link_to_bdaddr(link_type, addr_type);
3670
3671 ev->flags = __cpu_to_le32(flags);
3672
3673 if (name_len > 0)
3674 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
3675 name, name_len);
3676
3677 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
3678 eir_len = eir_append_data(ev->eir, eir_len,
3679 EIR_CLASS_OF_DEV, dev_class, 3);
3680
3681 ev->eir_len = cpu_to_le16(eir_len);
3682
3683 return mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
3684 sizeof(*ev) + eir_len, NULL);
3685 }
3686
3687 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
3688 {
3689 struct mgmt_cp_disconnect *cp = cmd->param;
3690 struct sock **sk = data;
3691 struct mgmt_rp_disconnect rp;
3692
3693 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3694 rp.addr.type = cp->addr.type;
3695
3696 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
3697 sizeof(rp));
3698
3699 *sk = cmd->sk;
3700 sock_hold(*sk);
3701
3702 mgmt_pending_remove(cmd);
3703 }
3704
3705 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
3706 {
3707 struct hci_dev *hdev = data;
3708 struct mgmt_cp_unpair_device *cp = cmd->param;
3709 struct mgmt_rp_unpair_device rp;
3710
3711 memset(&rp, 0, sizeof(rp));
3712 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3713 rp.addr.type = cp->addr.type;
3714
3715 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
3716
3717 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
3718
3719 mgmt_pending_remove(cmd);
3720 }
3721
3722 int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
3723 u8 link_type, u8 addr_type, u8 reason)
3724 {
3725 struct mgmt_ev_device_disconnected ev;
3726 struct sock *sk = NULL;
3727 int err;
3728
3729 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
3730
3731 bacpy(&ev.addr.bdaddr, bdaddr);
3732 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3733 ev.reason = reason;
3734
3735 err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev),
3736 sk);
3737
3738 if (sk)
3739 sock_put(sk);
3740
3741 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
3742 hdev);
3743
3744 return err;
3745 }
3746
3747 int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
3748 u8 link_type, u8 addr_type, u8 status)
3749 {
3750 struct mgmt_rp_disconnect rp;
3751 struct pending_cmd *cmd;
3752 int err;
3753
3754 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
3755 hdev);
3756
3757 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
3758 if (!cmd)
3759 return -ENOENT;
3760
3761 bacpy(&rp.addr.bdaddr, bdaddr);
3762 rp.addr.type = link_to_bdaddr(link_type, addr_type);
3763
3764 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
3765 mgmt_status(status), &rp, sizeof(rp));
3766
3767 mgmt_pending_remove(cmd);
3768
3769 return err;
3770 }
3771
3772 int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3773 u8 addr_type, u8 status)
3774 {
3775 struct mgmt_ev_connect_failed ev;
3776
3777 bacpy(&ev.addr.bdaddr, bdaddr);
3778 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3779 ev.status = mgmt_status(status);
3780
3781 return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
3782 }
3783
3784 int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
3785 {
3786 struct mgmt_ev_pin_code_request ev;
3787
3788 bacpy(&ev.addr.bdaddr, bdaddr);
3789 ev.addr.type = BDADDR_BREDR;
3790 ev.secure = secure;
3791
3792 return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
3793 NULL);
3794 }
3795
3796 int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3797 u8 status)
3798 {
3799 struct pending_cmd *cmd;
3800 struct mgmt_rp_pin_code_reply rp;
3801 int err;
3802
3803 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
3804 if (!cmd)
3805 return -ENOENT;
3806
3807 bacpy(&rp.addr.bdaddr, bdaddr);
3808 rp.addr.type = BDADDR_BREDR;
3809
3810 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3811 mgmt_status(status), &rp, sizeof(rp));
3812
3813 mgmt_pending_remove(cmd);
3814
3815 return err;
3816 }
3817
3818 int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3819 u8 status)
3820 {
3821 struct pending_cmd *cmd;
3822 struct mgmt_rp_pin_code_reply rp;
3823 int err;
3824
3825 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
3826 if (!cmd)
3827 return -ENOENT;
3828
3829 bacpy(&rp.addr.bdaddr, bdaddr);
3830 rp.addr.type = BDADDR_BREDR;
3831
3832 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
3833 mgmt_status(status), &rp, sizeof(rp));
3834
3835 mgmt_pending_remove(cmd);
3836
3837 return err;
3838 }
3839
3840 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3841 u8 link_type, u8 addr_type, __le32 value,
3842 u8 confirm_hint)
3843 {
3844 struct mgmt_ev_user_confirm_request ev;
3845
3846 BT_DBG("%s", hdev->name);
3847
3848 bacpy(&ev.addr.bdaddr, bdaddr);
3849 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3850 ev.confirm_hint = confirm_hint;
3851 ev.value = value;
3852
3853 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
3854 NULL);
3855 }
3856
3857 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3858 u8 link_type, u8 addr_type)
3859 {
3860 struct mgmt_ev_user_passkey_request ev;
3861
3862 BT_DBG("%s", hdev->name);
3863
3864 bacpy(&ev.addr.bdaddr, bdaddr);
3865 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3866
3867 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
3868 NULL);
3869 }
3870
3871 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3872 u8 link_type, u8 addr_type, u8 status,
3873 u8 opcode)
3874 {
3875 struct pending_cmd *cmd;
3876 struct mgmt_rp_user_confirm_reply rp;
3877 int err;
3878
3879 cmd = mgmt_pending_find(opcode, hdev);
3880 if (!cmd)
3881 return -ENOENT;
3882
3883 bacpy(&rp.addr.bdaddr, bdaddr);
3884 rp.addr.type = link_to_bdaddr(link_type, addr_type);
3885 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
3886 &rp, sizeof(rp));
3887
3888 mgmt_pending_remove(cmd);
3889
3890 return err;
3891 }
3892
3893 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3894 u8 link_type, u8 addr_type, u8 status)
3895 {
3896 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3897 status, MGMT_OP_USER_CONFIRM_REPLY);
3898 }
3899
3900 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3901 u8 link_type, u8 addr_type, u8 status)
3902 {
3903 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3904 status,
3905 MGMT_OP_USER_CONFIRM_NEG_REPLY);
3906 }
3907
3908 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3909 u8 link_type, u8 addr_type, u8 status)
3910 {
3911 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3912 status, MGMT_OP_USER_PASSKEY_REPLY);
3913 }
3914
3915 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3916 u8 link_type, u8 addr_type, u8 status)
3917 {
3918 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3919 status,
3920 MGMT_OP_USER_PASSKEY_NEG_REPLY);
3921 }
3922
3923 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
3924 u8 link_type, u8 addr_type, u32 passkey,
3925 u8 entered)
3926 {
3927 struct mgmt_ev_passkey_notify ev;
3928
3929 BT_DBG("%s", hdev->name);
3930
3931 bacpy(&ev.addr.bdaddr, bdaddr);
3932 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3933 ev.passkey = __cpu_to_le32(passkey);
3934 ev.entered = entered;
3935
3936 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
3937 }
3938
3939 int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3940 u8 addr_type, u8 status)
3941 {
3942 struct mgmt_ev_auth_failed ev;
3943
3944 bacpy(&ev.addr.bdaddr, bdaddr);
3945 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3946 ev.status = mgmt_status(status);
3947
3948 return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
3949 }
3950
3951 int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
3952 {
3953 struct cmd_lookup match = { NULL, hdev };
3954 bool changed = false;
3955 int err = 0;
3956
3957 if (status) {
3958 u8 mgmt_err = mgmt_status(status);
3959 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
3960 cmd_status_rsp, &mgmt_err);
3961 return 0;
3962 }
3963
3964 if (test_bit(HCI_AUTH, &hdev->flags)) {
3965 if (!test_and_set_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
3966 changed = true;
3967 } else {
3968 if (test_and_clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
3969 changed = true;
3970 }
3971
3972 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
3973 &match);
3974
3975 if (changed)
3976 err = new_settings(hdev, match.sk);
3977
3978 if (match.sk)
3979 sock_put(match.sk);
3980
3981 return err;
3982 }
3983
3984 static void clear_eir(struct hci_request *req)
3985 {
3986 struct hci_dev *hdev = req->hdev;
3987 struct hci_cp_write_eir cp;
3988
3989 if (!lmp_ext_inq_capable(hdev))
3990 return;
3991
3992 memset(hdev->eir, 0, sizeof(hdev->eir));
3993
3994 memset(&cp, 0, sizeof(cp));
3995
3996 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
3997 }
3998
3999 int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
4000 {
4001 struct cmd_lookup match = { NULL, hdev };
4002 struct hci_request req;
4003 bool changed = false;
4004 int err = 0;
4005
4006 if (status) {
4007 u8 mgmt_err = mgmt_status(status);
4008
4009 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
4010 &hdev->dev_flags))
4011 err = new_settings(hdev, NULL);
4012
4013 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
4014 &mgmt_err);
4015
4016 return err;
4017 }
4018
4019 if (enable) {
4020 if (!test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
4021 changed = true;
4022 } else {
4023 if (test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
4024 changed = true;
4025 }
4026
4027 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
4028
4029 if (changed)
4030 err = new_settings(hdev, match.sk);
4031
4032 if (match.sk)
4033 sock_put(match.sk);
4034
4035 hci_req_init(&req, hdev);
4036
4037 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
4038 update_eir(&req);
4039 else
4040 clear_eir(&req);
4041
4042 hci_req_run(&req, NULL);
4043
4044 return err;
4045 }
4046
4047 static void sk_lookup(struct pending_cmd *cmd, void *data)
4048 {
4049 struct cmd_lookup *match = data;
4050
4051 if (match->sk == NULL) {
4052 match->sk = cmd->sk;
4053 sock_hold(match->sk);
4054 }
4055 }
4056
4057 int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
4058 u8 status)
4059 {
4060 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
4061 int err = 0;
4062
4063 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
4064 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
4065 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
4066
4067 if (!status)
4068 err = mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
4069 3, NULL);
4070
4071 if (match.sk)
4072 sock_put(match.sk);
4073
4074 return err;
4075 }
4076
4077 int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
4078 {
4079 struct mgmt_cp_set_local_name ev;
4080 struct pending_cmd *cmd;
4081
4082 if (status)
4083 return 0;
4084
4085 memset(&ev, 0, sizeof(ev));
4086 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
4087 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
4088
4089 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
4090 if (!cmd) {
4091 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
4092
4093 /* If this is a HCI command related to powering on the
4094 * HCI dev don't send any mgmt signals.
4095 */
4096 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
4097 return 0;
4098 }
4099
4100 return mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
4101 cmd ? cmd->sk : NULL);
4102 }
4103
4104 int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
4105 u8 *randomizer, u8 status)
4106 {
4107 struct pending_cmd *cmd;
4108 int err;
4109
4110 BT_DBG("%s status %u", hdev->name, status);
4111
4112 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
4113 if (!cmd)
4114 return -ENOENT;
4115
4116 if (status) {
4117 err = cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4118 mgmt_status(status));
4119 } else {
4120 struct mgmt_rp_read_local_oob_data rp;
4121
4122 memcpy(rp.hash, hash, sizeof(rp.hash));
4123 memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
4124
4125 err = cmd_complete(cmd->sk, hdev->id,
4126 MGMT_OP_READ_LOCAL_OOB_DATA, 0, &rp,
4127 sizeof(rp));
4128 }
4129
4130 mgmt_pending_remove(cmd);
4131
4132 return err;
4133 }
4134
4135 int mgmt_le_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
4136 {
4137 struct cmd_lookup match = { NULL, hdev };
4138 bool changed = false;
4139 int err = 0;
4140
4141 if (status) {
4142 u8 mgmt_err = mgmt_status(status);
4143
4144 if (enable && test_and_clear_bit(HCI_LE_ENABLED,
4145 &hdev->dev_flags))
4146 err = new_settings(hdev, NULL);
4147
4148 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
4149 &mgmt_err);
4150
4151 return err;
4152 }
4153
4154 if (enable) {
4155 if (!test_and_set_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4156 changed = true;
4157 } else {
4158 if (test_and_clear_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4159 changed = true;
4160 }
4161
4162 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
4163
4164 if (changed)
4165 err = new_settings(hdev, match.sk);
4166
4167 if (match.sk)
4168 sock_put(match.sk);
4169
4170 return err;
4171 }
4172
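/* Emit a Device Found event for a discovery result. The EIR data is
 * copied into the event and, if the class of device is not already
 * present in it, appended as an extra EIR field.
 */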
4173 int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4174 u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name,
4175 u8 ssp, u8 *eir, u16 eir_len)
4176 {
4177 char buf[512];
4178 struct mgmt_ev_device_found *ev = (void *) buf;
4179 size_t ev_size;
4180
4181 if (!hci_discovery_active(hdev))
4182 return -EPERM;
4183
4184 /* Leave 5 bytes for a potential CoD field */
4185 if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
4186 return -EINVAL;
4187
4188 memset(buf, 0, sizeof(buf));
4189
4190 bacpy(&ev->addr.bdaddr, bdaddr);
4191 ev->addr.type = link_to_bdaddr(link_type, addr_type);
4192 ev->rssi = rssi;
4193 if (cfm_name)
4194 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
4195 if (!ssp)
4196 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
4197
4198 if (eir_len > 0)
4199 memcpy(ev->eir, eir, eir_len);
4200
4201 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
4202 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
4203 dev_class, 3);
4204
4205 ev->eir_len = cpu_to_le16(eir_len);
4206 ev_size = sizeof(*ev) + eir_len;
4207
4208 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
4209 }
4210
4211 int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4212 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
4213 {
4214 struct mgmt_ev_device_found *ev;
4215 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
4216 u16 eir_len;
4217
4218 ev = (struct mgmt_ev_device_found *) buf;
4219
4220 memset(buf, 0, sizeof(buf));
4221
4222 bacpy(&ev->addr.bdaddr, bdaddr);
4223 ev->addr.type = link_to_bdaddr(link_type, addr_type);
4224 ev->rssi = rssi;
4225
4226 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
4227 name_len);
4228
4229 ev->eir_len = cpu_to_le16(eir_len);
4230
4231 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev,
4232 sizeof(*ev) + eir_len, NULL);
4233 }
4234
4235 int mgmt_discovering(struct hci_dev *hdev, u8 discovering)
4236 {
4237 struct mgmt_ev_discovering ev;
4238 struct pending_cmd *cmd;
4239
4240 BT_DBG("%s discovering %u", hdev->name, discovering);
4241
4242 if (discovering)
4243 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
4244 else
4245 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4246
4247 if (cmd != NULL) {
4248 u8 type = hdev->discovery.type;
4249
4250 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
4251 sizeof(type));
4252 mgmt_pending_remove(cmd);
4253 }
4254
4255 memset(&ev, 0, sizeof(ev));
4256 ev.type = hdev->discovery.type;
4257 ev.discovering = discovering;
4258
4259 return mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
4260 }
4261
4262 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4263 {
4264 struct pending_cmd *cmd;
4265 struct mgmt_ev_device_blocked ev;
4266
4267 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
4268
4269 bacpy(&ev.addr.bdaddr, bdaddr);
4270 ev.addr.type = type;
4271
4272 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
4273 cmd ? cmd->sk : NULL);
4274 }
4275
4276 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4277 {
4278 struct pending_cmd *cmd;
4279 struct mgmt_ev_device_unblocked ev;
4280
4281 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
4282
4283 bacpy(&ev.addr.bdaddr, bdaddr);
4284 ev.addr.type = type;
4285
4286 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
4287 cmd ? cmd->sk : NULL);
4288 }
4289
4290 module_param(enable_hs, bool, 0644);
4291 MODULE_PARM_DESC(enable_hs, "Enable High Speed support");