/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}
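
/* Illustrative usage sketch (not part of the original source): an
 * asynchronous request queues one or more commands and then runs the
 * whole batch. The opcode and callback below are examples only:
 *
 *	static void my_complete(struct hci_dev *hdev, u8 status,
 *				u16 opcode)
 *	{
 *		BT_DBG("%s status 0x%2.2x", hdev->name, status);
 *	}
 *
 *	struct hci_request req;
 *	u8 enable = 0x01;
 *	int err;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
 *		    &enable);
 *	err = hci_req_run(&req, my_complete);
 *
 * The callback fires once, for the last command of the batch; any
 * error recorded while building (req.err) makes hci_req_run() fail
 * without sending anything.
 */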

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
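
/* Usage sketch (illustrative, not part of the original source): the
 * synchronous helpers block until the command completes and return
 * the resulting event skb. HCI_OP_READ_LOCAL_VERSION is just an
 * example opcode:
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	...
 *	kfree_skb(skb);
 *
 * Callers are expected to serialize through hci_req_sync_lock() since
 * the helpers share the hdev->req_status/req_result/req_skb fields.
 */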

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	hci_req_sync_unlock(hdev);

	return ret;
}
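
/* Usage sketch (illustrative, not part of the original source):
 * callers pass a builder function that queues commands on the
 * request; the helpers run it and wait for completion:
 *
 *	static int my_update(struct hci_request *req, unsigned long opt)
 *	{
 *		__hci_req_update_class(req);
 *		return 0;
 *	}
 *
 *	u8 status;
 *	int err;
 *
 *	err = hci_req_sync(hdev, my_update, 0, HCI_CMD_TIMEOUT, &status);
 *
 * A builder that ends up queueing nothing is fine: the -ENODATA case
 * above is treated as success.
 */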

struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}
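
/* Note (added for clarity, not in the original source): page scan
 * interval and window are in units of 0.625 ms, so 0x0100 = 256 *
 * 0.625 ms = 160 ms, 0x0800 = 2048 * 0.625 ms = 1.28 s, and the
 * 0x0012 window = 18 * 0.625 ms = 11.25 ms, the specification
 * default.
 */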

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure that proper values are set for the RSSI threshold and
	 * UUID filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there are no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req);

		hci_req_add_le_passive_scan(req);

		BT_DBG("%s starting background scanning", hdev->name);
	}
}

void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID	0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}

static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}
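
/* Layout note (added for clarity, not in the original source): EIR
 * data is a sequence of length/type/value triplets where the length
 * byte counts the type byte plus the value. A device name "BlueZ",
 * for example, is encoded as:
 *
 *	0x06 EIR_NAME_COMPLETE 'B' 'l' 'u' 'e' 'Z'
 *
 * which is why create_eir() stores name_len + 1 in ptr[0] and the
 * UUID list builders above seed uuids_start[0] with 1 before
 * appending UUID bytes.
 */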

void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void add_to_white_list(struct hci_request *req,
			      struct hci_conn_params *params)
{
	struct hci_cp_le_add_to_white_list cp;

	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	uint8_t white_list_entries = 0;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		struct hci_cp_le_del_from_white_list cp;

		if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
					      &b->bdaddr, b->bdaddr_type) ||
		    hci_pend_le_action_lookup(&hdev->pend_le_reports,
					      &b->bdaddr, b->bdaddr_type)) {
			white_list_entries++;
			continue;
		}

		cp.bdaddr_type = b->bdaddr_type;
		bacpy(&cp.bdaddr, &b->bdaddr);

		hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
			    sizeof(cp), &cp);
	}

	/* Since all no longer valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available white list entries in the controller, then
	 * just abort and return a filter policy value to not use
	 * the white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* Select filter policy to use white list */
	return 0x01;
}
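
/* Note (added for clarity, not in the original source): the value
 * returned above becomes the filter_policy field of LE Set Scan
 * Parameters: 0x00 processes all advertising packets, 0x01 only
 * those from devices on the white list. RPAs force 0x00 because the
 * controller cannot match them against white list entries.
 */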

void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using random resolvable addresses and
	 * with that having LE privacy enabled, then controllers with
	 * Extended Scanner Filter Policies support can now enable support
	 * for handling directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled) use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_PASSIVE;
	param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
	param_cp.window = cpu_to_le16(hdev->le_scan_window);
	param_cp.own_address_type = own_addr_type;
	param_cp.filter_policy = filter_policy;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);
}

static u8 get_current_adv_instance(struct hci_dev *hdev)
{
	/* The "Set Advertising" setting supersedes the "Add Advertising"
	 * setting. Here we set the advertising data based on which
	 * setting was set. When neither apply, default to the global settings,
	 * represented by instance "0".
	 */
	if (!list_empty(&hdev->adv_instances) &&
	    !hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return hdev->cur_adv_instance;

	return 0x00;
}

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
	u8 instance = get_current_adv_instance(hdev);
	struct adv_info *adv_instance;

	/* Ignore instance 0 */
	if (instance == 0x00)
		return 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv_instance;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		return flags;
	}

	adv_instance = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv_instance)
		return 0;

	return adv_instance->flags;
}

void __hci_req_enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u8 instance;
	u32 flags;

	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	instance = get_current_adv_instance(hdev);
	flags = get_adv_instance_flags(hdev, instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

	if (connectable)
		cp.type = LE_ADV_IND;
	else if (get_cur_adv_instance_scan_rsp_len(hdev))
		cp.type = LE_ADV_SCAN_IND;
	else
		cp.type = LE_ADV_NONCONN_IND;

	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}
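
/* Note (added for clarity, not in the original source): min_interval
 * and max_interval above are in units of 0.625 ms, e.g. 0x0800 =
 * 2048 * 0.625 ms = 1.28 s between advertising events. The
 * advertising type chosen above is:
 *
 *	connectable			-> ADV_IND
 *	scan response data present	-> ADV_SCAN_IND
 *	otherwise			-> ADV_NONCONN_IND
 */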

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
					u8 *ptr)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Set the appropriate entries based on advertising instance flags
	 * here once flags other than 0 are supported.
	 */
	memcpy(ptr, adv_instance->scan_rsp_data,
	       adv_instance->scan_rsp_len);

	return adv_instance->scan_rsp_len;
}

static void update_inst_scan_rsp_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	if (instance)
		len = create_instance_scan_rsp_data(hdev, instance, cp.data);
	else
		len = create_default_scan_rsp_data(hdev, cp.data);

	if (hdev->scan_rsp_data_len == len &&
	    !memcmp(cp.data, hdev->scan_rsp_data, len))
		return;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, int instance)
{
	if (instance == HCI_ADV_CURRENT)
		instance = get_current_adv_instance(req->hdev);

	update_inst_scan_rsp_data(req, instance);
}

static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
	struct adv_info *adv_instance = NULL;
	u8 ad_len = 0, flags = 0;
	u32 instance_flags;

	/* Return 0 when the current instance identifier is invalid. */
	if (instance) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return 0;
	}

	instance_flags = get_adv_instance_flags(hdev, instance);

	/* The Add Advertising command allows userspace to set both the general
	 * and limited discoverable flags.
	 */
	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
		flags |= LE_AD_GENERAL;

	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
		flags |= LE_AD_LIMITED;

	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
		/* If a discovery flag wasn't provided, simply use the global
		 * settings.
		 */
		if (!flags)
			flags |= mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		/* If flags would still be empty, then there is no need to
		 * include the "Flags" AD field.
		 */
		if (flags) {
			ptr[0] = 0x02;
			ptr[1] = EIR_FLAGS;
			ptr[2] = flags;

			ad_len += 3;
			ptr += 3;
		}
	}

	if (adv_instance) {
		memcpy(ptr, adv_instance->adv_data,
		       adv_instance->adv_data_len);
		ad_len += adv_instance->adv_data_len;
		ptr += adv_instance->adv_data_len;
	}

	/* Provide Tx Power only if we can provide a valid value for it */
	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
	    (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
		ptr[0] = 0x02;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8)hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}

static void update_inst_adv_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_instance_adv_data(hdev, instance, cp.data);

	/* There's nothing to do if the data hasn't changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

void __hci_req_update_adv_data(struct hci_request *req, int instance)
{
	if (instance == HCI_ADV_CURRENT)
		instance = get_current_adv_instance(req->hdev);

	update_inst_adv_data(req, instance);
}

int hci_req_update_adv_data(struct hci_dev *hdev, int instance)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_update_adv_data(&req, instance);

	return hci_req_run(&req, NULL);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 instance;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return;

	instance = get_current_adv_instance(hdev);

	hci_req_init(&req, hdev);

	if (instance) {
		__hci_req_schedule_adv_instance(&req, instance, true);
	} else {
		__hci_req_update_adv_data(&req, HCI_ADV_CURRENT);
		__hci_req_update_scan_rsp_data(&req, HCI_ADV_CURRENT);
		__hci_req_enable_advertising(&req);
	}

	hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance_expire.work);

	struct hci_request req;
	u8 instance;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hdev->adv_instance_timeout = 0;

	instance = get_current_adv_instance(hdev);
	if (instance == 0x00)
		goto unlock;

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, &req, instance, false);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	hci_req_run(&req, NULL);

unlock:
	hci_dev_unlock(hdev);
}

int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
				    bool force)
{
	struct hci_dev *hdev = req->hdev;
	struct adv_info *adv_instance = NULL;
	u16 timeout;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return -EPERM;

	if (hdev->adv_instance_timeout)
		return -EBUSY;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	/* A zero timeout means unlimited advertising. As long as there is
	 * only one instance, duration should be ignored. We still set a timeout
	 * in case further instances are being added later on.
	 *
	 * If the remaining lifetime of the instance is more than the duration
	 * then the timeout corresponds to the duration, otherwise it will be
	 * reduced to the remaining instance lifetime.
	 */
	if (adv_instance->timeout == 0 ||
	    adv_instance->duration <= adv_instance->remaining_time)
		timeout = adv_instance->duration;
	else
		timeout = adv_instance->remaining_time;

	/* The remaining time is being reduced unless the instance is being
	 * advertised without time limit.
	 */
	if (adv_instance->timeout)
		adv_instance->remaining_time =
				adv_instance->remaining_time - timeout;

	hdev->adv_instance_timeout = timeout;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->adv_instance_expire,
			   msecs_to_jiffies(timeout * 1000));

	/* If we're just re-scheduling the same instance again then do not
	 * execute any HCI commands. This happens when a single instance is
	 * being advertised.
	 */
	if (!force && hdev->cur_adv_instance == instance &&
	    hci_dev_test_flag(hdev, HCI_LE_ADV))
		return 0;

	hdev->cur_adv_instance = instance;
	__hci_req_update_adv_data(req, HCI_ADV_CURRENT);
	__hci_req_update_scan_rsp_data(req, HCI_ADV_CURRENT);
	__hci_req_enable_advertising(req);

	return 0;
}
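
/* Worked example (added for clarity, not in the original source): an
 * instance with duration 2 s, timeout 10 s and remaining_time 10 s
 * gets timeout = 2 s on the first pass and remaining_time = 8 s
 * afterwards; once remaining_time drops below the duration, the
 * final shorter slice is scheduled instead. msecs_to_jiffies(timeout
 * * 1000) converts the second-based mgmt values into jiffies for the
 * delayed work.
 */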

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct hci_request *req,
				u8 instance, bool force)
{
	struct adv_info *adv_instance, *n, *next_instance = NULL;
	int err;
	u8 rem_inst;

	/* Cancel any timeout concerning the removed instance(s). */
	if (!instance || hdev->cur_adv_instance == instance)
		cancel_adv_timeout(hdev);

	/* Get the next instance to advertise BEFORE we remove
	 * the current one. This can be the same instance again
	 * if there is only one instance.
	 */
	if (instance && hdev->cur_adv_instance == instance)
		next_instance = hci_get_next_instance(hdev, instance);

	if (instance == 0x00) {
		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
					 list) {
			if (!(force || adv_instance->timeout))
				continue;

			rem_inst = adv_instance->instance;
			err = hci_remove_adv_instance(hdev, rem_inst);
			if (!err)
				mgmt_advertising_removed(NULL, hdev, rem_inst);
		}
		hdev->cur_adv_instance = 0x00;
	} else {
		adv_instance = hci_find_adv_instance(hdev, instance);

		if (force || (adv_instance && adv_instance->timeout &&
			      !adv_instance->remaining_time)) {
			/* Don't advertise a removed instance. */
			if (next_instance &&
			    next_instance->instance == instance)
				next_instance = NULL;

			err = hci_remove_adv_instance(hdev, instance);
			if (!err)
				mgmt_advertising_removed(NULL, hdev, instance);
		}
	}

	if (list_empty(&hdev->adv_instances))
		hdev->cur_adv_instance = 0x00;

	if (!req || !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	if (next_instance)
		__hci_req_schedule_adv_instance(req, next_instance->instance,
						false);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	    hci_lookup_le_connect(hdev)) {
		BT_DBG("Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired or there is something else than
	 * the current RPA in use, then generate a new one.
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &nrpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
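
/* Summary (added for clarity, not in the original source) of the
 * own_addr_type decision above:
 *
 *	HCI_PRIVACY set				-> RPA, rotated on rpa_timeout
 *	require_privacy without privacy		-> freshly generated NRPA
 *	static address forced or configured	-> static random address
 *	otherwise				-> public address
 *
 * An NRPA must have its two most significant bits cleared, hence
 * nrpa.b[5] &= 0x3f (bdaddr_t stores the address little-endian, so
 * b[5] is the most significant byte).
 */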

static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->whitelist, list) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
		if (!conn)
			return true;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			return true;
	}

	return false;
}

void __hci_req_update_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 scan;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (!hdev_is_powered(hdev))
		return;

	if (mgmt_powering_down(hdev))
		return;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
	    disconnected_whitelist_entries(hdev))
		scan = SCAN_PAGE;
	else
		scan = SCAN_DISABLED;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		scan |= SCAN_INQUIRY;

	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
		return;

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static int update_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_req_update_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void scan_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);

	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
}

static int connectable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	__hci_req_update_scan(req);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		__hci_req_update_adv_data(req, HCI_ADV_CURRENT);

	/* Update the advertising parameters if necessary */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !list_empty(&hdev->adv_instances))
		__hci_req_enable_advertising(req);

	__hci_update_background_scan(req);

	hci_dev_unlock(hdev);

	return 0;
}

static void connectable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    connectable_update);
	u8 status;

	hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_connectable_complete(hdev, status);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}

void __hci_req_update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}

static void write_iac(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_current_iac_lap cp;

	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		return;

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
		/* Limited discoverable mode */
		cp.num_iac = min_t(u8, hdev->num_iac, 2);
		cp.iac_lap[0] = 0x00;	/* LIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
		cp.iac_lap[3] = 0x33;	/* GIAC */
		cp.iac_lap[4] = 0x8b;
		cp.iac_lap[5] = 0x9e;
	} else {
		/* General discoverable mode */
		cp.num_iac = 1;
		cp.iac_lap[0] = 0x33;	/* GIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
	}

	hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
		    (cp.num_iac * 3) + 1, &cp);
}
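
/* Note (added for clarity, not in the original source): the LAPs are
 * written little-endian, so the bytes 0x33 0x8b 0x9e form the GIAC
 * 0x9e8b33 and 0x00 0x8b 0x9e the LIAC 0x9e8b00, the two inquiry
 * access codes reserved by the Bluetooth specification.
 */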

static int discoverable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		write_iac(req);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
	}

	/* Advertising instances don't use the global discoverable setting, so
	 * only update AD if advertising was enabled using Set Advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		__hci_req_update_adv_data(req, HCI_ADV_CURRENT);

	hci_dev_unlock(hdev);

	return 0;
}

static void discoverable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discoverable_update);
	u8 status;

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_discoverable_complete(hdev, status);
}

void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
		      u8 reason)
{
	switch (conn->state) {
	case BT_CONNECTED:
	case BT_CONFIG:
		if (conn->type == AMP_LINK) {
			struct hci_cp_disconn_phy_link cp;

			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
			cp.reason = reason;
			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
				    &cp);
		} else {
			struct hci_cp_disconnect dc;

			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = reason;
			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
		}

		conn->state = BT_DISCONN;

		break;
	case BT_CONNECT:
		if (conn->type == LE_LINK) {
			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
				break;
			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
				    0, NULL);
		} else if (conn->type == ACL_LINK) {
			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
				break;
			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
				    6, &conn->dst);
		}
		break;
	case BT_CONNECT2:
		if (conn->type == ACL_LINK) {
			struct hci_cp_reject_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = reason;

			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
				    sizeof(rej), &rej);
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			struct hci_cp_reject_sync_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);

			/* SCO rejection has its own limited set of
			 * allowed error values (0x0D-0x0F) which isn't
			 * compatible with most values passed to this
			 * function. To be safe hard-code one of the
			 * values that's suitable for SCO.
			 */
			rej.reason = HCI_ERROR_REMOTE_LOW_RESOURCES;

			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
				    sizeof(rej), &rej);
		}
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}

static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	if (status)
		BT_DBG("Failed to abort connection: status 0x%2.2x", status);
}

int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
	struct hci_request req;
	int err;

	hci_req_init(&req, conn->hdev);

	__hci_abort_conn(&req, conn, reason);

	err = hci_req_run(&req, abort_conn_complete);
	if (err && err != -ENODATA) {
		BT_ERR("Failed to run HCI request: err %d", err);
		return err;
	}

	return 0;
}
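
/* Usage sketch (illustrative, not part of the original source):
 *
 *	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
 *
 * The state machine in __hci_abort_conn() picks the appropriate HCI
 * command (Disconnect, Create Connection Cancel or Reject), so the
 * caller only supplies the reason code.
 */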
5fc16cc4 1739
a1d01db1 1740static int update_bg_scan(struct hci_request *req, unsigned long opt)
2e93e53b
JH
1741{
1742 hci_dev_lock(req->hdev);
1743 __hci_update_background_scan(req);
1744 hci_dev_unlock(req->hdev);
a1d01db1 1745 return 0;
2e93e53b
JH
1746}
1747
1748static void bg_scan_update(struct work_struct *work)
1749{
1750 struct hci_dev *hdev = container_of(work, struct hci_dev,
1751 bg_scan_update);
84235d22
JH
1752 struct hci_conn *conn;
1753 u8 status;
1754 int err;
1755
1756 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
1757 if (!err)
1758 return;
1759
1760 hci_dev_lock(hdev);
1761
1762 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1763 if (conn)
1764 hci_le_conn_failed(conn, status);
2e93e53b 1765
84235d22 1766 hci_dev_unlock(hdev);
2e93e53b
JH
1767}
1768
f4a2cb4d 1769static int le_scan_disable(struct hci_request *req, unsigned long opt)
7c1fbed2 1770{
f4a2cb4d
JH
1771 hci_req_add_le_scan_disable(req);
1772 return 0;
7c1fbed2
JH
1773}
1774
f4a2cb4d 1775static int bredr_inquiry(struct hci_request *req, unsigned long opt)
7c1fbed2 1776{
f4a2cb4d 1777 u8 length = opt;
7c1fbed2
JH
1778 /* General inquiry access code (GIAC) */
1779 u8 lap[3] = { 0x33, 0x8b, 0x9e };
1780 struct hci_cp_inquiry cp;
7c1fbed2 1781
f4a2cb4d 1782 BT_DBG("%s", req->hdev->name);
7c1fbed2 1783
f4a2cb4d
JH
1784 hci_dev_lock(req->hdev);
1785 hci_inquiry_cache_flush(req->hdev);
1786 hci_dev_unlock(req->hdev);
7c1fbed2 1787
f4a2cb4d
JH
1788 memset(&cp, 0, sizeof(cp));
1789 memcpy(&cp.lap, lap, sizeof(cp.lap));
1790 cp.length = length;
7c1fbed2 1791
f4a2cb4d 1792 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
7c1fbed2 1793
a1d01db1 1794 return 0;
7c1fbed2
JH
1795}
1796
1797static void le_scan_disable_work(struct work_struct *work)
1798{
1799 struct hci_dev *hdev = container_of(work, struct hci_dev,
1800 le_scan_disable.work);
1801 u8 status;
7c1fbed2
JH
1802
1803 BT_DBG("%s", hdev->name);
1804
f4a2cb4d
JH
1805 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
1806 return;
1807
7c1fbed2
JH
1808 cancel_delayed_work(&hdev->le_scan_restart);
1809
f4a2cb4d
JH
1810 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
1811 if (status) {
1812 BT_ERR("Failed to disable LE scan: status 0x%02x", status);
1813 return;
1814 }
1815
1816 hdev->discovery.scan_start = 0;
1817
1818 /* If we were running LE only scan, change discovery state. If
1819 * we were running both LE and BR/EDR inquiry simultaneously,
1820 * and BR/EDR inquiry is already finished, stop discovery,
1821 * otherwise BR/EDR inquiry will stop discovery when finished.
1822 * If we will resolve remote device name, do not change
1823 * discovery state.
1824 */
1825
1826 if (hdev->discovery.type == DISCOV_TYPE_LE)
1827 goto discov_stopped;
1828
1829 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
7c1fbed2
JH
1830 return;
1831
f4a2cb4d
JH
1832 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
1833 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
1834 hdev->discovery.state != DISCOVERY_RESOLVING)
1835 goto discov_stopped;
1836
1837 return;
1838 }
1839
1840 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
1841 HCI_CMD_TIMEOUT, &status);
1842 if (status) {
1843 BT_ERR("Inquiry failed: status 0x%02x", status);
1844 goto discov_stopped;
1845 }
1846
1847 return;
1848
1849discov_stopped:
1850 hci_dev_lock(hdev);
1851 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1852 hci_dev_unlock(hdev);
7c1fbed2
JH
1853}
1854
3dfe5905
JH
1855static int le_scan_restart(struct hci_request *req, unsigned long opt)
1856{
1857 struct hci_dev *hdev = req->hdev;
1858 struct hci_cp_le_set_scan_enable cp;
1859
1860 /* If controller is not scanning we are done. */
1861 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
1862 return 0;
1863
1864 hci_req_add_le_scan_disable(req);
1865
1866 memset(&cp, 0, sizeof(cp));
1867 cp.enable = LE_SCAN_ENABLE;
1868 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
1869 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1870
1871 return 0;
1872}
1873
1874static void le_scan_restart_work(struct work_struct *work)
7c1fbed2 1875{
3dfe5905
JH
1876 struct hci_dev *hdev = container_of(work, struct hci_dev,
1877 le_scan_restart.work);
7c1fbed2 1878 unsigned long timeout, duration, scan_start, now;
3dfe5905 1879 u8 status;
7c1fbed2
JH
1880
1881 BT_DBG("%s", hdev->name);
1882
3dfe5905 1883 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
7c1fbed2
JH
1884 if (status) {
1885 BT_ERR("Failed to restart LE scan: status %d", status);
1886 return;
1887 }
1888
1889 hci_dev_lock(hdev);
1890
1891 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
1892 !hdev->discovery.scan_start)
1893 goto unlock;
1894
1895 /* When the scan was started, hdev->le_scan_disable has been queued
1896 * after duration from scan_start. During scan restart this job
1897 * has been canceled, and we need to queue it again after proper
1898 * timeout, to make sure that scan does not run indefinitely.
1899 */
1900 duration = hdev->discovery.scan_duration;
1901 scan_start = hdev->discovery.scan_start;
1902 now = jiffies;
1903 if (now - scan_start <= duration) {
1904 int elapsed;
1905
1906 if (now >= scan_start)
1907 elapsed = now - scan_start;
1908 else
1909 elapsed = ULONG_MAX - scan_start + now;
1910
1911 timeout = duration - elapsed;
1912 } else {
1913 timeout = 0;
1914 }
1915
1916 queue_delayed_work(hdev->req_workqueue,
1917 &hdev->le_scan_disable, timeout);
1918
1919unlock:
1920 hci_dev_unlock(hdev);
1921}
1922
e68f072b
JH
1923static void disable_advertising(struct hci_request *req)
1924{
1925 u8 enable = 0x00;
1926
1927 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1928}
1929
static int active_scan(struct hci_request *req, unsigned long opt)
{
	u16 interval = opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	u8 own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
		hci_dev_lock(hdev);

		/* Don't let discovery abort an outgoing connection attempt
		 * that's using directed advertising.
		 */
		if (hci_lookup_le_connect(hdev)) {
			hci_dev_unlock(hdev);
			return -EBUSY;
		}

		cancel_adv_timeout(hdev);
		hci_dev_unlock(hdev);

		disable_advertising(req);
	}

	/* If controller is scanning, it means the background scanning is
	 * running. Thus, we should temporarily stop it in order to set the
	 * discovery scanning parameters.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req);

	/* All active scans will be done with either a resolvable private
	 * address (when privacy feature has been enabled) or non-resolvable
	 * private address.
	 */
	err = hci_update_random_address(req, true, &own_addr_type);
	if (err < 0)
		own_addr_type = ADDR_LE_DEV_PUBLIC;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_ACTIVE;
	param_cp.interval = cpu_to_le16(interval);
	param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
	param_cp.own_address_type = own_addr_type;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);

	return 0;
}

static int interleaved_discov(struct hci_request *req, unsigned long opt)
{
	int err;

	BT_DBG("%s", req->hdev->name);

	err = active_scan(req, opt);
	if (err)
		return err;

	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
}

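/* Run the request matching the configured discovery type and, for the
 * LE-based types, arm the le_scan_disable timer that ends the scan.
 */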
static void start_discovery(struct hci_dev *hdev, u8 *status)
{
	unsigned long timeout;

	BT_DBG("%s type %u", hdev->name, hdev->discovery.type);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
			hci_req_sync(hdev, bredr_inquiry,
				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
				     status);
		return;
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning time
		 * should occupy the whole discovery time since BR/EDR inquiry
		 * and LE scanning are scheduled by the controller.
		 *
		 * Interleaved discovery, in comparison, runs BR/EDR inquiry
		 * and LE scanning sequentially with separate timeouts.
		 */
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
			/* During simultaneous discovery, we double the LE
			 * scan interval to leave some time for the controller
			 * to do BR/EDR inquiry.
			 */
			hci_req_sync(hdev, interleaved_discov,
				     DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
				     status);
			break;
		}

		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	default:
		*status = HCI_ERROR_UNSPECIFIED;
		return;
	}

	if (*status)
		return;

	BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));

	/* When service discovery is used and the controller has a
	 * strict duplicate filter, it is important to remember the
	 * start and duration of the scan. This is required for
	 * restarting scanning during the discovery phase.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
	    hdev->discovery.result_filtering) {
		hdev->discovery.scan_start = jiffies;
		hdev->discovery.scan_duration = timeout;
	}

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
			   timeout);
}

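/* Append the commands needed to stop the ongoing discovery to @req.
 * Returns true if at least one command was queued, i.e. if the caller
 * must run the request before discovery can be considered stopped.
 */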
bool hci_req_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct discovery_state *d = &hdev->discovery;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	bool ret = false;

	BT_DBG("%s state %u", hdev->name, hdev->discovery.state);

	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
		if (test_bit(HCI_INQUIRY, &hdev->flags))
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		ret = true;
	} else {
		/* Passive scanning */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			hci_req_add_le_scan_disable(req);
			ret = true;
		}
	}

	/* No further actions needed for LE-only discovery */
	if (d->type == DISCOV_TYPE_LE)
		return ret;

	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return ret;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);
		ret = true;
	}

	return ret;
}

static int stop_discovery(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	hci_req_stop_discovery(req);
	hci_dev_unlock(req->hdev);

	return 0;
}

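/* Worker driving the discovery state machine: start or stop discovery
 * according to the state requested by mgmt and report the result back.
 */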
static void discov_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_update);
	u8 status = 0;

	switch (hdev->discovery.state) {
	case DISCOVERY_STARTING:
		start_discovery(hdev, &status);
		mgmt_start_discovery_complete(hdev, status);
		if (status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		break;
	case DISCOVERY_STOPPING:
		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
		mgmt_stop_discovery_complete(hdev, status);
		if (!status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		break;
	case DISCOVERY_STOPPED:
	default:
		return;
	}
}

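/* Delayed work that ends discoverable mode once the configured
 * discoverable timeout has expired.
 */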
static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* When the discoverable timeout triggers, just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
	mgmt_new_settings(hdev);
}

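/* Bring the controller in sync with the stored host settings (SSP,
 * LE, advertising, link security, scan mode, class, name and EIR)
 * after it has been powered on.
 */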
static int powered_update_hci(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 link_sec;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 mode = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
			u8 support = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(support), &support);
		}
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
		    list_empty(&hdev->adv_instances)) {
			__hci_req_update_adv_data(req, 0x00);
			__hci_req_update_scan_rsp_data(req, 0x00);

			if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
				__hci_req_enable_advertising(req);
		} else if (!list_empty(&hdev->adv_instances)) {
			struct adv_info *adv_instance;

			adv_instance = list_first_entry(&hdev->adv_instances,
							struct adv_info, list);
			__hci_req_schedule_adv_instance(req,
							adv_instance->instance,
							true);
		}
	}

	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
			__hci_req_write_fast_connectable(req, true);
		else
			__hci_req_write_fast_connectable(req, false);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
		__hci_req_update_name(req);
		__hci_req_update_eir(req);
	}

	hci_dev_unlock(hdev);
	return 0;
}

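/* Synchronously apply the powered-on settings assembled by
 * powered_update_hci() above.
 */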
int __hci_req_hci_power_on(struct hci_dev *hdev)
{
	/* Register the available SMP channels (BR/EDR and LE) only when
	 * successfully powering on the controller. This late
	 * registration is required so that LE SMP can clearly decide if
	 * the public address or static address is used.
	 */
	smp_register(hdev);

	return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
			      NULL);
}

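/* Wire up the (delayed) work callbacks used by this file. */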
void hci_request_setup(struct hci_dev *hdev)
{
	INIT_WORK(&hdev->discov_update, discov_update);
	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
	INIT_WORK(&hdev->scan_update, scan_update_work);
	INIT_WORK(&hdev->connectable_update, connectable_update_work);
	INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
}

void hci_request_cancel_all(struct hci_dev *hdev)
{
	hci_req_sync_cancel(hdev, ENODEV);

	cancel_work_sync(&hdev->discov_update);
	cancel_work_sync(&hdev->bg_scan_update);
	cancel_work_sync(&hdev->scan_update);
	cancel_work_sync(&hdev->connectable_update);
	cancel_work_sync(&hdev->discoverable_update);
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}
}