/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}

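/* Usage sketch (illustrative only, not part of the original file): a typical
 * caller builds a request on the stack, queues one or more commands and then
 * runs it with an optional completion callback. The callback and function
 * names below are hypothetical.
 *
 *	static void example_complete(struct hci_dev *hdev, u8 status,
 *				     u16 opcode)
 *	{
 *		BT_DBG("%s opcode 0x%4.4x status 0x%2.2x", hdev->name,
 *		       opcode, status);
 *	}
 *
 *	static int example_run(struct hci_dev *hdev)
 *	{
 *		struct hci_request req;
 *
 *		hci_req_init(&req, hdev);
 *		hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *		return hci_req_run(&req, example_complete);
 *	}
 *
 * hci_req_run() returns -ENODATA when nothing was queued, which callers
 * such as hci_abort_conn() below treat as a non-error.
 */
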
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

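/* Usage sketch (illustrative only, not part of the original file): sending a
 * command synchronously and consuming the returned event parameters. The
 * vendor opcode 0xfc01 is hypothetical; HCI_CMD_TIMEOUT is the timeout used
 * elsewhere in this file. These helpers are typically called with the
 * request lock (hci_req_sync_lock) held.
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, 0xfc01, 0, NULL, HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	/# skb->data now holds the command's return parameters #/
 *	kfree_skb(skb);
 */
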
/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	hci_req_sync_unlock(hdev);

	return ret;
}

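/* Usage sketch (illustrative only, not part of the original file): callers
 * pass a request-builder function which queues the HCI commands; the
 * builder's opt argument is an opaque value forwarded from hci_req_sync().
 * The builder name below is hypothetical.
 *
 *	static int example_build(struct hci_request *req, unsigned long opt)
 *	{
 *		u8 scan = (u8)opt;
 *
 *		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *		return 0;
 *	}
 *
 *	err = hci_req_sync(hdev, example_build, SCAN_PAGE, HCI_CMD_TIMEOUT,
 *			   &status);
 *
 * bredr_inquiry() and active_scan() later in this file follow exactly this
 * pattern.
 */
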
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections, we start background scanning;
 * otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure to set proper values for RSSI threshold and UUID
	 * filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there are no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req);

		hci_req_add_le_passive_scan(req);

		BT_DBG("%s starting background scanning", hdev->name);
	}
}

void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID		0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}

static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

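/* Worked example (illustrative only, not part of the original file): EIR
 * data is a sequence of length/type/value triplets where the length byte
 * counts the type byte plus the payload. For a device named "BT" with an
 * inquiry TX power of 4 dBm, create_eir() above would emit:
 *
 *	03 09 'B' 'T'	(len=3, EIR_NAME_COMPLETE 0x09, "BT")
 *	02 0a 04	(len=2, EIR_TX_POWER 0x0a, +4 dBm)
 *
 * followed by any 16/32/128-bit service UUID lists that still fit in the
 * HCI_MAX_EIR_LENGTH buffer.
 */
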
void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void add_to_white_list(struct hci_request *req,
			      struct hci_conn_params *params)
{
	struct hci_cp_le_add_to_white_list cp;

	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	uint8_t white_list_entries = 0;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		/* If the device is neither in pend_le_conns nor
		 * pend_le_reports then remove it from the white list.
		 */
		if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
					       &b->bdaddr, b->bdaddr_type) &&
		    !hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       &b->bdaddr, b->bdaddr_type)) {
			struct hci_cp_le_del_from_white_list cp;

			cp.bdaddr_type = b->bdaddr_type;
			bacpy(&cp.bdaddr, &b->bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
				    sizeof(cp), &cp);
			continue;
		}

		if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
	}

	/* Now that all no-longer-valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If there are more devices than available white list
	 * entries in the controller, then just abort and return a
	 * filter policy value that does not use the white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* Select filter policy to use white list */
	return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;

	/* Set require_privacy to false since no SCAN_REQ is sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using random resolvable addresses and
	 * with that having LE privacy enabled, then controllers with
	 * Extended Scanner Filter Policies support can now enable support
	 * for handling directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled) use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_PASSIVE;
	param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
	param_cp.window = cpu_to_le16(hdev->le_scan_window);
	param_cp.own_address_type = own_addr_type;
	param_cp.filter_policy = filter_policy;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);
}

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
	u8 instance = hdev->cur_adv_instance;
	struct adv_info *adv_instance;

	/* Ignore instance 0 */
	if (instance == 0x00)
		return 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv_instance;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv_instance = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv_instance)
		return 0;

	return adv_instance->flags;
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}

void __hci_req_enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u32 flags;

	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable,
				      adv_use_rpa(hdev, flags),
				      &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

	if (connectable)
		cp.type = LE_ADV_IND;
	else if (get_cur_adv_instance_scan_rsp_len(hdev))
		cp.type = LE_ADV_SCAN_IND;
	else
		cp.type = LE_ADV_NONCONN_IND;

	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	size_t short_len;
	size_t complete_len;

	/* no space left for name (+ NULL + type + len) */
	if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
		return ad_len;

	/* use complete name if present and fits */
	complete_len = strlen(hdev->dev_name);
	if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
		return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
				       hdev->dev_name, complete_len + 1);

	/* use short name if present */
	short_len = strlen(hdev->short_name);
	if (short_len)
		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
				       hdev->short_name, short_len + 1);

	/* use shortened full name if present, we already know that name
	 * is longer than HCI_MAX_SHORT_NAME_LENGTH
	 */
	if (complete_len) {
		u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];

		memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
		name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';

		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
				       sizeof(name));
	}

	return ad_len;
}

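/* Note (illustrative, based on the eir_append_data() helper declared in
 * hci_request.h): the helper writes a length byte (payload length plus one
 * for the type), the type byte and the payload, and returns the new offset.
 * Appending "BT" as EIR_NAME_COMPLETE at offset 0 would therefore yield
 * 03 09 'B' 'T' and return 4. The "+ 1" in the calls above means the name's
 * terminating NUL byte is copied into the advertising data as well.
 */
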
static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
}

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 scan_rsp_len = 0;

	if (hdev->appearance) {
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
	}

	return append_local_name(hdev, ptr, scan_rsp_len);
}

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
					u8 *ptr)
{
	struct adv_info *adv_instance;
	u32 instance_flags;
	u8 scan_rsp_len = 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	instance_flags = adv_instance->flags;

	if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
	}

	memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
	       adv_instance->scan_rsp_len);

	scan_rsp_len += adv_instance->scan_rsp_len;

	if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
		scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);

	return scan_rsp_len;
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	if (instance)
		len = create_instance_scan_rsp_data(hdev, instance, cp.data);
	else
		len = create_default_scan_rsp_data(hdev, cp.data);

	if (hdev->scan_rsp_data_len == len &&
	    !memcmp(cp.data, hdev->scan_rsp_data, len))
		return;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}

static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
	struct adv_info *adv_instance = NULL;
	u8 ad_len = 0, flags = 0;
	u32 instance_flags;

	/* Return 0 when the current instance identifier is invalid. */
	if (instance) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return 0;
	}

	instance_flags = get_adv_instance_flags(hdev, instance);

	/* The Add Advertising command allows userspace to set both the general
	 * and limited discoverable flags.
	 */
	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
		flags |= LE_AD_GENERAL;

	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
		flags |= LE_AD_LIMITED;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		flags |= LE_AD_NO_BREDR;

	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
		/* If a discovery flag wasn't provided, simply use the global
		 * settings.
		 */
		if (!flags)
			flags |= mgmt_get_adv_discov_flags(hdev);

		/* If flags would still be empty, then there is no need to
		 * include the "Flags" AD field.
		 */
		if (flags) {
			ptr[0] = 0x02;
			ptr[1] = EIR_FLAGS;
			ptr[2] = flags;

			ad_len += 3;
			ptr += 3;
		}
	}

	if (adv_instance) {
		memcpy(ptr, adv_instance->adv_data,
		       adv_instance->adv_data_len);
		ad_len += adv_instance->adv_data_len;
		ptr += adv_instance->adv_data_len;
	}

	/* Provide Tx Power only if we can provide a valid value for it */
	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
	    (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
		ptr[0] = 0x02;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8)hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}

void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_instance_adv_data(hdev, instance, cp.data);

	/* There's nothing to do if the data hasn't changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_update_adv_data(&req, instance);

	return hci_req_run(&req, NULL);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return;

	hci_req_init(&req, hdev);

	if (hdev->cur_adv_instance) {
		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
						true);
	} else {
		__hci_req_update_adv_data(&req, 0x00);
		__hci_req_update_scan_rsp_data(&req, 0x00);
		__hci_req_enable_advertising(&req);
	}

	hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance_expire.work);

	struct hci_request req;
	u8 instance;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hdev->adv_instance_timeout = 0;

	instance = hdev->cur_adv_instance;
	if (instance == 0x00)
		goto unlock;

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	hci_req_run(&req, NULL);

unlock:
	hci_dev_unlock(hdev);
}

int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
				    bool force)
{
	struct hci_dev *hdev = req->hdev;
	struct adv_info *adv_instance = NULL;
	u16 timeout;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return -EPERM;

	if (hdev->adv_instance_timeout)
		return -EBUSY;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	/* A zero timeout means unlimited advertising. As long as there is
	 * only one instance, duration should be ignored. We still set a timeout
	 * in case further instances are being added later on.
	 *
	 * If the remaining lifetime of the instance is more than the duration
	 * then the timeout corresponds to the duration, otherwise it will be
	 * reduced to the remaining instance lifetime.
	 */
	if (adv_instance->timeout == 0 ||
	    adv_instance->duration <= adv_instance->remaining_time)
		timeout = adv_instance->duration;
	else
		timeout = adv_instance->remaining_time;

	/* The remaining time is being reduced unless the instance is being
	 * advertised without time limit.
	 */
	if (adv_instance->timeout)
		adv_instance->remaining_time =
				adv_instance->remaining_time - timeout;

	hdev->adv_instance_timeout = timeout;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->adv_instance_expire,
			   msecs_to_jiffies(timeout * 1000));

	/* If we're just re-scheduling the same instance again then do not
	 * execute any HCI commands. This happens when a single instance is
	 * being advertised.
	 */
	if (!force && hdev->cur_adv_instance == instance &&
	    hci_dev_test_flag(hdev, HCI_LE_ADV))
		return 0;

	hdev->cur_adv_instance = instance;
	__hci_req_update_adv_data(req, instance);
	__hci_req_update_scan_rsp_data(req, instance);
	__hci_req_enable_advertising(req);

	return 0;
}

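/* Worked example (illustrative only, not part of the original file): an
 * instance with duration 2 s, timeout 10 s and remaining_time 10 s is
 * scheduled with timeout = duration = 2 s, and remaining_time drops to 8 s.
 * Once remaining_time (say 1 s) falls below the duration, the instance is
 * scheduled for just that 1 s and then expires via adv_timeout_expire(),
 * which removes it through hci_req_clear_adv_instance().
 */
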
static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
				struct hci_request *req, u8 instance,
				bool force)
{
	struct adv_info *adv_instance, *n, *next_instance = NULL;
	int err;
	u8 rem_inst;

	/* Cancel any timeout concerning the removed instance(s). */
	if (!instance || hdev->cur_adv_instance == instance)
		cancel_adv_timeout(hdev);

	/* Get the next instance to advertise BEFORE we remove
	 * the current one. This can be the same instance again
	 * if there is only one instance.
	 */
	if (instance && hdev->cur_adv_instance == instance)
		next_instance = hci_get_next_instance(hdev, instance);

	if (instance == 0x00) {
		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
					 list) {
			if (!(force || adv_instance->timeout))
				continue;

			rem_inst = adv_instance->instance;
			err = hci_remove_adv_instance(hdev, rem_inst);
			if (!err)
				mgmt_advertising_removed(sk, hdev, rem_inst);
		}
	} else {
		adv_instance = hci_find_adv_instance(hdev, instance);

		if (force || (adv_instance && adv_instance->timeout &&
			      !adv_instance->remaining_time)) {
			/* Don't advertise a removed instance. */
			if (next_instance &&
			    next_instance->instance == instance)
				next_instance = NULL;

			err = hci_remove_adv_instance(hdev, instance);
			if (!err)
				mgmt_advertising_removed(sk, hdev, instance);
		}
	}

	if (!req || !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	if (next_instance)
		__hci_req_schedule_adv_instance(req, next_instance->instance,
						false);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	    hci_lookup_le_connect(hdev)) {
		BT_DBG("Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      bool use_rpa, u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * the current RPA has expired or something other than the
	 * current RPA is in use, then generate a new one.
	 */
	if (use_rpa) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &nrpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

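/* Worked example (illustrative only, not part of the original file):
 * bdaddr_t stores the address little-endian, so b[5] is the most significant
 * byte. Clearing its two top bits (b[5] &= 0x3f) forces the random address
 * sub-type bits to 00, which marks a non-resolvable private address; an RPA
 * would instead have those bits set to 01 (b[5] between 0x40 and 0x7f).
 */
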
static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->whitelist, list) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
		if (!conn)
			return true;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			return true;
	}

	return false;
}

void __hci_req_update_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 scan;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (!hdev_is_powered(hdev))
		return;

	if (mgmt_powering_down(hdev))
		return;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
	    disconnected_whitelist_entries(hdev))
		scan = SCAN_PAGE;
	else
		scan = SCAN_DISABLED;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		scan |= SCAN_INQUIRY;

	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
		return;

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static int update_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_req_update_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void scan_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);

	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
}

static int connectable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	__hci_req_update_scan(req);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		__hci_req_update_adv_data(req, hdev->cur_adv_instance);

	/* Update the advertising parameters if necessary */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !list_empty(&hdev->adv_instances))
		__hci_req_enable_advertising(req);

	__hci_update_background_scan(req);

	hci_dev_unlock(hdev);

	return 0;
}

static void connectable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    connectable_update);
	u8 status;

	hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_connectable_complete(hdev, status);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}

void __hci_req_update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}

static void write_iac(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_current_iac_lap cp;

	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		return;

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
		/* Limited discoverable mode */
		cp.num_iac = min_t(u8, hdev->num_iac, 2);
		cp.iac_lap[0] = 0x00;	/* LIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
		cp.iac_lap[3] = 0x33;	/* GIAC */
		cp.iac_lap[4] = 0x8b;
		cp.iac_lap[5] = 0x9e;
	} else {
		/* General discoverable mode */
		cp.num_iac = 1;
		cp.iac_lap[0] = 0x33;	/* GIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
	}

	hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
		    (cp.num_iac * 3) + 1, &cp);
}

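/* Note (illustrative only, not part of the original file): the inquiry
 * access codes above are 24-bit LAPs written little-endian, so the byte
 * sequence 0x00 0x8b 0x9e is the LIAC (LAP 0x9e8b00) and 0x33 0x8b 0x9e is
 * the GIAC (LAP 0x9e8b33), matching the liac[] and giac[] arrays used by
 * bredr_inquiry() below.
 */
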
static int discoverable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		write_iac(req);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
	}

	/* Advertising instances don't use the global discoverable setting, so
	 * only update AD if advertising was enabled using Set Advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		__hci_req_update_adv_data(req, 0x00);

		/* Discoverable mode affects the local advertising
		 * address in limited privacy mode.
		 */
		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
			__hci_req_enable_advertising(req);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static void discoverable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discoverable_update);
	u8 status;

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_discoverable_complete(hdev, status);
}

void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
		      u8 reason)
{
	switch (conn->state) {
	case BT_CONNECTED:
	case BT_CONFIG:
		if (conn->type == AMP_LINK) {
			struct hci_cp_disconn_phy_link cp;

			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
			cp.reason = reason;
			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
				    &cp);
		} else {
			struct hci_cp_disconnect dc;

			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = reason;
			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
		}

		conn->state = BT_DISCONN;

		break;
	case BT_CONNECT:
		if (conn->type == LE_LINK) {
			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
				break;
			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
				    0, NULL);
		} else if (conn->type == ACL_LINK) {
			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
				break;
			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
				    6, &conn->dst);
		}
		break;
	case BT_CONNECT2:
		if (conn->type == ACL_LINK) {
			struct hci_cp_reject_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = reason;

			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
				    sizeof(rej), &rej);
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			struct hci_cp_reject_sync_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);

			/* SCO rejection has its own limited set of
			 * allowed error values (0x0D-0x0F) which isn't
			 * compatible with most values passed to this
			 * function. To be safe hard-code one of the
			 * values that's suitable for SCO.
			 */
			rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;

			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
				    sizeof(rej), &rej);
		}
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}

static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	if (status)
		BT_DBG("Failed to abort connection: status 0x%2.2x", status);
}

int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
	struct hci_request req;
	int err;

	hci_req_init(&req, conn->hdev);

	__hci_abort_conn(&req, conn, reason);

	err = hci_req_run(&req, abort_conn_complete);
	if (err && err != -ENODATA) {
		BT_ERR("Failed to run HCI request: err %d", err);
		return err;
	}

	return 0;
}

static int update_bg_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_update_background_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void bg_scan_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    bg_scan_update);
	struct hci_conn *conn;
	u8 status;
	int err;

	err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
	if (!err)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (conn)
		hci_le_conn_failed(conn, status);

	hci_dev_unlock(hdev);
}

static int le_scan_disable(struct hci_request *req, unsigned long opt)
{
	hci_req_add_le_scan_disable(req);
	return 0;
}

static int bredr_inquiry(struct hci_request *req, unsigned long opt)
{
	u8 length = opt;
	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", req->hdev->name);

	hci_dev_lock(req->hdev);
	hci_inquiry_cache_flush(req->hdev);
	hci_dev_unlock(req->hdev);

	memset(&cp, 0, sizeof(cp));

	if (req->hdev->discovery.limited)
		memcpy(&cp.lap, liac, sizeof(cp.lap));
	else
		memcpy(&cp.lap, giac, sizeof(cp.lap));

	cp.length = length;

	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	u8 status;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	cancel_delayed_work(&hdev->le_scan_restart);

	hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		BT_ERR("Failed to disable LE scan: status 0x%02x", status);
		return;
	}

	hdev->discovery.scan_start = 0;

	/* If we were running an LE-only scan, change the discovery state.
	 * If we were running both LE and BR/EDR inquiry simultaneously
	 * and BR/EDR inquiry has already finished, stop discovery;
	 * otherwise BR/EDR inquiry will stop discovery when it finishes.
	 * If we are about to resolve a remote device name, do not change
	 * the discovery state.
	 */

	if (hdev->discovery.type == DISCOV_TYPE_LE)
		goto discov_stopped;

	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
		return;

	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
		    hdev->discovery.state != DISCOVERY_RESOLVING)
			goto discov_stopped;

		return;
	}

	hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
		     HCI_CMD_TIMEOUT, &status);
	if (status) {
		BT_ERR("Inquiry failed: status 0x%02x", status);
		goto discov_stopped;
	}

	return;

discov_stopped:
	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);
}

static int le_scan_restart(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_enable cp;

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return 0;

	hci_req_add_le_scan_disable(req);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_ENABLE;
	cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	return 0;
}

static void le_scan_restart_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_restart.work);
	unsigned long timeout, duration, scan_start, now;
	u8 status;

	BT_DBG("%s", hdev->name);

	hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		BT_ERR("Failed to restart LE scan: status %d", status);
		return;
	}

	hci_dev_lock(hdev);

	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
	    !hdev->discovery.scan_start)
		goto unlock;

	/* When the scan was started, the le_scan_disable work was queued
	 * to run "duration" after scan_start. During the scan restart
	 * this work has been canceled, so we need to queue it again with
	 * the proper timeout to make sure the scan does not run
	 * indefinitely.
	 */
	duration = hdev->discovery.scan_duration;
	scan_start = hdev->discovery.scan_start;
	now = jiffies;
	if (now - scan_start <= duration) {
		int elapsed;

		if (now >= scan_start)
			elapsed = now - scan_start;
		else
			elapsed = ULONG_MAX - scan_start + now;

		timeout = duration - elapsed;
	} else {
		timeout = 0;
	}

	queue_delayed_work(hdev->req_workqueue,
			   &hdev->le_scan_disable, timeout);

unlock:
	hci_dev_unlock(hdev);
}

1981 static void disable_advertising(struct hci_request *req)
1982 {
1983 u8 enable = 0x00;
1984
1985 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1986 }
1987
1988 static int active_scan(struct hci_request *req, unsigned long opt)
1989 {
1990 uint16_t interval = opt;
1991 struct hci_dev *hdev = req->hdev;
1992 struct hci_cp_le_set_scan_param param_cp;
1993 struct hci_cp_le_set_scan_enable enable_cp;
1994 u8 own_addr_type;
1995 int err;
1996
1997 BT_DBG("%s", hdev->name);
1998
1999 if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
2000 hci_dev_lock(hdev);
2001
2002 /* Don't let discovery abort an outgoing connection attempt
2003 * that's using directed advertising.
2004 */
2005 if (hci_lookup_le_connect(hdev)) {
2006 hci_dev_unlock(hdev);
2007 return -EBUSY;
2008 }
2009
2010 cancel_adv_timeout(hdev);
2011 hci_dev_unlock(hdev);
2012
2013 disable_advertising(req);
2014 }
2015
2016 /* If the controller is scanning, background scanning is running.
2017 * Temporarily stop it so the discovery scan parameters can be
2018 * set.
2019 */
2020 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2021 hci_req_add_le_scan_disable(req);
2022
2023 /* All active scans will be done with either a resolvable private
2024 * address (when the privacy feature has been enabled) or a
2025 * non-resolvable private address.
2026 */
2027 err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2028 &own_addr_type);
2029 if (err < 0)
2030 own_addr_type = ADDR_LE_DEV_PUBLIC;
2031
2032 memset(&param_cp, 0, sizeof(param_cp));
2033 param_cp.type = LE_SCAN_ACTIVE;
2034 param_cp.interval = cpu_to_le16(interval);
2035 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
2036 param_cp.own_address_type = own_addr_type;
2037
2038 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
2039 &param_cp);
2040
2041 memset(&enable_cp, 0, sizeof(enable_cp));
2042 enable_cp.enable = LE_SCAN_ENABLE;
2043 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2044
2045 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
2046 &enable_cp);
2047
2048 return 0;
2049 }
2050
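/* Request builder for simultaneous discovery: start the active LE scan
 * and the BR/EDR inquiry in a single request, leaving it to the
 * controller to schedule both.
 */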
2051 static int interleaved_discov(struct hci_request *req, unsigned long opt)
2052 {
2053 int err;
2054
2055 BT_DBG("%s", req->hdev->name);
2056
2057 err = active_scan(req, opt);
2058 if (err)
2059 return err;
2060
2061 return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
2062 }
2063
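/* Start discovery according to hdev->discovery.type. On success the
 * le_scan_disable work is scheduled so that LE scanning stops when the
 * discovery timeout expires; pure BR/EDR inquiry has its own timeout
 * and returns early.
 */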
2064 static void start_discovery(struct hci_dev *hdev, u8 *status)
2065 {
2066 unsigned long timeout;
2067
2068 BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
2069
2070 switch (hdev->discovery.type) {
2071 case DISCOV_TYPE_BREDR:
2072 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
2073 hci_req_sync(hdev, bredr_inquiry,
2074 DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
2075 status);
2076 return;
2077 case DISCOV_TYPE_INTERLEAVED:
2078 /* When running simultaneous discovery, the LE scanning time
2079 * should occupy the whole discovery time since BR/EDR inquiry
2080 * and LE scanning are scheduled by the controller.
2081 *
2082 * For interleaved discovery, in comparison, BR/EDR inquiry
2083 * and LE scanning are done sequentially with separate
2084 * timeouts.
2085 */
2086 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2087 &hdev->quirks)) {
2088 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2089 /* During simultaneous discovery, we double LE scan
2090 * interval. We must leave some time for the controller
2091 * to do BR/EDR inquiry.
2092 */
2093 hci_req_sync(hdev, interleaved_discov,
2094 DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
2095 status);
2096 break;
2097 }
2098
2099 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2100 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2101 HCI_CMD_TIMEOUT, status);
2102 break;
2103 case DISCOV_TYPE_LE:
2104 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2105 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2106 HCI_CMD_TIMEOUT, status);
2107 break;
2108 default:
2109 *status = HCI_ERROR_UNSPECIFIED;
2110 return;
2111 }
2112
2113 if (*status)
2114 return;
2115
2116 BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2117
2118 /* When service discovery is used and the controller has a
2119 * strict duplicate filter, it is important to remember the
2120 * start and duration of the scan. This is required for
2121 * restarting scanning during the discovery phase.
2122 */
2123 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2124 hdev->discovery.result_filtering) {
2125 hdev->discovery.scan_start = jiffies;
2126 hdev->discovery.scan_duration = timeout;
2127 }
2128
2129 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2130 timeout);
2131 }
2132
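/* Append the commands needed to stop the current discovery to @req:
 * cancel an ongoing inquiry, disable LE scanning and, unless the
 * discovery is LE-only, cancel a pending remote name request. Returns
 * true if there was discovery activity to stop.
 */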
2133 bool hci_req_stop_discovery(struct hci_request *req)
2134 {
2135 struct hci_dev *hdev = req->hdev;
2136 struct discovery_state *d = &hdev->discovery;
2137 struct hci_cp_remote_name_req_cancel cp;
2138 struct inquiry_entry *e;
2139 bool ret = false;
2140
2141 BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
2142
2143 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2144 if (test_bit(HCI_INQUIRY, &hdev->flags))
2145 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2146
2147 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2148 cancel_delayed_work(&hdev->le_scan_disable);
2149 hci_req_add_le_scan_disable(req);
2150 }
2151
2152 ret = true;
2153 } else {
2154 /* Passive scanning */
2155 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2156 hci_req_add_le_scan_disable(req);
2157 ret = true;
2158 }
2159 }
2160
2161 /* No further actions needed for LE-only discovery */
2162 if (d->type == DISCOV_TYPE_LE)
2163 return ret;
2164
2165 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2166 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2167 NAME_PENDING);
2168 if (!e)
2169 return ret;
2170
2171 bacpy(&cp.bdaddr, &e->data.bdaddr);
2172 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2173 &cp);
2174 ret = true;
2175 }
2176
2177 return ret;
2178 }
2179
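/* hci_req_sync() helper that stops discovery with the device lock
 * held.
 */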
2180 static int stop_discovery(struct hci_request *req, unsigned long opt)
2181 {
2182 hci_dev_lock(req->hdev);
2183 hci_req_stop_discovery(req);
2184 hci_dev_unlock(req->hdev);
2185
2186 return 0;
2187 }
2188
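/* Work handler for hdev->discov_update: drive the discovery state
 * machine by starting or stopping discovery and reporting the result
 * back to the management interface.
 */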
2189 static void discov_update(struct work_struct *work)
2190 {
2191 struct hci_dev *hdev = container_of(work, struct hci_dev,
2192 discov_update);
2193 u8 status = 0;
2194
2195 switch (hdev->discovery.state) {
2196 case DISCOVERY_STARTING:
2197 start_discovery(hdev, &status);
2198 mgmt_start_discovery_complete(hdev, status);
2199 if (status)
2200 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2201 else
2202 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2203 break;
2204 case DISCOVERY_STOPPING:
2205 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2206 mgmt_stop_discovery_complete(hdev, status);
2207 if (!status)
2208 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2209 break;
2210 case DISCOVERY_STOPPED:
2211 default:
2212 return;
2213 }
2214 }
2215
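/* Delayed work handler for hdev->discov_off: runs when the
 * discoverable timeout expires.
 */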
2216 static void discov_off(struct work_struct *work)
2217 {
2218 struct hci_dev *hdev = container_of(work, struct hci_dev,
2219 discov_off.work);
2220
2221 BT_DBG("%s", hdev->name);
2222
2223 hci_dev_lock(hdev);
2224
2225 /* When the discoverable timeout triggers, just make sure
2226 * the limited discoverable flag is cleared. Even in the case
2227 * of a timeout triggered from general discoverable mode, it is
2228 * safe to clear the flag unconditionally.
2229 */
2230 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2231 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2232 hdev->discov_timeout = 0;
2233
2234 hci_dev_unlock(hdev);
2235
2236 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2237 mgmt_new_settings(hdev);
2238 }
2239
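/* Request builder run when the controller is powered on: bring SSP,
 * Secure Connections, LE host support, advertising, scan mode, class,
 * name and EIR in line with the currently stored settings flags.
 */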
2240 static int powered_update_hci(struct hci_request *req, unsigned long opt)
2241 {
2242 struct hci_dev *hdev = req->hdev;
2243 u8 link_sec;
2244
2245 hci_dev_lock(hdev);
2246
2247 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2248 !lmp_host_ssp_capable(hdev)) {
2249 u8 mode = 0x01;
2250
2251 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2252
2253 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
2254 u8 support = 0x01;
2255
2256 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2257 sizeof(support), &support);
2258 }
2259 }
2260
2261 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2262 lmp_bredr_capable(hdev)) {
2263 struct hci_cp_write_le_host_supported cp;
2264
2265 cp.le = 0x01;
2266 cp.simul = 0x00;
2267
2268 /* Check first if we already have the right
2269 * host state (host features set)
2270 */
2271 if (cp.le != lmp_host_le_capable(hdev) ||
2272 cp.simul != lmp_host_le_br_capable(hdev))
2273 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2274 sizeof(cp), &cp);
2275 }
2276
2277 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
2278 /* Make sure the controller has a good default for
2279 * advertising data. This also applies to the case
2280 * where BR/EDR was toggled during the AUTO_OFF phase.
2281 */
2282 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2283 list_empty(&hdev->adv_instances)) {
2284 __hci_req_update_adv_data(req, 0x00);
2285 __hci_req_update_scan_rsp_data(req, 0x00);
2286
2287 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
2288 __hci_req_enable_advertising(req);
2289 } else if (!list_empty(&hdev->adv_instances)) {
2290 struct adv_info *adv_instance;
2291
2292 adv_instance = list_first_entry(&hdev->adv_instances,
2293 struct adv_info, list);
2294 __hci_req_schedule_adv_instance(req,
2295 adv_instance->instance,
2296 true);
2297 }
2298 }
2299
2300 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
2301 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
2302 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
2303 sizeof(link_sec), &link_sec);
2304
2305 if (lmp_bredr_capable(hdev)) {
2306 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
2307 __hci_req_write_fast_connectable(req, true);
2308 else
2309 __hci_req_write_fast_connectable(req, false);
2310 __hci_req_update_scan(req);
2311 __hci_req_update_class(req);
2312 __hci_req_update_name(req);
2313 __hci_req_update_eir(req);
2314 }
2315
2316 hci_dev_unlock(hdev);
2317 return 0;
2318 }
2319
2320 int __hci_req_hci_power_on(struct hci_dev *hdev)
2321 {
2322 /* Register the available SMP channels (BR/EDR and LE) only when
2323 * successfully powering on the controller. This late
2324 * registration is required so that LE SMP can clearly decide
2325 * whether the public address or the static address is used.
2326 */
2327 smp_register(hdev);
2328
2329 return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
2330 NULL);
2331 }
2332
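/* Initialize the work items used by the request subsystem; called once
 * while the hci_dev is being set up.
 */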
2333 void hci_request_setup(struct hci_dev *hdev)
2334 {
2335 INIT_WORK(&hdev->discov_update, discov_update);
2336 INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
2337 INIT_WORK(&hdev->scan_update, scan_update_work);
2338 INIT_WORK(&hdev->connectable_update, connectable_update_work);
2339 INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
2340 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
2341 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2342 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
2343 INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
2344 }
2345
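/* Cancel a pending synchronous request and stop all request-related
 * work; used when the controller is shutting down.
 */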
2346 void hci_request_cancel_all(struct hci_dev *hdev)
2347 {
2348 hci_req_sync_cancel(hdev, ENODEV);
2349
2350 cancel_work_sync(&hdev->discov_update);
2351 cancel_work_sync(&hdev->bg_scan_update);
2352 cancel_work_sync(&hdev->scan_update);
2353 cancel_work_sync(&hdev->connectable_update);
2354 cancel_work_sync(&hdev->discoverable_update);
2355 cancel_delayed_work_sync(&hdev->discov_off);
2356 cancel_delayed_work_sync(&hdev->le_scan_disable);
2357 cancel_delayed_work_sync(&hdev->le_scan_restart);
2358
2359 if (hdev->adv_instance_timeout) {
2360 cancel_delayed_work_sync(&hdev->adv_instance_expire);
2361 hdev->adv_instance_timeout = 0;
2362 }
2363 }