/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
        skb_queue_head_init(&req->cmd_q);
        req->hdev = hdev;
        req->err = 0;
}

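/* req_run() attaches the completion callback to the last command in the
 * request and splices the whole request onto hdev->cmd_q, from where the
 * command work (hdev->cmd_work) sends the commands one by one.
 *
 * A minimal usage sketch of the request API (my_complete_cb is a
 * hypothetical callback name):
 *
 *      struct hci_request req;
 *
 *      hci_req_init(&req, hdev);
 *      hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *      err = hci_req_run(&req, my_complete_cb);
 */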
static int req_run(struct hci_request *req, hci_req_complete_t complete,
                   hci_req_complete_skb_t complete_skb)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("length %u", skb_queue_len(&req->cmd_q));

        /* If an error occurred during request building, remove all HCI
         * commands queued on the HCI request queue.
         */
        if (req->err) {
                skb_queue_purge(&req->cmd_q);
                return req->err;
        }

        /* Do not allow empty requests */
        if (skb_queue_empty(&req->cmd_q))
                return -ENODATA;

        skb = skb_peek_tail(&req->cmd_q);
        if (complete) {
                bt_cb(skb)->hci.req_complete = complete;
        } else if (complete_skb) {
                bt_cb(skb)->hci.req_complete_skb = complete_skb;
                bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
        }

        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

        queue_work(hdev->workqueue, &hdev->cmd_work);

        return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
        return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
        return req_run(req, NULL, complete);
}

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                                  struct sk_buff *skb)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

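/* Send a single HCI command and wait synchronously for the matching event
 * (or the command complete/status event when @event is 0). On success the
 * resulting skb is returned and the caller is expected to free it with
 * kfree_skb(); on failure an ERR_PTR() value is returned.
 */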
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        struct sk_buff *skb;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;
        skb = hdev->req_skb;
        hdev->req_skb = NULL;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0) {
                kfree_skb(skb);
                return ERR_PTR(err);
        }

        if (!skb)
                return ERR_PTR(-ENODATA);

        return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, void (*func)(struct hci_request *req,
                                                       unsigned long opt),
                   unsigned long opt, u32 timeout, u8 *hci_status)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                if (hci_status)
                        *hci_status = hdev->req_result;
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;

        default:
                err = -ETIMEDOUT;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

int hci_req_sync(struct hci_dev *hdev, void (*req)(struct hci_request *req,
                                                   unsigned long opt),
                 unsigned long opt, u32 timeout, u8 *hci_status)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_sync_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
        hci_req_sync_unlock(hdev);

        return ret;
}

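/* Allocate an skb carrying a single HCI command: the command header
 * (opcode and parameter length) followed by the parameters themselves.
 * Returns NULL if the allocation fails.
 */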
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
                                const void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen = plen;

        if (plen)
                memcpy(skb_put(skb, plen), param, plen);

        BT_DBG("skb len %d", skb->len);

        hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
        hci_skb_opcode(skb) = opcode;

        return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
                    const void *param, u8 event)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

        /* If an error occurred during request building, there is no point in
         * queueing the HCI command. We can simply return.
         */
        if (req->err)
                return;

        skb = hci_prepare_cmd(hdev, opcode, plen, param);
        if (!skb) {
                BT_ERR("%s no memory for command (opcode 0x%4.4x)",
                       hdev->name, opcode);
                req->err = -ENOMEM;
                return;
        }

        if (skb_queue_empty(&req->cmd_q))
                bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

        bt_cb(skb)->hci.req_event = event;

        skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
                 const void *param)
{
        hci_req_add_ev(req, opcode, plen, param, 0);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
        struct hci_cp_le_set_scan_enable cp;

        memset(&cp, 0, sizeof(cp));
        cp.enable = LE_SCAN_DISABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void add_to_white_list(struct hci_request *req,
                              struct hci_conn_params *params)
{
        struct hci_cp_le_add_to_white_list cp;

        cp.bdaddr_type = params->addr_type;
        bacpy(&cp.bdaddr, &params->addr);

        hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

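/* Re-program the controller's LE white list from the pending connection and
 * pending report lists. Returns the scan filter policy to use: 0x01 when the
 * white list can be used, or 0x00 when scanning has to accept all advertising
 * (list too small or RPAs in use).
 */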
static u8 update_white_list(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_conn_params *params;
        struct bdaddr_list *b;
        uint8_t white_list_entries = 0;

        /* Go through the current white list programmed into the
         * controller one by one and check if that address is still
         * in the list of pending connections or list of devices to
         * report. If not present in either list, then queue the
         * command to remove it from the controller.
         */
        list_for_each_entry(b, &hdev->le_white_list, list) {
                struct hci_cp_le_del_from_white_list cp;

                if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
                                              &b->bdaddr, b->bdaddr_type) ||
                    hci_pend_le_action_lookup(&hdev->pend_le_reports,
                                              &b->bdaddr, b->bdaddr_type)) {
                        white_list_entries++;
                        continue;
                }

                cp.bdaddr_type = b->bdaddr_type;
                bacpy(&cp.bdaddr, &b->bdaddr);

                hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
                            sizeof(cp), &cp);
        }

        /* Since all no longer valid white list entries have been
         * removed, walk through the list of pending connections
         * and ensure that any new device gets programmed into
         * the controller.
         *
         * If the list of the devices is larger than the list of
         * available white list entries in the controller, then
         * just abort and return the filter policy value to not use
         * the white list.
         */
        list_for_each_entry(params, &hdev->pend_le_conns, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list can not be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* After adding all new pending connections, walk through
         * the list of pending reports and also add these to the
         * white list if there is still space.
         */
        list_for_each_entry(params, &hdev->pend_le_reports, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list can not be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* Select filter policy to use white list */
        return 0x01;
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
        struct hci_cp_le_set_scan_param param_cp;
        struct hci_cp_le_set_scan_enable enable_cp;
        struct hci_dev *hdev = req->hdev;
        u8 own_addr_type;
        u8 filter_policy;

        /* Set require_privacy to false since no SCAN_REQ are sent
         * during passive scanning. Not using a non-resolvable address
         * here is important so that peer devices using direct
         * advertising with our address will be correctly reported
         * by the controller.
         */
        if (hci_update_random_address(req, false, &own_addr_type))
                return;

        /* Adding or removing entries from the white list must
         * happen before enabling scanning. The controller does
         * not allow white list modification while scanning.
         */
        filter_policy = update_white_list(req);

        /* When the controller is using random resolvable addresses and
         * with that having LE privacy enabled, then controllers with
         * Extended Scanner Filter Policies support can now enable support
         * for handling directed advertising.
         *
         * So instead of using filter policies 0x00 (no whitelist)
         * and 0x01 (whitelist enabled) use the new filter policies
         * 0x02 (no whitelist) and 0x03 (whitelist enabled).
         */
        if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
            (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
                filter_policy |= 0x02;

        memset(&param_cp, 0, sizeof(param_cp));
        param_cp.type = LE_SCAN_PASSIVE;
        param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
        param_cp.window = cpu_to_le16(hdev->le_scan_window);
        param_cp.own_address_type = own_addr_type;
        param_cp.filter_policy = filter_policy;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                    &param_cp);

        memset(&enable_cp, 0, sizeof(enable_cp));
        enable_cp.enable = LE_SCAN_ENABLE;
        enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                    &enable_cp);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
        struct hci_dev *hdev = req->hdev;

        /* If we're advertising or initiating an LE connection we can't
         * go ahead and change the random address at this time. This is
         * because the eventual initiator address used for the
         * subsequently created connection will be undefined (some
         * controllers use the new address and others the one we had
         * when the operation started).
         *
         * In this kind of scenario skip the update and let the random
         * address be updated at the next cycle.
         */
        if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
            hci_lookup_le_connect(hdev)) {
                BT_DBG("Deferring random address update");
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                return;
        }

        hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

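/* Decide which own address type to use for the next LE operation and, when
 * needed, queue an HCI_OP_LE_SET_RANDOM_ADDR command to update the random
 * address (RPA, non-resolvable address or static address). Returns 0 on
 * success or a negative error if a new RPA could not be generated.
 */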
int hci_update_random_address(struct hci_request *req, bool require_privacy,
                              u8 *own_addr_type)
{
        struct hci_dev *hdev = req->hdev;
        int err;

        /* If privacy is enabled use a resolvable private address. If
         * current RPA has expired or there is something else than
         * the current RPA in use, then generate a new one.
         */
        if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
                int to;

                *own_addr_type = ADDR_LE_DEV_RANDOM;

                if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
                    !bacmp(&hdev->random_addr, &hdev->rpa))
                        return 0;

                err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
                if (err < 0) {
                        BT_ERR("%s failed to generate new RPA", hdev->name);
                        return err;
                }

                set_random_addr(req, &hdev->rpa);

                to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
                queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

                return 0;
        }

        /* In case of required privacy without resolvable private address,
         * use a non-resolvable private address. This is useful for active
         * scanning and non-connectable advertising.
         */
        if (require_privacy) {
                bdaddr_t nrpa;

                while (true) {
                        /* The non-resolvable private address is generated
                         * from random six bytes with the two most significant
                         * bits cleared.
                         */
                        get_random_bytes(&nrpa, 6);
                        nrpa.b[5] &= 0x3f;

                        /* The non-resolvable private address shall not be
                         * equal to the public address.
                         */
                        if (bacmp(&hdev->bdaddr, &nrpa))
                                break;
                }

                *own_addr_type = ADDR_LE_DEV_RANDOM;
                set_random_addr(req, &nrpa);
                return 0;
        }

        /* If forcing static address is in use or there is no public
         * address use the static address as random address (but skip
         * the HCI command if the current random address is already the
         * static one).
         *
         * In case BR/EDR has been disabled on a dual-mode controller
         * and a static address has been configured, then use that
         * address instead of the public BR/EDR address.
         */
        if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
            !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
            (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
             bacmp(&hdev->static_addr, BDADDR_ANY))) {
                *own_addr_type = ADDR_LE_DEV_RANDOM;
                if (bacmp(&hdev->static_addr, &hdev->random_addr))
                        hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
                                    &hdev->static_addr);
                return 0;
        }

        /* Neither privacy nor static address is being used so use a
         * public address.
         */
        *own_addr_type = ADDR_LE_DEV_PUBLIC;

        return 0;
}

static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
        struct bdaddr_list *b;

        list_for_each_entry(b, &hdev->whitelist, list) {
                struct hci_conn *conn;

                conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
                if (!conn)
                        return true;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        return true;
        }

        return false;
}

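/* Enable or disable BR/EDR page scan (and inquiry scan when discoverable)
 * depending on whether the controller is connectable or has whitelisted
 * devices that are currently disconnected. Only queues a command when the
 * scan mode actually needs to change.
 */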
void __hci_update_page_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 scan;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (!hdev_is_powered(hdev))
                return;

        if (mgmt_powering_down(hdev))
                return;

        if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
            disconnected_whitelist_entries(hdev))
                scan = SCAN_PAGE;
        else
                scan = SCAN_DISABLED;

        if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
                return;

        if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                scan |= SCAN_INQUIRY;

        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

void hci_update_page_scan(struct hci_dev *hdev)
{
        struct hci_request req;

        hci_req_init(&req, hdev);
        __hci_update_page_scan(&req);
        hci_req_run(&req, NULL);
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller to hold hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        if (!test_bit(HCI_UP, &hdev->flags) ||
            test_bit(HCI_INIT, &hdev->flags) ||
            hci_dev_test_flag(hdev, HCI_SETUP) ||
            hci_dev_test_flag(hdev, HCI_CONFIG) ||
            hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
            hci_dev_test_flag(hdev, HCI_UNREGISTER))
                return;

        /* No point in doing scanning if LE support hasn't been enabled */
        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        /* If discovery is active don't interfere with it */
        if (hdev->discovery.state != DISCOVERY_STOPPED)
                return;

        /* Reset RSSI and UUID filters when starting background scanning
         * since these filters are meant for service discovery only.
         *
         * The Start Discovery and Start Service Discovery operations
         * ensure to set proper values for RSSI threshold and UUID
         * filter list. So it is safe to just reset them here.
         */
        hci_discovery_filter_clear(hdev);

        if (list_empty(&hdev->pend_le_conns) &&
            list_empty(&hdev->pend_le_reports)) {
                /* If there are no pending LE connections or devices
                 * to be scanned for, we should stop the background
                 * scanning.
                 */

                /* If controller is not scanning we are done. */
                if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        return;

                hci_req_add_le_scan_disable(req);

                BT_DBG("%s stopping background scanning", hdev->name);
        } else {
                /* If there is at least one pending LE connection, we should
                 * keep the background scan running.
                 */

                /* If controller is connecting, we should not start scanning
                 * since some controllers are not able to scan and connect at
                 * the same time.
                 */
                if (hci_lookup_le_connect(hdev))
                        return;

                /* If controller is currently scanning, we stop it to ensure we
                 * don't miss any advertising (due to duplicates filter).
                 */
                if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        hci_req_add_le_scan_disable(req);

                hci_req_add_le_passive_scan(req);

                BT_DBG("%s starting background scanning", hdev->name);
        }
}

void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
                      u8 reason)
{
        switch (conn->state) {
        case BT_CONNECTED:
        case BT_CONFIG:
                if (conn->type == AMP_LINK) {
                        struct hci_cp_disconn_phy_link cp;

                        cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
                        cp.reason = reason;
                        hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
                                    &cp);
                } else {
                        struct hci_cp_disconnect dc;

                        dc.handle = cpu_to_le16(conn->handle);
                        dc.reason = reason;
                        hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
                }

                conn->state = BT_DISCONN;

                break;
        case BT_CONNECT:
                if (conn->type == LE_LINK) {
                        if (test_bit(HCI_CONN_SCANNING, &conn->flags))
                                break;
                        hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
                                    0, NULL);
                } else if (conn->type == ACL_LINK) {
                        if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
                                break;
                        hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
                                    6, &conn->dst);
                }
                break;
        case BT_CONNECT2:
                if (conn->type == ACL_LINK) {
                        struct hci_cp_reject_conn_req rej;

                        bacpy(&rej.bdaddr, &conn->dst);
                        rej.reason = reason;

                        hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
                                    sizeof(rej), &rej);
                } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
                        struct hci_cp_reject_sync_conn_req rej;

                        bacpy(&rej.bdaddr, &conn->dst);

                        /* SCO rejection has its own limited set of
                         * allowed error values (0x0D-0x0F) which isn't
                         * compatible with most values passed to this
                         * function. To be safe hard-code one of the
                         * values that's suitable for SCO.
                         */
                        rej.reason = HCI_ERROR_REMOTE_LOW_RESOURCES;

                        hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
                                    sizeof(rej), &rej);
                }
                break;
        default:
                conn->state = BT_CLOSED;
                break;
        }
}

static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        if (status)
                BT_DBG("Failed to abort connection: status 0x%2.2x", status);
}

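/* Abort a connection from non-request context: build a short-lived request,
 * let __hci_abort_conn() queue the command matching the connection state and
 * run it. An empty request (-ENODATA) is not treated as a failure.
 */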
int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
        struct hci_request req;
        int err;

        hci_req_init(&req, conn->hdev);

        __hci_abort_conn(&req, conn, reason);

        err = hci_req_run(&req, abort_conn_complete);
        if (err && err != -ENODATA) {
                BT_ERR("Failed to run HCI request: err %d", err);
                return err;
        }

        return 0;
}

static void update_bg_scan(struct hci_request *req, unsigned long opt)
{
        hci_dev_lock(req->hdev);
        __hci_update_background_scan(req);
        hci_dev_unlock(req->hdev);
}

static void bg_scan_update(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            bg_scan_update);
        struct hci_conn *conn;
        u8 status;
        int err;

        err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
        if (!err)
                return;

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
        if (conn)
                hci_le_conn_failed(conn, status);

        hci_dev_unlock(hdev);
}

static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        if (status) {
                BT_ERR("Failed to start inquiry: status %d", status);

                hci_dev_lock(hdev);
                hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
                hci_dev_unlock(hdev);
                return;
        }
}

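/* Called once LE scanning has been disabled at the end of a discovery cycle.
 * For LE-only discovery this simply stops discovery; for interleaved
 * discovery it either lets a still-running BR/EDR inquiry finish the job or,
 * on controllers that cannot scan and inquire at the same time, starts the
 * BR/EDR inquiry phase here.
 */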
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
        /* General inquiry access code (GIAC) */
        u8 lap[3] = { 0x33, 0x8b, 0x9e };
        struct hci_cp_inquiry cp;
        int err;

        if (status) {
                BT_ERR("Failed to disable LE scanning: status %d", status);
                return;
        }

        hdev->discovery.scan_start = 0;

        switch (hdev->discovery.type) {
        case DISCOV_TYPE_LE:
                hci_dev_lock(hdev);
                hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
                hci_dev_unlock(hdev);
                break;

        case DISCOV_TYPE_INTERLEAVED:
                hci_dev_lock(hdev);

                if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
                             &hdev->quirks)) {
                        /* If we were running LE only scan, change discovery
                         * state. If we were running both LE and BR/EDR inquiry
                         * simultaneously, and BR/EDR inquiry is already
                         * finished, stop discovery, otherwise BR/EDR inquiry
                         * will stop discovery when finished. If we are going
                         * to resolve a remote device name, do not change the
                         * discovery state.
                         */
                        if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
                            hdev->discovery.state != DISCOVERY_RESOLVING)
                                hci_discovery_set_state(hdev,
                                                        DISCOVERY_STOPPED);
                } else {
                        struct hci_request req;

                        hci_inquiry_cache_flush(hdev);

                        hci_req_init(&req, hdev);

                        memset(&cp, 0, sizeof(cp));
                        memcpy(&cp.lap, lap, sizeof(cp.lap));
                        cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
                        hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

                        err = hci_req_run(&req, inquiry_complete);
                        if (err) {
                                BT_ERR("Inquiry request failed: err %d", err);
                                hci_discovery_set_state(hdev,
                                                        DISCOVERY_STOPPED);
                        }
                }

                hci_dev_unlock(hdev);
                break;
        }
}

static void le_scan_disable(struct hci_request *req, unsigned long opt)
{
        hci_req_add_le_scan_disable(req);
}

static void le_scan_disable_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            le_scan_disable.work);
        u8 status;
        int err;

        BT_DBG("%s", hdev->name);

        cancel_delayed_work(&hdev->le_scan_restart);

        err = hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
        if (err)
                return;

        le_scan_disable_work_complete(hdev, status);
}

static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status)
{
        unsigned long timeout, duration, scan_start, now;

        BT_DBG("%s", hdev->name);

        if (status) {
                BT_ERR("Failed to restart LE scan: status %d", status);
                return;
        }

        hci_dev_lock(hdev);

        if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
            !hdev->discovery.scan_start)
                goto unlock;

        /* When the scan was started, the le_scan_disable work was scheduled
         * to run 'duration' after scan_start. That work has been canceled
         * while restarting the scan, so queue it again with the remaining
         * timeout to make sure the scan does not run indefinitely.
         */
        duration = hdev->discovery.scan_duration;
        scan_start = hdev->discovery.scan_start;
        now = jiffies;
        if (now - scan_start <= duration) {
                int elapsed;

                if (now >= scan_start)
                        elapsed = now - scan_start;
                else
                        elapsed = ULONG_MAX - scan_start + now;

                timeout = duration - elapsed;
        } else {
                timeout = 0;
        }

        queue_delayed_work(hdev->req_workqueue,
                           &hdev->le_scan_disable, timeout);

unlock:
        hci_dev_unlock(hdev);
}

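/* Queue commands to stop and immediately re-enable LE scanning. Restarting
 * the scan is used on controllers with a strict duplicates filter so that
 * already reported devices can be reported again.
 */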
static void le_scan_restart(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_scan_enable cp;

        /* If controller is not scanning we are done. */
        if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                return;

        hci_req_add_le_scan_disable(req);

        memset(&cp, 0, sizeof(cp));
        cp.enable = LE_SCAN_ENABLE;
        cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void le_scan_restart_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            le_scan_restart.work);
        u8 status;
        int err;

        BT_DBG("%s", hdev->name);

        err = hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
        if (err)
                return;

        le_scan_restart_work_complete(hdev, status);
}

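/* Initialize the background-scan and LE-scan work items used by the request
 * layer, and cancel any of that work that may still be pending when the
 * controller goes away.
 */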
void hci_request_setup(struct hci_dev *hdev)
{
        INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
        INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
        INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
}

void hci_request_cancel_all(struct hci_dev *hdev)
{
        cancel_work_sync(&hdev->bg_scan_update);
        cancel_delayed_work_sync(&hdev->le_scan_disable);
        cancel_delayed_work_sync(&hdev->le_scan_restart);
}