/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}

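/* Example (editor's illustration, not part of the original file): a
 * typical caller builds a request on the stack, queues one or more
 * commands and runs it asynchronously. The completion callback name
 * below is a hypothetical placeholder.
 *
 *	struct hci_request req;
 *	int err;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_RESET, 0, NULL);
 *	err = hci_req_run(&req, my_reset_complete);
 *	if (err == -ENODATA)
 *		return 0;	// nothing was queued; treat as a no-op
 */
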
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

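/* Example (editor's illustration): issuing a command synchronously and
 * consuming the returned event skb. Error handling is a minimal sketch;
 * the caller owns the skb and must free it.
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	// ... parse skb->data ...
 *	kfree_skb(skb);
 */
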
/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	hci_req_sync_unlock(hdev);

	return ret;
}

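/* Example (editor's illustration): hci_req_sync() takes a request
 * builder callback. The builder below is a hypothetical placeholder
 * that simply queues the BR/EDR scan update defined later in this file.
 *
 *	static int my_update(struct hci_request *req, unsigned long opt)
 *	{
 *		__hci_req_update_scan(req);
 *		return 0;
 *	}
 *
 *	u8 status;
 *
 *	hci_req_sync(hdev, my_update, 0, HCI_CMD_TIMEOUT, &status);
 */
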
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure that proper values are set for the RSSI threshold and
	 * UUID filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there are no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req);

		hci_req_add_le_passive_scan(req);

		BT_DBG("%s starting background scanning", hdev->name);
	}
}

void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void add_to_white_list(struct hci_request *req,
			      struct hci_conn_params *params)
{
	struct hci_cp_le_add_to_white_list cp;

	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	uint8_t white_list_entries = 0;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		struct hci_cp_le_del_from_white_list cp;

		if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
					      &b->bdaddr, b->bdaddr_type) ||
		    hci_pend_le_action_lookup(&hdev->pend_le_reports,
					      &b->bdaddr, b->bdaddr_type)) {
			white_list_entries++;
			continue;
		}

		cp.bdaddr_type = b->bdaddr_type;
		bacpy(&cp.bdaddr, &b->bdaddr);

		hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
			    sizeof(cp), &cp);
	}

	/* Since all no-longer-valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available white list entries in the controller, then
	 * just abort and return a filter policy value that does not
	 * use the white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* Select filter policy to use white list */
	return 0x01;
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using random resolvable addresses and
	 * with that having LE privacy enabled, then controllers with
	 * Extended Scanner Filter Policies support can now enable support
	 * for handling directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled) use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_PASSIVE;
	param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
	param_cp.window = cpu_to_le16(hdev->le_scan_window);
	param_cp.own_address_type = own_addr_type;
	param_cp.filter_policy = filter_policy;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);
}

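/* Example (editor's illustration): __hci_update_background_scan() above
 * combines these helpers, stopping any running scan before programming
 * the new parameters:
 *
 *	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
 *		hci_req_add_le_scan_disable(req);
 *	hci_req_add_le_passive_scan(req);
 */
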
static u8 get_current_adv_instance(struct hci_dev *hdev)
{
	/* The "Set Advertising" setting supersedes the "Add Advertising"
	 * setting. Here we set the advertising data based on which
	 * setting was set. When neither applies, default to the global
	 * settings, represented by instance "0".
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
	    !hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return hdev->cur_adv_instance;

	return 0x00;
}

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
	u8 instance = get_current_adv_instance(hdev);
	struct adv_info *adv_instance;

	/* Ignore instance 0 */
	if (instance == 0x00)
		return 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv_instance;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		return flags;
	}

	adv_instance = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv_instance)
		return 0;

	return adv_instance->flags;
}

void __hci_req_enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u8 instance;
	u32 flags;

	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	instance = get_current_adv_instance(hdev);
	flags = get_adv_instance_flags(hdev, instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

	if (connectable)
		cp.type = LE_ADV_IND;
	else if (get_cur_adv_instance_scan_rsp_len(hdev))
		cp.type = LE_ADV_SCAN_IND;
	else
		cp.type = LE_ADV_NONCONN_IND;

	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

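/* Example (editor's illustration): the usual enable sequence, as used
 * by hci_req_reenable_advertising() later in this file, refreshes the
 * advertising and scan response data before turning advertising on:
 *
 *	__hci_req_update_adv_data(&req, HCI_ADV_CURRENT);
 *	__hci_req_update_scan_rsp_data(&req, HCI_ADV_CURRENT);
 *	__hci_req_enable_advertising(&req);
 *	hci_req_run(&req, NULL);
 */
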
static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
					u8 *ptr)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Set the appropriate entries based on advertising instance flags
	 * here once flags other than 0 are supported.
	 */
	memcpy(ptr, adv_instance->scan_rsp_data,
	       adv_instance->scan_rsp_len);

	return adv_instance->scan_rsp_len;
}

static void update_inst_scan_rsp_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	if (instance)
		len = create_instance_scan_rsp_data(hdev, instance, cp.data);
	else
		len = create_default_scan_rsp_data(hdev, cp.data);

	if (hdev->scan_rsp_data_len == len &&
	    !memcmp(cp.data, hdev->scan_rsp_data, len))
		return;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, int instance)
{
	if (instance == HCI_ADV_CURRENT)
		instance = get_current_adv_instance(req->hdev);

	update_inst_scan_rsp_data(req, instance);
}

static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
	struct adv_info *adv_instance = NULL;
	u8 ad_len = 0, flags = 0;
	u32 instance_flags;

	/* Return 0 when the current instance identifier is invalid. */
	if (instance) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return 0;
	}

	instance_flags = get_adv_instance_flags(hdev, instance);

	/* The Add Advertising command allows userspace to set both the general
	 * and limited discoverable flags.
	 */
	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
		flags |= LE_AD_GENERAL;

	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
		flags |= LE_AD_LIMITED;

	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
		/* If a discovery flag wasn't provided, simply use the global
		 * settings.
		 */
		if (!flags)
			flags |= mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		/* If flags would still be empty, then there is no need to
		 * include the "Flags" AD field.
		 */
		if (flags) {
			ptr[0] = 0x02;
			ptr[1] = EIR_FLAGS;
			ptr[2] = flags;

			ad_len += 3;
			ptr += 3;
		}
	}

	if (adv_instance) {
		memcpy(ptr, adv_instance->adv_data,
		       adv_instance->adv_data_len);
		ad_len += adv_instance->adv_data_len;
		ptr += adv_instance->adv_data_len;
	}

	/* Provide Tx Power only if we can provide a valid value for it */
	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
	    (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
		ptr[0] = 0x02;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8)hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}

static void update_inst_adv_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_instance_adv_data(hdev, instance, cp.data);

	/* There's nothing to do if the data hasn't changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

void __hci_req_update_adv_data(struct hci_request *req, int instance)
{
	if (instance == HCI_ADV_CURRENT)
		instance = get_current_adv_instance(req->hdev);

	update_inst_adv_data(req, instance);
}

int hci_req_update_adv_data(struct hci_dev *hdev, int instance)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_update_adv_data(&req, instance);

	return hci_req_run(&req, NULL);
}

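/* Example (editor's illustration): refreshing the advertising data for
 * the currently selected instance from a context without its own
 * request:
 *
 *	hci_req_update_adv_data(hdev, HCI_ADV_CURRENT);
 */
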
static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 instance;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
		return;

	instance = get_current_adv_instance(hdev);

	hci_req_init(&req, hdev);

	if (instance) {
		__hci_req_schedule_adv_instance(&req, instance, true);
	} else {
		__hci_req_update_adv_data(&req, HCI_ADV_CURRENT);
		__hci_req_update_scan_rsp_data(&req, HCI_ADV_CURRENT);
		__hci_req_enable_advertising(&req);
	}

	hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance_expire.work);

	struct hci_request req;
	u8 instance;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hdev->adv_instance_timeout = 0;

	instance = get_current_adv_instance(hdev);
	if (instance == 0x00)
		goto unlock;

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, &req, instance, false);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	if (!skb_queue_empty(&req.cmd_q))
		hci_req_run(&req, NULL);

unlock:
	hci_dev_unlock(hdev);
}

int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
				    bool force)
{
	struct hci_dev *hdev = req->hdev;
	struct adv_info *adv_instance = NULL;
	u16 timeout;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
		return -EPERM;

	if (hdev->adv_instance_timeout)
		return -EBUSY;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	/* A zero timeout means unlimited advertising. As long as there is
	 * only one instance, duration should be ignored. We still set a timeout
	 * in case further instances are being added later on.
	 *
	 * If the remaining lifetime of the instance is more than the duration
	 * then the timeout corresponds to the duration, otherwise it will be
	 * reduced to the remaining instance lifetime.
	 */
	if (adv_instance->timeout == 0 ||
	    adv_instance->duration <= adv_instance->remaining_time)
		timeout = adv_instance->duration;
	else
		timeout = adv_instance->remaining_time;

	/* The remaining time is being reduced unless the instance is being
	 * advertised without time limit.
	 */
	if (adv_instance->timeout)
		adv_instance->remaining_time =
				adv_instance->remaining_time - timeout;

	hdev->adv_instance_timeout = timeout;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->adv_instance_expire,
			   msecs_to_jiffies(timeout * 1000));

	/* If we're just re-scheduling the same instance again then do not
	 * execute any HCI commands. This happens when a single instance is
	 * being advertised.
	 */
	if (!force && hdev->cur_adv_instance == instance &&
	    hci_dev_test_flag(hdev, HCI_LE_ADV))
		return 0;

	hdev->cur_adv_instance = instance;
	__hci_req_update_adv_data(req, HCI_ADV_CURRENT);
	__hci_req_update_scan_rsp_data(req, HCI_ADV_CURRENT);
	__hci_req_enable_advertising(req);

	return 0;
}

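/* Example (editor's illustration): scheduling a previously added
 * instance; the instance number 1 is a placeholder. force == true
 * re-issues the HCI commands even if the instance is already active.
 *
 *	hci_req_init(&req, hdev);
 *	__hci_req_schedule_adv_instance(&req, 1, true);
 *	hci_req_run(&req, NULL);
 */
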
static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: The instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct hci_request *req,
				u8 instance, bool force)
{
	struct adv_info *adv_instance, *n, *next_instance = NULL;
	int err;
	u8 rem_inst;

	/* Cancel any timeout concerning the removed instance(s). */
	if (!instance || hdev->cur_adv_instance == instance)
		cancel_adv_timeout(hdev);

	/* Get the next instance to advertise BEFORE we remove
	 * the current one. This can be the same instance again
	 * if there is only one instance.
	 */
	if (instance && hdev->cur_adv_instance == instance)
		next_instance = hci_get_next_instance(hdev, instance);

	if (instance == 0x00) {
		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
					 list) {
			if (!(force || adv_instance->timeout))
				continue;

			rem_inst = adv_instance->instance;
			err = hci_remove_adv_instance(hdev, rem_inst);
			if (!err)
				mgmt_advertising_removed(NULL, hdev, rem_inst);
		}
		hdev->cur_adv_instance = 0x00;
	} else {
		adv_instance = hci_find_adv_instance(hdev, instance);

		if (force || (adv_instance && adv_instance->timeout &&
			      !adv_instance->remaining_time)) {
			/* Don't advertise a removed instance. */
			if (next_instance &&
			    next_instance->instance == instance)
				next_instance = NULL;

			err = hci_remove_adv_instance(hdev, instance);
			if (!err)
				mgmt_advertising_removed(NULL, hdev, instance);
		}
	}

	if (list_empty(&hdev->adv_instances)) {
		hdev->cur_adv_instance = 0x00;
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
	}

	if (!req || !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	if (next_instance)
		__hci_req_schedule_adv_instance(req, next_instance->instance,
						false);
}

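/* Example (editor's illustration): removing all instances regardless of
 * their timeouts, e.g. on power-off; passing req == NULL skips
 * scheduling a successor instance:
 *
 *	hci_req_clear_adv_instance(hdev, NULL, 0x00, true);
 */
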
static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	    hci_lookup_le_connect(hdev)) {
		BT_DBG("Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired or there is something else than
	 * the current RPA in use, then generate a new one.
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &nrpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->whitelist, list) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
		if (!conn)
			return true;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			return true;
	}

	return false;
}

void __hci_req_update_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 scan;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (!hdev_is_powered(hdev))
		return;

	if (mgmt_powering_down(hdev))
		return;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
	    disconnected_whitelist_entries(hdev))
		scan = SCAN_PAGE;
	else
		scan = SCAN_DISABLED;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		scan |= SCAN_INQUIRY;

	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
		return;

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static int update_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_req_update_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void scan_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);

	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
}

static int connectable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	__hci_req_update_scan(req);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		__hci_req_update_adv_data(req, HCI_ADV_CURRENT);

	/* Update the advertising parameters if necessary */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
		__hci_req_enable_advertising(req);

	__hci_update_background_scan(req);

	hci_dev_unlock(hdev);

	return 0;
}

static void connectable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    connectable_update);
	u8 status;

	hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_connectable_complete(hdev, status);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}

void __hci_req_update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}

static void write_iac(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_current_iac_lap cp;

	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		return;

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
		/* Limited discoverable mode */
		cp.num_iac = min_t(u8, hdev->num_iac, 2);
		cp.iac_lap[0] = 0x00;	/* LIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
		cp.iac_lap[3] = 0x33;	/* GIAC */
		cp.iac_lap[4] = 0x8b;
		cp.iac_lap[5] = 0x9e;
	} else {
		/* General discoverable mode */
		cp.num_iac = 1;
		cp.iac_lap[0] = 0x33;	/* GIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
	}

	hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
		    (cp.num_iac * 3) + 1, &cp);
}

static int discoverable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		write_iac(req);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
	}

	/* Advertising instances don't use the global discoverable setting, so
	 * only update AD if advertising was enabled using Set Advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		__hci_req_update_adv_data(req, HCI_ADV_CURRENT);

	hci_dev_unlock(hdev);

	return 0;
}

static void discoverable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discoverable_update);
	u8 status;

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_discoverable_complete(hdev, status);
}

void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
		      u8 reason)
{
	switch (conn->state) {
	case BT_CONNECTED:
	case BT_CONFIG:
		if (conn->type == AMP_LINK) {
			struct hci_cp_disconn_phy_link cp;

			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
			cp.reason = reason;
			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
				    &cp);
		} else {
			struct hci_cp_disconnect dc;

			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = reason;
			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
		}

		conn->state = BT_DISCONN;

		break;
	case BT_CONNECT:
		if (conn->type == LE_LINK) {
			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
				break;
			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
				    0, NULL);
		} else if (conn->type == ACL_LINK) {
			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
				break;
			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
				    6, &conn->dst);
		}
		break;
	case BT_CONNECT2:
		if (conn->type == ACL_LINK) {
			struct hci_cp_reject_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = reason;

			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
				    sizeof(rej), &rej);
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			struct hci_cp_reject_sync_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);

			/* SCO rejection has its own limited set of
			 * allowed error values (0x0D-0x0F) which isn't
			 * compatible with most values passed to this
			 * function. To be safe hard-code one of the
			 * values that's suitable for SCO.
			 */
			rej.reason = HCI_ERROR_REMOTE_LOW_RESOURCES;

			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
				    sizeof(rej), &rej);
		}
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}

static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	if (status)
		BT_DBG("Failed to abort connection: status 0x%2.2x", status);
}

int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
	struct hci_request req;
	int err;

	hci_req_init(&req, conn->hdev);

	__hci_abort_conn(&req, conn, reason);

	err = hci_req_run(&req, abort_conn_complete);
	if (err && err != -ENODATA) {
		BT_ERR("Failed to run HCI request: err %d", err);
		return err;
	}

	return 0;
}

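/* Example (editor's illustration): tearing down a connection with a
 * standard reason code:
 *
 *	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
 */
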
static int update_bg_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_update_background_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void bg_scan_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    bg_scan_update);
	struct hci_conn *conn;
	u8 status;
	int err;

	err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
	if (!err)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (conn)
		hci_le_conn_failed(conn, status);

	hci_dev_unlock(hdev);
}

static int le_scan_disable(struct hci_request *req, unsigned long opt)
{
	hci_req_add_le_scan_disable(req);
	return 0;
}

static int bredr_inquiry(struct hci_request *req, unsigned long opt)
{
	u8 length = opt;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", req->hdev->name);

	hci_dev_lock(req->hdev);
	hci_inquiry_cache_flush(req->hdev);
	hci_dev_unlock(req->hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	u8 status;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	cancel_delayed_work(&hdev->le_scan_restart);

	hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		BT_ERR("Failed to disable LE scan: status 0x%02x", status);
		return;
	}

	hdev->discovery.scan_start = 0;

	/* If we were running LE only scan, change discovery state. If
	 * we were running both LE and BR/EDR inquiry simultaneously,
	 * and BR/EDR inquiry is already finished, stop discovery,
	 * otherwise BR/EDR inquiry will stop discovery when finished.
	 * If we are about to resolve a remote device name, do not
	 * change the discovery state.
	 */

	if (hdev->discovery.type == DISCOV_TYPE_LE)
		goto discov_stopped;

	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
		return;

	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
		    hdev->discovery.state != DISCOVERY_RESOLVING)
			goto discov_stopped;

		return;
	}

	hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
		     HCI_CMD_TIMEOUT, &status);
	if (status) {
		BT_ERR("Inquiry failed: status 0x%02x", status);
		goto discov_stopped;
	}

	return;

discov_stopped:
	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);
}

static int le_scan_restart(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_enable cp;

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return 0;

	hci_req_add_le_scan_disable(req);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_ENABLE;
	cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	return 0;
}

static void le_scan_restart_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_restart.work);
	unsigned long timeout, duration, scan_start, now;
	u8 status;

	BT_DBG("%s", hdev->name);

	hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		BT_ERR("Failed to restart LE scan: status %d", status);
		return;
	}

	hci_dev_lock(hdev);

	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
	    !hdev->discovery.scan_start)
		goto unlock;

	/* When the scan was started, hdev->le_scan_disable was queued to
	 * run 'duration' after scan_start. During scan restart this job
	 * has been canceled, and we need to queue it again after the
	 * proper timeout, to make sure that the scan does not run
	 * indefinitely.
	 */
	duration = hdev->discovery.scan_duration;
	scan_start = hdev->discovery.scan_start;
	now = jiffies;
	if (now - scan_start <= duration) {
		int elapsed;

		if (now >= scan_start)
			elapsed = now - scan_start;
		else
			elapsed = ULONG_MAX - scan_start + now;

		timeout = duration - elapsed;
	} else {
		timeout = 0;
	}

	queue_delayed_work(hdev->req_workqueue,
			   &hdev->le_scan_disable, timeout);

unlock:
	hci_dev_unlock(hdev);
}

static void disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static int active_scan(struct hci_request *req, unsigned long opt)
{
	uint16_t interval = opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	u8 own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
		hci_dev_lock(hdev);

		/* Don't let discovery abort an outgoing connection attempt
		 * that's using directed advertising.
		 */
		if (hci_lookup_le_connect(hdev)) {
			hci_dev_unlock(hdev);
			return -EBUSY;
		}

		cancel_adv_timeout(hdev);
		hci_dev_unlock(hdev);

		disable_advertising(req);
	}

	/* If controller is scanning, it means the background scanning is
	 * running. Thus, we should temporarily stop it in order to set the
	 * discovery scanning parameters.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req);

	/* All active scans will be done with either a resolvable private
	 * address (when privacy feature has been enabled) or non-resolvable
	 * private address.
	 */
	err = hci_update_random_address(req, true, &own_addr_type);
	if (err < 0)
		own_addr_type = ADDR_LE_DEV_PUBLIC;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_ACTIVE;
	param_cp.interval = cpu_to_le16(interval);
	param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
	param_cp.own_address_type = own_addr_type;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);

	return 0;
}

static int interleaved_discov(struct hci_request *req, unsigned long opt)
{
	int err;

	BT_DBG("%s", req->hdev->name);

	err = active_scan(req, opt);
	if (err)
		return err;

	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
}

static void start_discovery(struct hci_dev *hdev, u8 *status)
{
	unsigned long timeout;

	BT_DBG("%s type %u", hdev->name, hdev->discovery.type);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
			hci_req_sync(hdev, bredr_inquiry,
				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
				     status);
		return;
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning time
		 * should occupy the whole discovery time since BR/EDR inquiry
		 * and LE scanning are scheduled by the controller.
		 *
		 * For interleaving discovery in comparison, BR/EDR inquiry
		 * and LE scanning are done sequentially with separate
		 * timeouts.
		 */
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
			/* During simultaneous discovery, we double LE scan
			 * interval. We must leave some time for the controller
			 * to do BR/EDR inquiry.
			 */
			hci_req_sync(hdev, interleaved_discov,
				     DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
				     status);
			break;
		}

		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	default:
		*status = HCI_ERROR_UNSPECIFIED;
		return;
	}

	if (*status)
		return;

	BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));

	/* When service discovery is used and the controller has a
	 * strict duplicate filter, it is important to remember the
	 * start and duration of the scan. This is required for
	 * restarting scanning during the discovery phase.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
	    hdev->discovery.result_filtering) {
		hdev->discovery.scan_start = jiffies;
		hdev->discovery.scan_duration = timeout;
	}

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
			   timeout);
}

bool hci_req_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct discovery_state *d = &hdev->discovery;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	bool ret = false;

	BT_DBG("%s state %u", hdev->name, hdev->discovery.state);

	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
		if (test_bit(HCI_INQUIRY, &hdev->flags))
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		ret = true;
	} else {
		/* Passive scanning */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			hci_req_add_le_scan_disable(req);
			ret = true;
		}
	}

	/* No further actions needed for LE-only discovery */
	if (d->type == DISCOV_TYPE_LE)
		return ret;

	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return ret;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);
		ret = true;
	}

	return ret;
}

static int stop_discovery(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	hci_req_stop_discovery(req);
	hci_dev_unlock(req->hdev);

	return 0;
}

static void discov_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_update);
	u8 status = 0;

	switch (hdev->discovery.state) {
	case DISCOVERY_STARTING:
		start_discovery(hdev, &status);
		mgmt_start_discovery_complete(hdev, status);
		if (status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		break;
	case DISCOVERY_STOPPING:
		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
		mgmt_stop_discovery_complete(hdev, status);
		if (!status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		break;
	case DISCOVERY_STOPPED:
	default:
		return;
	}
}

static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* When discoverable timeout triggers, then just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
	mgmt_new_settings(hdev);
}

void hci_request_setup(struct hci_dev *hdev)
{
	INIT_WORK(&hdev->discov_update, discov_update);
	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
	INIT_WORK(&hdev->scan_update, scan_update_work);
	INIT_WORK(&hdev->connectable_update, connectable_update_work);
	INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
}

void hci_request_cancel_all(struct hci_dev *hdev)
{
	hci_req_sync_cancel(hdev, ENODEV);

	cancel_work_sync(&hdev->discov_update);
	cancel_work_sync(&hdev->bg_scan_update);
	cancel_work_sync(&hdev->scan_update);
	cancel_work_sync(&hdev->connectable_update);
	cancel_work_sync(&hdev->discoverable_update);
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}
}
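
/* Example (editor's illustration): these two helpers are expected to be
 * called once per controller lifetime by the core, roughly like:
 *
 *	hci_request_setup(hdev);	// when the hci_dev is allocated
 *	...
 *	hci_request_cancel_all(hdev);	// when it is shut down/unregistered
 */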