/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
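
/* These debugfs entries are plain files under the adapter's directory,
 * normally /sys/kernel/debug/bluetooth/hciN (illustrative shell usage,
 * assuming debugfs is mounted at its default location):
 *
 *	cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *	echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 */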

static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");
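
/* DEFINE_SIMPLE_ATTRIBUTE() expands to the boilerplate file_operations for
 * a debugfs attribute backed by a u64 get/set pair: it wires up open,
 * release, read, write and llseek, and renders the value with the given
 * printf-style format ("%llu\n" above, "0x%4.4llx\n" for voice_setting).
 * Passing NULL for the setter, as voice_setting_fops does, yields a
 * read-only attribute.
 */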

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val > hdev->conn_info_max_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_min_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_min_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
			conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val < hdev->conn_info_min_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_max_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_max_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
			conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->le_white_list, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
	.open		= white_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
		struct smp_irk *irk = list_entry(p, struct smp_irk, list);
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   __le64_to_cpu(ltk->rand), 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");
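
/* The bounds enforced above match the LE connection parameter ranges from
 * the Bluetooth Core specification: connection intervals 0x0006-0x0c80 are
 * in units of 1.25 ms (7.5 ms to 4 s). The attributes below follow the same
 * pattern: conn_latency is capped at 0x01f3 (499 connection events) and the
 * supervision timeout range 0x000a-0x0c80 is in units of 10 ms (100 ms to
 * 32 s).
 */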

static int conn_latency_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val > 0x01f3)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_latency = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_latency;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
			conn_latency_set, "%llu\n");

static int supervision_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x000a || val > 0x0c80)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_supv_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_supv_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
			supervision_timeout_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");

static int device_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct hci_conn_params *p;

	hci_dev_lock(hdev);
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
	.open		= device_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
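
/* Sketch of the typical calling pattern for __hci_cmd_sync(), mirroring the
 * dut_mode_write() handler above (error handling abbreviated):
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	err = -bt_to_errno(skb->data[0]);   (first parameter byte is status)
 *	kfree_skb(skb);
 *
 * The returned skb holds the Command Complete parameters and is owned by
 * the caller, who must free it with kfree_skb().
 */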

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
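
/* This pair follows a common kernel locking idiom: hci_req_sync() verifies
 * the device is up and takes hci_req_lock() around the request, while the
 * double-underscore variant assumes the caller already holds the request
 * lock, which is how __hci_init() and __hci_unconf_init() below are driven
 * during controller setup.
 */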

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}
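
/* The value returned here is fed to the Write Inquiry Mode command by
 * hci_setup_inquiry_mode() below: 0x00 selects standard inquiry results,
 * 0x01 results with RSSI and 0x02 extended inquiry results. The
 * manufacturer/revision checks whitelist controllers known to deliver RSSI
 * results despite not advertising the feature bit.
 */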

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, the host features page should be
		 * available as well. However some controllers list the
		 * max_page as 0 as long as SSP has not been enabled. To
		 * achieve proper debugging output, force the minimum
		 * max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported,
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported,
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev))
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x1f;

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20; /* LE Remote Connection
					    * Parameter Request
					    */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
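
/* __hci_init() below drives the staged bring-up implemented by the request
 * builders above: stage 1 resets the controller and reads basic device
 * information, stage 2 sets up BR/EDR and LE basics plus the event mask,
 * stage 3 applies link policy and LE host support, and stage 4 covers
 * page-2 event masks, Synchronization Train and Secure Connections. AMP
 * controllers stop after stage 1.
 */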

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
			    &conn_info_min_age_fops);
	debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
			    &conn_info_max_age_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_file("identity", 0400, hdev->debugfs,
				    hdev, &identity_fops);
		debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
				    hdev, &rpa_timeout_fops);
		debugfs_create_file("random_address", 0444, hdev->debugfs,
				    hdev, &random_address_fops);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

		/* For controllers with a public address, provide a debug
		 * option to force the usage of the configured static
		 * address. By default the public address is used.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			debugfs_create_file("force_static_address", 0644,
					    hdev->debugfs, hdev,
					    &force_static_address_fops);

		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
				    &white_list_fops);
		debugfs_create_file("identity_resolving_keys", 0400,
				    hdev->debugfs, hdev,
				    &identity_resolving_keys_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("conn_latency", 0644, hdev->debugfs,
				    hdev, &conn_latency_fops);
		debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
				    hdev, &supervision_timeout_fops);
		debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
				    hdev, &adv_channel_map_fops);
		debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
				    &device_list_fops);
		debugfs_create_u16("discov_interleaved_timeout", 0644,
				   hdev->debugfs,
				   &hdev->discov_interleaved_timeout);
	}

	return 0;
}

static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
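
/* "Held on return" means hci_dev_hold() took a reference on the device;
 * every successful hci_dev_get() must be balanced with hci_dev_put() once
 * the caller is done with the hdev.
 */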
1888/* ---- Inquiry support ---- */
ff9ef578 1889
1890bool hci_discovery_active(struct hci_dev *hdev)
1891{
1892 struct discovery_state *discov = &hdev->discovery;
1893
6fbe195d 1894 switch (discov->state) {
343f935b 1895 case DISCOVERY_FINDING:
6fbe195d 1896 case DISCOVERY_RESOLVING:
1897 return true;
1898
1899 default:
1900 return false;
1901 }
1902}
1903
1904void hci_discovery_set_state(struct hci_dev *hdev, int state)
1905{
1906 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1907
1908 if (hdev->discovery.state == state)
1909 return;
1910
1911 switch (state) {
1912 case DISCOVERY_STOPPED:
1913 hci_update_background_scan(hdev);
1914
1915 if (hdev->discovery.state != DISCOVERY_STARTING)
1916 mgmt_discovering(hdev, 0);
1917 break;
1918 case DISCOVERY_STARTING:
1919 break;
343f935b 1920 case DISCOVERY_FINDING:
1921 mgmt_discovering(hdev, 1);
1922 break;
1923 case DISCOVERY_RESOLVING:
1924 break;
1925 case DISCOVERY_STOPPING:
1926 break;
1927 }
1928
1929 hdev->discovery.state = state;
1930}
1931
1f9b9a5d 1932void hci_inquiry_cache_flush(struct hci_dev *hdev)
1da177e4 1933{
30883512 1934 struct discovery_state *cache = &hdev->discovery;
b57c1a56 1935 struct inquiry_entry *p, *n;
1da177e4 1936
1937 list_for_each_entry_safe(p, n, &cache->all, all) {
1938 list_del(&p->all);
b57c1a56 1939 kfree(p);
1da177e4 1940 }
1941
1942 INIT_LIST_HEAD(&cache->unknown);
1943 INIT_LIST_HEAD(&cache->resolve);
1944}
1945
1946struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1947 bdaddr_t *bdaddr)
1da177e4 1948{
30883512 1949 struct discovery_state *cache = &hdev->discovery;
1950 struct inquiry_entry *e;
1951
6ed93dc6 1952 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 1953
1954 list_for_each_entry(e, &cache->all, all) {
1955 if (!bacmp(&e->data.bdaddr, bdaddr))
1956 return e;
1957 }
1958
1959 return NULL;
1960}
1961
1962struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 1963 bdaddr_t *bdaddr)
561aafbc 1964{
30883512 1965 struct discovery_state *cache = &hdev->discovery;
1966 struct inquiry_entry *e;
1967
6ed93dc6 1968 BT_DBG("cache %p, %pMR", cache, bdaddr);
1969
1970 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 1971 if (!bacmp(&e->data.bdaddr, bdaddr))
1972 return e;
1973 }
1974
1975 return NULL;
1976}
1977
30dc78e1 1978struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1979 bdaddr_t *bdaddr,
1980 int state)
1981{
1982 struct discovery_state *cache = &hdev->discovery;
1983 struct inquiry_entry *e;
1984
6ed93dc6 1985 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1986
1987 list_for_each_entry(e, &cache->resolve, list) {
1988 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1989 return e;
1990 if (!bacmp(&e->data.bdaddr, bdaddr))
1991 return e;
1992 }
1993
1994 return NULL;
1995}
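/* Passing BDADDR_ANY makes the lookup above act as a wildcard on the
 * name state alone, e.g. to pick any entry still awaiting resolution:
 *
 *	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
 *					     NAME_NEEDED);
 */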
1996
a3d4e20a 1997void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
04124681 1998 struct inquiry_entry *ie)
1999{
2000 struct discovery_state *cache = &hdev->discovery;
2001 struct list_head *pos = &cache->resolve;
2002 struct inquiry_entry *p;
2003
2004 list_del(&ie->list);
2005
2006 list_for_each_entry(p, &cache->resolve, list) {
2007 if (p->name_state != NAME_PENDING &&
a8c5fb1a 2008 abs(p->data.rssi) >= abs(ie->data.rssi))
2009 break;
2010 pos = &p->list;
2011 }
2012
2013 list_add(&ie->list, pos);
2014}
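/* Worked example of the ordering above: RSSI is a negative dBm value,
 * so with cached entries at -40, -60 and -75 dBm a new entry at
 * -55 dBm is linked in between -40 and -60. The resolve list thus
 * stays sorted by ascending abs(rssi), and name resolution always
 * proceeds with the strongest (closest) device first.
 */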
2015
2016u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2017 bool name_known)
1da177e4 2018{
30883512 2019 struct discovery_state *cache = &hdev->discovery;
70f23020 2020 struct inquiry_entry *ie;
af58925c 2021 u32 flags = 0;
1da177e4 2022
6ed93dc6 2023 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1da177e4 2024
2025 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2026
2027 if (!data->ssp_mode)
2028 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
388fc8fa 2029
70f23020 2030 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
a3d4e20a 2031 if (ie) {
2032 if (!ie->data.ssp_mode)
2033 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
388fc8fa 2034
a3d4e20a 2035 if (ie->name_state == NAME_NEEDED &&
a8c5fb1a 2036 data->rssi != ie->data.rssi) {
2037 ie->data.rssi = data->rssi;
2038 hci_inquiry_cache_update_resolve(hdev, ie);
2039 }
2040
561aafbc 2041 goto update;
a3d4e20a 2042 }
2043
2044 /* Entry not in the cache. Add new one. */
2045 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2046 if (!ie) {
2047 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2048 goto done;
2049 }
2050
2051 list_add(&ie->all, &cache->all);
2052
2053 if (name_known) {
2054 ie->name_state = NAME_KNOWN;
2055 } else {
2056 ie->name_state = NAME_NOT_KNOWN;
2057 list_add(&ie->list, &cache->unknown);
2058 }
70f23020 2059
2060update:
2061 if (name_known && ie->name_state != NAME_KNOWN &&
a8c5fb1a 2062 ie->name_state != NAME_PENDING) {
2063 ie->name_state = NAME_KNOWN;
2064 list_del(&ie->list);
2065 }
2066
2067 memcpy(&ie->data, data, sizeof(*data));
2068 ie->timestamp = jiffies;
1da177e4 2069 cache->timestamp = jiffies;
2070
2071 if (ie->name_state == NAME_NOT_KNOWN)
af58925c 2072 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
3175405b 2073
2074done:
2075 return flags;
2076}
2077
2078static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2079{
30883512 2080 struct discovery_state *cache = &hdev->discovery;
2081 struct inquiry_info *info = (struct inquiry_info *) buf;
2082 struct inquiry_entry *e;
2083 int copied = 0;
2084
561aafbc 2085 list_for_each_entry(e, &cache->all, all) {
1da177e4 2086 struct inquiry_data *data = &e->data;
2087
2088 if (copied >= num)
2089 break;
2090
2091 bacpy(&info->bdaddr, &data->bdaddr);
2092 info->pscan_rep_mode = data->pscan_rep_mode;
2093 info->pscan_period_mode = data->pscan_period_mode;
2094 info->pscan_mode = data->pscan_mode;
2095 memcpy(info->dev_class, data->dev_class, 3);
2096 info->clock_offset = data->clock_offset;
b57c1a56 2097
1da177e4 2098 info++;
b57c1a56 2099 copied++;
2100 }
2101
2102 BT_DBG("cache %p, copied %d", cache, copied);
2103 return copied;
2104}
2105
42c6b129 2106static void hci_inq_req(struct hci_request *req, unsigned long opt)
2107{
2108 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
42c6b129 2109 struct hci_dev *hdev = req->hdev;
1da177e4
LT
2110 struct hci_cp_inquiry cp;
2111
2112 BT_DBG("%s", hdev->name);
2113
2114 if (test_bit(HCI_INQUIRY, &hdev->flags))
2115 return;
2116
2117 /* Start Inquiry */
2118 memcpy(&cp.lap, &ir->lap, 3);
2119 cp.length = ir->length;
2120 cp.num_rsp = ir->num_rsp;
42c6b129 2121 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2122}
2123
2124static int wait_inquiry(void *word)
2125{
2126 schedule();
2127 return signal_pending(current);
2128}
2129
2130int hci_inquiry(void __user *arg)
2131{
2132 __u8 __user *ptr = arg;
2133 struct hci_inquiry_req ir;
2134 struct hci_dev *hdev;
2135 int err = 0, do_inquiry = 0, max_rsp;
2136 long timeo;
2137 __u8 *buf;
2138
2139 if (copy_from_user(&ir, ptr, sizeof(ir)))
2140 return -EFAULT;
2141
2142 hdev = hci_dev_get(ir.dev_id);
2143 if (!hdev)
2144 return -ENODEV;
2145
2146 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2147 err = -EBUSY;
2148 goto done;
2149 }
2150
4a964404 2151 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2152 err = -EOPNOTSUPP;
2153 goto done;
2154 }
2155
2156 if (hdev->dev_type != HCI_BREDR) {
2157 err = -EOPNOTSUPP;
2158 goto done;
2159 }
2160
2161 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2162 err = -EOPNOTSUPP;
2163 goto done;
2164 }
2165
09fd0de5 2166 hci_dev_lock(hdev);
8e87d142 2167 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 2168 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1f9b9a5d 2169 hci_inquiry_cache_flush(hdev);
2170 do_inquiry = 1;
2171 }
09fd0de5 2172 hci_dev_unlock(hdev);
1da177e4 2173
04837f64 2174 timeo = ir.length * msecs_to_jiffies(2000);
2175
2176 if (do_inquiry) {
2177 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2178 timeo);
2179 if (err < 0)
2180 goto done;
2181
2182 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2183 * cleared). If it is interrupted by a signal, return -EINTR.
2184 */
2185 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2186 TASK_INTERRUPTIBLE))
2187 return -EINTR;
70f23020 2188 }
1da177e4 2189
 2190	/* For an unlimited number of responses, use a buffer with
 2191	 * 255 entries.
 2192	 */
 2193	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
 2194
 2195	/* cache_dump can't sleep. Therefore we allocate a temp buffer
 2196	 * and then copy it to user space.
 2197	 */
01df8c31 2198 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 2199 if (!buf) {
2200 err = -ENOMEM;
2201 goto done;
2202 }
2203
09fd0de5 2204 hci_dev_lock(hdev);
1da177e4 2205 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 2206 hci_dev_unlock(hdev);
2207
2208 BT_DBG("num_rsp %d", ir.num_rsp);
2209
2210 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2211 ptr += sizeof(ir);
2212 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 2213 ir.num_rsp))
1da177e4 2214 err = -EFAULT;
8e87d142 2215 } else
2216 err = -EFAULT;
2217
2218 kfree(buf);
2219
2220done:
2221 hci_dev_put(hdev);
2222 return err;
2223}
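/* Userspace sketch (illustrative, not part of this file): the ioctl
 * expects a struct hci_inquiry_req immediately followed by the reply
 * buffer, matching the copy_to_user() layout above; 0x9e8b33 is the
 * General Inquiry Access Code:
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[255];
 *	} buf = { .ir = { .dev_id = 0, .flags = IREQ_CACHE_FLUSH,
 *			  .lap = { 0x33, 0x8b, 0x9e },
 *			  .length = 8, .num_rsp = 255 } };
 *	int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	if (ioctl(ctl, HCIINQUIRY, (unsigned long) &buf) < 0)
 *		perror("HCIINQUIRY");
 */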
2224
cbed0ca1 2225static int hci_dev_do_open(struct hci_dev *hdev)
1da177e4 2226{
2227 int ret = 0;
2228
2229 BT_DBG("%s %p", hdev->name, hdev);
2230
2231 hci_req_lock(hdev);
2232
2233 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2234 ret = -ENODEV;
2235 goto done;
2236 }
2237
2238 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2239 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2240 /* Check for rfkill but allow the HCI setup stage to
2241 * proceed (which in itself doesn't cause any RF activity).
2242 */
2243 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2244 ret = -ERFKILL;
2245 goto done;
2246 }
2247
2248 /* Check for valid public address or a configured static
 2249	 * random address, but let the HCI setup proceed to
2250 * be able to determine if there is a public address
2251 * or not.
2252 *
2253 * In case of user channel usage, it is not important
2254 * if a public address or static random address is
2255 * available.
2256 *
2257 * This check is only valid for BR/EDR controllers
2258 * since AMP controllers do not have an address.
2259 */
2260 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2261 hdev->dev_type == HCI_BREDR &&
2262 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2263 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2264 ret = -EADDRNOTAVAIL;
2265 goto done;
2266 }
2267 }
2268
2269 if (test_bit(HCI_UP, &hdev->flags)) {
2270 ret = -EALREADY;
2271 goto done;
2272 }
2273
2274 if (hdev->open(hdev)) {
2275 ret = -EIO;
2276 goto done;
2277 }
2278
2279 atomic_set(&hdev->cmd_cnt, 1);
2280 set_bit(HCI_INIT, &hdev->flags);
2281
2282 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2283 if (hdev->setup)
2284 ret = hdev->setup(hdev);
f41c70c4 2285
2286 /* The transport driver can set these quirks before
2287 * creating the HCI device or in its setup callback.
2288 *
2289 * In case any of them is set, the controller has to
2290 * start up as unconfigured.
2291 */
2292 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2293 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
89bc22d2 2294 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
2295
2296 /* For an unconfigured controller it is required to
2297 * read at least the version information provided by
2298 * the Read Local Version Information command.
2299 *
2300 * If the set_bdaddr driver callback is provided, then
2301 * also the original Bluetooth public device address
2302 * will be read using the Read BD Address command.
2303 */
2304 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2305 ret = __hci_unconf_init(hdev);
2306 }
2307
2308 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2309 /* If public address change is configured, ensure that
2310 * the address gets programmed. If the driver does not
2311 * support changing the public address, fail the power
2312 * on procedure.
2313 */
2314 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2315 hdev->set_bdaddr)
2316 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2317 else
2318 ret = -EADDRNOTAVAIL;
2319 }
2320
f41c70c4 2321 if (!ret) {
4a964404 2322 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
0736cfa8 2323 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
f41c70c4 2324 ret = __hci_init(hdev);
2325 }
2326
2327 clear_bit(HCI_INIT, &hdev->flags);
2328
2329 if (!ret) {
2330 hci_dev_hold(hdev);
d6bfd59c 2331 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2332 set_bit(HCI_UP, &hdev->flags);
2333 hci_notify(hdev, HCI_DEV_UP);
bb4b2a9a 2334 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
d603b76b 2335 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
4a964404 2336 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
0736cfa8 2337 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1514b892 2338 hdev->dev_type == HCI_BREDR) {
09fd0de5 2339 hci_dev_lock(hdev);
744cf19e 2340 mgmt_powered(hdev, 1);
09fd0de5 2341 hci_dev_unlock(hdev);
56e5cb86 2342 }
8e87d142 2343 } else {
1da177e4 2344 /* Init failed, cleanup */
3eff45ea 2345 flush_work(&hdev->tx_work);
c347b765 2346 flush_work(&hdev->cmd_work);
b78752cc 2347 flush_work(&hdev->rx_work);
2348
2349 skb_queue_purge(&hdev->cmd_q);
2350 skb_queue_purge(&hdev->rx_q);
2351
2352 if (hdev->flush)
2353 hdev->flush(hdev);
2354
2355 if (hdev->sent_cmd) {
2356 kfree_skb(hdev->sent_cmd);
2357 hdev->sent_cmd = NULL;
2358 }
2359
2360 hdev->close(hdev);
fee746b0 2361 hdev->flags &= BIT(HCI_RAW);
2362 }
2363
2364done:
2365 hci_req_unlock(hdev);
2366 return ret;
2367}
2368
2369/* ---- HCI ioctl helpers ---- */
2370
2371int hci_dev_open(__u16 dev)
2372{
2373 struct hci_dev *hdev;
2374 int err;
2375
2376 hdev = hci_dev_get(dev);
2377 if (!hdev)
2378 return -ENODEV;
2379
4a964404 2380	/* Devices that are marked as unconfigured can only be powered
 2381	 * up as user channel. Trying to bring them up as normal devices
 2382	 * will result in a failure. Only user channel operation is
 2383	 * possible.
2384 *
2385 * When this function is called for a user channel, the flag
2386 * HCI_USER_CHANNEL will be set first before attempting to
2387 * open the device.
2388 */
4a964404 2389 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2390 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2391 err = -EOPNOTSUPP;
2392 goto done;
2393 }
2394
2395 /* We need to ensure that no other power on/off work is pending
2396 * before proceeding to call hci_dev_do_open. This is
2397 * particularly important if the setup procedure has not yet
2398 * completed.
2399 */
2400 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2401 cancel_delayed_work(&hdev->power_off);
2402
2403 /* After this call it is guaranteed that the setup procedure
2404 * has finished. This means that error conditions like RFKILL
2405 * or no valid public or static random address apply.
2406 */
2407 flush_workqueue(hdev->req_workqueue);
2408
2409 err = hci_dev_do_open(hdev);
2410
fee746b0 2411done:
cbed0ca1 2412 hci_dev_put(hdev);
2413 return err;
2414}
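/* Usage sketch (illustrative): userspace reaches hci_dev_open()
 * through the HCIDEVUP ioctl on a raw HCI control socket:
 *
 *	int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	if (ioctl(ctl, HCIDEVUP, 0) < 0 && errno != EALREADY)
 *		perror("HCIDEVUP");
 */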
2415
2416/* This function requires the caller holds hdev->lock */
2417static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2418{
2419 struct hci_conn_params *p;
2420
2421 list_for_each_entry(p, &hdev->le_conn_params, list)
2422 list_del_init(&p->action);
2423
2424 BT_DBG("All LE pending actions cleared");
2425}
2426
2427static int hci_dev_do_close(struct hci_dev *hdev)
2428{
2429 BT_DBG("%s %p", hdev->name, hdev);
2430
2431 cancel_delayed_work(&hdev->power_off);
2432
2433 hci_req_cancel(hdev, ENODEV);
2434 hci_req_lock(hdev);
2435
2436 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
65cc2b49 2437 cancel_delayed_work_sync(&hdev->cmd_timer);
2438 hci_req_unlock(hdev);
2439 return 0;
2440 }
2441
2442 /* Flush RX and TX works */
2443 flush_work(&hdev->tx_work);
b78752cc 2444 flush_work(&hdev->rx_work);
1da177e4 2445
16ab91ab 2446 if (hdev->discov_timeout > 0) {
e0f9309f 2447 cancel_delayed_work(&hdev->discov_off);
16ab91ab 2448 hdev->discov_timeout = 0;
5e5282bb 2449 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
310a3d48 2450 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2451 }
2452
a8b2d5c2 2453 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2454 cancel_delayed_work(&hdev->service_cache);
2455
7ba8b4be 2456 cancel_delayed_work_sync(&hdev->le_scan_disable);
2457
2458 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2459 cancel_delayed_work_sync(&hdev->rpa_expired);
7ba8b4be 2460
09fd0de5 2461 hci_dev_lock(hdev);
1f9b9a5d 2462 hci_inquiry_cache_flush(hdev);
1da177e4 2463 hci_conn_hash_flush(hdev);
d7347f3c 2464 hci_pend_le_actions_clear(hdev);
09fd0de5 2465 hci_dev_unlock(hdev);
2466
2467 hci_notify(hdev, HCI_DEV_DOWN);
2468
2469 if (hdev->flush)
2470 hdev->flush(hdev);
2471
2472 /* Reset device */
2473 skb_queue_purge(&hdev->cmd_q);
2474 atomic_set(&hdev->cmd_cnt, 1);
2475 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2476 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
a6c511c6 2477 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1da177e4 2478 set_bit(HCI_INIT, &hdev->flags);
01178cd4 2479 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2480 clear_bit(HCI_INIT, &hdev->flags);
2481 }
2482
2483 /* flush cmd work */
2484 flush_work(&hdev->cmd_work);
2485
2486 /* Drop queues */
2487 skb_queue_purge(&hdev->rx_q);
2488 skb_queue_purge(&hdev->cmd_q);
2489 skb_queue_purge(&hdev->raw_q);
2490
2491 /* Drop last sent command */
2492 if (hdev->sent_cmd) {
65cc2b49 2493 cancel_delayed_work_sync(&hdev->cmd_timer);
2494 kfree_skb(hdev->sent_cmd);
2495 hdev->sent_cmd = NULL;
2496 }
2497
2498 kfree_skb(hdev->recv_evt);
2499 hdev->recv_evt = NULL;
2500
2501 /* After this point our queues are empty
2502 * and no tasks are scheduled. */
2503 hdev->close(hdev);
2504
35b973c9 2505 /* Clear flags */
fee746b0 2506 hdev->flags &= BIT(HCI_RAW);
2507 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2508
2509 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2510 if (hdev->dev_type == HCI_BREDR) {
2511 hci_dev_lock(hdev);
2512 mgmt_powered(hdev, 0);
2513 hci_dev_unlock(hdev);
2514 }
8ee56540 2515 }
5add6af8 2516
ced5c338 2517 /* Controller radio is available but is currently powered down */
536619e8 2518 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
ced5c338 2519
e59fda8d 2520 memset(hdev->eir, 0, sizeof(hdev->eir));
09b3c3fb 2521 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
7a4cd51d 2522 bacpy(&hdev->random_addr, BDADDR_ANY);
e59fda8d 2523
2524 hci_req_unlock(hdev);
2525
2526 hci_dev_put(hdev);
2527 return 0;
2528}
2529
2530int hci_dev_close(__u16 dev)
2531{
2532 struct hci_dev *hdev;
2533 int err;
2534
2535 hdev = hci_dev_get(dev);
2536 if (!hdev)
1da177e4 2537 return -ENODEV;
8ee56540 2538
2539 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2540 err = -EBUSY;
2541 goto done;
2542 }
2543
2544 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2545 cancel_delayed_work(&hdev->power_off);
2546
1da177e4 2547 err = hci_dev_do_close(hdev);
8ee56540 2548
0736cfa8 2549done:
2550 hci_dev_put(hdev);
2551 return err;
2552}
2553
2554int hci_dev_reset(__u16 dev)
2555{
2556 struct hci_dev *hdev;
2557 int ret = 0;
2558
2559 hdev = hci_dev_get(dev);
2560 if (!hdev)
2561 return -ENODEV;
2562
2563 hci_req_lock(hdev);
1da177e4 2564
2565 if (!test_bit(HCI_UP, &hdev->flags)) {
2566 ret = -ENETDOWN;
1da177e4 2567 goto done;
808a049e 2568 }
1da177e4 2569
2570 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2571 ret = -EBUSY;
2572 goto done;
2573 }
2574
4a964404 2575 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2576 ret = -EOPNOTSUPP;
2577 goto done;
2578 }
2579
2580 /* Drop queues */
2581 skb_queue_purge(&hdev->rx_q);
2582 skb_queue_purge(&hdev->cmd_q);
2583
09fd0de5 2584 hci_dev_lock(hdev);
1f9b9a5d 2585 hci_inquiry_cache_flush(hdev);
1da177e4 2586 hci_conn_hash_flush(hdev);
09fd0de5 2587 hci_dev_unlock(hdev);
2588
2589 if (hdev->flush)
2590 hdev->flush(hdev);
2591
8e87d142 2592 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 2593 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4 2594
fee746b0 2595 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2596
2597done:
2598 hci_req_unlock(hdev);
2599 hci_dev_put(hdev);
2600 return ret;
2601}
2602
2603int hci_dev_reset_stat(__u16 dev)
2604{
2605 struct hci_dev *hdev;
2606 int ret = 0;
2607
2608 hdev = hci_dev_get(dev);
2609 if (!hdev)
2610 return -ENODEV;
2611
2612 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2613 ret = -EBUSY;
2614 goto done;
2615 }
2616
4a964404 2617 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2618 ret = -EOPNOTSUPP;
2619 goto done;
2620 }
2621
2622 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2623
0736cfa8 2624done:
1da177e4 2625 hci_dev_put(hdev);
2626 return ret;
2627}
2628
2629int hci_dev_cmd(unsigned int cmd, void __user *arg)
2630{
2631 struct hci_dev *hdev;
2632 struct hci_dev_req dr;
2633 int err = 0;
2634
2635 if (copy_from_user(&dr, arg, sizeof(dr)))
2636 return -EFAULT;
2637
2638 hdev = hci_dev_get(dr.dev_id);
2639 if (!hdev)
2640 return -ENODEV;
2641
2642 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2643 err = -EBUSY;
2644 goto done;
2645 }
2646
4a964404 2647 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2648 err = -EOPNOTSUPP;
2649 goto done;
2650 }
2651
2652 if (hdev->dev_type != HCI_BREDR) {
2653 err = -EOPNOTSUPP;
2654 goto done;
2655 }
2656
2657 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2658 err = -EOPNOTSUPP;
2659 goto done;
2660 }
2661
2662 switch (cmd) {
2663 case HCISETAUTH:
2664 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2665 HCI_INIT_TIMEOUT);
2666 break;
2667
2668 case HCISETENCRYPT:
2669 if (!lmp_encrypt_capable(hdev)) {
2670 err = -EOPNOTSUPP;
2671 break;
2672 }
2673
2674 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2675 /* Auth must be enabled first */
2676 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2677 HCI_INIT_TIMEOUT);
2678 if (err)
2679 break;
2680 }
2681
2682 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2683 HCI_INIT_TIMEOUT);
2684 break;
2685
2686 case HCISETSCAN:
2687 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2688 HCI_INIT_TIMEOUT);
2689 break;
2690
1da177e4 2691 case HCISETLINKPOL:
2692 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2693 HCI_INIT_TIMEOUT);
2694 break;
2695
2696 case HCISETLINKMODE:
2697 hdev->link_mode = ((__u16) dr.dev_opt) &
2698 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2699 break;
2700
2701 case HCISETPTYPE:
2702 hdev->pkt_type = (__u16) dr.dev_opt;
2703 break;
2704
2705 case HCISETACLMTU:
2706 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2707 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2708 break;
2709
2710 case HCISETSCOMTU:
2711 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2712 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2713 break;
2714
2715 default:
2716 err = -EINVAL;
2717 break;
2718 }
e4e8e37c 2719
0736cfa8 2720done:
2721 hci_dev_put(hdev);
2722 return err;
2723}
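/* Usage sketch (illustrative): HCISETSCAN ends up in hci_scan_req()
 * above, with dev_opt carrying the Write Scan Enable parameter:
 *
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,
 *		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,
 *	};
 *	ioctl(ctl, HCISETSCAN, (unsigned long) &dr);
 *
 * For HCISETACLMTU and HCISETSCOMTU, dev_opt packs two __u16 values
 * (on a little-endian system the MTU sits in the upper half and the
 * packet count in the lower half), matching the pointer arithmetic
 * in the handlers above.
 */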
2724
2725int hci_get_dev_list(void __user *arg)
2726{
8035ded4 2727 struct hci_dev *hdev;
2728 struct hci_dev_list_req *dl;
2729 struct hci_dev_req *dr;
2730 int n = 0, size, err;
2731 __u16 dev_num;
2732
2733 if (get_user(dev_num, (__u16 __user *) arg))
2734 return -EFAULT;
2735
2736 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2737 return -EINVAL;
2738
2739 size = sizeof(*dl) + dev_num * sizeof(*dr);
2740
2741 dl = kzalloc(size, GFP_KERNEL);
2742 if (!dl)
2743 return -ENOMEM;
2744
2745 dr = dl->dev_req;
2746
f20d09d5 2747 read_lock(&hci_dev_list_lock);
8035ded4 2748 list_for_each_entry(hdev, &hci_dev_list, list) {
a8b2d5c2 2749 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
e0f9309f 2750 cancel_delayed_work(&hdev->power_off);
c542a06c 2751
2752 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2753 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 2754
2755 (dr + n)->dev_id = hdev->id;
2756 (dr + n)->dev_opt = hdev->flags;
c542a06c 2757
2758 if (++n >= dev_num)
2759 break;
2760 }
f20d09d5 2761 read_unlock(&hci_dev_list_lock);
2762
2763 dl->dev_num = n;
2764 size = sizeof(*dl) + n * sizeof(*dr);
2765
2766 err = copy_to_user(arg, dl, size);
2767 kfree(dl);
2768
2769 return err ? -EFAULT : 0;
2770}
2771
2772int hci_get_dev_info(void __user *arg)
2773{
2774 struct hci_dev *hdev;
2775 struct hci_dev_info di;
2776 int err = 0;
2777
2778 if (copy_from_user(&di, arg, sizeof(di)))
2779 return -EFAULT;
2780
2781 hdev = hci_dev_get(di.dev_id);
2782 if (!hdev)
2783 return -ENODEV;
2784
a8b2d5c2 2785 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
3243553f 2786 cancel_delayed_work_sync(&hdev->power_off);
ab81cbf9 2787
2788 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2789 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 2790
2791 strcpy(di.name, hdev->name);
2792 di.bdaddr = hdev->bdaddr;
60f2a3ed 2793 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2794 di.flags = hdev->flags;
2795 di.pkt_type = hdev->pkt_type;
2796 if (lmp_bredr_capable(hdev)) {
2797 di.acl_mtu = hdev->acl_mtu;
2798 di.acl_pkts = hdev->acl_pkts;
2799 di.sco_mtu = hdev->sco_mtu;
2800 di.sco_pkts = hdev->sco_pkts;
2801 } else {
2802 di.acl_mtu = hdev->le_mtu;
2803 di.acl_pkts = hdev->le_pkts;
2804 di.sco_mtu = 0;
2805 di.sco_pkts = 0;
2806 }
2807 di.link_policy = hdev->link_policy;
2808 di.link_mode = hdev->link_mode;
2809
2810 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2811 memcpy(&di.features, &hdev->features, sizeof(di.features));
2812
2813 if (copy_to_user(arg, &di, sizeof(di)))
2814 err = -EFAULT;
2815
2816 hci_dev_put(hdev);
2817
2818 return err;
2819}
2820
2821/* ---- Interface to HCI drivers ---- */
2822
2823static int hci_rfkill_set_block(void *data, bool blocked)
2824{
2825 struct hci_dev *hdev = data;
2826
2827 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2828
2829 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2830 return -EBUSY;
2831
2832 if (blocked) {
2833 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2834 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2835 !test_bit(HCI_CONFIG, &hdev->dev_flags))
bf543036 2836 hci_dev_do_close(hdev);
2837 } else {
2838 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
1025c04c 2839 }
2840
2841 return 0;
2842}
2843
2844static const struct rfkill_ops hci_rfkill_ops = {
2845 .set_block = hci_rfkill_set_block,
2846};
2847
2848static void hci_power_on(struct work_struct *work)
2849{
2850 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
96570ffc 2851 int err;
2852
2853 BT_DBG("%s", hdev->name);
2854
cbed0ca1 2855 err = hci_dev_do_open(hdev);
2856 if (err < 0) {
2857 mgmt_set_powered_failed(hdev, err);
ab81cbf9 2858 return;
96570ffc 2859 }
ab81cbf9 2860
2861 /* During the HCI setup phase, a few error conditions are
2862 * ignored and they need to be checked now. If they are still
2863 * valid, it is important to turn the device back off.
2864 */
2865 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
4a964404 2866 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
2867 (hdev->dev_type == HCI_BREDR &&
2868 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2869 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2870 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2871 hci_dev_do_close(hdev);
2872 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2873 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2874 HCI_AUTO_OFF_TIMEOUT);
bf543036 2875 }
ab81cbf9 2876
fee746b0 2877 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
 2878	/* For unconfigured devices, set the HCI_RAW flag
 2879	 * so that userspace can easily identify them.
 2880	 */
 2881	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
 2882	set_bit(HCI_RAW, &hdev->flags);
 2883
 2884	/* For fully configured devices, this will send
 2885	 * the Index Added event. For unconfigured devices,
 2886	 * it will send the Unconfigured Index Added event.
 2887	 *
 2888	 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
 2889	 * and no event will be sent.
 2890	 */
2891 mgmt_index_added(hdev);
d603b76b 2892 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
2893 /* When the controller is now configured, then it
2894 * is important to clear the HCI_RAW flag.
2895 */
2896 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2897 clear_bit(HCI_RAW, &hdev->flags);
2898
2899 /* Powering on the controller with HCI_CONFIG set only
2900 * happens with the transition from unconfigured to
2901 * configured. This will send the Index Added event.
2902 */
2903 mgmt_index_added(hdev);
fee746b0 2904 }
2905}
2906
2907static void hci_power_off(struct work_struct *work)
2908{
3243553f 2909 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 2910 power_off.work);
2911
2912 BT_DBG("%s", hdev->name);
2913
8ee56540 2914 hci_dev_do_close(hdev);
2915}
2916
2917static void hci_discov_off(struct work_struct *work)
2918{
2919 struct hci_dev *hdev;
2920
2921 hdev = container_of(work, struct hci_dev, discov_off.work);
2922
2923 BT_DBG("%s", hdev->name);
2924
d1967ff8 2925 mgmt_discoverable_timeout(hdev);
2926}
2927
35f7498a 2928void hci_uuids_clear(struct hci_dev *hdev)
2aeb9a1a 2929{
4821002c 2930 struct bt_uuid *uuid, *tmp;
2aeb9a1a 2931
2932 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2933 list_del(&uuid->list);
2934 kfree(uuid);
2935 }
2936}
2937
35f7498a 2938void hci_link_keys_clear(struct hci_dev *hdev)
2939{
2940 struct list_head *p, *n;
2941
2942 list_for_each_safe(p, n, &hdev->link_keys) {
2943 struct link_key *key;
2944
2945 key = list_entry(p, struct link_key, list);
2946
2947 list_del(p);
2948 kfree(key);
2949 }
2950}
2951
35f7498a 2952void hci_smp_ltks_clear(struct hci_dev *hdev)
2953{
2954 struct smp_ltk *k, *tmp;
2955
2956 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2957 list_del(&k->list);
2958 kfree(k);
2959 }
2960}
2961
2962void hci_smp_irks_clear(struct hci_dev *hdev)
2963{
2964 struct smp_irk *k, *tmp;
2965
2966 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2967 list_del(&k->list);
2968 kfree(k);
2969 }
2970}
2971
2972struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2973{
8035ded4 2974 struct link_key *k;
55ed8ca1 2975
8035ded4 2976 list_for_each_entry(k, &hdev->link_keys, list)
2977 if (bacmp(bdaddr, &k->bdaddr) == 0)
2978 return k;
2979
2980 return NULL;
2981}
2982
745c0ce3 2983static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 2984 u8 key_type, u8 old_key_type)
2985{
2986 /* Legacy key */
2987 if (key_type < 0x03)
745c0ce3 2988 return true;
2989
2990 /* Debug keys are insecure so don't store them persistently */
2991 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 2992 return false;
2993
2994 /* Changed combination key and there's no previous one */
2995 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 2996 return false;
2997
2998 /* Security mode 3 case */
2999 if (!conn)
745c0ce3 3000 return true;
3001
3002 /* Neither local nor remote side had no-bonding as requirement */
3003 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 3004 return true;
3005
3006 /* Local side had dedicated bonding as requirement */
3007 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 3008 return true;
3009
3010 /* Remote side had dedicated bonding as requirement */
3011 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 3012 return true;
3013
3014 /* If none of the above criteria match, then don't store the key
3015 * persistently */
745c0ce3 3016 return false;
3017}
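/* Two examples of the policy above: a combination key created during
 * dedicated bonding (auth_type 0x02 or 0x03 on either side) is kept,
 * while an HCI_LK_DEBUG_COMBINATION key is never stored persistently.
 */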
3018
3019static bool ltk_type_master(u8 type)
3020{
d97c9fb0 3021 return (type == SMP_LTK);
3022}
3023
fe39c7b2 3024struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
98a0b845 3025 bool master)
75d262c2 3026{
c9839a11 3027 struct smp_ltk *k;
75d262c2 3028
c9839a11 3029 list_for_each_entry(k, &hdev->long_term_keys, list) {
fe39c7b2 3030 if (k->ediv != ediv || k->rand != rand)
3031 continue;
3032
3033 if (ltk_type_master(k->type) != master)
3034 continue;
3035
c9839a11 3036 return k;
3037 }
3038
3039 return NULL;
3040}
75d262c2 3041
c9839a11 3042struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
98a0b845 3043 u8 addr_type, bool master)
75d262c2 3044{
c9839a11 3045 struct smp_ltk *k;
75d262c2 3046
3047 list_for_each_entry(k, &hdev->long_term_keys, list)
3048 if (addr_type == k->bdaddr_type &&
3049 bacmp(bdaddr, &k->bdaddr) == 0 &&
3050 ltk_type_master(k->type) == master)
3051 return k;
3052
3053 return NULL;
3054}
75d262c2 3055
3056struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3057{
3058 struct smp_irk *irk;
3059
3060 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3061 if (!bacmp(&irk->rpa, rpa))
3062 return irk;
3063 }
3064
3065 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3066 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
3067 bacpy(&irk->rpa, rpa);
3068 return irk;
3069 }
3070 }
3071
3072 return NULL;
3073}
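/* The lookup above is deliberately two-pass: first a cheap comparison
 * against the last known RPA, and only then the AES-based
 * smp_irk_matches() check, whose positive result is cached back into
 * irk->rpa so the next lookup for the same peer takes the fast path.
 */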
3074
3075struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3076 u8 addr_type)
3077{
3078 struct smp_irk *irk;
3079
3080 /* Identity Address must be public or static random */
3081 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3082 return NULL;
3083
3084 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3085 if (addr_type == irk->addr_type &&
3086 bacmp(bdaddr, &irk->bdaddr) == 0)
3087 return irk;
3088 }
3089
3090 return NULL;
3091}
3092
567fa2aa 3093struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3094 bdaddr_t *bdaddr, u8 *val, u8 type,
3095 u8 pin_len, bool *persistent)
3096{
3097 struct link_key *key, *old_key;
745c0ce3 3098 u8 old_key_type;
3099
3100 old_key = hci_find_link_key(hdev, bdaddr);
3101 if (old_key) {
3102 old_key_type = old_key->type;
3103 key = old_key;
3104 } else {
12adcf3a 3105 old_key_type = conn ? conn->key_type : 0xff;
0a14ab41 3106 key = kzalloc(sizeof(*key), GFP_KERNEL);
55ed8ca1 3107 if (!key)
567fa2aa 3108 return NULL;
3109 list_add(&key->list, &hdev->link_keys);
3110 }
3111
6ed93dc6 3112 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 3113
3114 /* Some buggy controller combinations generate a changed
3115 * combination key for legacy pairing even when there's no
3116 * previous key */
3117 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 3118 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 3119 type = HCI_LK_COMBINATION;
3120 if (conn)
3121 conn->key_type = type;
3122 }
d25e28ab 3123
55ed8ca1 3124 bacpy(&key->bdaddr, bdaddr);
9b3b4460 3125 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3126 key->pin_len = pin_len;
3127
b6020ba0 3128 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 3129 key->type = old_key_type;
3130 else
3131 key->type = type;
3132
3133 if (persistent)
3134 *persistent = hci_persistent_key(hdev, conn, type,
3135 old_key_type);
55ed8ca1 3136
567fa2aa 3137 return key;
3138}
3139
ca9142b8 3140struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
35d70271 3141 u8 addr_type, u8 type, u8 authenticated,
fe39c7b2 3142 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
75d262c2 3143{
c9839a11 3144 struct smp_ltk *key, *old_key;
98a0b845 3145 bool master = ltk_type_master(type);
75d262c2 3146
98a0b845 3147 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
c9839a11 3148 if (old_key)
75d262c2 3149 key = old_key;
c9839a11 3150 else {
0a14ab41 3151 key = kzalloc(sizeof(*key), GFP_KERNEL);
75d262c2 3152 if (!key)
ca9142b8 3153 return NULL;
c9839a11 3154 list_add(&key->list, &hdev->long_term_keys);
3155 }
3156
75d262c2 3157 bacpy(&key->bdaddr, bdaddr);
3158 key->bdaddr_type = addr_type;
3159 memcpy(key->val, tk, sizeof(key->val));
3160 key->authenticated = authenticated;
3161 key->ediv = ediv;
fe39c7b2 3162 key->rand = rand;
3163 key->enc_size = enc_size;
3164 key->type = type;
75d262c2 3165
ca9142b8 3166 return key;
3167}
3168
3169struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3170 u8 addr_type, u8 val[16], bdaddr_t *rpa)
3171{
3172 struct smp_irk *irk;
3173
3174 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3175 if (!irk) {
3176 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3177 if (!irk)
ca9142b8 3178 return NULL;
3179
3180 bacpy(&irk->bdaddr, bdaddr);
3181 irk->addr_type = addr_type;
3182
3183 list_add(&irk->list, &hdev->identity_resolving_keys);
3184 }
3185
3186 memcpy(irk->val, val, 16);
3187 bacpy(&irk->rpa, rpa);
3188
ca9142b8 3189 return irk;
3190}
3191
3192int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3193{
3194 struct link_key *key;
3195
3196 key = hci_find_link_key(hdev, bdaddr);
3197 if (!key)
3198 return -ENOENT;
3199
6ed93dc6 3200 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3201
3202 list_del(&key->list);
3203 kfree(key);
3204
3205 return 0;
3206}
3207
e0b2b27e 3208int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3209{
3210 struct smp_ltk *k, *tmp;
c51ffa0b 3211 int removed = 0;
3212
3213 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
e0b2b27e 3214 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3215 continue;
3216
6ed93dc6 3217 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3218
3219 list_del(&k->list);
3220 kfree(k);
c51ffa0b 3221 removed++;
3222 }
3223
c51ffa0b 3224 return removed ? 0 : -ENOENT;
3225}
3226
3227void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3228{
3229 struct smp_irk *k, *tmp;
3230
668b7b19 3231 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3232 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3233 continue;
3234
3235 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3236
3237 list_del(&k->list);
3238 kfree(k);
3239 }
3240}
3241
6bd32326 3242/* HCI command timer function */
65cc2b49 3243static void hci_cmd_timeout(struct work_struct *work)
6bd32326 3244{
3245 struct hci_dev *hdev = container_of(work, struct hci_dev,
3246 cmd_timer.work);
6bd32326 3247
3248 if (hdev->sent_cmd) {
3249 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3250 u16 opcode = __le16_to_cpu(sent->opcode);
3251
3252 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3253 } else {
3254 BT_ERR("%s command tx timeout", hdev->name);
3255 }
3256
6bd32326 3257 atomic_set(&hdev->cmd_cnt, 1);
c347b765 3258 queue_work(hdev->workqueue, &hdev->cmd_work);
3259}
3260
2763eda6 3261struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 3262 bdaddr_t *bdaddr)
3263{
3264 struct oob_data *data;
3265
3266 list_for_each_entry(data, &hdev->remote_oob_data, list)
3267 if (bacmp(bdaddr, &data->bdaddr) == 0)
3268 return data;
3269
3270 return NULL;
3271}
3272
3273int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3274{
3275 struct oob_data *data;
3276
3277 data = hci_find_remote_oob_data(hdev, bdaddr);
3278 if (!data)
3279 return -ENOENT;
3280
6ed93dc6 3281 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3282
3283 list_del(&data->list);
3284 kfree(data);
3285
3286 return 0;
3287}
3288
35f7498a 3289void hci_remote_oob_data_clear(struct hci_dev *hdev)
3290{
3291 struct oob_data *data, *n;
3292
3293 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3294 list_del(&data->list);
3295 kfree(data);
3296 }
3297}
3298
3299int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3300 u8 *hash, u8 *randomizer)
3301{
3302 struct oob_data *data;
3303
3304 data = hci_find_remote_oob_data(hdev, bdaddr);
2763eda6 3305 if (!data) {
0a14ab41 3306 data = kmalloc(sizeof(*data), GFP_KERNEL);
3307 if (!data)
3308 return -ENOMEM;
3309
3310 bacpy(&data->bdaddr, bdaddr);
3311 list_add(&data->list, &hdev->remote_oob_data);
3312 }
3313
3314 memcpy(data->hash192, hash, sizeof(data->hash192));
3315 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
2763eda6 3316
3317 memset(data->hash256, 0, sizeof(data->hash256));
3318 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3319
3320 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3321
3322 return 0;
3323}
3324
3325int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3326 u8 *hash192, u8 *randomizer192,
3327 u8 *hash256, u8 *randomizer256)
3328{
3329 struct oob_data *data;
3330
3331 data = hci_find_remote_oob_data(hdev, bdaddr);
3332 if (!data) {
0a14ab41 3333 data = kmalloc(sizeof(*data), GFP_KERNEL);
3334 if (!data)
3335 return -ENOMEM;
3336
3337 bacpy(&data->bdaddr, bdaddr);
3338 list_add(&data->list, &hdev->remote_oob_data);
3339 }
3340
3341 memcpy(data->hash192, hash192, sizeof(data->hash192));
3342 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3343
3344 memcpy(data->hash256, hash256, sizeof(data->hash256));
3345 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3346
6ed93dc6 3347 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3348
3349 return 0;
3350}
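/* Background note (summary, not from this file): the 192-bit
 * hash/randomizer pair covers legacy Secure Simple Pairing OOB data,
 * while the 256-bit pair is only meaningful for Secure Connections
 * capable peers, which is why hci_add_remote_oob_data() above zeroes
 * it.
 */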
3351
3352struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3353 bdaddr_t *bdaddr, u8 type)
b2a66aad 3354{
8035ded4 3355 struct bdaddr_list *b;
b2a66aad 3356
3357 list_for_each_entry(b, &hdev->blacklist, list) {
3358 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 3359 return b;
b9ee0a78 3360 }
b2a66aad
AJ
3361
3362 return NULL;
3363}
3364
c9507490 3365static void hci_blacklist_clear(struct hci_dev *hdev)
3366{
3367 struct list_head *p, *n;
3368
3369 list_for_each_safe(p, n, &hdev->blacklist) {
b9ee0a78 3370 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3371
3372 list_del(p);
3373 kfree(b);
3374 }
3375}
3376
88c1fe4b 3377int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3378{
3379 struct bdaddr_list *entry;
b2a66aad 3380
b9ee0a78 3381 if (!bacmp(bdaddr, BDADDR_ANY))
3382 return -EBADF;
3383
b9ee0a78 3384 if (hci_blacklist_lookup(hdev, bdaddr, type))
5e762444 3385 return -EEXIST;
3386
3387 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3388 if (!entry)
3389 return -ENOMEM;
3390
3391 bacpy(&entry->bdaddr, bdaddr);
b9ee0a78 3392 entry->bdaddr_type = type;
3393
3394 list_add(&entry->list, &hdev->blacklist);
3395
2a8357f2 3396 return 0;
3397}
3398
88c1fe4b 3399int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3400{
3401 struct bdaddr_list *entry;
b2a66aad 3402
3403 if (!bacmp(bdaddr, BDADDR_ANY)) {
3404 hci_blacklist_clear(hdev);
3405 return 0;
3406 }
b2a66aad 3407
b9ee0a78 3408 entry = hci_blacklist_lookup(hdev, bdaddr, type);
1ec918ce 3409 if (!entry)
5e762444 3410 return -ENOENT;
3411
3412 list_del(&entry->list);
3413 kfree(entry);
3414
2a8357f2 3415 return 0;
3416}
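/* Note the asymmetry above: hci_blacklist_add() rejects BDADDR_ANY
 * with -EBADF, while hci_blacklist_del() treats it as "clear the
 * whole list" and returns success:
 *
 *	hci_blacklist_del(hdev, BDADDR_ANY, 0);
 */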
3417
3418struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
3419 bdaddr_t *bdaddr, u8 type)
3420{
3421 struct bdaddr_list *b;
3422
3423 list_for_each_entry(b, &hdev->le_white_list, list) {
3424 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3425 return b;
3426 }
3427
3428 return NULL;
3429}
3430
3431void hci_white_list_clear(struct hci_dev *hdev)
3432{
3433 struct list_head *p, *n;
3434
3435 list_for_each_safe(p, n, &hdev->le_white_list) {
3436 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3437
3438 list_del(p);
3439 kfree(b);
3440 }
3441}
3442
3443int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3444{
3445 struct bdaddr_list *entry;
3446
3447 if (!bacmp(bdaddr, BDADDR_ANY))
3448 return -EBADF;
3449
3450 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3451 if (!entry)
3452 return -ENOMEM;
3453
3454 bacpy(&entry->bdaddr, bdaddr);
3455 entry->bdaddr_type = type;
3456
3457 list_add(&entry->list, &hdev->le_white_list);
3458
3459 return 0;
3460}
3461
3462int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3463{
3464 struct bdaddr_list *entry;
3465
3466 if (!bacmp(bdaddr, BDADDR_ANY))
3467 return -EBADF;
3468
3469 entry = hci_white_list_lookup(hdev, bdaddr, type);
3470 if (!entry)
3471 return -ENOENT;
3472
3473 list_del(&entry->list);
3474 kfree(entry);
3475
3476 return 0;
3477}
3478
3479/* This function requires the caller holds hdev->lock */
3480struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3481 bdaddr_t *addr, u8 addr_type)
3482{
3483 struct hci_conn_params *params;
3484
3485 /* The conn params list only contains identity addresses */
3486 if (!hci_is_identity_address(addr, addr_type))
3487 return NULL;
3488
3489 list_for_each_entry(params, &hdev->le_conn_params, list) {
3490 if (bacmp(&params->addr, addr) == 0 &&
3491 params->addr_type == addr_type) {
3492 return params;
3493 }
3494 }
3495
3496 return NULL;
3497}
3498
3499static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3500{
3501 struct hci_conn *conn;
3502
3503 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3504 if (!conn)
3505 return false;
3506
3507 if (conn->dst_type != type)
3508 return false;
3509
3510 if (conn->state != BT_CONNECTED)
3511 return false;
3512
3513 return true;
3514}
3515
4b10966f 3516/* This function requires the caller holds hdev->lock */
3517struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3518 bdaddr_t *addr, u8 addr_type)
4b10966f 3519{
912b42ef 3520 struct hci_conn_params *param;
4b10966f 3521
3522 /* The list only contains identity addresses */
3523 if (!hci_is_identity_address(addr, addr_type))
3524 return NULL;
3525
501f8827 3526 list_for_each_entry(param, list, action) {
3527 if (bacmp(&param->addr, addr) == 0 &&
3528 param->addr_type == addr_type)
3529 return param;
3530 }
3531
3532 return NULL;
3533}
3534
3535/* This function requires the caller holds hdev->lock */
3536struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3537 bdaddr_t *addr, u8 addr_type)
3538{
3539 struct hci_conn_params *params;
3540
c46245b3 3541 if (!hci_is_identity_address(addr, addr_type))
51d167c0 3542 return NULL;
3543
3544 params = hci_conn_params_lookup(hdev, addr, addr_type);
3545 if (params)
51d167c0 3546 return params;
3547
3548 params = kzalloc(sizeof(*params), GFP_KERNEL);
3549 if (!params) {
3550 BT_ERR("Out of memory");
51d167c0 3551 return NULL;
3552 }
3553
3554 bacpy(&params->addr, addr);
3555 params->addr_type = addr_type;
3556
3557 list_add(&params->list, &hdev->le_conn_params);
93450c75 3558 INIT_LIST_HEAD(&params->action);
3559
3560 params->conn_min_interval = hdev->le_conn_min_interval;
3561 params->conn_max_interval = hdev->le_conn_max_interval;
3562 params->conn_latency = hdev->le_conn_latency;
3563 params->supervision_timeout = hdev->le_supv_timeout;
3564 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3565
3566 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3567
51d167c0 3568 return params;
3569}
3570
3571/* This function requires the caller holds hdev->lock */
3572int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
d06b50ce 3573 u8 auto_connect)
3574{
3575 struct hci_conn_params *params;
3576
3577 params = hci_conn_params_add(hdev, addr, addr_type);
3578 if (!params)
3579 return -EIO;
cef952ce 3580
3581 if (params->auto_connect == auto_connect)
3582 return 0;
3583
95305baa 3584 list_del_init(&params->action);
15819a70 3585
3586 switch (auto_connect) {
3587 case HCI_AUTO_CONN_DISABLED:
3588 case HCI_AUTO_CONN_LINK_LOSS:
95305baa 3589 hci_update_background_scan(hdev);
cef952ce 3590 break;
851efca8 3591 case HCI_AUTO_CONN_REPORT:
3592 list_add(&params->action, &hdev->pend_le_reports);
3593 hci_update_background_scan(hdev);
851efca8 3594 break;
cef952ce 3595 case HCI_AUTO_CONN_ALWAYS:
3596 if (!is_connected(hdev, addr, addr_type)) {
3597 list_add(&params->action, &hdev->pend_le_conns);
3598 hci_update_background_scan(hdev);
3599 }
3600 break;
3601 }
15819a70 3602
3603 params->auto_connect = auto_connect;
3604
3605 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3606 auto_connect);
3607
3608 return 0;
3609}
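/* Usage sketch (illustrative): callers mark a peer for automatic
 * connection establishment with the hdev lock held:
 *
 *	hci_dev_lock(hdev);
 *	hci_conn_params_set(hdev, &bdaddr, ADDR_LE_DEV_PUBLIC,
 *			    HCI_AUTO_CONN_ALWAYS);
 *	hci_dev_unlock(hdev);
 *
 * which also queues the address on hdev->pend_le_conns and kicks the
 * background scan, as the switch statement above shows.
 */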
3610
3611/* This function requires the caller holds hdev->lock */
3612void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3613{
3614 struct hci_conn_params *params;
3615
3616 params = hci_conn_params_lookup(hdev, addr, addr_type);
3617 if (!params)
3618 return;
3619
95305baa 3620 list_del(&params->action);
3621 list_del(&params->list);
3622 kfree(params);
3623
3624 hci_update_background_scan(hdev);
3625
3626 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3627}
3628
3629/* This function requires the caller holds hdev->lock */
3630void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3631{
3632 struct hci_conn_params *params, *tmp;
3633
3634 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3635 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3636 continue;
3637 list_del(&params->list);
3638 kfree(params);
3639 }
3640
3641 BT_DBG("All LE disabled connection parameters were removed");
3642}
3643
15819a70 3644/* This function requires the caller holds hdev->lock */
373110c5 3645void hci_conn_params_clear_all(struct hci_dev *hdev)
3646{
3647 struct hci_conn_params *params, *tmp;
3648
3649 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
a2f41a8f 3650 list_del(&params->action);
3651 list_del(&params->list);
3652 kfree(params);
3653 }
3654
a2f41a8f 3655 hci_update_background_scan(hdev);
1089b67d 3656
3657 BT_DBG("All LE connection parameters were removed");
3658}
3659
4c87eaab 3660static void inquiry_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3661{
4c87eaab
AG
3662 if (status) {
3663 BT_ERR("Failed to start inquiry: status %d", status);
7ba8b4be 3664
4c87eaab
AG
3665 hci_dev_lock(hdev);
3666 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3667 hci_dev_unlock(hdev);
3668 return;
3669 }
3670}
3671
4c87eaab 3672static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3673{
4c87eaab
AG
3674 /* General inquiry access code (GIAC) */
3675 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3676 struct hci_request req;
3677 struct hci_cp_inquiry cp;
3678 int err;
3679
3680 if (status) {
3681 BT_ERR("Failed to disable LE scanning: status %d", status);
3682 return;
3683 }
7ba8b4be 3684
3685 switch (hdev->discovery.type) {
3686 case DISCOV_TYPE_LE:
3687 hci_dev_lock(hdev);
3688 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3689 hci_dev_unlock(hdev);
3690 break;
7ba8b4be 3691
3692 case DISCOV_TYPE_INTERLEAVED:
3693 hci_req_init(&req, hdev);
7ba8b4be 3694
4c87eaab
AG
3695 memset(&cp, 0, sizeof(cp));
3696 memcpy(&cp.lap, lap, sizeof(cp.lap));
3697 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3698 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
7ba8b4be 3699
4c87eaab 3700 hci_dev_lock(hdev);
7dbfac1d 3701
4c87eaab 3702 hci_inquiry_cache_flush(hdev);
7dbfac1d 3703
3704 err = hci_req_run(&req, inquiry_complete);
3705 if (err) {
3706 BT_ERR("Inquiry request failed: err %d", err);
3707 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3708 }
7dbfac1d 3709
3710 hci_dev_unlock(hdev);
3711 break;
7dbfac1d 3712 }
3713}
3714
3715static void le_scan_disable_work(struct work_struct *work)
3716{
3717 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 3718 le_scan_disable.work);
3719 struct hci_request req;
3720 int err;
3721
3722 BT_DBG("%s", hdev->name);
3723
4c87eaab 3724 hci_req_init(&req, hdev);
28b75a89 3725
b1efcc28 3726 hci_req_add_le_scan_disable(&req);
28b75a89 3727
3728 err = hci_req_run(&req, le_scan_disable_work_complete);
3729 if (err)
3730 BT_ERR("Disable LE scanning request failed: err %d", err);
3731}
3732
3733static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3734{
3735 struct hci_dev *hdev = req->hdev;
3736
3737 /* If we're advertising or initiating an LE connection we can't
3738 * go ahead and change the random address at this time. This is
3739 * because the eventual initiator address used for the
3740 * subsequently created connection will be undefined (some
3741 * controllers use the new address and others the one we had
3742 * when the operation started).
3743 *
3744 * In this kind of scenario skip the update and let the random
3745 * address be updated at the next cycle.
3746 */
3747 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
3748 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3749 BT_DBG("Deferring random address update");
3750 return;
3751 }
3752
3753 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3754}
3755
3756int hci_update_random_address(struct hci_request *req, bool require_privacy,
3757 u8 *own_addr_type)
3758{
3759 struct hci_dev *hdev = req->hdev;
3760 int err;
3761
 3762	/* If privacy is enabled, use a resolvable private address. If
 3763	 * the current RPA has expired, or something other than the
 3764	 * current RPA is in use, generate a new one.
 3765	 */
3766 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3767 int to;
3768
3769 *own_addr_type = ADDR_LE_DEV_RANDOM;
3770
3771 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
2b5224dc 3772 !bacmp(&hdev->random_addr, &hdev->rpa))
3773 return 0;
3774
2b5224dc 3775 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
3776 if (err < 0) {
3777 BT_ERR("%s failed to generate new RPA", hdev->name);
3778 return err;
3779 }
3780
8d97250e 3781 set_random_addr(req, &hdev->rpa);
ebd3a747
JH
3782
3783 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3784 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3785
3786 return 0;
94b1fc92
MH
3787 }
3788
3789 /* In case of required privacy without a resolvable private address,
3790 * use an unresolvable private address. This is useful for active
3791 * scanning and non-connectable advertising.
3792 */
3793 if (require_privacy) {
3794 bdaddr_t urpa;
3795
3796 get_random_bytes(&urpa, 6);
3797 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3798
3799 *own_addr_type = ADDR_LE_DEV_RANDOM;
8d97250e 3800 set_random_addr(req, &urpa);
94b1fc92 3801 return 0;
ebd3a747
JH
3802 }
3803
3804 /* If forcing the static address is in use or there is no public
3805 * address, use the static address as the random address (but skip
3806 * the HCI command if the current random address is already the
3807 * static one).
3808 */
111902f7 3809 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
ebd3a747
JH
3810 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3811 *own_addr_type = ADDR_LE_DEV_RANDOM;
3812 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3813 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3814 &hdev->static_addr);
3815 return 0;
3816 }
3817
3818 /* Neither privacy nor a static address is being used, so use a
3819 * public address.
3820 */
3821 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3822
3823 return 0;
3824}
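/* Usage sketch (illustrative only): callers build a request, let
 * hci_update_random_address() pick the own-address type, and only then
 * queue commands that depend on it:
 *
 *	struct hci_request req;
 *	u8 own_addr_type;
 *
 *	hci_req_init(&req, hdev);
 *	if (hci_update_random_address(&req, false, &own_addr_type) < 0)
 *		return;
 *	... queue HCI commands that use own_addr_type ...
 *	hci_req_run(&req, complete_cb);
 *
 * complete_cb is a hypothetical callback name; hci_req_add_le_passive_scan()
 * later in this file follows exactly this pattern.
 */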
3825
a1f4c318
JH
3826/* Copy the Identity Address of the controller.
3827 *
3828 * If the controller has a public BD_ADDR, then by default use that one.
3829 * If this is an LE-only controller without a public address, default to
3830 * the static random address.
3831 *
3832 * For debugging purposes it is possible to force controllers with a
3833 * public address to use the static random address instead.
3834 */
3835void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3836 u8 *bdaddr_type)
3837{
111902f7 3838 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
a1f4c318
JH
3839 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3840 bacpy(bdaddr, &hdev->static_addr);
3841 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3842 } else {
3843 bacpy(bdaddr, &hdev->bdaddr);
3844 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3845 }
3846}
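/* Illustrative outcomes: a dual-mode controller with a valid public
 * BD_ADDR reports that address with ADDR_LE_DEV_PUBLIC, while an LE-only
 * controller (public address equal to BDADDR_ANY) reports
 * hdev->static_addr with ADDR_LE_DEV_RANDOM.
 */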
3847
9be0dab7
DH
3848/* Alloc HCI device */
3849struct hci_dev *hci_alloc_dev(void)
3850{
3851 struct hci_dev *hdev;
3852
3853 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3854 if (!hdev)
3855 return NULL;
3856
b1b813d4
DH
3857 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3858 hdev->esco_type = (ESCO_HV1);
3859 hdev->link_mode = (HCI_LM_ACCEPT);
b4cb9fb2
MH
3860 hdev->num_iac = 0x01; /* Support for one IAC is mandatory */
3861 hdev->io_capability = 0x03; /* No Input No Output */
96c2103a 3862 hdev->manufacturer = 0xffff; /* Default to internal use */
bbaf444a
JH
3863 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3864 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
b1b813d4 3865
b1b813d4
DH
3866 hdev->sniff_max_interval = 800;
3867 hdev->sniff_min_interval = 80;
3868
3f959d46 3869 hdev->le_adv_channel_map = 0x07;
bef64738
MH
3870 hdev->le_scan_interval = 0x0060;
3871 hdev->le_scan_window = 0x0030;
4e70c7e7
MH
3872 hdev->le_conn_min_interval = 0x0028;
3873 hdev->le_conn_max_interval = 0x0038;
04fb7d90
MH
3874 hdev->le_conn_latency = 0x0000;
3875 hdev->le_supv_timeout = 0x002a;
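/* The LE defaults above are in controller units: scan interval and
 * window use 0.625 ms units (0x0060 = 60 ms, 0x0030 = 30 ms), connection
 * intervals use 1.25 ms units (0x0028 = 50 ms, 0x0038 = 70 ms) and the
 * supervision timeout uses 10 ms units (0x002a = 420 ms).
 */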
bef64738 3876
d6bfd59c 3877 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
b9a7a61e 3878 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
31ad1691
AK
3879 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3880 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
d6bfd59c 3881
b1b813d4
DH
3882 mutex_init(&hdev->lock);
3883 mutex_init(&hdev->req_lock);
3884
3885 INIT_LIST_HEAD(&hdev->mgmt_pending);
3886 INIT_LIST_HEAD(&hdev->blacklist);
3887 INIT_LIST_HEAD(&hdev->uuids);
3888 INIT_LIST_HEAD(&hdev->link_keys);
3889 INIT_LIST_HEAD(&hdev->long_term_keys);
970c4e46 3890 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
b1b813d4 3891 INIT_LIST_HEAD(&hdev->remote_oob_data);
d2ab0ac1 3892 INIT_LIST_HEAD(&hdev->le_white_list);
15819a70 3893 INIT_LIST_HEAD(&hdev->le_conn_params);
77a77a30 3894 INIT_LIST_HEAD(&hdev->pend_le_conns);
66f8455a 3895 INIT_LIST_HEAD(&hdev->pend_le_reports);
6b536b5e 3896 INIT_LIST_HEAD(&hdev->conn_hash.list);
b1b813d4
DH
3897
3898 INIT_WORK(&hdev->rx_work, hci_rx_work);
3899 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3900 INIT_WORK(&hdev->tx_work, hci_tx_work);
3901 INIT_WORK(&hdev->power_on, hci_power_on);
b1b813d4 3902
b1b813d4
DH
3903 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3904 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3905 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3906
b1b813d4
DH
3907 skb_queue_head_init(&hdev->rx_q);
3908 skb_queue_head_init(&hdev->cmd_q);
3909 skb_queue_head_init(&hdev->raw_q);
3910
3911 init_waitqueue_head(&hdev->req_wait_q);
3912
65cc2b49 3913 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
b1b813d4 3914
b1b813d4
DH
3915 hci_init_sysfs(hdev);
3916 discovery_init(hdev);
9be0dab7
DH
3917
3918 return hdev;
3919}
3920EXPORT_SYMBOL(hci_alloc_dev);
3921
3922/* Free HCI device */
3923void hci_free_dev(struct hci_dev *hdev)
3924{
9be0dab7
DH
3925 /* will be freed via device release */
3926 put_device(&hdev->dev);
3927}
3928EXPORT_SYMBOL(hci_free_dev);
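/* Driver lifecycle sketch (illustrative; my_open/my_close/my_send are
 * hypothetical driver callbacks):
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 *
 * hci_register_dev() below rejects a device that lacks open/close
 * callbacks, so drivers must fill those in before registering.
 */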
3929
1da177e4
LT
3930/* Register HCI device */
3931int hci_register_dev(struct hci_dev *hdev)
3932{
b1b813d4 3933 int id, error;
1da177e4 3934
010666a1 3935 if (!hdev->open || !hdev->close)
1da177e4
LT
3936 return -EINVAL;
3937
08add513
MM
3938 /* Do not allow HCI_AMP devices to register at index 0,
3939 * so the index can be used as the AMP controller ID.
3940 */
3df92b31
SL
3941 switch (hdev->dev_type) {
3942 case HCI_BREDR:
3943 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3944 break;
3945 case HCI_AMP:
3946 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3947 break;
3948 default:
3949 return -EINVAL;
1da177e4 3950 }
8e87d142 3951
3df92b31
SL
3952 if (id < 0)
3953 return id;
3954
1da177e4
LT
3955 sprintf(hdev->name, "hci%d", id);
3956 hdev->id = id;
2d8b3a11
AE
3957
3958 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3959
d8537548
KC
3960 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3961 WQ_MEM_RECLAIM, 1, hdev->name);
33ca954d
DH
3962 if (!hdev->workqueue) {
3963 error = -ENOMEM;
3964 goto err;
3965 }
f48fd9c8 3966
d8537548
KC
3967 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3968 WQ_MEM_RECLAIM, 1, hdev->name);
6ead1bbc
JH
3969 if (!hdev->req_workqueue) {
3970 destroy_workqueue(hdev->workqueue);
3971 error = -ENOMEM;
3972 goto err;
3973 }
3974
0153e2ec
MH
3975 if (!IS_ERR_OR_NULL(bt_debugfs))
3976 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3977
bdc3e0f1
MH
3978 dev_set_name(&hdev->dev, "%s", hdev->name);
3979
99780a7b
JH
3980 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3981 CRYPTO_ALG_ASYNC);
3982 if (IS_ERR(hdev->tfm_aes)) {
3983 BT_ERR("Unable to create crypto context");
3984 error = PTR_ERR(hdev->tfm_aes);
3985 hdev->tfm_aes = NULL;
3986 goto err_wqueue;
3987 }
3988
bdc3e0f1 3989 error = device_add(&hdev->dev);
33ca954d 3990 if (error < 0)
99780a7b 3991 goto err_tfm;
1da177e4 3992
611b30f7 3993 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
3994 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3995 hdev);
611b30f7
MH
3996 if (hdev->rfkill) {
3997 if (rfkill_register(hdev->rfkill) < 0) {
3998 rfkill_destroy(hdev->rfkill);
3999 hdev->rfkill = NULL;
4000 }
4001 }
4002
5e130367
JH
4003 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4004 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4005
a8b2d5c2 4006 set_bit(HCI_SETUP, &hdev->dev_flags);
004b0258 4007 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
ce2be9ac 4008
01cd3404 4009 if (hdev->dev_type == HCI_BREDR) {
56f87901
JH
4010 /* Assume BR/EDR support until proven otherwise (such as
4011 * through reading supported features during init).
4012 */
4013 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4014 }
ce2be9ac 4015
fcee3377
GP
4016 write_lock(&hci_dev_list_lock);
4017 list_add(&hdev->list, &hci_dev_list);
4018 write_unlock(&hci_dev_list_lock);
4019
4a964404
MH
4020 /* Devices that are marked for raw-only usage are unconfigured
4021 * and should not be included in normal operation.
fee746b0
MH
4022 */
4023 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4a964404 4024 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
fee746b0 4025
1da177e4 4026 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 4027 hci_dev_hold(hdev);
1da177e4 4028
19202573 4029 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 4030
1da177e4 4031 return id;
f48fd9c8 4032
99780a7b
JH
4033err_tfm:
4034 crypto_free_blkcipher(hdev->tfm_aes);
33ca954d
DH
4035err_wqueue:
4036 destroy_workqueue(hdev->workqueue);
6ead1bbc 4037 destroy_workqueue(hdev->req_workqueue);
33ca954d 4038err:
3df92b31 4039 ida_simple_remove(&hci_index_ida, hdev->id);
f48fd9c8 4040
33ca954d 4041 return error;
1da177e4
LT
4042}
4043EXPORT_SYMBOL(hci_register_dev);
4044
4045/* Unregister HCI device */
59735631 4046void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 4047{
3df92b31 4048 int i, id;
ef222013 4049
c13854ce 4050 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 4051
94324962
JH
4052 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4053
3df92b31
SL
4054 id = hdev->id;
4055
f20d09d5 4056 write_lock(&hci_dev_list_lock);
1da177e4 4057 list_del(&hdev->list);
f20d09d5 4058 write_unlock(&hci_dev_list_lock);
1da177e4
LT
4059
4060 hci_dev_do_close(hdev);
4061
cd4c5391 4062 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
4063 kfree_skb(hdev->reassembly[i]);
4064
b9b5ef18
GP
4065 cancel_work_sync(&hdev->power_on);
4066
ab81cbf9 4067 if (!test_bit(HCI_INIT, &hdev->flags) &&
d603b76b
MH
4068 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4069 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
09fd0de5 4070 hci_dev_lock(hdev);
744cf19e 4071 mgmt_index_removed(hdev);
09fd0de5 4072 hci_dev_unlock(hdev);
56e5cb86 4073 }
ab81cbf9 4074
2e58ef3e
JH
4075 /* mgmt_index_removed should take care of emptying the
4076 * pending list */
4077 BUG_ON(!list_empty(&hdev->mgmt_pending));
4078
1da177e4
LT
4079 hci_notify(hdev, HCI_DEV_UNREG);
4080
611b30f7
MH
4081 if (hdev->rfkill) {
4082 rfkill_unregister(hdev->rfkill);
4083 rfkill_destroy(hdev->rfkill);
4084 }
4085
99780a7b
JH
4086 if (hdev->tfm_aes)
4087 crypto_free_blkcipher(hdev->tfm_aes);
4088
bdc3e0f1 4089 device_del(&hdev->dev);
147e2d59 4090
0153e2ec
MH
4091 debugfs_remove_recursive(hdev->debugfs);
4092
f48fd9c8 4093 destroy_workqueue(hdev->workqueue);
6ead1bbc 4094 destroy_workqueue(hdev->req_workqueue);
f48fd9c8 4095
09fd0de5 4096 hci_dev_lock(hdev);
e2e0cacb 4097 hci_blacklist_clear(hdev);
2aeb9a1a 4098 hci_uuids_clear(hdev);
55ed8ca1 4099 hci_link_keys_clear(hdev);
b899efaf 4100 hci_smp_ltks_clear(hdev);
970c4e46 4101 hci_smp_irks_clear(hdev);
2763eda6 4102 hci_remote_oob_data_clear(hdev);
d2ab0ac1 4103 hci_white_list_clear(hdev);
373110c5 4104 hci_conn_params_clear_all(hdev);
09fd0de5 4105 hci_dev_unlock(hdev);
e2e0cacb 4106
dc946bd8 4107 hci_dev_put(hdev);
3df92b31
SL
4108
4109 ida_simple_remove(&hci_index_ida, id);
1da177e4
LT
4110}
4111EXPORT_SYMBOL(hci_unregister_dev);
4112
4113/* Suspend HCI device */
4114int hci_suspend_dev(struct hci_dev *hdev)
4115{
4116 hci_notify(hdev, HCI_DEV_SUSPEND);
4117 return 0;
4118}
4119EXPORT_SYMBOL(hci_suspend_dev);
4120
4121/* Resume HCI device */
4122int hci_resume_dev(struct hci_dev *hdev)
4123{
4124 hci_notify(hdev, HCI_DEV_RESUME);
4125 return 0;
4126}
4127EXPORT_SYMBOL(hci_resume_dev);
4128
76bca880 4129/* Receive frame from HCI drivers */
e1a26170 4130int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 4131{
76bca880 4132 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 4133 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
4134 kfree_skb(skb);
4135 return -ENXIO;
4136 }
4137
d82603c6 4138 /* Incoming skb */
76bca880
MH
4139 bt_cb(skb)->incoming = 1;
4140
4141 /* Time stamp */
4142 __net_timestamp(skb);
4143
76bca880 4144 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 4145 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 4146
76bca880
MH
4147 return 0;
4148}
4149EXPORT_SYMBOL(hci_recv_frame);
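/* Driver-side sketch (illustrative; buf and len are hypothetical driver
 * variables): a transport that has already reassembled one complete HCI
 * packet hands it to the core like this:
 *
 *	skb = bt_skb_alloc(len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	memcpy(skb_put(skb, len), buf, len);
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	hci_recv_frame(hdev, skb);
 *
 * Drivers that only see a raw byte stream can instead feed it to
 * hci_recv_stream_fragment() below and let hci_reassembly() split it
 * into packets.
 */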
4150
33e882a5 4151static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 4152 int count, __u8 index)
33e882a5
SS
4153{
4154 int len = 0;
4155 int hlen = 0;
4156 int remain = count;
4157 struct sk_buff *skb;
4158 struct bt_skb_cb *scb;
4159
4160 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 4161 index >= NUM_REASSEMBLY)
33e882a5
SS
4162 return -EILSEQ;
4163
4164 skb = hdev->reassembly[index];
4165
4166 if (!skb) {
4167 switch (type) {
4168 case HCI_ACLDATA_PKT:
4169 len = HCI_MAX_FRAME_SIZE;
4170 hlen = HCI_ACL_HDR_SIZE;
4171 break;
4172 case HCI_EVENT_PKT:
4173 len = HCI_MAX_EVENT_SIZE;
4174 hlen = HCI_EVENT_HDR_SIZE;
4175 break;
4176 case HCI_SCODATA_PKT:
4177 len = HCI_MAX_SCO_SIZE;
4178 hlen = HCI_SCO_HDR_SIZE;
4179 break;
4180 }
4181
1e429f38 4182 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
4183 if (!skb)
4184 return -ENOMEM;
4185
4186 scb = (void *) skb->cb;
4187 scb->expect = hlen;
4188 scb->pkt_type = type;
4189
33e882a5
SS
4190 hdev->reassembly[index] = skb;
4191 }
4192
4193 while (count) {
4194 scb = (void *) skb->cb;
89bb46d0 4195 len = min_t(uint, scb->expect, count);
33e882a5
SS
4196
4197 memcpy(skb_put(skb, len), data, len);
4198
4199 count -= len;
4200 data += len;
4201 scb->expect -= len;
4202 remain = count;
4203
4204 switch (type) {
4205 case HCI_EVENT_PKT:
4206 if (skb->len == HCI_EVENT_HDR_SIZE) {
4207 struct hci_event_hdr *h = hci_event_hdr(skb);
4208 scb->expect = h->plen;
4209
4210 if (skb_tailroom(skb) < scb->expect) {
4211 kfree_skb(skb);
4212 hdev->reassembly[index] = NULL;
4213 return -ENOMEM;
4214 }
4215 }
4216 break;
4217
4218 case HCI_ACLDATA_PKT:
4219 if (skb->len == HCI_ACL_HDR_SIZE) {
4220 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4221 scb->expect = __le16_to_cpu(h->dlen);
4222
4223 if (skb_tailroom(skb) < scb->expect) {
4224 kfree_skb(skb);
4225 hdev->reassembly[index] = NULL;
4226 return -ENOMEM;
4227 }
4228 }
4229 break;
4230
4231 case HCI_SCODATA_PKT:
4232 if (skb->len == HCI_SCO_HDR_SIZE) {
4233 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4234 scb->expect = h->dlen;
4235
4236 if (skb_tailroom(skb) < scb->expect) {
4237 kfree_skb(skb);
4238 hdev->reassembly[index] = NULL;
4239 return -ENOMEM;
4240 }
4241 }
4242 break;
4243 }
4244
4245 if (scb->expect == 0) {
4246 /* Complete frame */
4247
4248 bt_cb(skb)->pkt_type = type;
e1a26170 4249 hci_recv_frame(hdev, skb);
33e882a5
SS
4250
4251 hdev->reassembly[index] = NULL;
4252 return remain;
4253 }
4254 }
4255
4256 return remain;
4257}
4258
ef222013
MH
4259int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4260{
f39a3c06
SS
4261 int rem = 0;
4262
ef222013
MH
4263 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4264 return -EILSEQ;
4265
da5f6c37 4266 while (count) {
1e429f38 4267 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
4268 if (rem < 0)
4269 return rem;
ef222013 4270
f39a3c06
SS
4271 data += (count - rem);
4272 count = rem;
f81c6224 4273 }
ef222013 4274
f39a3c06 4275 return rem;
ef222013
MH
4276}
4277EXPORT_SYMBOL(hci_recv_fragment);
4278
99811510
SS
4279#define STREAM_REASSEMBLY 0
4280
4281int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4282{
4283 int type;
4284 int rem = 0;
4285
da5f6c37 4286 while (count) {
99811510
SS
4287 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4288
4289 if (!skb) {
4290 struct { char type; } *pkt;
4291
4292 /* Start of the frame */
4293 pkt = data;
4294 type = pkt->type;
4295
4296 data++;
4297 count--;
4298 } else
4299 type = bt_cb(skb)->pkt_type;
4300
1e429f38 4301 rem = hci_reassembly(hdev, type, data, count,
a8c5fb1a 4302 STREAM_REASSEMBLY);
99811510
SS
4303 if (rem < 0)
4304 return rem;
4305
4306 data += (count - rem);
4307 count = rem;
f81c6224 4308 }
99811510
SS
4309
4310 return rem;
4311}
4312EXPORT_SYMBOL(hci_recv_stream_fragment);
4313
1da177e4
LT
4314/* ---- Interface to upper protocols ---- */
4315
1da177e4
LT
4316int hci_register_cb(struct hci_cb *cb)
4317{
4318 BT_DBG("%p name %s", cb, cb->name);
4319
f20d09d5 4320 write_lock(&hci_cb_list_lock);
1da177e4 4321 list_add(&cb->list, &hci_cb_list);
f20d09d5 4322 write_unlock(&hci_cb_list_lock);
1da177e4
LT
4323
4324 return 0;
4325}
4326EXPORT_SYMBOL(hci_register_cb);
4327
4328int hci_unregister_cb(struct hci_cb *cb)
4329{
4330 BT_DBG("%p name %s", cb, cb->name);
4331
f20d09d5 4332 write_lock(&hci_cb_list_lock);
1da177e4 4333 list_del(&cb->list);
f20d09d5 4334 write_unlock(&hci_cb_list_lock);
1da177e4
LT
4335
4336 return 0;
4337}
4338EXPORT_SYMBOL(hci_unregister_cb);
4339
51086991 4340static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4 4341{
cdc52faa
MH
4342 int err;
4343
0d48d939 4344 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 4345
cd82e61c
MH
4346 /* Time stamp */
4347 __net_timestamp(skb);
1da177e4 4348
cd82e61c
MH
4349 /* Send copy to monitor */
4350 hci_send_to_monitor(hdev, skb);
4351
4352 if (atomic_read(&hdev->promisc)) {
4353 /* Send copy to the sockets */
470fe1b5 4354 hci_send_to_sock(hdev, skb);
1da177e4
LT
4355 }
4356
4357 /* Get rid of skb owner, prior to sending to the driver. */
4358 skb_orphan(skb);
4359
cdc52faa
MH
4360 err = hdev->send(hdev, skb);
4361 if (err < 0) {
4362 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4363 kfree_skb(skb);
4364 }
1da177e4
LT
4365}
4366
3119ae95
JH
4367void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4368{
4369 skb_queue_head_init(&req->cmd_q);
4370 req->hdev = hdev;
5d73e034 4371 req->err = 0;
3119ae95
JH
4372}
4373
4374int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4375{
4376 struct hci_dev *hdev = req->hdev;
4377 struct sk_buff *skb;
4378 unsigned long flags;
4379
4380 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4381
5d73e034
AG
4382 /* If an error occurred during request building, remove all HCI
4383 * commands queued on the HCI request queue.
4384 */
4385 if (req->err) {
4386 skb_queue_purge(&req->cmd_q);
4387 return req->err;
4388 }
4389
3119ae95
JH
4390 /* Do not allow empty requests */
4391 if (skb_queue_empty(&req->cmd_q))
382b0c39 4392 return -ENODATA;
3119ae95
JH
4393
4394 skb = skb_peek_tail(&req->cmd_q);
4395 bt_cb(skb)->req.complete = complete;
4396
4397 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4398 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4399 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4400
4401 queue_work(hdev->workqueue, &hdev->cmd_work);
4402
4403 return 0;
4404}
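/* Minimal request-API sketch (illustrative; request_complete_cb is a
 * hypothetical name):
 *
 *	static void request_complete_cb(struct hci_dev *hdev, u8 status)
 *	{
 *		BT_DBG("%s status 0x%2.2x", hdev->name, status);
 *	}
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_RESET, 0, NULL);
 *	if (hci_req_run(&req, request_complete_cb) < 0)
 *		BT_ERR("request failed to run");
 *
 * Only the skb of the last queued command carries the complete callback,
 * which is why hci_req_run() sets it via skb_peek_tail() above.
 */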
4405
1ca3a9d0 4406static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
07dc93dd 4407 u32 plen, const void *param)
1da177e4
LT
4408{
4409 int len = HCI_COMMAND_HDR_SIZE + plen;
4410 struct hci_command_hdr *hdr;
4411 struct sk_buff *skb;
4412
1da177e4 4413 skb = bt_skb_alloc(len, GFP_ATOMIC);
1ca3a9d0
JH
4414 if (!skb)
4415 return NULL;
1da177e4
LT
4416
4417 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 4418 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
4419 hdr->plen = plen;
4420
4421 if (plen)
4422 memcpy(skb_put(skb, plen), param, plen);
4423
4424 BT_DBG("skb len %d", skb->len);
4425
0d48d939 4426 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
c78ae283 4427
1ca3a9d0
JH
4428 return skb;
4429}
4430
4431/* Send HCI command */
07dc93dd
JH
4432int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4433 const void *param)
1ca3a9d0
JH
4434{
4435 struct sk_buff *skb;
4436
4437 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4438
4439 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4440 if (!skb) {
4441 BT_ERR("%s no memory for command", hdev->name);
4442 return -ENOMEM;
4443 }
4444
11714b3d
JH
4445 /* Stand-alone HCI commands must be flagged as
4446 * single-command requests.
4447 */
4448 bt_cb(skb)->req.start = true;
4449
1da177e4 4450 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 4451 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
4452
4453 return 0;
4454}
1da177e4 4455
71c76a17 4456/* Queue a command to an asynchronous HCI request */
07dc93dd
JH
4457void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4458 const void *param, u8 event)
71c76a17
JH
4459{
4460 struct hci_dev *hdev = req->hdev;
4461 struct sk_buff *skb;
4462
4463 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4464
34739c1e
AG
4465 /* If an error occurred during request building, there is no point in
4466 * queueing the HCI command. We can simply return.
4467 */
4468 if (req->err)
4469 return;
4470
71c76a17
JH
4471 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4472 if (!skb) {
5d73e034
AG
4473 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4474 hdev->name, opcode);
4475 req->err = -ENOMEM;
e348fe6b 4476 return;
71c76a17
JH
4477 }
4478
4479 if (skb_queue_empty(&req->cmd_q))
4480 bt_cb(skb)->req.start = true;
4481
02350a72
JH
4482 bt_cb(skb)->req.event = event;
4483
71c76a17 4484 skb_queue_tail(&req->cmd_q, skb);
71c76a17
JH
4485}
4486
07dc93dd
JH
4487void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4488 const void *param)
02350a72
JH
4489{
4490 hci_req_add_ev(req, opcode, plen, param, 0);
4491}
4492
1da177e4 4493/* Get data from the previously sent command */
a9de9248 4494void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
4495{
4496 struct hci_command_hdr *hdr;
4497
4498 if (!hdev->sent_cmd)
4499 return NULL;
4500
4501 hdr = (void *) hdev->sent_cmd->data;
4502
a9de9248 4503 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
4504 return NULL;
4505
f0e09510 4506 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
4507
4508 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4509}
4510
4511/* Send ACL data */
4512static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4513{
4514 struct hci_acl_hdr *hdr;
4515 int len = skb->len;
4516
badff6d0
ACM
4517 skb_push(skb, HCI_ACL_HDR_SIZE);
4518 skb_reset_transport_header(skb);
9c70220b 4519 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
4520 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4521 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
4522}
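/* The 16-bit handle field packs the 12-bit connection handle together
 * with the packet boundary/broadcast flags in the top four bits. Worked
 * example (the macros live in hci.h): for handle 0x002a and flags
 * ACL_START (0x02), hci_handle_pack(0x002a, 0x02) yields
 * (0x002a & 0x0fff) | (0x02 << 12) == 0x202a. hci_handle() and
 * hci_flags() undo this split on the receive path, as seen in
 * hci_acldata_packet() below.
 */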
4523
ee22be7e 4524static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 4525 struct sk_buff *skb, __u16 flags)
1da177e4 4526{
ee22be7e 4527 struct hci_conn *conn = chan->conn;
1da177e4
LT
4528 struct hci_dev *hdev = conn->hdev;
4529 struct sk_buff *list;
4530
087bfd99
GP
4531 skb->len = skb_headlen(skb);
4532 skb->data_len = 0;
4533
4534 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
4535
4536 switch (hdev->dev_type) {
4537 case HCI_BREDR:
4538 hci_add_acl_hdr(skb, conn->handle, flags);
4539 break;
4540 case HCI_AMP:
4541 hci_add_acl_hdr(skb, chan->handle, flags);
4542 break;
4543 default:
4544 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4545 return;
4546 }
087bfd99 4547
70f23020
AE
4548 list = skb_shinfo(skb)->frag_list;
4549 if (!list) {
1da177e4
LT
4550 /* Non-fragmented */
4551 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4552
73d80deb 4553 skb_queue_tail(queue, skb);
1da177e4
LT
4554 } else {
4555 /* Fragmented */
4556 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4557
4558 skb_shinfo(skb)->frag_list = NULL;
4559
4560 /* Queue all fragments atomically */
af3e6359 4561 spin_lock(&queue->lock);
1da177e4 4562
73d80deb 4563 __skb_queue_tail(queue, skb);
e702112f
AE
4564
4565 flags &= ~ACL_START;
4566 flags |= ACL_CONT;
1da177e4
LT
4567 do {
4568 skb = list; list = list->next;
8e87d142 4569
0d48d939 4570 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 4571 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
4572
4573 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4574
73d80deb 4575 __skb_queue_tail(queue, skb);
1da177e4
LT
4576 } while (list);
4577
af3e6359 4578 spin_unlock(&queue->lock);
1da177e4 4579 }
73d80deb
LAD
4580}
4581
4582void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4583{
ee22be7e 4584 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 4585
f0e09510 4586 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb 4587
ee22be7e 4588 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 4589
3eff45ea 4590 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4591}
1da177e4
LT
4592
4593/* Send SCO data */
0d861d8b 4594void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
4595{
4596 struct hci_dev *hdev = conn->hdev;
4597 struct hci_sco_hdr hdr;
4598
4599 BT_DBG("%s len %d", hdev->name, skb->len);
4600
aca3192c 4601 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
4602 hdr.dlen = skb->len;
4603
badff6d0
ACM
4604 skb_push(skb, HCI_SCO_HDR_SIZE);
4605 skb_reset_transport_header(skb);
9c70220b 4606 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4 4607
0d48d939 4608 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 4609
1da177e4 4610 skb_queue_tail(&conn->data_q, skb);
3eff45ea 4611 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4612}
1da177e4
LT
4613
4614/* ---- HCI TX task (outgoing data) ---- */
4615
4616/* HCI Connection scheduler */
6039aa73
GP
4617static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4618 int *quote)
1da177e4
LT
4619{
4620 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4621 struct hci_conn *conn = NULL, *c;
abc5de8f 4622 unsigned int num = 0, min = ~0;
1da177e4 4623
8e87d142 4624 /* We don't have to lock the device here. Connections are always
1da177e4 4625 * added and removed with the TX task disabled. */
bf4c6325
GP
4626
4627 rcu_read_lock();
4628
4629 list_for_each_entry_rcu(c, &h->list, list) {
769be974 4630 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 4631 continue;
769be974
MH
4632
4633 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4634 continue;
4635
1da177e4
LT
4636 num++;
4637
4638 if (c->sent < min) {
4639 min = c->sent;
4640 conn = c;
4641 }
52087a79
LAD
4642
4643 if (hci_conn_num(hdev, type) == num)
4644 break;
1da177e4
LT
4645 }
4646
bf4c6325
GP
4647 rcu_read_unlock();
4648
1da177e4 4649 if (conn) {
6ed58ec5
VT
4650 int cnt, q;
4651
4652 switch (conn->type) {
4653 case ACL_LINK:
4654 cnt = hdev->acl_cnt;
4655 break;
4656 case SCO_LINK:
4657 case ESCO_LINK:
4658 cnt = hdev->sco_cnt;
4659 break;
4660 case LE_LINK:
4661 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4662 break;
4663 default:
4664 cnt = 0;
4665 BT_ERR("Unknown link type");
4666 }
4667
4668 q = cnt / num;
1da177e4
LT
4669 *quote = q ? q : 1;
4670 } else
4671 *quote = 0;
4672
4673 BT_DBG("conn %p quote %d", conn, *quote);
4674 return conn;
4675}
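/* Quota arithmetic example (follows the code above): with 8 free ACL
 * buffers (cnt == 8) shared by 3 busy ACL connections (num == 3),
 * q == 8 / 3 == 2, so the least-served connection may send two packets
 * this round; when the division rounds down to zero the quote is clamped
 * to 1 so no connection is starved completely.
 */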
4676
6039aa73 4677static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
4678{
4679 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4680 struct hci_conn *c;
1da177e4 4681
bae1f5d9 4682 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 4683
bf4c6325
GP
4684 rcu_read_lock();
4685
1da177e4 4686 /* Kill stalled connections */
bf4c6325 4687 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 4688 if (c->type == type && c->sent) {
6ed93dc6
AE
4689 BT_ERR("%s killing stalled connection %pMR",
4690 hdev->name, &c->dst);
bed71748 4691 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
4692 }
4693 }
bf4c6325
GP
4694
4695 rcu_read_unlock();
1da177e4
LT
4696}
4697
6039aa73
GP
4698static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4699 int *quote)
1da177e4 4700{
73d80deb
LAD
4701 struct hci_conn_hash *h = &hdev->conn_hash;
4702 struct hci_chan *chan = NULL;
abc5de8f 4703 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 4704 struct hci_conn *conn;
73d80deb
LAD
4705 int cnt, q, conn_num = 0;
4706
4707 BT_DBG("%s", hdev->name);
4708
bf4c6325
GP
4709 rcu_read_lock();
4710
4711 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
4712 struct hci_chan *tmp;
4713
4714 if (conn->type != type)
4715 continue;
4716
4717 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4718 continue;
4719
4720 conn_num++;
4721
8192edef 4722 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
4723 struct sk_buff *skb;
4724
4725 if (skb_queue_empty(&tmp->data_q))
4726 continue;
4727
4728 skb = skb_peek(&tmp->data_q);
4729 if (skb->priority < cur_prio)
4730 continue;
4731
4732 if (skb->priority > cur_prio) {
4733 num = 0;
4734 min = ~0;
4735 cur_prio = skb->priority;
4736 }
4737
4738 num++;
4739
4740 if (conn->sent < min) {
4741 min = conn->sent;
4742 chan = tmp;
4743 }
4744 }
4745
4746 if (hci_conn_num(hdev, type) == conn_num)
4747 break;
4748 }
4749
bf4c6325
GP
4750 rcu_read_unlock();
4751
73d80deb
LAD
4752 if (!chan)
4753 return NULL;
4754
4755 switch (chan->conn->type) {
4756 case ACL_LINK:
4757 cnt = hdev->acl_cnt;
4758 break;
bd1eb66b
AE
4759 case AMP_LINK:
4760 cnt = hdev->block_cnt;
4761 break;
73d80deb
LAD
4762 case SCO_LINK:
4763 case ESCO_LINK:
4764 cnt = hdev->sco_cnt;
4765 break;
4766 case LE_LINK:
4767 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4768 break;
4769 default:
4770 cnt = 0;
4771 BT_ERR("Unknown link type");
4772 }
4773
4774 q = cnt / num;
4775 *quote = q ? q : 1;
4776 BT_DBG("chan %p quote %d", chan, *quote);
4777 return chan;
4778}
4779
02b20f0b
LAD
4780static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4781{
4782 struct hci_conn_hash *h = &hdev->conn_hash;
4783 struct hci_conn *conn;
4784 int num = 0;
4785
4786 BT_DBG("%s", hdev->name);
4787
bf4c6325
GP
4788 rcu_read_lock();
4789
4790 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
4791 struct hci_chan *chan;
4792
4793 if (conn->type != type)
4794 continue;
4795
4796 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4797 continue;
4798
4799 num++;
4800
8192edef 4801 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
4802 struct sk_buff *skb;
4803
4804 if (chan->sent) {
4805 chan->sent = 0;
4806 continue;
4807 }
4808
4809 if (skb_queue_empty(&chan->data_q))
4810 continue;
4811
4812 skb = skb_peek(&chan->data_q);
4813 if (skb->priority >= HCI_PRIO_MAX - 1)
4814 continue;
4815
4816 skb->priority = HCI_PRIO_MAX - 1;
4817
4818 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 4819 skb->priority);
02b20f0b
LAD
4820 }
4821
4822 if (hci_conn_num(hdev, type) == num)
4823 break;
4824 }
bf4c6325
GP
4825
4826 rcu_read_unlock();
4827
02b20f0b
LAD
4828}
4829
b71d385a
AE
4830static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4831{
4832 /* Calculate count of blocks used by this packet */
4833 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4834}
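/* Worked example: with block-based flow control and
 * hdev->block_len == 64, an skb carrying 130 bytes of ACL payload
 * (skb->len == HCI_ACL_HDR_SIZE + 130) needs DIV_ROUND_UP(130, 64) == 3
 * controller data blocks.
 */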
4835
6039aa73 4836static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 4837{
4a964404 4838 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1da177e4
LT
4839 /* ACL tx timeout must be longer than the maximum
4840 * link supervision timeout (40.9 seconds). */
63d2bc1b 4841 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 4842 HCI_ACL_TX_TIMEOUT))
bae1f5d9 4843 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 4844 }
63d2bc1b 4845}
1da177e4 4846
6039aa73 4847static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
4848{
4849 unsigned int cnt = hdev->acl_cnt;
4850 struct hci_chan *chan;
4851 struct sk_buff *skb;
4852 int quote;
4853
4854 __check_timeout(hdev, cnt);
04837f64 4855
73d80deb 4856 while (hdev->acl_cnt &&
a8c5fb1a 4857 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
4858 u32 priority = (skb_peek(&chan->data_q))->priority;
4859 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4860 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4861 skb->len, skb->priority);
73d80deb 4862
ec1cce24
LAD
4863 /* Stop if priority has changed */
4864 if (skb->priority < priority)
4865 break;
4866
4867 skb = skb_dequeue(&chan->data_q);
4868
73d80deb 4869 hci_conn_enter_active_mode(chan->conn,
04124681 4870 bt_cb(skb)->force_active);
04837f64 4871
57d17d70 4872 hci_send_frame(hdev, skb);
1da177e4
LT
4873 hdev->acl_last_tx = jiffies;
4874
4875 hdev->acl_cnt--;
73d80deb
LAD
4876 chan->sent++;
4877 chan->conn->sent++;
1da177e4
LT
4878 }
4879 }
02b20f0b
LAD
4880
4881 if (cnt != hdev->acl_cnt)
4882 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
4883}
4884
6039aa73 4885static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 4886{
63d2bc1b 4887 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
4888 struct hci_chan *chan;
4889 struct sk_buff *skb;
4890 int quote;
bd1eb66b 4891 u8 type;
b71d385a 4892
63d2bc1b 4893 __check_timeout(hdev, cnt);
b71d385a 4894
bd1eb66b
AE
4895 BT_DBG("%s", hdev->name);
4896
4897 if (hdev->dev_type == HCI_AMP)
4898 type = AMP_LINK;
4899 else
4900 type = ACL_LINK;
4901
b71d385a 4902 while (hdev->block_cnt > 0 &&
bd1eb66b 4903 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
4904 u32 priority = (skb_peek(&chan->data_q))->priority;
4905 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4906 int blocks;
4907
4908 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4909 skb->len, skb->priority);
b71d385a
AE
4910
4911 /* Stop if priority has changed */
4912 if (skb->priority < priority)
4913 break;
4914
4915 skb = skb_dequeue(&chan->data_q);
4916
4917 blocks = __get_blocks(hdev, skb);
4918 if (blocks > hdev->block_cnt)
4919 return;
4920
4921 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 4922 bt_cb(skb)->force_active);
b71d385a 4923
57d17d70 4924 hci_send_frame(hdev, skb);
b71d385a
AE
4925 hdev->acl_last_tx = jiffies;
4926
4927 hdev->block_cnt -= blocks;
4928 quote -= blocks;
4929
4930 chan->sent += blocks;
4931 chan->conn->sent += blocks;
4932 }
4933 }
4934
4935 if (cnt != hdev->block_cnt)
bd1eb66b 4936 hci_prio_recalculate(hdev, type);
b71d385a
AE
4937}
4938
6039aa73 4939static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
4940{
4941 BT_DBG("%s", hdev->name);
4942
bd1eb66b
AE
4943 /* No ACL link over BR/EDR controller */
4944 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4945 return;
4946
4947 /* No AMP link over AMP controller */
4948 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
4949 return;
4950
4951 switch (hdev->flow_ctl_mode) {
4952 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4953 hci_sched_acl_pkt(hdev);
4954 break;
4955
4956 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4957 hci_sched_acl_blk(hdev);
4958 break;
4959 }
4960}
4961
1da177e4 4962/* Schedule SCO */
6039aa73 4963static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
4964{
4965 struct hci_conn *conn;
4966 struct sk_buff *skb;
4967 int quote;
4968
4969 BT_DBG("%s", hdev->name);
4970
52087a79
LAD
4971 if (!hci_conn_num(hdev, SCO_LINK))
4972 return;
4973
1da177e4
LT
4974 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4975 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4976 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4977 hci_send_frame(hdev, skb);
1da177e4
LT
4978
4979 conn->sent++;
4980 if (conn->sent == ~0)
4981 conn->sent = 0;
4982 }
4983 }
4984}
4985
6039aa73 4986static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
4987{
4988 struct hci_conn *conn;
4989 struct sk_buff *skb;
4990 int quote;
4991
4992 BT_DBG("%s", hdev->name);
4993
52087a79
LAD
4994 if (!hci_conn_num(hdev, ESCO_LINK))
4995 return;
4996
8fc9ced3
GP
4997 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4998 &quote))) {
b6a0dc82
MH
4999 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5000 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 5001 hci_send_frame(hdev, skb);
b6a0dc82
MH
5002
5003 conn->sent++;
5004 if (conn->sent == ~0)
5005 conn->sent = 0;
5006 }
5007 }
5008}
5009
6039aa73 5010static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 5011{
73d80deb 5012 struct hci_chan *chan;
6ed58ec5 5013 struct sk_buff *skb;
02b20f0b 5014 int quote, cnt, tmp;
6ed58ec5
VT
5015
5016 BT_DBG("%s", hdev->name);
5017
52087a79
LAD
5018 if (!hci_conn_num(hdev, LE_LINK))
5019 return;
5020
4a964404 5021 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
6ed58ec5
VT
5022 /* LE tx timeout must be longer than the maximum
5023 * link supervision timeout (40.9 seconds). */
bae1f5d9 5024 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 5025 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 5026 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
5027 }
5028
5029 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 5030 tmp = cnt;
73d80deb 5031 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
5032 u32 priority = (skb_peek(&chan->data_q))->priority;
5033 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 5034 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 5035 skb->len, skb->priority);
6ed58ec5 5036
ec1cce24
LAD
5037 /* Stop if priority has changed */
5038 if (skb->priority < priority)
5039 break;
5040
5041 skb = skb_dequeue(&chan->data_q);
5042
57d17d70 5043 hci_send_frame(hdev, skb);
6ed58ec5
VT
5044 hdev->le_last_tx = jiffies;
5045
5046 cnt--;
73d80deb
LAD
5047 chan->sent++;
5048 chan->conn->sent++;
6ed58ec5
VT
5049 }
5050 }
73d80deb 5051
6ed58ec5
VT
5052 if (hdev->le_pkts)
5053 hdev->le_cnt = cnt;
5054 else
5055 hdev->acl_cnt = cnt;
02b20f0b
LAD
5056
5057 if (cnt != tmp)
5058 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
5059}
5060
3eff45ea 5061static void hci_tx_work(struct work_struct *work)
1da177e4 5062{
3eff45ea 5063 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
5064 struct sk_buff *skb;
5065
6ed58ec5 5066 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 5067 hdev->sco_cnt, hdev->le_cnt);
1da177e4 5068
52de599e
MH
5069 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5070 /* Schedule queues and send stuff to HCI driver */
5071 hci_sched_acl(hdev);
5072 hci_sched_sco(hdev);
5073 hci_sched_esco(hdev);
5074 hci_sched_le(hdev);
5075 }
6ed58ec5 5076
1da177e4
LT
5077 /* Send next queued raw (unknown type) packet */
5078 while ((skb = skb_dequeue(&hdev->raw_q)))
57d17d70 5079 hci_send_frame(hdev, skb);
1da177e4
LT
5080}
5081
25985edc 5082/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
5083
5084/* ACL data packet */
6039aa73 5085static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
5086{
5087 struct hci_acl_hdr *hdr = (void *) skb->data;
5088 struct hci_conn *conn;
5089 __u16 handle, flags;
5090
5091 skb_pull(skb, HCI_ACL_HDR_SIZE);
5092
5093 handle = __le16_to_cpu(hdr->handle);
5094 flags = hci_flags(handle);
5095 handle = hci_handle(handle);
5096
f0e09510 5097 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 5098 handle, flags);
1da177e4
LT
5099
5100 hdev->stat.acl_rx++;
5101
5102 hci_dev_lock(hdev);
5103 conn = hci_conn_hash_lookup_handle(hdev, handle);
5104 hci_dev_unlock(hdev);
8e87d142 5105
1da177e4 5106 if (conn) {
65983fc7 5107 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 5108
1da177e4 5109 /* Send to upper protocol */
686ebf28
UF
5110 l2cap_recv_acldata(conn, skb, flags);
5111 return;
1da177e4 5112 } else {
8e87d142 5113 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 5114 hdev->name, handle);
1da177e4
LT
5115 }
5116
5117 kfree_skb(skb);
5118}
5119
5120/* SCO data packet */
6039aa73 5121static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
5122{
5123 struct hci_sco_hdr *hdr = (void *) skb->data;
5124 struct hci_conn *conn;
5125 __u16 handle;
5126
5127 skb_pull(skb, HCI_SCO_HDR_SIZE);
5128
5129 handle = __le16_to_cpu(hdr->handle);
5130
f0e09510 5131 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
5132
5133 hdev->stat.sco_rx++;
5134
5135 hci_dev_lock(hdev);
5136 conn = hci_conn_hash_lookup_handle(hdev, handle);
5137 hci_dev_unlock(hdev);
5138
5139 if (conn) {
1da177e4 5140 /* Send to upper protocol */
686ebf28
UF
5141 sco_recv_scodata(conn, skb);
5142 return;
1da177e4 5143 } else {
8e87d142 5144 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 5145 hdev->name, handle);
1da177e4
LT
5146 }
5147
5148 kfree_skb(skb);
5149}
5150
9238f36a
JH
5151static bool hci_req_is_complete(struct hci_dev *hdev)
5152{
5153 struct sk_buff *skb;
5154
5155 skb = skb_peek(&hdev->cmd_q);
5156 if (!skb)
5157 return true;
5158
5159 return bt_cb(skb)->req.start;
5160}
5161
42c6b129
JH
5162static void hci_resend_last(struct hci_dev *hdev)
5163{
5164 struct hci_command_hdr *sent;
5165 struct sk_buff *skb;
5166 u16 opcode;
5167
5168 if (!hdev->sent_cmd)
5169 return;
5170
5171 sent = (void *) hdev->sent_cmd->data;
5172 opcode = __le16_to_cpu(sent->opcode);
5173 if (opcode == HCI_OP_RESET)
5174 return;
5175
5176 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5177 if (!skb)
5178 return;
5179
5180 skb_queue_head(&hdev->cmd_q, skb);
5181 queue_work(hdev->workqueue, &hdev->cmd_work);
5182}
5183
9238f36a
JH
5184void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5185{
5186 hci_req_complete_t req_complete = NULL;
5187 struct sk_buff *skb;
5188 unsigned long flags;
5189
5190 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5191
42c6b129
JH
5192 /* If the completed command doesn't match the last one that was
5193 * sent we need to do special handling of it.
9238f36a 5194 */
42c6b129
JH
5195 if (!hci_sent_cmd_data(hdev, opcode)) {
5196 /* Some CSR based controllers generate a spontaneous
5197 * reset complete event during init and any pending
5198 * command will never be completed. In such a case we
5199 * need to resend whatever was the last sent
5200 * command.
5201 */
5202 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5203 hci_resend_last(hdev);
5204
9238f36a 5205 return;
42c6b129 5206 }
9238f36a
JH
5207
5208 /* If the command succeeded and there's still more commands in
5209 * this request the request is not yet complete.
5210 */
5211 if (!status && !hci_req_is_complete(hdev))
5212 return;
5213
5214 /* If this was the last command in a request the complete
5215 * callback would be found in hdev->sent_cmd instead of the
5216 * command queue (hdev->cmd_q).
5217 */
5218 if (hdev->sent_cmd) {
5219 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
53e21fbc
JH
5220
5221 if (req_complete) {
5222 /* We must set the complete callback to NULL to
5223 * avoid calling the callback more than once if
5224 * this function gets called again.
5225 */
5226 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5227
9238f36a 5228 goto call_complete;
53e21fbc 5229 }
9238f36a
JH
5230 }
5231
5232 /* Remove all pending commands belonging to this request */
5233 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5234 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5235 if (bt_cb(skb)->req.start) {
5236 __skb_queue_head(&hdev->cmd_q, skb);
5237 break;
5238 }
5239
5240 req_complete = bt_cb(skb)->req.complete;
5241 kfree_skb(skb);
5242 }
5243 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5244
5245call_complete:
5246 if (req_complete)
5247 req_complete(hdev, status);
5248}
5249
b78752cc 5250static void hci_rx_work(struct work_struct *work)
1da177e4 5251{
b78752cc 5252 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
5253 struct sk_buff *skb;
5254
5255 BT_DBG("%s", hdev->name);
5256
1da177e4 5257 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
5258 /* Send copy to monitor */
5259 hci_send_to_monitor(hdev, skb);
5260
1da177e4
LT
5261 if (atomic_read(&hdev->promisc)) {
5262 /* Send copy to the sockets */
470fe1b5 5263 hci_send_to_sock(hdev, skb);
1da177e4
LT
5264 }
5265
fee746b0 5266 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1da177e4
LT
5267 kfree_skb(skb);
5268 continue;
5269 }
5270
5271 if (test_bit(HCI_INIT, &hdev->flags)) {
5272 /* Don't process data packets in this state. */
0d48d939 5273 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
5274 case HCI_ACLDATA_PKT:
5275 case HCI_SCODATA_PKT:
5276 kfree_skb(skb);
5277 continue;
3ff50b79 5278 }
1da177e4
LT
5279 }
5280
5281 /* Process frame */
0d48d939 5282 switch (bt_cb(skb)->pkt_type) {
1da177e4 5283 case HCI_EVENT_PKT:
b78752cc 5284 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
5285 hci_event_packet(hdev, skb);
5286 break;
5287
5288 case HCI_ACLDATA_PKT:
5289 BT_DBG("%s ACL data packet", hdev->name);
5290 hci_acldata_packet(hdev, skb);
5291 break;
5292
5293 case HCI_SCODATA_PKT:
5294 BT_DBG("%s SCO data packet", hdev->name);
5295 hci_scodata_packet(hdev, skb);
5296 break;
5297
5298 default:
5299 kfree_skb(skb);
5300 break;
5301 }
5302 }
1da177e4
LT
5303}
5304
c347b765 5305static void hci_cmd_work(struct work_struct *work)
1da177e4 5306{
c347b765 5307 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
5308 struct sk_buff *skb;
5309
2104786b
AE
5310 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5311 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 5312
1da177e4 5313 /* Send queued commands */
5a08ecce
AE
5314 if (atomic_read(&hdev->cmd_cnt)) {
5315 skb = skb_dequeue(&hdev->cmd_q);
5316 if (!skb)
5317 return;
5318
7585b97a 5319 kfree_skb(hdev->sent_cmd);
1da177e4 5320
a675d7f1 5321 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 5322 if (hdev->sent_cmd) {
1da177e4 5323 atomic_dec(&hdev->cmd_cnt);
57d17d70 5324 hci_send_frame(hdev, skb);
7bdb8a5c 5325 if (test_bit(HCI_RESET, &hdev->flags))
65cc2b49 5326 cancel_delayed_work(&hdev->cmd_timer);
7bdb8a5c 5327 else
65cc2b49
MH
5328 schedule_delayed_work(&hdev->cmd_timer,
5329 HCI_CMD_TIMEOUT);
1da177e4
LT
5330 } else {
5331 skb_queue_head(&hdev->cmd_q, skb);
c347b765 5332 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
5333 }
5334 }
5335}
b1efcc28
AG
5336
5337void hci_req_add_le_scan_disable(struct hci_request *req)
5338{
5339 struct hci_cp_le_set_scan_enable cp;
5340
5341 memset(&cp, 0, sizeof(cp));
5342 cp.enable = LE_SCAN_DISABLE;
5343 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5344}
a4790dbd 5345
8ef30fd3
AG
5346void hci_req_add_le_passive_scan(struct hci_request *req)
5347{
5348 struct hci_cp_le_set_scan_param param_cp;
5349 struct hci_cp_le_set_scan_enable enable_cp;
5350 struct hci_dev *hdev = req->hdev;
5351 u8 own_addr_type;
5352
6ab535a7
MH
5353 /* Set require_privacy to false since no SCAN_REQ is sent
5354 * during passive scanning. Not using an unresolvable address
5355 * here is important so that peer devices using direct
5356 * advertising with our address will be correctly reported
5357 * by the controller.
8ef30fd3 5358 */
6ab535a7 5359 if (hci_update_random_address(req, false, &own_addr_type))
8ef30fd3
AG
5360 return;
5361
5362 memset(&param_cp, 0, sizeof(param_cp));
5363 param_cp.type = LE_SCAN_PASSIVE;
5364 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5365 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5366 param_cp.own_address_type = own_addr_type;
5367 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5368 &param_cp);
5369
5370 memset(&enable_cp, 0, sizeof(enable_cp));
5371 enable_cp.enable = LE_SCAN_ENABLE;
4340a124 5372 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
8ef30fd3
AG
5373 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5374 &enable_cp);
5375}
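/* The parameters used here are the defaults set in hci_alloc_dev()
 * (60 ms interval, 30 ms window) unless they have been changed at
 * runtime; hci_update_background_scan() below uses this helper whenever
 * pending LE connections or reports require scanning.
 */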
5376
a4790dbd
AG
5377static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5378{
5379 if (status)
5380 BT_DBG("HCI request failed to update background scanning: "
5381 "status 0x%2.2x", status);
5382}
5383
5384/* This function controls the background scanning based on hdev->pend_le_conns
5385 * list. If there are pending LE connections we start the background scanning,
5386 * otherwise we stop it.
5387 *
5388 * This function requires the caller holds hdev->lock.
5389 */
5390void hci_update_background_scan(struct hci_dev *hdev)
5391{
a4790dbd
AG
5392 struct hci_request req;
5393 struct hci_conn *conn;
5394 int err;
5395
c20c02d5
MH
5396 if (!test_bit(HCI_UP, &hdev->flags) ||
5397 test_bit(HCI_INIT, &hdev->flags) ||
5398 test_bit(HCI_SETUP, &hdev->dev_flags) ||
d603b76b 5399 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
b8221770 5400 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
c20c02d5 5401 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
1c1697c0
MH
5402 return;
5403
a4790dbd
AG
5404 hci_req_init(&req, hdev);
5405
66f8455a
JH
5406 if (list_empty(&hdev->pend_le_conns) &&
5407 list_empty(&hdev->pend_le_reports)) {
0d2bf134
JH
5408 /* If there are no pending LE connections or devices
5409 * to be scanned for, we should stop the background
5410 * scanning.
a4790dbd
AG
5411 */
5412
5413 /* If controller is not scanning we are done. */
5414 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5415 return;
5416
5417 hci_req_add_le_scan_disable(&req);
5418
5419 BT_DBG("%s stopping background scanning", hdev->name);
5420 } else {
a4790dbd
AG
5421 /* If there is at least one pending LE connection, we should
5422 * keep the background scan running.
5423 */
5424
a4790dbd
AG
5425 /* If controller is connecting, we should not start scanning
5426 * since some controllers are not able to scan and connect at
5427 * the same time.
5428 */
5429 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5430 if (conn)
5431 return;
5432
4340a124
AG
5433 /* If controller is currently scanning, we stop it to ensure we
5434 * don't miss any advertising (due to duplicates filter).
5435 */
5436 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5437 hci_req_add_le_scan_disable(&req);
5438
8ef30fd3 5439 hci_req_add_le_passive_scan(&req);
a4790dbd
AG
5440
5441 BT_DBG("%s starting background scanning", hdev->name);
5442 }
5443
5444 err = hci_req_run(&req, update_background_scan_complete);
5445 if (err)
5446 BT_ERR("Failed to run HCI request: err %d", err);
5447}