/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

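/* The debugfs attributes below are created from __hci_init() while the
 * controller is still in its HCI_SETUP phase, so they are set up once
 * per controller rather than on every power cycle.
 */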
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int whitelist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->whitelist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int whitelist_open(struct inode *inode, struct file *file)
{
	return single_open(file, whitelist_show, inode->i_private);
}

static const struct file_operations whitelist_fops = {
	.open		= whitelist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

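/* The numeric attributes below come in get/set pairs that are wrapped
 * with DEFINE_SIMPLE_ATTRIBUTE(), which generates the file_operations
 * around them using the given printf format string.
 */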
static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val > hdev->conn_info_max_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_min_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_min_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
			conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val < hdev->conn_info_min_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_max_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_max_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
			conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->le_white_list, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
	.open		= white_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
		struct smp_irk *irk = list_entry(p, struct smp_irk, list);
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   __le64_to_cpu(ltk->rand), 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

static int conn_latency_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val > 0x01f3)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_latency = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_latency;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
			conn_latency_set, "%llu\n");

static int supervision_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x000a || val > 0x0c80)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_supv_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_supv_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
			supervision_timeout_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");

static int device_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct hci_conn_params *p;

	hci_dev_lock(hdev);
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
	.open		= device_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
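
/* Pop the last received event from hdev->recv_evt and check that it is
 * either the Command Complete event for the given opcode or, when a
 * specific event is requested, an event of that type. The skb is
 * returned on a match and freed in all failure cases.
 */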
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
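
/* Send a single HCI command and block until its completion event
 * arrives or the timeout expires. The returned skb carries the event
 * parameters and must be freed by the caller with kfree_skb().
 */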
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
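
/* __hci_req_sync() expects hdev->req_lock to already be held by the
 * caller; hci_req_sync() further below is the wrapper that takes the
 * lock and thereby serializes synchronous requests against each other.
 */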

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}
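
/* Stage-one init helpers: queue the commands that identify the
 * controller. bredr_init() and amp_init() differ mainly in the flow
 * control mode they select and in which identification commands the
 * transport requires.
 */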
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
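
/* Pick the inquiry result mode: 0x02 selects Inquiry Result with
 * Extended Inquiry Result, 0x01 Inquiry Result with RSSI and 0x00 the
 * standard format. The manufacturer/revision checks below special-case
 * controllers that appear to handle RSSI inquiry results without
 * advertising the corresponding feature bit.
 */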
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x0f;

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10; /* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20; /* LE Remote Connection
					    * Parameter Request
					    */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}

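/* Full controller initialization runs the request stages above in
 * order: stage 1 identifies the controller, stage 2 configures the
 * transport, stage 3 sets up event masks, link policy and LE, and
 * stage 4 enables optional features. AMP controllers stop after the
 * first stage.
 */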
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("whitelist", 0444, hdev->debugfs, hdev,
			    &whitelist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
			    &conn_info_min_age_fops);
	debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
			    &conn_info_max_age_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_file("identity", 0400, hdev->debugfs,
				    hdev, &identity_fops);
		debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
				    hdev, &rpa_timeout_fops);
		debugfs_create_file("random_address", 0444, hdev->debugfs,
				    hdev, &random_address_fops);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

		/* For controllers with a public address, provide a debug
		 * option to force the usage of the configured static
		 * address. By default the public address is used.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			debugfs_create_file("force_static_address", 0644,
					    hdev->debugfs, hdev,
					    &force_static_address_fops);

		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
				    &white_list_fops);
		debugfs_create_file("identity_resolving_keys", 0400,
				    hdev->debugfs, hdev,
				    &identity_resolving_keys_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("conn_latency", 0644, hdev->debugfs,
				    hdev, &conn_latency_fops);
		debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
				    hdev, &supervision_timeout_fops);
		debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
				    hdev, &adv_channel_map_fops);
		debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
				    &device_list_fops);
		debugfs_create_u16("discov_interleaved_timeout", 0644,
				   hdev->debugfs,
				   &hdev->discov_interleaved_timeout);
	}

	return 0;
}

static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return 0;
}

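/* The next four helpers are simple one-command requests: each queues a
 * single HCI command using the value passed in through opt.
 */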
static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

8e87d142 1910/* Get HCI device by index.
1da177e4
LT
1911 * Device is held on return. */
1912struct hci_dev *hci_dev_get(int index)
1913{
8035ded4 1914 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
1915
1916 BT_DBG("%d", index);
1917
1918 if (index < 0)
1919 return NULL;
1920
1921 read_lock(&hci_dev_list_lock);
8035ded4 1922 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
1923 if (d->id == index) {
1924 hdev = hci_dev_hold(d);
1925 break;
1926 }
1927 }
1928 read_unlock(&hci_dev_list_lock);
1929 return hdev;
1930}
1da177e4
LT
1931
1932/* ---- Inquiry support ---- */
ff9ef578 1933
30dc78e1
JH
1934bool hci_discovery_active(struct hci_dev *hdev)
1935{
1936 struct discovery_state *discov = &hdev->discovery;
1937
6fbe195d 1938 switch (discov->state) {
343f935b 1939 case DISCOVERY_FINDING:
6fbe195d 1940 case DISCOVERY_RESOLVING:
30dc78e1
JH
1941 return true;
1942
6fbe195d
AG
1943 default:
1944 return false;
1945 }
30dc78e1
JH
1946}
1947
ff9ef578
JH
1948void hci_discovery_set_state(struct hci_dev *hdev, int state)
1949{
bb3e0a33
JH
1950 int old_state = hdev->discovery.state;
1951
ff9ef578
JH
1952 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1953
bb3e0a33 1954 if (old_state == state)
ff9ef578
JH
1955 return;
1956
bb3e0a33
JH
1957 hdev->discovery.state = state;
1958
ff9ef578
JH
1959 switch (state) {
1960 case DISCOVERY_STOPPED:
c54c3860
AG
1961 hci_update_background_scan(hdev);
1962
bb3e0a33 1963 if (old_state != DISCOVERY_STARTING)
7b99b659 1964 mgmt_discovering(hdev, 0);
ff9ef578
JH
1965 break;
1966 case DISCOVERY_STARTING:
1967 break;
343f935b 1968 case DISCOVERY_FINDING:
ff9ef578
JH
1969 mgmt_discovering(hdev, 1);
1970 break;
30dc78e1
JH
1971 case DISCOVERY_RESOLVING:
1972 break;
ff9ef578
JH
1973 case DISCOVERY_STOPPING:
1974 break;
1975 }
ff9ef578
JH
1976}
1977
1f9b9a5d 1978void hci_inquiry_cache_flush(struct hci_dev *hdev)
1da177e4 1979{
30883512 1980 struct discovery_state *cache = &hdev->discovery;
b57c1a56 1981 struct inquiry_entry *p, *n;
1da177e4 1982
561aafbc
JH
1983 list_for_each_entry_safe(p, n, &cache->all, all) {
1984 list_del(&p->all);
b57c1a56 1985 kfree(p);
1da177e4 1986 }
561aafbc
JH
1987
1988 INIT_LIST_HEAD(&cache->unknown);
1989 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
1990}
1991
a8c5fb1a
GP
1992struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1993 bdaddr_t *bdaddr)
1da177e4 1994{
30883512 1995 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
1996 struct inquiry_entry *e;
1997
6ed93dc6 1998 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 1999
561aafbc
JH
2000 list_for_each_entry(e, &cache->all, all) {
2001 if (!bacmp(&e->data.bdaddr, bdaddr))
2002 return e;
2003 }
2004
2005 return NULL;
2006}
2007
2008struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 2009 bdaddr_t *bdaddr)
561aafbc 2010{
30883512 2011 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
2012 struct inquiry_entry *e;
2013
6ed93dc6 2014 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
2015
2016 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 2017 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
2018 return e;
2019 }
2020
2021 return NULL;
1da177e4
LT
2022}
2023
30dc78e1 2024struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
2025 bdaddr_t *bdaddr,
2026 int state)
30dc78e1
JH
2027{
2028 struct discovery_state *cache = &hdev->discovery;
2029 struct inquiry_entry *e;
2030
6ed93dc6 2031 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
2032
2033 list_for_each_entry(e, &cache->resolve, list) {
2034 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2035 return e;
2036 if (!bacmp(&e->data.bdaddr, bdaddr))
2037 return e;
2038 }
2039
2040 return NULL;
2041}
2042
a3d4e20a 2043void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
04124681 2044 struct inquiry_entry *ie)
a3d4e20a
JH
2045{
2046 struct discovery_state *cache = &hdev->discovery;
2047 struct list_head *pos = &cache->resolve;
2048 struct inquiry_entry *p;
2049
2050 list_del(&ie->list);
2051
2052 list_for_each_entry(p, &cache->resolve, list) {
2053 if (p->name_state != NAME_PENDING &&
a8c5fb1a 2054 abs(p->data.rssi) >= abs(ie->data.rssi))
a3d4e20a
JH
2055 break;
2056 pos = &p->list;
2057 }
2058
2059 list_add(&ie->list, pos);
2060}
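
/* Note: RSSI is reported in negative dBm, so a smaller absolute value
 * means a stronger signal. The insertion above keeps the resolve list
 * sorted by signal strength, so the strongest devices have their
 * remote names resolved first.
 */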
2061
af58925c
MH
2062u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2063 bool name_known)
1da177e4 2064{
30883512 2065 struct discovery_state *cache = &hdev->discovery;
70f23020 2066 struct inquiry_entry *ie;
af58925c 2067 u32 flags = 0;
1da177e4 2068
6ed93dc6 2069 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1da177e4 2070
2b2fec4d
SJ
2071 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2072
af58925c
MH
2073 if (!data->ssp_mode)
2074 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
388fc8fa 2075
70f23020 2076 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
a3d4e20a 2077 if (ie) {
af58925c
MH
2078 if (!ie->data.ssp_mode)
2079 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
388fc8fa 2080
a3d4e20a 2081 if (ie->name_state == NAME_NEEDED &&
a8c5fb1a 2082 data->rssi != ie->data.rssi) {
a3d4e20a
JH
2083 ie->data.rssi = data->rssi;
2084 hci_inquiry_cache_update_resolve(hdev, ie);
2085 }
2086
561aafbc 2087 goto update;
a3d4e20a 2088 }
561aafbc
JH
2089
2090 /* Entry not in the cache. Add new one. */
27f70f3e 2091 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
af58925c
MH
2092 if (!ie) {
2093 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2094 goto done;
2095 }
561aafbc
JH
2096
2097 list_add(&ie->all, &cache->all);
2098
2099 if (name_known) {
2100 ie->name_state = NAME_KNOWN;
2101 } else {
2102 ie->name_state = NAME_NOT_KNOWN;
2103 list_add(&ie->list, &cache->unknown);
2104 }
70f23020 2105
561aafbc
JH
2106update:
2107 if (name_known && ie->name_state != NAME_KNOWN &&
a8c5fb1a 2108 ie->name_state != NAME_PENDING) {
561aafbc
JH
2109 ie->name_state = NAME_KNOWN;
2110 list_del(&ie->list);
1da177e4
LT
2111 }
2112
70f23020
AE
2113 memcpy(&ie->data, data, sizeof(*data));
2114 ie->timestamp = jiffies;
1da177e4 2115 cache->timestamp = jiffies;
3175405b
JH
2116
2117 if (ie->name_state == NAME_NOT_KNOWN)
af58925c 2118 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
3175405b 2119
af58925c
MH
2120done:
2121 return flags;
1da177e4
LT
2122}
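
/* Note: the returned MGMT flags describe how the device should be
 * reported to userspace: MGMT_DEV_FOUND_LEGACY_PAIRING when the found
 * device does not support Secure Simple Pairing, and
 * MGMT_DEV_FOUND_CONFIRM_NAME when the remote name is not yet known
 * and needs confirmation.
 */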
2123
2124static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2125{
30883512 2126 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
2127 struct inquiry_info *info = (struct inquiry_info *) buf;
2128 struct inquiry_entry *e;
2129 int copied = 0;
2130
561aafbc 2131 list_for_each_entry(e, &cache->all, all) {
1da177e4 2132 struct inquiry_data *data = &e->data;
b57c1a56
JH
2133
2134 if (copied >= num)
2135 break;
2136
1da177e4
LT
2137 bacpy(&info->bdaddr, &data->bdaddr);
2138 info->pscan_rep_mode = data->pscan_rep_mode;
2139 info->pscan_period_mode = data->pscan_period_mode;
2140 info->pscan_mode = data->pscan_mode;
2141 memcpy(info->dev_class, data->dev_class, 3);
2142 info->clock_offset = data->clock_offset;
b57c1a56 2143
1da177e4 2144 info++;
b57c1a56 2145 copied++;
1da177e4
LT
2146 }
2147
2148 BT_DBG("cache %p, copied %d", cache, copied);
2149 return copied;
2150}
2151
42c6b129 2152static void hci_inq_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
2153{
2154 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
42c6b129 2155 struct hci_dev *hdev = req->hdev;
1da177e4
LT
2156 struct hci_cp_inquiry cp;
2157
2158 BT_DBG("%s", hdev->name);
2159
2160 if (test_bit(HCI_INQUIRY, &hdev->flags))
2161 return;
2162
2163 /* Start Inquiry */
2164 memcpy(&cp.lap, &ir->lap, 3);
2165 cp.length = ir->length;
2166 cp.num_rsp = ir->num_rsp;
42c6b129 2167 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
2168}
2169
3e13fa1e
AG
2170static int wait_inquiry(void *word)
2171{
2172 schedule();
2173 return signal_pending(current);
2174}
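
/* Note: wait_inquiry() is the action callback for the four-argument
 * wait_on_bit() used in hci_inquiry() below; it sleeps once per
 * wake-up and returns non-zero when a signal is pending, which aborts
 * the wait and produces the -EINTR return.
 */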
2175
1da177e4
LT
2176int hci_inquiry(void __user *arg)
2177{
2178 __u8 __user *ptr = arg;
2179 struct hci_inquiry_req ir;
2180 struct hci_dev *hdev;
2181 int err = 0, do_inquiry = 0, max_rsp;
2182 long timeo;
2183 __u8 *buf;
2184
2185 if (copy_from_user(&ir, ptr, sizeof(ir)))
2186 return -EFAULT;
2187
5a08ecce
AE
2188 hdev = hci_dev_get(ir.dev_id);
2189 if (!hdev)
1da177e4
LT
2190 return -ENODEV;
2191
0736cfa8
MH
2192 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2193 err = -EBUSY;
2194 goto done;
2195 }
2196
4a964404 2197 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
2198 err = -EOPNOTSUPP;
2199 goto done;
2200 }
2201
5b69bef5
MH
2202 if (hdev->dev_type != HCI_BREDR) {
2203 err = -EOPNOTSUPP;
2204 goto done;
2205 }
2206
56f87901
JH
2207 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2208 err = -EOPNOTSUPP;
2209 goto done;
2210 }
2211
09fd0de5 2212 hci_dev_lock(hdev);
8e87d142 2213 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 2214 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1f9b9a5d 2215 hci_inquiry_cache_flush(hdev);
1da177e4
LT
2216 do_inquiry = 1;
2217 }
09fd0de5 2218 hci_dev_unlock(hdev);
1da177e4 2219
04837f64 2220 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
2221
2222 if (do_inquiry) {
01178cd4
JH
2223 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2224 timeo);
70f23020
AE
2225 if (err < 0)
2226 goto done;
3e13fa1e
AG
2227
2228 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2229 * cleared). If it is interrupted by a signal, return -EINTR.
2230 */
2231 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2232 TASK_INTERRUPTIBLE))
2233 return -EINTR;
70f23020 2234 }
1da177e4 2235
8fc9ced3
GP
2236 /* For an unlimited number of responses we will use a buffer with
2237 * 255 entries
2238 */
1da177e4
LT
2239 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2240
2241 /* cache_dump can't sleep. Therefore we allocate a temporary buffer
2242 * and then copy it to user space.
2243 */
01df8c31 2244 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 2245 if (!buf) {
1da177e4
LT
2246 err = -ENOMEM;
2247 goto done;
2248 }
2249
09fd0de5 2250 hci_dev_lock(hdev);
1da177e4 2251 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 2252 hci_dev_unlock(hdev);
1da177e4
LT
2253
2254 BT_DBG("num_rsp %d", ir.num_rsp);
2255
2256 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2257 ptr += sizeof(ir);
2258 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 2259 ir.num_rsp))
1da177e4 2260 err = -EFAULT;
8e87d142 2261 } else
1da177e4
LT
2262 err = -EFAULT;
2263
2264 kfree(buf);
2265
2266done:
2267 hci_dev_put(hdev);
2268 return err;
2269}
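
/* Note: the reply layout mirrors the request: the possibly updated
 * hci_inquiry_req header is copied back first, immediately followed by
 * ir.num_rsp packed struct inquiry_info entries.
 */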
2270
cbed0ca1 2271static int hci_dev_do_open(struct hci_dev *hdev)
1da177e4 2272{
1da177e4
LT
2273 int ret = 0;
2274
1da177e4
LT
2275 BT_DBG("%s %p", hdev->name, hdev);
2276
2277 hci_req_lock(hdev);
2278
94324962
JH
2279 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2280 ret = -ENODEV;
2281 goto done;
2282 }
2283
d603b76b
MH
2284 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2285 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
a5c8f270
MH
2286 /* Check for rfkill but allow the HCI setup stage to
2287 * proceed (which in itself doesn't cause any RF activity).
2288 */
2289 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2290 ret = -ERFKILL;
2291 goto done;
2292 }
2293
2294 /* Check for valid public address or a configured static
2295 * random address, but let the HCI setup proceed to
2296 * be able to determine if there is a public address
2297 * or not.
2298 *
c6beca0e
MH
2299 * In case of user channel usage, it is not important
2300 * if a public address or static random address is
2301 * available.
2302 *
a5c8f270
MH
2303 * This check is only valid for BR/EDR controllers
2304 * since AMP controllers do not have an address.
2305 */
c6beca0e
MH
2306 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2307 hdev->dev_type == HCI_BREDR &&
a5c8f270
MH
2308 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2309 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2310 ret = -EADDRNOTAVAIL;
2311 goto done;
2312 }
611b30f7
MH
2313 }
2314
1da177e4
LT
2315 if (test_bit(HCI_UP, &hdev->flags)) {
2316 ret = -EALREADY;
2317 goto done;
2318 }
2319
1da177e4
LT
2320 if (hdev->open(hdev)) {
2321 ret = -EIO;
2322 goto done;
2323 }
2324
f41c70c4
MH
2325 atomic_set(&hdev->cmd_cnt, 1);
2326 set_bit(HCI_INIT, &hdev->flags);
2327
af202f84
MH
2328 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2329 if (hdev->setup)
2330 ret = hdev->setup(hdev);
f41c70c4 2331
af202f84
MH
2332 /* The transport driver can set these quirks before
2333 * creating the HCI device or in its setup callback.
2334 *
2335 * In case any of them is set, the controller has to
2336 * start up as unconfigured.
2337 */
eb1904f4
MH
2338 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2339 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
89bc22d2 2340 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
0ebca7d6
MH
2341
2342 /* For an unconfigured controller it is required to
2343 * read at least the version information provided by
2344 * the Read Local Version Information command.
2345 *
2346 * If the set_bdaddr driver callback is provided, then
2347 * also the original Bluetooth public device address
2348 * will be read using the Read BD Address command.
2349 */
2350 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2351 ret = __hci_unconf_init(hdev);
89bc22d2
MH
2352 }
2353
9713c17b
MH
2354 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2355 /* If public address change is configured, ensure that
2356 * the address gets programmed. If the driver does not
2357 * support changing the public address, fail the power
2358 * on procedure.
2359 */
2360 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2361 hdev->set_bdaddr)
24c457e2
MH
2362 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2363 else
2364 ret = -EADDRNOTAVAIL;
2365 }
2366
f41c70c4 2367 if (!ret) {
4a964404 2368 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
0736cfa8 2369 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
f41c70c4 2370 ret = __hci_init(hdev);
1da177e4
LT
2371 }
2372
f41c70c4
MH
2373 clear_bit(HCI_INIT, &hdev->flags);
2374
1da177e4
LT
2375 if (!ret) {
2376 hci_dev_hold(hdev);
d6bfd59c 2377 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1da177e4
LT
2378 set_bit(HCI_UP, &hdev->flags);
2379 hci_notify(hdev, HCI_DEV_UP);
bb4b2a9a 2380 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
d603b76b 2381 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
4a964404 2382 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
0736cfa8 2383 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1514b892 2384 hdev->dev_type == HCI_BREDR) {
09fd0de5 2385 hci_dev_lock(hdev);
744cf19e 2386 mgmt_powered(hdev, 1);
09fd0de5 2387 hci_dev_unlock(hdev);
56e5cb86 2388 }
8e87d142 2389 } else {
1da177e4 2390 /* Init failed, cleanup */
3eff45ea 2391 flush_work(&hdev->tx_work);
c347b765 2392 flush_work(&hdev->cmd_work);
b78752cc 2393 flush_work(&hdev->rx_work);
1da177e4
LT
2394
2395 skb_queue_purge(&hdev->cmd_q);
2396 skb_queue_purge(&hdev->rx_q);
2397
2398 if (hdev->flush)
2399 hdev->flush(hdev);
2400
2401 if (hdev->sent_cmd) {
2402 kfree_skb(hdev->sent_cmd);
2403 hdev->sent_cmd = NULL;
2404 }
2405
2406 hdev->close(hdev);
fee746b0 2407 hdev->flags &= BIT(HCI_RAW);
1da177e4
LT
2408 }
2409
2410done:
2411 hci_req_unlock(hdev);
1da177e4
LT
2412 return ret;
2413}
2414
cbed0ca1
JH
2415/* ---- HCI ioctl helpers ---- */
2416
2417int hci_dev_open(__u16 dev)
2418{
2419 struct hci_dev *hdev;
2420 int err;
2421
2422 hdev = hci_dev_get(dev);
2423 if (!hdev)
2424 return -ENODEV;
2425
4a964404 2426 /* Devices that are marked as unconfigured can only be powered
fee746b0
MH
2427 * up as user channel. Trying to bring them up as normal devices
2428 * will result in a failure. Only user channel operation is
2429 * possible.
2430 *
2431 * When this function is called for a user channel, the flag
2432 * HCI_USER_CHANNEL will be set first before attempting to
2433 * open the device.
2434 */
4a964404 2435 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
fee746b0
MH
2436 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2437 err = -EOPNOTSUPP;
2438 goto done;
2439 }
2440
e1d08f40
JH
2441 /* We need to ensure that no other power on/off work is pending
2442 * before proceeding to call hci_dev_do_open. This is
2443 * particularly important if the setup procedure has not yet
2444 * completed.
2445 */
2446 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2447 cancel_delayed_work(&hdev->power_off);
2448
a5c8f270
MH
2449 /* After this call it is guaranteed that the setup procedure
2450 * has finished. This means that error conditions like RFKILL
2451 * or no valid public or static random address apply.
2452 */
e1d08f40
JH
2453 flush_workqueue(hdev->req_workqueue);
2454
12aa4f0a
MH
2455 /* For controllers that do not use the management interface and
2456 * are brought up through the legacy ioctl, set the HCI_PAIRABLE bit
2457 * so that pairing works for them. Once the management interface
2458 * is in use this bit will be cleared again and userspace has
2459 * to explicitly enable it.
2460 */
2461 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2462 !test_bit(HCI_MGMT, &hdev->dev_flags))
2463 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2464
cbed0ca1
JH
2465 err = hci_dev_do_open(hdev);
2466
fee746b0 2467done:
cbed0ca1 2468 hci_dev_put(hdev);
cbed0ca1
JH
2469 return err;
2470}
2471
d7347f3c
JH
2472/* This function requires the caller holds hdev->lock */
2473static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2474{
2475 struct hci_conn_params *p;
2476
2477 list_for_each_entry(p, &hdev->le_conn_params, list)
2478 list_del_init(&p->action);
2479
2480 BT_DBG("All LE pending actions cleared");
2481}
2482
1da177e4
LT
2483static int hci_dev_do_close(struct hci_dev *hdev)
2484{
2485 BT_DBG("%s %p", hdev->name, hdev);
2486
78c04c0b
VCG
2487 cancel_delayed_work(&hdev->power_off);
2488
1da177e4
LT
2489 hci_req_cancel(hdev, ENODEV);
2490 hci_req_lock(hdev);
2491
2492 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
65cc2b49 2493 cancel_delayed_work_sync(&hdev->cmd_timer);
1da177e4
LT
2494 hci_req_unlock(hdev);
2495 return 0;
2496 }
2497
3eff45ea
GP
2498 /* Flush RX and TX works */
2499 flush_work(&hdev->tx_work);
b78752cc 2500 flush_work(&hdev->rx_work);
1da177e4 2501
16ab91ab 2502 if (hdev->discov_timeout > 0) {
e0f9309f 2503 cancel_delayed_work(&hdev->discov_off);
16ab91ab 2504 hdev->discov_timeout = 0;
5e5282bb 2505 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
310a3d48 2506 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
16ab91ab
JH
2507 }
2508
a8b2d5c2 2509 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
7d78525d
JH
2510 cancel_delayed_work(&hdev->service_cache);
2511
7ba8b4be 2512 cancel_delayed_work_sync(&hdev->le_scan_disable);
4518bb0f
JH
2513
2514 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2515 cancel_delayed_work_sync(&hdev->rpa_expired);
7ba8b4be 2516
09fd0de5 2517 hci_dev_lock(hdev);
1f9b9a5d 2518 hci_inquiry_cache_flush(hdev);
1da177e4 2519 hci_conn_hash_flush(hdev);
d7347f3c 2520 hci_pend_le_actions_clear(hdev);
09fd0de5 2521 hci_dev_unlock(hdev);
1da177e4
LT
2522
2523 hci_notify(hdev, HCI_DEV_DOWN);
2524
2525 if (hdev->flush)
2526 hdev->flush(hdev);
2527
2528 /* Reset device */
2529 skb_queue_purge(&hdev->cmd_q);
2530 atomic_set(&hdev->cmd_cnt, 1);
4a964404
MH
2531 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2532 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
a6c511c6 2533 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1da177e4 2534 set_bit(HCI_INIT, &hdev->flags);
01178cd4 2535 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1da177e4
LT
2536 clear_bit(HCI_INIT, &hdev->flags);
2537 }
2538
c347b765
GP
2539 /* flush cmd work */
2540 flush_work(&hdev->cmd_work);
1da177e4
LT
2541
2542 /* Drop queues */
2543 skb_queue_purge(&hdev->rx_q);
2544 skb_queue_purge(&hdev->cmd_q);
2545 skb_queue_purge(&hdev->raw_q);
2546
2547 /* Drop last sent command */
2548 if (hdev->sent_cmd) {
65cc2b49 2549 cancel_delayed_work_sync(&hdev->cmd_timer);
1da177e4
LT
2550 kfree_skb(hdev->sent_cmd);
2551 hdev->sent_cmd = NULL;
2552 }
2553
b6ddb638
JH
2554 kfree_skb(hdev->recv_evt);
2555 hdev->recv_evt = NULL;
2556
1da177e4
LT
2557 /* After this point our queues are empty
2558 * and no tasks are scheduled. */
2559 hdev->close(hdev);
2560
35b973c9 2561 /* Clear flags */
fee746b0 2562 hdev->flags &= BIT(HCI_RAW);
35b973c9
JH
2563 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2564
93c311a0
MH
2565 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2566 if (hdev->dev_type == HCI_BREDR) {
2567 hci_dev_lock(hdev);
2568 mgmt_powered(hdev, 0);
2569 hci_dev_unlock(hdev);
2570 }
8ee56540 2571 }
5add6af8 2572
ced5c338 2573 /* Controller radio is available but is currently powered down */
536619e8 2574 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
ced5c338 2575
e59fda8d 2576 memset(hdev->eir, 0, sizeof(hdev->eir));
09b3c3fb 2577 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
7a4cd51d 2578 bacpy(&hdev->random_addr, BDADDR_ANY);
e59fda8d 2579
1da177e4
LT
2580 hci_req_unlock(hdev);
2581
2582 hci_dev_put(hdev);
2583 return 0;
2584}
2585
2586int hci_dev_close(__u16 dev)
2587{
2588 struct hci_dev *hdev;
2589 int err;
2590
70f23020
AE
2591 hdev = hci_dev_get(dev);
2592 if (!hdev)
1da177e4 2593 return -ENODEV;
8ee56540 2594
0736cfa8
MH
2595 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2596 err = -EBUSY;
2597 goto done;
2598 }
2599
8ee56540
MH
2600 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2601 cancel_delayed_work(&hdev->power_off);
2602
1da177e4 2603 err = hci_dev_do_close(hdev);
8ee56540 2604
0736cfa8 2605done:
1da177e4
LT
2606 hci_dev_put(hdev);
2607 return err;
2608}
2609
2610int hci_dev_reset(__u16 dev)
2611{
2612 struct hci_dev *hdev;
2613 int ret = 0;
2614
70f23020
AE
2615 hdev = hci_dev_get(dev);
2616 if (!hdev)
1da177e4
LT
2617 return -ENODEV;
2618
2619 hci_req_lock(hdev);
1da177e4 2620
808a049e
MH
2621 if (!test_bit(HCI_UP, &hdev->flags)) {
2622 ret = -ENETDOWN;
1da177e4 2623 goto done;
808a049e 2624 }
1da177e4 2625
0736cfa8
MH
2626 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2627 ret = -EBUSY;
2628 goto done;
2629 }
2630
4a964404 2631 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
2632 ret = -EOPNOTSUPP;
2633 goto done;
2634 }
2635
1da177e4
LT
2636 /* Drop queues */
2637 skb_queue_purge(&hdev->rx_q);
2638 skb_queue_purge(&hdev->cmd_q);
2639
09fd0de5 2640 hci_dev_lock(hdev);
1f9b9a5d 2641 hci_inquiry_cache_flush(hdev);
1da177e4 2642 hci_conn_hash_flush(hdev);
09fd0de5 2643 hci_dev_unlock(hdev);
1da177e4
LT
2644
2645 if (hdev->flush)
2646 hdev->flush(hdev);
2647
8e87d142 2648 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 2649 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4 2650
fee746b0 2651 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1da177e4
LT
2652
2653done:
1da177e4
LT
2654 hci_req_unlock(hdev);
2655 hci_dev_put(hdev);
2656 return ret;
2657}
2658
2659int hci_dev_reset_stat(__u16 dev)
2660{
2661 struct hci_dev *hdev;
2662 int ret = 0;
2663
70f23020
AE
2664 hdev = hci_dev_get(dev);
2665 if (!hdev)
1da177e4
LT
2666 return -ENODEV;
2667
0736cfa8
MH
2668 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2669 ret = -EBUSY;
2670 goto done;
2671 }
2672
4a964404 2673 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
2674 ret = -EOPNOTSUPP;
2675 goto done;
2676 }
2677
1da177e4
LT
2678 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2679
0736cfa8 2680done:
1da177e4 2681 hci_dev_put(hdev);
1da177e4
LT
2682 return ret;
2683}
2684
123abc08
JH
2685static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2686{
bc6d2d04 2687 bool conn_changed, discov_changed;
123abc08
JH
2688
2689 BT_DBG("%s scan 0x%02x", hdev->name, scan);
2690
2691 if ((scan & SCAN_PAGE))
2692 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2693 &hdev->dev_flags);
2694 else
2695 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2696 &hdev->dev_flags);
2697
bc6d2d04
JH
2698 if ((scan & SCAN_INQUIRY)) {
2699 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2700 &hdev->dev_flags);
2701 } else {
2702 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2703 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2704 &hdev->dev_flags);
2705 }
2706
123abc08
JH
2707 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2708 return;
2709
bc6d2d04
JH
2710 if (conn_changed || discov_changed) {
2711 /* In case this was disabled through mgmt */
2712 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2713
2714 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2715 mgmt_update_adv_data(hdev);
2716
123abc08 2717 mgmt_new_settings(hdev);
bc6d2d04 2718 }
123abc08
JH
2719}
2720
1da177e4
LT
2721int hci_dev_cmd(unsigned int cmd, void __user *arg)
2722{
2723 struct hci_dev *hdev;
2724 struct hci_dev_req dr;
2725 int err = 0;
2726
2727 if (copy_from_user(&dr, arg, sizeof(dr)))
2728 return -EFAULT;
2729
70f23020
AE
2730 hdev = hci_dev_get(dr.dev_id);
2731 if (!hdev)
1da177e4
LT
2732 return -ENODEV;
2733
0736cfa8
MH
2734 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2735 err = -EBUSY;
2736 goto done;
2737 }
2738
4a964404 2739 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
2740 err = -EOPNOTSUPP;
2741 goto done;
2742 }
2743
5b69bef5
MH
2744 if (hdev->dev_type != HCI_BREDR) {
2745 err = -EOPNOTSUPP;
2746 goto done;
2747 }
2748
56f87901
JH
2749 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2750 err = -EOPNOTSUPP;
2751 goto done;
2752 }
2753
1da177e4
LT
2754 switch (cmd) {
2755 case HCISETAUTH:
01178cd4
JH
2756 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2757 HCI_INIT_TIMEOUT);
1da177e4
LT
2758 break;
2759
2760 case HCISETENCRYPT:
2761 if (!lmp_encrypt_capable(hdev)) {
2762 err = -EOPNOTSUPP;
2763 break;
2764 }
2765
2766 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2767 /* Auth must be enabled first */
01178cd4
JH
2768 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2769 HCI_INIT_TIMEOUT);
1da177e4
LT
2770 if (err)
2771 break;
2772 }
2773
01178cd4
JH
2774 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2775 HCI_INIT_TIMEOUT);
1da177e4
LT
2776 break;
2777
2778 case HCISETSCAN:
01178cd4
JH
2779 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2780 HCI_INIT_TIMEOUT);
91a668b0 2781
bc6d2d04
JH
2782 /* Ensure that the connectable and discoverable states
2783 * get correctly modified as this was a non-mgmt change.
91a668b0 2784 */
123abc08
JH
2785 if (!err)
2786 hci_update_scan_state(hdev, dr.dev_opt);
1da177e4
LT
2787 break;
2788
1da177e4 2789 case HCISETLINKPOL:
01178cd4
JH
2790 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2791 HCI_INIT_TIMEOUT);
1da177e4
LT
2792 break;
2793
2794 case HCISETLINKMODE:
e4e8e37c
MH
2795 hdev->link_mode = ((__u16) dr.dev_opt) &
2796 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2797 break;
2798
2799 case HCISETPTYPE:
2800 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
2801 break;
2802
2803 case HCISETACLMTU:
e4e8e37c
MH
2804 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2805 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
2806 break;
2807
2808 case HCISETSCOMTU:
e4e8e37c
MH
2809 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2810 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
2811 break;
2812
2813 default:
2814 err = -EINVAL;
2815 break;
2816 }
e4e8e37c 2817
0736cfa8 2818done:
1da177e4
LT
2819 hci_dev_put(hdev);
2820 return err;
2821}
2822
2823int hci_get_dev_list(void __user *arg)
2824{
8035ded4 2825 struct hci_dev *hdev;
1da177e4
LT
2826 struct hci_dev_list_req *dl;
2827 struct hci_dev_req *dr;
1da177e4
LT
2828 int n = 0, size, err;
2829 __u16 dev_num;
2830
2831 if (get_user(dev_num, (__u16 __user *) arg))
2832 return -EFAULT;
2833
2834 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2835 return -EINVAL;
2836
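
/* Note: bounding dev_num by (PAGE_SIZE * 2) / sizeof(*dr) keeps the
 * kzalloc() below to roughly two pages, since the header and all
 * hci_dev_req entries live in one contiguous buffer.
 */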
2837 size = sizeof(*dl) + dev_num * sizeof(*dr);
2838
70f23020
AE
2839 dl = kzalloc(size, GFP_KERNEL);
2840 if (!dl)
1da177e4
LT
2841 return -ENOMEM;
2842
2843 dr = dl->dev_req;
2844
f20d09d5 2845 read_lock(&hci_dev_list_lock);
8035ded4 2846 list_for_each_entry(hdev, &hci_dev_list, list) {
2e84d8db
MH
2847 unsigned long flags = hdev->flags;
2848
2849 /* When the auto-off is configured it means the transport
2850 * is running, but in that case still indicate that the
2851 * device is actually down.
2852 */
2853 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2854 flags &= ~BIT(HCI_UP);
c542a06c 2855
1da177e4 2856 (dr + n)->dev_id = hdev->id;
2e84d8db 2857 (dr + n)->dev_opt = flags;
c542a06c 2858
1da177e4
LT
2859 if (++n >= dev_num)
2860 break;
2861 }
f20d09d5 2862 read_unlock(&hci_dev_list_lock);
1da177e4
LT
2863
2864 dl->dev_num = n;
2865 size = sizeof(*dl) + n * sizeof(*dr);
2866
2867 err = copy_to_user(arg, dl, size);
2868 kfree(dl);
2869
2870 return err ? -EFAULT : 0;
2871}
2872
2873int hci_get_dev_info(void __user *arg)
2874{
2875 struct hci_dev *hdev;
2876 struct hci_dev_info di;
2e84d8db 2877 unsigned long flags;
1da177e4
LT
2878 int err = 0;
2879
2880 if (copy_from_user(&di, arg, sizeof(di)))
2881 return -EFAULT;
2882
70f23020
AE
2883 hdev = hci_dev_get(di.dev_id);
2884 if (!hdev)
1da177e4
LT
2885 return -ENODEV;
2886
2e84d8db
MH
2887 /* When the auto-off is configured it means the transport
2888 * is running, but in that case still indicate that the
2889 * device is actually down.
2890 */
2891 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2892 flags = hdev->flags & ~BIT(HCI_UP);
2893 else
2894 flags = hdev->flags;
ab81cbf9 2895
1da177e4
LT
2896 strcpy(di.name, hdev->name);
2897 di.bdaddr = hdev->bdaddr;
60f2a3ed 2898 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2e84d8db 2899 di.flags = flags;
1da177e4 2900 di.pkt_type = hdev->pkt_type;
572c7f84
JH
2901 if (lmp_bredr_capable(hdev)) {
2902 di.acl_mtu = hdev->acl_mtu;
2903 di.acl_pkts = hdev->acl_pkts;
2904 di.sco_mtu = hdev->sco_mtu;
2905 di.sco_pkts = hdev->sco_pkts;
2906 } else {
2907 di.acl_mtu = hdev->le_mtu;
2908 di.acl_pkts = hdev->le_pkts;
2909 di.sco_mtu = 0;
2910 di.sco_pkts = 0;
2911 }
1da177e4
LT
2912 di.link_policy = hdev->link_policy;
2913 di.link_mode = hdev->link_mode;
2914
2915 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2916 memcpy(&di.features, &hdev->features, sizeof(di.features));
2917
2918 if (copy_to_user(arg, &di, sizeof(di)))
2919 err = -EFAULT;
2920
2921 hci_dev_put(hdev);
2922
2923 return err;
2924}
2925
2926/* ---- Interface to HCI drivers ---- */
2927
611b30f7
MH
2928static int hci_rfkill_set_block(void *data, bool blocked)
2929{
2930 struct hci_dev *hdev = data;
2931
2932 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2933
0736cfa8
MH
2934 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2935 return -EBUSY;
2936
5e130367
JH
2937 if (blocked) {
2938 set_bit(HCI_RFKILLED, &hdev->dev_flags);
d603b76b
MH
2939 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2940 !test_bit(HCI_CONFIG, &hdev->dev_flags))
bf543036 2941 hci_dev_do_close(hdev);
5e130367
JH
2942 } else {
2943 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
1025c04c 2944 }
611b30f7
MH
2945
2946 return 0;
2947}
2948
2949static const struct rfkill_ops hci_rfkill_ops = {
2950 .set_block = hci_rfkill_set_block,
2951};
2952
ab81cbf9
JH
2953static void hci_power_on(struct work_struct *work)
2954{
2955 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
96570ffc 2956 int err;
ab81cbf9
JH
2957
2958 BT_DBG("%s", hdev->name);
2959
cbed0ca1 2960 err = hci_dev_do_open(hdev);
96570ffc
JH
2961 if (err < 0) {
2962 mgmt_set_powered_failed(hdev, err);
ab81cbf9 2963 return;
96570ffc 2964 }
ab81cbf9 2965
a5c8f270
MH
2966 /* During the HCI setup phase, a few error conditions are
2967 * ignored and they need to be checked now. If they are still
2968 * valid, it is important to turn the device back off.
2969 */
2970 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
4a964404 2971 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
a5c8f270
MH
2972 (hdev->dev_type == HCI_BREDR &&
2973 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2974 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
bf543036
JH
2975 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2976 hci_dev_do_close(hdev);
2977 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
19202573
JH
2978 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2979 HCI_AUTO_OFF_TIMEOUT);
bf543036 2980 }
ab81cbf9 2981
fee746b0 2982 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
4a964404
MH
2983 /* For unconfigured devices, set the HCI_RAW flag
2984 * so that userspace can easily identify them.
4a964404
MH
2985 */
2986 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2987 set_bit(HCI_RAW, &hdev->flags);
0602a8ad
MH
2988
2989 /* For fully configured devices, this will send
2990 * the Index Added event. For unconfigured devices,
2991 * it will send the Unconfigured Index Added event.
2992 *
2993 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2994 * and no event will be sent.
2995 */
2996 mgmt_index_added(hdev);
d603b76b 2997 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
5ea234d3
MH
2998 /* Once the controller is configured, it is
2999 * important to clear the HCI_RAW flag.
3000 */
3001 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3002 clear_bit(HCI_RAW, &hdev->flags);
3003
d603b76b
MH
3004 /* Powering on the controller with HCI_CONFIG set only
3005 * happens with the transition from unconfigured to
3006 * configured. This will send the Index Added event.
3007 */
3008 mgmt_index_added(hdev);
fee746b0 3009 }
ab81cbf9
JH
3010}
3011
3012static void hci_power_off(struct work_struct *work)
3013{
3243553f 3014 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 3015 power_off.work);
ab81cbf9
JH
3016
3017 BT_DBG("%s", hdev->name);
3018
8ee56540 3019 hci_dev_do_close(hdev);
ab81cbf9
JH
3020}
3021
16ab91ab
JH
3022static void hci_discov_off(struct work_struct *work)
3023{
3024 struct hci_dev *hdev;
16ab91ab
JH
3025
3026 hdev = container_of(work, struct hci_dev, discov_off.work);
3027
3028 BT_DBG("%s", hdev->name);
3029
d1967ff8 3030 mgmt_discoverable_timeout(hdev);
16ab91ab
JH
3031}
3032
35f7498a 3033void hci_uuids_clear(struct hci_dev *hdev)
2aeb9a1a 3034{
4821002c 3035 struct bt_uuid *uuid, *tmp;
2aeb9a1a 3036
4821002c
JH
3037 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3038 list_del(&uuid->list);
2aeb9a1a
JH
3039 kfree(uuid);
3040 }
2aeb9a1a
JH
3041}
3042
35f7498a 3043void hci_link_keys_clear(struct hci_dev *hdev)
55ed8ca1
JH
3044{
3045 struct list_head *p, *n;
3046
3047 list_for_each_safe(p, n, &hdev->link_keys) {
3048 struct link_key *key;
3049
3050 key = list_entry(p, struct link_key, list);
3051
3052 list_del(p);
3053 kfree(key);
3054 }
55ed8ca1
JH
3055}
3056
35f7498a 3057void hci_smp_ltks_clear(struct hci_dev *hdev)
b899efaf
VCG
3058{
3059 struct smp_ltk *k, *tmp;
3060
3061 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3062 list_del(&k->list);
3063 kfree(k);
3064 }
b899efaf
VCG
3065}
3066
970c4e46
JH
3067void hci_smp_irks_clear(struct hci_dev *hdev)
3068{
3069 struct smp_irk *k, *tmp;
3070
3071 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3072 list_del(&k->list);
3073 kfree(k);
3074 }
3075}
3076
55ed8ca1
JH
3077struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3078{
8035ded4 3079 struct link_key *k;
55ed8ca1 3080
8035ded4 3081 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
3082 if (bacmp(bdaddr, &k->bdaddr) == 0)
3083 return k;
55ed8ca1
JH
3084
3085 return NULL;
3086}
3087
745c0ce3 3088static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 3089 u8 key_type, u8 old_key_type)
d25e28ab
JH
3090{
3091 /* Legacy key */
3092 if (key_type < 0x03)
745c0ce3 3093 return true;
d25e28ab
JH
3094
3095 /* Debug keys are insecure so don't store them persistently */
3096 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 3097 return false;
d25e28ab
JH
3098
3099 /* Changed combination key and there's no previous one */
3100 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 3101 return false;
d25e28ab
JH
3102
3103 /* Security mode 3 case */
3104 if (!conn)
745c0ce3 3105 return true;
d25e28ab
JH
3106
3107 /* Neither the local nor the remote side requested no-bonding */
3108 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 3109 return true;
d25e28ab
JH
3110
3111 /* Local side had dedicated bonding as requirement */
3112 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 3113 return true;
d25e28ab
JH
3114
3115 /* Remote side had dedicated bonding as requirement */
3116 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 3117 return true;
d25e28ab
JH
3118
3119 /* If none of the above criteria match, then don't store the key
3120 * persistently */
745c0ce3 3121 return false;
d25e28ab
JH
3122}
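
/* Reference (values as defined by the Bluetooth core specification):
 * link key types 0x00-0x02 are legacy keys, 0x03 is the debug
 * combination key and 0x06 the changed combination key; auth_type
 * values 0x00/0x01 mean no bonding, 0x02/0x03 dedicated bonding and
 * 0x04/0x05 general bonding, with odd values additionally requiring
 * MITM protection.
 */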
3123
e804d25d 3124static u8 ltk_role(u8 type)
98a0b845 3125{
e804d25d
JH
3126 if (type == SMP_LTK)
3127 return HCI_ROLE_MASTER;
3128
3129 return HCI_ROLE_SLAVE;
98a0b845
JH
3130}
3131
fe39c7b2 3132struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
e804d25d 3133 u8 role)
75d262c2 3134{
c9839a11 3135 struct smp_ltk *k;
75d262c2 3136
c9839a11 3137 list_for_each_entry(k, &hdev->long_term_keys, list) {
fe39c7b2 3138 if (k->ediv != ediv || k->rand != rand)
75d262c2
VCG
3139 continue;
3140
e804d25d 3141 if (ltk_role(k->type) != role)
98a0b845
JH
3142 continue;
3143
c9839a11 3144 return k;
75d262c2
VCG
3145 }
3146
3147 return NULL;
3148}
75d262c2 3149
c9839a11 3150struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
e804d25d 3151 u8 addr_type, u8 role)
75d262c2 3152{
c9839a11 3153 struct smp_ltk *k;
75d262c2 3154
c9839a11
VCG
3155 list_for_each_entry(k, &hdev->long_term_keys, list)
3156 if (addr_type == k->bdaddr_type &&
98a0b845 3157 bacmp(bdaddr, &k->bdaddr) == 0 &&
e804d25d 3158 ltk_role(k->type) == role)
75d262c2
VCG
3159 return k;
3160
3161 return NULL;
3162}
75d262c2 3163
970c4e46
JH
3164struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3165{
3166 struct smp_irk *irk;
3167
3168 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3169 if (!bacmp(&irk->rpa, rpa))
3170 return irk;
3171 }
3172
3173 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3174 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
3175 bacpy(&irk->rpa, rpa);
3176 return irk;
3177 }
3178 }
3179
3180 return NULL;
3181}
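
/* Note: RPA resolution is a two-pass lookup: a fast path over the
 * cached rpa fields first, then a crypto pass where smp_irk_matches()
 * recomputes the address hash with each stored IRK. A successful
 * match is cached in irk->rpa so later lookups hit the fast path.
 */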
3182
3183struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3184 u8 addr_type)
3185{
3186 struct smp_irk *irk;
3187
6cfc9988
JH
3188 /* Identity Address must be public or static random */
3189 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3190 return NULL;
3191
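
/* Note: a static random address has both of its two most significant
 * bits set, hence the (b[5] & 0xc0) == 0xc0 requirement above;
 * bdaddr_t is stored little-endian, so b[5] holds the most
 * significant byte.
 */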
970c4e46
JH
3192 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3193 if (addr_type == irk->addr_type &&
3194 bacmp(bdaddr, &irk->bdaddr) == 0)
3195 return irk;
3196 }
3197
3198 return NULL;
3199}
3200
567fa2aa 3201struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
7652ff6a
JH
3202 bdaddr_t *bdaddr, u8 *val, u8 type,
3203 u8 pin_len, bool *persistent)
55ed8ca1
JH
3204{
3205 struct link_key *key, *old_key;
745c0ce3 3206 u8 old_key_type;
55ed8ca1
JH
3207
3208 old_key = hci_find_link_key(hdev, bdaddr);
3209 if (old_key) {
3210 old_key_type = old_key->type;
3211 key = old_key;
3212 } else {
12adcf3a 3213 old_key_type = conn ? conn->key_type : 0xff;
0a14ab41 3214 key = kzalloc(sizeof(*key), GFP_KERNEL);
55ed8ca1 3215 if (!key)
567fa2aa 3216 return NULL;
55ed8ca1
JH
3217 list_add(&key->list, &hdev->link_keys);
3218 }
3219
6ed93dc6 3220 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 3221
d25e28ab
JH
3222 /* Some buggy controller combinations generate a changed
3223 * combination key for legacy pairing even when there's no
3224 * previous key */
3225 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 3226 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 3227 type = HCI_LK_COMBINATION;
655fe6ec
JH
3228 if (conn)
3229 conn->key_type = type;
3230 }
d25e28ab 3231
55ed8ca1 3232 bacpy(&key->bdaddr, bdaddr);
9b3b4460 3233 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
3234 key->pin_len = pin_len;
3235
b6020ba0 3236 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 3237 key->type = old_key_type;
4748fed2
JH
3238 else
3239 key->type = type;
3240
7652ff6a
JH
3241 if (persistent)
3242 *persistent = hci_persistent_key(hdev, conn, type,
3243 old_key_type);
55ed8ca1 3244
567fa2aa 3245 return key;
55ed8ca1
JH
3246}
3247
ca9142b8 3248struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
35d70271 3249 u8 addr_type, u8 type, u8 authenticated,
fe39c7b2 3250 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
75d262c2 3251{
c9839a11 3252 struct smp_ltk *key, *old_key;
e804d25d 3253 u8 role = ltk_role(type);
75d262c2 3254
e804d25d 3255 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, role);
c9839a11 3256 if (old_key)
75d262c2 3257 key = old_key;
c9839a11 3258 else {
0a14ab41 3259 key = kzalloc(sizeof(*key), GFP_KERNEL);
75d262c2 3260 if (!key)
ca9142b8 3261 return NULL;
c9839a11 3262 list_add(&key->list, &hdev->long_term_keys);
75d262c2
VCG
3263 }
3264
75d262c2 3265 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
3266 key->bdaddr_type = addr_type;
3267 memcpy(key->val, tk, sizeof(key->val));
3268 key->authenticated = authenticated;
3269 key->ediv = ediv;
fe39c7b2 3270 key->rand = rand;
c9839a11
VCG
3271 key->enc_size = enc_size;
3272 key->type = type;
75d262c2 3273
ca9142b8 3274 return key;
75d262c2
VCG
3275}
3276
ca9142b8
JH
3277struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3278 u8 addr_type, u8 val[16], bdaddr_t *rpa)
970c4e46
JH
3279{
3280 struct smp_irk *irk;
3281
3282 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3283 if (!irk) {
3284 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3285 if (!irk)
ca9142b8 3286 return NULL;
970c4e46
JH
3287
3288 bacpy(&irk->bdaddr, bdaddr);
3289 irk->addr_type = addr_type;
3290
3291 list_add(&irk->list, &hdev->identity_resolving_keys);
3292 }
3293
3294 memcpy(irk->val, val, 16);
3295 bacpy(&irk->rpa, rpa);
3296
ca9142b8 3297 return irk;
970c4e46
JH
3298}
3299
55ed8ca1
JH
3300int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3301{
3302 struct link_key *key;
3303
3304 key = hci_find_link_key(hdev, bdaddr);
3305 if (!key)
3306 return -ENOENT;
3307
6ed93dc6 3308 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
3309
3310 list_del(&key->list);
3311 kfree(key);
3312
3313 return 0;
3314}
3315
e0b2b27e 3316int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
b899efaf
VCG
3317{
3318 struct smp_ltk *k, *tmp;
c51ffa0b 3319 int removed = 0;
b899efaf
VCG
3320
3321 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
e0b2b27e 3322 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
b899efaf
VCG
3323 continue;
3324
6ed93dc6 3325 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf
VCG
3326
3327 list_del(&k->list);
3328 kfree(k);
c51ffa0b 3329 removed++;
b899efaf
VCG
3330 }
3331
c51ffa0b 3332 return removed ? 0 : -ENOENT;
b899efaf
VCG
3333}
3334
a7ec7338
JH
3335void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3336{
3337 struct smp_irk *k, *tmp;
3338
668b7b19 3339 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
a7ec7338
JH
3340 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3341 continue;
3342
3343 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3344
3345 list_del(&k->list);
3346 kfree(k);
3347 }
3348}
3349
6bd32326 3350/* HCI command timer function */
65cc2b49 3351static void hci_cmd_timeout(struct work_struct *work)
6bd32326 3352{
65cc2b49
MH
3353 struct hci_dev *hdev = container_of(work, struct hci_dev,
3354 cmd_timer.work);
6bd32326 3355
bda4f23a
AE
3356 if (hdev->sent_cmd) {
3357 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3358 u16 opcode = __le16_to_cpu(sent->opcode);
3359
3360 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3361 } else {
3362 BT_ERR("%s command tx timeout", hdev->name);
3363 }
3364
6bd32326 3365 atomic_set(&hdev->cmd_cnt, 1);
c347b765 3366 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
3367}
3368
2763eda6 3369struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 3370 bdaddr_t *bdaddr)
2763eda6
SJ
3371{
3372 struct oob_data *data;
3373
3374 list_for_each_entry(data, &hdev->remote_oob_data, list)
3375 if (bacmp(bdaddr, &data->bdaddr) == 0)
3376 return data;
3377
3378 return NULL;
3379}
3380
3381int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3382{
3383 struct oob_data *data;
3384
3385 data = hci_find_remote_oob_data(hdev, bdaddr);
3386 if (!data)
3387 return -ENOENT;
3388
6ed93dc6 3389 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
3390
3391 list_del(&data->list);
3392 kfree(data);
3393
3394 return 0;
3395}
3396
35f7498a 3397void hci_remote_oob_data_clear(struct hci_dev *hdev)
2763eda6
SJ
3398{
3399 struct oob_data *data, *n;
3400
3401 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3402 list_del(&data->list);
3403 kfree(data);
3404 }
2763eda6
SJ
3405}
3406
0798872e
MH
3407int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3408 u8 *hash, u8 *randomizer)
2763eda6
SJ
3409{
3410 struct oob_data *data;
3411
3412 data = hci_find_remote_oob_data(hdev, bdaddr);
2763eda6 3413 if (!data) {
0a14ab41 3414 data = kmalloc(sizeof(*data), GFP_KERNEL);
2763eda6
SJ
3415 if (!data)
3416 return -ENOMEM;
3417
3418 bacpy(&data->bdaddr, bdaddr);
3419 list_add(&data->list, &hdev->remote_oob_data);
3420 }
3421
519ca9d0
MH
3422 memcpy(data->hash192, hash, sizeof(data->hash192));
3423 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
2763eda6 3424
0798872e
MH
3425 memset(data->hash256, 0, sizeof(data->hash256));
3426 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3427
3428 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3429
3430 return 0;
3431}
3432
3433int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3434 u8 *hash192, u8 *randomizer192,
3435 u8 *hash256, u8 *randomizer256)
3436{
3437 struct oob_data *data;
3438
3439 data = hci_find_remote_oob_data(hdev, bdaddr);
3440 if (!data) {
0a14ab41 3441 data = kmalloc(sizeof(*data), GFP_KERNEL);
0798872e
MH
3442 if (!data)
3443 return -ENOMEM;
3444
3445 bacpy(&data->bdaddr, bdaddr);
3446 list_add(&data->list, &hdev->remote_oob_data);
3447 }
3448
3449 memcpy(data->hash192, hash192, sizeof(data->hash192));
3450 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3451
3452 memcpy(data->hash256, hash256, sizeof(data->hash256));
3453 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3454
6ed93dc6 3455 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
3456
3457 return 0;
3458}
3459
dcc36c16 3460struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
b9ee0a78 3461 bdaddr_t *bdaddr, u8 type)
b2a66aad 3462{
8035ded4 3463 struct bdaddr_list *b;
b2a66aad 3464
dcc36c16 3465 list_for_each_entry(b, bdaddr_list, list) {
b9ee0a78 3466 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 3467 return b;
b9ee0a78 3468 }
b2a66aad
AJ
3469
3470 return NULL;
3471}
3472
dcc36c16 3473void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
b2a66aad
AJ
3474{
3475 struct list_head *p, *n;
3476
dcc36c16 3477 list_for_each_safe(p, n, bdaddr_list) {
b9ee0a78 3478 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
b2a66aad
AJ
3479
3480 list_del(p);
3481 kfree(b);
3482 }
b2a66aad
AJ
3483}
3484
dcc36c16 3485int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3486{
3487 struct bdaddr_list *entry;
b2a66aad 3488
b9ee0a78 3489 if (!bacmp(bdaddr, BDADDR_ANY))
b2a66aad
AJ
3490 return -EBADF;
3491
dcc36c16 3492 if (hci_bdaddr_list_lookup(list, bdaddr, type))
5e762444 3493 return -EEXIST;
b2a66aad 3494
27f70f3e 3495 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
5e762444
AJ
3496 if (!entry)
3497 return -ENOMEM;
b2a66aad
AJ
3498
3499 bacpy(&entry->bdaddr, bdaddr);
b9ee0a78 3500 entry->bdaddr_type = type;
b2a66aad 3501
dcc36c16 3502 list_add(&entry->list, list);
b2a66aad 3503
2a8357f2 3504 return 0;
b2a66aad
AJ
3505}
3506
dcc36c16 3507int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3508{
3509 struct bdaddr_list *entry;
b2a66aad 3510
35f7498a 3511 if (!bacmp(bdaddr, BDADDR_ANY)) {
dcc36c16 3512 hci_bdaddr_list_clear(list);
35f7498a
JH
3513 return 0;
3514 }
b2a66aad 3515
dcc36c16 3516 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
d2ab0ac1
MH
3517 if (!entry)
3518 return -ENOENT;
3519
3520 list_del(&entry->list);
3521 kfree(entry);
3522
3523 return 0;
3524}
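
/* Note: passing BDADDR_ANY to hci_bdaddr_list_del() acts as a
 * wildcard and clears the entire list instead of removing a single
 * entry.
 */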
3525
15819a70
AG
3526/* This function requires the caller holds hdev->lock */
3527struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3528 bdaddr_t *addr, u8 addr_type)
3529{
3530 struct hci_conn_params *params;
3531
738f6185
JH
3532 /* The conn params list only contains identity addresses */
3533 if (!hci_is_identity_address(addr, addr_type))
3534 return NULL;
3535
15819a70
AG
3536 list_for_each_entry(params, &hdev->le_conn_params, list) {
3537 if (bacmp(&params->addr, addr) == 0 &&
3538 params->addr_type == addr_type) {
3539 return params;
3540 }
3541 }
3542
3543 return NULL;
3544}
3545
cef952ce
AG
3546static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3547{
3548 struct hci_conn *conn;
3549
3550 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3551 if (!conn)
3552 return false;
3553
3554 if (conn->dst_type != type)
3555 return false;
3556
3557 if (conn->state != BT_CONNECTED)
3558 return false;
3559
3560 return true;
3561}
3562
4b10966f 3563/* This function requires the caller holds hdev->lock */
501f8827
JH
3564struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3565 bdaddr_t *addr, u8 addr_type)
4b10966f 3566{
912b42ef 3567 struct hci_conn_params *param;
4b10966f 3568
738f6185
JH
3569 /* The list only contains identity addresses */
3570 if (!hci_is_identity_address(addr, addr_type))
3571 return NULL;
3572
501f8827 3573 list_for_each_entry(param, list, action) {
912b42ef
JH
3574 if (bacmp(&param->addr, addr) == 0 &&
3575 param->addr_type == addr_type)
3576 return param;
4b10966f
MH
3577 }
3578
3579 return NULL;
3580}
3581
3582/* This function requires the caller holds hdev->lock */
51d167c0
MH
3583struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3584 bdaddr_t *addr, u8 addr_type)
bf5b3c8b
MH
3585{
3586 struct hci_conn_params *params;
3587
c46245b3 3588 if (!hci_is_identity_address(addr, addr_type))
51d167c0 3589 return NULL;
bf5b3c8b
MH
3590
3591 params = hci_conn_params_lookup(hdev, addr, addr_type);
3592 if (params)
51d167c0 3593 return params;
bf5b3c8b
MH
3594
3595 params = kzalloc(sizeof(*params), GFP_KERNEL);
3596 if (!params) {
3597 BT_ERR("Out of memory");
51d167c0 3598 return NULL;
bf5b3c8b
MH
3599 }
3600
3601 bacpy(&params->addr, addr);
3602 params->addr_type = addr_type;
3603
3604 list_add(&params->list, &hdev->le_conn_params);
93450c75 3605 INIT_LIST_HEAD(&params->action);
bf5b3c8b
MH
3606
3607 params->conn_min_interval = hdev->le_conn_min_interval;
3608 params->conn_max_interval = hdev->le_conn_max_interval;
3609 params->conn_latency = hdev->le_conn_latency;
3610 params->supervision_timeout = hdev->le_supv_timeout;
3611 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3612
3613 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3614
51d167c0 3615 return params;
bf5b3c8b
MH
3616}
3617
3618/* This function requires the caller holds hdev->lock */
3619int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
d06b50ce 3620 u8 auto_connect)
15819a70
AG
3621{
3622 struct hci_conn_params *params;
3623
8c87aae1
MH
3624 params = hci_conn_params_add(hdev, addr, addr_type);
3625 if (!params)
3626 return -EIO;
cef952ce 3627
42ce26de
JH
3628 if (params->auto_connect == auto_connect)
3629 return 0;
3630
95305baa 3631 list_del_init(&params->action);
15819a70 3632
cef952ce
AG
3633 switch (auto_connect) {
3634 case HCI_AUTO_CONN_DISABLED:
3635 case HCI_AUTO_CONN_LINK_LOSS:
95305baa 3636 hci_update_background_scan(hdev);
cef952ce 3637 break;
851efca8 3638 case HCI_AUTO_CONN_REPORT:
95305baa
JH
3639 list_add(&params->action, &hdev->pend_le_reports);
3640 hci_update_background_scan(hdev);
851efca8 3641 break;
cef952ce 3642 case HCI_AUTO_CONN_ALWAYS:
95305baa
JH
3643 if (!is_connected(hdev, addr, addr_type)) {
3644 list_add(&params->action, &hdev->pend_le_conns);
3645 hci_update_background_scan(hdev);
3646 }
cef952ce
AG
3647 break;
3648 }
15819a70 3649
851efca8
JH
3650 params->auto_connect = auto_connect;
3651
d06b50ce
MH
3652 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3653 auto_connect);
a9b0a04c
AG
3654
3655 return 0;
15819a70
AG
3656}
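
/* Note: the auto_connect policy is realized through the action lists:
 * HCI_AUTO_CONN_REPORT parks the entry on pend_le_reports,
 * HCI_AUTO_CONN_ALWAYS on pend_le_conns (unless already connected),
 * and every transition re-evaluates the passive background scan via
 * hci_update_background_scan().
 */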
3657
3658/* This function requires the caller holds hdev->lock */
3659void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3660{
3661 struct hci_conn_params *params;
3662
3663 params = hci_conn_params_lookup(hdev, addr, addr_type);
3664 if (!params)
3665 return;
3666
95305baa 3667 list_del(&params->action);
15819a70
AG
3668 list_del(&params->list);
3669 kfree(params);
3670
95305baa
JH
3671 hci_update_background_scan(hdev);
3672
15819a70
AG
3673 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3674}
3675
55af49a8
JH
3676/* This function requires the caller holds hdev->lock */
3677void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3678{
3679 struct hci_conn_params *params, *tmp;
3680
3681 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3682 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3683 continue;
3684 list_del(&params->list);
3685 kfree(params);
3686 }
3687
3688 BT_DBG("All LE disabled connection parameters were removed");
3689}
3690
15819a70 3691/* This function requires the caller holds hdev->lock */
373110c5 3692void hci_conn_params_clear_all(struct hci_dev *hdev)
15819a70
AG
3693{
3694 struct hci_conn_params *params, *tmp;
3695
3696 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
a2f41a8f 3697 list_del(&params->action);
15819a70
AG
3698 list_del(&params->list);
3699 kfree(params);
3700 }
3701
a2f41a8f 3702 hci_update_background_scan(hdev);
1089b67d 3703
15819a70
AG
3704 BT_DBG("All LE connection parameters were removed");
3705}
3706
4c87eaab 3707static void inquiry_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3708{
4c87eaab
AG
3709 if (status) {
3710 BT_ERR("Failed to start inquiry: status %d", status);
7ba8b4be 3711
4c87eaab
AG
3712 hci_dev_lock(hdev);
3713 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3714 hci_dev_unlock(hdev);
3715 return;
3716 }
7ba8b4be
AG
3717}
3718
4c87eaab 3719static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3720{
4c87eaab
AG
3721 /* General inquiry access code (GIAC) */
3722 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3723 struct hci_request req;
3724 struct hci_cp_inquiry cp;
7ba8b4be
AG
3725 int err;
3726
4c87eaab
AG
3727 if (status) {
3728 BT_ERR("Failed to disable LE scanning: status %d", status);
3729 return;
3730 }
7ba8b4be 3731
4c87eaab
AG
3732 switch (hdev->discovery.type) {
3733 case DISCOV_TYPE_LE:
3734 hci_dev_lock(hdev);
3735 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3736 hci_dev_unlock(hdev);
3737 break;
7ba8b4be 3738
4c87eaab
AG
3739 case DISCOV_TYPE_INTERLEAVED:
3740 hci_req_init(&req, hdev);
7ba8b4be 3741
4c87eaab
AG
3742 memset(&cp, 0, sizeof(cp));
3743 memcpy(&cp.lap, lap, sizeof(cp.lap));
3744 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3745 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
7ba8b4be 3746
4c87eaab 3747 hci_dev_lock(hdev);
7dbfac1d 3748
4c87eaab 3749 hci_inquiry_cache_flush(hdev);
7dbfac1d 3750
4c87eaab
AG
3751 err = hci_req_run(&req, inquiry_complete);
3752 if (err) {
3753 BT_ERR("Inquiry request failed: err %d", err);
3754 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3755 }
7dbfac1d 3756
4c87eaab
AG
3757 hci_dev_unlock(hdev);
3758 break;
7dbfac1d 3759 }
7dbfac1d
AG
3760}
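
/* Note: for DISCOV_TYPE_INTERLEAVED, stopping the LE scan does not end
 * the discovery procedure: a classic inquiry using the general inquiry
 * access code (GIAC) is chained here, with inquiry_complete() handling
 * the case where that inquiry fails to start.
 */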
3761
7ba8b4be
AG
3762static void le_scan_disable_work(struct work_struct *work)
3763{
3764 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 3765 le_scan_disable.work);
4c87eaab
AG
3766 struct hci_request req;
3767 int err;
3768
3769 BT_DBG("%s", hdev->name);
3770
4c87eaab 3771 hci_req_init(&req, hdev);
28b75a89 3772
b1efcc28 3773 hci_req_add_le_scan_disable(&req);
28b75a89 3774
3775 err = hci_req_run(&req, le_scan_disable_work_complete);
3776 if (err)
3777 BT_ERR("Disable LE scanning request failed: err %d", err);
3778}
3779
3780static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3781{
3782 struct hci_dev *hdev = req->hdev;
3783
3784 /* If we're advertising or initiating an LE connection we can't
3785 * go ahead and change the random address at this time. This is
3786 * because the eventual initiator address used for the
3787 * subsequently created connection will be undefined (some
3788 * controllers use the new address and others the one we had
3789 * when the operation started).
3790 *
3791 * In this kind of scenario skip the update and let the random
3792 * address be updated at the next cycle.
3793 */
5ce194c4 3794 if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
3795 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3796 BT_DBG("Deferring random address update");
3797 return;
3798 }
3799
3800 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3801}
3802
3803int hci_update_random_address(struct hci_request *req, bool require_privacy,
3804 u8 *own_addr_type)
3805{
3806 struct hci_dev *hdev = req->hdev;
3807 int err;
3808
3809 /* If privacy is enabled, use a resolvable private address. If
3810 * the current RPA has expired or there is something other than
3811 * the current RPA in use, then generate a new one.
3812 */
3813 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3814 int to;
3815
3816 *own_addr_type = ADDR_LE_DEV_RANDOM;
3817
3818 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
2b5224dc 3819 !bacmp(&hdev->random_addr, &hdev->rpa))
3820 return 0;
3821
2b5224dc 3822 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
3823 if (err < 0) {
3824 BT_ERR("%s failed to generate new RPA", hdev->name);
3825 return err;
3826 }
3827
8d97250e 3828 set_random_addr(req, &hdev->rpa);
3829
3830 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3831 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3832
3833 return 0;
3834 }
3835
3836 /* In case of required privacy without resolvable private address,
3837 * use an unresolvable private address. This is useful for active
3838 * scanning and non-connectable advertising.
3839 */
3840 if (require_privacy) {
3841 bdaddr_t urpa;
3842
3843 get_random_bytes(&urpa, 6);
3844 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3845
3846 *own_addr_type = ADDR_LE_DEV_RANDOM;
8d97250e 3847 set_random_addr(req, &urpa);
94b1fc92 3848 return 0;
3849 }
3850
3851 /* If forcing static address is in use or there is no public
3852 * address, use the static address as random address (but skip
3853 * the HCI command if the current random address is already the
3854 * static one).
3855 */
111902f7 3856 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3857 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3858 *own_addr_type = ADDR_LE_DEV_RANDOM;
3859 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3860 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3861 &hdev->static_addr);
3862 return 0;
3863 }
3864
3865 /* Neither privacy nor static address is being used so use a
3866 * public address.
3867 */
3868 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3869
3870 return 0;
3871}
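/* Illustrative sketch, not part of the original file: how a request
 * builder is expected to consume hci_update_random_address(). The
 * helper name build_active_scan_sketch is hypothetical; the scan
 * parameter values simply reuse the controller defaults. Active
 * scanning sends SCAN_REQ, so privacy is requested here; if the
 * address update fails, the command is not queued at all.
 */
static void build_active_scan_sketch(struct hci_request *req)
{
	struct hci_cp_le_set_scan_param param_cp;
	u8 own_addr_type;

	if (hci_update_random_address(req, true, &own_addr_type))
		return;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_ACTIVE;
	param_cp.interval = cpu_to_le16(req->hdev->le_scan_interval);
	param_cp.window = cpu_to_le16(req->hdev->le_scan_window);
	param_cp.own_address_type = own_addr_type;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);
}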
3872
3873/* Copy the Identity Address of the controller.
3874 *
3875 * If the controller has a public BD_ADDR, then by default use that one.
3876 * If this is an LE-only controller without a public address, default to
3877 * the static random address.
3878 *
3879 * For debugging purposes it is possible to force controllers with a
3880 * public address to use the static random address instead.
3881 */
3882void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3883 u8 *bdaddr_type)
3884{
111902f7 3885 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3886 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3887 bacpy(bdaddr, &hdev->static_addr);
3888 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3889 } else {
3890 bacpy(bdaddr, &hdev->bdaddr);
3891 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3892 }
3893}
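/* Illustrative sketch, not in the original file: a typical caller of
 * hci_copy_identity_address(); the variable names are hypothetical.
 */
static void identity_address_sketch(struct hci_dev *hdev)
{
	bdaddr_t id_addr;
	u8 id_addr_type;

	hci_copy_identity_address(hdev, &id_addr, &id_addr_type);
	BT_DBG("%s identity %pMR (type %u)", hdev->name, &id_addr,
	       id_addr_type);
}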
3894
3895/* Alloc HCI device */
3896struct hci_dev *hci_alloc_dev(void)
3897{
3898 struct hci_dev *hdev;
3899
27f70f3e 3900 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3901 if (!hdev)
3902 return NULL;
3903
3904 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3905 hdev->esco_type = (ESCO_HV1);
3906 hdev->link_mode = (HCI_LM_ACCEPT);
3907 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3908 hdev->io_capability = 0x03; /* No Input No Output */
96c2103a 3909 hdev->manufacturer = 0xffff; /* Default to internal use */
3910 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3911 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
b1b813d4 3912
3913 hdev->sniff_max_interval = 800;
3914 hdev->sniff_min_interval = 80;
3915
3f959d46 3916 hdev->le_adv_channel_map = 0x07;
3917 hdev->le_scan_interval = 0x0060;
3918 hdev->le_scan_window = 0x0030;
3919 hdev->le_conn_min_interval = 0x0028;
3920 hdev->le_conn_max_interval = 0x0038;
3921 hdev->le_conn_latency = 0x0000;
3922 hdev->le_supv_timeout = 0x002a;
bef64738 3923
d6bfd59c 3924 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
b9a7a61e 3925 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3926 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3927 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
d6bfd59c 3928
3929 mutex_init(&hdev->lock);
3930 mutex_init(&hdev->req_lock);
3931
3932 INIT_LIST_HEAD(&hdev->mgmt_pending);
3933 INIT_LIST_HEAD(&hdev->blacklist);
6659358e 3934 INIT_LIST_HEAD(&hdev->whitelist);
3935 INIT_LIST_HEAD(&hdev->uuids);
3936 INIT_LIST_HEAD(&hdev->link_keys);
3937 INIT_LIST_HEAD(&hdev->long_term_keys);
970c4e46 3938 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
b1b813d4 3939 INIT_LIST_HEAD(&hdev->remote_oob_data);
d2ab0ac1 3940 INIT_LIST_HEAD(&hdev->le_white_list);
15819a70 3941 INIT_LIST_HEAD(&hdev->le_conn_params);
77a77a30 3942 INIT_LIST_HEAD(&hdev->pend_le_conns);
66f8455a 3943 INIT_LIST_HEAD(&hdev->pend_le_reports);
6b536b5e 3944 INIT_LIST_HEAD(&hdev->conn_hash.list);
3945
3946 INIT_WORK(&hdev->rx_work, hci_rx_work);
3947 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3948 INIT_WORK(&hdev->tx_work, hci_tx_work);
3949 INIT_WORK(&hdev->power_on, hci_power_on);
b1b813d4 3950
3951 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3952 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3953 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3954
3955 skb_queue_head_init(&hdev->rx_q);
3956 skb_queue_head_init(&hdev->cmd_q);
3957 skb_queue_head_init(&hdev->raw_q);
3958
3959 init_waitqueue_head(&hdev->req_wait_q);
3960
65cc2b49 3961 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
b1b813d4 3962
3963 hci_init_sysfs(hdev);
3964 discovery_init(hdev);
3965
3966 return hdev;
3967}
3968EXPORT_SYMBOL(hci_alloc_dev);
3969
3970/* Free HCI device */
3971void hci_free_dev(struct hci_dev *hdev)
3972{
3973 /* will free via device release */
3974 put_device(&hdev->dev);
3975}
3976EXPORT_SYMBOL(hci_free_dev);
3977
3978/* Register HCI device */
3979int hci_register_dev(struct hci_dev *hdev)
3980{
b1b813d4 3981 int id, error;
1da177e4 3982
74292d5a 3983 if (!hdev->open || !hdev->close || !hdev->send)
3984 return -EINVAL;
3985
3986 /* Do not allow HCI_AMP devices to register at index 0,
3987 * so the index can be used as the AMP controller ID.
3988 */
3989 switch (hdev->dev_type) {
3990 case HCI_BREDR:
3991 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3992 break;
3993 case HCI_AMP:
3994 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3995 break;
3996 default:
3997 return -EINVAL;
1da177e4 3998 }
8e87d142 3999
4000 if (id < 0)
4001 return id;
4002
4003 sprintf(hdev->name, "hci%d", id);
4004 hdev->id = id;
4005
4006 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4007
4008 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4009 WQ_MEM_RECLAIM, 1, hdev->name);
4010 if (!hdev->workqueue) {
4011 error = -ENOMEM;
4012 goto err;
4013 }
f48fd9c8 4014
4015 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4016 WQ_MEM_RECLAIM, 1, hdev->name);
4017 if (!hdev->req_workqueue) {
4018 destroy_workqueue(hdev->workqueue);
4019 error = -ENOMEM;
4020 goto err;
4021 }
4022
4023 if (!IS_ERR_OR_NULL(bt_debugfs))
4024 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4025
4026 dev_set_name(&hdev->dev, "%s", hdev->name);
4027
4028 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
4029 CRYPTO_ALG_ASYNC);
4030 if (IS_ERR(hdev->tfm_aes)) {
4031 BT_ERR("Unable to create crypto context");
4032 error = PTR_ERR(hdev->tfm_aes);
4033 hdev->tfm_aes = NULL;
4034 goto err_wqueue;
4035 }
4036
bdc3e0f1 4037 error = device_add(&hdev->dev);
33ca954d 4038 if (error < 0)
99780a7b 4039 goto err_tfm;
1da177e4 4040
611b30f7 4041 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
4042 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4043 hdev);
4044 if (hdev->rfkill) {
4045 if (rfkill_register(hdev->rfkill) < 0) {
4046 rfkill_destroy(hdev->rfkill);
4047 hdev->rfkill = NULL;
4048 }
4049 }
4050
4051 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4052 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4053
a8b2d5c2 4054 set_bit(HCI_SETUP, &hdev->dev_flags);
004b0258 4055 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
ce2be9ac 4056
01cd3404 4057 if (hdev->dev_type == HCI_BREDR) {
4058 /* Assume BR/EDR support until proven otherwise (such as
4059 * through reading supported features during init).
4060 */
4061 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4062 }
ce2be9ac 4063
4064 write_lock(&hci_dev_list_lock);
4065 list_add(&hdev->list, &hci_dev_list);
4066 write_unlock(&hci_dev_list_lock);
4067
4068 /* Devices that are marked for raw-only usage are unconfigured
4069 * and should not be included in normal operation.
4070 */
4071 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4a964404 4072 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
fee746b0 4073
1da177e4 4074 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 4075 hci_dev_hold(hdev);
1da177e4 4076
19202573 4077 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 4078
1da177e4 4079 return id;
f48fd9c8 4080
4081err_tfm:
4082 crypto_free_blkcipher(hdev->tfm_aes);
4083err_wqueue:
4084 destroy_workqueue(hdev->workqueue);
6ead1bbc 4085 destroy_workqueue(hdev->req_workqueue);
33ca954d 4086err:
3df92b31 4087 ida_simple_remove(&hci_index_ida, hdev->id);
f48fd9c8 4088
33ca954d 4089 return error;
4090}
4091EXPORT_SYMBOL(hci_register_dev);
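/* Illustrative sketch, not part of the original file: the minimal
 * driver-side registration sequence. my_open, my_close and my_send
 * are hypothetical callbacks; leaving any of them unset makes
 * hci_register_dev() fail with -EINVAL, as checked above.
 */
static int my_open(struct hci_dev *hdev) { return 0; }
static int my_close(struct hci_dev *hdev) { return 0; }

static int my_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* A real driver would hand the frame to its transport here */
	kfree_skb(skb);
	return 0;
}

static int driver_probe_sketch(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_USB;
	hdev->open = my_open;
	hdev->close = my_close;
	hdev->send = my_send;

	err = hci_register_dev(hdev);
	if (err < 0)
		hci_free_dev(hdev);

	return err;
}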
4092
4093/* Unregister HCI device */
59735631 4094void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 4095{
3df92b31 4096 int i, id;
ef222013 4097
c13854ce 4098 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 4099
4100 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4101
4102 id = hdev->id;
4103
f20d09d5 4104 write_lock(&hci_dev_list_lock);
1da177e4 4105 list_del(&hdev->list);
f20d09d5 4106 write_unlock(&hci_dev_list_lock);
4107
4108 hci_dev_do_close(hdev);
4109
cd4c5391 4110 for (i = 0; i < NUM_REASSEMBLY; i++)
4111 kfree_skb(hdev->reassembly[i]);
4112
4113 cancel_work_sync(&hdev->power_on);
4114
ab81cbf9 4115 if (!test_bit(HCI_INIT, &hdev->flags) &&
4116 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4117 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
09fd0de5 4118 hci_dev_lock(hdev);
744cf19e 4119 mgmt_index_removed(hdev);
09fd0de5 4120 hci_dev_unlock(hdev);
56e5cb86 4121 }
ab81cbf9 4122
4123 /* mgmt_index_removed should take care of emptying the
4124 * pending list */
4125 BUG_ON(!list_empty(&hdev->mgmt_pending));
4126
4127 hci_notify(hdev, HCI_DEV_UNREG);
4128
4129 if (hdev->rfkill) {
4130 rfkill_unregister(hdev->rfkill);
4131 rfkill_destroy(hdev->rfkill);
4132 }
4133
4134 if (hdev->tfm_aes)
4135 crypto_free_blkcipher(hdev->tfm_aes);
4136
bdc3e0f1 4137 device_del(&hdev->dev);
147e2d59 4138
4139 debugfs_remove_recursive(hdev->debugfs);
4140
f48fd9c8 4141 destroy_workqueue(hdev->workqueue);
6ead1bbc 4142 destroy_workqueue(hdev->req_workqueue);
f48fd9c8 4143
09fd0de5 4144 hci_dev_lock(hdev);
dcc36c16 4145 hci_bdaddr_list_clear(&hdev->blacklist);
6659358e 4146 hci_bdaddr_list_clear(&hdev->whitelist);
2aeb9a1a 4147 hci_uuids_clear(hdev);
55ed8ca1 4148 hci_link_keys_clear(hdev);
b899efaf 4149 hci_smp_ltks_clear(hdev);
970c4e46 4150 hci_smp_irks_clear(hdev);
2763eda6 4151 hci_remote_oob_data_clear(hdev);
dcc36c16 4152 hci_bdaddr_list_clear(&hdev->le_white_list);
373110c5 4153 hci_conn_params_clear_all(hdev);
09fd0de5 4154 hci_dev_unlock(hdev);
e2e0cacb 4155
dc946bd8 4156 hci_dev_put(hdev);
4157
4158 ida_simple_remove(&hci_index_ida, id);
4159}
4160EXPORT_SYMBOL(hci_unregister_dev);
4161
4162/* Suspend HCI device */
4163int hci_suspend_dev(struct hci_dev *hdev)
4164{
4165 hci_notify(hdev, HCI_DEV_SUSPEND);
4166 return 0;
4167}
4168EXPORT_SYMBOL(hci_suspend_dev);
4169
4170/* Resume HCI device */
4171int hci_resume_dev(struct hci_dev *hdev)
4172{
4173 hci_notify(hdev, HCI_DEV_RESUME);
4174 return 0;
4175}
4176EXPORT_SYMBOL(hci_resume_dev);
4177
76bca880 4178/* Receive frame from HCI drivers */
e1a26170 4179int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 4180{
76bca880 4181 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 4182 && !test_bit(HCI_INIT, &hdev->flags))) {
4183 kfree_skb(skb);
4184 return -ENXIO;
4185 }
4186
d82603c6 4187 /* Incoming skb */
4188 bt_cb(skb)->incoming = 1;
4189
4190 /* Time stamp */
4191 __net_timestamp(skb);
4192
76bca880 4193 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 4194 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 4195
4196 return 0;
4197}
4198EXPORT_SYMBOL(hci_recv_frame);
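/* Illustrative sketch, not in the original file: how a driver receive
 * path hands a complete HCI event to the core. The helper name and
 * buffer parameters are hypothetical; the packet type must be set in
 * the skb control block before calling hci_recv_frame().
 */
static int driver_rx_sketch(struct hci_dev *hdev, const void *buf,
			    int len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	memcpy(skb_put(skb, len), buf, len);

	return hci_recv_frame(hdev, skb);
}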
4199
33e882a5 4200static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 4201 int count, __u8 index)
33e882a5
SS
4202{
4203 int len = 0;
4204 int hlen = 0;
4205 int remain = count;
4206 struct sk_buff *skb;
4207 struct bt_skb_cb *scb;
4208
4209 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 4210 index >= NUM_REASSEMBLY)
4211 return -EILSEQ;
4212
4213 skb = hdev->reassembly[index];
4214
4215 if (!skb) {
4216 switch (type) {
4217 case HCI_ACLDATA_PKT:
4218 len = HCI_MAX_FRAME_SIZE;
4219 hlen = HCI_ACL_HDR_SIZE;
4220 break;
4221 case HCI_EVENT_PKT:
4222 len = HCI_MAX_EVENT_SIZE;
4223 hlen = HCI_EVENT_HDR_SIZE;
4224 break;
4225 case HCI_SCODATA_PKT:
4226 len = HCI_MAX_SCO_SIZE;
4227 hlen = HCI_SCO_HDR_SIZE;
4228 break;
4229 }
4230
1e429f38 4231 skb = bt_skb_alloc(len, GFP_ATOMIC);
4232 if (!skb)
4233 return -ENOMEM;
4234
4235 scb = (void *) skb->cb;
4236 scb->expect = hlen;
4237 scb->pkt_type = type;
4238
4239 hdev->reassembly[index] = skb;
4240 }
4241
4242 while (count) {
4243 scb = (void *) skb->cb;
89bb46d0 4244 len = min_t(uint, scb->expect, count);
4245
4246 memcpy(skb_put(skb, len), data, len);
4247
4248 count -= len;
4249 data += len;
4250 scb->expect -= len;
4251 remain = count;
4252
4253 switch (type) {
4254 case HCI_EVENT_PKT:
4255 if (skb->len == HCI_EVENT_HDR_SIZE) {
4256 struct hci_event_hdr *h = hci_event_hdr(skb);
4257 scb->expect = h->plen;
4258
4259 if (skb_tailroom(skb) < scb->expect) {
4260 kfree_skb(skb);
4261 hdev->reassembly[index] = NULL;
4262 return -ENOMEM;
4263 }
4264 }
4265 break;
4266
4267 case HCI_ACLDATA_PKT:
4268 if (skb->len == HCI_ACL_HDR_SIZE) {
4269 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4270 scb->expect = __le16_to_cpu(h->dlen);
4271
4272 if (skb_tailroom(skb) < scb->expect) {
4273 kfree_skb(skb);
4274 hdev->reassembly[index] = NULL;
4275 return -ENOMEM;
4276 }
4277 }
4278 break;
4279
4280 case HCI_SCODATA_PKT:
4281 if (skb->len == HCI_SCO_HDR_SIZE) {
4282 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4283 scb->expect = h->dlen;
4284
4285 if (skb_tailroom(skb) < scb->expect) {
4286 kfree_skb(skb);
4287 hdev->reassembly[index] = NULL;
4288 return -ENOMEM;
4289 }
4290 }
4291 break;
4292 }
4293
4294 if (scb->expect == 0) {
4295 /* Complete frame */
4296
4297 bt_cb(skb)->pkt_type = type;
e1a26170 4298 hci_recv_frame(hdev, skb);
4299
4300 hdev->reassembly[index] = NULL;
4301 return remain;
4302 }
4303 }
4304
4305 return remain;
4306}
4307
4308int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4309{
4310 int rem = 0;
4311
4312 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4313 return -EILSEQ;
4314
da5f6c37 4315 while (count) {
1e429f38 4316 rem = hci_reassembly(hdev, type, data, count, type - 1);
4317 if (rem < 0)
4318 return rem;
ef222013 4319
4320 data += (count - rem);
4321 count = rem;
f81c6224 4322 }
ef222013 4323
f39a3c06 4324 return rem;
4325}
4326EXPORT_SYMBOL(hci_recv_fragment);
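/* Illustrative sketch, not in the original file: a driver feeding an
 * arbitrarily chunked byte stream of ACL data into the reassembly
 * machinery; the helper name is hypothetical. Leftover bytes are
 * carried over internally and a negative return aborts the stream.
 */
static void driver_fragment_rx_sketch(struct hci_dev *hdev, void *data,
				      int count)
{
	int err;

	err = hci_recv_fragment(hdev, HCI_ACLDATA_PKT, data, count);
	if (err < 0)
		BT_ERR("Fragment reassembly failed: err %d", err);
}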
4327
4328#define STREAM_REASSEMBLY 0
4329
4330int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4331{
4332 int type;
4333 int rem = 0;
4334
da5f6c37 4335 while (count) {
4336 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4337
4338 if (!skb) {
4339 struct { char type; } *pkt;
4340
4341 /* Start of the frame */
4342 pkt = data;
4343 type = pkt->type;
4344
4345 data++;
4346 count--;
4347 } else
4348 type = bt_cb(skb)->pkt_type;
4349
1e429f38 4350 rem = hci_reassembly(hdev, type, data, count,
a8c5fb1a 4351 STREAM_REASSEMBLY);
4352 if (rem < 0)
4353 return rem;
4354
4355 data += (count - rem);
4356 count = rem;
f81c6224 4357 }
4358
4359 return rem;
4360}
4361EXPORT_SYMBOL(hci_recv_stream_fragment);
4362
4363/* ---- Interface to upper protocols ---- */
4364
4365int hci_register_cb(struct hci_cb *cb)
4366{
4367 BT_DBG("%p name %s", cb, cb->name);
4368
f20d09d5 4369 write_lock(&hci_cb_list_lock);
1da177e4 4370 list_add(&cb->list, &hci_cb_list);
f20d09d5 4371 write_unlock(&hci_cb_list_lock);
4372
4373 return 0;
4374}
4375EXPORT_SYMBOL(hci_register_cb);
4376
4377int hci_unregister_cb(struct hci_cb *cb)
4378{
4379 BT_DBG("%p name %s", cb, cb->name);
4380
f20d09d5 4381 write_lock(&hci_cb_list_lock);
1da177e4 4382 list_del(&cb->list);
f20d09d5 4383 write_unlock(&hci_cb_list_lock);
4384
4385 return 0;
4386}
4387EXPORT_SYMBOL(hci_unregister_cb);
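/* Illustrative sketch, not part of the original file: how an upper
 * protocol hooks into the core. The callback structure is
 * hypothetical and minimal; the real users of this interface are
 * L2CAP and SCO.
 */
static struct hci_cb proto_cb_sketch = {
	.name = "proto_sketch",
};

static int proto_init_sketch(void)
{
	return hci_register_cb(&proto_cb_sketch);
}

static void proto_exit_sketch(void)
{
	hci_unregister_cb(&proto_cb_sketch);
}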
4388
51086991 4389static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4 4390{
4391 int err;
4392
0d48d939 4393 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 4394
4395 /* Time stamp */
4396 __net_timestamp(skb);
1da177e4 4397
4398 /* Send copy to monitor */
4399 hci_send_to_monitor(hdev, skb);
4400
4401 if (atomic_read(&hdev->promisc)) {
4402 /* Send copy to the sockets */
470fe1b5 4403 hci_send_to_sock(hdev, skb);
4404 }
4405
4406 /* Get rid of skb owner, prior to sending to the driver. */
4407 skb_orphan(skb);
4408
4409 err = hdev->send(hdev, skb);
4410 if (err < 0) {
4411 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4412 kfree_skb(skb);
4413 }
4414}
4415
4416void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4417{
4418 skb_queue_head_init(&req->cmd_q);
4419 req->hdev = hdev;
5d73e034 4420 req->err = 0;
4421}
4422
4423int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4424{
4425 struct hci_dev *hdev = req->hdev;
4426 struct sk_buff *skb;
4427 unsigned long flags;
4428
4429 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4430
4431 /* If an error occurred during request building, remove all HCI
4432 * commands queued on the HCI request queue.
4433 */
4434 if (req->err) {
4435 skb_queue_purge(&req->cmd_q);
4436 return req->err;
4437 }
4438
4439 /* Do not allow empty requests */
4440 if (skb_queue_empty(&req->cmd_q))
382b0c39 4441 return -ENODATA;
4442
4443 skb = skb_peek_tail(&req->cmd_q);
4444 bt_cb(skb)->req.complete = complete;
4445
4446 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4447 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4448 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4449
4450 queue_work(hdev->workqueue, &hdev->cmd_work);
4451
4452 return 0;
4453}
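/* Illustrative sketch, not in the original file: batching two commands
 * into one asynchronous request. The completion callback
 * req_done_sketch is hypothetical; it runs once the last command in
 * the request completes.
 */
static void req_done_sketch(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s request done: status 0x%2.2x", hdev->name, status);
}

static int run_request_sketch(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add_le_scan_disable(&req);
	hci_req_add_le_passive_scan(&req);

	return hci_req_run(&req, req_done_sketch);
}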
4454
4455bool hci_req_pending(struct hci_dev *hdev)
4456{
4457 return (hdev->req_status == HCI_REQ_PEND);
4458}
4459
1ca3a9d0 4460static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
07dc93dd 4461 u32 plen, const void *param)
4462{
4463 int len = HCI_COMMAND_HDR_SIZE + plen;
4464 struct hci_command_hdr *hdr;
4465 struct sk_buff *skb;
4466
1da177e4 4467 skb = bt_skb_alloc(len, GFP_ATOMIC);
4468 if (!skb)
4469 return NULL;
4470
4471 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 4472 hdr->opcode = cpu_to_le16(opcode);
4473 hdr->plen = plen;
4474
4475 if (plen)
4476 memcpy(skb_put(skb, plen), param, plen);
4477
4478 BT_DBG("skb len %d", skb->len);
4479
0d48d939 4480 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
c78ae283 4481
4482 return skb;
4483}
4484
4485/* Send HCI command */
4486int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4487 const void *param)
4488{
4489 struct sk_buff *skb;
4490
4491 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4492
4493 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4494 if (!skb) {
4495 BT_ERR("%s no memory for command", hdev->name);
4496 return -ENOMEM;
4497 }
4498
4499 /* Stand-alone HCI commands must be flagged as
4500 * single-command requests.
4501 */
4502 bt_cb(skb)->req.start = true;
4503
1da177e4 4504 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 4505 queue_work(hdev->workqueue, &hdev->cmd_work);
4506
4507 return 0;
4508}
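/* Illustrative sketch, not in the original file: firing a single
 * stand-alone command; the core marks it as the start of a
 * one-command request.
 */
static int send_reset_sketch(struct hci_dev *hdev)
{
	return hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}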
1da177e4 4509
71c76a17 4510/* Queue a command to an asynchronous HCI request */
4511void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4512 const void *param, u8 event)
4513{
4514 struct hci_dev *hdev = req->hdev;
4515 struct sk_buff *skb;
4516
4517 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4518
4519 /* If an error occurred during request building, there is no point in
4520 * queueing the HCI command. We can simply return.
4521 */
4522 if (req->err)
4523 return;
4524
4525 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4526 if (!skb) {
4527 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4528 hdev->name, opcode);
4529 req->err = -ENOMEM;
e348fe6b 4530 return;
4531 }
4532
4533 if (skb_queue_empty(&req->cmd_q))
4534 bt_cb(skb)->req.start = true;
4535
4536 bt_cb(skb)->req.event = event;
4537
71c76a17 4538 skb_queue_tail(&req->cmd_q, skb);
4539}
4540
4541void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4542 const void *param)
4543{
4544 hci_req_add_ev(req, opcode, plen, param, 0);
4545}
4546
1da177e4 4547/* Get data from the previously sent command */
a9de9248 4548void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4549{
4550 struct hci_command_hdr *hdr;
4551
4552 if (!hdev->sent_cmd)
4553 return NULL;
4554
4555 hdr = (void *) hdev->sent_cmd->data;
4556
a9de9248 4557 if (hdr->opcode != cpu_to_le16(opcode))
4558 return NULL;
4559
f0e09510 4560 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4561
4562 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4563}
4564
4565/* Send ACL data */
4566static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4567{
4568 struct hci_acl_hdr *hdr;
4569 int len = skb->len;
4570
4571 skb_push(skb, HCI_ACL_HDR_SIZE);
4572 skb_reset_transport_header(skb);
9c70220b 4573 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4574 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4575 hdr->dlen = cpu_to_le16(len);
4576}
4577
ee22be7e 4578static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 4579 struct sk_buff *skb, __u16 flags)
1da177e4 4580{
ee22be7e 4581 struct hci_conn *conn = chan->conn;
4582 struct hci_dev *hdev = conn->hdev;
4583 struct sk_buff *list;
4584
4585 skb->len = skb_headlen(skb);
4586 skb->data_len = 0;
4587
4588 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4589
4590 switch (hdev->dev_type) {
4591 case HCI_BREDR:
4592 hci_add_acl_hdr(skb, conn->handle, flags);
4593 break;
4594 case HCI_AMP:
4595 hci_add_acl_hdr(skb, chan->handle, flags);
4596 break;
4597 default:
4598 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4599 return;
4600 }
087bfd99 4601
4602 list = skb_shinfo(skb)->frag_list;
4603 if (!list) {
4604 /* Non fragmented */
4605 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4606
73d80deb 4607 skb_queue_tail(queue, skb);
4608 } else {
4609 /* Fragmented */
4610 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4611
4612 skb_shinfo(skb)->frag_list = NULL;
4613
4614 /* Queue all fragments atomically */
af3e6359 4615 spin_lock(&queue->lock);
1da177e4 4616
73d80deb 4617 __skb_queue_tail(queue, skb);
4618
4619 flags &= ~ACL_START;
4620 flags |= ACL_CONT;
4621 do {
4622 skb = list; list = list->next;
8e87d142 4623
0d48d939 4624 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 4625 hci_add_acl_hdr(skb, conn->handle, flags);
4626
4627 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4628
73d80deb 4629 __skb_queue_tail(queue, skb);
4630 } while (list);
4631
af3e6359 4632 spin_unlock(&queue->lock);
1da177e4 4633 }
4634}
4635
4636void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4637{
ee22be7e 4638 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 4639
f0e09510 4640 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb 4641
ee22be7e 4642 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 4643
3eff45ea 4644 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4645}
4646
4647/* Send SCO data */
0d861d8b 4648void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4649{
4650 struct hci_dev *hdev = conn->hdev;
4651 struct hci_sco_hdr hdr;
4652
4653 BT_DBG("%s len %d", hdev->name, skb->len);
4654
aca3192c 4655 hdr.handle = cpu_to_le16(conn->handle);
4656 hdr.dlen = skb->len;
4657
4658 skb_push(skb, HCI_SCO_HDR_SIZE);
4659 skb_reset_transport_header(skb);
9c70220b 4660 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4 4661
0d48d939 4662 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 4663
1da177e4 4664 skb_queue_tail(&conn->data_q, skb);
3eff45ea 4665 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4666}
4667
4668/* ---- HCI TX task (outgoing data) ---- */
4669
4670/* HCI Connection scheduler */
4671static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4672 int *quote)
4673{
4674 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4675 struct hci_conn *conn = NULL, *c;
abc5de8f 4676 unsigned int num = 0, min = ~0;
1da177e4 4677
8e87d142 4678 /* We don't have to lock device here. Connections are always
1da177e4 4679 * added and removed with TX task disabled. */
4680
4681 rcu_read_lock();
4682
4683 list_for_each_entry_rcu(c, &h->list, list) {
769be974 4684 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 4685 continue;
4686
4687 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4688 continue;
4689
4690 num++;
4691
4692 if (c->sent < min) {
4693 min = c->sent;
4694 conn = c;
4695 }
4696
4697 if (hci_conn_num(hdev, type) == num)
4698 break;
4699 }
4700
4701 rcu_read_unlock();
4702
1da177e4 4703 if (conn) {
4704 int cnt, q;
4705
4706 switch (conn->type) {
4707 case ACL_LINK:
4708 cnt = hdev->acl_cnt;
4709 break;
4710 case SCO_LINK:
4711 case ESCO_LINK:
4712 cnt = hdev->sco_cnt;
4713 break;
4714 case LE_LINK:
4715 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4716 break;
4717 default:
4718 cnt = 0;
4719 BT_ERR("Unknown link type");
4720 }
4721
4722 q = cnt / num;
4723 *quote = q ? q : 1;
4724 } else
4725 *quote = 0;
4726
4727 BT_DBG("conn %p quote %d", conn, *quote);
4728 return conn;
4729}
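/* Worked example (illustrative, not in the original file): with
 * hdev->acl_cnt == 8 free ACL buffers and num == 3 eligible
 * connections, the least-busy connection gets quote = 8 / 3 = 2
 * packets this round; a zero quotient is rounded up to 1 so the
 * scheduler always makes progress.
 */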
4730
6039aa73 4731static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4732{
4733 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4734 struct hci_conn *c;
1da177e4 4735
bae1f5d9 4736 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 4737
4738 rcu_read_lock();
4739
1da177e4 4740 /* Kill stalled connections */
bf4c6325 4741 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 4742 if (c->type == type && c->sent) {
4743 BT_ERR("%s killing stalled connection %pMR",
4744 hdev->name, &c->dst);
bed71748 4745 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4746 }
4747 }
4748
4749 rcu_read_unlock();
4750}
4751
4752static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4753 int *quote)
1da177e4 4754{
4755 struct hci_conn_hash *h = &hdev->conn_hash;
4756 struct hci_chan *chan = NULL;
abc5de8f 4757 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 4758 struct hci_conn *conn;
4759 int cnt, q, conn_num = 0;
4760
4761 BT_DBG("%s", hdev->name);
4762
4763 rcu_read_lock();
4764
4765 list_for_each_entry_rcu(conn, &h->list, list) {
4766 struct hci_chan *tmp;
4767
4768 if (conn->type != type)
4769 continue;
4770
4771 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4772 continue;
4773
4774 conn_num++;
4775
8192edef 4776 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4777 struct sk_buff *skb;
4778
4779 if (skb_queue_empty(&tmp->data_q))
4780 continue;
4781
4782 skb = skb_peek(&tmp->data_q);
4783 if (skb->priority < cur_prio)
4784 continue;
4785
4786 if (skb->priority > cur_prio) {
4787 num = 0;
4788 min = ~0;
4789 cur_prio = skb->priority;
4790 }
4791
4792 num++;
4793
4794 if (conn->sent < min) {
4795 min = conn->sent;
4796 chan = tmp;
4797 }
4798 }
4799
4800 if (hci_conn_num(hdev, type) == conn_num)
4801 break;
4802 }
4803
4804 rcu_read_unlock();
4805
4806 if (!chan)
4807 return NULL;
4808
4809 switch (chan->conn->type) {
4810 case ACL_LINK:
4811 cnt = hdev->acl_cnt;
4812 break;
4813 case AMP_LINK:
4814 cnt = hdev->block_cnt;
4815 break;
4816 case SCO_LINK:
4817 case ESCO_LINK:
4818 cnt = hdev->sco_cnt;
4819 break;
4820 case LE_LINK:
4821 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4822 break;
4823 default:
4824 cnt = 0;
4825 BT_ERR("Unknown link type");
4826 }
4827
4828 q = cnt / num;
4829 *quote = q ? q : 1;
4830 BT_DBG("chan %p quote %d", chan, *quote);
4831 return chan;
4832}
4833
4834static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4835{
4836 struct hci_conn_hash *h = &hdev->conn_hash;
4837 struct hci_conn *conn;
4838 int num = 0;
4839
4840 BT_DBG("%s", hdev->name);
4841
4842 rcu_read_lock();
4843
4844 list_for_each_entry_rcu(conn, &h->list, list) {
4845 struct hci_chan *chan;
4846
4847 if (conn->type != type)
4848 continue;
4849
4850 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4851 continue;
4852
4853 num++;
4854
8192edef 4855 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4856 struct sk_buff *skb;
4857
4858 if (chan->sent) {
4859 chan->sent = 0;
4860 continue;
4861 }
4862
4863 if (skb_queue_empty(&chan->data_q))
4864 continue;
4865
4866 skb = skb_peek(&chan->data_q);
4867 if (skb->priority >= HCI_PRIO_MAX - 1)
4868 continue;
4869
4870 skb->priority = HCI_PRIO_MAX - 1;
4871
4872 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 4873 skb->priority);
4874 }
4875
4876 if (hci_conn_num(hdev, type) == num)
4877 break;
4878 }
4879
4880 rcu_read_unlock();
4881
4882}
4883
4884static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4885{
4886 /* Calculate count of blocks used by this packet */
4887 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4888}
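/* Worked example (illustrative, not in the original file): an skb of
 * 1025 bytes carries 1025 - HCI_ACL_HDR_SIZE = 1021 bytes of payload;
 * with hdev->block_len == 256 that occupies
 * DIV_ROUND_UP(1021, 256) == 4 controller data blocks.
 */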
4889
6039aa73 4890static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 4891{
4a964404 4892 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4893 /* ACL tx timeout must be longer than maximum
4894 * link supervision timeout (40.9 seconds) */
63d2bc1b 4895 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 4896 HCI_ACL_TX_TIMEOUT))
bae1f5d9 4897 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 4898 }
63d2bc1b 4899}
1da177e4 4900
6039aa73 4901static void hci_sched_acl_pkt(struct hci_dev *hdev)
4902{
4903 unsigned int cnt = hdev->acl_cnt;
4904 struct hci_chan *chan;
4905 struct sk_buff *skb;
4906 int quote;
4907
4908 __check_timeout(hdev, cnt);
04837f64 4909
73d80deb 4910 while (hdev->acl_cnt &&
a8c5fb1a 4911 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4912 u32 priority = (skb_peek(&chan->data_q))->priority;
4913 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4914 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4915 skb->len, skb->priority);
73d80deb 4916
4917 /* Stop if priority has changed */
4918 if (skb->priority < priority)
4919 break;
4920
4921 skb = skb_dequeue(&chan->data_q);
4922
73d80deb 4923 hci_conn_enter_active_mode(chan->conn,
04124681 4924 bt_cb(skb)->force_active);
04837f64 4925
57d17d70 4926 hci_send_frame(hdev, skb);
4927 hdev->acl_last_tx = jiffies;
4928
4929 hdev->acl_cnt--;
4930 chan->sent++;
4931 chan->conn->sent++;
4932 }
4933 }
4934
4935 if (cnt != hdev->acl_cnt)
4936 hci_prio_recalculate(hdev, ACL_LINK);
4937}
4938
6039aa73 4939static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 4940{
63d2bc1b 4941 unsigned int cnt = hdev->block_cnt;
4942 struct hci_chan *chan;
4943 struct sk_buff *skb;
4944 int quote;
bd1eb66b 4945 u8 type;
b71d385a 4946
63d2bc1b 4947 __check_timeout(hdev, cnt);
b71d385a 4948
4949 BT_DBG("%s", hdev->name);
4950
4951 if (hdev->dev_type == HCI_AMP)
4952 type = AMP_LINK;
4953 else
4954 type = ACL_LINK;
4955
b71d385a 4956 while (hdev->block_cnt > 0 &&
bd1eb66b 4957 (chan = hci_chan_sent(hdev, type, &quote))) {
4958 u32 priority = (skb_peek(&chan->data_q))->priority;
4959 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4960 int blocks;
4961
4962 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4963 skb->len, skb->priority);
4964
4965 /* Stop if priority has changed */
4966 if (skb->priority < priority)
4967 break;
4968
4969 skb = skb_dequeue(&chan->data_q);
4970
4971 blocks = __get_blocks(hdev, skb);
4972 if (blocks > hdev->block_cnt)
4973 return;
4974
4975 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 4976 bt_cb(skb)->force_active);
b71d385a 4977
57d17d70 4978 hci_send_frame(hdev, skb);
4979 hdev->acl_last_tx = jiffies;
4980
4981 hdev->block_cnt -= blocks;
4982 quote -= blocks;
4983
4984 chan->sent += blocks;
4985 chan->conn->sent += blocks;
4986 }
4987 }
4988
4989 if (cnt != hdev->block_cnt)
bd1eb66b 4990 hci_prio_recalculate(hdev, type);
4991}
4992
6039aa73 4993static void hci_sched_acl(struct hci_dev *hdev)
4994{
4995 BT_DBG("%s", hdev->name);
4996
4997 /* No ACL link over BR/EDR controller */
4998 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4999 return;
5000
5001 /* No AMP link over AMP controller */
5002 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
5003 return;
5004
5005 switch (hdev->flow_ctl_mode) {
5006 case HCI_FLOW_CTL_MODE_PACKET_BASED:
5007 hci_sched_acl_pkt(hdev);
5008 break;
5009
5010 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5011 hci_sched_acl_blk(hdev);
5012 break;
5013 }
5014}
5015
1da177e4 5016/* Schedule SCO */
6039aa73 5017static void hci_sched_sco(struct hci_dev *hdev)
5018{
5019 struct hci_conn *conn;
5020 struct sk_buff *skb;
5021 int quote;
5022
5023 BT_DBG("%s", hdev->name);
5024
5025 if (!hci_conn_num(hdev, SCO_LINK))
5026 return;
5027
5028 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5029 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5030 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 5031 hci_send_frame(hdev, skb);
5032
5033 conn->sent++;
5034 if (conn->sent == ~0)
5035 conn->sent = 0;
5036 }
5037 }
5038}
5039
6039aa73 5040static void hci_sched_esco(struct hci_dev *hdev)
5041{
5042 struct hci_conn *conn;
5043 struct sk_buff *skb;
5044 int quote;
5045
5046 BT_DBG("%s", hdev->name);
5047
5048 if (!hci_conn_num(hdev, ESCO_LINK))
5049 return;
5050
5051 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5052 &quote))) {
5053 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5054 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 5055 hci_send_frame(hdev, skb);
5056
5057 conn->sent++;
5058 if (conn->sent == ~0)
5059 conn->sent = 0;
5060 }
5061 }
5062}
5063
6039aa73 5064static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 5065{
73d80deb 5066 struct hci_chan *chan;
6ed58ec5 5067 struct sk_buff *skb;
02b20f0b 5068 int quote, cnt, tmp;
5069
5070 BT_DBG("%s", hdev->name);
5071
5072 if (!hci_conn_num(hdev, LE_LINK))
5073 return;
5074
4a964404 5075 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5076 /* LE tx timeout must be longer than maximum
5077 * link supervision timeout (40.9 seconds) */
bae1f5d9 5078 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 5079 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 5080 hci_link_tx_to(hdev, LE_LINK);
5081 }
5082
5083 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 5084 tmp = cnt;
73d80deb 5085 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
5086 u32 priority = (skb_peek(&chan->data_q))->priority;
5087 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 5088 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 5089 skb->len, skb->priority);
6ed58ec5 5090
5091 /* Stop if priority has changed */
5092 if (skb->priority < priority)
5093 break;
5094
5095 skb = skb_dequeue(&chan->data_q);
5096
57d17d70 5097 hci_send_frame(hdev, skb);
5098 hdev->le_last_tx = jiffies;
5099
5100 cnt--;
5101 chan->sent++;
5102 chan->conn->sent++;
5103 }
5104 }
73d80deb 5105
5106 if (hdev->le_pkts)
5107 hdev->le_cnt = cnt;
5108 else
5109 hdev->acl_cnt = cnt;
5110
5111 if (cnt != tmp)
5112 hci_prio_recalculate(hdev, LE_LINK);
5113}
5114
3eff45ea 5115static void hci_tx_work(struct work_struct *work)
1da177e4 5116{
3eff45ea 5117 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
5118 struct sk_buff *skb;
5119
6ed58ec5 5120 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 5121 hdev->sco_cnt, hdev->le_cnt);
1da177e4 5122
5123 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5124 /* Schedule queues and send stuff to HCI driver */
5125 hci_sched_acl(hdev);
5126 hci_sched_sco(hdev);
5127 hci_sched_esco(hdev);
5128 hci_sched_le(hdev);
5129 }
6ed58ec5 5130
5131 /* Send next queued raw (unknown type) packet */
5132 while ((skb = skb_dequeue(&hdev->raw_q)))
57d17d70 5133 hci_send_frame(hdev, skb);
5134}
5135
25985edc 5136/* ----- HCI RX task (incoming data processing) ----- */
5137
5138/* ACL data packet */
6039aa73 5139static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5140{
5141 struct hci_acl_hdr *hdr = (void *) skb->data;
5142 struct hci_conn *conn;
5143 __u16 handle, flags;
5144
5145 skb_pull(skb, HCI_ACL_HDR_SIZE);
5146
5147 handle = __le16_to_cpu(hdr->handle);
5148 flags = hci_flags(handle);
5149 handle = hci_handle(handle);
5150
f0e09510 5151 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 5152 handle, flags);
5153
5154 hdev->stat.acl_rx++;
5155
5156 hci_dev_lock(hdev);
5157 conn = hci_conn_hash_lookup_handle(hdev, handle);
5158 hci_dev_unlock(hdev);
8e87d142 5159
1da177e4 5160 if (conn) {
65983fc7 5161 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 5162
1da177e4 5163 /* Send to upper protocol */
5164 l2cap_recv_acldata(conn, skb, flags);
5165 return;
1da177e4 5166 } else {
8e87d142 5167 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 5168 hdev->name, handle);
5169 }
5170
5171 kfree_skb(skb);
5172}
5173
5174/* SCO data packet */
6039aa73 5175static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5176{
5177 struct hci_sco_hdr *hdr = (void *) skb->data;
5178 struct hci_conn *conn;
5179 __u16 handle;
5180
5181 skb_pull(skb, HCI_SCO_HDR_SIZE);
5182
5183 handle = __le16_to_cpu(hdr->handle);
5184
f0e09510 5185 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5186
5187 hdev->stat.sco_rx++;
5188
5189 hci_dev_lock(hdev);
5190 conn = hci_conn_hash_lookup_handle(hdev, handle);
5191 hci_dev_unlock(hdev);
5192
5193 if (conn) {
1da177e4 5194 /* Send to upper protocol */
5195 sco_recv_scodata(conn, skb);
5196 return;
1da177e4 5197 } else {
8e87d142 5198 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 5199 hdev->name, handle);
5200 }
5201
5202 kfree_skb(skb);
5203}
5204
5205static bool hci_req_is_complete(struct hci_dev *hdev)
5206{
5207 struct sk_buff *skb;
5208
5209 skb = skb_peek(&hdev->cmd_q);
5210 if (!skb)
5211 return true;
5212
5213 return bt_cb(skb)->req.start;
5214}
5215
5216static void hci_resend_last(struct hci_dev *hdev)
5217{
5218 struct hci_command_hdr *sent;
5219 struct sk_buff *skb;
5220 u16 opcode;
5221
5222 if (!hdev->sent_cmd)
5223 return;
5224
5225 sent = (void *) hdev->sent_cmd->data;
5226 opcode = __le16_to_cpu(sent->opcode);
5227 if (opcode == HCI_OP_RESET)
5228 return;
5229
5230 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5231 if (!skb)
5232 return;
5233
5234 skb_queue_head(&hdev->cmd_q, skb);
5235 queue_work(hdev->workqueue, &hdev->cmd_work);
5236}
5237
5238void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5239{
5240 hci_req_complete_t req_complete = NULL;
5241 struct sk_buff *skb;
5242 unsigned long flags;
5243
5244 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5245
5246 /* If the completed command doesn't match the last one that was
5247 * sent we need to do special handling of it.
9238f36a 5248 */
5249 if (!hci_sent_cmd_data(hdev, opcode)) {
5250 /* Some CSR based controllers generate a spontaneous
5251 * reset complete event during init and any pending
5252 * command will never be completed. In such a case we
5253 * need to resend whatever was the last sent
5254 * command.
5255 */
5256 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5257 hci_resend_last(hdev);
5258
9238f36a 5259 return;
42c6b129 5260 }
5261
5262 /* If the command succeeded and there's still more commands in
5263 * this request the request is not yet complete.
5264 */
5265 if (!status && !hci_req_is_complete(hdev))
5266 return;
5267
5268 /* If this was the last command in a request the complete
5269 * callback would be found in hdev->sent_cmd instead of the
5270 * command queue (hdev->cmd_q).
5271 */
5272 if (hdev->sent_cmd) {
5273 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5274
5275 if (req_complete) {
5276 /* We must set the complete callback to NULL to
5277 * avoid calling the callback more than once if
5278 * this function gets called again.
5279 */
5280 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5281
9238f36a 5282 goto call_complete;
53e21fbc 5283 }
5284 }
5285
5286 /* Remove all pending commands belonging to this request */
5287 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5288 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5289 if (bt_cb(skb)->req.start) {
5290 __skb_queue_head(&hdev->cmd_q, skb);
5291 break;
5292 }
5293
5294 req_complete = bt_cb(skb)->req.complete;
5295 kfree_skb(skb);
5296 }
5297 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5298
5299call_complete:
5300 if (req_complete)
5301 req_complete(hdev, status);
5302}
5303
b78752cc 5304static void hci_rx_work(struct work_struct *work)
1da177e4 5305{
b78752cc 5306 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5307 struct sk_buff *skb;
5308
5309 BT_DBG("%s", hdev->name);
5310
1da177e4 5311 while ((skb = skb_dequeue(&hdev->rx_q))) {
5312 /* Send copy to monitor */
5313 hci_send_to_monitor(hdev, skb);
5314
5315 if (atomic_read(&hdev->promisc)) {
5316 /* Send copy to the sockets */
470fe1b5 5317 hci_send_to_sock(hdev, skb);
5318 }
5319
fee746b0 5320 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5321 kfree_skb(skb);
5322 continue;
5323 }
5324
5325 if (test_bit(HCI_INIT, &hdev->flags)) {
5326 /* Don't process data packets in these states. */
0d48d939 5327 switch (bt_cb(skb)->pkt_type) {
5328 case HCI_ACLDATA_PKT:
5329 case HCI_SCODATA_PKT:
5330 kfree_skb(skb);
5331 continue;
3ff50b79 5332 }
5333 }
5334
5335 /* Process frame */
0d48d939 5336 switch (bt_cb(skb)->pkt_type) {
1da177e4 5337 case HCI_EVENT_PKT:
b78752cc 5338 BT_DBG("%s Event packet", hdev->name);
5339 hci_event_packet(hdev, skb);
5340 break;
5341
5342 case HCI_ACLDATA_PKT:
5343 BT_DBG("%s ACL data packet", hdev->name);
5344 hci_acldata_packet(hdev, skb);
5345 break;
5346
5347 case HCI_SCODATA_PKT:
5348 BT_DBG("%s SCO data packet", hdev->name);
5349 hci_scodata_packet(hdev, skb);
5350 break;
5351
5352 default:
5353 kfree_skb(skb);
5354 break;
5355 }
5356 }
5357}
5358
c347b765 5359static void hci_cmd_work(struct work_struct *work)
1da177e4 5360{
c347b765 5361 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5362 struct sk_buff *skb;
5363
5364 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5365 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 5366
1da177e4 5367 /* Send queued commands */
5368 if (atomic_read(&hdev->cmd_cnt)) {
5369 skb = skb_dequeue(&hdev->cmd_q);
5370 if (!skb)
5371 return;
5372
7585b97a 5373 kfree_skb(hdev->sent_cmd);
1da177e4 5374
a675d7f1 5375 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 5376 if (hdev->sent_cmd) {
1da177e4 5377 atomic_dec(&hdev->cmd_cnt);
57d17d70 5378 hci_send_frame(hdev, skb);
7bdb8a5c 5379 if (test_bit(HCI_RESET, &hdev->flags))
65cc2b49 5380 cancel_delayed_work(&hdev->cmd_timer);
7bdb8a5c 5381 else
5382 schedule_delayed_work(&hdev->cmd_timer,
5383 HCI_CMD_TIMEOUT);
5384 } else {
5385 skb_queue_head(&hdev->cmd_q, skb);
c347b765 5386 queue_work(hdev->workqueue, &hdev->cmd_work);
5387 }
5388 }
5389}
5390
5391void hci_req_add_le_scan_disable(struct hci_request *req)
5392{
5393 struct hci_cp_le_set_scan_enable cp;
5394
5395 memset(&cp, 0, sizeof(cp));
5396 cp.enable = LE_SCAN_DISABLE;
5397 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5398}
a4790dbd 5399
5400void hci_req_add_le_passive_scan(struct hci_request *req)
5401{
5402 struct hci_cp_le_set_scan_param param_cp;
5403 struct hci_cp_le_set_scan_enable enable_cp;
5404 struct hci_dev *hdev = req->hdev;
5405 u8 own_addr_type;
5406
5407 /* Set require_privacy to false since no SCAN_REQ are sent
5408 * during passive scanning. Not using an unresolvable address
5409 * here is important so that peer devices using direct
5410 * advertising with our address will be correctly reported
5411 * by the controller.
8ef30fd3 5412 */
6ab535a7 5413 if (hci_update_random_address(req, false, &own_addr_type))
5414 return;
5415
5416 memset(&param_cp, 0, sizeof(param_cp));
5417 param_cp.type = LE_SCAN_PASSIVE;
5418 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5419 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5420 param_cp.own_address_type = own_addr_type;
5421 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5422 &param_cp);
5423
5424 memset(&enable_cp, 0, sizeof(enable_cp));
5425 enable_cp.enable = LE_SCAN_ENABLE;
4340a124 5426 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5427 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5428 &enable_cp);
5429}
5430
5431static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5432{
5433 if (status)
5434 BT_DBG("HCI request failed to update background scanning: "
5435 "status 0x%2.2x", status);
5436}
5437
5438/* This function controls the background scanning based on hdev->pend_le_conns
5439 * list. If there are pending LE connections we start the background scanning,
5440 * otherwise we stop it.
5441 *
5442 * This function requires the caller holds hdev->lock.
5443 */
5444void hci_update_background_scan(struct hci_dev *hdev)
5445{
5446 struct hci_request req;
5447 struct hci_conn *conn;
5448 int err;
5449
5450 if (!test_bit(HCI_UP, &hdev->flags) ||
5451 test_bit(HCI_INIT, &hdev->flags) ||
5452 test_bit(HCI_SETUP, &hdev->dev_flags) ||
d603b76b 5453 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
b8221770 5454 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
c20c02d5 5455 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
5456 return;
5457
5458 /* No point in doing scanning if LE support hasn't been enabled */
5459 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5460 return;
5461
5462 /* If discovery is active don't interfere with it */
5463 if (hdev->discovery.state != DISCOVERY_STOPPED)
5464 return;
5465
5466 hci_req_init(&req, hdev);
5467
d1d588c1 5468 if (list_empty(&hdev->pend_le_conns) &&
66f8455a 5469 list_empty(&hdev->pend_le_reports)) {
5470 /* If there are no pending LE connections or devices
5471 * to be scanned for, we should stop the background
5472 * scanning.
5473 */
5474
5475 /* If controller is not scanning we are done. */
5476 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5477 return;
5478
5479 hci_req_add_le_scan_disable(&req);
5480
5481 BT_DBG("%s stopping background scanning", hdev->name);
5482 } else {
5483 /* If there is at least one pending LE connection, we should
5484 * keep the background scan running.
5485 */
5486
5487 /* If controller is connecting, we should not start scanning
5488 * since some controllers are not able to scan and connect at
5489 * the same time.
5490 */
5491 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5492 if (conn)
5493 return;
5494
5495 /* If controller is currently scanning, we stop it to ensure we
5496 * don't miss any advertising (due to duplicates filter).
5497 */
5498 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5499 hci_req_add_le_scan_disable(&req);
5500
8ef30fd3 5501 hci_req_add_le_passive_scan(&req);
5502
5503 BT_DBG("%s starting background scanning", hdev->name);
5504 }
5505
5506 err = hci_req_run(&req, update_background_scan_complete);
5507 if (err)
5508 BT_ERR("Failed to run HCI request: err %d", err);
5509}