/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
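
/* A usage sketch (assuming debugfs is mounted at /sys/kernel/debug):
 * Device Under Test mode for hci0 can be toggled from userspace with
 *
 *	echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * The write handler above issues HCI_OP_ENABLE_DUT_MODE (or HCI_OP_RESET
 * to leave the mode) synchronously and only flips the HCI_DUT_MODE bit
 * once the controller reports success.
 */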

static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int whitelist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->whitelist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int whitelist_open(struct inode *inode, struct file *file)
{
	return single_open(file, whitelist_show, inode->i_private);
}

static const struct file_operations whitelist_fops = {
	.open		= whitelist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
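
/* Example of the conversion above: a 16-bit UUID such as 0x110a is
 * stored on the list as the reversed 128-bit sequence
 *
 *	fb 34 9b 5f 80 00 00 80 00 10 00 00 0a 11 00 00
 *
 * and the copy loop restores the canonical Bluetooth base-UUID form
 * 0000110a-0000-1000-8000-00805f9b34fb that %pUb prints.
 */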

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");
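
/* The idle timeout is expressed in milliseconds: 0 disables automatic
 * sniff mode, any other value between 500 (half a second) and 3600000
 * (one hour) is accepted by the setter above.
 */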

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");
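
/* Both sniff interval limits are expressed in baseband slots (0.625 ms
 * each) and must be even, which is why the setters above reject odd
 * values and keep min <= max.
 */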

static int conn_info_min_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val > hdev->conn_info_max_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_min_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_min_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
			conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val < hdev->conn_info_min_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_max_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_max_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
			conn_info_max_age_set, "%llu\n");
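
/* These two values bound the age (in milliseconds, as used by the mgmt
 * Get Connection Information command) below which cached RSSI/TX-power
 * data is considered fresh enough to be returned without querying the
 * controller again.
 */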

static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->le_white_list, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
	.open		= white_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
		struct smp_irk *irk = list_entry(p, struct smp_irk, list);
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   __le64_to_cpu(ltk->rand), 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");
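
/* The LE connection interval limits above are in units of 1.25 ms, so
 * the accepted 0x0006-0x0c80 range corresponds to 7.5 ms up to 4 s.
 */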

static int conn_latency_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val > 0x01f3)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_latency = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_latency;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
			conn_latency_set, "%llu\n");

static int supervision_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x000a || val > 0x0c80)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_supv_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_supv_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
			supervision_timeout_set, "%llu\n");
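
/* The LE link supervision timeout is in units of 10 ms, so the accepted
 * 0x000a-0x0c80 range corresponds to 100 ms up to 32 s.
 */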

static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");
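
/* The advertising channel map is a bitmask of the three LE advertising
 * channels: 0x01 selects channel 37, 0x02 channel 38 and 0x04 channel
 * 39, so the full 0x07 means "advertise on all of them".
 */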

static int device_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct hci_conn_params *p;

	hci_dev_lock(hdev);
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
	.open		= device_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
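
/* A usage sketch (mirroring dut_mode_write() above, which takes
 * hci_req_lock() around the call): a single command can be sent and
 * its Command Complete event awaited with
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 * On success skb->data starts with the command's return parameters,
 * status byte first, and the caller owns the skb (kfree_skb() when
 * done).
 */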

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}
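
/* The value computed above feeds HCI_OP_WRITE_INQUIRY_MODE in
 * hci_setup_inquiry_mode() below: 0x00 selects standard inquiry
 * results, 0x01 results with RSSI and 0x02 the extended inquiry result
 * format. The manufacturer/revision special cases appear to cover
 * controllers known to handle the RSSI format without advertising it.
 */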

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x1f;

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("whitelist", 0444, hdev->debugfs, hdev,
			    &whitelist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
			    &conn_info_min_age_fops);
	debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
			    &conn_info_max_age_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_file("identity", 0400, hdev->debugfs,
				    hdev, &identity_fops);
		debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
				    hdev, &rpa_timeout_fops);
		debugfs_create_file("random_address", 0444, hdev->debugfs,
				    hdev, &random_address_fops);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

		/* For controllers with a public address, provide a debug
		 * option to force the usage of the configured static
		 * address. By default the public address is used.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			debugfs_create_file("force_static_address", 0644,
					    hdev->debugfs, hdev,
					    &force_static_address_fops);

		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
				    &white_list_fops);
		debugfs_create_file("identity_resolving_keys", 0400,
				    hdev->debugfs, hdev,
				    &identity_resolving_keys_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("conn_latency", 0644, hdev->debugfs,
				    hdev, &conn_latency_fops);
		debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
				    hdev, &supervision_timeout_fops);
		debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
				    hdev, &adv_channel_map_fops);
		debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
				    &device_list_fops);
		debugfs_create_u16("discov_interleaved_timeout", 0644,
				   hdev->debugfs,
				   &hdev->discov_interleaved_timeout);
	}

	return 0;
}

static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
8e87d142 1907/* Get HCI device by index.
1da177e4
LT
1908 * Device is held on return. */
1909struct hci_dev *hci_dev_get(int index)
1910{
8035ded4 1911 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
1912
1913 BT_DBG("%d", index);
1914
1915 if (index < 0)
1916 return NULL;
1917
1918 read_lock(&hci_dev_list_lock);
8035ded4 1919 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
1920 if (d->id == index) {
1921 hdev = hci_dev_hold(d);
1922 break;
1923 }
1924 }
1925 read_unlock(&hci_dev_list_lock);
1926 return hdev;
1927}
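/* Editor's sketch (not part of the original file): every successful
 * hci_dev_get() must be balanced by hci_dev_put(), exactly as the
 * ioctl helpers later in this file do. The helper name below is
 * hypothetical and only illustrates the pattern.
 */
static int example_with_hdev(int index)
{
	struct hci_dev *hdev = hci_dev_get(index);

	if (!hdev)
		return -ENODEV;

	BT_DBG("%s up %d", hdev->name, test_bit(HCI_UP, &hdev->flags));

	hci_dev_put(hdev);	/* drop the reference taken above */
	return 0;
}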
1da177e4
LT
1928
1929/* ---- Inquiry support ---- */
ff9ef578 1930
30dc78e1
JH
1931bool hci_discovery_active(struct hci_dev *hdev)
1932{
1933 struct discovery_state *discov = &hdev->discovery;
1934
6fbe195d 1935 switch (discov->state) {
343f935b 1936 case DISCOVERY_FINDING:
6fbe195d 1937 case DISCOVERY_RESOLVING:
30dc78e1
JH
1938 return true;
1939
6fbe195d
AG
1940 default:
1941 return false;
1942 }
30dc78e1
JH
1943}
1944
ff9ef578
JH
1945void hci_discovery_set_state(struct hci_dev *hdev, int state)
1946{
bb3e0a33
JH
1947 int old_state = hdev->discovery.state;
1948
ff9ef578
JH
1949 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1950
bb3e0a33 1951 if (old_state == state)
ff9ef578
JH
1952 return;
1953
bb3e0a33
JH
1954 hdev->discovery.state = state;
1955
ff9ef578
JH
1956 switch (state) {
1957 case DISCOVERY_STOPPED:
c54c3860
AG
1958 hci_update_background_scan(hdev);
1959
bb3e0a33 1960 if (old_state != DISCOVERY_STARTING)
7b99b659 1961 mgmt_discovering(hdev, 0);
ff9ef578
JH
1962 break;
1963 case DISCOVERY_STARTING:
1964 break;
343f935b 1965 case DISCOVERY_FINDING:
ff9ef578
JH
1966 mgmt_discovering(hdev, 1);
1967 break;
30dc78e1
JH
1968 case DISCOVERY_RESOLVING:
1969 break;
ff9ef578
JH
1970 case DISCOVERY_STOPPING:
1971 break;
1972 }
ff9ef578
JH
1973}
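/* Editor's sketch: hci_discovery_set_state() is called with hdev->lock
 * held; the inquiry and LE scan completion handlers later in this file
 * stop discovery exactly like this (the helper name is hypothetical):
 */
static void example_stop_discovery(struct hci_dev *hdev)
{
	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);
}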
1974
1f9b9a5d 1975void hci_inquiry_cache_flush(struct hci_dev *hdev)
1da177e4 1976{
30883512 1977 struct discovery_state *cache = &hdev->discovery;
b57c1a56 1978 struct inquiry_entry *p, *n;
1da177e4 1979
561aafbc
JH
1980 list_for_each_entry_safe(p, n, &cache->all, all) {
1981 list_del(&p->all);
b57c1a56 1982 kfree(p);
1da177e4 1983 }
561aafbc
JH
1984
1985 INIT_LIST_HEAD(&cache->unknown);
1986 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
1987}
1988
a8c5fb1a
GP
1989struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1990 bdaddr_t *bdaddr)
1da177e4 1991{
30883512 1992 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
1993 struct inquiry_entry *e;
1994
6ed93dc6 1995 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 1996
561aafbc
JH
1997 list_for_each_entry(e, &cache->all, all) {
1998 if (!bacmp(&e->data.bdaddr, bdaddr))
1999 return e;
2000 }
2001
2002 return NULL;
2003}
2004
2005struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 2006 bdaddr_t *bdaddr)
561aafbc 2007{
30883512 2008 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
2009 struct inquiry_entry *e;
2010
6ed93dc6 2011 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
2012
2013 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 2014 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
2015 return e;
2016 }
2017
2018 return NULL;
1da177e4
LT
2019}
2020
30dc78e1 2021struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
2022 bdaddr_t *bdaddr,
2023 int state)
30dc78e1
JH
2024{
2025 struct discovery_state *cache = &hdev->discovery;
2026 struct inquiry_entry *e;
2027
6ed93dc6 2028 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
2029
2030 list_for_each_entry(e, &cache->resolve, list) {
2031 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2032 return e;
2033 if (!bacmp(&e->data.bdaddr, bdaddr))
2034 return e;
2035 }
2036
2037 return NULL;
2038}
2039
a3d4e20a 2040void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
04124681 2041 struct inquiry_entry *ie)
a3d4e20a
JH
2042{
2043 struct discovery_state *cache = &hdev->discovery;
2044 struct list_head *pos = &cache->resolve;
2045 struct inquiry_entry *p;
2046
2047 list_del(&ie->list);
2048
2049 list_for_each_entry(p, &cache->resolve, list) {
2050 if (p->name_state != NAME_PENDING &&
a8c5fb1a 2051 abs(p->data.rssi) >= abs(ie->data.rssi))
a3d4e20a
JH
2052 break;
2053 pos = &p->list;
2054 }
2055
2056 list_add(&ie->list, pos);
2057}
2058
af58925c
MH
2059u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2060 bool name_known)
1da177e4 2061{
30883512 2062 struct discovery_state *cache = &hdev->discovery;
70f23020 2063 struct inquiry_entry *ie;
af58925c 2064 u32 flags = 0;
1da177e4 2065
6ed93dc6 2066 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1da177e4 2067
2b2fec4d
SJ
2068 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2069
af58925c
MH
2070 if (!data->ssp_mode)
2071 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
388fc8fa 2072
70f23020 2073 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
a3d4e20a 2074 if (ie) {
af58925c
MH
2075 if (!ie->data.ssp_mode)
2076 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
388fc8fa 2077
a3d4e20a 2078 if (ie->name_state == NAME_NEEDED &&
a8c5fb1a 2079 data->rssi != ie->data.rssi) {
a3d4e20a
JH
2080 ie->data.rssi = data->rssi;
2081 hci_inquiry_cache_update_resolve(hdev, ie);
2082 }
2083
561aafbc 2084 goto update;
a3d4e20a 2085 }
561aafbc
JH
2086
2087 /* Entry not in the cache. Add new one. */
2088 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
af58925c
MH
2089 if (!ie) {
2090 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2091 goto done;
2092 }
561aafbc
JH
2093
2094 list_add(&ie->all, &cache->all);
2095
2096 if (name_known) {
2097 ie->name_state = NAME_KNOWN;
2098 } else {
2099 ie->name_state = NAME_NOT_KNOWN;
2100 list_add(&ie->list, &cache->unknown);
2101 }
70f23020 2102
561aafbc
JH
2103update:
2104 if (name_known && ie->name_state != NAME_KNOWN &&
a8c5fb1a 2105 ie->name_state != NAME_PENDING) {
561aafbc
JH
2106 ie->name_state = NAME_KNOWN;
2107 list_del(&ie->list);
1da177e4
LT
2108 }
2109
70f23020
AE
2110 memcpy(&ie->data, data, sizeof(*data));
2111 ie->timestamp = jiffies;
1da177e4 2112 cache->timestamp = jiffies;
3175405b
JH
2113
2114 if (ie->name_state == NAME_NOT_KNOWN)
af58925c 2115 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
3175405b 2116
af58925c
MH
2117done:
2118 return flags;
1da177e4
LT
2119}
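/* Editor's sketch: an inquiry result handler (see hci_event.c) fills a
 * struct inquiry_data, updates the cache under hdev->lock and forwards
 * the returned flags to the management core. Assumed context, helper
 * name hypothetical:
 */
static u32 example_handle_inquiry_result(struct hci_dev *hdev,
					 struct inquiry_data *data)
{
	u32 flags;

	hci_dev_lock(hdev);
	/* false: the remote name is not known yet */
	flags = hci_inquiry_cache_update(hdev, data, false);
	hci_dev_unlock(hdev);

	/* flags may now contain MGMT_DEV_FOUND_CONFIRM_NAME and/or
	 * MGMT_DEV_FOUND_LEGACY_PAIRING for the device found event */
	return flags;
}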
2120
2121static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2122{
30883512 2123 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
2124 struct inquiry_info *info = (struct inquiry_info *) buf;
2125 struct inquiry_entry *e;
2126 int copied = 0;
2127
561aafbc 2128 list_for_each_entry(e, &cache->all, all) {
1da177e4 2129 struct inquiry_data *data = &e->data;
b57c1a56
JH
2130
2131 if (copied >= num)
2132 break;
2133
1da177e4
LT
2134 bacpy(&info->bdaddr, &data->bdaddr);
2135 info->pscan_rep_mode = data->pscan_rep_mode;
2136 info->pscan_period_mode = data->pscan_period_mode;
2137 info->pscan_mode = data->pscan_mode;
2138 memcpy(info->dev_class, data->dev_class, 3);
2139 info->clock_offset = data->clock_offset;
b57c1a56 2140
1da177e4 2141 info++;
b57c1a56 2142 copied++;
1da177e4
LT
2143 }
2144
2145 BT_DBG("cache %p, copied %d", cache, copied);
2146 return copied;
2147}
2148
42c6b129 2149static void hci_inq_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
2150{
2151 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
42c6b129 2152 struct hci_dev *hdev = req->hdev;
1da177e4
LT
2153 struct hci_cp_inquiry cp;
2154
2155 BT_DBG("%s", hdev->name);
2156
2157 if (test_bit(HCI_INQUIRY, &hdev->flags))
2158 return;
2159
2160 /* Start Inquiry */
2161 memcpy(&cp.lap, &ir->lap, 3);
2162 cp.length = ir->length;
2163 cp.num_rsp = ir->num_rsp;
42c6b129 2164 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
2165}
2166
3e13fa1e
AG
2167static int wait_inquiry(void *word)
2168{
2169 schedule();
2170 return signal_pending(current);
2171}
2172
1da177e4
LT
2173int hci_inquiry(void __user *arg)
2174{
2175 __u8 __user *ptr = arg;
2176 struct hci_inquiry_req ir;
2177 struct hci_dev *hdev;
2178 int err = 0, do_inquiry = 0, max_rsp;
2179 long timeo;
2180 __u8 *buf;
2181
2182 if (copy_from_user(&ir, ptr, sizeof(ir)))
2183 return -EFAULT;
2184
5a08ecce
AE
2185 hdev = hci_dev_get(ir.dev_id);
2186 if (!hdev)
1da177e4
LT
2187 return -ENODEV;
2188
0736cfa8
MH
2189 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2190 err = -EBUSY;
2191 goto done;
2192 }
2193
4a964404 2194 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
2195 err = -EOPNOTSUPP;
2196 goto done;
2197 }
2198
5b69bef5
MH
2199 if (hdev->dev_type != HCI_BREDR) {
2200 err = -EOPNOTSUPP;
2201 goto done;
2202 }
2203
56f87901
JH
2204 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2205 err = -EOPNOTSUPP;
2206 goto done;
2207 }
2208
09fd0de5 2209 hci_dev_lock(hdev);
8e87d142 2210 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 2211 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1f9b9a5d 2212 hci_inquiry_cache_flush(hdev);
1da177e4
LT
2213 do_inquiry = 1;
2214 }
09fd0de5 2215 hci_dev_unlock(hdev);
1da177e4 2216
04837f64 2217 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
2218
2219 if (do_inquiry) {
01178cd4
JH
2220 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2221 timeo);
70f23020
AE
2222 if (err < 0)
2223 goto done;
3e13fa1e
AG
2224
2225 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2226 * cleared). If it is interrupted by a signal, return -EINTR.
2227 */
2228 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2229 TASK_INTERRUPTIBLE))
2230 return -EINTR;
70f23020 2231 }
1da177e4 2232
8fc9ced3
GP
2233 /* For an unlimited number of responses, use a buffer with
2234 * 255 entries.
2235 */
1da177e4
LT
2236 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2237
2238 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2239 * copy it to the user space.
2240 */
01df8c31 2241 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 2242 if (!buf) {
1da177e4
LT
2243 err = -ENOMEM;
2244 goto done;
2245 }
2246
09fd0de5 2247 hci_dev_lock(hdev);
1da177e4 2248 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 2249 hci_dev_unlock(hdev);
1da177e4
LT
2250
2251 BT_DBG("num_rsp %d", ir.num_rsp);
2252
2253 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2254 ptr += sizeof(ir);
2255 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 2256 ir.num_rsp))
1da177e4 2257 err = -EFAULT;
8e87d142 2258 } else
1da177e4
LT
2259 err = -EFAULT;
2260
2261 kfree(buf);
2262
2263done:
2264 hci_dev_put(hdev);
2265 return err;
2266}
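/* Editor's sketch (userspace, not kernel code): hci_inquiry() above is
 * reached through the HCIINQUIRY ioctl on a raw HCI socket. A minimal
 * caller, assuming the BlueZ <bluetooth/bluetooth.h> and
 * <bluetooth/hci.h> headers:
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

int example_inquiry(void)
{
	struct {
		struct hci_inquiry_req ir;
		struct inquiry_info info[255];
	} req;
	int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	if (sk < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.ir.dev_id  = 0;		/* hci0 */
	req.ir.flags   = IREQ_CACHE_FLUSH;
	req.ir.lap[0]  = 0x33;		/* GIAC 0x9e8b33 */
	req.ir.lap[1]  = 0x8b;
	req.ir.lap[2]  = 0x9e;
	req.ir.length  = 8;		/* inquiry length units */
	req.ir.num_rsp = 255;		/* 0 would also mean unlimited */

	return ioctl(sk, HCIINQUIRY, &req);
}
#endif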
2267
cbed0ca1 2268static int hci_dev_do_open(struct hci_dev *hdev)
1da177e4 2269{
1da177e4
LT
2270 int ret = 0;
2271
1da177e4
LT
2272 BT_DBG("%s %p", hdev->name, hdev);
2273
2274 hci_req_lock(hdev);
2275
94324962
JH
2276 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2277 ret = -ENODEV;
2278 goto done;
2279 }
2280
d603b76b
MH
2281 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2282 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
a5c8f270
MH
2283 /* Check for rfkill but allow the HCI setup stage to
2284 * proceed (which in itself doesn't cause any RF activity).
2285 */
2286 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2287 ret = -ERFKILL;
2288 goto done;
2289 }
2290
2291 /* Check for valid public address or a configured static
2292 * random address, but let the HCI setup proceed to
2293 * be able to determine if there is a public address
2294 * or not.
2295 *
c6beca0e
MH
2296 * In case of user channel usage, it is not important
2297 * if a public address or static random address is
2298 * available.
2299 *
a5c8f270
MH
2300 * This check is only valid for BR/EDR controllers
2301 * since AMP controllers do not have an address.
2302 */
c6beca0e
MH
2303 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2304 hdev->dev_type == HCI_BREDR &&
a5c8f270
MH
2305 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2306 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2307 ret = -EADDRNOTAVAIL;
2308 goto done;
2309 }
611b30f7
MH
2310 }
2311
1da177e4
LT
2312 if (test_bit(HCI_UP, &hdev->flags)) {
2313 ret = -EALREADY;
2314 goto done;
2315 }
2316
1da177e4
LT
2317 if (hdev->open(hdev)) {
2318 ret = -EIO;
2319 goto done;
2320 }
2321
f41c70c4
MH
2322 atomic_set(&hdev->cmd_cnt, 1);
2323 set_bit(HCI_INIT, &hdev->flags);
2324
af202f84
MH
2325 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2326 if (hdev->setup)
2327 ret = hdev->setup(hdev);
f41c70c4 2328
af202f84
MH
2329 /* The transport driver can set these quirks before
2330 * creating the HCI device or in its setup callback.
2331 *
2332 * In case any of them is set, the controller has to
2333 * start up as unconfigured.
2334 */
eb1904f4
MH
2335 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2336 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
89bc22d2 2337 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
0ebca7d6
MH
2338
2339 /* For an unconfigured controller it is required to
2340 * read at least the version information provided by
2341 * the Read Local Version Information command.
2342 *
2343 * If the set_bdaddr driver callback is provided, then
2344 * also the original Bluetooth public device address
2345 * will be read using the Read BD Address command.
2346 */
2347 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2348 ret = __hci_unconf_init(hdev);
89bc22d2
MH
2349 }
2350
9713c17b
MH
2351 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2352 /* If public address change is configured, ensure that
2353 * the address gets programmed. If the driver does not
2354 * support changing the public address, fail the power
2355 * on procedure.
2356 */
2357 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2358 hdev->set_bdaddr)
24c457e2
MH
2359 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2360 else
2361 ret = -EADDRNOTAVAIL;
2362 }
2363
f41c70c4 2364 if (!ret) {
4a964404 2365 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
0736cfa8 2366 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
f41c70c4 2367 ret = __hci_init(hdev);
1da177e4
LT
2368 }
2369
f41c70c4
MH
2370 clear_bit(HCI_INIT, &hdev->flags);
2371
1da177e4
LT
2372 if (!ret) {
2373 hci_dev_hold(hdev);
d6bfd59c 2374 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1da177e4
LT
2375 set_bit(HCI_UP, &hdev->flags);
2376 hci_notify(hdev, HCI_DEV_UP);
bb4b2a9a 2377 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
d603b76b 2378 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
4a964404 2379 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
0736cfa8 2380 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1514b892 2381 hdev->dev_type == HCI_BREDR) {
09fd0de5 2382 hci_dev_lock(hdev);
744cf19e 2383 mgmt_powered(hdev, 1);
09fd0de5 2384 hci_dev_unlock(hdev);
56e5cb86 2385 }
8e87d142 2386 } else {
1da177e4 2387 /* Init failed, cleanup */
3eff45ea 2388 flush_work(&hdev->tx_work);
c347b765 2389 flush_work(&hdev->cmd_work);
b78752cc 2390 flush_work(&hdev->rx_work);
1da177e4
LT
2391
2392 skb_queue_purge(&hdev->cmd_q);
2393 skb_queue_purge(&hdev->rx_q);
2394
2395 if (hdev->flush)
2396 hdev->flush(hdev);
2397
2398 if (hdev->sent_cmd) {
2399 kfree_skb(hdev->sent_cmd);
2400 hdev->sent_cmd = NULL;
2401 }
2402
2403 hdev->close(hdev);
fee746b0 2404 hdev->flags &= BIT(HCI_RAW);
1da177e4
LT
2405 }
2406
2407done:
2408 hci_req_unlock(hdev);
1da177e4
LT
2409 return ret;
2410}
2411
cbed0ca1
JH
2412/* ---- HCI ioctl helpers ---- */
2413
2414int hci_dev_open(__u16 dev)
2415{
2416 struct hci_dev *hdev;
2417 int err;
2418
2419 hdev = hci_dev_get(dev);
2420 if (!hdev)
2421 return -ENODEV;
2422
4a964404 2423 /* Devices that are marked as unconfigured can only be powered
fee746b0
MH
2424 * up as user channel. Trying to bring them up as normal devices
2425 * will result in a failure. Only user channel operation is
2426 * possible.
2427 *
2428 * When this function is called for a user channel, the flag
2429 * HCI_USER_CHANNEL will be set first before attempting to
2430 * open the device.
2431 */
4a964404 2432 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
fee746b0
MH
2433 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2434 err = -EOPNOTSUPP;
2435 goto done;
2436 }
2437
e1d08f40
JH
2438 /* We need to ensure that no other power on/off work is pending
2439 * before proceeding to call hci_dev_do_open. This is
2440 * particularly important if the setup procedure has not yet
2441 * completed.
2442 */
2443 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2444 cancel_delayed_work(&hdev->power_off);
2445
a5c8f270
MH
2446 /* After this call it is guaranteed that the setup procedure
2447 * has finished. This means that error conditions like RFKILL
2448 * or no valid public or static random address apply.
2449 */
e1d08f40
JH
2450 flush_workqueue(hdev->req_workqueue);
2451
12aa4f0a
MH
2452 /* For controllers not using the management interface and that
2453 * are brought up using legacy ioctl, set the HCI_PAIRABLE bit
2454 * so that pairing works for them. Once the management interface
2455 * is in use this bit will be cleared again and userspace has
2456 * to explicitly enable it.
2457 */
2458 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2459 !test_bit(HCI_MGMT, &hdev->dev_flags))
2460 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2461
cbed0ca1
JH
2462 err = hci_dev_do_open(hdev);
2463
fee746b0 2464done:
cbed0ca1 2465 hci_dev_put(hdev);
cbed0ca1
JH
2466 return err;
2467}
2468
d7347f3c
JH
2469/* This function requires the caller holds hdev->lock */
2470static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2471{
2472 struct hci_conn_params *p;
2473
2474 list_for_each_entry(p, &hdev->le_conn_params, list)
2475 list_del_init(&p->action);
2476
2477 BT_DBG("All LE pending actions cleared");
2478}
2479
1da177e4
LT
2480static int hci_dev_do_close(struct hci_dev *hdev)
2481{
2482 BT_DBG("%s %p", hdev->name, hdev);
2483
78c04c0b
VCG
2484 cancel_delayed_work(&hdev->power_off);
2485
1da177e4
LT
2486 hci_req_cancel(hdev, ENODEV);
2487 hci_req_lock(hdev);
2488
2489 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
65cc2b49 2490 cancel_delayed_work_sync(&hdev->cmd_timer);
1da177e4
LT
2491 hci_req_unlock(hdev);
2492 return 0;
2493 }
2494
3eff45ea
GP
2495 /* Flush RX and TX works */
2496 flush_work(&hdev->tx_work);
b78752cc 2497 flush_work(&hdev->rx_work);
1da177e4 2498
16ab91ab 2499 if (hdev->discov_timeout > 0) {
e0f9309f 2500 cancel_delayed_work(&hdev->discov_off);
16ab91ab 2501 hdev->discov_timeout = 0;
5e5282bb 2502 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
310a3d48 2503 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
16ab91ab
JH
2504 }
2505
a8b2d5c2 2506 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
7d78525d
JH
2507 cancel_delayed_work(&hdev->service_cache);
2508
7ba8b4be 2509 cancel_delayed_work_sync(&hdev->le_scan_disable);
4518bb0f
JH
2510
2511 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2512 cancel_delayed_work_sync(&hdev->rpa_expired);
7ba8b4be 2513
09fd0de5 2514 hci_dev_lock(hdev);
1f9b9a5d 2515 hci_inquiry_cache_flush(hdev);
1da177e4 2516 hci_conn_hash_flush(hdev);
d7347f3c 2517 hci_pend_le_actions_clear(hdev);
09fd0de5 2518 hci_dev_unlock(hdev);
1da177e4
LT
2519
2520 hci_notify(hdev, HCI_DEV_DOWN);
2521
2522 if (hdev->flush)
2523 hdev->flush(hdev);
2524
2525 /* Reset device */
2526 skb_queue_purge(&hdev->cmd_q);
2527 atomic_set(&hdev->cmd_cnt, 1);
4a964404
MH
2528 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2529 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
a6c511c6 2530 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1da177e4 2531 set_bit(HCI_INIT, &hdev->flags);
01178cd4 2532 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1da177e4
LT
2533 clear_bit(HCI_INIT, &hdev->flags);
2534 }
2535
c347b765
GP
2536 /* flush cmd work */
2537 flush_work(&hdev->cmd_work);
1da177e4
LT
2538
2539 /* Drop queues */
2540 skb_queue_purge(&hdev->rx_q);
2541 skb_queue_purge(&hdev->cmd_q);
2542 skb_queue_purge(&hdev->raw_q);
2543
2544 /* Drop last sent command */
2545 if (hdev->sent_cmd) {
65cc2b49 2546 cancel_delayed_work_sync(&hdev->cmd_timer);
1da177e4
LT
2547 kfree_skb(hdev->sent_cmd);
2548 hdev->sent_cmd = NULL;
2549 }
2550
b6ddb638
JH
2551 kfree_skb(hdev->recv_evt);
2552 hdev->recv_evt = NULL;
2553
1da177e4
LT
2554 /* After this point our queues are empty
2555 * and no tasks are scheduled. */
2556 hdev->close(hdev);
2557
35b973c9 2558 /* Clear flags */
fee746b0 2559 hdev->flags &= BIT(HCI_RAW);
35b973c9
JH
2560 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2561
93c311a0
MH
2562 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2563 if (hdev->dev_type == HCI_BREDR) {
2564 hci_dev_lock(hdev);
2565 mgmt_powered(hdev, 0);
2566 hci_dev_unlock(hdev);
2567 }
8ee56540 2568 }
5add6af8 2569
ced5c338 2570 /* Controller radio is available but is currently powered down */
536619e8 2571 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
ced5c338 2572
e59fda8d 2573 memset(hdev->eir, 0, sizeof(hdev->eir));
09b3c3fb 2574 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
7a4cd51d 2575 bacpy(&hdev->random_addr, BDADDR_ANY);
e59fda8d 2576
1da177e4
LT
2577 hci_req_unlock(hdev);
2578
2579 hci_dev_put(hdev);
2580 return 0;
2581}
2582
2583int hci_dev_close(__u16 dev)
2584{
2585 struct hci_dev *hdev;
2586 int err;
2587
70f23020
AE
2588 hdev = hci_dev_get(dev);
2589 if (!hdev)
1da177e4 2590 return -ENODEV;
8ee56540 2591
0736cfa8
MH
2592 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2593 err = -EBUSY;
2594 goto done;
2595 }
2596
8ee56540
MH
2597 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2598 cancel_delayed_work(&hdev->power_off);
2599
1da177e4 2600 err = hci_dev_do_close(hdev);
8ee56540 2601
0736cfa8 2602done:
1da177e4
LT
2603 hci_dev_put(hdev);
2604 return err;
2605}
2606
2607int hci_dev_reset(__u16 dev)
2608{
2609 struct hci_dev *hdev;
2610 int ret = 0;
2611
70f23020
AE
2612 hdev = hci_dev_get(dev);
2613 if (!hdev)
1da177e4
LT
2614 return -ENODEV;
2615
2616 hci_req_lock(hdev);
1da177e4 2617
808a049e
MH
2618 if (!test_bit(HCI_UP, &hdev->flags)) {
2619 ret = -ENETDOWN;
1da177e4 2620 goto done;
808a049e 2621 }
1da177e4 2622
0736cfa8
MH
2623 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2624 ret = -EBUSY;
2625 goto done;
2626 }
2627
4a964404 2628 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
2629 ret = -EOPNOTSUPP;
2630 goto done;
2631 }
2632
1da177e4
LT
2633 /* Drop queues */
2634 skb_queue_purge(&hdev->rx_q);
2635 skb_queue_purge(&hdev->cmd_q);
2636
09fd0de5 2637 hci_dev_lock(hdev);
1f9b9a5d 2638 hci_inquiry_cache_flush(hdev);
1da177e4 2639 hci_conn_hash_flush(hdev);
09fd0de5 2640 hci_dev_unlock(hdev);
1da177e4
LT
2641
2642 if (hdev->flush)
2643 hdev->flush(hdev);
2644
8e87d142 2645 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 2646 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4 2647
fee746b0 2648 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1da177e4
LT
2649
2650done:
1da177e4
LT
2651 hci_req_unlock(hdev);
2652 hci_dev_put(hdev);
2653 return ret;
2654}
2655
2656int hci_dev_reset_stat(__u16 dev)
2657{
2658 struct hci_dev *hdev;
2659 int ret = 0;
2660
70f23020
AE
2661 hdev = hci_dev_get(dev);
2662 if (!hdev)
1da177e4
LT
2663 return -ENODEV;
2664
0736cfa8
MH
2665 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2666 ret = -EBUSY;
2667 goto done;
2668 }
2669
4a964404 2670 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
2671 ret = -EOPNOTSUPP;
2672 goto done;
2673 }
2674
1da177e4
LT
2675 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2676
0736cfa8 2677done:
1da177e4 2678 hci_dev_put(hdev);
1da177e4
LT
2679 return ret;
2680}
2681
123abc08
JH
2682static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2683{
bc6d2d04 2684 bool conn_changed, discov_changed;
123abc08
JH
2685
2686 BT_DBG("%s scan 0x%02x", hdev->name, scan);
2687
2688 if ((scan & SCAN_PAGE))
2689 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2690 &hdev->dev_flags);
2691 else
2692 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2693 &hdev->dev_flags);
2694
bc6d2d04
JH
2695 if ((scan & SCAN_INQUIRY)) {
2696 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2697 &hdev->dev_flags);
2698 } else {
2699 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2700 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2701 &hdev->dev_flags);
2702 }
2703
123abc08
JH
2704 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2705 return;
2706
bc6d2d04
JH
2707 if (conn_changed || discov_changed) {
2708 /* In case this was disabled through mgmt */
2709 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2710
2711 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2712 mgmt_update_adv_data(hdev);
2713
123abc08 2714 mgmt_new_settings(hdev);
bc6d2d04 2715 }
123abc08
JH
2716}
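/* Editor's sketch (userspace): the HCISETSCAN case below ends up in
 * hci_update_scan_state() with the standard scan enable bits, e.g. to
 * make hci0 both connectable and discoverable (sk is an assumed raw
 * HCI socket):
 */
#if 0
	struct hci_dev_req dr = {
		.dev_id  = 0,				/* hci0 */
		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,
	};

	ioctl(sk, HCISETSCAN, &dr);
#endif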
2717
1da177e4
LT
2718int hci_dev_cmd(unsigned int cmd, void __user *arg)
2719{
2720 struct hci_dev *hdev;
2721 struct hci_dev_req dr;
2722 int err = 0;
2723
2724 if (copy_from_user(&dr, arg, sizeof(dr)))
2725 return -EFAULT;
2726
70f23020
AE
2727 hdev = hci_dev_get(dr.dev_id);
2728 if (!hdev)
1da177e4
LT
2729 return -ENODEV;
2730
0736cfa8
MH
2731 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2732 err = -EBUSY;
2733 goto done;
2734 }
2735
4a964404 2736 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
2737 err = -EOPNOTSUPP;
2738 goto done;
2739 }
2740
5b69bef5
MH
2741 if (hdev->dev_type != HCI_BREDR) {
2742 err = -EOPNOTSUPP;
2743 goto done;
2744 }
2745
56f87901
JH
2746 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2747 err = -EOPNOTSUPP;
2748 goto done;
2749 }
2750
1da177e4
LT
2751 switch (cmd) {
2752 case HCISETAUTH:
01178cd4
JH
2753 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2754 HCI_INIT_TIMEOUT);
1da177e4
LT
2755 break;
2756
2757 case HCISETENCRYPT:
2758 if (!lmp_encrypt_capable(hdev)) {
2759 err = -EOPNOTSUPP;
2760 break;
2761 }
2762
2763 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2764 /* Auth must be enabled first */
01178cd4
JH
2765 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2766 HCI_INIT_TIMEOUT);
1da177e4
LT
2767 if (err)
2768 break;
2769 }
2770
01178cd4
JH
2771 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2772 HCI_INIT_TIMEOUT);
1da177e4
LT
2773 break;
2774
2775 case HCISETSCAN:
01178cd4
JH
2776 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2777 HCI_INIT_TIMEOUT);
91a668b0 2778
bc6d2d04
JH
2779 /* Ensure that the connectable and discoverable states
2780 * get correctly modified as this was a non-mgmt change.
91a668b0 2781 */
123abc08
JH
2782 if (!err)
2783 hci_update_scan_state(hdev, dr.dev_opt);
1da177e4
LT
2784 break;
2785
1da177e4 2786 case HCISETLINKPOL:
01178cd4
JH
2787 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2788 HCI_INIT_TIMEOUT);
1da177e4
LT
2789 break;
2790
2791 case HCISETLINKMODE:
e4e8e37c
MH
2792 hdev->link_mode = ((__u16) dr.dev_opt) &
2793 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2794 break;
2795
2796 case HCISETPTYPE:
2797 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
2798 break;
2799
2800 case HCISETACLMTU:
e4e8e37c
MH
2801 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2802 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
2803 break;
2804
2805 case HCISETSCOMTU:
e4e8e37c
MH
2806 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2807 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
2808 break;
2809
2810 default:
2811 err = -EINVAL;
2812 break;
2813 }
e4e8e37c 2814
0736cfa8 2815done:
1da177e4
LT
2816 hci_dev_put(hdev);
2817 return err;
2818}
2819
2820int hci_get_dev_list(void __user *arg)
2821{
8035ded4 2822 struct hci_dev *hdev;
1da177e4
LT
2823 struct hci_dev_list_req *dl;
2824 struct hci_dev_req *dr;
1da177e4
LT
2825 int n = 0, size, err;
2826 __u16 dev_num;
2827
2828 if (get_user(dev_num, (__u16 __user *) arg))
2829 return -EFAULT;
2830
2831 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2832 return -EINVAL;
2833
2834 size = sizeof(*dl) + dev_num * sizeof(*dr);
2835
70f23020
AE
2836 dl = kzalloc(size, GFP_KERNEL);
2837 if (!dl)
1da177e4
LT
2838 return -ENOMEM;
2839
2840 dr = dl->dev_req;
2841
f20d09d5 2842 read_lock(&hci_dev_list_lock);
8035ded4 2843 list_for_each_entry(hdev, &hci_dev_list, list) {
2e84d8db
MH
2844 unsigned long flags = hdev->flags;
2845
2846 /* When the auto-off is configured it means the transport
2847 * is running, but in that case still indicate that the
2848 * device is actually down.
2849 */
2850 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2851 flags &= ~BIT(HCI_UP);
c542a06c 2852
1da177e4 2853 (dr + n)->dev_id = hdev->id;
2e84d8db 2854 (dr + n)->dev_opt = flags;
c542a06c 2855
1da177e4
LT
2856 if (++n >= dev_num)
2857 break;
2858 }
f20d09d5 2859 read_unlock(&hci_dev_list_lock);
1da177e4
LT
2860
2861 dl->dev_num = n;
2862 size = sizeof(*dl) + n * sizeof(*dr);
2863
2864 err = copy_to_user(arg, dl, size);
2865 kfree(dl);
2866
2867 return err ? -EFAULT : 0;
2868}
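/* Editor's sketch (userspace): hci_get_dev_list() above serves the
 * HCIGETDEVLIST ioctl. A minimal caller (sk is an assumed raw HCI
 * socket, HCI_MAX_DEV comes from the HCI headers):
 */
#if 0
	struct hci_dev_list_req *dl;
	int i;

	dl = calloc(1, sizeof(*dl) + HCI_MAX_DEV * sizeof(struct hci_dev_req));
	if (dl) {
		dl->dev_num = HCI_MAX_DEV;
		if (ioctl(sk, HCIGETDEVLIST, dl) == 0)
			for (i = 0; i < dl->dev_num; i++)
				printf("hci%u flags 0x%x\n",
				       dl->dev_req[i].dev_id,
				       dl->dev_req[i].dev_opt);
		free(dl);
	}
#endif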
2869
2870int hci_get_dev_info(void __user *arg)
2871{
2872 struct hci_dev *hdev;
2873 struct hci_dev_info di;
2e84d8db 2874 unsigned long flags;
1da177e4
LT
2875 int err = 0;
2876
2877 if (copy_from_user(&di, arg, sizeof(di)))
2878 return -EFAULT;
2879
70f23020
AE
2880 hdev = hci_dev_get(di.dev_id);
2881 if (!hdev)
1da177e4
LT
2882 return -ENODEV;
2883
2e84d8db
MH
2884 /* When the auto-off is configured it means the transport
2885 * is running, but in that case still indicate that the
2886 * device is actually down.
2887 */
2888 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2889 flags = hdev->flags & ~BIT(HCI_UP);
2890 else
2891 flags = hdev->flags;
ab81cbf9 2892
1da177e4
LT
2893 strcpy(di.name, hdev->name);
2894 di.bdaddr = hdev->bdaddr;
60f2a3ed 2895 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2e84d8db 2896 di.flags = flags;
1da177e4 2897 di.pkt_type = hdev->pkt_type;
572c7f84
JH
2898 if (lmp_bredr_capable(hdev)) {
2899 di.acl_mtu = hdev->acl_mtu;
2900 di.acl_pkts = hdev->acl_pkts;
2901 di.sco_mtu = hdev->sco_mtu;
2902 di.sco_pkts = hdev->sco_pkts;
2903 } else {
2904 di.acl_mtu = hdev->le_mtu;
2905 di.acl_pkts = hdev->le_pkts;
2906 di.sco_mtu = 0;
2907 di.sco_pkts = 0;
2908 }
1da177e4
LT
2909 di.link_policy = hdev->link_policy;
2910 di.link_mode = hdev->link_mode;
2911
2912 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2913 memcpy(&di.features, &hdev->features, sizeof(di.features));
2914
2915 if (copy_to_user(arg, &di, sizeof(di)))
2916 err = -EFAULT;
2917
2918 hci_dev_put(hdev);
2919
2920 return err;
2921}
2922
2923/* ---- Interface to HCI drivers ---- */
2924
611b30f7
MH
2925static int hci_rfkill_set_block(void *data, bool blocked)
2926{
2927 struct hci_dev *hdev = data;
2928
2929 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2930
0736cfa8
MH
2931 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2932 return -EBUSY;
2933
5e130367
JH
2934 if (blocked) {
2935 set_bit(HCI_RFKILLED, &hdev->dev_flags);
d603b76b
MH
2936 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2937 !test_bit(HCI_CONFIG, &hdev->dev_flags))
bf543036 2938 hci_dev_do_close(hdev);
5e130367
JH
2939 } else {
2940 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
1025c04c 2941 }
611b30f7
MH
2942
2943 return 0;
2944}
2945
2946static const struct rfkill_ops hci_rfkill_ops = {
2947 .set_block = hci_rfkill_set_block,
2948};
2949
ab81cbf9
JH
2950static void hci_power_on(struct work_struct *work)
2951{
2952 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
96570ffc 2953 int err;
ab81cbf9
JH
2954
2955 BT_DBG("%s", hdev->name);
2956
cbed0ca1 2957 err = hci_dev_do_open(hdev);
96570ffc
JH
2958 if (err < 0) {
2959 mgmt_set_powered_failed(hdev, err);
ab81cbf9 2960 return;
96570ffc 2961 }
ab81cbf9 2962
a5c8f270
MH
2963 /* During the HCI setup phase, a few error conditions are
2964 * ignored and they need to be checked now. If they are still
2965 * valid, it is important to turn the device back off.
2966 */
2967 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
4a964404 2968 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
a5c8f270
MH
2969 (hdev->dev_type == HCI_BREDR &&
2970 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2971 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
bf543036
JH
2972 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2973 hci_dev_do_close(hdev);
2974 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
19202573
JH
2975 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2976 HCI_AUTO_OFF_TIMEOUT);
bf543036 2977 }
ab81cbf9 2978
fee746b0 2979 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
4a964404
MH
2980 /* For unconfigured devices, set the HCI_RAW flag
2981 * so that userspace can easily identify them.
4a964404
MH
2982 */
2983 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2984 set_bit(HCI_RAW, &hdev->flags);
0602a8ad
MH
2985
2986 /* For fully configured devices, this will send
2987 * the Index Added event. For unconfigured devices,
2988 * it will send an Unconfigured Index Added event.
2989 *
2990 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2991 * and no event will be sent.
2992 */
2993 mgmt_index_added(hdev);
d603b76b 2994 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
5ea234d3
MH
2995 /* When the controller is now configured, then it
2996 * is important to clear the HCI_RAW flag.
2997 */
2998 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2999 clear_bit(HCI_RAW, &hdev->flags);
3000
d603b76b
MH
3001 /* Powering on the controller with HCI_CONFIG set only
3002 * happens with the transition from unconfigured to
3003 * configured. This will send the Index Added event.
3004 */
3005 mgmt_index_added(hdev);
fee746b0 3006 }
ab81cbf9
JH
3007}
3008
3009static void hci_power_off(struct work_struct *work)
3010{
3243553f 3011 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 3012 power_off.work);
ab81cbf9
JH
3013
3014 BT_DBG("%s", hdev->name);
3015
8ee56540 3016 hci_dev_do_close(hdev);
ab81cbf9
JH
3017}
3018
16ab91ab
JH
3019static void hci_discov_off(struct work_struct *work)
3020{
3021 struct hci_dev *hdev;
16ab91ab
JH
3022
3023 hdev = container_of(work, struct hci_dev, discov_off.work);
3024
3025 BT_DBG("%s", hdev->name);
3026
d1967ff8 3027 mgmt_discoverable_timeout(hdev);
16ab91ab
JH
3028}
3029
35f7498a 3030void hci_uuids_clear(struct hci_dev *hdev)
2aeb9a1a 3031{
4821002c 3032 struct bt_uuid *uuid, *tmp;
2aeb9a1a 3033
4821002c
JH
3034 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3035 list_del(&uuid->list);
2aeb9a1a
JH
3036 kfree(uuid);
3037 }
2aeb9a1a
JH
3038}
3039
35f7498a 3040void hci_link_keys_clear(struct hci_dev *hdev)
55ed8ca1
JH
3041{
3042 struct list_head *p, *n;
3043
3044 list_for_each_safe(p, n, &hdev->link_keys) {
3045 struct link_key *key;
3046
3047 key = list_entry(p, struct link_key, list);
3048
3049 list_del(p);
3050 kfree(key);
3051 }
55ed8ca1
JH
3052}
3053
35f7498a 3054void hci_smp_ltks_clear(struct hci_dev *hdev)
b899efaf
VCG
3055{
3056 struct smp_ltk *k, *tmp;
3057
3058 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3059 list_del(&k->list);
3060 kfree(k);
3061 }
b899efaf
VCG
3062}
3063
970c4e46
JH
3064void hci_smp_irks_clear(struct hci_dev *hdev)
3065{
3066 struct smp_irk *k, *tmp;
3067
3068 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3069 list_del(&k->list);
3070 kfree(k);
3071 }
3072}
3073
55ed8ca1
JH
3074struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3075{
8035ded4 3076 struct link_key *k;
55ed8ca1 3077
8035ded4 3078 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
3079 if (bacmp(bdaddr, &k->bdaddr) == 0)
3080 return k;
55ed8ca1
JH
3081
3082 return NULL;
3083}
3084
745c0ce3 3085static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 3086 u8 key_type, u8 old_key_type)
d25e28ab
JH
3087{
3088 /* Legacy key */
3089 if (key_type < 0x03)
745c0ce3 3090 return true;
d25e28ab
JH
3091
3092 /* Debug keys are insecure so don't store them persistently */
3093 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 3094 return false;
d25e28ab
JH
3095
3096 /* Changed combination key and there's no previous one */
3097 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 3098 return false;
d25e28ab
JH
3099
3100 /* Security mode 3 case */
3101 if (!conn)
745c0ce3 3102 return true;
d25e28ab
JH
3103
3104 /* Neither the local nor the remote side set no-bonding as a requirement */
3105 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 3106 return true;
d25e28ab
JH
3107
3108 /* Local side had dedicated bonding as requirement */
3109 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 3110 return true;
d25e28ab
JH
3111
3112 /* Remote side had dedicated bonding as requirement */
3113 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 3114 return true;
d25e28ab
JH
3115
3116 /* If none of the above criteria match, then don't store the key
3117 * persistently */
745c0ce3 3118 return false;
d25e28ab
JH
3119}
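/* Worked example (editor's note): with an SSP unauthenticated
 * combination key (type 0x04), local auth_type 0x04 (general bonding)
 * and remote_auth 0x00 (no bonding), none of the clauses above match,
 * so the key is not stored persistently. With remote_auth 0x02
 * (dedicated bonding) the last clause matches and the key is kept.
 */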
3120
98a0b845
JH
3121static bool ltk_type_master(u8 type)
3122{
d97c9fb0 3123 return (type == SMP_LTK);
98a0b845
JH
3124}
3125
fe39c7b2 3126struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
98a0b845 3127 bool master)
75d262c2 3128{
c9839a11 3129 struct smp_ltk *k;
75d262c2 3130
c9839a11 3131 list_for_each_entry(k, &hdev->long_term_keys, list) {
fe39c7b2 3132 if (k->ediv != ediv || k->rand != rand)
75d262c2
VCG
3133 continue;
3134
98a0b845
JH
3135 if (ltk_type_master(k->type) != master)
3136 continue;
3137
c9839a11 3138 return k;
75d262c2
VCG
3139 }
3140
3141 return NULL;
3142}
75d262c2 3143
c9839a11 3144struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
98a0b845 3145 u8 addr_type, bool master)
75d262c2 3146{
c9839a11 3147 struct smp_ltk *k;
75d262c2 3148
c9839a11
VCG
3149 list_for_each_entry(k, &hdev->long_term_keys, list)
3150 if (addr_type == k->bdaddr_type &&
98a0b845
JH
3151 bacmp(bdaddr, &k->bdaddr) == 0 &&
3152 ltk_type_master(k->type) == master)
75d262c2
VCG
3153 return k;
3154
3155 return NULL;
3156}
75d262c2 3157
970c4e46
JH
3158struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3159{
3160 struct smp_irk *irk;
3161
3162 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3163 if (!bacmp(&irk->rpa, rpa))
3164 return irk;
3165 }
3166
3167 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3168 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
3169 bacpy(&irk->rpa, rpa);
3170 return irk;
3171 }
3172 }
3173
3174 return NULL;
3175}
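/* Editor's sketch: LE advertising report processing resolves incoming
 * RPAs to a bonded identity this way; note that the second loop above
 * caches a successful cryptographic match in irk->rpa, so the next
 * lookup hits the fast path. Hypothetical helper:
 */
static bool example_is_known_identity(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	hci_dev_lock(hdev);
	irk = hci_find_irk_by_rpa(hdev, rpa);
	hci_dev_unlock(hdev);

	/* on success, irk->bdaddr and irk->addr_type identify the peer */
	return irk != NULL;
}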
3176
3177struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3178 u8 addr_type)
3179{
3180 struct smp_irk *irk;
3181
6cfc9988
JH
3182 /* Identity Address must be public or static random */
3183 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3184 return NULL;
3185
970c4e46
JH
3186 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3187 if (addr_type == irk->addr_type &&
3188 bacmp(bdaddr, &irk->bdaddr) == 0)
3189 return irk;
3190 }
3191
3192 return NULL;
3193}
3194
567fa2aa 3195struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
7652ff6a
JH
3196 bdaddr_t *bdaddr, u8 *val, u8 type,
3197 u8 pin_len, bool *persistent)
55ed8ca1
JH
3198{
3199 struct link_key *key, *old_key;
745c0ce3 3200 u8 old_key_type;
55ed8ca1
JH
3201
3202 old_key = hci_find_link_key(hdev, bdaddr);
3203 if (old_key) {
3204 old_key_type = old_key->type;
3205 key = old_key;
3206 } else {
12adcf3a 3207 old_key_type = conn ? conn->key_type : 0xff;
0a14ab41 3208 key = kzalloc(sizeof(*key), GFP_KERNEL);
55ed8ca1 3209 if (!key)
567fa2aa 3210 return NULL;
55ed8ca1
JH
3211 list_add(&key->list, &hdev->link_keys);
3212 }
3213
6ed93dc6 3214 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 3215
d25e28ab
JH
3216 /* Some buggy controller combinations generate a changed
3217 * combination key for legacy pairing even when there's no
3218 * previous key */
3219 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 3220 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 3221 type = HCI_LK_COMBINATION;
655fe6ec
JH
3222 if (conn)
3223 conn->key_type = type;
3224 }
d25e28ab 3225
55ed8ca1 3226 bacpy(&key->bdaddr, bdaddr);
9b3b4460 3227 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
3228 key->pin_len = pin_len;
3229
b6020ba0 3230 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 3231 key->type = old_key_type;
4748fed2
JH
3232 else
3233 key->type = type;
3234
7652ff6a
JH
3235 if (persistent)
3236 *persistent = hci_persistent_key(hdev, conn, type,
3237 old_key_type);
55ed8ca1 3238
567fa2aa 3239 return key;
55ed8ca1
JH
3240}
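/* Editor's sketch: the Link Key Notification handler in hci_event.c
 * stores keys roughly like this; ev and pin_len are assumed to come
 * from the event context:
 */
#if 0
	bool persistent;
	struct link_key *key;

	hci_dev_lock(hdev);
	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			       ev->key_type, pin_len, &persistent);
	hci_dev_unlock(hdev);

	/* a true *persistent tells the caller the key may be stored
	 * across power cycles (see hci_persistent_key() above) */
#endif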
3241
ca9142b8 3242struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
35d70271 3243 u8 addr_type, u8 type, u8 authenticated,
fe39c7b2 3244 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
75d262c2 3245{
c9839a11 3246 struct smp_ltk *key, *old_key;
98a0b845 3247 bool master = ltk_type_master(type);
75d262c2 3248
98a0b845 3249 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
c9839a11 3250 if (old_key)
75d262c2 3251 key = old_key;
c9839a11 3252 else {
0a14ab41 3253 key = kzalloc(sizeof(*key), GFP_KERNEL);
75d262c2 3254 if (!key)
ca9142b8 3255 return NULL;
c9839a11 3256 list_add(&key->list, &hdev->long_term_keys);
75d262c2
VCG
3257 }
3258
75d262c2 3259 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
3260 key->bdaddr_type = addr_type;
3261 memcpy(key->val, tk, sizeof(key->val));
3262 key->authenticated = authenticated;
3263 key->ediv = ediv;
fe39c7b2 3264 key->rand = rand;
c9839a11
VCG
3265 key->enc_size = enc_size;
3266 key->type = type;
75d262c2 3267
ca9142b8 3268 return key;
75d262c2
VCG
3269}
3270
ca9142b8
JH
3271struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3272 u8 addr_type, u8 val[16], bdaddr_t *rpa)
970c4e46
JH
3273{
3274 struct smp_irk *irk;
3275
3276 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3277 if (!irk) {
3278 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3279 if (!irk)
ca9142b8 3280 return NULL;
970c4e46
JH
3281
3282 bacpy(&irk->bdaddr, bdaddr);
3283 irk->addr_type = addr_type;
3284
3285 list_add(&irk->list, &hdev->identity_resolving_keys);
3286 }
3287
3288 memcpy(irk->val, val, 16);
3289 bacpy(&irk->rpa, rpa);
3290
ca9142b8 3291 return irk;
970c4e46
JH
3292}
3293
55ed8ca1
JH
3294int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3295{
3296 struct link_key *key;
3297
3298 key = hci_find_link_key(hdev, bdaddr);
3299 if (!key)
3300 return -ENOENT;
3301
6ed93dc6 3302 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
3303
3304 list_del(&key->list);
3305 kfree(key);
3306
3307 return 0;
3308}
3309
e0b2b27e 3310int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
b899efaf
VCG
3311{
3312 struct smp_ltk *k, *tmp;
c51ffa0b 3313 int removed = 0;
b899efaf
VCG
3314
3315 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
e0b2b27e 3316 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
b899efaf
VCG
3317 continue;
3318
6ed93dc6 3319 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf
VCG
3320
3321 list_del(&k->list);
3322 kfree(k);
c51ffa0b 3323 removed++;
b899efaf
VCG
3324 }
3325
c51ffa0b 3326 return removed ? 0 : -ENOENT;
b899efaf
VCG
3327}
3328
a7ec7338
JH
3329void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3330{
3331 struct smp_irk *k, *tmp;
3332
668b7b19 3333 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
a7ec7338
JH
3334 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3335 continue;
3336
3337 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3338
3339 list_del(&k->list);
3340 kfree(k);
3341 }
3342}
3343
6bd32326 3344/* HCI command timer function */
65cc2b49 3345static void hci_cmd_timeout(struct work_struct *work)
6bd32326 3346{
65cc2b49
MH
3347 struct hci_dev *hdev = container_of(work, struct hci_dev,
3348 cmd_timer.work);
6bd32326 3349
bda4f23a
AE
3350 if (hdev->sent_cmd) {
3351 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3352 u16 opcode = __le16_to_cpu(sent->opcode);
3353
3354 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3355 } else {
3356 BT_ERR("%s command tx timeout", hdev->name);
3357 }
3358
6bd32326 3359 atomic_set(&hdev->cmd_cnt, 1);
c347b765 3360 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
3361}
3362
2763eda6 3363struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 3364 bdaddr_t *bdaddr)
2763eda6
SJ
3365{
3366 struct oob_data *data;
3367
3368 list_for_each_entry(data, &hdev->remote_oob_data, list)
3369 if (bacmp(bdaddr, &data->bdaddr) == 0)
3370 return data;
3371
3372 return NULL;
3373}
3374
3375int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3376{
3377 struct oob_data *data;
3378
3379 data = hci_find_remote_oob_data(hdev, bdaddr);
3380 if (!data)
3381 return -ENOENT;
3382
6ed93dc6 3383 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
3384
3385 list_del(&data->list);
3386 kfree(data);
3387
3388 return 0;
3389}
3390
35f7498a 3391void hci_remote_oob_data_clear(struct hci_dev *hdev)
2763eda6
SJ
3392{
3393 struct oob_data *data, *n;
3394
3395 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3396 list_del(&data->list);
3397 kfree(data);
3398 }
2763eda6
SJ
3399}
3400
0798872e
MH
3401int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3402 u8 *hash, u8 *randomizer)
2763eda6
SJ
3403{
3404 struct oob_data *data;
3405
3406 data = hci_find_remote_oob_data(hdev, bdaddr);
2763eda6 3407 if (!data) {
0a14ab41 3408 data = kmalloc(sizeof(*data), GFP_KERNEL);
2763eda6
SJ
3409 if (!data)
3410 return -ENOMEM;
3411
3412 bacpy(&data->bdaddr, bdaddr);
3413 list_add(&data->list, &hdev->remote_oob_data);
3414 }
3415
519ca9d0
MH
3416 memcpy(data->hash192, hash, sizeof(data->hash192));
3417 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
2763eda6 3418
0798872e
MH
3419 memset(data->hash256, 0, sizeof(data->hash256));
3420 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3421
3422 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3423
3424 return 0;
3425}
3426
3427int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3428 u8 *hash192, u8 *randomizer192,
3429 u8 *hash256, u8 *randomizer256)
3430{
3431 struct oob_data *data;
3432
3433 data = hci_find_remote_oob_data(hdev, bdaddr);
3434 if (!data) {
0a14ab41 3435 data = kmalloc(sizeof(*data), GFP_KERNEL);
0798872e
MH
3436 if (!data)
3437 return -ENOMEM;
3438
3439 bacpy(&data->bdaddr, bdaddr);
3440 list_add(&data->list, &hdev->remote_oob_data);
3441 }
3442
3443 memcpy(data->hash192, hash192, sizeof(data->hash192));
3444 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3445
3446 memcpy(data->hash256, hash256, sizeof(data->hash256));
3447 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3448
6ed93dc6 3449 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
3450
3451 return 0;
3452}
3453
dcc36c16 3454struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
b9ee0a78 3455 bdaddr_t *bdaddr, u8 type)
b2a66aad 3456{
8035ded4 3457 struct bdaddr_list *b;
b2a66aad 3458
dcc36c16 3459 list_for_each_entry(b, bdaddr_list, list) {
b9ee0a78 3460 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 3461 return b;
b9ee0a78 3462 }
b2a66aad
AJ
3463
3464 return NULL;
3465}
3466
dcc36c16 3467void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
b2a66aad
AJ
3468{
3469 struct list_head *p, *n;
3470
dcc36c16 3471 list_for_each_safe(p, n, bdaddr_list) {
b9ee0a78 3472 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
b2a66aad
AJ
3473
3474 list_del(p);
3475 kfree(b);
3476 }
b2a66aad
AJ
3477}
3478
dcc36c16 3479int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3480{
3481 struct bdaddr_list *entry;
b2a66aad 3482
b9ee0a78 3483 if (!bacmp(bdaddr, BDADDR_ANY))
b2a66aad
AJ
3484 return -EBADF;
3485
dcc36c16 3486 if (hci_bdaddr_list_lookup(list, bdaddr, type))
5e762444 3487 return -EEXIST;
b2a66aad
AJ
3488
3489 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
3490 if (!entry)
3491 return -ENOMEM;
b2a66aad
AJ
3492
3493 bacpy(&entry->bdaddr, bdaddr);
b9ee0a78 3494 entry->bdaddr_type = type;
b2a66aad 3495
dcc36c16 3496 list_add(&entry->list, list);
b2a66aad 3497
2a8357f2 3498 return 0;
b2a66aad
AJ
3499}
3500
dcc36c16 3501int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3502{
3503 struct bdaddr_list *entry;
b2a66aad 3504
35f7498a 3505 if (!bacmp(bdaddr, BDADDR_ANY)) {
dcc36c16 3506 hci_bdaddr_list_clear(list);
35f7498a
JH
3507 return 0;
3508 }
b2a66aad 3509
dcc36c16 3510 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
d2ab0ac1
MH
3511 if (!entry)
3512 return -ENOENT;
3513
3514 list_del(&entry->list);
3515 kfree(entry);
3516
3517 return 0;
3518}
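/* Editor's sketch: these helpers back, among others, the LE white list
 * mirror in hdev->le_white_list; hdev->lock is assumed to be held by
 * the caller, and the helper name is hypothetical:
 */
static void example_toggle_white_list_entry(struct hci_dev *hdev,
					    bdaddr_t *addr)
{
	/* hci_bdaddr_list_add() returns -EEXIST for a duplicate entry */
	if (hci_bdaddr_list_add(&hdev->le_white_list, addr,
				ADDR_LE_DEV_PUBLIC) == -EEXIST)
		hci_bdaddr_list_del(&hdev->le_white_list, addr,
				    ADDR_LE_DEV_PUBLIC);
}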
3519
15819a70
AG
3520/* This function requires the caller holds hdev->lock */
3521struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3522 bdaddr_t *addr, u8 addr_type)
3523{
3524 struct hci_conn_params *params;
3525
738f6185
JH
3526 /* The conn params list only contains identity addresses */
3527 if (!hci_is_identity_address(addr, addr_type))
3528 return NULL;
3529
15819a70
AG
3530 list_for_each_entry(params, &hdev->le_conn_params, list) {
3531 if (bacmp(&params->addr, addr) == 0 &&
3532 params->addr_type == addr_type) {
3533 return params;
3534 }
3535 }
3536
3537 return NULL;
3538}
3539
cef952ce
AG
3540static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3541{
3542 struct hci_conn *conn;
3543
3544 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3545 if (!conn)
3546 return false;
3547
3548 if (conn->dst_type != type)
3549 return false;
3550
3551 if (conn->state != BT_CONNECTED)
3552 return false;
3553
3554 return true;
3555}
3556
4b10966f 3557/* This function requires the caller holds hdev->lock */
501f8827
JH
3558struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3559 bdaddr_t *addr, u8 addr_type)
4b10966f 3560{
912b42ef 3561 struct hci_conn_params *param;
4b10966f 3562
738f6185
JH
3563 /* The list only contains identity addresses */
3564 if (!hci_is_identity_address(addr, addr_type))
3565 return NULL;
3566
501f8827 3567 list_for_each_entry(param, list, action) {
912b42ef
JH
3568 if (bacmp(&param->addr, addr) == 0 &&
3569 param->addr_type == addr_type)
3570 return param;
4b10966f
MH
3571 }
3572
3573 return NULL;
3574}
3575
3576/* This function requires the caller holds hdev->lock */
51d167c0
MH
3577struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3578 bdaddr_t *addr, u8 addr_type)
bf5b3c8b
MH
3579{
3580 struct hci_conn_params *params;
3581
c46245b3 3582 if (!hci_is_identity_address(addr, addr_type))
51d167c0 3583 return NULL;
bf5b3c8b
MH
3584
3585 params = hci_conn_params_lookup(hdev, addr, addr_type);
3586 if (params)
51d167c0 3587 return params;
bf5b3c8b
MH
3588
3589 params = kzalloc(sizeof(*params), GFP_KERNEL);
3590 if (!params) {
3591 BT_ERR("Out of memory");
51d167c0 3592 return NULL;
bf5b3c8b
MH
3593 }
3594
3595 bacpy(&params->addr, addr);
3596 params->addr_type = addr_type;
3597
3598 list_add(&params->list, &hdev->le_conn_params);
93450c75 3599 INIT_LIST_HEAD(&params->action);
bf5b3c8b
MH
3600
3601 params->conn_min_interval = hdev->le_conn_min_interval;
3602 params->conn_max_interval = hdev->le_conn_max_interval;
3603 params->conn_latency = hdev->le_conn_latency;
3604 params->supervision_timeout = hdev->le_supv_timeout;
3605 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3606
3607 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3608
51d167c0 3609 return params;
bf5b3c8b
MH
3610}
3611
3612/* This function requires the caller holds hdev->lock */
3613int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
d06b50ce 3614 u8 auto_connect)
15819a70
AG
3615{
3616 struct hci_conn_params *params;
3617
8c87aae1
MH
3618 params = hci_conn_params_add(hdev, addr, addr_type);
3619 if (!params)
3620 return -EIO;
cef952ce 3621
42ce26de
JH
3622 if (params->auto_connect == auto_connect)
3623 return 0;
3624
95305baa 3625 list_del_init(&params->action);
15819a70 3626
cef952ce
AG
3627 switch (auto_connect) {
3628 case HCI_AUTO_CONN_DISABLED:
3629 case HCI_AUTO_CONN_LINK_LOSS:
95305baa 3630 hci_update_background_scan(hdev);
cef952ce 3631 break;
851efca8 3632 case HCI_AUTO_CONN_REPORT:
95305baa
JH
3633 list_add(&params->action, &hdev->pend_le_reports);
3634 hci_update_background_scan(hdev);
851efca8 3635 break;
cef952ce 3636 case HCI_AUTO_CONN_ALWAYS:
95305baa
JH
3637 if (!is_connected(hdev, addr, addr_type)) {
3638 list_add(&params->action, &hdev->pend_le_conns);
3639 hci_update_background_scan(hdev);
3640 }
cef952ce
AG
3641 break;
3642 }
15819a70 3643
851efca8
JH
3644 params->auto_connect = auto_connect;
3645
d06b50ce
MH
3646 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3647 auto_connect);
a9b0a04c
AG
3648
3649 return 0;
15819a70
AG
3650}
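/* Editor's sketch: marking a bonded LE peer for automatic
 * reconnection; hci_conn_params_set() requires hdev->lock as noted
 * above (helper name hypothetical):
 */
static int example_enable_auto_connect(struct hci_dev *hdev,
				       bdaddr_t *addr, u8 addr_type)
{
	int err;

	hci_dev_lock(hdev);
	err = hci_conn_params_set(hdev, addr, addr_type,
				  HCI_AUTO_CONN_ALWAYS);
	hci_dev_unlock(hdev);

	return err;
}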
3651
3652/* This function requires the caller holds hdev->lock */
3653void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3654{
3655 struct hci_conn_params *params;
3656
3657 params = hci_conn_params_lookup(hdev, addr, addr_type);
3658 if (!params)
3659 return;
3660
95305baa 3661 list_del(&params->action);
15819a70
AG
3662 list_del(&params->list);
3663 kfree(params);
3664
95305baa
JH
3665 hci_update_background_scan(hdev);
3666
15819a70
AG
3667 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3668}
3669
55af49a8
JH
3670/* This function requires the caller holds hdev->lock */
3671void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3672{
3673 struct hci_conn_params *params, *tmp;
3674
3675 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3676 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3677 continue;
3678 list_del(&params->list);
3679 kfree(params);
3680 }
3681
3682 BT_DBG("All LE disabled connection parameters were removed");
3683}
3684
15819a70 3685/* This function requires the caller holds hdev->lock */
373110c5 3686void hci_conn_params_clear_all(struct hci_dev *hdev)
15819a70
AG
3687{
3688 struct hci_conn_params *params, *tmp;
3689
3690 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
a2f41a8f 3691 list_del(&params->action);
15819a70
AG
3692 list_del(&params->list);
3693 kfree(params);
3694 }
3695
a2f41a8f 3696 hci_update_background_scan(hdev);
1089b67d 3697
15819a70
AG
3698 BT_DBG("All LE connection parameters were removed");
3699}
3700
4c87eaab 3701static void inquiry_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3702{
4c87eaab
AG
3703 if (status) {
3704 BT_ERR("Failed to start inquiry: status %d", status);
7ba8b4be 3705
4c87eaab
AG
3706 hci_dev_lock(hdev);
3707 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3708 hci_dev_unlock(hdev);
3709 return;
3710 }
7ba8b4be
AG
3711}
3712
4c87eaab 3713static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3714{
4c87eaab
AG
3715 /* General inquiry access code (GIAC) */
3716 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3717 struct hci_request req;
3718 struct hci_cp_inquiry cp;
7ba8b4be
AG
3719 int err;
3720
4c87eaab
AG
3721 if (status) {
3722 BT_ERR("Failed to disable LE scanning: status %d", status);
3723 return;
3724 }
7ba8b4be 3725
4c87eaab
AG
3726 switch (hdev->discovery.type) {
3727 case DISCOV_TYPE_LE:
3728 hci_dev_lock(hdev);
3729 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3730 hci_dev_unlock(hdev);
3731 break;
7ba8b4be 3732
4c87eaab
AG
3733 case DISCOV_TYPE_INTERLEAVED:
3734 hci_req_init(&req, hdev);
7ba8b4be 3735
4c87eaab
AG
3736 memset(&cp, 0, sizeof(cp));
3737 memcpy(&cp.lap, lap, sizeof(cp.lap));
3738 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3739 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
7ba8b4be 3740
4c87eaab 3741 hci_dev_lock(hdev);
7dbfac1d 3742
4c87eaab 3743 hci_inquiry_cache_flush(hdev);
7dbfac1d 3744
4c87eaab
AG
3745 err = hci_req_run(&req, inquiry_complete);
3746 if (err) {
3747 BT_ERR("Inquiry request failed: err %d", err);
3748 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3749 }
7dbfac1d 3750
4c87eaab
AG
3751 hci_dev_unlock(hdev);
3752 break;
7dbfac1d 3753 }
7dbfac1d
AG
3754}
3755
7ba8b4be
AG
3756static void le_scan_disable_work(struct work_struct *work)
3757{
3758 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 3759 le_scan_disable.work);
4c87eaab
AG
3760 struct hci_request req;
3761 int err;
7ba8b4be
AG
3762
3763 BT_DBG("%s", hdev->name);
3764
4c87eaab 3765 hci_req_init(&req, hdev);
28b75a89 3766
b1efcc28 3767 hci_req_add_le_scan_disable(&req);
28b75a89 3768
4c87eaab
AG
3769 err = hci_req_run(&req, le_scan_disable_work_complete);
3770 if (err)
3771 BT_ERR("Disable LE scanning request failed: err %d", err);
28b75a89
AG
3772}
3773
8d97250e
JH
3774static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3775{
3776 struct hci_dev *hdev = req->hdev;
3777
3778 /* If we're advertising or initiating an LE connection we can't
3779 * go ahead and change the random address at this time. This is
3780 * because the eventual initiator address used for the
3781 * subsequently created connection will be undefined (some
3782 * controllers use the new address and others the one we had
3783 * when the operation started).
3784 *
3785 * In this kind of scenario skip the update and let the random
3786 * address be updated at the next cycle.
3787 */
5ce194c4 3788 if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
8d97250e
JH
3789 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3790 BT_DBG("Deferring random address update");
3791 return;
3792 }
3793
3794 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3795}
3796
94b1fc92
MH
3797int hci_update_random_address(struct hci_request *req, bool require_privacy,
3798 u8 *own_addr_type)
ebd3a747
JH
3799{
3800 struct hci_dev *hdev = req->hdev;
3801 int err;
3802
 3803 /* If privacy is enabled, use a resolvable private address. If
2b5224dc
MH
 3804 * the current RPA has expired or something other than the
 3805 * current RPA is in use, then generate a new one.
ebd3a747
JH
3806 */
3807 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
ebd3a747
JH
3808 int to;
3809
3810 *own_addr_type = ADDR_LE_DEV_RANDOM;
3811
3812 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
2b5224dc 3813 !bacmp(&hdev->random_addr, &hdev->rpa))
ebd3a747
JH
3814 return 0;
3815
2b5224dc 3816 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
ebd3a747
JH
3817 if (err < 0) {
3818 BT_ERR("%s failed to generate new RPA", hdev->name);
3819 return err;
3820 }
3821
8d97250e 3822 set_random_addr(req, &hdev->rpa);
ebd3a747
JH
3823
3824 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3825 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3826
3827 return 0;
94b1fc92
MH
3828 }
3829
 3830 /* If privacy is required but a resolvable private address is not
 3831 * available, use a non-resolvable private address. This is useful
 3832 * for active scanning and non-connectable advertising.
3833 */
3834 if (require_privacy) {
3835 bdaddr_t urpa;
3836
3837 get_random_bytes(&urpa, 6);
3838 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3839
3840 *own_addr_type = ADDR_LE_DEV_RANDOM;
8d97250e 3841 set_random_addr(req, &urpa);
94b1fc92 3842 return 0;
ebd3a747
JH
3843 }
3844
 3845 /* If forcing the static address is in use or there is no public
 3846 * address, use the static address as the random address (but skip
 3847 * the HCI command if the current random address is already the
 3848 * static one).
3849 */
111902f7 3850 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
ebd3a747
JH
3851 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3852 *own_addr_type = ADDR_LE_DEV_RANDOM;
3853 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3854 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3855 &hdev->static_addr);
3856 return 0;
3857 }
3858
3859 /* Neither privacy nor static address is being used so use a
3860 * public address.
3861 */
3862 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3863
3864 return 0;
3865}
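
/* Illustrative sketch, not part of hci_core.c: a request builder
 * resolves its own address type before issuing address-dependent LE
 * commands. example_set_own_addr is a hypothetical name.
 */
static int example_set_own_addr(struct hci_request *req)
{
	u8 own_addr_type;
	int err;

	/* Privacy not strictly required here; fall back as documented */
	err = hci_update_random_address(req, false, &own_addr_type);
	if (err < 0)
		return err;

	/* own_addr_type is now ADDR_LE_DEV_PUBLIC or ADDR_LE_DEV_RANDOM */
	return 0;
}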
3866
a1f4c318
JH
3867/* Copy the Identity Address of the controller.
3868 *
3869 * If the controller has a public BD_ADDR, then by default use that one.
3870 * If this is a LE only controller without a public address, default to
3871 * the static random address.
3872 *
3873 * For debugging purposes it is possible to force controllers with a
3874 * public address to use the static random address instead.
3875 */
3876void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3877 u8 *bdaddr_type)
3878{
111902f7 3879 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
a1f4c318
JH
3880 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3881 bacpy(bdaddr, &hdev->static_addr);
3882 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3883 } else {
3884 bacpy(bdaddr, &hdev->bdaddr);
3885 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3886 }
3887}
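
/* Illustrative sketch, not part of hci_core.c: reading back the
 * identity address selected by the rules documented above.
 * example_log_identity is a hypothetical name.
 */
static void example_log_identity(struct hci_dev *hdev)
{
	bdaddr_t id_addr;
	u8 id_type;

	hci_copy_identity_address(hdev, &id_addr, &id_type);
	BT_DBG("%s identity %pMR (type %u)", hdev->name, &id_addr, id_type);
}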
3888
9be0dab7
DH
3889/* Alloc HCI device */
3890struct hci_dev *hci_alloc_dev(void)
3891{
3892 struct hci_dev *hdev;
3893
3894 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3895 if (!hdev)
3896 return NULL;
3897
b1b813d4
DH
3898 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3899 hdev->esco_type = (ESCO_HV1);
3900 hdev->link_mode = (HCI_LM_ACCEPT);
b4cb9fb2
MH
3901 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3902 hdev->io_capability = 0x03; /* No Input No Output */
96c2103a 3903 hdev->manufacturer = 0xffff; /* Default to internal use */
bbaf444a
JH
3904 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3905 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
b1b813d4 3906
b1b813d4
DH
3907 hdev->sniff_max_interval = 800;
3908 hdev->sniff_min_interval = 80;
3909
3f959d46 3910 hdev->le_adv_channel_map = 0x07;
bef64738
MH
3911 hdev->le_scan_interval = 0x0060;
3912 hdev->le_scan_window = 0x0030;
4e70c7e7
MH
3913 hdev->le_conn_min_interval = 0x0028;
3914 hdev->le_conn_max_interval = 0x0038;
04fb7d90
MH
3915 hdev->le_conn_latency = 0x0000;
3916 hdev->le_supv_timeout = 0x002a;
bef64738 3917
d6bfd59c 3918 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
b9a7a61e 3919 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
31ad1691
AK
3920 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3921 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
d6bfd59c 3922
b1b813d4
DH
3923 mutex_init(&hdev->lock);
3924 mutex_init(&hdev->req_lock);
3925
3926 INIT_LIST_HEAD(&hdev->mgmt_pending);
3927 INIT_LIST_HEAD(&hdev->blacklist);
6659358e 3928 INIT_LIST_HEAD(&hdev->whitelist);
b1b813d4
DH
3929 INIT_LIST_HEAD(&hdev->uuids);
3930 INIT_LIST_HEAD(&hdev->link_keys);
3931 INIT_LIST_HEAD(&hdev->long_term_keys);
970c4e46 3932 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
b1b813d4 3933 INIT_LIST_HEAD(&hdev->remote_oob_data);
d2ab0ac1 3934 INIT_LIST_HEAD(&hdev->le_white_list);
15819a70 3935 INIT_LIST_HEAD(&hdev->le_conn_params);
77a77a30 3936 INIT_LIST_HEAD(&hdev->pend_le_conns);
66f8455a 3937 INIT_LIST_HEAD(&hdev->pend_le_reports);
6b536b5e 3938 INIT_LIST_HEAD(&hdev->conn_hash.list);
b1b813d4
DH
3939
3940 INIT_WORK(&hdev->rx_work, hci_rx_work);
3941 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3942 INIT_WORK(&hdev->tx_work, hci_tx_work);
3943 INIT_WORK(&hdev->power_on, hci_power_on);
b1b813d4 3944
b1b813d4
DH
3945 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3946 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3947 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3948
b1b813d4
DH
3949 skb_queue_head_init(&hdev->rx_q);
3950 skb_queue_head_init(&hdev->cmd_q);
3951 skb_queue_head_init(&hdev->raw_q);
3952
3953 init_waitqueue_head(&hdev->req_wait_q);
3954
65cc2b49 3955 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
b1b813d4 3956
b1b813d4
DH
3957 hci_init_sysfs(hdev);
3958 discovery_init(hdev);
9be0dab7
DH
3959
3960 return hdev;
3961}
3962EXPORT_SYMBOL(hci_alloc_dev);
3963
3964/* Free HCI device */
3965void hci_free_dev(struct hci_dev *hdev)
3966{
9be0dab7
DH
3967 /* will free via device release */
3968 put_device(&hdev->dev);
3969}
3970EXPORT_SYMBOL(hci_free_dev);
3971
1da177e4
LT
3972/* Register HCI device */
3973int hci_register_dev(struct hci_dev *hdev)
3974{
b1b813d4 3975 int id, error;
1da177e4 3976
74292d5a 3977 if (!hdev->open || !hdev->close || !hdev->send)
1da177e4
LT
3978 return -EINVAL;
3979
08add513
MM
3980 /* Do not allow HCI_AMP devices to register at index 0,
3981 * so the index can be used as the AMP controller ID.
3982 */
3df92b31
SL
3983 switch (hdev->dev_type) {
3984 case HCI_BREDR:
3985 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3986 break;
3987 case HCI_AMP:
3988 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3989 break;
3990 default:
3991 return -EINVAL;
1da177e4 3992 }
8e87d142 3993
3df92b31
SL
3994 if (id < 0)
3995 return id;
3996
1da177e4
LT
3997 sprintf(hdev->name, "hci%d", id);
3998 hdev->id = id;
2d8b3a11
AE
3999
4000 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4001
d8537548
KC
4002 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4003 WQ_MEM_RECLAIM, 1, hdev->name);
33ca954d
DH
4004 if (!hdev->workqueue) {
4005 error = -ENOMEM;
4006 goto err;
4007 }
f48fd9c8 4008
d8537548
KC
4009 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4010 WQ_MEM_RECLAIM, 1, hdev->name);
6ead1bbc
JH
4011 if (!hdev->req_workqueue) {
4012 destroy_workqueue(hdev->workqueue);
4013 error = -ENOMEM;
4014 goto err;
4015 }
4016
0153e2ec
MH
4017 if (!IS_ERR_OR_NULL(bt_debugfs))
4018 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4019
bdc3e0f1
MH
4020 dev_set_name(&hdev->dev, "%s", hdev->name);
4021
99780a7b
JH
4022 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
4023 CRYPTO_ALG_ASYNC);
4024 if (IS_ERR(hdev->tfm_aes)) {
4025 BT_ERR("Unable to create crypto context");
4026 error = PTR_ERR(hdev->tfm_aes);
4027 hdev->tfm_aes = NULL;
4028 goto err_wqueue;
4029 }
4030
bdc3e0f1 4031 error = device_add(&hdev->dev);
33ca954d 4032 if (error < 0)
99780a7b 4033 goto err_tfm;
1da177e4 4034
611b30f7 4035 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
4036 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4037 hdev);
611b30f7
MH
4038 if (hdev->rfkill) {
4039 if (rfkill_register(hdev->rfkill) < 0) {
4040 rfkill_destroy(hdev->rfkill);
4041 hdev->rfkill = NULL;
4042 }
4043 }
4044
5e130367
JH
4045 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4046 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4047
a8b2d5c2 4048 set_bit(HCI_SETUP, &hdev->dev_flags);
004b0258 4049 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
ce2be9ac 4050
01cd3404 4051 if (hdev->dev_type == HCI_BREDR) {
56f87901
JH
4052 /* Assume BR/EDR support until proven otherwise (such as
 4053 * through reading supported features during init).
4054 */
4055 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4056 }
ce2be9ac 4057
fcee3377
GP
4058 write_lock(&hci_dev_list_lock);
4059 list_add(&hdev->list, &hci_dev_list);
4060 write_unlock(&hci_dev_list_lock);
4061
4a964404
MH
4062 /* Devices that are marked for raw-only usage are unconfigured
4063 * and should not be included in normal operation.
fee746b0
MH
4064 */
4065 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4a964404 4066 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
fee746b0 4067
1da177e4 4068 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 4069 hci_dev_hold(hdev);
1da177e4 4070
19202573 4071 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 4072
1da177e4 4073 return id;
f48fd9c8 4074
99780a7b
JH
4075err_tfm:
4076 crypto_free_blkcipher(hdev->tfm_aes);
33ca954d
DH
4077err_wqueue:
4078 destroy_workqueue(hdev->workqueue);
6ead1bbc 4079 destroy_workqueue(hdev->req_workqueue);
33ca954d 4080err:
3df92b31 4081 ida_simple_remove(&hci_index_ida, hdev->id);
f48fd9c8 4082
33ca954d 4083 return error;
1da177e4
LT
4084}
4085EXPORT_SYMBOL(hci_register_dev);
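
/* Illustrative sketch, not part of hci_core.c: the minimal driver
 * contract checked at the top of hci_register_dev() - open, close and
 * send must all be provided. Every example_* name is hypothetical.
 */
static int example_open(struct hci_dev *hdev)
{
	return 0;
}

static int example_close(struct hci_dev *hdev)
{
	return 0;
}

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	kfree_skb(skb);	/* a real driver hands the frame to hardware */
	return 0;
}

static int example_probe(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_USB;	/* hypothetical transport */
	hdev->open = example_open;
	hdev->close = example_close;
	hdev->send = example_send;

	err = hci_register_dev(hdev);
	if (err < 0)
		hci_free_dev(hdev);

	return err;
}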
4086
4087/* Unregister HCI device */
59735631 4088void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 4089{
3df92b31 4090 int i, id;
ef222013 4091
c13854ce 4092 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 4093
94324962
JH
4094 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4095
3df92b31
SL
4096 id = hdev->id;
4097
f20d09d5 4098 write_lock(&hci_dev_list_lock);
1da177e4 4099 list_del(&hdev->list);
f20d09d5 4100 write_unlock(&hci_dev_list_lock);
1da177e4
LT
4101
4102 hci_dev_do_close(hdev);
4103
cd4c5391 4104 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
4105 kfree_skb(hdev->reassembly[i]);
4106
b9b5ef18
GP
4107 cancel_work_sync(&hdev->power_on);
4108
ab81cbf9 4109 if (!test_bit(HCI_INIT, &hdev->flags) &&
d603b76b
MH
4110 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4111 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
09fd0de5 4112 hci_dev_lock(hdev);
744cf19e 4113 mgmt_index_removed(hdev);
09fd0de5 4114 hci_dev_unlock(hdev);
56e5cb86 4115 }
ab81cbf9 4116
2e58ef3e
JH
4117 /* mgmt_index_removed should take care of emptying the
4118 * pending list */
4119 BUG_ON(!list_empty(&hdev->mgmt_pending));
4120
1da177e4
LT
4121 hci_notify(hdev, HCI_DEV_UNREG);
4122
611b30f7
MH
4123 if (hdev->rfkill) {
4124 rfkill_unregister(hdev->rfkill);
4125 rfkill_destroy(hdev->rfkill);
4126 }
4127
99780a7b
JH
4128 if (hdev->tfm_aes)
4129 crypto_free_blkcipher(hdev->tfm_aes);
4130
bdc3e0f1 4131 device_del(&hdev->dev);
147e2d59 4132
0153e2ec
MH
4133 debugfs_remove_recursive(hdev->debugfs);
4134
f48fd9c8 4135 destroy_workqueue(hdev->workqueue);
6ead1bbc 4136 destroy_workqueue(hdev->req_workqueue);
f48fd9c8 4137
09fd0de5 4138 hci_dev_lock(hdev);
dcc36c16 4139 hci_bdaddr_list_clear(&hdev->blacklist);
6659358e 4140 hci_bdaddr_list_clear(&hdev->whitelist);
2aeb9a1a 4141 hci_uuids_clear(hdev);
55ed8ca1 4142 hci_link_keys_clear(hdev);
b899efaf 4143 hci_smp_ltks_clear(hdev);
970c4e46 4144 hci_smp_irks_clear(hdev);
2763eda6 4145 hci_remote_oob_data_clear(hdev);
dcc36c16 4146 hci_bdaddr_list_clear(&hdev->le_white_list);
373110c5 4147 hci_conn_params_clear_all(hdev);
09fd0de5 4148 hci_dev_unlock(hdev);
e2e0cacb 4149
dc946bd8 4150 hci_dev_put(hdev);
3df92b31
SL
4151
4152 ida_simple_remove(&hci_index_ida, id);
1da177e4
LT
4153}
4154EXPORT_SYMBOL(hci_unregister_dev);
4155
4156/* Suspend HCI device */
4157int hci_suspend_dev(struct hci_dev *hdev)
4158{
4159 hci_notify(hdev, HCI_DEV_SUSPEND);
4160 return 0;
4161}
4162EXPORT_SYMBOL(hci_suspend_dev);
4163
4164/* Resume HCI device */
4165int hci_resume_dev(struct hci_dev *hdev)
4166{
4167 hci_notify(hdev, HCI_DEV_RESUME);
4168 return 0;
4169}
4170EXPORT_SYMBOL(hci_resume_dev);
4171
76bca880 4172/* Receive frame from HCI drivers */
e1a26170 4173int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 4174{
76bca880 4175 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 4176 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
4177 kfree_skb(skb);
4178 return -ENXIO;
4179 }
4180
d82603c6 4181 /* Incoming skb */
76bca880
MH
4182 bt_cb(skb)->incoming = 1;
4183
4184 /* Time stamp */
4185 __net_timestamp(skb);
4186
76bca880 4187 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 4188 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 4189
76bca880
MH
4190 return 0;
4191}
4192EXPORT_SYMBOL(hci_recv_frame);
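
/* Illustrative sketch, not part of hci_core.c: a driver completion
 * path hands one full frame to the core. The packet type must be set
 * in the skb control block first. example_rx is a hypothetical name.
 */
static int example_rx(struct hci_dev *hdev, struct sk_buff *skb)
{
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;	/* assumed: event frame */

	/* hci_recv_frame() consumes the skb, also on error */
	return hci_recv_frame(hdev, skb);
}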
4193
33e882a5 4194static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 4195 int count, __u8 index)
33e882a5
SS
4196{
4197 int len = 0;
4198 int hlen = 0;
4199 int remain = count;
4200 struct sk_buff *skb;
4201 struct bt_skb_cb *scb;
4202
4203 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 4204 index >= NUM_REASSEMBLY)
33e882a5
SS
4205 return -EILSEQ;
4206
4207 skb = hdev->reassembly[index];
4208
4209 if (!skb) {
4210 switch (type) {
4211 case HCI_ACLDATA_PKT:
4212 len = HCI_MAX_FRAME_SIZE;
4213 hlen = HCI_ACL_HDR_SIZE;
4214 break;
4215 case HCI_EVENT_PKT:
4216 len = HCI_MAX_EVENT_SIZE;
4217 hlen = HCI_EVENT_HDR_SIZE;
4218 break;
4219 case HCI_SCODATA_PKT:
4220 len = HCI_MAX_SCO_SIZE;
4221 hlen = HCI_SCO_HDR_SIZE;
4222 break;
4223 }
4224
1e429f38 4225 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
4226 if (!skb)
4227 return -ENOMEM;
4228
4229 scb = (void *) skb->cb;
4230 scb->expect = hlen;
4231 scb->pkt_type = type;
4232
33e882a5
SS
4233 hdev->reassembly[index] = skb;
4234 }
4235
4236 while (count) {
4237 scb = (void *) skb->cb;
89bb46d0 4238 len = min_t(uint, scb->expect, count);
33e882a5
SS
4239
4240 memcpy(skb_put(skb, len), data, len);
4241
4242 count -= len;
4243 data += len;
4244 scb->expect -= len;
4245 remain = count;
4246
4247 switch (type) {
4248 case HCI_EVENT_PKT:
4249 if (skb->len == HCI_EVENT_HDR_SIZE) {
4250 struct hci_event_hdr *h = hci_event_hdr(skb);
4251 scb->expect = h->plen;
4252
4253 if (skb_tailroom(skb) < scb->expect) {
4254 kfree_skb(skb);
4255 hdev->reassembly[index] = NULL;
4256 return -ENOMEM;
4257 }
4258 }
4259 break;
4260
4261 case HCI_ACLDATA_PKT:
4262 if (skb->len == HCI_ACL_HDR_SIZE) {
4263 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4264 scb->expect = __le16_to_cpu(h->dlen);
4265
4266 if (skb_tailroom(skb) < scb->expect) {
4267 kfree_skb(skb);
4268 hdev->reassembly[index] = NULL;
4269 return -ENOMEM;
4270 }
4271 }
4272 break;
4273
4274 case HCI_SCODATA_PKT:
4275 if (skb->len == HCI_SCO_HDR_SIZE) {
4276 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4277 scb->expect = h->dlen;
4278
4279 if (skb_tailroom(skb) < scb->expect) {
4280 kfree_skb(skb);
4281 hdev->reassembly[index] = NULL;
4282 return -ENOMEM;
4283 }
4284 }
4285 break;
4286 }
4287
4288 if (scb->expect == 0) {
4289 /* Complete frame */
4290
4291 bt_cb(skb)->pkt_type = type;
e1a26170 4292 hci_recv_frame(hdev, skb);
33e882a5
SS
4293
4294 hdev->reassembly[index] = NULL;
4295 return remain;
4296 }
4297 }
4298
4299 return remain;
4300}
4301
ef222013
MH
4302int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4303{
f39a3c06
SS
4304 int rem = 0;
4305
ef222013
MH
4306 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4307 return -EILSEQ;
4308
da5f6c37 4309 while (count) {
1e429f38 4310 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
4311 if (rem < 0)
4312 return rem;
ef222013 4313
f39a3c06
SS
4314 data += (count - rem);
4315 count = rem;
f81c6224 4316 }
ef222013 4317
f39a3c06 4318 return rem;
ef222013
MH
4319}
4320EXPORT_SYMBOL(hci_recv_fragment);
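
/* Illustrative sketch, not part of hci_core.c: UART-style drivers feed
 * arbitrary byte chunks of a known packet type; hci_reassembly() above
 * buffers them until a complete frame can be forwarded through
 * hci_recv_frame(). example_rx_chunk is a hypothetical name.
 */
static int example_rx_chunk(struct hci_dev *hdev, void *buf, int len)
{
	/* assumed: buf holds ACL payload bytes from the transport */
	return hci_recv_fragment(hdev, HCI_ACLDATA_PKT, buf, len);
}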
4321
99811510
SS
4322#define STREAM_REASSEMBLY 0
4323
4324int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4325{
4326 int type;
4327 int rem = 0;
4328
da5f6c37 4329 while (count) {
99811510
SS
4330 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4331
4332 if (!skb) {
4333 struct { char type; } *pkt;
4334
4335 /* Start of the frame */
4336 pkt = data;
4337 type = pkt->type;
4338
4339 data++;
4340 count--;
4341 } else
4342 type = bt_cb(skb)->pkt_type;
4343
1e429f38 4344 rem = hci_reassembly(hdev, type, data, count,
a8c5fb1a 4345 STREAM_REASSEMBLY);
99811510
SS
4346 if (rem < 0)
4347 return rem;
4348
4349 data += (count - rem);
4350 count = rem;
f81c6224 4351 }
99811510
SS
4352
4353 return rem;
4354}
4355EXPORT_SYMBOL(hci_recv_stream_fragment);
4356
1da177e4
LT
4357/* ---- Interface to upper protocols ---- */
4358
1da177e4
LT
4359int hci_register_cb(struct hci_cb *cb)
4360{
4361 BT_DBG("%p name %s", cb, cb->name);
4362
f20d09d5 4363 write_lock(&hci_cb_list_lock);
1da177e4 4364 list_add(&cb->list, &hci_cb_list);
f20d09d5 4365 write_unlock(&hci_cb_list_lock);
1da177e4
LT
4366
4367 return 0;
4368}
4369EXPORT_SYMBOL(hci_register_cb);
4370
4371int hci_unregister_cb(struct hci_cb *cb)
4372{
4373 BT_DBG("%p name %s", cb, cb->name);
4374
f20d09d5 4375 write_lock(&hci_cb_list_lock);
1da177e4 4376 list_del(&cb->list);
f20d09d5 4377 write_unlock(&hci_cb_list_lock);
1da177e4
LT
4378
4379 return 0;
4380}
4381EXPORT_SYMBOL(hci_unregister_cb);
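
/* Illustrative sketch, not part of hci_core.c: an upper protocol
 * registers a struct hci_cb to receive connection notifications.
 * Only the name field is filled in; everything here is hypothetical.
 */
static struct hci_cb example_cb = {
	.name = "example",
};

static int __init example_cb_init(void)
{
	return hci_register_cb(&example_cb);
}

static void __exit example_cb_exit(void)
{
	hci_unregister_cb(&example_cb);
}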
4382
51086991 4383static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4 4384{
cdc52faa
MH
4385 int err;
4386
0d48d939 4387 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 4388
cd82e61c
MH
4389 /* Time stamp */
4390 __net_timestamp(skb);
1da177e4 4391
cd82e61c
MH
4392 /* Send copy to monitor */
4393 hci_send_to_monitor(hdev, skb);
4394
4395 if (atomic_read(&hdev->promisc)) {
4396 /* Send copy to the sockets */
470fe1b5 4397 hci_send_to_sock(hdev, skb);
1da177e4
LT
4398 }
4399
4400 /* Get rid of skb owner, prior to sending to the driver. */
4401 skb_orphan(skb);
4402
cdc52faa
MH
4403 err = hdev->send(hdev, skb);
4404 if (err < 0) {
4405 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4406 kfree_skb(skb);
4407 }
1da177e4
LT
4408}
4409
3119ae95
JH
4410void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4411{
4412 skb_queue_head_init(&req->cmd_q);
4413 req->hdev = hdev;
5d73e034 4414 req->err = 0;
3119ae95
JH
4415}
4416
4417int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4418{
4419 struct hci_dev *hdev = req->hdev;
4420 struct sk_buff *skb;
4421 unsigned long flags;
4422
4423 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4424
5d73e034
AG
 4425 /* If an error occurred during request building, remove all HCI
4426 * commands queued on the HCI request queue.
4427 */
4428 if (req->err) {
4429 skb_queue_purge(&req->cmd_q);
4430 return req->err;
4431 }
4432
3119ae95
JH
4433 /* Do not allow empty requests */
4434 if (skb_queue_empty(&req->cmd_q))
382b0c39 4435 return -ENODATA;
3119ae95
JH
4436
4437 skb = skb_peek_tail(&req->cmd_q);
4438 bt_cb(skb)->req.complete = complete;
4439
4440 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4441 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4442 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4443
4444 queue_work(hdev->workqueue, &hdev->cmd_work);
4445
4446 return 0;
4447}
4448
899de765
MH
4449bool hci_req_pending(struct hci_dev *hdev)
4450{
4451 return (hdev->req_status == HCI_REQ_PEND);
4452}
4453
1ca3a9d0 4454static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
07dc93dd 4455 u32 plen, const void *param)
1da177e4
LT
4456{
4457 int len = HCI_COMMAND_HDR_SIZE + plen;
4458 struct hci_command_hdr *hdr;
4459 struct sk_buff *skb;
4460
1da177e4 4461 skb = bt_skb_alloc(len, GFP_ATOMIC);
1ca3a9d0
JH
4462 if (!skb)
4463 return NULL;
1da177e4
LT
4464
4465 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 4466 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
4467 hdr->plen = plen;
4468
4469 if (plen)
4470 memcpy(skb_put(skb, plen), param, plen);
4471
4472 BT_DBG("skb len %d", skb->len);
4473
0d48d939 4474 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
c78ae283 4475
1ca3a9d0
JH
4476 return skb;
4477}
4478
4479/* Send HCI command */
07dc93dd
JH
4480int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4481 const void *param)
1ca3a9d0
JH
4482{
4483 struct sk_buff *skb;
4484
4485 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4486
4487 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4488 if (!skb) {
4489 BT_ERR("%s no memory for command", hdev->name);
4490 return -ENOMEM;
4491 }
4492
11714b3d
JH
 4493 /* Stand-alone HCI commands must be flagged as
4494 * single-command requests.
4495 */
4496 bt_cb(skb)->req.start = true;
4497
1da177e4 4498 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 4499 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
4500
4501 return 0;
4502}
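
/* Illustrative sketch, not part of hci_core.c: a stand-alone command
 * queued outside of any request batch. example_reset is hypothetical.
 */
static int example_reset(struct hci_dev *hdev)
{
	return hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}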
1da177e4 4503
71c76a17 4504/* Queue a command to an asynchronous HCI request */
07dc93dd
JH
4505void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4506 const void *param, u8 event)
71c76a17
JH
4507{
4508 struct hci_dev *hdev = req->hdev;
4509 struct sk_buff *skb;
4510
4511 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4512
34739c1e
AG
 4513 /* If an error occurred during request building, there is no point in
4514 * queueing the HCI command. We can simply return.
4515 */
4516 if (req->err)
4517 return;
4518
71c76a17
JH
4519 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4520 if (!skb) {
5d73e034
AG
4521 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4522 hdev->name, opcode);
4523 req->err = -ENOMEM;
e348fe6b 4524 return;
71c76a17
JH
4525 }
4526
4527 if (skb_queue_empty(&req->cmd_q))
4528 bt_cb(skb)->req.start = true;
4529
02350a72
JH
4530 bt_cb(skb)->req.event = event;
4531
71c76a17 4532 skb_queue_tail(&req->cmd_q, skb);
71c76a17
JH
4533}
4534
07dc93dd
JH
4535void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4536 const void *param)
02350a72
JH
4537{
4538 hci_req_add_ev(req, opcode, plen, param, 0);
4539}
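
/* Illustrative sketch, not part of hci_core.c: building and running a
 * request; the completion callback fires once, after the last command
 * in the batch. Both example_* names are hypothetical.
 */
static void example_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s request done, status 0x%2.2x", hdev->name, status);
}

static int example_run(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_RESET, 0, NULL);
	return hci_req_run(&req, example_complete);
}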
4540
1da177e4 4541/* Get data from the previously sent command */
a9de9248 4542void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
4543{
4544 struct hci_command_hdr *hdr;
4545
4546 if (!hdev->sent_cmd)
4547 return NULL;
4548
4549 hdr = (void *) hdev->sent_cmd->data;
4550
a9de9248 4551 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
4552 return NULL;
4553
f0e09510 4554 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
4555
4556 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4557}
4558
4559/* Send ACL data */
4560static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4561{
4562 struct hci_acl_hdr *hdr;
4563 int len = skb->len;
4564
badff6d0
ACM
4565 skb_push(skb, HCI_ACL_HDR_SIZE);
4566 skb_reset_transport_header(skb);
9c70220b 4567 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
4568 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4569 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
4570}
4571
ee22be7e 4572static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 4573 struct sk_buff *skb, __u16 flags)
1da177e4 4574{
ee22be7e 4575 struct hci_conn *conn = chan->conn;
1da177e4
LT
4576 struct hci_dev *hdev = conn->hdev;
4577 struct sk_buff *list;
4578
087bfd99
GP
4579 skb->len = skb_headlen(skb);
4580 skb->data_len = 0;
4581
4582 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
4583
4584 switch (hdev->dev_type) {
4585 case HCI_BREDR:
4586 hci_add_acl_hdr(skb, conn->handle, flags);
4587 break;
4588 case HCI_AMP:
4589 hci_add_acl_hdr(skb, chan->handle, flags);
4590 break;
4591 default:
4592 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4593 return;
4594 }
087bfd99 4595
70f23020
AE
4596 list = skb_shinfo(skb)->frag_list;
4597 if (!list) {
1da177e4
LT
4598 /* Non fragmented */
4599 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4600
73d80deb 4601 skb_queue_tail(queue, skb);
1da177e4
LT
4602 } else {
4603 /* Fragmented */
4604 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4605
4606 skb_shinfo(skb)->frag_list = NULL;
4607
4608 /* Queue all fragments atomically */
af3e6359 4609 spin_lock(&queue->lock);
1da177e4 4610
73d80deb 4611 __skb_queue_tail(queue, skb);
e702112f
AE
4612
4613 flags &= ~ACL_START;
4614 flags |= ACL_CONT;
1da177e4
LT
4615 do {
4616 skb = list; list = list->next;
8e87d142 4617
0d48d939 4618 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 4619 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
4620
4621 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4622
73d80deb 4623 __skb_queue_tail(queue, skb);
1da177e4
LT
4624 } while (list);
4625
af3e6359 4626 spin_unlock(&queue->lock);
1da177e4 4627 }
73d80deb
LAD
4628}
4629
4630void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4631{
ee22be7e 4632 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 4633
f0e09510 4634 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb 4635
ee22be7e 4636 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 4637
3eff45ea 4638 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4639}
1da177e4
LT
4640
4641/* Send SCO data */
0d861d8b 4642void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
4643{
4644 struct hci_dev *hdev = conn->hdev;
4645 struct hci_sco_hdr hdr;
4646
4647 BT_DBG("%s len %d", hdev->name, skb->len);
4648
aca3192c 4649 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
4650 hdr.dlen = skb->len;
4651
badff6d0
ACM
4652 skb_push(skb, HCI_SCO_HDR_SIZE);
4653 skb_reset_transport_header(skb);
9c70220b 4654 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4 4655
0d48d939 4656 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 4657
1da177e4 4658 skb_queue_tail(&conn->data_q, skb);
3eff45ea 4659 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4660}
1da177e4
LT
4661
4662/* ---- HCI TX task (outgoing data) ---- */
4663
4664/* HCI Connection scheduler */
6039aa73
GP
4665static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4666 int *quote)
1da177e4
LT
4667{
4668 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4669 struct hci_conn *conn = NULL, *c;
abc5de8f 4670 unsigned int num = 0, min = ~0;
1da177e4 4671
8e87d142 4672 /* We don't have to lock the device here. Connections are always
1da177e4 4673 * added and removed with TX task disabled. */
bf4c6325
GP
4674
4675 rcu_read_lock();
4676
4677 list_for_each_entry_rcu(c, &h->list, list) {
769be974 4678 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 4679 continue;
769be974
MH
4680
4681 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4682 continue;
4683
1da177e4
LT
4684 num++;
4685
4686 if (c->sent < min) {
4687 min = c->sent;
4688 conn = c;
4689 }
52087a79
LAD
4690
4691 if (hci_conn_num(hdev, type) == num)
4692 break;
1da177e4
LT
4693 }
4694
bf4c6325
GP
4695 rcu_read_unlock();
4696
1da177e4 4697 if (conn) {
6ed58ec5
VT
4698 int cnt, q;
4699
4700 switch (conn->type) {
4701 case ACL_LINK:
4702 cnt = hdev->acl_cnt;
4703 break;
4704 case SCO_LINK:
4705 case ESCO_LINK:
4706 cnt = hdev->sco_cnt;
4707 break;
4708 case LE_LINK:
4709 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4710 break;
4711 default:
4712 cnt = 0;
4713 BT_ERR("Unknown link type");
4714 }
4715
4716 q = cnt / num;
1da177e4
LT
4717 *quote = q ? q : 1;
4718 } else
4719 *quote = 0;
4720
4721 BT_DBG("conn %p quote %d", conn, *quote);
4722 return conn;
4723}
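
/* Worked example for the quota computed above, with assumed numbers:
 * if hdev->acl_cnt = 8 free buffers are shared by num = 3 busy ACL
 * connections, each gets q = 8 / 3 = 2 packets this round; a zero
 * quotient is still promoted to a quote of 1.
 */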
4724
6039aa73 4725static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
4726{
4727 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4728 struct hci_conn *c;
1da177e4 4729
bae1f5d9 4730 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 4731
bf4c6325
GP
4732 rcu_read_lock();
4733
1da177e4 4734 /* Kill stalled connections */
bf4c6325 4735 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 4736 if (c->type == type && c->sent) {
6ed93dc6
AE
4737 BT_ERR("%s killing stalled connection %pMR",
4738 hdev->name, &c->dst);
bed71748 4739 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
4740 }
4741 }
bf4c6325
GP
4742
4743 rcu_read_unlock();
1da177e4
LT
4744}
4745
6039aa73
GP
4746static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4747 int *quote)
1da177e4 4748{
73d80deb
LAD
4749 struct hci_conn_hash *h = &hdev->conn_hash;
4750 struct hci_chan *chan = NULL;
abc5de8f 4751 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 4752 struct hci_conn *conn;
73d80deb
LAD
4753 int cnt, q, conn_num = 0;
4754
4755 BT_DBG("%s", hdev->name);
4756
bf4c6325
GP
4757 rcu_read_lock();
4758
4759 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
4760 struct hci_chan *tmp;
4761
4762 if (conn->type != type)
4763 continue;
4764
4765 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4766 continue;
4767
4768 conn_num++;
4769
8192edef 4770 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
4771 struct sk_buff *skb;
4772
4773 if (skb_queue_empty(&tmp->data_q))
4774 continue;
4775
4776 skb = skb_peek(&tmp->data_q);
4777 if (skb->priority < cur_prio)
4778 continue;
4779
4780 if (skb->priority > cur_prio) {
4781 num = 0;
4782 min = ~0;
4783 cur_prio = skb->priority;
4784 }
4785
4786 num++;
4787
4788 if (conn->sent < min) {
4789 min = conn->sent;
4790 chan = tmp;
4791 }
4792 }
4793
4794 if (hci_conn_num(hdev, type) == conn_num)
4795 break;
4796 }
4797
bf4c6325
GP
4798 rcu_read_unlock();
4799
73d80deb
LAD
4800 if (!chan)
4801 return NULL;
4802
4803 switch (chan->conn->type) {
4804 case ACL_LINK:
4805 cnt = hdev->acl_cnt;
4806 break;
bd1eb66b
AE
4807 case AMP_LINK:
4808 cnt = hdev->block_cnt;
4809 break;
73d80deb
LAD
4810 case SCO_LINK:
4811 case ESCO_LINK:
4812 cnt = hdev->sco_cnt;
4813 break;
4814 case LE_LINK:
4815 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4816 break;
4817 default:
4818 cnt = 0;
4819 BT_ERR("Unknown link type");
4820 }
4821
4822 q = cnt / num;
4823 *quote = q ? q : 1;
4824 BT_DBG("chan %p quote %d", chan, *quote);
4825 return chan;
4826}
4827
02b20f0b
LAD
4828static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4829{
4830 struct hci_conn_hash *h = &hdev->conn_hash;
4831 struct hci_conn *conn;
4832 int num = 0;
4833
4834 BT_DBG("%s", hdev->name);
4835
bf4c6325
GP
4836 rcu_read_lock();
4837
4838 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
4839 struct hci_chan *chan;
4840
4841 if (conn->type != type)
4842 continue;
4843
4844 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4845 continue;
4846
4847 num++;
4848
8192edef 4849 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
4850 struct sk_buff *skb;
4851
4852 if (chan->sent) {
4853 chan->sent = 0;
4854 continue;
4855 }
4856
4857 if (skb_queue_empty(&chan->data_q))
4858 continue;
4859
4860 skb = skb_peek(&chan->data_q);
4861 if (skb->priority >= HCI_PRIO_MAX - 1)
4862 continue;
4863
4864 skb->priority = HCI_PRIO_MAX - 1;
4865
4866 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 4867 skb->priority);
02b20f0b
LAD
4868 }
4869
4870 if (hci_conn_num(hdev, type) == num)
4871 break;
4872 }
bf4c6325
GP
4873
4874 rcu_read_unlock();
4875
02b20f0b
LAD
4876}
4877
b71d385a
AE
4878static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4879{
4880 /* Calculate count of blocks used by this packet */
4881 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4882}
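
/* Worked example for __get_blocks() above, with assumed numbers: an
 * ACL frame of skb->len = 260 bytes on a controller with
 * hdev->block_len = 64 needs DIV_ROUND_UP(260 - 4, 64) = 4 buffer
 * blocks, since HCI_ACL_HDR_SIZE is 4 bytes.
 */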
4883
6039aa73 4884static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 4885{
4a964404 4886 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1da177e4
LT
4887 /* ACL tx timeout must be longer than maximum
4888 * link supervision timeout (40.9 seconds) */
63d2bc1b 4889 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 4890 HCI_ACL_TX_TIMEOUT))
bae1f5d9 4891 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 4892 }
63d2bc1b 4893}
1da177e4 4894
6039aa73 4895static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
4896{
4897 unsigned int cnt = hdev->acl_cnt;
4898 struct hci_chan *chan;
4899 struct sk_buff *skb;
4900 int quote;
4901
4902 __check_timeout(hdev, cnt);
04837f64 4903
73d80deb 4904 while (hdev->acl_cnt &&
a8c5fb1a 4905 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
4906 u32 priority = (skb_peek(&chan->data_q))->priority;
4907 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4908 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4909 skb->len, skb->priority);
73d80deb 4910
ec1cce24
LAD
4911 /* Stop if priority has changed */
4912 if (skb->priority < priority)
4913 break;
4914
4915 skb = skb_dequeue(&chan->data_q);
4916
73d80deb 4917 hci_conn_enter_active_mode(chan->conn,
04124681 4918 bt_cb(skb)->force_active);
04837f64 4919
57d17d70 4920 hci_send_frame(hdev, skb);
1da177e4
LT
4921 hdev->acl_last_tx = jiffies;
4922
4923 hdev->acl_cnt--;
73d80deb
LAD
4924 chan->sent++;
4925 chan->conn->sent++;
1da177e4
LT
4926 }
4927 }
02b20f0b
LAD
4928
4929 if (cnt != hdev->acl_cnt)
4930 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
4931}
4932
6039aa73 4933static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 4934{
63d2bc1b 4935 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
4936 struct hci_chan *chan;
4937 struct sk_buff *skb;
4938 int quote;
bd1eb66b 4939 u8 type;
b71d385a 4940
63d2bc1b 4941 __check_timeout(hdev, cnt);
b71d385a 4942
bd1eb66b
AE
4943 BT_DBG("%s", hdev->name);
4944
4945 if (hdev->dev_type == HCI_AMP)
4946 type = AMP_LINK;
4947 else
4948 type = ACL_LINK;
4949
b71d385a 4950 while (hdev->block_cnt > 0 &&
bd1eb66b 4951 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
4952 u32 priority = (skb_peek(&chan->data_q))->priority;
4953 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4954 int blocks;
4955
4956 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4957 skb->len, skb->priority);
b71d385a
AE
4958
4959 /* Stop if priority has changed */
4960 if (skb->priority < priority)
4961 break;
4962
4963 skb = skb_dequeue(&chan->data_q);
4964
4965 blocks = __get_blocks(hdev, skb);
4966 if (blocks > hdev->block_cnt)
4967 return;
4968
4969 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 4970 bt_cb(skb)->force_active);
b71d385a 4971
57d17d70 4972 hci_send_frame(hdev, skb);
b71d385a
AE
4973 hdev->acl_last_tx = jiffies;
4974
4975 hdev->block_cnt -= blocks;
4976 quote -= blocks;
4977
4978 chan->sent += blocks;
4979 chan->conn->sent += blocks;
4980 }
4981 }
4982
4983 if (cnt != hdev->block_cnt)
bd1eb66b 4984 hci_prio_recalculate(hdev, type);
b71d385a
AE
4985}
4986
6039aa73 4987static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
4988{
4989 BT_DBG("%s", hdev->name);
4990
bd1eb66b
AE
4991 /* No ACL link over BR/EDR controller */
4992 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4993 return;
4994
4995 /* No AMP link over AMP controller */
4996 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
4997 return;
4998
4999 switch (hdev->flow_ctl_mode) {
5000 case HCI_FLOW_CTL_MODE_PACKET_BASED:
5001 hci_sched_acl_pkt(hdev);
5002 break;
5003
5004 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5005 hci_sched_acl_blk(hdev);
5006 break;
5007 }
5008}
5009
1da177e4 5010/* Schedule SCO */
6039aa73 5011static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
5012{
5013 struct hci_conn *conn;
5014 struct sk_buff *skb;
5015 int quote;
5016
5017 BT_DBG("%s", hdev->name);
5018
52087a79
LAD
5019 if (!hci_conn_num(hdev, SCO_LINK))
5020 return;
5021
1da177e4
LT
5022 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5023 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5024 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 5025 hci_send_frame(hdev, skb);
1da177e4
LT
5026
5027 conn->sent++;
5028 if (conn->sent == ~0)
5029 conn->sent = 0;
5030 }
5031 }
5032}
5033
6039aa73 5034static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
5035{
5036 struct hci_conn *conn;
5037 struct sk_buff *skb;
5038 int quote;
5039
5040 BT_DBG("%s", hdev->name);
5041
52087a79
LAD
5042 if (!hci_conn_num(hdev, ESCO_LINK))
5043 return;
5044
8fc9ced3
GP
5045 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5046 &quote))) {
b6a0dc82
MH
5047 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5048 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 5049 hci_send_frame(hdev, skb);
b6a0dc82
MH
5050
5051 conn->sent++;
5052 if (conn->sent == ~0)
5053 conn->sent = 0;
5054 }
5055 }
5056}
5057
6039aa73 5058static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 5059{
73d80deb 5060 struct hci_chan *chan;
6ed58ec5 5061 struct sk_buff *skb;
02b20f0b 5062 int quote, cnt, tmp;
6ed58ec5
VT
5063
5064 BT_DBG("%s", hdev->name);
5065
52087a79
LAD
5066 if (!hci_conn_num(hdev, LE_LINK))
5067 return;
5068
4a964404 5069 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
6ed58ec5
VT
5070 /* LE tx timeout must be longer than maximum
5071 * link supervision timeout (40.9 seconds) */
bae1f5d9 5072 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 5073 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 5074 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
5075 }
5076
5077 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 5078 tmp = cnt;
73d80deb 5079 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
5080 u32 priority = (skb_peek(&chan->data_q))->priority;
5081 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 5082 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 5083 skb->len, skb->priority);
6ed58ec5 5084
ec1cce24
LAD
5085 /* Stop if priority has changed */
5086 if (skb->priority < priority)
5087 break;
5088
5089 skb = skb_dequeue(&chan->data_q);
5090
57d17d70 5091 hci_send_frame(hdev, skb);
6ed58ec5
VT
5092 hdev->le_last_tx = jiffies;
5093
5094 cnt--;
73d80deb
LAD
5095 chan->sent++;
5096 chan->conn->sent++;
6ed58ec5
VT
5097 }
5098 }
73d80deb 5099
6ed58ec5
VT
5100 if (hdev->le_pkts)
5101 hdev->le_cnt = cnt;
5102 else
5103 hdev->acl_cnt = cnt;
02b20f0b
LAD
5104
5105 if (cnt != tmp)
5106 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
5107}
5108
3eff45ea 5109static void hci_tx_work(struct work_struct *work)
1da177e4 5110{
3eff45ea 5111 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
5112 struct sk_buff *skb;
5113
6ed58ec5 5114 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 5115 hdev->sco_cnt, hdev->le_cnt);
1da177e4 5116
52de599e
MH
5117 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5118 /* Schedule queues and send stuff to HCI driver */
5119 hci_sched_acl(hdev);
5120 hci_sched_sco(hdev);
5121 hci_sched_esco(hdev);
5122 hci_sched_le(hdev);
5123 }
6ed58ec5 5124
1da177e4
LT
5125 /* Send next queued raw (unknown type) packet */
5126 while ((skb = skb_dequeue(&hdev->raw_q)))
57d17d70 5127 hci_send_frame(hdev, skb);
1da177e4
LT
5128}
5129
25985edc 5130/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
5131
5132/* ACL data packet */
6039aa73 5133static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
5134{
5135 struct hci_acl_hdr *hdr = (void *) skb->data;
5136 struct hci_conn *conn;
5137 __u16 handle, flags;
5138
5139 skb_pull(skb, HCI_ACL_HDR_SIZE);
5140
5141 handle = __le16_to_cpu(hdr->handle);
5142 flags = hci_flags(handle);
5143 handle = hci_handle(handle);
5144
f0e09510 5145 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 5146 handle, flags);
1da177e4
LT
5147
5148 hdev->stat.acl_rx++;
5149
5150 hci_dev_lock(hdev);
5151 conn = hci_conn_hash_lookup_handle(hdev, handle);
5152 hci_dev_unlock(hdev);
8e87d142 5153
1da177e4 5154 if (conn) {
65983fc7 5155 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 5156
1da177e4 5157 /* Send to upper protocol */
686ebf28
UF
5158 l2cap_recv_acldata(conn, skb, flags);
5159 return;
1da177e4 5160 } else {
8e87d142 5161 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 5162 hdev->name, handle);
1da177e4
LT
5163 }
5164
5165 kfree_skb(skb);
5166}
5167
5168/* SCO data packet */
6039aa73 5169static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
5170{
5171 struct hci_sco_hdr *hdr = (void *) skb->data;
5172 struct hci_conn *conn;
5173 __u16 handle;
5174
5175 skb_pull(skb, HCI_SCO_HDR_SIZE);
5176
5177 handle = __le16_to_cpu(hdr->handle);
5178
f0e09510 5179 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
5180
5181 hdev->stat.sco_rx++;
5182
5183 hci_dev_lock(hdev);
5184 conn = hci_conn_hash_lookup_handle(hdev, handle);
5185 hci_dev_unlock(hdev);
5186
5187 if (conn) {
1da177e4 5188 /* Send to upper protocol */
686ebf28
UF
5189 sco_recv_scodata(conn, skb);
5190 return;
1da177e4 5191 } else {
8e87d142 5192 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 5193 hdev->name, handle);
1da177e4
LT
5194 }
5195
5196 kfree_skb(skb);
5197}
5198
9238f36a
JH
5199static bool hci_req_is_complete(struct hci_dev *hdev)
5200{
5201 struct sk_buff *skb;
5202
5203 skb = skb_peek(&hdev->cmd_q);
5204 if (!skb)
5205 return true;
5206
5207 return bt_cb(skb)->req.start;
5208}
5209
42c6b129
JH
5210static void hci_resend_last(struct hci_dev *hdev)
5211{
5212 struct hci_command_hdr *sent;
5213 struct sk_buff *skb;
5214 u16 opcode;
5215
5216 if (!hdev->sent_cmd)
5217 return;
5218
5219 sent = (void *) hdev->sent_cmd->data;
5220 opcode = __le16_to_cpu(sent->opcode);
5221 if (opcode == HCI_OP_RESET)
5222 return;
5223
5224 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5225 if (!skb)
5226 return;
5227
5228 skb_queue_head(&hdev->cmd_q, skb);
5229 queue_work(hdev->workqueue, &hdev->cmd_work);
5230}
5231
9238f36a
JH
5232void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5233{
5234 hci_req_complete_t req_complete = NULL;
5235 struct sk_buff *skb;
5236 unsigned long flags;
5237
5238 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5239
42c6b129
JH
5240 /* If the completed command doesn't match the last one that was
 5241 * sent, we need to do special handling of it.
9238f36a 5242 */
42c6b129
JH
5243 if (!hci_sent_cmd_data(hdev, opcode)) {
5244 /* Some CSR based controllers generate a spontaneous
5245 * reset complete event during init and any pending
5246 * command will never be completed. In such a case we
5247 * need to resend whatever was the last sent
5248 * command.
5249 */
5250 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5251 hci_resend_last(hdev);
5252
9238f36a 5253 return;
42c6b129 5254 }
9238f36a
JH
5255
5256 /* If the command succeeded and there's still more commands in
5257 * this request the request is not yet complete.
5258 */
5259 if (!status && !hci_req_is_complete(hdev))
5260 return;
5261
5262 /* If this was the last command in a request the complete
5263 * callback would be found in hdev->sent_cmd instead of the
5264 * command queue (hdev->cmd_q).
5265 */
5266 if (hdev->sent_cmd) {
5267 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
53e21fbc
JH
5268
5269 if (req_complete) {
5270 /* We must set the complete callback to NULL to
5271 * avoid calling the callback more than once if
5272 * this function gets called again.
5273 */
5274 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5275
9238f36a 5276 goto call_complete;
53e21fbc 5277 }
9238f36a
JH
5278 }
5279
5280 /* Remove all pending commands belonging to this request */
5281 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5282 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5283 if (bt_cb(skb)->req.start) {
5284 __skb_queue_head(&hdev->cmd_q, skb);
5285 break;
5286 }
5287
5288 req_complete = bt_cb(skb)->req.complete;
5289 kfree_skb(skb);
5290 }
5291 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5292
5293call_complete:
5294 if (req_complete)
5295 req_complete(hdev, status);
5296}
5297
b78752cc 5298static void hci_rx_work(struct work_struct *work)
1da177e4 5299{
b78752cc 5300 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
5301 struct sk_buff *skb;
5302
5303 BT_DBG("%s", hdev->name);
5304
1da177e4 5305 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
5306 /* Send copy to monitor */
5307 hci_send_to_monitor(hdev, skb);
5308
1da177e4
LT
5309 if (atomic_read(&hdev->promisc)) {
5310 /* Send copy to the sockets */
470fe1b5 5311 hci_send_to_sock(hdev, skb);
1da177e4
LT
5312 }
5313
fee746b0 5314 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1da177e4
LT
5315 kfree_skb(skb);
5316 continue;
5317 }
5318
5319 if (test_bit(HCI_INIT, &hdev->flags)) {
 5320 /* Don't process data packets in this state. */
0d48d939 5321 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
5322 case HCI_ACLDATA_PKT:
5323 case HCI_SCODATA_PKT:
5324 kfree_skb(skb);
5325 continue;
3ff50b79 5326 }
1da177e4
LT
5327 }
5328
5329 /* Process frame */
0d48d939 5330 switch (bt_cb(skb)->pkt_type) {
1da177e4 5331 case HCI_EVENT_PKT:
b78752cc 5332 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
5333 hci_event_packet(hdev, skb);
5334 break;
5335
5336 case HCI_ACLDATA_PKT:
5337 BT_DBG("%s ACL data packet", hdev->name);
5338 hci_acldata_packet(hdev, skb);
5339 break;
5340
5341 case HCI_SCODATA_PKT:
5342 BT_DBG("%s SCO data packet", hdev->name);
5343 hci_scodata_packet(hdev, skb);
5344 break;
5345
5346 default:
5347 kfree_skb(skb);
5348 break;
5349 }
5350 }
1da177e4
LT
5351}
5352
c347b765 5353static void hci_cmd_work(struct work_struct *work)
1da177e4 5354{
c347b765 5355 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
5356 struct sk_buff *skb;
5357
2104786b
AE
5358 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5359 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 5360
1da177e4 5361 /* Send queued commands */
5a08ecce
AE
5362 if (atomic_read(&hdev->cmd_cnt)) {
5363 skb = skb_dequeue(&hdev->cmd_q);
5364 if (!skb)
5365 return;
5366
7585b97a 5367 kfree_skb(hdev->sent_cmd);
1da177e4 5368
a675d7f1 5369 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 5370 if (hdev->sent_cmd) {
1da177e4 5371 atomic_dec(&hdev->cmd_cnt);
57d17d70 5372 hci_send_frame(hdev, skb);
7bdb8a5c 5373 if (test_bit(HCI_RESET, &hdev->flags))
65cc2b49 5374 cancel_delayed_work(&hdev->cmd_timer);
7bdb8a5c 5375 else
65cc2b49
MH
5376 schedule_delayed_work(&hdev->cmd_timer,
5377 HCI_CMD_TIMEOUT);
1da177e4
LT
5378 } else {
5379 skb_queue_head(&hdev->cmd_q, skb);
c347b765 5380 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
5381 }
5382 }
5383}
b1efcc28
AG
5384
5385void hci_req_add_le_scan_disable(struct hci_request *req)
5386{
5387 struct hci_cp_le_set_scan_enable cp;
5388
5389 memset(&cp, 0, sizeof(cp));
5390 cp.enable = LE_SCAN_DISABLE;
5391 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5392}
a4790dbd 5393
8ef30fd3
AG
5394void hci_req_add_le_passive_scan(struct hci_request *req)
5395{
5396 struct hci_cp_le_set_scan_param param_cp;
5397 struct hci_cp_le_set_scan_enable enable_cp;
5398 struct hci_dev *hdev = req->hdev;
5399 u8 own_addr_type;
5400
6ab535a7
MH
 5401 /* Set require_privacy to false since no SCAN_REQ are sent
5402 * during passive scanning. Not using an unresolvable address
5403 * here is important so that peer devices using direct
5404 * advertising with our address will be correctly reported
5405 * by the controller.
8ef30fd3 5406 */
6ab535a7 5407 if (hci_update_random_address(req, false, &own_addr_type))
8ef30fd3
AG
5408 return;
5409
5410 memset(&param_cp, 0, sizeof(param_cp));
5411 param_cp.type = LE_SCAN_PASSIVE;
5412 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5413 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5414 param_cp.own_address_type = own_addr_type;
5415 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5416 &param_cp);
5417
5418 memset(&enable_cp, 0, sizeof(enable_cp));
5419 enable_cp.enable = LE_SCAN_ENABLE;
4340a124 5420 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
8ef30fd3
AG
5421 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5422 &enable_cp);
5423}
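
/* Illustrative sketch, not part of hci_core.c: restarting passive
 * scanning as one request, mirroring what hci_update_background_scan()
 * does below. example_restart_passive_scan is a hypothetical name.
 */
static int example_restart_passive_scan(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add_le_scan_disable(&req);
	hci_req_add_le_passive_scan(&req);

	return hci_req_run(&req, NULL);
}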
5424
a4790dbd
AG
5425static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5426{
5427 if (status)
5428 BT_DBG("HCI request failed to update background scanning: "
5429 "status 0x%2.2x", status);
5430}
5431
5432/* This function controls the background scanning based on hdev->pend_le_conns
 5433 * list. If there are pending LE connections we start the background scanning,
5434 * otherwise we stop it.
5435 *
5436 * This function requires the caller holds hdev->lock.
5437 */
5438void hci_update_background_scan(struct hci_dev *hdev)
5439{
a4790dbd
AG
5440 struct hci_request req;
5441 struct hci_conn *conn;
5442 int err;
5443
c20c02d5
MH
5444 if (!test_bit(HCI_UP, &hdev->flags) ||
5445 test_bit(HCI_INIT, &hdev->flags) ||
5446 test_bit(HCI_SETUP, &hdev->dev_flags) ||
d603b76b 5447 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
b8221770 5448 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
c20c02d5 5449 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
1c1697c0
MH
5450 return;
5451
a70f4b5f
JH
5452 /* No point in doing scanning if LE support hasn't been enabled */
5453 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5454 return;
5455
ae23ada4
JH
5456 /* If discovery is active don't interfere with it */
5457 if (hdev->discovery.state != DISCOVERY_STOPPED)
5458 return;
5459
a4790dbd
AG
5460 hci_req_init(&req, hdev);
5461
2b7be33e
JH
5462 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
5463 list_empty(&hdev->pend_le_conns) &&
66f8455a 5464 list_empty(&hdev->pend_le_reports)) {
0d2bf134
JH
 5465 /* If there are no pending LE connections or devices
5466 * to be scanned for, we should stop the background
5467 * scanning.
a4790dbd
AG
5468 */
5469
5470 /* If controller is not scanning we are done. */
5471 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5472 return;
5473
5474 hci_req_add_le_scan_disable(&req);
5475
5476 BT_DBG("%s stopping background scanning", hdev->name);
5477 } else {
a4790dbd
AG
5478 /* If there is at least one pending LE connection, we should
5479 * keep the background scan running.
5480 */
5481
a4790dbd
AG
5482 /* If controller is connecting, we should not start scanning
5483 * since some controllers are not able to scan and connect at
5484 * the same time.
5485 */
5486 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5487 if (conn)
5488 return;
5489
4340a124
AG
5490 /* If controller is currently scanning, we stop it to ensure we
5491 * don't miss any advertising (due to duplicates filter).
5492 */
5493 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5494 hci_req_add_le_scan_disable(&req);
5495
8ef30fd3 5496 hci_req_add_le_passive_scan(&req);
a4790dbd
AG
5497
5498 BT_DBG("%s starting background scanning", hdev->name);
5499 }
5500
5501 err = hci_req_run(&req, update_background_scan_complete);
5502 if (err)
5503 BT_ERR("Failed to run HCI request: err %d", err);
5504}