/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}
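
/* Writing "Y" puts the controller into Device Under Test mode with the
 * HCI Enable Device Under Test Mode command. There is no command for
 * leaving DUT mode again, so writing "N" issues HCI Reset instead.
 */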
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int whitelist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->whitelist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int whitelist_open(struct inode *inode, struct file *file)
{
	return single_open(file, whitelist_show, inode->i_private);
}

static const struct file_operations whitelist_fops = {
	.open		= whitelist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};
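
/* The idle timeout is expressed in milliseconds: 0 disables it, any
 * other value must lie between 500 msec and 3600000 msec (one hour).
 */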
static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");
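
/* Sniff intervals are expressed in baseband slots of 0.625 ms; the
 * specification only permits even values, hence the val % 2 check.
 */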
static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");
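
/* These two values bound the age, in milliseconds, that cached
 * connection information such as RSSI may reach before it is read
 * from the controller again.
 */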
static int conn_info_min_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val > hdev->conn_info_max_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_min_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_min_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
			conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val < hdev->conn_info_min_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_max_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_max_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
			conn_info_max_age_set, "%llu\n");
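
/* The identity consists of the identity address (public or static
 * random), its address type, the local Identity Resolving Key and the
 * currently used Resolvable Private Address.
 */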
static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->le_white_list, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
	.open		= white_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
		struct smp_irk *irk = list_entry(p, struct smp_irk, list);
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   __le64_to_cpu(ltk->rand), 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
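
/* LE connection intervals are expressed in units of 1.25 ms. The valid
 * range defined by the specification is 0x0006 (7.5 ms) to 0x0c80 (4 s).
 */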
static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");
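
/* The connection latency is the number of connection events a slave is
 * allowed to skip; the specification caps it at 0x01f3 (499).
 */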
static int conn_latency_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val > 0x01f3)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_latency = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_latency;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
			conn_latency_set, "%llu\n");
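
/* The link supervision timeout is expressed in units of 10 ms. The valid
 * range defined by the specification is 0x000a (100 ms) to 0x0c80 (32 s).
 */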
static int supervision_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x000a || val > 0x0c80)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_supv_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_supv_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
			supervision_timeout_set, "%llu\n");
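
/* The advertising channel map is a bitmask: bit 0 enables channel 37,
 * bit 1 channel 38 and bit 2 channel 39, so at least one and at most
 * all three bits (0x07) must be set.
 */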
static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");
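
/* Advertising intervals are expressed in units of 0.625 ms. The valid
 * range defined by the specification is 0x0020 (20 ms) to 0x4000
 * (10.24 s).
 */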
static int adv_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
			adv_min_interval_set, "%llu\n");

static int adv_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
			adv_max_interval_set, "%llu\n");

static int device_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct hci_conn_params *p;

	hci_dev_lock(hdev);
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
	.open		= device_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
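
/* Fetch the event received for the last issued command from hdev->recv_evt
 * and hand it to the caller. When @event is 0, only a Command Complete
 * event matching @opcode is accepted; anything else ends in -ENODATA.
 */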
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
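
/* Send a single HCI command and wait for the matching Command Complete
 * event (or, when @event is given, for that specific event) or a timeout.
 * Callers are expected to serialize access through hci_req_lock(), as
 * dut_mode_write() above does.
 */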
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
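
/* Pick the best inquiry mode the controller can handle: 0x02 selects
 * Extended Inquiry Result events, 0x01 Inquiry Result with RSSI and
 * 0x00 the standard Inquiry Result format. The manufacturer/revision
 * checks match a few controllers that handle RSSI inquiry results
 * without advertising the feature bit.
 */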
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, the host features page should be
		 * available as well. However some controllers list the
		 * max_page as 0 as long as SSP has not been enabled. To
		 * achieve proper debugging output, force the minimum
		 * max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and send it only if the command is
	 * marked as supported. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x0f;

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
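
/* Controller initialization runs in up to four stages: stage 1 resets the
 * controller and reads basic information, and is all that AMP controllers
 * need. Stages 2 to 4 configure BR/EDR/LE specifics. The debugfs entries
 * are created only once, while HCI_SETUP is still set.
 */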
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("whitelist", 0444, hdev->debugfs, hdev,
			    &whitelist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
			    &conn_info_min_age_fops);
	debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
			    &conn_info_max_age_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_file("identity", 0400, hdev->debugfs,
				    hdev, &identity_fops);
		debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
				    hdev, &rpa_timeout_fops);
		debugfs_create_file("random_address", 0444, hdev->debugfs,
				    hdev, &random_address_fops);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

		/* For controllers with a public address, provide a debug
		 * option to force the usage of the configured static
		 * address. By default the public address is used.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			debugfs_create_file("force_static_address", 0644,
					    hdev->debugfs, hdev,
					    &force_static_address_fops);

		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
				    &white_list_fops);
		debugfs_create_file("identity_resolving_keys", 0400,
				    hdev->debugfs, hdev,
				    &identity_resolving_keys_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("conn_latency", 0644, hdev->debugfs,
				    hdev, &conn_latency_fops);
		debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
				    hdev, &supervision_timeout_fops);
		debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
				    hdev, &adv_channel_map_fops);
		debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
				    hdev, &adv_min_interval_fops);
		debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
				    hdev, &adv_max_interval_fops);
		debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
				    &device_list_fops);
		debugfs_create_u16("discov_interleaved_timeout", 0644,
				   hdev->debugfs,
				   &hdev->discov_interleaved_timeout);
	}

	return 0;
}
1905
0ebca7d6
MH
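/* Minimal init request used while a controller is still unconfigured:
 * reset (unless the quirk says otherwise) and read just enough
 * information (local version, and the BD address when the driver can
 * change it) to decide how the device may be configured.
 */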
static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

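/* Synchronously run the init0 request for an unconfigured controller.
 * Raw devices are skipped entirely.
 */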
static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return.
 */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}

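/* Copy up to @num cached inquiry entries into @buf as struct
 * inquiry_info records and return the number copied. Runs under
 * hdev->lock and therefore must not sleep.
 */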
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

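/* Bit-wait action used with wait_on_bit(): give up the CPU and
 * report whether the sleep was interrupted by a signal.
 */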
static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

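/* HCIINQUIRY ioctl handler: flush the cache if needed, run the
 * inquiry synchronously and copy the cached results to userspace.
 */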
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Allocate a temporary buffer first
	 * and copy it to userspace afterwards.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

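/* Power on the controller: run the driver open callback, the
 * optional setup stage and the HCI init sequence, all serialized
 * behind the request lock.
 */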
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for a valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			ret = __hci_unconf_init(hdev);
	}

	if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
		    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_PAIRABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
	    !test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list)
		list_del_init(&p->action);

	BT_DBG("All LE pending actions cleared");
}

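/* Power off the controller: cancel pending work and timers, flush
 * all queues, close the driver and clear non-persistent state.
 */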
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled.
	 */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

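/* Keep the mgmt connectable/discoverable settings in sync with a
 * scan mode change made behind mgmt's back via HCISETSCAN.
 */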
static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
						 &hdev->dev_flags);
	else
		conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
						  &hdev->dev_flags);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
						   &hdev->dev_flags);
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
						    &hdev->dev_flags);
	}

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
			mgmt_update_adv_data(hdev);

		mgmt_new_settings(hdev);
	}
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

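/* HCIGETDEVLIST ioctl handler: report the id and flags of every
 * registered controller, masking HCI_UP while auto-off is pending
 * so that such devices still appear to be down.
 */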
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

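/* rfkill set_block hook: blocking the radio closes the device unless
 * it is still in the setup or config stage; unblocking only clears
 * the flag.
 */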
static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_CONFIG, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
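/* Deferred power-on work: open the device, re-check error conditions
 * that were ignored during setup, and announce the (possibly
 * unconfigured) controller to the management interface.
 */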
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send the Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		list_del(&k->list);
		kfree(k);
	}
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

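/* Decide whether a new link key should be stored persistently, based
 * on the key type and the bonding requirements both sides declared
 * during pairing.
 */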
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side requested no-bonding */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently.
	 */
	return false;
}

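/* Map an LTK type to the connection role it authenticates: SMP_LTK
 * belongs to the master role, all other types to the slave role.
 */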
static u8 ltk_role(u8 type)
{
	if (type == SMP_LTK)
		return HCI_ROLE_MASTER;

	return HCI_ROLE_SLAVE;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
			     u8 role)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv || k->rand != rand)
			continue;

		if (ltk_role(k->type) != role)
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0 &&
		    ltk_role(k->type) == role)
			return k;

	return NULL;
}

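/* Look up an IRK by resolvable private address: first check the
 * cached RPA of each key, then try to resolve the address with every
 * stored IRK and cache a match.
 */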
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa))
			return irk;
	}

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			return irk;
		}
	}

	return NULL;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0)
			return irk;
	}

	return NULL;
}

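/* Store or update a BR/EDR link key. When @persistent is non-NULL it
 * is set to whether the key should survive a power cycle, as decided
 * by hci_persistent_key().
 */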
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key.
	 */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, role);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k, *tmp;
	int removed = 0;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}
}

/* HCI command timeout handler: the controller failed to respond to
 * the last sent command in time, so log it, restore the command
 * credit and kick the command work.
 */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 *hash, u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));

	memset(data->hash256, 0, sizeof(data->hash256));
	memset(data->randomizer256, 0, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
				u8 *hash192, u8 *randomizer192,
				u8 *hash256, u8 *randomizer256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash192, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));

	memcpy(data->hash256, hash256, sizeof(data->hash256));
	memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
					   bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, bdaddr_list) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}
}

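/* Add an address/type pair to a bdaddr list. BDADDR_ANY is rejected
 * and duplicate entries return -EEXIST.
 */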
int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, list);

	return 0;
}

int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	/* The conn params list only contains identity addresses */
	if (!hci_is_identity_address(addr, addr_type))
		return NULL;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
			return params;
		}
	}

	return NULL;
}

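/* Check whether an LE link to the given address and address type is
 * currently established.
 */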
static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
	if (!conn)
		return false;

	if (conn->dst_type != type)
		return false;

	if (conn->state != BT_CONNECTED)
		return false;

	return true;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
						  bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *param;

	/* The list only contains identity addresses */
	if (!hci_is_identity_address(addr, addr_type))
		return NULL;

	list_for_each_entry(param, list, action) {
		if (bacmp(&param->addr, addr) == 0 &&
		    param->addr_type == addr_type)
			return param;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	if (!hci_is_identity_address(addr, addr_type))
		return NULL;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	INIT_LIST_HEAD(&params->action);

	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}

/* This function requires the caller holds hdev->lock */
int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
			u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		hci_update_background_scan(hdev);
		break;
	case HCI_AUTO_CONN_REPORT:
		list_add(&params->action, &hdev->pend_le_reports);
		hci_update_background_scan(hdev);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		if (!is_connected(hdev, addr, addr_type)) {
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
		}
		break;
	}

	params->auto_connect = auto_connect;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	list_del(&params->action);
	list_del(&params->list);
	kfree(params);

	hci_update_background_scan(hdev);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;
		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_all(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
	}

	hci_update_background_scan(hdev);

	BT_DBG("All LE connection parameters were removed");
}

static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

4c87eaab 3788static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3789{
4c87eaab
AG
3790 /* General inquiry access code (GIAC) */
3791 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3792 struct hci_request req;
3793 struct hci_cp_inquiry cp;
7ba8b4be
AG
3794 int err;
3795
4c87eaab
AG
3796 if (status) {
3797 BT_ERR("Failed to disable LE scanning: status %d", status);
3798 return;
3799 }
7ba8b4be 3800
4c87eaab
AG
3801 switch (hdev->discovery.type) {
3802 case DISCOV_TYPE_LE:
3803 hci_dev_lock(hdev);
3804 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3805 hci_dev_unlock(hdev);
3806 break;
7ba8b4be 3807
4c87eaab
AG
3808 case DISCOV_TYPE_INTERLEAVED:
3809 hci_req_init(&req, hdev);
7ba8b4be 3810
4c87eaab
AG
3811 memset(&cp, 0, sizeof(cp));
3812 memcpy(&cp.lap, lap, sizeof(cp.lap));
3813 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3814 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
7ba8b4be 3815
4c87eaab 3816 hci_dev_lock(hdev);
7dbfac1d 3817
4c87eaab 3818 hci_inquiry_cache_flush(hdev);
7dbfac1d 3819
4c87eaab
AG
3820 err = hci_req_run(&req, inquiry_complete);
3821 if (err) {
3822 BT_ERR("Inquiry request failed: err %d", err);
3823 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3824 }
7dbfac1d 3825
4c87eaab
AG
3826 hci_dev_unlock(hdev);
3827 break;
7dbfac1d 3828 }
7dbfac1d
AG
3829}
3830
7ba8b4be
AG
3831static void le_scan_disable_work(struct work_struct *work)
3832{
3833 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 3834 le_scan_disable.work);
4c87eaab
AG
3835 struct hci_request req;
3836 int err;
7ba8b4be
AG
3837
3838 BT_DBG("%s", hdev->name);
3839
4c87eaab 3840 hci_req_init(&req, hdev);
28b75a89 3841
b1efcc28 3842 hci_req_add_le_scan_disable(&req);
28b75a89 3843
4c87eaab
AG
3844 err = hci_req_run(&req, le_scan_disable_work_complete);
3845 if (err)
3846 BT_ERR("Disable LE scanning request failed: err %d", err);
28b75a89
AG
3847}
3848
8d97250e
JH
3849static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3850{
3851 struct hci_dev *hdev = req->hdev;
3852
3853 /* If we're advertising or initiating an LE connection we can't
3854 * go ahead and change the random address at this time. This is
3855 * because the eventual initiator address used for the
3856 * subsequently created connection will be undefined (some
3857 * controllers use the new address and others the one we had
3858 * when the operation started).
3859 *
3860 * In this kind of scenario skip the update and let the random
3861 * address be updated at the next cycle.
3862 */
5ce194c4 3863 if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
8d97250e
JH
3864 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3865 BT_DBG("Deferring random address update");
3866 return;
3867 }
3868
3869 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3870}
3871
94b1fc92
MH
3872int hci_update_random_address(struct hci_request *req, bool require_privacy,
3873 u8 *own_addr_type)
ebd3a747
JH
3874{
3875 struct hci_dev *hdev = req->hdev;
3876 int err;
3877
3878 /* If privacy is enabled use a resolvable private address. If
2b5224dc
MH
3879 * the current RPA has expired or there is something other than
3880 * the current RPA in use, then generate a new one.
ebd3a747
JH
3881 */
3882 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
ebd3a747
JH
3883 int to;
3884
3885 *own_addr_type = ADDR_LE_DEV_RANDOM;
3886
3887 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
2b5224dc 3888 !bacmp(&hdev->random_addr, &hdev->rpa))
ebd3a747
JH
3889 return 0;
3890
2b5224dc 3891 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
ebd3a747
JH
3892 if (err < 0) {
3893 BT_ERR("%s failed to generate new RPA", hdev->name);
3894 return err;
3895 }
3896
8d97250e 3897 set_random_addr(req, &hdev->rpa);
ebd3a747
JH
3898
3899 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3900 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3901
3902 return 0;
94b1fc92
MH
3903 }
3904
3905 /* In case of required privacy without a resolvable private address,
3906 * use an unresolvable private address. This is useful for active
3907 * scanning and non-connectable advertising.
3908 */
3909 if (require_privacy) {
3910 bdaddr_t urpa;
3911
3912 get_random_bytes(&urpa, 6);
3913 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3914
3915 *own_addr_type = ADDR_LE_DEV_RANDOM;
8d97250e 3916 set_random_addr(req, &urpa);
94b1fc92 3917 return 0;
ebd3a747
JH
3918 }
3919
3920 /* If forcing static address is in use or there is no public
3921 * address, use the static address as the random address (but skip
3922 * the HCI command if the current random address is already the
3923 * static one).
3924 */
111902f7 3925 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
ebd3a747
JH
3926 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3927 *own_addr_type = ADDR_LE_DEV_RANDOM;
3928 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3929 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3930 &hdev->static_addr);
3931 return 0;
3932 }
3933
3934 /* Neither privacy nor static address is being used so use a
3935 * public address.
3936 */
3937 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3938
3939 return 0;
3940}
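
/* A sketch of the typical call pattern (mirroring
 * hci_req_add_le_passive_scan() below): resolve the own-address type
 * while building a request, then use it for the scan or advertising
 * parameters. The surrounding function is hypothetical.
 */
static void example_set_own_address(struct hci_request *req)
{
	u8 own_addr_type;

	/* require_privacy false: do not force a non-resolvable address */
	if (hci_update_random_address(req, false, &own_addr_type) < 0)
		return;

	/* own_addr_type is now ADDR_LE_DEV_PUBLIC or ADDR_LE_DEV_RANDOM,
	 * and any needed HCI_OP_LE_SET_RANDOM_ADDR command is queued. */
}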
3941
a1f4c318
JH
3942/* Copy the Identity Address of the controller.
3943 *
3944 * If the controller has a public BD_ADDR, then by default use that one.
3945 * If this is an LE-only controller without a public address, default to
3946 * the static random address.
3947 *
3948 * For debugging purposes it is possible to force controllers with a
3949 * public address to use the static random address instead.
3950 */
3951void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3952 u8 *bdaddr_type)
3953{
111902f7 3954 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
a1f4c318
JH
3955 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3956 bacpy(bdaddr, &hdev->static_addr);
3957 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3958 } else {
3959 bacpy(bdaddr, &hdev->bdaddr);
3960 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3961 }
3962}
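
/* Usage sketch: fetching the identity address, e.g. when filling
 * advertising or mgmt data. The function and variable names are
 * illustrative only.
 */
static void example_get_identity(struct hci_dev *hdev)
{
	bdaddr_t id_addr;
	u8 id_addr_type;

	hci_copy_identity_address(hdev, &id_addr, &id_addr_type);
	BT_DBG("identity %pMR (type %u)", &id_addr, id_addr_type);
}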
3963
9be0dab7
DH
3964/* Alloc HCI device */
3965struct hci_dev *hci_alloc_dev(void)
3966{
3967 struct hci_dev *hdev;
3968
27f70f3e 3969 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
9be0dab7
DH
3970 if (!hdev)
3971 return NULL;
3972
b1b813d4
DH
3973 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3974 hdev->esco_type = (ESCO_HV1);
3975 hdev->link_mode = (HCI_LM_ACCEPT);
b4cb9fb2
MH
3976 hdev->num_iac = 0x01; /* Support for one IAC is mandatory */
3977 hdev->io_capability = 0x03; /* No Input No Output */
96c2103a 3978 hdev->manufacturer = 0xffff; /* Default to internal use */
bbaf444a
JH
3979 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3980 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
b1b813d4 3981
b1b813d4
DH
3982 hdev->sniff_max_interval = 800;
3983 hdev->sniff_min_interval = 80;
3984
3f959d46 3985 hdev->le_adv_channel_map = 0x07;
628531c9
GL
3986 hdev->le_adv_min_interval = 0x0800;
3987 hdev->le_adv_max_interval = 0x0800;
bef64738
MH
3988 hdev->le_scan_interval = 0x0060;
3989 hdev->le_scan_window = 0x0030;
4e70c7e7
MH
3990 hdev->le_conn_min_interval = 0x0028;
3991 hdev->le_conn_max_interval = 0x0038;
04fb7d90
MH
3992 hdev->le_conn_latency = 0x0000;
3993 hdev->le_supv_timeout = 0x002a;
bef64738 3994
d6bfd59c 3995 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
b9a7a61e 3996 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
31ad1691
AK
3997 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3998 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
d6bfd59c 3999
b1b813d4
DH
4000 mutex_init(&hdev->lock);
4001 mutex_init(&hdev->req_lock);
4002
4003 INIT_LIST_HEAD(&hdev->mgmt_pending);
4004 INIT_LIST_HEAD(&hdev->blacklist);
6659358e 4005 INIT_LIST_HEAD(&hdev->whitelist);
b1b813d4
DH
4006 INIT_LIST_HEAD(&hdev->uuids);
4007 INIT_LIST_HEAD(&hdev->link_keys);
4008 INIT_LIST_HEAD(&hdev->long_term_keys);
970c4e46 4009 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
b1b813d4 4010 INIT_LIST_HEAD(&hdev->remote_oob_data);
d2ab0ac1 4011 INIT_LIST_HEAD(&hdev->le_white_list);
15819a70 4012 INIT_LIST_HEAD(&hdev->le_conn_params);
77a77a30 4013 INIT_LIST_HEAD(&hdev->pend_le_conns);
66f8455a 4014 INIT_LIST_HEAD(&hdev->pend_le_reports);
6b536b5e 4015 INIT_LIST_HEAD(&hdev->conn_hash.list);
b1b813d4
DH
4016
4017 INIT_WORK(&hdev->rx_work, hci_rx_work);
4018 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
4019 INIT_WORK(&hdev->tx_work, hci_tx_work);
4020 INIT_WORK(&hdev->power_on, hci_power_on);
b1b813d4 4021
b1b813d4
DH
4022 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
4023 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
4024 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
4025
b1b813d4
DH
4026 skb_queue_head_init(&hdev->rx_q);
4027 skb_queue_head_init(&hdev->cmd_q);
4028 skb_queue_head_init(&hdev->raw_q);
4029
4030 init_waitqueue_head(&hdev->req_wait_q);
4031
65cc2b49 4032 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
b1b813d4 4033
b1b813d4
DH
4034 hci_init_sysfs(hdev);
4035 discovery_init(hdev);
9be0dab7
DH
4036
4037 return hdev;
4038}
4039EXPORT_SYMBOL(hci_alloc_dev);
4040
4041/* Free HCI device */
4042void hci_free_dev(struct hci_dev *hdev)
4043{
9be0dab7
DH
4044 /* will free via device release */
4045 put_device(&hdev->dev);
4046}
4047EXPORT_SYMBOL(hci_free_dev);
4048
1da177e4
LT
4049/* Register HCI device */
4050int hci_register_dev(struct hci_dev *hdev)
4051{
b1b813d4 4052 int id, error;
1da177e4 4053
74292d5a 4054 if (!hdev->open || !hdev->close || !hdev->send)
1da177e4
LT
4055 return -EINVAL;
4056
08add513
MM
4057 /* Do not allow HCI_AMP devices to register at index 0,
4058 * so the index can be used as the AMP controller ID.
4059 */
3df92b31
SL
4060 switch (hdev->dev_type) {
4061 case HCI_BREDR:
4062 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
4063 break;
4064 case HCI_AMP:
4065 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
4066 break;
4067 default:
4068 return -EINVAL;
1da177e4 4069 }
8e87d142 4070
3df92b31
SL
4071 if (id < 0)
4072 return id;
4073
1da177e4
LT
4074 sprintf(hdev->name, "hci%d", id);
4075 hdev->id = id;
2d8b3a11
AE
4076
4077 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4078
d8537548
KC
4079 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4080 WQ_MEM_RECLAIM, 1, hdev->name);
33ca954d
DH
4081 if (!hdev->workqueue) {
4082 error = -ENOMEM;
4083 goto err;
4084 }
f48fd9c8 4085
d8537548
KC
4086 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4087 WQ_MEM_RECLAIM, 1, hdev->name);
6ead1bbc
JH
4088 if (!hdev->req_workqueue) {
4089 destroy_workqueue(hdev->workqueue);
4090 error = -ENOMEM;
4091 goto err;
4092 }
4093
0153e2ec
MH
4094 if (!IS_ERR_OR_NULL(bt_debugfs))
4095 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4096
bdc3e0f1
MH
4097 dev_set_name(&hdev->dev, "%s", hdev->name);
4098
99780a7b
JH
4099 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
4100 CRYPTO_ALG_ASYNC);
4101 if (IS_ERR(hdev->tfm_aes)) {
4102 BT_ERR("Unable to create crypto context");
4103 error = PTR_ERR(hdev->tfm_aes);
4104 hdev->tfm_aes = NULL;
4105 goto err_wqueue;
4106 }
4107
bdc3e0f1 4108 error = device_add(&hdev->dev);
33ca954d 4109 if (error < 0)
99780a7b 4110 goto err_tfm;
1da177e4 4111
611b30f7 4112 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
4113 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4114 hdev);
611b30f7
MH
4115 if (hdev->rfkill) {
4116 if (rfkill_register(hdev->rfkill) < 0) {
4117 rfkill_destroy(hdev->rfkill);
4118 hdev->rfkill = NULL;
4119 }
4120 }
4121
5e130367
JH
4122 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4123 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4124
a8b2d5c2 4125 set_bit(HCI_SETUP, &hdev->dev_flags);
004b0258 4126 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
ce2be9ac 4127
01cd3404 4128 if (hdev->dev_type == HCI_BREDR) {
56f87901
JH
4129 /* Assume BR/EDR support until proven otherwise (such as
4130 * through reading supported features during init).
4131 */
4132 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4133 }
ce2be9ac 4134
fcee3377
GP
4135 write_lock(&hci_dev_list_lock);
4136 list_add(&hdev->list, &hci_dev_list);
4137 write_unlock(&hci_dev_list_lock);
4138
4a964404
MH
4139 /* Devices that are marked for raw-only usage are unconfigured
4140 * and should not be included in normal operation.
fee746b0
MH
4141 */
4142 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4a964404 4143 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
fee746b0 4144
1da177e4 4145 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 4146 hci_dev_hold(hdev);
1da177e4 4147
19202573 4148 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 4149
1da177e4 4150 return id;
f48fd9c8 4151
99780a7b
JH
4152err_tfm:
4153 crypto_free_blkcipher(hdev->tfm_aes);
33ca954d
DH
4154err_wqueue:
4155 destroy_workqueue(hdev->workqueue);
6ead1bbc 4156 destroy_workqueue(hdev->req_workqueue);
33ca954d 4157err:
3df92b31 4158 ida_simple_remove(&hci_index_ida, hdev->id);
f48fd9c8 4159
33ca954d 4160 return error;
1da177e4
LT
4161}
4162EXPORT_SYMBOL(hci_register_dev);
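
/* A condensed driver-side sketch of the registration contract checked
 * above: open, close and send must be set before hci_register_dev().
 * The probe function, the stub callbacks and the HCI_VIRTUAL bus type
 * are assumptions; only the hci_dev API usage mirrors this file.
 */
static int example_open(struct hci_dev *hdev) { return 0; }
static int example_close(struct hci_dev *hdev) { return 0; }

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* A real driver hands the frame to its transport here;
	 * bt_cb(skb)->pkt_type says whether it is a command, ACL or
	 * SCO packet. */
	kfree_skb(skb);	/* stand-in for actual hardware I/O */
	return 0;
}

static int example_probe(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_VIRTUAL;	/* assumed transport type */
	hdev->open = example_open;
	hdev->close = example_close;
	hdev->send = example_send;

	err = hci_register_dev(hdev);
	if (err < 0)
		hci_free_dev(hdev);

	return err;
}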
4163
4164/* Unregister HCI device */
59735631 4165void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 4166{
3df92b31 4167 int i, id;
ef222013 4168
c13854ce 4169 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 4170
94324962
JH
4171 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4172
3df92b31
SL
4173 id = hdev->id;
4174
f20d09d5 4175 write_lock(&hci_dev_list_lock);
1da177e4 4176 list_del(&hdev->list);
f20d09d5 4177 write_unlock(&hci_dev_list_lock);
1da177e4
LT
4178
4179 hci_dev_do_close(hdev);
4180
cd4c5391 4181 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
4182 kfree_skb(hdev->reassembly[i]);
4183
b9b5ef18
GP
4184 cancel_work_sync(&hdev->power_on);
4185
ab81cbf9 4186 if (!test_bit(HCI_INIT, &hdev->flags) &&
d603b76b
MH
4187 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4188 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
09fd0de5 4189 hci_dev_lock(hdev);
744cf19e 4190 mgmt_index_removed(hdev);
09fd0de5 4191 hci_dev_unlock(hdev);
56e5cb86 4192 }
ab81cbf9 4193
2e58ef3e
JH
4194 /* mgmt_index_removed should take care of emptying the
4195 * pending list */
4196 BUG_ON(!list_empty(&hdev->mgmt_pending));
4197
1da177e4
LT
4198 hci_notify(hdev, HCI_DEV_UNREG);
4199
611b30f7
MH
4200 if (hdev->rfkill) {
4201 rfkill_unregister(hdev->rfkill);
4202 rfkill_destroy(hdev->rfkill);
4203 }
4204
99780a7b
JH
4205 if (hdev->tfm_aes)
4206 crypto_free_blkcipher(hdev->tfm_aes);
4207
bdc3e0f1 4208 device_del(&hdev->dev);
147e2d59 4209
0153e2ec
MH
4210 debugfs_remove_recursive(hdev->debugfs);
4211
f48fd9c8 4212 destroy_workqueue(hdev->workqueue);
6ead1bbc 4213 destroy_workqueue(hdev->req_workqueue);
f48fd9c8 4214
09fd0de5 4215 hci_dev_lock(hdev);
dcc36c16 4216 hci_bdaddr_list_clear(&hdev->blacklist);
6659358e 4217 hci_bdaddr_list_clear(&hdev->whitelist);
2aeb9a1a 4218 hci_uuids_clear(hdev);
55ed8ca1 4219 hci_link_keys_clear(hdev);
b899efaf 4220 hci_smp_ltks_clear(hdev);
970c4e46 4221 hci_smp_irks_clear(hdev);
2763eda6 4222 hci_remote_oob_data_clear(hdev);
dcc36c16 4223 hci_bdaddr_list_clear(&hdev->le_white_list);
373110c5 4224 hci_conn_params_clear_all(hdev);
09fd0de5 4225 hci_dev_unlock(hdev);
e2e0cacb 4226
dc946bd8 4227 hci_dev_put(hdev);
3df92b31
SL
4228
4229 ida_simple_remove(&hci_index_ida, id);
1da177e4
LT
4230}
4231EXPORT_SYMBOL(hci_unregister_dev);
4232
4233/* Suspend HCI device */
4234int hci_suspend_dev(struct hci_dev *hdev)
4235{
4236 hci_notify(hdev, HCI_DEV_SUSPEND);
4237 return 0;
4238}
4239EXPORT_SYMBOL(hci_suspend_dev);
4240
4241/* Resume HCI device */
4242int hci_resume_dev(struct hci_dev *hdev)
4243{
4244 hci_notify(hdev, HCI_DEV_RESUME);
4245 return 0;
4246}
4247EXPORT_SYMBOL(hci_resume_dev);
4248
76bca880 4249/* Receive frame from HCI drivers */
e1a26170 4250int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 4251{
76bca880 4252 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 4253 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
4254 kfree_skb(skb);
4255 return -ENXIO;
4256 }
4257
d82603c6 4258 /* Incoming skb */
76bca880
MH
4259 bt_cb(skb)->incoming = 1;
4260
4261 /* Time stamp */
4262 __net_timestamp(skb);
4263
76bca880 4264 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 4265 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 4266
76bca880
MH
4267 return 0;
4268}
4269EXPORT_SYMBOL(hci_recv_frame);
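
/* Driver RX sketch: how a transport would hand a completed frame to
 * the core via hci_recv_frame(). The delivery function and its data
 * source are hypothetical; the bt_skb_alloc()/bt_cb() usage mirrors
 * this file.
 */
static int example_deliver_event(struct hci_dev *hdev, const void *data,
				 int len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), data, len);
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;

	/* Queues on hdev->rx_q and schedules hci_rx_work() */
	return hci_recv_frame(hdev, skb);
}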
4270
33e882a5 4271static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 4272 int count, __u8 index)
33e882a5
SS
4273{
4274 int len = 0;
4275 int hlen = 0;
4276 int remain = count;
4277 struct sk_buff *skb;
4278 struct bt_skb_cb *scb;
4279
4280 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 4281 index >= NUM_REASSEMBLY)
33e882a5
SS
4282 return -EILSEQ;
4283
4284 skb = hdev->reassembly[index];
4285
4286 if (!skb) {
4287 switch (type) {
4288 case HCI_ACLDATA_PKT:
4289 len = HCI_MAX_FRAME_SIZE;
4290 hlen = HCI_ACL_HDR_SIZE;
4291 break;
4292 case HCI_EVENT_PKT:
4293 len = HCI_MAX_EVENT_SIZE;
4294 hlen = HCI_EVENT_HDR_SIZE;
4295 break;
4296 case HCI_SCODATA_PKT:
4297 len = HCI_MAX_SCO_SIZE;
4298 hlen = HCI_SCO_HDR_SIZE;
4299 break;
4300 }
4301
1e429f38 4302 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
4303 if (!skb)
4304 return -ENOMEM;
4305
4306 scb = (void *) skb->cb;
4307 scb->expect = hlen;
4308 scb->pkt_type = type;
4309
33e882a5
SS
4310 hdev->reassembly[index] = skb;
4311 }
4312
4313 while (count) {
4314 scb = (void *) skb->cb;
89bb46d0 4315 len = min_t(uint, scb->expect, count);
33e882a5
SS
4316
4317 memcpy(skb_put(skb, len), data, len);
4318
4319 count -= len;
4320 data += len;
4321 scb->expect -= len;
4322 remain = count;
4323
4324 switch (type) {
4325 case HCI_EVENT_PKT:
4326 if (skb->len == HCI_EVENT_HDR_SIZE) {
4327 struct hci_event_hdr *h = hci_event_hdr(skb);
4328 scb->expect = h->plen;
4329
4330 if (skb_tailroom(skb) < scb->expect) {
4331 kfree_skb(skb);
4332 hdev->reassembly[index] = NULL;
4333 return -ENOMEM;
4334 }
4335 }
4336 break;
4337
4338 case HCI_ACLDATA_PKT:
4339 if (skb->len == HCI_ACL_HDR_SIZE) {
4340 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4341 scb->expect = __le16_to_cpu(h->dlen);
4342
4343 if (skb_tailroom(skb) < scb->expect) {
4344 kfree_skb(skb);
4345 hdev->reassembly[index] = NULL;
4346 return -ENOMEM;
4347 }
4348 }
4349 break;
4350
4351 case HCI_SCODATA_PKT:
4352 if (skb->len == HCI_SCO_HDR_SIZE) {
4353 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4354 scb->expect = h->dlen;
4355
4356 if (skb_tailroom(skb) < scb->expect) {
4357 kfree_skb(skb);
4358 hdev->reassembly[index] = NULL;
4359 return -ENOMEM;
4360 }
4361 }
4362 break;
4363 }
4364
4365 if (scb->expect == 0) {
4366 /* Complete frame */
4367
4368 bt_cb(skb)->pkt_type = type;
e1a26170 4369 hci_recv_frame(hdev, skb);
33e882a5
SS
4370
4371 hdev->reassembly[index] = NULL;
4372 return remain;
4373 }
4374 }
4375
4376 return remain;
4377}
4378
ef222013
MH
4379int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4380{
f39a3c06
SS
4381 int rem = 0;
4382
ef222013
MH
4383 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4384 return -EILSEQ;
4385
da5f6c37 4386 while (count) {
1e429f38 4387 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
4388 if (rem < 0)
4389 return rem;
ef222013 4390
f39a3c06
SS
4391 data += (count - rem);
4392 count = rem;
f81c6224 4393 }
ef222013 4394
f39a3c06 4395 return rem;
ef222013
MH
4396}
4397EXPORT_SYMBOL(hci_recv_fragment);
4398
99811510
SS
4399#define STREAM_REASSEMBLY 0
4400
4401int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4402{
4403 int type;
4404 int rem = 0;
4405
da5f6c37 4406 while (count) {
99811510
SS
4407 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4408
4409 if (!skb) {
4410 struct { char type; } *pkt;
4411
4412 /* Start of the frame */
4413 pkt = data;
4414 type = pkt->type;
4415
4416 data++;
4417 count--;
4418 } else
4419 type = bt_cb(skb)->pkt_type;
4420
1e429f38 4421 rem = hci_reassembly(hdev, type, data, count,
a8c5fb1a 4422 STREAM_REASSEMBLY);
99811510
SS
4423 if (rem < 0)
4424 return rem;
4425
4426 data += (count - rem);
4427 count = rem;
f81c6224 4428 }
99811510
SS
4429
4430 return rem;
4431}
4432EXPORT_SYMBOL(hci_recv_stream_fragment);
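
/* Sketch of H4/UART-style usage: raw bytes arriving from the line
 * discipline are passed straight in; the helper peels off the packet
 * type byte and reassembles through the STREAM_REASSEMBLY slot. The
 * receive hook itself is hypothetical.
 */
static void example_uart_receive(struct hci_dev *hdev, const u8 *buf,
				 int count)
{
	int rem = hci_recv_stream_fragment(hdev, (void *)buf, count);

	if (rem < 0)
		BT_ERR("Stream reassembly failed: err %d", rem);
}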
4433
1da177e4
LT
4434/* ---- Interface to upper protocols ---- */
4435
1da177e4
LT
4436int hci_register_cb(struct hci_cb *cb)
4437{
4438 BT_DBG("%p name %s", cb, cb->name);
4439
f20d09d5 4440 write_lock(&hci_cb_list_lock);
1da177e4 4441 list_add(&cb->list, &hci_cb_list);
f20d09d5 4442 write_unlock(&hci_cb_list_lock);
1da177e4
LT
4443
4444 return 0;
4445}
4446EXPORT_SYMBOL(hci_register_cb);
4447
4448int hci_unregister_cb(struct hci_cb *cb)
4449{
4450 BT_DBG("%p name %s", cb, cb->name);
4451
f20d09d5 4452 write_lock(&hci_cb_list_lock);
1da177e4 4453 list_del(&cb->list);
f20d09d5 4454 write_unlock(&hci_cb_list_lock);
1da177e4
LT
4455
4456 return 0;
4457}
4458EXPORT_SYMBOL(hci_unregister_cb);
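
/* Registration sketch for the upper-protocol callback list. Only the
 * name field (used in the BT_DBG calls above) is shown; the remaining
 * struct hci_cb callback fields are omitted here.
 */
static struct hci_cb example_cb = {
	.name = "example",
};

static int __init example_init(void)
{
	return hci_register_cb(&example_cb);
}

static void __exit example_exit(void)
{
	hci_unregister_cb(&example_cb);
}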
4459
51086991 4460static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4 4461{
cdc52faa
MH
4462 int err;
4463
0d48d939 4464 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 4465
cd82e61c
MH
4466 /* Time stamp */
4467 __net_timestamp(skb);
1da177e4 4468
cd82e61c
MH
4469 /* Send copy to monitor */
4470 hci_send_to_monitor(hdev, skb);
4471
4472 if (atomic_read(&hdev->promisc)) {
4473 /* Send copy to the sockets */
470fe1b5 4474 hci_send_to_sock(hdev, skb);
1da177e4
LT
4475 }
4476
4477 /* Get rid of skb owner, prior to sending to the driver. */
4478 skb_orphan(skb);
4479
cdc52faa
MH
4480 err = hdev->send(hdev, skb);
4481 if (err < 0) {
4482 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4483 kfree_skb(skb);
4484 }
1da177e4
LT
4485}
4486
3119ae95
JH
4487void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4488{
4489 skb_queue_head_init(&req->cmd_q);
4490 req->hdev = hdev;
5d73e034 4491 req->err = 0;
3119ae95
JH
4492}
4493
4494int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4495{
4496 struct hci_dev *hdev = req->hdev;
4497 struct sk_buff *skb;
4498 unsigned long flags;
4499
4500 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4501
5d73e034
AG
4502 /* If an error occurred during request building, remove all HCI
4503 * commands queued on the HCI request queue.
4504 */
4505 if (req->err) {
4506 skb_queue_purge(&req->cmd_q);
4507 return req->err;
4508 }
4509
3119ae95
JH
4510 /* Do not allow empty requests */
4511 if (skb_queue_empty(&req->cmd_q))
382b0c39 4512 return -ENODATA;
3119ae95
JH
4513
4514 skb = skb_peek_tail(&req->cmd_q);
4515 bt_cb(skb)->req.complete = complete;
4516
4517 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4518 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4519 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4520
4521 queue_work(hdev->workqueue, &hdev->cmd_work);
4522
4523 return 0;
4524}
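
/* Request-building sketch, following the same pattern as
 * le_scan_disable_work() above: init, queue one or more commands,
 * then run with a completion callback. The function names here are
 * illustrative.
 */
static void example_complete(struct hci_dev *hdev, u8 status)
{
	if (status)
		BT_ERR("Example request failed: status 0x%2.2x", status);
}

static int example_run_request(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add_le_scan_disable(&req);

	/* Splices req.cmd_q onto hdev->cmd_q and kicks hci_cmd_work() */
	return hci_req_run(&req, example_complete);
}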
4525
899de765
MH
4526bool hci_req_pending(struct hci_dev *hdev)
4527{
4528 return (hdev->req_status == HCI_REQ_PEND);
4529}
4530
1ca3a9d0 4531static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
07dc93dd 4532 u32 plen, const void *param)
1da177e4
LT
4533{
4534 int len = HCI_COMMAND_HDR_SIZE + plen;
4535 struct hci_command_hdr *hdr;
4536 struct sk_buff *skb;
4537
1da177e4 4538 skb = bt_skb_alloc(len, GFP_ATOMIC);
1ca3a9d0
JH
4539 if (!skb)
4540 return NULL;
1da177e4
LT
4541
4542 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 4543 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
4544 hdr->plen = plen;
4545
4546 if (plen)
4547 memcpy(skb_put(skb, plen), param, plen);
4548
4549 BT_DBG("skb len %d", skb->len);
4550
0d48d939 4551 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
c78ae283 4552
1ca3a9d0
JH
4553 return skb;
4554}
4555
4556/* Send HCI command */
07dc93dd
JH
4557int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4558 const void *param)
1ca3a9d0
JH
4559{
4560 struct sk_buff *skb;
4561
4562 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4563
4564 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4565 if (!skb) {
4566 BT_ERR("%s no memory for command", hdev->name);
4567 return -ENOMEM;
4568 }
4569
11714b3d
JH
4570 /* Stand-alone HCI commands must be flagged as
4571 * single-command requests.
4572 */
4573 bt_cb(skb)->req.start = true;
4574
1da177e4 4575 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 4576 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
4577
4578 return 0;
4579}
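
/* Stand-alone command sketch: HCI_Reset takes no parameters, so plen
 * is 0 and param is NULL. This bypasses the request machinery and is
 * flagged as a single-command request as described above.
 */
static int example_reset(struct hci_dev *hdev)
{
	return hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}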
1da177e4 4580
71c76a17 4581/* Queue a command to an asynchronous HCI request */
07dc93dd
JH
4582void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4583 const void *param, u8 event)
71c76a17
JH
4584{
4585 struct hci_dev *hdev = req->hdev;
4586 struct sk_buff *skb;
4587
4588 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4589
34739c1e
AG
4590 /* If an error occurred during request building, there is no point in
4591 * queueing the HCI command. We can simply return.
4592 */
4593 if (req->err)
4594 return;
4595
71c76a17
JH
4596 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4597 if (!skb) {
5d73e034
AG
4598 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4599 hdev->name, opcode);
4600 req->err = -ENOMEM;
e348fe6b 4601 return;
71c76a17
JH
4602 }
4603
4604 if (skb_queue_empty(&req->cmd_q))
4605 bt_cb(skb)->req.start = true;
4606
02350a72
JH
4607 bt_cb(skb)->req.event = event;
4608
71c76a17 4609 skb_queue_tail(&req->cmd_q, skb);
71c76a17
JH
4610}
4611
07dc93dd
JH
4612void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4613 const void *param)
02350a72
JH
4614{
4615 hci_req_add_ev(req, opcode, plen, param, 0);
4616}
4617
1da177e4 4618/* Get data from the previously sent command */
a9de9248 4619void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
4620{
4621 struct hci_command_hdr *hdr;
4622
4623 if (!hdev->sent_cmd)
4624 return NULL;
4625
4626 hdr = (void *) hdev->sent_cmd->data;
4627
a9de9248 4628 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
4629 return NULL;
4630
f0e09510 4631 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
4632
4633 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4634}
4635
4636/* Send ACL data */
4637static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4638{
4639 struct hci_acl_hdr *hdr;
4640 int len = skb->len;
4641
badff6d0
ACM
4642 skb_push(skb, HCI_ACL_HDR_SIZE);
4643 skb_reset_transport_header(skb);
9c70220b 4644 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
4645 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4646 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
4647}
4648
ee22be7e 4649static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 4650 struct sk_buff *skb, __u16 flags)
1da177e4 4651{
ee22be7e 4652 struct hci_conn *conn = chan->conn;
1da177e4
LT
4653 struct hci_dev *hdev = conn->hdev;
4654 struct sk_buff *list;
4655
087bfd99
GP
4656 skb->len = skb_headlen(skb);
4657 skb->data_len = 0;
4658
4659 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
4660
4661 switch (hdev->dev_type) {
4662 case HCI_BREDR:
4663 hci_add_acl_hdr(skb, conn->handle, flags);
4664 break;
4665 case HCI_AMP:
4666 hci_add_acl_hdr(skb, chan->handle, flags);
4667 break;
4668 default:
4669 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4670 return;
4671 }
087bfd99 4672
70f23020
AE
4673 list = skb_shinfo(skb)->frag_list;
4674 if (!list) {
1da177e4
LT
4675 /* Non fragmented */
4676 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4677
73d80deb 4678 skb_queue_tail(queue, skb);
1da177e4
LT
4679 } else {
4680 /* Fragmented */
4681 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4682
4683 skb_shinfo(skb)->frag_list = NULL;
4684
4685 /* Queue all fragments atomically */
af3e6359 4686 spin_lock(&queue->lock);
1da177e4 4687
73d80deb 4688 __skb_queue_tail(queue, skb);
e702112f
AE
4689
4690 flags &= ~ACL_START;
4691 flags |= ACL_CONT;
1da177e4
LT
4692 do {
4693 skb = list; list = list->next;
8e87d142 4694
0d48d939 4695 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 4696 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
4697
4698 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4699
73d80deb 4700 __skb_queue_tail(queue, skb);
1da177e4
LT
4701 } while (list);
4702
af3e6359 4703 spin_unlock(&queue->lock);
1da177e4 4704 }
73d80deb
LAD
4705}
4706
4707void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4708{
ee22be7e 4709 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 4710
f0e09510 4711 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb 4712
ee22be7e 4713 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 4714
3eff45ea 4715 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4716}
1da177e4
LT
4717
4718/* Send SCO data */
0d861d8b 4719void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
4720{
4721 struct hci_dev *hdev = conn->hdev;
4722 struct hci_sco_hdr hdr;
4723
4724 BT_DBG("%s len %d", hdev->name, skb->len);
4725
aca3192c 4726 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
4727 hdr.dlen = skb->len;
4728
badff6d0
ACM
4729 skb_push(skb, HCI_SCO_HDR_SIZE);
4730 skb_reset_transport_header(skb);
9c70220b 4731 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4 4732
0d48d939 4733 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 4734
1da177e4 4735 skb_queue_tail(&conn->data_q, skb);
3eff45ea 4736 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4737}
1da177e4
LT
4738
4739/* ---- HCI TX task (outgoing data) ---- */
4740
4741/* HCI Connection scheduler */
6039aa73
GP
4742static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4743 int *quote)
1da177e4
LT
4744{
4745 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4746 struct hci_conn *conn = NULL, *c;
abc5de8f 4747 unsigned int num = 0, min = ~0;
1da177e4 4748
8e87d142 4749 /* We don't have to lock the device here. Connections are always
1da177e4 4750 * added and removed with the TX task disabled. */
bf4c6325
GP
4751
4752 rcu_read_lock();
4753
4754 list_for_each_entry_rcu(c, &h->list, list) {
769be974 4755 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 4756 continue;
769be974
MH
4757
4758 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4759 continue;
4760
1da177e4
LT
4761 num++;
4762
4763 if (c->sent < min) {
4764 min = c->sent;
4765 conn = c;
4766 }
52087a79
LAD
4767
4768 if (hci_conn_num(hdev, type) == num)
4769 break;
1da177e4
LT
4770 }
4771
bf4c6325
GP
4772 rcu_read_unlock();
4773
1da177e4 4774 if (conn) {
6ed58ec5
VT
4775 int cnt, q;
4776
4777 switch (conn->type) {
4778 case ACL_LINK:
4779 cnt = hdev->acl_cnt;
4780 break;
4781 case SCO_LINK:
4782 case ESCO_LINK:
4783 cnt = hdev->sco_cnt;
4784 break;
4785 case LE_LINK:
4786 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4787 break;
4788 default:
4789 cnt = 0;
4790 BT_ERR("Unknown link type");
4791 }
4792
4793 q = cnt / num;
1da177e4
LT
4794 *quote = q ? q : 1;
4795 } else
4796 *quote = 0;
4797
4798 BT_DBG("conn %p quote %d", conn, *quote);
4799 return conn;
4800}
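
/* Worked example of the quota math above (values assumed): with
 * hdev->acl_cnt == 9 and three ACL connections holding queued data,
 * num == 3 and the least busy connection gets quote = 9 / 3 = 3
 * packets this round; if the quotient is 0, a minimum quote of 1
 * keeps traffic moving.
 */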
4801
6039aa73 4802static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
4803{
4804 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4805 struct hci_conn *c;
1da177e4 4806
bae1f5d9 4807 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 4808
bf4c6325
GP
4809 rcu_read_lock();
4810
1da177e4 4811 /* Kill stalled connections */
bf4c6325 4812 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 4813 if (c->type == type && c->sent) {
6ed93dc6
AE
4814 BT_ERR("%s killing stalled connection %pMR",
4815 hdev->name, &c->dst);
bed71748 4816 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
4817 }
4818 }
bf4c6325
GP
4819
4820 rcu_read_unlock();
1da177e4
LT
4821}
4822
6039aa73
GP
4823static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4824 int *quote)
1da177e4 4825{
73d80deb
LAD
4826 struct hci_conn_hash *h = &hdev->conn_hash;
4827 struct hci_chan *chan = NULL;
abc5de8f 4828 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 4829 struct hci_conn *conn;
73d80deb
LAD
4830 int cnt, q, conn_num = 0;
4831
4832 BT_DBG("%s", hdev->name);
4833
bf4c6325
GP
4834 rcu_read_lock();
4835
4836 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
4837 struct hci_chan *tmp;
4838
4839 if (conn->type != type)
4840 continue;
4841
4842 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4843 continue;
4844
4845 conn_num++;
4846
8192edef 4847 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
4848 struct sk_buff *skb;
4849
4850 if (skb_queue_empty(&tmp->data_q))
4851 continue;
4852
4853 skb = skb_peek(&tmp->data_q);
4854 if (skb->priority < cur_prio)
4855 continue;
4856
4857 if (skb->priority > cur_prio) {
4858 num = 0;
4859 min = ~0;
4860 cur_prio = skb->priority;
4861 }
4862
4863 num++;
4864
4865 if (conn->sent < min) {
4866 min = conn->sent;
4867 chan = tmp;
4868 }
4869 }
4870
4871 if (hci_conn_num(hdev, type) == conn_num)
4872 break;
4873 }
4874
bf4c6325
GP
4875 rcu_read_unlock();
4876
73d80deb
LAD
4877 if (!chan)
4878 return NULL;
4879
4880 switch (chan->conn->type) {
4881 case ACL_LINK:
4882 cnt = hdev->acl_cnt;
4883 break;
bd1eb66b
AE
4884 case AMP_LINK:
4885 cnt = hdev->block_cnt;
4886 break;
73d80deb
LAD
4887 case SCO_LINK:
4888 case ESCO_LINK:
4889 cnt = hdev->sco_cnt;
4890 break;
4891 case LE_LINK:
4892 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4893 break;
4894 default:
4895 cnt = 0;
4896 BT_ERR("Unknown link type");
4897 }
4898
4899 q = cnt / num;
4900 *quote = q ? q : 1;
4901 BT_DBG("chan %p quote %d", chan, *quote);
4902 return chan;
4903}
4904
02b20f0b
LAD
4905static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4906{
4907 struct hci_conn_hash *h = &hdev->conn_hash;
4908 struct hci_conn *conn;
4909 int num = 0;
4910
4911 BT_DBG("%s", hdev->name);
4912
bf4c6325
GP
4913 rcu_read_lock();
4914
4915 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
4916 struct hci_chan *chan;
4917
4918 if (conn->type != type)
4919 continue;
4920
4921 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4922 continue;
4923
4924 num++;
4925
8192edef 4926 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
4927 struct sk_buff *skb;
4928
4929 if (chan->sent) {
4930 chan->sent = 0;
4931 continue;
4932 }
4933
4934 if (skb_queue_empty(&chan->data_q))
4935 continue;
4936
4937 skb = skb_peek(&chan->data_q);
4938 if (skb->priority >= HCI_PRIO_MAX - 1)
4939 continue;
4940
4941 skb->priority = HCI_PRIO_MAX - 1;
4942
4943 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 4944 skb->priority);
02b20f0b
LAD
4945 }
4946
4947 if (hci_conn_num(hdev, type) == num)
4948 break;
4949 }
bf4c6325
GP
4950
4951 rcu_read_unlock();
4952
02b20f0b
LAD
4953}
4954
b71d385a
AE
4955static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4956{
4957 /* Calculate count of blocks used by this packet */
4958 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4959}
4960
6039aa73 4961static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 4962{
4a964404 4963 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1da177e4
LT
4964 /* ACL tx timeout must be longer than the maximum
4965 * link supervision timeout (40.9 seconds). */
63d2bc1b 4966 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 4967 HCI_ACL_TX_TIMEOUT))
bae1f5d9 4968 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 4969 }
63d2bc1b 4970}
1da177e4 4971
6039aa73 4972static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
4973{
4974 unsigned int cnt = hdev->acl_cnt;
4975 struct hci_chan *chan;
4976 struct sk_buff *skb;
4977 int quote;
4978
4979 __check_timeout(hdev, cnt);
04837f64 4980
73d80deb 4981 while (hdev->acl_cnt &&
a8c5fb1a 4982 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
4983 u32 priority = (skb_peek(&chan->data_q))->priority;
4984 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4985 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4986 skb->len, skb->priority);
73d80deb 4987
ec1cce24
LAD
4988 /* Stop if priority has changed */
4989 if (skb->priority < priority)
4990 break;
4991
4992 skb = skb_dequeue(&chan->data_q);
4993
73d80deb 4994 hci_conn_enter_active_mode(chan->conn,
04124681 4995 bt_cb(skb)->force_active);
04837f64 4996
57d17d70 4997 hci_send_frame(hdev, skb);
1da177e4
LT
4998 hdev->acl_last_tx = jiffies;
4999
5000 hdev->acl_cnt--;
73d80deb
LAD
5001 chan->sent++;
5002 chan->conn->sent++;
1da177e4
LT
5003 }
5004 }
02b20f0b
LAD
5005
5006 if (cnt != hdev->acl_cnt)
5007 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
5008}
5009
6039aa73 5010static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 5011{
63d2bc1b 5012 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
5013 struct hci_chan *chan;
5014 struct sk_buff *skb;
5015 int quote;
bd1eb66b 5016 u8 type;
b71d385a 5017
63d2bc1b 5018 __check_timeout(hdev, cnt);
b71d385a 5019
bd1eb66b
AE
5020 BT_DBG("%s", hdev->name);
5021
5022 if (hdev->dev_type == HCI_AMP)
5023 type = AMP_LINK;
5024 else
5025 type = ACL_LINK;
5026
b71d385a 5027 while (hdev->block_cnt > 0 &&
bd1eb66b 5028 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
5029 u32 priority = (skb_peek(&chan->data_q))->priority;
5030 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
5031 int blocks;
5032
5033 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 5034 skb->len, skb->priority);
b71d385a
AE
5035
5036 /* Stop if priority has changed */
5037 if (skb->priority < priority)
5038 break;
5039
5040 skb = skb_dequeue(&chan->data_q);
5041
5042 blocks = __get_blocks(hdev, skb);
5043 if (blocks > hdev->block_cnt)
5044 return;
5045
5046 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 5047 bt_cb(skb)->force_active);
b71d385a 5048
57d17d70 5049 hci_send_frame(hdev, skb);
b71d385a
AE
5050 hdev->acl_last_tx = jiffies;
5051
5052 hdev->block_cnt -= blocks;
5053 quote -= blocks;
5054
5055 chan->sent += blocks;
5056 chan->conn->sent += blocks;
5057 }
5058 }
5059
5060 if (cnt != hdev->block_cnt)
bd1eb66b 5061 hci_prio_recalculate(hdev, type);
b71d385a
AE
5062}
5063
6039aa73 5064static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
5065{
5066 BT_DBG("%s", hdev->name);
5067
bd1eb66b
AE
5068 /* No ACL link over BR/EDR controller */
5069 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
5070 return;
5071
5072 /* No AMP link over AMP controller */
5073 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
5074 return;
5075
5076 switch (hdev->flow_ctl_mode) {
5077 case HCI_FLOW_CTL_MODE_PACKET_BASED:
5078 hci_sched_acl_pkt(hdev);
5079 break;
5080
5081 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5082 hci_sched_acl_blk(hdev);
5083 break;
5084 }
5085}
5086
1da177e4 5087/* Schedule SCO */
6039aa73 5088static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
5089{
5090 struct hci_conn *conn;
5091 struct sk_buff *skb;
5092 int quote;
5093
5094 BT_DBG("%s", hdev->name);
5095
52087a79
LAD
5096 if (!hci_conn_num(hdev, SCO_LINK))
5097 return;
5098
1da177e4
LT
5099 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5100 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5101 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 5102 hci_send_frame(hdev, skb);
1da177e4
LT
5103
5104 conn->sent++;
5105 if (conn->sent == ~0)
5106 conn->sent = 0;
5107 }
5108 }
5109}
5110
6039aa73 5111static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
5112{
5113 struct hci_conn *conn;
5114 struct sk_buff *skb;
5115 int quote;
5116
5117 BT_DBG("%s", hdev->name);
5118
52087a79
LAD
5119 if (!hci_conn_num(hdev, ESCO_LINK))
5120 return;
5121
8fc9ced3
GP
5122 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5123 &quote))) {
b6a0dc82
MH
5124 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5125 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 5126 hci_send_frame(hdev, skb);
b6a0dc82
MH
5127
5128 conn->sent++;
5129 if (conn->sent == ~0)
5130 conn->sent = 0;
5131 }
5132 }
5133}
5134
6039aa73 5135static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 5136{
73d80deb 5137 struct hci_chan *chan;
6ed58ec5 5138 struct sk_buff *skb;
02b20f0b 5139 int quote, cnt, tmp;
6ed58ec5
VT
5140
5141 BT_DBG("%s", hdev->name);
5142
52087a79
LAD
5143 if (!hci_conn_num(hdev, LE_LINK))
5144 return;
5145
4a964404 5146 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
6ed58ec5
VT
5147 /* LE tx timeout must be longer than the maximum
5148 * link supervision timeout (40.9 seconds). */
bae1f5d9 5149 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 5150 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 5151 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
5152 }
5153
5154 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 5155 tmp = cnt;
73d80deb 5156 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
5157 u32 priority = (skb_peek(&chan->data_q))->priority;
5158 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 5159 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 5160 skb->len, skb->priority);
6ed58ec5 5161
ec1cce24
LAD
5162 /* Stop if priority has changed */
5163 if (skb->priority < priority)
5164 break;
5165
5166 skb = skb_dequeue(&chan->data_q);
5167
57d17d70 5168 hci_send_frame(hdev, skb);
6ed58ec5
VT
5169 hdev->le_last_tx = jiffies;
5170
5171 cnt--;
73d80deb
LAD
5172 chan->sent++;
5173 chan->conn->sent++;
6ed58ec5
VT
5174 }
5175 }
73d80deb 5176
6ed58ec5
VT
5177 if (hdev->le_pkts)
5178 hdev->le_cnt = cnt;
5179 else
5180 hdev->acl_cnt = cnt;
02b20f0b
LAD
5181
5182 if (cnt != tmp)
5183 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
5184}
5185
3eff45ea 5186static void hci_tx_work(struct work_struct *work)
1da177e4 5187{
3eff45ea 5188 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
5189 struct sk_buff *skb;
5190
6ed58ec5 5191 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 5192 hdev->sco_cnt, hdev->le_cnt);
1da177e4 5193
52de599e
MH
5194 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5195 /* Schedule queues and send stuff to HCI driver */
5196 hci_sched_acl(hdev);
5197 hci_sched_sco(hdev);
5198 hci_sched_esco(hdev);
5199 hci_sched_le(hdev);
5200 }
6ed58ec5 5201
1da177e4
LT
5202 /* Send next queued raw (unknown type) packet */
5203 while ((skb = skb_dequeue(&hdev->raw_q)))
57d17d70 5204 hci_send_frame(hdev, skb);
1da177e4
LT
5205}
5206
25985edc 5207/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
5208
5209/* ACL data packet */
6039aa73 5210static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
5211{
5212 struct hci_acl_hdr *hdr = (void *) skb->data;
5213 struct hci_conn *conn;
5214 __u16 handle, flags;
5215
5216 skb_pull(skb, HCI_ACL_HDR_SIZE);
5217
5218 handle = __le16_to_cpu(hdr->handle);
5219 flags = hci_flags(handle);
5220 handle = hci_handle(handle);
5221
f0e09510 5222 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 5223 handle, flags);
1da177e4
LT
5224
5225 hdev->stat.acl_rx++;
5226
5227 hci_dev_lock(hdev);
5228 conn = hci_conn_hash_lookup_handle(hdev, handle);
5229 hci_dev_unlock(hdev);
8e87d142 5230
1da177e4 5231 if (conn) {
65983fc7 5232 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 5233
1da177e4 5234 /* Send to upper protocol */
686ebf28
UF
5235 l2cap_recv_acldata(conn, skb, flags);
5236 return;
1da177e4 5237 } else {
8e87d142 5238 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 5239 hdev->name, handle);
1da177e4
LT
5240 }
5241
5242 kfree_skb(skb);
5243}
5244
5245/* SCO data packet */
6039aa73 5246static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
5247{
5248 struct hci_sco_hdr *hdr = (void *) skb->data;
5249 struct hci_conn *conn;
5250 __u16 handle;
5251
5252 skb_pull(skb, HCI_SCO_HDR_SIZE);
5253
5254 handle = __le16_to_cpu(hdr->handle);
5255
f0e09510 5256 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
5257
5258 hdev->stat.sco_rx++;
5259
5260 hci_dev_lock(hdev);
5261 conn = hci_conn_hash_lookup_handle(hdev, handle);
5262 hci_dev_unlock(hdev);
5263
5264 if (conn) {
1da177e4 5265 /* Send to upper protocol */
686ebf28
UF
5266 sco_recv_scodata(conn, skb);
5267 return;
1da177e4 5268 } else {
8e87d142 5269 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 5270 hdev->name, handle);
1da177e4
LT
5271 }
5272
5273 kfree_skb(skb);
5274}
5275
9238f36a
JH
5276static bool hci_req_is_complete(struct hci_dev *hdev)
5277{
5278 struct sk_buff *skb;
5279
5280 skb = skb_peek(&hdev->cmd_q);
5281 if (!skb)
5282 return true;
5283
5284 return bt_cb(skb)->req.start;
5285}
5286
42c6b129
JH
5287static void hci_resend_last(struct hci_dev *hdev)
5288{
5289 struct hci_command_hdr *sent;
5290 struct sk_buff *skb;
5291 u16 opcode;
5292
5293 if (!hdev->sent_cmd)
5294 return;
5295
5296 sent = (void *) hdev->sent_cmd->data;
5297 opcode = __le16_to_cpu(sent->opcode);
5298 if (opcode == HCI_OP_RESET)
5299 return;
5300
5301 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5302 if (!skb)
5303 return;
5304
5305 skb_queue_head(&hdev->cmd_q, skb);
5306 queue_work(hdev->workqueue, &hdev->cmd_work);
5307}
5308
9238f36a
JH
5309void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5310{
5311 hci_req_complete_t req_complete = NULL;
5312 struct sk_buff *skb;
5313 unsigned long flags;
5314
5315 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5316
42c6b129
JH
5317 /* If the completed command doesn't match the last one that was
5318 * sent we need to do special handling of it.
9238f36a 5319 */
42c6b129
JH
5320 if (!hci_sent_cmd_data(hdev, opcode)) {
5321 /* Some CSR based controllers generate a spontaneous
5322 * reset complete event during init and any pending
5323 * command will never be completed. In such a case we
5324 * need to resend whatever was the last sent
5325 * command.
5326 */
5327 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5328 hci_resend_last(hdev);
5329
9238f36a 5330 return;
42c6b129 5331 }
9238f36a
JH
5332
5333 /* If the command succeeded and there's still more commands in
5334 * this request the request is not yet complete.
5335 */
5336 if (!status && !hci_req_is_complete(hdev))
5337 return;
5338
5339 /* If this was the last command in a request the complete
5340 * callback would be found in hdev->sent_cmd instead of the
5341 * command queue (hdev->cmd_q).
5342 */
5343 if (hdev->sent_cmd) {
5344 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
53e21fbc
JH
5345
5346 if (req_complete) {
5347 /* We must set the complete callback to NULL to
5348 * avoid calling the callback more than once if
5349 * this function gets called again.
5350 */
5351 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5352
9238f36a 5353 goto call_complete;
53e21fbc 5354 }
9238f36a
JH
5355 }
5356
5357 /* Remove all pending commands belonging to this request */
5358 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5359 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5360 if (bt_cb(skb)->req.start) {
5361 __skb_queue_head(&hdev->cmd_q, skb);
5362 break;
5363 }
5364
5365 req_complete = bt_cb(skb)->req.complete;
5366 kfree_skb(skb);
5367 }
5368 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5369
5370call_complete:
5371 if (req_complete)
5372 req_complete(hdev, status);
5373}
5374
b78752cc 5375static void hci_rx_work(struct work_struct *work)
1da177e4 5376{
b78752cc 5377 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
5378 struct sk_buff *skb;
5379
5380 BT_DBG("%s", hdev->name);
5381
1da177e4 5382 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
5383 /* Send copy to monitor */
5384 hci_send_to_monitor(hdev, skb);
5385
1da177e4
LT
5386 if (atomic_read(&hdev->promisc)) {
5387 /* Send copy to the sockets */
470fe1b5 5388 hci_send_to_sock(hdev, skb);
1da177e4
LT
5389 }
5390
fee746b0 5391 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1da177e4
LT
5392 kfree_skb(skb);
5393 continue;
5394 }
5395
5396 if (test_bit(HCI_INIT, &hdev->flags)) {
5397 /* Don't process data packets in this state. */
0d48d939 5398 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
5399 case HCI_ACLDATA_PKT:
5400 case HCI_SCODATA_PKT:
5401 kfree_skb(skb);
5402 continue;
3ff50b79 5403 }
1da177e4
LT
5404 }
5405
5406 /* Process frame */
0d48d939 5407 switch (bt_cb(skb)->pkt_type) {
1da177e4 5408 case HCI_EVENT_PKT:
b78752cc 5409 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
5410 hci_event_packet(hdev, skb);
5411 break;
5412
5413 case HCI_ACLDATA_PKT:
5414 BT_DBG("%s ACL data packet", hdev->name);
5415 hci_acldata_packet(hdev, skb);
5416 break;
5417
5418 case HCI_SCODATA_PKT:
5419 BT_DBG("%s SCO data packet", hdev->name);
5420 hci_scodata_packet(hdev, skb);
5421 break;
5422
5423 default:
5424 kfree_skb(skb);
5425 break;
5426 }
5427 }
1da177e4
LT
5428}
5429
c347b765 5430static void hci_cmd_work(struct work_struct *work)
1da177e4 5431{
c347b765 5432 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
5433 struct sk_buff *skb;
5434
2104786b
AE
5435 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5436 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 5437
1da177e4 5438 /* Send queued commands */
5a08ecce
AE
5439 if (atomic_read(&hdev->cmd_cnt)) {
5440 skb = skb_dequeue(&hdev->cmd_q);
5441 if (!skb)
5442 return;
5443
7585b97a 5444 kfree_skb(hdev->sent_cmd);
1da177e4 5445
a675d7f1 5446 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 5447 if (hdev->sent_cmd) {
1da177e4 5448 atomic_dec(&hdev->cmd_cnt);
57d17d70 5449 hci_send_frame(hdev, skb);
7bdb8a5c 5450 if (test_bit(HCI_RESET, &hdev->flags))
65cc2b49 5451 cancel_delayed_work(&hdev->cmd_timer);
7bdb8a5c 5452 else
65cc2b49
MH
5453 schedule_delayed_work(&hdev->cmd_timer,
5454 HCI_CMD_TIMEOUT);
1da177e4
LT
5455 } else {
5456 skb_queue_head(&hdev->cmd_q, skb);
c347b765 5457 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
5458 }
5459 }
5460}
b1efcc28
AG
5461
5462void hci_req_add_le_scan_disable(struct hci_request *req)
5463{
5464 struct hci_cp_le_set_scan_enable cp;
5465
5466 memset(&cp, 0, sizeof(cp));
5467 cp.enable = LE_SCAN_DISABLE;
5468 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5469}
a4790dbd 5470
8540f6c0
MH
5471static void add_to_white_list(struct hci_request *req,
5472 struct hci_conn_params *params)
5473{
5474 struct hci_cp_le_add_to_white_list cp;
5475
5476 cp.bdaddr_type = params->addr_type;
5477 bacpy(&cp.bdaddr, &params->addr);
5478
5479 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
5480}
5481
5482static u8 update_white_list(struct hci_request *req)
5483{
5484 struct hci_dev *hdev = req->hdev;
5485 struct hci_conn_params *params;
5486 struct bdaddr_list *b;
5487 uint8_t white_list_entries = 0;
5488
5489 /* Go through the current white list programmed into the
5490 * controller one by one and check if that address is still
5491 * in the list of pending connections or list of devices to
5492 * report. If not present in either list, then queue the
5493 * command to remove it from the controller.
5494 */
5495 list_for_each_entry(b, &hdev->le_white_list, list) {
5496 struct hci_cp_le_del_from_white_list cp;
5497
5498 if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
5499 &b->bdaddr, b->bdaddr_type) ||
5500 hci_pend_le_action_lookup(&hdev->pend_le_reports,
5501 &b->bdaddr, b->bdaddr_type)) {
5502 white_list_entries++;
5503 continue;
5504 }
5505
5506 cp.bdaddr_type = b->bdaddr_type;
5507 bacpy(&cp.bdaddr, &b->bdaddr);
5508
5509 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
5510 sizeof(cp), &cp);
5511 }
5512
5513 /* Since all no longer valid white list entries have been
5514 * removed, walk through the list of pending connections
5515 * and ensure that any new device gets programmed into
5516 * the controller.
5517 *
5518 * If the list of devices is larger than the number of
5519 * available white list entries in the controller, then
5520 * just abort and return the filter policy value to not use the
5521 * white list.
5522 */
5523 list_for_each_entry(params, &hdev->pend_le_conns, action) {
5524 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5525 &params->addr, params->addr_type))
5526 continue;
5527
5528 if (white_list_entries >= hdev->le_white_list_size) {
5529 /* Select filter policy to accept all advertising */
5530 return 0x00;
5531 }
5532
66d8e837
MH
5533 if (hci_find_irk_by_addr(hdev, &params->addr,
5534 params->addr_type)) {
5535 /* White list can not be used with RPAs */
5536 return 0x00;
5537 }
5538
8540f6c0
MH
5539 white_list_entries++;
5540 add_to_white_list(req, params);
5541 }
5542
5543 /* After adding all new pending connections, walk through
5544 * the list of pending reports and also add these to the
5545 * white list if there is still space.
5546 */
5547 list_for_each_entry(params, &hdev->pend_le_reports, action) {
5548 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5549 &params->addr, params->addr_type))
5550 continue;
5551
5552 if (white_list_entries >= hdev->le_white_list_size) {
5553 /* Select filter policy to accept all advertising */
5554 return 0x00;
5555 }
5556
66d8e837
MH
5557 if (hci_find_irk_by_addr(hdev, &params->addr,
5558 params->addr_type)) {
5559 /* White list can not be used with RPAs */
5560 return 0x00;
5561 }
5562
8540f6c0
MH
5563 white_list_entries++;
5564 add_to_white_list(req, params);
5565 }
5566
5567 /* Select filter policy to use white list */
5568 return 0x01;
5569}
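
/* Example of the returned filter policy in use: 0x00 tells the
 * controller to accept all advertising packets, while 0x01 restricts
 * reports to white list entries; hci_req_add_le_passive_scan() below
 * copies the value into param_cp.filter_policy.
 */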
5570
8ef30fd3
AG
5571void hci_req_add_le_passive_scan(struct hci_request *req)
5572{
5573 struct hci_cp_le_set_scan_param param_cp;
5574 struct hci_cp_le_set_scan_enable enable_cp;
5575 struct hci_dev *hdev = req->hdev;
5576 u8 own_addr_type;
8540f6c0 5577 u8 filter_policy;
8ef30fd3 5578
6ab535a7
MH
5579 /* Set require_privacy to false since no SCAN_REQ are sent
5580 * during passive scanning. Not using an unresolvable address
5581 * here is important so that peer devices using direct
5582 * advertising with our address will be correctly reported
5583 * by the controller.
8ef30fd3 5584 */
6ab535a7 5585 if (hci_update_random_address(req, false, &own_addr_type))
8ef30fd3
AG
5586 return;
5587
8540f6c0
MH
5588 /* Adding or removing entries from the white list must
5589 * happen before enabling scanning. The controller does
5590 * not allow white list modification while scanning.
5591 */
5592 filter_policy = update_white_list(req);
5593
8ef30fd3
AG
5594 memset(&param_cp, 0, sizeof(param_cp));
5595 param_cp.type = LE_SCAN_PASSIVE;
5596 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5597 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5598 param_cp.own_address_type = own_addr_type;
8540f6c0 5599 param_cp.filter_policy = filter_policy;
8ef30fd3
AG
5600 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5601 &param_cp);
5602
5603 memset(&enable_cp, 0, sizeof(enable_cp));
5604 enable_cp.enable = LE_SCAN_ENABLE;
4340a124 5605 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
8ef30fd3
AG
5606 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5607 &enable_cp);
5608}
5609
a4790dbd
AG
5610static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5611{
5612 if (status)
5613 BT_DBG("HCI request failed to update background scanning: "
5614 "status 0x%2.2x", status);
5615}
5616
5617/* This function controls the background scanning based on hdev->pend_le_conns
5618 * list. If there are pending LE connections we start the background scanning,
5619 * otherwise we stop it.
5620 *
5621 * This function requires the caller holds hdev->lock.
5622 */
5623void hci_update_background_scan(struct hci_dev *hdev)
5624{
a4790dbd
AG
5625 struct hci_request req;
5626 struct hci_conn *conn;
5627 int err;
5628
c20c02d5
MH
5629 if (!test_bit(HCI_UP, &hdev->flags) ||
5630 test_bit(HCI_INIT, &hdev->flags) ||
5631 test_bit(HCI_SETUP, &hdev->dev_flags) ||
d603b76b 5632 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
b8221770 5633 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
c20c02d5 5634 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
1c1697c0
MH
5635 return;
5636
a70f4b5f
JH
5637 /* No point in doing scanning if LE support hasn't been enabled */
5638 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5639 return;
5640
ae23ada4
JH
5641 /* If discovery is active don't interfere with it */
5642 if (hdev->discovery.state != DISCOVERY_STOPPED)
5643 return;
5644
a4790dbd
AG
5645 hci_req_init(&req, hdev);
5646
d1d588c1 5647 if (list_empty(&hdev->pend_le_conns) &&
66f8455a 5648 list_empty(&hdev->pend_le_reports)) {
0d2bf134
JH
5649 /* If there are no pending LE connections or devices
5650 * to be scanned for, we should stop the background
5651 * scanning.
a4790dbd
AG
5652 */
5653
5654 /* If controller is not scanning we are done. */
5655 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5656 return;
5657
5658 hci_req_add_le_scan_disable(&req);
5659
5660 BT_DBG("%s stopping background scanning", hdev->name);
5661 } else {
a4790dbd
AG
5662 /* If there is at least one pending LE connection, we should
5663 * keep the background scan running.
5664 */
5665
a4790dbd
AG
5666 /* If controller is connecting, we should not start scanning
5667 * since some controllers are not able to scan and connect at
5668 * the same time.
5669 */
5670 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5671 if (conn)
5672 return;
5673
4340a124
AG
5674 /* If controller is currently scanning, we stop it to ensure we
5675 * don't miss any advertising (due to duplicates filter).
5676 */
5677 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5678 hci_req_add_le_scan_disable(&req);
5679
8ef30fd3 5680 hci_req_add_le_passive_scan(&req);
a4790dbd
AG
5681
5682 BT_DBG("%s starting background scanning", hdev->name);
5683 }
5684
5685 err = hci_req_run(&req, update_background_scan_complete);
5686 if (err)
5687 BT_ERR("Failed to run HCI request: err %d", err);
5688}
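
/* Usage note: the callers seen above (hci_conn_params_set,
 * hci_conn_params_del and hci_conn_params_clear_all) invoke this with
 * hdev->lock held whenever the pend_le_conns or pend_le_reports lists
 * change, so the controller's scan state always tracks the parameter
 * lists.
 */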