/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

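/* The entries below live under bluetooth/hciN in debugfs (typically
 * mounted at /sys/kernel/debug). Three idioms repeat throughout this
 * section: read-only lists implemented via seq_file, boolean switches
 * that read back and accept a single 'Y' or 'N' character, and numeric
 * parameters exposed through DEFINE_SIMPLE_ATTRIBUTE() getter/setter
 * pairs.
 */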
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open = simple_open,
        .read = dut_mode_read,
        .write = dut_mode_write,
        .llseek = default_llseek,
};

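/* Example (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *   echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * puts a controller that is HCI_UP into Device Under Test mode;
 * writing 'N' issues HCI_OP_RESET to leave it again.
 */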
static int features_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        u8 p;

        hci_dev_lock(hdev);
        for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
                           hdev->features[p][0], hdev->features[p][1],
                           hdev->features[p][2], hdev->features[p][3],
                           hdev->features[p][4], hdev->features[p][5],
                           hdev->features[p][6], hdev->features[p][7]);
        }
        if (lmp_le_capable(hdev))
                seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
                           hdev->le_features[0], hdev->le_features[1],
                           hdev->le_features[2], hdev->le_features[3],
                           hdev->le_features[4], hdev->le_features[5],
                           hdev->le_features[6], hdev->le_features[7]);
        hci_dev_unlock(hdev);

        return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
        return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
        .open = features_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
        return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
        .open = blacklist_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int whitelist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->whitelist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int whitelist_open(struct inode *inode, struct file *file)
{
        return single_open(file, whitelist_show, inode->i_private);
}

static const struct file_operations whitelist_fops = {
        .open = whitelist_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bt_uuid *uuid;

        hci_dev_lock(hdev);
        list_for_each_entry(uuid, &hdev->uuids, list) {
                u8 i, val[16];

                /* The Bluetooth UUID values are stored in big endian,
                 * but with reversed byte order. So convert them into
                 * the right order for the %pUb modifier.
                 */
                for (i = 0; i < 16; i++)
                        val[i] = uuid->uuid[15 - i];

                seq_printf(f, "%pUb\n", val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
        return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
        .open = uuids_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open = inquiry_cache_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key = list_entry(p, struct link_key, list);
                seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
                           HCI_LINK_KEY_SIZE, key->val, key->pin_len);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
        .open = link_keys_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
                   hdev->dev_class[1], hdev->dev_class[0]);
        hci_dev_unlock(hdev);

        return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
        return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
        .open = dev_class_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
                                     size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
                                      const char __user *user_buf,
                                      size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_sc_support_fops = {
        .open = simple_open,
        .read = force_sc_support_read,
        .write = force_sc_support_write,
        .llseek = default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
        .open = simple_open,
        .read = sc_only_mode_read,
        .llseek = default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && (val < 500 || val > 3600000))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->idle_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->idle_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        /* Require the RPA timeout to be at least 30 seconds and at most
         * 24 hours.
         */
        if (val < 30 || val > (60 * 60 * 24))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->rpa_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->rpa_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
                        rpa_timeout_set, "%llu\n");

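/* Sniff interval values are expressed in baseband slots of 0.625 ms,
 * which is why the setters below reject odd values; each setter also
 * checks against the opposite bound so that min <= max always holds.
 */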
static int sniff_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
                        sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val > hdev->conn_info_max_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_min_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_min_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
                        conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val < hdev->conn_info_min_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_max_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_max_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
                        conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        bdaddr_t addr;
        u8 addr_type;

        hci_dev_lock(hdev);

        hci_copy_identity_address(hdev, &addr, &addr_type);

        seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
                   16, hdev->irk, &hdev->rpa);

        hci_dev_unlock(hdev);

        return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
        .open = identity_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->random_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
        .open = random_address_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open = static_address_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static ssize_t force_static_address_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
                                          const char __user *user_buf,
                                          size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_static_address_fops = {
        .open = simple_open,
        .read = force_static_address_read,
        .write = force_static_address_write,
        .llseek = default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->le_white_list, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
        .open = white_list_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
                struct smp_irk *irk = list_entry(p, struct smp_irk, list);
                seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
                           &irk->bdaddr, irk->addr_type,
                           16, irk->val, &irk->rpa);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_resolving_keys_show,
                           inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
        .open = identity_resolving_keys_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->long_term_keys) {
                struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
                seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
                           &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
                           ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
                           __le64_to_cpu(ltk->rand), 16, ltk->val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
        .open = long_term_keys_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

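/* LE connection interval values are in units of 1.25 ms: the valid
 * range 0x0006-0x0c80 therefore corresponds to 7.5 ms - 4 s. As with
 * the sniff parameters above, each setter also checks the opposite
 * bound so the pair stays consistent.
 */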
static int conn_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
                        conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
                        conn_max_interval_set, "%llu\n");

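/* Slave latency is the number of connection events the peripheral may
 * skip; 0x01f3 (499) is the maximum the specification permits.
 */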
static int conn_latency_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val > 0x01f3)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_latency = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_latency;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
                        conn_latency_set, "%llu\n");

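/* The supervision timeout is in units of 10 ms, so 0x000a-0x0c80 maps
 * to 100 ms - 32 s. Note that the specification additionally requires
 * the timeout to exceed (1 + latency) * interval * 2, a relation that
 * is not enforced here.
 */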
static int supervision_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x000a || val > 0x0c80)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_supv_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_supv_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
                        supervision_timeout_set, "%llu\n");

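/* The advertising channel map is a bitmask of the three LE advertising
 * channels: 0x01 = channel 37, 0x02 = channel 38, 0x04 = channel 39.
 * At least one channel must remain enabled, hence the 0x01-0x07 range
 * check.
 */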
static int adv_channel_map_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x01 || val > 0x07)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_channel_map = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_channel_map;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
                        adv_channel_map_set, "%llu\n");

static int device_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct hci_conn_params *p;

        hci_dev_lock(hdev);
        list_for_each_entry(p, &hdev->le_conn_params, list) {
                seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
                           p->auto_connect);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
        .open = device_list_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

/* ---- HCI requests ---- */

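/* Synchronous requests are tracked through hdev->req_status: a request
 * starts out as HCI_REQ_PEND and is moved to HCI_REQ_DONE or
 * HCI_REQ_CANCELED by the two helpers below, which also wake up anyone
 * sleeping on hdev->req_wait_q.
 */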
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

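/* Send a single HCI command and block until it completes, returning
 * the resulting event skb (to be freed with kfree_skb()) or an
 * ERR_PTR() on failure. Callers are expected to hold hci_req_lock, as
 * dut_mode_write() above does around __hci_cmd_sync().
 */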
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

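/* Note: __hci_req_sync() runs with hci_req_lock already held by the
 * caller; hci_req_sync() below is the wrapper that takes the lock and
 * thereby serializes all requests against a controller.
 */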
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

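/* Pick the inquiry mode the controller should be configured with:
 * 0x02 for inquiry with extended results, 0x01 for inquiry with RSSI,
 * 0x00 for standard inquiry. The manufacturer/revision checks cover
 * controllers that handle RSSI results without advertising the
 * capability in their feature bits.
 */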
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[0] |= 0x80; /* Encryption Change */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */
                events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01; /* IO Capability Request */
                events[6] |= 0x02; /* IO Capability Response */
                events[6] |= 0x04; /* User Confirmation Request */
                events[6] |= 0x08; /* User Passkey Request */
                events[6] |= 0x10; /* Remote OOB Data Request */
                events[6] |= 0x20; /* Simple Pairing Complete */
                events[7] |= 0x04; /* User Passkey Notification */
                events[7] |= 0x08; /* Keypress Notification */
                events[7] |= 0x10; /* Remote Host Supported
                                    * Features Notification
                                    */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20; /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, then the host features page
                 * should also be available as well. However some
                 * controllers list the max_page as 0 as long as SSP
                 * has not been enabled. To achieve proper debugging
                 * output, force the minimum max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40; /* Triggered Clock Capture */
                events[1] |= 0x80; /* Synchronization Train Complete */
                events[2] |= 0x10; /* Slave Page Response Timeout */
                events[2] |= 0x20; /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01; /* Synchronization Train Received */
                events[2] |= 0x02; /* CSB Receive */
                events[2] |= 0x04; /* CSB Timeout */
                events[2] |= 0x08; /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));
                events[0] = 0x1f;

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20; /* LE Remote Connection
                                            * Parameter Request
                                            */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if ((lmp_sc_capable(hdev) ||
             test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
            test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
                u8 support = 0x01;
                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }
}

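/* Controller initialization runs as a series of synchronous request
 * stages: stage 1 resets the controller and reads basic information,
 * stage 2 performs BR/EDR and/or LE transport setup plus the event
 * mask, and stages 3 and 4 send commands that depend on the features
 * and commands discovered earlier. AMP controllers stop after stage 1.
 */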
2177bab5
JH
1686static int __hci_init(struct hci_dev *hdev)
1687{
1688 int err;
1689
1690 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1691 if (err < 0)
1692 return err;
1693
4b4148e9
MH
1694 /* The Device Under Test (DUT) mode is special and available for
1695 * all controller types. So just create it early on.
1696 */
1697 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1698 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1699 &dut_mode_fops);
1700 }
1701
2177bab5
JH
1702 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1703 * BR/EDR/LE type controllers. AMP controllers only need the
1704 * first stage init.
1705 */
1706 if (hdev->dev_type != HCI_BREDR)
1707 return 0;
1708
1709 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1710 if (err < 0)
1711 return err;
1712
5d4e7e8d
JH
1713 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1714 if (err < 0)
1715 return err;
1716
baf27f6e
MH
1717 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1718 if (err < 0)
1719 return err;
1720
1721 /* Only create debugfs entries during the initial setup
1722 * phase and not every time the controller gets powered on.
1723 */
1724 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1725 return 0;
1726
dfb826a8
MH
1727 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1728 &features_fops);
ceeb3bc0
MH
1729 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1730 &hdev->manufacturer);
1731 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1732 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
70afe0b8
MH
1733 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1734 &blacklist_fops);
6659358e
JH
1735 debugfs_create_file("whitelist", 0444, hdev->debugfs, hdev,
1736 &whitelist_fops);
47219839
MH
1737 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1738
31ad1691
AK
1739 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1740 &conn_info_min_age_fops);
1741 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1742 &conn_info_max_age_fops);
1743
baf27f6e
MH
1744 if (lmp_bredr_capable(hdev)) {
1745 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1746 hdev, &inquiry_cache_fops);
02d08d15
MH
1747 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1748 hdev, &link_keys_fops);
babdbb3c
MH
1749 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1750 hdev, &dev_class_fops);
041000b9
MH
1751 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1752 hdev, &voice_setting_fops);
baf27f6e
MH
1753 }
1754
06f5b778 1755 if (lmp_ssp_capable(hdev)) {
ebd1e33b
MH
1756 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1757 hdev, &auto_accept_delay_fops);
5afeac14
MH
1758 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1759 hdev, &force_sc_support_fops);
134c2a89
MH
1760 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1761 hdev, &sc_only_mode_fops);
06f5b778 1762 }
ebd1e33b 1763
2bfa3531
MH
1764 if (lmp_sniff_capable(hdev)) {
1765 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1766 hdev, &idle_timeout_fops);
1767 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1768 hdev, &sniff_min_interval_fops);
1769 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1770 hdev, &sniff_max_interval_fops);
1771 }
1772
d0f729b8 1773 if (lmp_le_capable(hdev)) {
ac345813
MH
1774 debugfs_create_file("identity", 0400, hdev->debugfs,
1775 hdev, &identity_fops);
1776 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1777 hdev, &rpa_timeout_fops);
7a4cd51d
MH
1778 debugfs_create_file("random_address", 0444, hdev->debugfs,
1779 hdev, &random_address_fops);
b32bba6c
MH
1780 debugfs_create_file("static_address", 0444, hdev->debugfs,
1781 hdev, &static_address_fops);
1782
1783 /* For controllers with a public address, provide a debug
1784 * option to force the usage of the configured static
1785 * address. By default the public address is used.
1786 */
1787 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1788 debugfs_create_file("force_static_address", 0644,
1789 hdev->debugfs, hdev,
1790 &force_static_address_fops);
1791
d0f729b8
MH
1792 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1793 &hdev->le_white_list_size);
d2ab0ac1
MH
1794 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1795 &white_list_fops);
3698d704
MH
1796 debugfs_create_file("identity_resolving_keys", 0400,
1797 hdev->debugfs, hdev,
1798 &identity_resolving_keys_fops);
8f8625cd
MH
1799 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1800 hdev, &long_term_keys_fops);
4e70c7e7
MH
1801 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1802 hdev, &conn_min_interval_fops);
1803 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1804 hdev, &conn_max_interval_fops);
816a93d1
MH
1805 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1806 hdev, &conn_latency_fops);
f1649577
MH
1807 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1808 hdev, &supervision_timeout_fops);
3f959d46
MH
1809 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1810 hdev, &adv_channel_map_fops);
0b3c7d37
MH
1811 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1812 &device_list_fops);
b9a7a61e
LR
1813 debugfs_create_u16("discov_interleaved_timeout", 0644,
1814 hdev->debugfs,
1815 &hdev->discov_interleaved_timeout);
d0f729b8 1816 }
e7b8fc92 1817
baf27f6e 1818 return 0;
2177bab5
JH
1819}
1820
0ebca7d6
MH
1821static void hci_init0_req(struct hci_request *req, unsigned long opt)
1822{
1823 struct hci_dev *hdev = req->hdev;
1824
1825 BT_DBG("%s %ld", hdev->name, opt);
1826
1827 /* Reset */
1828 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1829 hci_reset_req(req, 0);
1830
1831 /* Read Local Version */
1832 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1833
1834 /* Read BD Address */
1835 if (hdev->set_bdaddr)
1836 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1837}
1838
1839static int __hci_unconf_init(struct hci_dev *hdev)
1840{
1841 int err;
1842
cc78b44b
MH
1843 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1844 return 0;
1845
0ebca7d6
MH
1846 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1847 if (err < 0)
1848 return err;
1849
1850 return 0;
1851}
1852
42c6b129 1853static void hci_scan_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1854{
1855 __u8 scan = opt;
1856
42c6b129 1857 BT_DBG("%s %x", req->hdev->name, scan);
1da177e4
LT
1858
1859 /* Inquiry and Page scans */
42c6b129 1860 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
1861}
1862
42c6b129 1863static void hci_auth_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1864{
1865 __u8 auth = opt;
1866
42c6b129 1867 BT_DBG("%s %x", req->hdev->name, auth);
1da177e4
LT
1868
1869 /* Authentication */
42c6b129 1870 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
1871}
1872
42c6b129 1873static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1874{
1875 __u8 encrypt = opt;
1876
42c6b129 1877 BT_DBG("%s %x", req->hdev->name, encrypt);
1da177e4 1878
e4e8e37c 1879 /* Encryption */
42c6b129 1880 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
1881}
1882
42c6b129 1883static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
e4e8e37c
MH
1884{
1885 __le16 policy = cpu_to_le16(opt);
1886
42c6b129 1887 BT_DBG("%s %x", req->hdev->name, policy);
e4e8e37c
MH
1888
1889 /* Default link policy */
42c6b129 1890 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
e4e8e37c
MH
1891}
1892
8e87d142 1893/* Get HCI device by index.
1da177e4
LT
1894 * Device is held on return. */
1895struct hci_dev *hci_dev_get(int index)
1896{
8035ded4 1897 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
1898
1899 BT_DBG("%d", index);
1900
1901 if (index < 0)
1902 return NULL;
1903
1904 read_lock(&hci_dev_list_lock);
8035ded4 1905 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
1906 if (d->id == index) {
1907 hdev = hci_dev_hold(d);
1908 break;
1909 }
1910 }
1911 read_unlock(&hci_dev_list_lock);
1912 return hdev;
1913}
1da177e4
LT
1914
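A minimal usage sketch (illustrative, not part of the original file): every successful hci_dev_get() must be balanced with hci_dev_put() once the caller is done, as the ioctl helpers later in this file do.

static int example_with_hdev(int index)
{
        struct hci_dev *hdev = hci_dev_get(index);

        if (!hdev)
                return -ENODEV;

        /* ... operate on hdev while the reference is held ... */

        hci_dev_put(hdev);
        return 0;
}
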
1915/* ---- Inquiry support ---- */
ff9ef578 1916
30dc78e1
JH
1917bool hci_discovery_active(struct hci_dev *hdev)
1918{
1919 struct discovery_state *discov = &hdev->discovery;
1920
6fbe195d 1921 switch (discov->state) {
343f935b 1922 case DISCOVERY_FINDING:
6fbe195d 1923 case DISCOVERY_RESOLVING:
30dc78e1
JH
1924 return true;
1925
6fbe195d
AG
1926 default:
1927 return false;
1928 }
30dc78e1
JH
1929}
1930
ff9ef578
JH
1931void hci_discovery_set_state(struct hci_dev *hdev, int state)
1932{
bb3e0a33
JH
1933 int old_state = hdev->discovery.state;
1934
ff9ef578
JH
1935 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1936
bb3e0a33 1937 if (old_state == state)
ff9ef578
JH
1938 return;
1939
bb3e0a33
JH
1940 hdev->discovery.state = state;
1941
ff9ef578
JH
1942 switch (state) {
1943 case DISCOVERY_STOPPED:
c54c3860
AG
1944 hci_update_background_scan(hdev);
1945
bb3e0a33 1946 if (old_state != DISCOVERY_STARTING)
7b99b659 1947 mgmt_discovering(hdev, 0);
ff9ef578
JH
1948 break;
1949 case DISCOVERY_STARTING:
1950 break;
343f935b 1951 case DISCOVERY_FINDING:
ff9ef578
JH
1952 mgmt_discovering(hdev, 1);
1953 break;
30dc78e1
JH
1954 case DISCOVERY_RESOLVING:
1955 break;
ff9ef578
JH
1956 case DISCOVERY_STOPPING:
1957 break;
1958 }
ff9ef578
JH
1959}
1960
1f9b9a5d 1961void hci_inquiry_cache_flush(struct hci_dev *hdev)
1da177e4 1962{
30883512 1963 struct discovery_state *cache = &hdev->discovery;
b57c1a56 1964 struct inquiry_entry *p, *n;
1da177e4 1965
561aafbc
JH
1966 list_for_each_entry_safe(p, n, &cache->all, all) {
1967 list_del(&p->all);
b57c1a56 1968 kfree(p);
1da177e4 1969 }
561aafbc
JH
1970
1971 INIT_LIST_HEAD(&cache->unknown);
1972 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
1973}
1974
a8c5fb1a
GP
1975struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1976 bdaddr_t *bdaddr)
1da177e4 1977{
30883512 1978 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
1979 struct inquiry_entry *e;
1980
6ed93dc6 1981 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 1982
561aafbc
JH
1983 list_for_each_entry(e, &cache->all, all) {
1984 if (!bacmp(&e->data.bdaddr, bdaddr))
1985 return e;
1986 }
1987
1988 return NULL;
1989}
1990
1991struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 1992 bdaddr_t *bdaddr)
561aafbc 1993{
30883512 1994 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
1995 struct inquiry_entry *e;
1996
6ed93dc6 1997 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
1998
1999 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 2000 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
2001 return e;
2002 }
2003
2004 return NULL;
1da177e4
LT
2005}
2006
30dc78e1 2007struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
2008 bdaddr_t *bdaddr,
2009 int state)
30dc78e1
JH
2010{
2011 struct discovery_state *cache = &hdev->discovery;
2012 struct inquiry_entry *e;
2013
6ed93dc6 2014 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
2015
2016 list_for_each_entry(e, &cache->resolve, list) {
2017 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2018 return e;
2019 if (!bacmp(&e->data.bdaddr, bdaddr))
2020 return e;
2021 }
2022
2023 return NULL;
2024}
2025
a3d4e20a 2026void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
04124681 2027 struct inquiry_entry *ie)
a3d4e20a
JH
2028{
2029 struct discovery_state *cache = &hdev->discovery;
2030 struct list_head *pos = &cache->resolve;
2031 struct inquiry_entry *p;
2032
2033 list_del(&ie->list);
2034
2035 list_for_each_entry(p, &cache->resolve, list) {
2036 if (p->name_state != NAME_PENDING &&
a8c5fb1a 2037 abs(p->data.rssi) >= abs(ie->data.rssi))
a3d4e20a
JH
2038 break;
2039 pos = &p->list;
2040 }
2041
2042 list_add(&ie->list, pos);
2043}
2044
af58925c
MH
2045u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2046 bool name_known)
1da177e4 2047{
30883512 2048 struct discovery_state *cache = &hdev->discovery;
70f23020 2049 struct inquiry_entry *ie;
af58925c 2050 u32 flags = 0;
1da177e4 2051
6ed93dc6 2052 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1da177e4 2053
2b2fec4d
SJ
2054 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2055
af58925c
MH
2056 if (!data->ssp_mode)
2057 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
388fc8fa 2058
70f23020 2059 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
a3d4e20a 2060 if (ie) {
af58925c
MH
2061 if (!ie->data.ssp_mode)
2062 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
388fc8fa 2063
a3d4e20a 2064 if (ie->name_state == NAME_NEEDED &&
a8c5fb1a 2065 data->rssi != ie->data.rssi) {
a3d4e20a
JH
2066 ie->data.rssi = data->rssi;
2067 hci_inquiry_cache_update_resolve(hdev, ie);
2068 }
2069
561aafbc 2070 goto update;
a3d4e20a 2071 }
561aafbc
JH
2072
2073 /* Entry not in the cache. Add new one. */
2074 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
af58925c
MH
2075 if (!ie) {
2076 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2077 goto done;
2078 }
561aafbc
JH
2079
2080 list_add(&ie->all, &cache->all);
2081
2082 if (name_known) {
2083 ie->name_state = NAME_KNOWN;
2084 } else {
2085 ie->name_state = NAME_NOT_KNOWN;
2086 list_add(&ie->list, &cache->unknown);
2087 }
70f23020 2088
561aafbc
JH
2089update:
2090 if (name_known && ie->name_state != NAME_KNOWN &&
a8c5fb1a 2091 ie->name_state != NAME_PENDING) {
561aafbc
JH
2092 ie->name_state = NAME_KNOWN;
2093 list_del(&ie->list);
1da177e4
LT
2094 }
2095
70f23020
AE
2096 memcpy(&ie->data, data, sizeof(*data));
2097 ie->timestamp = jiffies;
1da177e4 2098 cache->timestamp = jiffies;
3175405b
JH
2099
2100 if (ie->name_state == NAME_NOT_KNOWN)
af58925c 2101 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
3175405b 2102
af58925c
MH
2103done:
2104 return flags;
1da177e4
LT
2105}
2106
2107static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2108{
30883512 2109 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
2110 struct inquiry_info *info = (struct inquiry_info *) buf;
2111 struct inquiry_entry *e;
2112 int copied = 0;
2113
561aafbc 2114 list_for_each_entry(e, &cache->all, all) {
1da177e4 2115 struct inquiry_data *data = &e->data;
b57c1a56
JH
2116
2117 if (copied >= num)
2118 break;
2119
1da177e4
LT
2120 bacpy(&info->bdaddr, &data->bdaddr);
2121 info->pscan_rep_mode = data->pscan_rep_mode;
2122 info->pscan_period_mode = data->pscan_period_mode;
2123 info->pscan_mode = data->pscan_mode;
2124 memcpy(info->dev_class, data->dev_class, 3);
2125 info->clock_offset = data->clock_offset;
b57c1a56 2126
1da177e4 2127 info++;
b57c1a56 2128 copied++;
1da177e4
LT
2129 }
2130
2131 BT_DBG("cache %p, copied %d", cache, copied);
2132 return copied;
2133}
2134
42c6b129 2135static void hci_inq_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
2136{
2137 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
42c6b129 2138 struct hci_dev *hdev = req->hdev;
1da177e4
LT
2139 struct hci_cp_inquiry cp;
2140
2141 BT_DBG("%s", hdev->name);
2142
2143 if (test_bit(HCI_INQUIRY, &hdev->flags))
2144 return;
2145
2146 /* Start Inquiry */
2147 memcpy(&cp.lap, &ir->lap, 3);
2148 cp.length = ir->length;
2149 cp.num_rsp = ir->num_rsp;
42c6b129 2150 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
2151}
2152
3e13fa1e
AG
2153static int wait_inquiry(void *word)
2154{
2155 schedule();
2156 return signal_pending(current);
2157}
2158
1da177e4
LT
2159int hci_inquiry(void __user *arg)
2160{
2161 __u8 __user *ptr = arg;
2162 struct hci_inquiry_req ir;
2163 struct hci_dev *hdev;
2164 int err = 0, do_inquiry = 0, max_rsp;
2165 long timeo;
2166 __u8 *buf;
2167
2168 if (copy_from_user(&ir, ptr, sizeof(ir)))
2169 return -EFAULT;
2170
5a08ecce
AE
2171 hdev = hci_dev_get(ir.dev_id);
2172 if (!hdev)
1da177e4
LT
2173 return -ENODEV;
2174
0736cfa8
MH
2175 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2176 err = -EBUSY;
2177 goto done;
2178 }
2179
4a964404 2180 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
2181 err = -EOPNOTSUPP;
2182 goto done;
2183 }
2184
5b69bef5
MH
2185 if (hdev->dev_type != HCI_BREDR) {
2186 err = -EOPNOTSUPP;
2187 goto done;
2188 }
2189
56f87901
JH
2190 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2191 err = -EOPNOTSUPP;
2192 goto done;
2193 }
2194
09fd0de5 2195 hci_dev_lock(hdev);
8e87d142 2196 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 2197 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1f9b9a5d 2198 hci_inquiry_cache_flush(hdev);
1da177e4
LT
2199 do_inquiry = 1;
2200 }
09fd0de5 2201 hci_dev_unlock(hdev);
1da177e4 2202
04837f64 2203 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
2204
2205 if (do_inquiry) {
01178cd4
JH
2206 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2207 timeo);
70f23020
AE
2208 if (err < 0)
2209 goto done;
3e13fa1e
AG
2210
2211 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2212 * cleared). If it is interrupted by a signal, return -EINTR.
2213 */
2214 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2215 TASK_INTERRUPTIBLE))
2216 return -EINTR;
70f23020 2217 }
1da177e4 2218
8fc9ced3
GP
2219 /* For an unlimited number of responses we use a buffer with
2220 * 255 entries
2221 */
1da177e4
LT
2222 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2223
2224 /* cache_dump can't sleep. Therefore we allocate a temp buffer and then
2225 * copy it to user space.
2226 */
01df8c31 2227 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 2228 if (!buf) {
1da177e4
LT
2229 err = -ENOMEM;
2230 goto done;
2231 }
2232
09fd0de5 2233 hci_dev_lock(hdev);
1da177e4 2234 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 2235 hci_dev_unlock(hdev);
1da177e4
LT
2236
2237 BT_DBG("num_rsp %d", ir.num_rsp);
2238
2239 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2240 ptr += sizeof(ir);
2241 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 2242 ir.num_rsp))
1da177e4 2243 err = -EFAULT;
8e87d142 2244 } else
1da177e4
LT
2245 err = -EFAULT;
2246
2247 kfree(buf);
2248
2249done:
2250 hci_dev_put(hdev);
2251 return err;
2252}
2253
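hci_inquiry() above services the HCIINQUIRY ioctl: the request header is followed in the same buffer by up to num_rsp inquiry_info entries, exactly as the copy_to_user() calls lay them out. A hedged userspace sketch (assumes the BlueZ library headers and helpers):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>
#include <bluetooth/hci_lib.h>

int main(void)
{
        struct {
                struct hci_inquiry_req ir;
                inquiry_info info[255];
        } *req = calloc(1, sizeof(*req));
        int dev_id = hci_get_route(NULL);
        int dd = hci_open_dev(dev_id);
        int i;

        if (!req || dd < 0)
                return 1;

        req->ir.dev_id  = dev_id;
        req->ir.flags   = IREQ_CACHE_FLUSH;     /* flush the inquiry cache first */
        req->ir.lap[0]  = 0x33;                 /* GIAC: 0x9e8b33 */
        req->ir.lap[1]  = 0x8b;
        req->ir.lap[2]  = 0x9e;
        req->ir.length  = 8;                    /* 8 * 1.28s inquiry */
        req->ir.num_rsp = 255;                  /* 0 would also mean "up to 255" */

        if (ioctl(dd, HCIINQUIRY, (unsigned long) req) < 0) {
                perror("HCIINQUIRY");
                return 1;
        }

        for (i = 0; i < req->ir.num_rsp; i++) {
                char addr[18];

                ba2str(&req->info[i].bdaddr, addr);
                printf("%s\n", addr);
        }

        hci_close_dev(dd);
        free(req);
        return 0;
}
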
cbed0ca1 2254static int hci_dev_do_open(struct hci_dev *hdev)
1da177e4 2255{
1da177e4
LT
2256 int ret = 0;
2257
1da177e4
LT
2258 BT_DBG("%s %p", hdev->name, hdev);
2259
2260 hci_req_lock(hdev);
2261
94324962
JH
2262 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2263 ret = -ENODEV;
2264 goto done;
2265 }
2266
d603b76b
MH
2267 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2268 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
a5c8f270
MH
2269 /* Check for rfkill but allow the HCI setup stage to
2270 * proceed (which in itself doesn't cause any RF activity).
2271 */
2272 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2273 ret = -ERFKILL;
2274 goto done;
2275 }
2276
2277 /* Check for valid public address or a configured static
2278 * random address, but let the HCI setup proceed to
2279 * be able to determine if there is a public address
2280 * or not.
2281 *
c6beca0e
MH
2282 * In case of user channel usage, it is not important
2283 * if a public address or static random address is
2284 * available.
2285 *
a5c8f270
MH
2286 * This check is only valid for BR/EDR controllers
2287 * since AMP controllers do not have an address.
2288 */
c6beca0e
MH
2289 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2290 hdev->dev_type == HCI_BREDR &&
a5c8f270
MH
2291 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2292 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2293 ret = -EADDRNOTAVAIL;
2294 goto done;
2295 }
611b30f7
MH
2296 }
2297
1da177e4
LT
2298 if (test_bit(HCI_UP, &hdev->flags)) {
2299 ret = -EALREADY;
2300 goto done;
2301 }
2302
1da177e4
LT
2303 if (hdev->open(hdev)) {
2304 ret = -EIO;
2305 goto done;
2306 }
2307
f41c70c4
MH
2308 atomic_set(&hdev->cmd_cnt, 1);
2309 set_bit(HCI_INIT, &hdev->flags);
2310
af202f84
MH
2311 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2312 if (hdev->setup)
2313 ret = hdev->setup(hdev);
f41c70c4 2314
af202f84
MH
2315 /* The transport driver can set these quirks before
2316 * creating the HCI device or in its setup callback.
2317 *
2318 * In case any of them is set, the controller has to
2319 * start up as unconfigured.
2320 */
eb1904f4
MH
2321 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2322 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
89bc22d2 2323 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
0ebca7d6
MH
2324
2325 /* For an unconfigured controller it is required to
2326 * read at least the version information provided by
2327 * the Read Local Version Information command.
2328 *
2329 * If the set_bdaddr driver callback is provided, then
2330 * also the original Bluetooth public device address
2331 * will be read using the Read BD Address command.
2332 */
2333 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2334 ret = __hci_unconf_init(hdev);
89bc22d2
MH
2335 }
2336
9713c17b
MH
2337 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2338 /* If public address change is configured, ensure that
2339 * the address gets programmed. If the driver does not
2340 * support changing the public address, fail the power
2341 * on procedure.
2342 */
2343 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2344 hdev->set_bdaddr)
24c457e2
MH
2345 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2346 else
2347 ret = -EADDRNOTAVAIL;
2348 }
2349
f41c70c4 2350 if (!ret) {
4a964404 2351 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
0736cfa8 2352 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
f41c70c4 2353 ret = __hci_init(hdev);
1da177e4
LT
2354 }
2355
f41c70c4
MH
2356 clear_bit(HCI_INIT, &hdev->flags);
2357
1da177e4
LT
2358 if (!ret) {
2359 hci_dev_hold(hdev);
d6bfd59c 2360 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1da177e4
LT
2361 set_bit(HCI_UP, &hdev->flags);
2362 hci_notify(hdev, HCI_DEV_UP);
bb4b2a9a 2363 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
d603b76b 2364 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
4a964404 2365 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
0736cfa8 2366 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1514b892 2367 hdev->dev_type == HCI_BREDR) {
09fd0de5 2368 hci_dev_lock(hdev);
744cf19e 2369 mgmt_powered(hdev, 1);
09fd0de5 2370 hci_dev_unlock(hdev);
56e5cb86 2371 }
8e87d142 2372 } else {
1da177e4 2373 /* Init failed, cleanup */
3eff45ea 2374 flush_work(&hdev->tx_work);
c347b765 2375 flush_work(&hdev->cmd_work);
b78752cc 2376 flush_work(&hdev->rx_work);
1da177e4
LT
2377
2378 skb_queue_purge(&hdev->cmd_q);
2379 skb_queue_purge(&hdev->rx_q);
2380
2381 if (hdev->flush)
2382 hdev->flush(hdev);
2383
2384 if (hdev->sent_cmd) {
2385 kfree_skb(hdev->sent_cmd);
2386 hdev->sent_cmd = NULL;
2387 }
2388
2389 hdev->close(hdev);
fee746b0 2390 hdev->flags &= BIT(HCI_RAW);
1da177e4
LT
2391 }
2392
2393done:
2394 hci_req_unlock(hdev);
1da177e4
LT
2395 return ret;
2396}
2397
cbed0ca1
JH
2398/* ---- HCI ioctl helpers ---- */
2399
2400int hci_dev_open(__u16 dev)
2401{
2402 struct hci_dev *hdev;
2403 int err;
2404
2405 hdev = hci_dev_get(dev);
2406 if (!hdev)
2407 return -ENODEV;
2408
4a964404 2409 /* Devices that are marked as unconfigured can only be powered
fee746b0
MH
2410 * up as user channel. Trying to bring them up as normal devices
2411 * will result in a failure. Only user channel operation is
2412 * possible.
2413 *
2414 * When this function is called for a user channel, the flag
2415 * HCI_USER_CHANNEL will be set first before attempting to
2416 * open the device.
2417 */
4a964404 2418 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
fee746b0
MH
2419 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2420 err = -EOPNOTSUPP;
2421 goto done;
2422 }
2423
e1d08f40
JH
2424 /* We need to ensure that no other power on/off work is pending
2425 * before proceeding to call hci_dev_do_open. This is
2426 * particularly important if the setup procedure has not yet
2427 * completed.
2428 */
2429 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2430 cancel_delayed_work(&hdev->power_off);
2431
a5c8f270
MH
2432 /* After this call it is guaranteed that the setup procedure
2433 * has finished. This means that error conditions like RFKILL
2434 * or no valid public or static random address apply.
2435 */
e1d08f40
JH
2436 flush_workqueue(hdev->req_workqueue);
2437
cbed0ca1
JH
2438 err = hci_dev_do_open(hdev);
2439
fee746b0 2440done:
cbed0ca1 2441 hci_dev_put(hdev);
cbed0ca1
JH
2442 return err;
2443}
2444
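hci_dev_open() backs the HCIDEVUP ioctl. A hedged userspace sketch of powering an adapter on, roughly what "hciconfig hci0 up" does (assumes a raw HCI control socket; example_dev_up is a hypothetical helper name):

#include <errno.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

static int example_dev_up(int dev_id)
{
        int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

        if (ctl < 0)
                return -errno;

        /* EALREADY simply means the device was already up */
        if (ioctl(ctl, HCIDEVUP, dev_id) < 0 && errno != EALREADY) {
                close(ctl);
                return -errno;
        }

        close(ctl);
        return 0;
}
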
d7347f3c
JH
2445/* This function requires the caller holds hdev->lock */
2446static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2447{
2448 struct hci_conn_params *p;
2449
2450 list_for_each_entry(p, &hdev->le_conn_params, list)
2451 list_del_init(&p->action);
2452
2453 BT_DBG("All LE pending actions cleared");
2454}
2455
1da177e4
LT
2456static int hci_dev_do_close(struct hci_dev *hdev)
2457{
2458 BT_DBG("%s %p", hdev->name, hdev);
2459
78c04c0b
VCG
2460 cancel_delayed_work(&hdev->power_off);
2461
1da177e4
LT
2462 hci_req_cancel(hdev, ENODEV);
2463 hci_req_lock(hdev);
2464
2465 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
65cc2b49 2466 cancel_delayed_work_sync(&hdev->cmd_timer);
1da177e4
LT
2467 hci_req_unlock(hdev);
2468 return 0;
2469 }
2470
3eff45ea
GP
2471 /* Flush RX and TX works */
2472 flush_work(&hdev->tx_work);
b78752cc 2473 flush_work(&hdev->rx_work);
1da177e4 2474
16ab91ab 2475 if (hdev->discov_timeout > 0) {
e0f9309f 2476 cancel_delayed_work(&hdev->discov_off);
16ab91ab 2477 hdev->discov_timeout = 0;
5e5282bb 2478 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
310a3d48 2479 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
16ab91ab
JH
2480 }
2481
a8b2d5c2 2482 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
7d78525d
JH
2483 cancel_delayed_work(&hdev->service_cache);
2484
7ba8b4be 2485 cancel_delayed_work_sync(&hdev->le_scan_disable);
4518bb0f
JH
2486
2487 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2488 cancel_delayed_work_sync(&hdev->rpa_expired);
7ba8b4be 2489
09fd0de5 2490 hci_dev_lock(hdev);
1f9b9a5d 2491 hci_inquiry_cache_flush(hdev);
1da177e4 2492 hci_conn_hash_flush(hdev);
d7347f3c 2493 hci_pend_le_actions_clear(hdev);
09fd0de5 2494 hci_dev_unlock(hdev);
1da177e4
LT
2495
2496 hci_notify(hdev, HCI_DEV_DOWN);
2497
2498 if (hdev->flush)
2499 hdev->flush(hdev);
2500
2501 /* Reset device */
2502 skb_queue_purge(&hdev->cmd_q);
2503 atomic_set(&hdev->cmd_cnt, 1);
4a964404
MH
2504 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2505 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
a6c511c6 2506 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1da177e4 2507 set_bit(HCI_INIT, &hdev->flags);
01178cd4 2508 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1da177e4
LT
2509 clear_bit(HCI_INIT, &hdev->flags);
2510 }
2511
c347b765
GP
2512 /* flush cmd work */
2513 flush_work(&hdev->cmd_work);
1da177e4
LT
2514
2515 /* Drop queues */
2516 skb_queue_purge(&hdev->rx_q);
2517 skb_queue_purge(&hdev->cmd_q);
2518 skb_queue_purge(&hdev->raw_q);
2519
2520 /* Drop last sent command */
2521 if (hdev->sent_cmd) {
65cc2b49 2522 cancel_delayed_work_sync(&hdev->cmd_timer);
1da177e4
LT
2523 kfree_skb(hdev->sent_cmd);
2524 hdev->sent_cmd = NULL;
2525 }
2526
b6ddb638
JH
2527 kfree_skb(hdev->recv_evt);
2528 hdev->recv_evt = NULL;
2529
1da177e4
LT
2530 /* After this point our queues are empty
2531 * and no tasks are scheduled. */
2532 hdev->close(hdev);
2533
35b973c9 2534 /* Clear flags */
fee746b0 2535 hdev->flags &= BIT(HCI_RAW);
35b973c9
JH
2536 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2537
93c311a0
MH
2538 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2539 if (hdev->dev_type == HCI_BREDR) {
2540 hci_dev_lock(hdev);
2541 mgmt_powered(hdev, 0);
2542 hci_dev_unlock(hdev);
2543 }
8ee56540 2544 }
5add6af8 2545
ced5c338 2546 /* Controller radio is available but is currently powered down */
536619e8 2547 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
ced5c338 2548
e59fda8d 2549 memset(hdev->eir, 0, sizeof(hdev->eir));
09b3c3fb 2550 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
7a4cd51d 2551 bacpy(&hdev->random_addr, BDADDR_ANY);
e59fda8d 2552
1da177e4
LT
2553 hci_req_unlock(hdev);
2554
2555 hci_dev_put(hdev);
2556 return 0;
2557}
2558
2559int hci_dev_close(__u16 dev)
2560{
2561 struct hci_dev *hdev;
2562 int err;
2563
70f23020
AE
2564 hdev = hci_dev_get(dev);
2565 if (!hdev)
1da177e4 2566 return -ENODEV;
8ee56540 2567
0736cfa8
MH
2568 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2569 err = -EBUSY;
2570 goto done;
2571 }
2572
8ee56540
MH
2573 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2574 cancel_delayed_work(&hdev->power_off);
2575
1da177e4 2576 err = hci_dev_do_close(hdev);
8ee56540 2577
0736cfa8 2578done:
1da177e4
LT
2579 hci_dev_put(hdev);
2580 return err;
2581}
2582
2583int hci_dev_reset(__u16 dev)
2584{
2585 struct hci_dev *hdev;
2586 int ret = 0;
2587
70f23020
AE
2588 hdev = hci_dev_get(dev);
2589 if (!hdev)
1da177e4
LT
2590 return -ENODEV;
2591
2592 hci_req_lock(hdev);
1da177e4 2593
808a049e
MH
2594 if (!test_bit(HCI_UP, &hdev->flags)) {
2595 ret = -ENETDOWN;
1da177e4 2596 goto done;
808a049e 2597 }
1da177e4 2598
0736cfa8
MH
2599 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2600 ret = -EBUSY;
2601 goto done;
2602 }
2603
4a964404 2604 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
2605 ret = -EOPNOTSUPP;
2606 goto done;
2607 }
2608
1da177e4
LT
2609 /* Drop queues */
2610 skb_queue_purge(&hdev->rx_q);
2611 skb_queue_purge(&hdev->cmd_q);
2612
09fd0de5 2613 hci_dev_lock(hdev);
1f9b9a5d 2614 hci_inquiry_cache_flush(hdev);
1da177e4 2615 hci_conn_hash_flush(hdev);
09fd0de5 2616 hci_dev_unlock(hdev);
1da177e4
LT
2617
2618 if (hdev->flush)
2619 hdev->flush(hdev);
2620
8e87d142 2621 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 2622 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4 2623
fee746b0 2624 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1da177e4
LT
2625
2626done:
1da177e4
LT
2627 hci_req_unlock(hdev);
2628 hci_dev_put(hdev);
2629 return ret;
2630}
2631
2632int hci_dev_reset_stat(__u16 dev)
2633{
2634 struct hci_dev *hdev;
2635 int ret = 0;
2636
70f23020
AE
2637 hdev = hci_dev_get(dev);
2638 if (!hdev)
1da177e4
LT
2639 return -ENODEV;
2640
0736cfa8
MH
2641 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2642 ret = -EBUSY;
2643 goto done;
2644 }
2645
4a964404 2646 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
2647 ret = -EOPNOTSUPP;
2648 goto done;
2649 }
2650
1da177e4
LT
2651 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2652
0736cfa8 2653done:
1da177e4 2654 hci_dev_put(hdev);
1da177e4
LT
2655 return ret;
2656}
2657
123abc08
JH
2658static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2659{
bc6d2d04 2660 bool conn_changed, discov_changed;
123abc08
JH
2661
2662 BT_DBG("%s scan 0x%02x", hdev->name, scan);
2663
2664 if ((scan & SCAN_PAGE))
2665 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2666 &hdev->dev_flags);
2667 else
2668 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2669 &hdev->dev_flags);
2670
bc6d2d04
JH
2671 if ((scan & SCAN_INQUIRY)) {
2672 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2673 &hdev->dev_flags);
2674 } else {
2675 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2676 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2677 &hdev->dev_flags);
2678 }
2679
123abc08
JH
2680 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2681 return;
2682
bc6d2d04
JH
2683 if (conn_changed || discov_changed) {
2684 /* In case this was disabled through mgmt */
2685 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2686
2687 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2688 mgmt_update_adv_data(hdev);
2689
123abc08 2690 mgmt_new_settings(hdev);
bc6d2d04 2691 }
123abc08
JH
2692}
2693
1da177e4
LT
2694int hci_dev_cmd(unsigned int cmd, void __user *arg)
2695{
2696 struct hci_dev *hdev;
2697 struct hci_dev_req dr;
2698 int err = 0;
2699
2700 if (copy_from_user(&dr, arg, sizeof(dr)))
2701 return -EFAULT;
2702
70f23020
AE
2703 hdev = hci_dev_get(dr.dev_id);
2704 if (!hdev)
1da177e4
LT
2705 return -ENODEV;
2706
0736cfa8
MH
2707 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2708 err = -EBUSY;
2709 goto done;
2710 }
2711
4a964404 2712 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
2713 err = -EOPNOTSUPP;
2714 goto done;
2715 }
2716
5b69bef5
MH
2717 if (hdev->dev_type != HCI_BREDR) {
2718 err = -EOPNOTSUPP;
2719 goto done;
2720 }
2721
56f87901
JH
2722 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2723 err = -EOPNOTSUPP;
2724 goto done;
2725 }
2726
1da177e4
LT
2727 switch (cmd) {
2728 case HCISETAUTH:
01178cd4
JH
2729 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2730 HCI_INIT_TIMEOUT);
1da177e4
LT
2731 break;
2732
2733 case HCISETENCRYPT:
2734 if (!lmp_encrypt_capable(hdev)) {
2735 err = -EOPNOTSUPP;
2736 break;
2737 }
2738
2739 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2740 /* Auth must be enabled first */
01178cd4
JH
2741 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2742 HCI_INIT_TIMEOUT);
1da177e4
LT
2743 if (err)
2744 break;
2745 }
2746
01178cd4
JH
2747 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2748 HCI_INIT_TIMEOUT);
1da177e4
LT
2749 break;
2750
2751 case HCISETSCAN:
01178cd4
JH
2752 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2753 HCI_INIT_TIMEOUT);
91a668b0 2754
bc6d2d04
JH
2755 /* Ensure that the connectable and discoverable states
2756 * get correctly modified as this was a non-mgmt change.
91a668b0 2757 */
123abc08
JH
2758 if (!err)
2759 hci_update_scan_state(hdev, dr.dev_opt);
1da177e4
LT
2760 break;
2761
1da177e4 2762 case HCISETLINKPOL:
01178cd4
JH
2763 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2764 HCI_INIT_TIMEOUT);
1da177e4
LT
2765 break;
2766
2767 case HCISETLINKMODE:
e4e8e37c
MH
2768 hdev->link_mode = ((__u16) dr.dev_opt) &
2769 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2770 break;
2771
2772 case HCISETPTYPE:
2773 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
2774 break;
2775
2776 case HCISETACLMTU:
e4e8e37c
MH
2777 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2778 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
2779 break;
2780
2781 case HCISETSCOMTU:
e4e8e37c
MH
2782 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2783 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
2784 break;
2785
2786 default:
2787 err = -EINVAL;
2788 break;
2789 }
e4e8e37c 2790
0736cfa8 2791done:
1da177e4
LT
2792 hci_dev_put(hdev);
2793 return err;
2794}
2795
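The HCISETSCAN branch above enables page and/or inquiry scan and then syncs the connectable/discoverable flags. A hedged userspace sketch, the equivalent of "hciconfig hci0 piscan" (example_set_piscan is a hypothetical helper):

#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

static int example_set_piscan(int dev_id)
{
        struct hci_dev_req dr = {
                .dev_id  = dev_id,
                .dev_opt = SCAN_PAGE | SCAN_INQUIRY, /* connectable + discoverable */
        };
        int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
        int err = 0;

        if (dd < 0)
                return -1;

        if (ioctl(dd, HCISETSCAN, (unsigned long) &dr) < 0)
                err = -1;

        close(dd);
        return err;
}
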
2796int hci_get_dev_list(void __user *arg)
2797{
8035ded4 2798 struct hci_dev *hdev;
1da177e4
LT
2799 struct hci_dev_list_req *dl;
2800 struct hci_dev_req *dr;
1da177e4
LT
2801 int n = 0, size, err;
2802 __u16 dev_num;
2803
2804 if (get_user(dev_num, (__u16 __user *) arg))
2805 return -EFAULT;
2806
2807 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2808 return -EINVAL;
2809
2810 size = sizeof(*dl) + dev_num * sizeof(*dr);
2811
70f23020
AE
2812 dl = kzalloc(size, GFP_KERNEL);
2813 if (!dl)
1da177e4
LT
2814 return -ENOMEM;
2815
2816 dr = dl->dev_req;
2817
f20d09d5 2818 read_lock(&hci_dev_list_lock);
8035ded4 2819 list_for_each_entry(hdev, &hci_dev_list, list) {
2e84d8db
MH
2820 unsigned long flags = hdev->flags;
2821
2822 /* When the auto-off is configured it means the transport
2823 * is running, but in that case still indicate that the
2824 * device is actually down.
2825 */
2826 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2827 flags &= ~BIT(HCI_UP);
c542a06c 2828
a8b2d5c2
JH
2829 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2830 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 2831
1da177e4 2832 (dr + n)->dev_id = hdev->id;
2e84d8db 2833 (dr + n)->dev_opt = flags;
c542a06c 2834
1da177e4
LT
2835 if (++n >= dev_num)
2836 break;
2837 }
f20d09d5 2838 read_unlock(&hci_dev_list_lock);
1da177e4
LT
2839
2840 dl->dev_num = n;
2841 size = sizeof(*dl) + n * sizeof(*dr);
2842
2843 err = copy_to_user(arg, dl, size);
2844 kfree(dl);
2845
2846 return err ? -EFAULT : 0;
2847}
2848
2849int hci_get_dev_info(void __user *arg)
2850{
2851 struct hci_dev *hdev;
2852 struct hci_dev_info di;
2e84d8db 2853 unsigned long flags;
1da177e4
LT
2854 int err = 0;
2855
2856 if (copy_from_user(&di, arg, sizeof(di)))
2857 return -EFAULT;
2858
70f23020
AE
2859 hdev = hci_dev_get(di.dev_id);
2860 if (!hdev)
1da177e4
LT
2861 return -ENODEV;
2862
2e84d8db
MH
2863 /* When the auto-off is configured it means the transport
2864 * is running, but in that case still indicate that the
2865 * device is actually down.
2866 */
2867 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2868 flags = hdev->flags & ~BIT(HCI_UP);
2869 else
2870 flags = hdev->flags;
ab81cbf9 2871
a8b2d5c2
JH
2872 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2873 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 2874
1da177e4
LT
2875 strcpy(di.name, hdev->name);
2876 di.bdaddr = hdev->bdaddr;
60f2a3ed 2877 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2e84d8db 2878 di.flags = flags;
1da177e4 2879 di.pkt_type = hdev->pkt_type;
572c7f84
JH
2880 if (lmp_bredr_capable(hdev)) {
2881 di.acl_mtu = hdev->acl_mtu;
2882 di.acl_pkts = hdev->acl_pkts;
2883 di.sco_mtu = hdev->sco_mtu;
2884 di.sco_pkts = hdev->sco_pkts;
2885 } else {
2886 di.acl_mtu = hdev->le_mtu;
2887 di.acl_pkts = hdev->le_pkts;
2888 di.sco_mtu = 0;
2889 di.sco_pkts = 0;
2890 }
1da177e4
LT
2891 di.link_policy = hdev->link_policy;
2892 di.link_mode = hdev->link_mode;
2893
2894 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2895 memcpy(&di.features, &hdev->features, sizeof(di.features));
2896
2897 if (copy_to_user(arg, &di, sizeof(di)))
2898 err = -EFAULT;
2899
2900 hci_dev_put(hdev);
2901
2902 return err;
2903}
2904
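hci_get_dev_info() services HCIGETDEVINFO. A hedged userspace sketch that prints the adapter name and address (struct hci_dev_info and ba2str come from the BlueZ headers; example_print_bdaddr is a hypothetical helper):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>
#include <bluetooth/hci_lib.h>

static int example_print_bdaddr(int dev_id)
{
        struct hci_dev_info di;
        char addr[18];
        int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

        if (dd < 0)
                return -1;

        memset(&di, 0, sizeof(di));
        di.dev_id = dev_id;

        if (ioctl(dd, HCIGETDEVINFO, (void *) &di) < 0) {
                close(dd);
                return -1;
        }

        ba2str(&di.bdaddr, addr);
        printf("%s: %s\n", di.name, addr);

        close(dd);
        return 0;
}
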
2905/* ---- Interface to HCI drivers ---- */
2906
611b30f7
MH
2907static int hci_rfkill_set_block(void *data, bool blocked)
2908{
2909 struct hci_dev *hdev = data;
2910
2911 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2912
0736cfa8
MH
2913 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2914 return -EBUSY;
2915
5e130367
JH
2916 if (blocked) {
2917 set_bit(HCI_RFKILLED, &hdev->dev_flags);
d603b76b
MH
2918 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2919 !test_bit(HCI_CONFIG, &hdev->dev_flags))
bf543036 2920 hci_dev_do_close(hdev);
5e130367
JH
2921 } else {
2922 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
1025c04c 2923 }
611b30f7
MH
2924
2925 return 0;
2926}
2927
2928static const struct rfkill_ops hci_rfkill_ops = {
2929 .set_block = hci_rfkill_set_block,
2930};
2931
ab81cbf9
JH
2932static void hci_power_on(struct work_struct *work)
2933{
2934 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
96570ffc 2935 int err;
ab81cbf9
JH
2936
2937 BT_DBG("%s", hdev->name);
2938
cbed0ca1 2939 err = hci_dev_do_open(hdev);
96570ffc
JH
2940 if (err < 0) {
2941 mgmt_set_powered_failed(hdev, err);
ab81cbf9 2942 return;
96570ffc 2943 }
ab81cbf9 2944
a5c8f270
MH
2945 /* During the HCI setup phase, a few error conditions are
2946 * ignored and they need to be checked now. If they are still
2947 * valid, it is important to turn the device back off.
2948 */
2949 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
4a964404 2950 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
a5c8f270
MH
2951 (hdev->dev_type == HCI_BREDR &&
2952 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2953 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
bf543036
JH
2954 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2955 hci_dev_do_close(hdev);
2956 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
19202573
JH
2957 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2958 HCI_AUTO_OFF_TIMEOUT);
bf543036 2959 }
ab81cbf9 2960
fee746b0 2961 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
4a964404
MH
2962 /* For unconfigured devices, set the HCI_RAW flag
2963 * so that userspace can easily identify them.
4a964404
MH
2964 */
2965 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2966 set_bit(HCI_RAW, &hdev->flags);
0602a8ad
MH
2967
2968 /* For fully configured devices, this will send
2969 * the Index Added event. For unconfigured devices,
2970 * it will send the Unconfigured Index Added event.
2971 *
2972 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2973 * and no event will be sent.
2974 */
2975 mgmt_index_added(hdev);
d603b76b 2976 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
5ea234d3
MH
2977 /* Once the controller is configured, it
2978 * is important to clear the HCI_RAW flag.
2979 */
2980 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2981 clear_bit(HCI_RAW, &hdev->flags);
2982
d603b76b
MH
2983 /* Powering on the controller with HCI_CONFIG set only
2984 * happens with the transition from unconfigured to
2985 * configured. This will send the Index Added event.
2986 */
2987 mgmt_index_added(hdev);
fee746b0 2988 }
ab81cbf9
JH
2989}
2990
2991static void hci_power_off(struct work_struct *work)
2992{
3243553f 2993 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 2994 power_off.work);
ab81cbf9
JH
2995
2996 BT_DBG("%s", hdev->name);
2997
8ee56540 2998 hci_dev_do_close(hdev);
ab81cbf9
JH
2999}
3000
16ab91ab
JH
3001static void hci_discov_off(struct work_struct *work)
3002{
3003 struct hci_dev *hdev;
16ab91ab
JH
3004
3005 hdev = container_of(work, struct hci_dev, discov_off.work);
3006
3007 BT_DBG("%s", hdev->name);
3008
d1967ff8 3009 mgmt_discoverable_timeout(hdev);
16ab91ab
JH
3010}
3011
35f7498a 3012void hci_uuids_clear(struct hci_dev *hdev)
2aeb9a1a 3013{
4821002c 3014 struct bt_uuid *uuid, *tmp;
2aeb9a1a 3015
4821002c
JH
3016 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3017 list_del(&uuid->list);
2aeb9a1a
JH
3018 kfree(uuid);
3019 }
2aeb9a1a
JH
3020}
3021
35f7498a 3022void hci_link_keys_clear(struct hci_dev *hdev)
55ed8ca1
JH
3023{
3024 struct list_head *p, *n;
3025
3026 list_for_each_safe(p, n, &hdev->link_keys) {
3027 struct link_key *key;
3028
3029 key = list_entry(p, struct link_key, list);
3030
3031 list_del(p);
3032 kfree(key);
3033 }
55ed8ca1
JH
3034}
3035
35f7498a 3036void hci_smp_ltks_clear(struct hci_dev *hdev)
b899efaf
VCG
3037{
3038 struct smp_ltk *k, *tmp;
3039
3040 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3041 list_del(&k->list);
3042 kfree(k);
3043 }
b899efaf
VCG
3044}
3045
970c4e46
JH
3046void hci_smp_irks_clear(struct hci_dev *hdev)
3047{
3048 struct smp_irk *k, *tmp;
3049
3050 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3051 list_del(&k->list);
3052 kfree(k);
3053 }
3054}
3055
55ed8ca1
JH
3056struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3057{
8035ded4 3058 struct link_key *k;
55ed8ca1 3059
8035ded4 3060 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
3061 if (bacmp(bdaddr, &k->bdaddr) == 0)
3062 return k;
55ed8ca1
JH
3063
3064 return NULL;
3065}
3066
745c0ce3 3067static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 3068 u8 key_type, u8 old_key_type)
d25e28ab
JH
3069{
3070 /* Legacy key */
3071 if (key_type < 0x03)
745c0ce3 3072 return true;
d25e28ab
JH
3073
3074 /* Debug keys are insecure so don't store them persistently */
3075 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 3076 return false;
d25e28ab
JH
3077
3078 /* Changed combination key and there's no previous one */
3079 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 3080 return false;
d25e28ab
JH
3081
3082 /* Security mode 3 case */
3083 if (!conn)
745c0ce3 3084 return true;
d25e28ab
JH
3085
3086 /* Neither local nor remote side set no-bonding as a requirement */
3087 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 3088 return true;
d25e28ab
JH
3089
3090 /* Local side had dedicated bonding as requirement */
3091 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 3092 return true;
d25e28ab
JH
3093
3094 /* Remote side had dedicated bonding as requirement */
3095 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 3096 return true;
d25e28ab
JH
3097
3098 /* If none of the above criteria match, then don't store the key
3099 * persistently */
745c0ce3 3100 return false;
d25e28ab
JH
3101}
3102
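A hedged in-file illustration of the rules above (example_key_persistence is a hypothetical caller, not in the original): a legacy key type below 0x03 is always stored, while a debug combination key never is, regardless of the bonding requirements.

static void example_key_persistence(struct hci_dev *hdev,
                                    struct hci_conn *conn)
{
        /* Legacy combination key (type 0x01 < 0x03): stored */
        bool legacy = hci_persistent_key(hdev, conn,
                                         HCI_LK_COMBINATION, 0xff);

        /* Debug key: never stored persistently */
        bool debug = hci_persistent_key(hdev, conn,
                                        HCI_LK_DEBUG_COMBINATION, 0xff);

        BT_DBG("legacy %d debug %d", legacy, debug);
}
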
98a0b845
JH
3103static bool ltk_type_master(u8 type)
3104{
d97c9fb0 3105 return (type == SMP_LTK);
98a0b845
JH
3106}
3107
fe39c7b2 3108struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
98a0b845 3109 bool master)
75d262c2 3110{
c9839a11 3111 struct smp_ltk *k;
75d262c2 3112
c9839a11 3113 list_for_each_entry(k, &hdev->long_term_keys, list) {
fe39c7b2 3114 if (k->ediv != ediv || k->rand != rand)
75d262c2
VCG
3115 continue;
3116
98a0b845
JH
3117 if (ltk_type_master(k->type) != master)
3118 continue;
3119
c9839a11 3120 return k;
75d262c2
VCG
3121 }
3122
3123 return NULL;
3124}
75d262c2 3125
c9839a11 3126struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
98a0b845 3127 u8 addr_type, bool master)
75d262c2 3128{
c9839a11 3129 struct smp_ltk *k;
75d262c2 3130
c9839a11
VCG
3131 list_for_each_entry(k, &hdev->long_term_keys, list)
3132 if (addr_type == k->bdaddr_type &&
98a0b845
JH
3133 bacmp(bdaddr, &k->bdaddr) == 0 &&
3134 ltk_type_master(k->type) == master)
75d262c2
VCG
3135 return k;
3136
3137 return NULL;
3138}
75d262c2 3139
970c4e46
JH
3140struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3141{
3142 struct smp_irk *irk;
3143
3144 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3145 if (!bacmp(&irk->rpa, rpa))
3146 return irk;
3147 }
3148
3149 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3150 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
3151 bacpy(&irk->rpa, rpa);
3152 return irk;
3153 }
3154 }
3155
3156 return NULL;
3157}
3158
3159struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3160 u8 addr_type)
3161{
3162 struct smp_irk *irk;
3163
6cfc9988
JH
3164 /* Identity Address must be public or static random */
3165 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3166 return NULL;
3167
970c4e46
JH
3168 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3169 if (addr_type == irk->addr_type &&
3170 bacmp(bdaddr, &irk->bdaddr) == 0)
3171 return irk;
3172 }
3173
3174 return NULL;
3175}
3176
567fa2aa 3177struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
7652ff6a
JH
3178 bdaddr_t *bdaddr, u8 *val, u8 type,
3179 u8 pin_len, bool *persistent)
55ed8ca1
JH
3180{
3181 struct link_key *key, *old_key;
745c0ce3 3182 u8 old_key_type;
55ed8ca1
JH
3183
3184 old_key = hci_find_link_key(hdev, bdaddr);
3185 if (old_key) {
3186 old_key_type = old_key->type;
3187 key = old_key;
3188 } else {
12adcf3a 3189 old_key_type = conn ? conn->key_type : 0xff;
0a14ab41 3190 key = kzalloc(sizeof(*key), GFP_KERNEL);
55ed8ca1 3191 if (!key)
567fa2aa 3192 return NULL;
55ed8ca1
JH
3193 list_add(&key->list, &hdev->link_keys);
3194 }
3195
6ed93dc6 3196 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 3197
d25e28ab
JH
3198 /* Some buggy controller combinations generate a changed
3199 * combination key for legacy pairing even when there's no
3200 * previous key */
3201 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 3202 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 3203 type = HCI_LK_COMBINATION;
655fe6ec
JH
3204 if (conn)
3205 conn->key_type = type;
3206 }
d25e28ab 3207
55ed8ca1 3208 bacpy(&key->bdaddr, bdaddr);
9b3b4460 3209 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
3210 key->pin_len = pin_len;
3211
b6020ba0 3212 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 3213 key->type = old_key_type;
4748fed2
JH
3214 else
3215 key->type = type;
3216
7652ff6a
JH
3217 if (persistent)
3218 *persistent = hci_persistent_key(hdev, conn, type,
3219 old_key_type);
55ed8ca1 3220
567fa2aa 3221 return key;
55ed8ca1
JH
3222}
3223
ca9142b8 3224struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
35d70271 3225 u8 addr_type, u8 type, u8 authenticated,
fe39c7b2 3226 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
75d262c2 3227{
c9839a11 3228 struct smp_ltk *key, *old_key;
98a0b845 3229 bool master = ltk_type_master(type);
75d262c2 3230
98a0b845 3231 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
c9839a11 3232 if (old_key)
75d262c2 3233 key = old_key;
c9839a11 3234 else {
0a14ab41 3235 key = kzalloc(sizeof(*key), GFP_KERNEL);
75d262c2 3236 if (!key)
ca9142b8 3237 return NULL;
c9839a11 3238 list_add(&key->list, &hdev->long_term_keys);
75d262c2
VCG
3239 }
3240
75d262c2 3241 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
3242 key->bdaddr_type = addr_type;
3243 memcpy(key->val, tk, sizeof(key->val));
3244 key->authenticated = authenticated;
3245 key->ediv = ediv;
fe39c7b2 3246 key->rand = rand;
c9839a11
VCG
3247 key->enc_size = enc_size;
3248 key->type = type;
75d262c2 3249
ca9142b8 3250 return key;
75d262c2
VCG
3251}
3252
ca9142b8
JH
3253struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3254 u8 addr_type, u8 val[16], bdaddr_t *rpa)
970c4e46
JH
3255{
3256 struct smp_irk *irk;
3257
3258 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3259 if (!irk) {
3260 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3261 if (!irk)
ca9142b8 3262 return NULL;
970c4e46
JH
3263
3264 bacpy(&irk->bdaddr, bdaddr);
3265 irk->addr_type = addr_type;
3266
3267 list_add(&irk->list, &hdev->identity_resolving_keys);
3268 }
3269
3270 memcpy(irk->val, val, 16);
3271 bacpy(&irk->rpa, rpa);
3272
ca9142b8 3273 return irk;
970c4e46
JH
3274}
3275
55ed8ca1
JH
3276int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3277{
3278 struct link_key *key;
3279
3280 key = hci_find_link_key(hdev, bdaddr);
3281 if (!key)
3282 return -ENOENT;
3283
6ed93dc6 3284 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
3285
3286 list_del(&key->list);
3287 kfree(key);
3288
3289 return 0;
3290}
3291
e0b2b27e 3292int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
b899efaf
VCG
3293{
3294 struct smp_ltk *k, *tmp;
c51ffa0b 3295 int removed = 0;
b899efaf
VCG
3296
3297 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
e0b2b27e 3298 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
b899efaf
VCG
3299 continue;
3300
6ed93dc6 3301 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf
VCG
3302
3303 list_del(&k->list);
3304 kfree(k);
c51ffa0b 3305 removed++;
b899efaf
VCG
3306 }
3307
c51ffa0b 3308 return removed ? 0 : -ENOENT;
b899efaf
VCG
3309}
3310
a7ec7338
JH
3311void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3312{
3313 struct smp_irk *k, *tmp;
3314
668b7b19 3315 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
a7ec7338
JH
3316 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3317 continue;
3318
3319 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3320
3321 list_del(&k->list);
3322 kfree(k);
3323 }
3324}
3325
6bd32326 3326/* HCI command timer function */
65cc2b49 3327static void hci_cmd_timeout(struct work_struct *work)
6bd32326 3328{
65cc2b49
MH
3329 struct hci_dev *hdev = container_of(work, struct hci_dev,
3330 cmd_timer.work);
6bd32326 3331
bda4f23a
AE
3332 if (hdev->sent_cmd) {
3333 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3334 u16 opcode = __le16_to_cpu(sent->opcode);
3335
3336 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3337 } else {
3338 BT_ERR("%s command tx timeout", hdev->name);
3339 }
3340
6bd32326 3341 atomic_set(&hdev->cmd_cnt, 1);
c347b765 3342 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
3343}
3344
2763eda6 3345struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 3346 bdaddr_t *bdaddr)
2763eda6
SJ
3347{
3348 struct oob_data *data;
3349
3350 list_for_each_entry(data, &hdev->remote_oob_data, list)
3351 if (bacmp(bdaddr, &data->bdaddr) == 0)
3352 return data;
3353
3354 return NULL;
3355}
3356
3357int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3358{
3359 struct oob_data *data;
3360
3361 data = hci_find_remote_oob_data(hdev, bdaddr);
3362 if (!data)
3363 return -ENOENT;
3364
6ed93dc6 3365 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
3366
3367 list_del(&data->list);
3368 kfree(data);
3369
3370 return 0;
3371}
3372
35f7498a 3373void hci_remote_oob_data_clear(struct hci_dev *hdev)
2763eda6
SJ
3374{
3375 struct oob_data *data, *n;
3376
3377 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3378 list_del(&data->list);
3379 kfree(data);
3380 }
2763eda6
SJ
3381}
3382
0798872e
MH
3383int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3384 u8 *hash, u8 *randomizer)
2763eda6
SJ
3385{
3386 struct oob_data *data;
3387
3388 data = hci_find_remote_oob_data(hdev, bdaddr);
2763eda6 3389 if (!data) {
0a14ab41 3390 data = kmalloc(sizeof(*data), GFP_KERNEL);
2763eda6
SJ
3391 if (!data)
3392 return -ENOMEM;
3393
3394 bacpy(&data->bdaddr, bdaddr);
3395 list_add(&data->list, &hdev->remote_oob_data);
3396 }
3397
519ca9d0
MH
3398 memcpy(data->hash192, hash, sizeof(data->hash192));
3399 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
2763eda6 3400
0798872e
MH
3401 memset(data->hash256, 0, sizeof(data->hash256));
3402 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3403
3404 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3405
3406 return 0;
3407}
3408
3409int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3410 u8 *hash192, u8 *randomizer192,
3411 u8 *hash256, u8 *randomizer256)
3412{
3413 struct oob_data *data;
3414
3415 data = hci_find_remote_oob_data(hdev, bdaddr);
3416 if (!data) {
0a14ab41 3417 data = kmalloc(sizeof(*data), GFP_KERNEL);
0798872e
MH
3418 if (!data)
3419 return -ENOMEM;
3420
3421 bacpy(&data->bdaddr, bdaddr);
3422 list_add(&data->list, &hdev->remote_oob_data);
3423 }
3424
3425 memcpy(data->hash192, hash192, sizeof(data->hash192));
3426 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3427
3428 memcpy(data->hash256, hash256, sizeof(data->hash256));
3429 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3430
6ed93dc6 3431 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
3432
3433 return 0;
3434}
3435
dcc36c16 3436struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
b9ee0a78 3437 bdaddr_t *bdaddr, u8 type)
b2a66aad 3438{
8035ded4 3439 struct bdaddr_list *b;
b2a66aad 3440
dcc36c16 3441 list_for_each_entry(b, bdaddr_list, list) {
b9ee0a78 3442 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 3443 return b;
b9ee0a78 3444 }
b2a66aad
AJ
3445
3446 return NULL;
3447}
3448
dcc36c16 3449void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
b2a66aad
AJ
3450{
3451 struct list_head *p, *n;
3452
dcc36c16 3453 list_for_each_safe(p, n, bdaddr_list) {
b9ee0a78 3454 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
b2a66aad
AJ
3455
3456 list_del(p);
3457 kfree(b);
3458 }
b2a66aad
AJ
3459}
3460
dcc36c16 3461int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3462{
3463 struct bdaddr_list *entry;
b2a66aad 3464
b9ee0a78 3465 if (!bacmp(bdaddr, BDADDR_ANY))
b2a66aad
AJ
3466 return -EBADF;
3467
dcc36c16 3468 if (hci_bdaddr_list_lookup(list, bdaddr, type))
5e762444 3469 return -EEXIST;
b2a66aad
AJ
3470
3471 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
3472 if (!entry)
3473 return -ENOMEM;
b2a66aad
AJ
3474
3475 bacpy(&entry->bdaddr, bdaddr);
b9ee0a78 3476 entry->bdaddr_type = type;
b2a66aad 3477
dcc36c16 3478 list_add(&entry->list, list);
b2a66aad 3479
2a8357f2 3480 return 0;
b2a66aad
AJ
3481}
3482
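A hedged sketch of the bdaddr list helpers above (hypothetical caller; assumes hdev->blacklist as the target list and the BDADDR_BREDR type constant): add an address, confirm the lookup, then remove it.

static int example_bdaddr_list(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        int err;

        err = hci_bdaddr_list_add(&hdev->blacklist, bdaddr, BDADDR_BREDR);
        if (err)
                return err;

        if (!hci_bdaddr_list_lookup(&hdev->blacklist, bdaddr, BDADDR_BREDR))
                return -ENOENT;

        return hci_bdaddr_list_del(&hdev->blacklist, bdaddr, BDADDR_BREDR);
}
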
dcc36c16 3483int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3484{
3485 struct bdaddr_list *entry;
b2a66aad 3486
35f7498a 3487 if (!bacmp(bdaddr, BDADDR_ANY)) {
dcc36c16 3488 hci_bdaddr_list_clear(list);
35f7498a
JH
3489 return 0;
3490 }
b2a66aad 3491
dcc36c16 3492 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
d2ab0ac1
MH
3493 if (!entry)
3494 return -ENOENT;
3495
3496 list_del(&entry->list);
3497 kfree(entry);
3498
3499 return 0;
3500}
3501
15819a70
AG
3502/* This function requires the caller holds hdev->lock */
3503struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3504 bdaddr_t *addr, u8 addr_type)
3505{
3506 struct hci_conn_params *params;
3507
738f6185
JH
3508 /* The conn params list only contains identity addresses */
3509 if (!hci_is_identity_address(addr, addr_type))
3510 return NULL;
3511
15819a70
AG
3512 list_for_each_entry(params, &hdev->le_conn_params, list) {
3513 if (bacmp(&params->addr, addr) == 0 &&
3514 params->addr_type == addr_type) {
3515 return params;
3516 }
3517 }
3518
3519 return NULL;
3520}
3521
cef952ce
AG
3522static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3523{
3524 struct hci_conn *conn;
3525
3526 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3527 if (!conn)
3528 return false;
3529
3530 if (conn->dst_type != type)
3531 return false;
3532
3533 if (conn->state != BT_CONNECTED)
3534 return false;
3535
3536 return true;
3537}
3538
4b10966f 3539/* This function requires the caller holds hdev->lock */
501f8827
JH
3540struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3541 bdaddr_t *addr, u8 addr_type)
4b10966f 3542{
912b42ef 3543 struct hci_conn_params *param;
4b10966f 3544
738f6185
JH
3545 /* The list only contains identity addresses */
3546 if (!hci_is_identity_address(addr, addr_type))
3547 return NULL;
3548
501f8827 3549 list_for_each_entry(param, list, action) {
912b42ef
JH
3550 if (bacmp(&param->addr, addr) == 0 &&
3551 param->addr_type == addr_type)
3552 return param;
4b10966f
MH
3553 }
3554
3555 return NULL;
3556}
3557
3558/* This function requires the caller holds hdev->lock */
51d167c0
MH
3559struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3560 bdaddr_t *addr, u8 addr_type)
bf5b3c8b
MH
3561{
3562 struct hci_conn_params *params;
3563
c46245b3 3564 if (!hci_is_identity_address(addr, addr_type))
51d167c0 3565 return NULL;
bf5b3c8b
MH
3566
3567 params = hci_conn_params_lookup(hdev, addr, addr_type);
3568 if (params)
51d167c0 3569 return params;
bf5b3c8b
MH
3570
3571 params = kzalloc(sizeof(*params), GFP_KERNEL);
3572 if (!params) {
3573 BT_ERR("Out of memory");
51d167c0 3574 return NULL;
bf5b3c8b
MH
3575 }
3576
3577 bacpy(&params->addr, addr);
3578 params->addr_type = addr_type;
3579
3580 list_add(&params->list, &hdev->le_conn_params);
93450c75 3581 INIT_LIST_HEAD(&params->action);
bf5b3c8b
MH
3582
3583 params->conn_min_interval = hdev->le_conn_min_interval;
3584 params->conn_max_interval = hdev->le_conn_max_interval;
3585 params->conn_latency = hdev->le_conn_latency;
3586 params->supervision_timeout = hdev->le_supv_timeout;
3587 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3588
3589 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3590
51d167c0 3591 return params;
bf5b3c8b
MH
3592}
3593
3594/* This function requires the caller holds hdev->lock */
3595int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
d06b50ce 3596 u8 auto_connect)
15819a70
AG
3597{
3598 struct hci_conn_params *params;
3599
8c87aae1
MH
3600 params = hci_conn_params_add(hdev, addr, addr_type);
3601 if (!params)
3602 return -EIO;
cef952ce 3603
42ce26de
JH
3604 if (params->auto_connect == auto_connect)
3605 return 0;
3606
95305baa 3607 list_del_init(&params->action);
15819a70 3608
cef952ce
AG
3609 switch (auto_connect) {
3610 case HCI_AUTO_CONN_DISABLED:
3611 case HCI_AUTO_CONN_LINK_LOSS:
95305baa 3612 hci_update_background_scan(hdev);
cef952ce 3613 break;
851efca8 3614 case HCI_AUTO_CONN_REPORT:
95305baa
JH
3615 list_add(&params->action, &hdev->pend_le_reports);
3616 hci_update_background_scan(hdev);
851efca8 3617 break;
cef952ce 3618 case HCI_AUTO_CONN_ALWAYS:
95305baa
JH
3619 if (!is_connected(hdev, addr, addr_type)) {
3620 list_add(&params->action, &hdev->pend_le_conns);
3621 hci_update_background_scan(hdev);
3622 }
cef952ce
AG
3623 break;
3624 }
15819a70 3625
851efca8
JH
3626 params->auto_connect = auto_connect;
3627
d06b50ce
MH
3628 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3629 auto_connect);
a9b0a04c
AG
3630
3631 return 0;
15819a70
AG
3632}
3633
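A hedged in-kernel usage sketch (example_auto_connect is a hypothetical helper): mark a peer for automatic reconnection. The caller must hold hdev->lock, as hci_conn_params_set() requires.

static int example_auto_connect(struct hci_dev *hdev, bdaddr_t *peer)
{
        int err;

        hci_dev_lock(hdev);
        err = hci_conn_params_set(hdev, peer, ADDR_LE_DEV_PUBLIC,
                                  HCI_AUTO_CONN_ALWAYS);
        hci_dev_unlock(hdev);

        return err;
}
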
3634/* This function requires the caller holds hdev->lock */
3635void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3636{
3637 struct hci_conn_params *params;
3638
3639 params = hci_conn_params_lookup(hdev, addr, addr_type);
3640 if (!params)
3641 return;
3642
95305baa 3643 list_del(&params->action);
15819a70
AG
3644 list_del(&params->list);
3645 kfree(params);
3646
95305baa
JH
3647 hci_update_background_scan(hdev);
3648
15819a70
AG
3649 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3650}
3651
55af49a8
JH
3652/* This function requires the caller holds hdev->lock */
3653void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3654{
3655 struct hci_conn_params *params, *tmp;
3656
3657 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3658 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3659 continue;
3660 list_del(&params->list);
3661 kfree(params);
3662 }
3663
3664 BT_DBG("All LE disabled connection parameters were removed");
3665}
3666
15819a70 3667/* This function requires the caller holds hdev->lock */
373110c5 3668void hci_conn_params_clear_all(struct hci_dev *hdev)
15819a70
AG
3669{
3670 struct hci_conn_params *params, *tmp;
3671
3672 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
a2f41a8f 3673 list_del(&params->action);
15819a70
AG
3674 list_del(&params->list);
3675 kfree(params);
3676 }
3677
a2f41a8f 3678 hci_update_background_scan(hdev);
1089b67d 3679
15819a70
AG
3680 BT_DBG("All LE connection parameters were removed");
3681}
3682
4c87eaab 3683static void inquiry_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3684{
4c87eaab
AG
3685 if (status) {
3686 BT_ERR("Failed to start inquiry: status %d", status);
7ba8b4be 3687
4c87eaab
AG
3688 hci_dev_lock(hdev);
3689 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3690 hci_dev_unlock(hdev);
3691 return;
3692 }
7ba8b4be
AG
3693}
3694
4c87eaab 3695static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3696{
4c87eaab
AG
3697 /* General inquiry access code (GIAC) */
3698 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3699 struct hci_request req;
3700 struct hci_cp_inquiry cp;
7ba8b4be
AG
3701 int err;
3702
4c87eaab
AG
3703 if (status) {
3704 BT_ERR("Failed to disable LE scanning: status %d", status);
3705 return;
3706 }
7ba8b4be 3707
4c87eaab
AG
3708 switch (hdev->discovery.type) {
3709 case DISCOV_TYPE_LE:
3710 hci_dev_lock(hdev);
3711 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3712 hci_dev_unlock(hdev);
3713 break;
7ba8b4be 3714
4c87eaab
AG
3715 case DISCOV_TYPE_INTERLEAVED:
3716 hci_req_init(&req, hdev);
7ba8b4be 3717
4c87eaab
AG
3718 memset(&cp, 0, sizeof(cp));
3719 memcpy(&cp.lap, lap, sizeof(cp.lap));
3720 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3721 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
7ba8b4be 3722
4c87eaab 3723 hci_dev_lock(hdev);
7dbfac1d 3724
4c87eaab 3725 hci_inquiry_cache_flush(hdev);
7dbfac1d 3726
4c87eaab
AG
3727 err = hci_req_run(&req, inquiry_complete);
3728 if (err) {
3729 BT_ERR("Inquiry request failed: err %d", err);
3730 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3731 }
7dbfac1d 3732
4c87eaab
AG
3733 hci_dev_unlock(hdev);
3734 break;
7dbfac1d 3735 }
7dbfac1d
AG
3736}
3737
7ba8b4be
AG
3738static void le_scan_disable_work(struct work_struct *work)
3739{
3740 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 3741 le_scan_disable.work);
4c87eaab
AG
3742 struct hci_request req;
3743 int err;
7ba8b4be
AG
3744
3745 BT_DBG("%s", hdev->name);
3746
4c87eaab 3747 hci_req_init(&req, hdev);
28b75a89 3748
b1efcc28 3749 hci_req_add_le_scan_disable(&req);
28b75a89 3750
4c87eaab
AG
3751 err = hci_req_run(&req, le_scan_disable_work_complete);
3752 if (err)
3753 BT_ERR("Disable LE scanning request failed: err %d", err);
28b75a89
AG
3754}
3755
8d97250e
JH
3756static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3757{
3758 struct hci_dev *hdev = req->hdev;
3759
3760 /* If we're advertising or initiating an LE connection we can't
3761 * go ahead and change the random address at this time. This is
3762 * because the eventual initiator address used for the
3763 * subsequently created connection will be undefined (some
3764 * controllers use the new address and others the one we had
3765 * when the operation started).
3766 *
3767 * In this kind of scenario skip the update and let the random
3768 * address be updated at the next cycle.
3769 */
5ce194c4 3770 if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
8d97250e
JH
3771 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3772 BT_DBG("Deferring random address update");
3773 return;
3774 }
3775
3776 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3777}
3778
94b1fc92
MH
3779int hci_update_random_address(struct hci_request *req, bool require_privacy,
3780 u8 *own_addr_type)
ebd3a747
JH
3781{
3782 struct hci_dev *hdev = req->hdev;
3783 int err;
3784
 3785 /* If privacy is enabled, use a resolvable private address. If
2b5224dc
MH
 3786 * the current RPA has expired, or something other than the
 3787 * current RPA is in use, generate a new one.
ebd3a747
JH
3788 */
3789 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
ebd3a747
JH
3790 int to;
3791
3792 *own_addr_type = ADDR_LE_DEV_RANDOM;
3793
3794 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
2b5224dc 3795 !bacmp(&hdev->random_addr, &hdev->rpa))
ebd3a747
JH
3796 return 0;
3797
2b5224dc 3798 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
ebd3a747
JH
3799 if (err < 0) {
3800 BT_ERR("%s failed to generate new RPA", hdev->name);
3801 return err;
3802 }
3803
8d97250e 3804 set_random_addr(req, &hdev->rpa);
ebd3a747
JH
3805
3806 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3807 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3808
3809 return 0;
94b1fc92
MH
3810 }
3811
 3812 /* If privacy is required but a resolvable private address cannot
 3813 * be used, fall back to an unresolvable private address. This is
 3814 * useful for active scanning and non-connectable advertising.
3815 */
3816 if (require_privacy) {
3817 bdaddr_t urpa;
3818
3819 get_random_bytes(&urpa, 6);
3820 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3821
3822 *own_addr_type = ADDR_LE_DEV_RANDOM;
8d97250e 3823 set_random_addr(req, &urpa);
94b1fc92 3824 return 0;
ebd3a747
JH
3825 }
3826
 3827 /* If forcing the static address is in use, or there is no public
 3828 * address, use the static address as the random address (but skip
 3829 * the HCI command if the current random address is already the
 3830 * static one).
3831 */
111902f7 3832 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
ebd3a747
JH
3833 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3834 *own_addr_type = ADDR_LE_DEV_RANDOM;
3835 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3836 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3837 &hdev->static_addr);
3838 return 0;
3839 }
3840
3841 /* Neither privacy nor static address is being used so use a
3842 * public address.
3843 */
3844 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3845
3846 return 0;
3847}
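/* Illustrative sketch (hypothetical, not part of this file): a typical
 * caller feeds the own_addr_type produced above into the next command
 * of the same request, e.g. LE advertising parameters; the command
 * struct comes from hci.h.
 */
static void example_set_adv_params(struct hci_request *req)
{
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type;

	/* false: advertising by itself does not require privacy */
	if (hci_update_random_address(req, false, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.own_address_type = own_addr_type;
	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
}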
3848
a1f4c318
JH
3849/* Copy the Identity Address of the controller.
3850 *
3851 * If the controller has a public BD_ADDR, then by default use that one.
3852 * If this is a LE only controller without a public address, default to
3853 * the static random address.
3854 *
3855 * For debugging purposes it is possible to force controllers with a
3856 * public address to use the static random address instead.
3857 */
3858void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3859 u8 *bdaddr_type)
3860{
111902f7 3861 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
a1f4c318
JH
3862 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3863 bacpy(bdaddr, &hdev->static_addr);
3864 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3865 } else {
3866 bacpy(bdaddr, &hdev->bdaddr);
3867 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3868 }
3869}
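/* Usage sketch (hypothetical): fetch the identity address before
 * logging or reporting it, instead of reading hdev->bdaddr directly.
 */
static void example_log_identity(struct hci_dev *hdev)
{
	bdaddr_t bdaddr;
	u8 bdaddr_type;

	hci_copy_identity_address(hdev, &bdaddr, &bdaddr_type);
	BT_DBG("%s identity %pMR (type %u)", hdev->name, &bdaddr,
	       bdaddr_type);
}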
3870
9be0dab7
DH
3871/* Alloc HCI device */
3872struct hci_dev *hci_alloc_dev(void)
3873{
3874 struct hci_dev *hdev;
3875
3876 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3877 if (!hdev)
3878 return NULL;
3879
b1b813d4
DH
3880 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3881 hdev->esco_type = (ESCO_HV1);
3882 hdev->link_mode = (HCI_LM_ACCEPT);
b4cb9fb2
MH
 3883 hdev->num_iac = 0x01; /* Support for one IAC is mandatory */
3884 hdev->io_capability = 0x03; /* No Input No Output */
96c2103a 3885 hdev->manufacturer = 0xffff; /* Default to internal use */
bbaf444a
JH
3886 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3887 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
b1b813d4 3888
b1b813d4
DH
3889 hdev->sniff_max_interval = 800;
3890 hdev->sniff_min_interval = 80;
3891
3f959d46 3892 hdev->le_adv_channel_map = 0x07;
bef64738
MH
3893 hdev->le_scan_interval = 0x0060;
3894 hdev->le_scan_window = 0x0030;
4e70c7e7
MH
3895 hdev->le_conn_min_interval = 0x0028;
3896 hdev->le_conn_max_interval = 0x0038;
04fb7d90
MH
3897 hdev->le_conn_latency = 0x0000;
3898 hdev->le_supv_timeout = 0x002a;
bef64738 3899
d6bfd59c 3900 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
b9a7a61e 3901 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
31ad1691
AK
3902 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3903 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
d6bfd59c 3904
b1b813d4
DH
3905 mutex_init(&hdev->lock);
3906 mutex_init(&hdev->req_lock);
3907
3908 INIT_LIST_HEAD(&hdev->mgmt_pending);
3909 INIT_LIST_HEAD(&hdev->blacklist);
6659358e 3910 INIT_LIST_HEAD(&hdev->whitelist);
b1b813d4
DH
3911 INIT_LIST_HEAD(&hdev->uuids);
3912 INIT_LIST_HEAD(&hdev->link_keys);
3913 INIT_LIST_HEAD(&hdev->long_term_keys);
970c4e46 3914 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
b1b813d4 3915 INIT_LIST_HEAD(&hdev->remote_oob_data);
d2ab0ac1 3916 INIT_LIST_HEAD(&hdev->le_white_list);
15819a70 3917 INIT_LIST_HEAD(&hdev->le_conn_params);
77a77a30 3918 INIT_LIST_HEAD(&hdev->pend_le_conns);
66f8455a 3919 INIT_LIST_HEAD(&hdev->pend_le_reports);
6b536b5e 3920 INIT_LIST_HEAD(&hdev->conn_hash.list);
b1b813d4
DH
3921
3922 INIT_WORK(&hdev->rx_work, hci_rx_work);
3923 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3924 INIT_WORK(&hdev->tx_work, hci_tx_work);
3925 INIT_WORK(&hdev->power_on, hci_power_on);
b1b813d4 3926
b1b813d4
DH
3927 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3928 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3929 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3930
b1b813d4
DH
3931 skb_queue_head_init(&hdev->rx_q);
3932 skb_queue_head_init(&hdev->cmd_q);
3933 skb_queue_head_init(&hdev->raw_q);
3934
3935 init_waitqueue_head(&hdev->req_wait_q);
3936
65cc2b49 3937 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
b1b813d4 3938
b1b813d4
DH
3939 hci_init_sysfs(hdev);
3940 discovery_init(hdev);
9be0dab7
DH
3941
3942 return hdev;
3943}
3944EXPORT_SYMBOL(hci_alloc_dev);
3945
3946/* Free HCI device */
3947void hci_free_dev(struct hci_dev *hdev)
3948{
9be0dab7
DH
3949 /* will free via device release */
3950 put_device(&hdev->dev);
3951}
3952EXPORT_SYMBOL(hci_free_dev);
3953
1da177e4
LT
3954/* Register HCI device */
3955int hci_register_dev(struct hci_dev *hdev)
3956{
b1b813d4 3957 int id, error;
1da177e4 3958
74292d5a 3959 if (!hdev->open || !hdev->close || !hdev->send)
1da177e4
LT
3960 return -EINVAL;
3961
08add513
MM
3962 /* Do not allow HCI_AMP devices to register at index 0,
3963 * so the index can be used as the AMP controller ID.
3964 */
3df92b31
SL
3965 switch (hdev->dev_type) {
3966 case HCI_BREDR:
3967 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3968 break;
3969 case HCI_AMP:
3970 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3971 break;
3972 default:
3973 return -EINVAL;
1da177e4 3974 }
8e87d142 3975
3df92b31
SL
3976 if (id < 0)
3977 return id;
3978
1da177e4
LT
3979 sprintf(hdev->name, "hci%d", id);
3980 hdev->id = id;
2d8b3a11
AE
3981
3982 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3983
d8537548
KC
3984 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3985 WQ_MEM_RECLAIM, 1, hdev->name);
33ca954d
DH
3986 if (!hdev->workqueue) {
3987 error = -ENOMEM;
3988 goto err;
3989 }
f48fd9c8 3990
d8537548
KC
3991 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3992 WQ_MEM_RECLAIM, 1, hdev->name);
6ead1bbc
JH
3993 if (!hdev->req_workqueue) {
3994 destroy_workqueue(hdev->workqueue);
3995 error = -ENOMEM;
3996 goto err;
3997 }
3998
0153e2ec
MH
3999 if (!IS_ERR_OR_NULL(bt_debugfs))
4000 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4001
bdc3e0f1
MH
4002 dev_set_name(&hdev->dev, "%s", hdev->name);
4003
99780a7b
JH
4004 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
4005 CRYPTO_ALG_ASYNC);
4006 if (IS_ERR(hdev->tfm_aes)) {
4007 BT_ERR("Unable to create crypto context");
4008 error = PTR_ERR(hdev->tfm_aes);
4009 hdev->tfm_aes = NULL;
4010 goto err_wqueue;
4011 }
4012
bdc3e0f1 4013 error = device_add(&hdev->dev);
33ca954d 4014 if (error < 0)
99780a7b 4015 goto err_tfm;
1da177e4 4016
611b30f7 4017 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
4018 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4019 hdev);
611b30f7
MH
4020 if (hdev->rfkill) {
4021 if (rfkill_register(hdev->rfkill) < 0) {
4022 rfkill_destroy(hdev->rfkill);
4023 hdev->rfkill = NULL;
4024 }
4025 }
4026
5e130367
JH
4027 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4028 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4029
a8b2d5c2 4030 set_bit(HCI_SETUP, &hdev->dev_flags);
004b0258 4031 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
ce2be9ac 4032
01cd3404 4033 if (hdev->dev_type == HCI_BREDR) {
56f87901
JH
4034 /* Assume BR/EDR support until proven otherwise (such as
4035 * through reading supported features during init.
4036 */
4037 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4038 }
ce2be9ac 4039
fcee3377
GP
4040 write_lock(&hci_dev_list_lock);
4041 list_add(&hdev->list, &hci_dev_list);
4042 write_unlock(&hci_dev_list_lock);
4043
4a964404
MH
4044 /* Devices that are marked for raw-only usage are unconfigured
4045 * and should not be included in normal operation.
fee746b0
MH
4046 */
4047 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4a964404 4048 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
fee746b0 4049
1da177e4 4050 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 4051 hci_dev_hold(hdev);
1da177e4 4052
19202573 4053 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 4054
1da177e4 4055 return id;
f48fd9c8 4056
99780a7b
JH
4057err_tfm:
4058 crypto_free_blkcipher(hdev->tfm_aes);
33ca954d
DH
4059err_wqueue:
4060 destroy_workqueue(hdev->workqueue);
6ead1bbc 4061 destroy_workqueue(hdev->req_workqueue);
33ca954d 4062err:
3df92b31 4063 ida_simple_remove(&hci_index_ida, hdev->id);
f48fd9c8 4064
33ca954d 4065 return error;
1da177e4
LT
4066}
4067EXPORT_SYMBOL(hci_register_dev);
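/* Driver-side sketch (hypothetical, not part of hci_core.c): the usual
 * probe-time pairing of hci_alloc_dev()/hci_register_dev(), with
 * hci_free_dev() on failure. my_open, my_close and my_send stand in
 * for a transport driver's callbacks.
 */
static int example_probe(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus   = HCI_USB;
	hdev->open  = my_open;	/* hypothetical driver hooks */
	hdev->close = my_close;
	hdev->send  = my_send;

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		return err;
	}

	return 0;
}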
4068
4069/* Unregister HCI device */
59735631 4070void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 4071{
3df92b31 4072 int i, id;
ef222013 4073
c13854ce 4074 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 4075
94324962
JH
4076 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4077
3df92b31
SL
4078 id = hdev->id;
4079
f20d09d5 4080 write_lock(&hci_dev_list_lock);
1da177e4 4081 list_del(&hdev->list);
f20d09d5 4082 write_unlock(&hci_dev_list_lock);
1da177e4
LT
4083
4084 hci_dev_do_close(hdev);
4085
cd4c5391 4086 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
4087 kfree_skb(hdev->reassembly[i]);
4088
b9b5ef18
GP
4089 cancel_work_sync(&hdev->power_on);
4090
ab81cbf9 4091 if (!test_bit(HCI_INIT, &hdev->flags) &&
d603b76b
MH
4092 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4093 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
09fd0de5 4094 hci_dev_lock(hdev);
744cf19e 4095 mgmt_index_removed(hdev);
09fd0de5 4096 hci_dev_unlock(hdev);
56e5cb86 4097 }
ab81cbf9 4098
2e58ef3e
JH
4099 /* mgmt_index_removed should take care of emptying the
4100 * pending list */
4101 BUG_ON(!list_empty(&hdev->mgmt_pending));
4102
1da177e4
LT
4103 hci_notify(hdev, HCI_DEV_UNREG);
4104
611b30f7
MH
4105 if (hdev->rfkill) {
4106 rfkill_unregister(hdev->rfkill);
4107 rfkill_destroy(hdev->rfkill);
4108 }
4109
99780a7b
JH
4110 if (hdev->tfm_aes)
4111 crypto_free_blkcipher(hdev->tfm_aes);
4112
bdc3e0f1 4113 device_del(&hdev->dev);
147e2d59 4114
0153e2ec
MH
4115 debugfs_remove_recursive(hdev->debugfs);
4116
f48fd9c8 4117 destroy_workqueue(hdev->workqueue);
6ead1bbc 4118 destroy_workqueue(hdev->req_workqueue);
f48fd9c8 4119
09fd0de5 4120 hci_dev_lock(hdev);
dcc36c16 4121 hci_bdaddr_list_clear(&hdev->blacklist);
6659358e 4122 hci_bdaddr_list_clear(&hdev->whitelist);
2aeb9a1a 4123 hci_uuids_clear(hdev);
55ed8ca1 4124 hci_link_keys_clear(hdev);
b899efaf 4125 hci_smp_ltks_clear(hdev);
970c4e46 4126 hci_smp_irks_clear(hdev);
2763eda6 4127 hci_remote_oob_data_clear(hdev);
dcc36c16 4128 hci_bdaddr_list_clear(&hdev->le_white_list);
373110c5 4129 hci_conn_params_clear_all(hdev);
09fd0de5 4130 hci_dev_unlock(hdev);
e2e0cacb 4131
dc946bd8 4132 hci_dev_put(hdev);
3df92b31
SL
4133
4134 ida_simple_remove(&hci_index_ida, id);
1da177e4
LT
4135}
4136EXPORT_SYMBOL(hci_unregister_dev);
4137
4138/* Suspend HCI device */
4139int hci_suspend_dev(struct hci_dev *hdev)
4140{
4141 hci_notify(hdev, HCI_DEV_SUSPEND);
4142 return 0;
4143}
4144EXPORT_SYMBOL(hci_suspend_dev);
4145
4146/* Resume HCI device */
4147int hci_resume_dev(struct hci_dev *hdev)
4148{
4149 hci_notify(hdev, HCI_DEV_RESUME);
4150 return 0;
4151}
4152EXPORT_SYMBOL(hci_resume_dev);
4153
76bca880 4154/* Receive frame from HCI drivers */
e1a26170 4155int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 4156{
76bca880 4157 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 4158 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
4159 kfree_skb(skb);
4160 return -ENXIO;
4161 }
4162
d82603c6 4163 /* Incoming skb */
76bca880
MH
4164 bt_cb(skb)->incoming = 1;
4165
4166 /* Time stamp */
4167 __net_timestamp(skb);
4168
76bca880 4169 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 4170 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 4171
76bca880
MH
4172 return 0;
4173}
4174EXPORT_SYMBOL(hci_recv_frame);
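/* Transport-driver sketch (hypothetical): tag a completed frame with
 * its packet type and hand it to the core; the core takes ownership
 * of the skb either way.
 */
static int example_deliver_event(struct hci_dev *hdev, void *buf, int len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), buf, len);
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;

	return hci_recv_frame(hdev, skb);
}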
4175
33e882a5 4176static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 4177 int count, __u8 index)
33e882a5
SS
4178{
4179 int len = 0;
4180 int hlen = 0;
4181 int remain = count;
4182 struct sk_buff *skb;
4183 struct bt_skb_cb *scb;
4184
4185 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 4186 index >= NUM_REASSEMBLY)
33e882a5
SS
4187 return -EILSEQ;
4188
4189 skb = hdev->reassembly[index];
4190
4191 if (!skb) {
4192 switch (type) {
4193 case HCI_ACLDATA_PKT:
4194 len = HCI_MAX_FRAME_SIZE;
4195 hlen = HCI_ACL_HDR_SIZE;
4196 break;
4197 case HCI_EVENT_PKT:
4198 len = HCI_MAX_EVENT_SIZE;
4199 hlen = HCI_EVENT_HDR_SIZE;
4200 break;
4201 case HCI_SCODATA_PKT:
4202 len = HCI_MAX_SCO_SIZE;
4203 hlen = HCI_SCO_HDR_SIZE;
4204 break;
4205 }
4206
1e429f38 4207 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
4208 if (!skb)
4209 return -ENOMEM;
4210
4211 scb = (void *) skb->cb;
4212 scb->expect = hlen;
4213 scb->pkt_type = type;
4214
33e882a5
SS
4215 hdev->reassembly[index] = skb;
4216 }
4217
4218 while (count) {
4219 scb = (void *) skb->cb;
89bb46d0 4220 len = min_t(uint, scb->expect, count);
33e882a5
SS
4221
4222 memcpy(skb_put(skb, len), data, len);
4223
4224 count -= len;
4225 data += len;
4226 scb->expect -= len;
4227 remain = count;
4228
4229 switch (type) {
4230 case HCI_EVENT_PKT:
4231 if (skb->len == HCI_EVENT_HDR_SIZE) {
4232 struct hci_event_hdr *h = hci_event_hdr(skb);
4233 scb->expect = h->plen;
4234
4235 if (skb_tailroom(skb) < scb->expect) {
4236 kfree_skb(skb);
4237 hdev->reassembly[index] = NULL;
4238 return -ENOMEM;
4239 }
4240 }
4241 break;
4242
4243 case HCI_ACLDATA_PKT:
4244 if (skb->len == HCI_ACL_HDR_SIZE) {
4245 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4246 scb->expect = __le16_to_cpu(h->dlen);
4247
4248 if (skb_tailroom(skb) < scb->expect) {
4249 kfree_skb(skb);
4250 hdev->reassembly[index] = NULL;
4251 return -ENOMEM;
4252 }
4253 }
4254 break;
4255
4256 case HCI_SCODATA_PKT:
4257 if (skb->len == HCI_SCO_HDR_SIZE) {
4258 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4259 scb->expect = h->dlen;
4260
4261 if (skb_tailroom(skb) < scb->expect) {
4262 kfree_skb(skb);
4263 hdev->reassembly[index] = NULL;
4264 return -ENOMEM;
4265 }
4266 }
4267 break;
4268 }
4269
4270 if (scb->expect == 0) {
4271 /* Complete frame */
4272
4273 bt_cb(skb)->pkt_type = type;
e1a26170 4274 hci_recv_frame(hdev, skb);
33e882a5
SS
4275
4276 hdev->reassembly[index] = NULL;
4277 return remain;
4278 }
4279 }
4280
4281 return remain;
4282}
4283
ef222013
MH
4284int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4285{
f39a3c06
SS
4286 int rem = 0;
4287
ef222013
MH
4288 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4289 return -EILSEQ;
4290
da5f6c37 4291 while (count) {
1e429f38 4292 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
4293 if (rem < 0)
4294 return rem;
ef222013 4295
f39a3c06
SS
4296 data += (count - rem);
4297 count = rem;
f81c6224 4298 }
ef222013 4299
f39a3c06 4300 return rem;
ef222013
MH
4301}
4302EXPORT_SYMBOL(hci_recv_fragment);
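/* Note: hci_recv_fragment() maps the packet type to reassembly slot
 * (type - 1), so ACL, SCO and event fragments each keep their own
 * buffer in hdev->reassembly[]. A UART-style driver would feed each
 * received chunk like this (hypothetical sketch):
 */
static void example_rx_chunk(struct hci_dev *hdev, void *data, int count)
{
	if (hci_recv_fragment(hdev, HCI_ACLDATA_PKT, data, count) < 0)
		BT_ERR("%s reassembly failed", hdev->name);
}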
4303
99811510
SS
4304#define STREAM_REASSEMBLY 0
4305
4306int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4307{
4308 int type;
4309 int rem = 0;
4310
da5f6c37 4311 while (count) {
99811510
SS
4312 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4313
4314 if (!skb) {
4315 struct { char type; } *pkt;
4316
4317 /* Start of the frame */
4318 pkt = data;
4319 type = pkt->type;
4320
4321 data++;
4322 count--;
4323 } else
4324 type = bt_cb(skb)->pkt_type;
4325
1e429f38 4326 rem = hci_reassembly(hdev, type, data, count,
a8c5fb1a 4327 STREAM_REASSEMBLY);
99811510
SS
4328 if (rem < 0)
4329 return rem;
4330
4331 data += (count - rem);
4332 count = rem;
f81c6224 4333 }
99811510
SS
4334
4335 return rem;
4336}
4337EXPORT_SYMBOL(hci_recv_stream_fragment);
4338
1da177e4
LT
4339/* ---- Interface to upper protocols ---- */
4340
1da177e4
LT
4341int hci_register_cb(struct hci_cb *cb)
4342{
4343 BT_DBG("%p name %s", cb, cb->name);
4344
f20d09d5 4345 write_lock(&hci_cb_list_lock);
1da177e4 4346 list_add(&cb->list, &hci_cb_list);
f20d09d5 4347 write_unlock(&hci_cb_list_lock);
1da177e4
LT
4348
4349 return 0;
4350}
4351EXPORT_SYMBOL(hci_register_cb);
4352
4353int hci_unregister_cb(struct hci_cb *cb)
4354{
4355 BT_DBG("%p name %s", cb, cb->name);
4356
f20d09d5 4357 write_lock(&hci_cb_list_lock);
1da177e4 4358 list_del(&cb->list);
f20d09d5 4359 write_unlock(&hci_cb_list_lock);
1da177e4
LT
4360
4361 return 0;
4362}
4363EXPORT_SYMBOL(hci_unregister_cb);
4364
51086991 4365static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4 4366{
cdc52faa
MH
4367 int err;
4368
0d48d939 4369 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 4370
cd82e61c
MH
4371 /* Time stamp */
4372 __net_timestamp(skb);
1da177e4 4373
cd82e61c
MH
4374 /* Send copy to monitor */
4375 hci_send_to_monitor(hdev, skb);
4376
4377 if (atomic_read(&hdev->promisc)) {
4378 /* Send copy to the sockets */
470fe1b5 4379 hci_send_to_sock(hdev, skb);
1da177e4
LT
4380 }
4381
4382 /* Get rid of skb owner, prior to sending to the driver. */
4383 skb_orphan(skb);
4384
cdc52faa
MH
4385 err = hdev->send(hdev, skb);
4386 if (err < 0) {
4387 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4388 kfree_skb(skb);
4389 }
1da177e4
LT
4390}
4391
3119ae95
JH
4392void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4393{
4394 skb_queue_head_init(&req->cmd_q);
4395 req->hdev = hdev;
5d73e034 4396 req->err = 0;
3119ae95
JH
4397}
4398
4399int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4400{
4401 struct hci_dev *hdev = req->hdev;
4402 struct sk_buff *skb;
4403 unsigned long flags;
4404
4405 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4406
5d73e034
AG
 4407 /* If an error occurred during request building, remove all HCI
4408 * commands queued on the HCI request queue.
4409 */
4410 if (req->err) {
4411 skb_queue_purge(&req->cmd_q);
4412 return req->err;
4413 }
4414
3119ae95
JH
4415 /* Do not allow empty requests */
4416 if (skb_queue_empty(&req->cmd_q))
382b0c39 4417 return -ENODATA;
3119ae95
JH
4418
4419 skb = skb_peek_tail(&req->cmd_q);
4420 bt_cb(skb)->req.complete = complete;
4421
4422 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4423 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4424 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4425
4426 queue_work(hdev->workqueue, &hdev->cmd_work);
4427
4428 return 0;
4429}
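/* Request-API sketch (hypothetical): queue two commands atomically and
 * receive one completion callback after the controller answers the
 * last of them.
 */
static void example_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s example request done, status 0x%2.2x", hdev->name,
	       status);
}

static int example_run(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 enable = 0x00;

	hci_req_init(&req, hdev);
	hci_req_add_le_scan_disable(&req);
	hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
		    &enable);

	return hci_req_run(&req, example_complete);
}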
4430
1ca3a9d0 4431static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
07dc93dd 4432 u32 plen, const void *param)
1da177e4
LT
4433{
4434 int len = HCI_COMMAND_HDR_SIZE + plen;
4435 struct hci_command_hdr *hdr;
4436 struct sk_buff *skb;
4437
1da177e4 4438 skb = bt_skb_alloc(len, GFP_ATOMIC);
1ca3a9d0
JH
4439 if (!skb)
4440 return NULL;
1da177e4
LT
4441
4442 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 4443 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
4444 hdr->plen = plen;
4445
4446 if (plen)
4447 memcpy(skb_put(skb, plen), param, plen);
4448
4449 BT_DBG("skb len %d", skb->len);
4450
0d48d939 4451 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
c78ae283 4452
1ca3a9d0
JH
4453 return skb;
4454}
4455
4456/* Send HCI command */
07dc93dd
JH
4457int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4458 const void *param)
1ca3a9d0
JH
4459{
4460 struct sk_buff *skb;
4461
4462 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4463
4464 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4465 if (!skb) {
4466 BT_ERR("%s no memory for command", hdev->name);
4467 return -ENOMEM;
4468 }
4469
11714b3d
JH
 4470 /* Stand-alone HCI commands must be flagged as
4471 * single-command requests.
4472 */
4473 bt_cb(skb)->req.start = true;
4474
1da177e4 4475 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 4476 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
4477
4478 return 0;
4479}
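/* One-shot sketch (hypothetical): hci_send_cmd() for a single
 * fire-and-forget command, here Write Scan Enable with page and
 * inquiry scan both on.
 */
static int example_write_scan(struct hci_dev *hdev)
{
	u8 scan = SCAN_PAGE | SCAN_INQUIRY;

	return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan),
			    &scan);
}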
1da177e4 4480
71c76a17 4481/* Queue a command to an asynchronous HCI request */
07dc93dd
JH
4482void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4483 const void *param, u8 event)
71c76a17
JH
4484{
4485 struct hci_dev *hdev = req->hdev;
4486 struct sk_buff *skb;
4487
4488 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4489
34739c1e
AG
 4490 /* If an error occurred during request building, there is no point in
4491 * queueing the HCI command. We can simply return.
4492 */
4493 if (req->err)
4494 return;
4495
71c76a17
JH
4496 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4497 if (!skb) {
5d73e034
AG
4498 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4499 hdev->name, opcode);
4500 req->err = -ENOMEM;
e348fe6b 4501 return;
71c76a17
JH
4502 }
4503
4504 if (skb_queue_empty(&req->cmd_q))
4505 bt_cb(skb)->req.start = true;
4506
02350a72
JH
4507 bt_cb(skb)->req.event = event;
4508
71c76a17 4509 skb_queue_tail(&req->cmd_q, skb);
71c76a17
JH
4510}
4511
07dc93dd
JH
4512void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4513 const void *param)
02350a72
JH
4514{
4515 hci_req_add_ev(req, opcode, plen, param, 0);
4516}
4517
1da177e4 4518/* Get data from the previously sent command */
a9de9248 4519void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
4520{
4521 struct hci_command_hdr *hdr;
4522
4523 if (!hdev->sent_cmd)
4524 return NULL;
4525
4526 hdr = (void *) hdev->sent_cmd->data;
4527
a9de9248 4528 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
4529 return NULL;
4530
f0e09510 4531 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
4532
4533 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4534}
4535
4536/* Send ACL data */
4537static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4538{
4539 struct hci_acl_hdr *hdr;
4540 int len = skb->len;
4541
badff6d0
ACM
4542 skb_push(skb, HCI_ACL_HDR_SIZE);
4543 skb_reset_transport_header(skb);
9c70220b 4544 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
4545 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4546 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
4547}
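/* Worked example: hci_handle_pack() folds the 12-bit connection handle
 * and the 4-bit packet boundary/broadcast flags into one 16-bit field,
 * so a (hypothetical) handle 0x002a sent with ACL_START (0x02) packs
 * to 0x202a.
 */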
4548
ee22be7e 4549static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 4550 struct sk_buff *skb, __u16 flags)
1da177e4 4551{
ee22be7e 4552 struct hci_conn *conn = chan->conn;
1da177e4
LT
4553 struct hci_dev *hdev = conn->hdev;
4554 struct sk_buff *list;
4555
087bfd99
GP
4556 skb->len = skb_headlen(skb);
4557 skb->data_len = 0;
4558
4559 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
4560
4561 switch (hdev->dev_type) {
4562 case HCI_BREDR:
4563 hci_add_acl_hdr(skb, conn->handle, flags);
4564 break;
4565 case HCI_AMP:
4566 hci_add_acl_hdr(skb, chan->handle, flags);
4567 break;
4568 default:
4569 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4570 return;
4571 }
087bfd99 4572
70f23020
AE
4573 list = skb_shinfo(skb)->frag_list;
4574 if (!list) {
1da177e4
LT
4575 /* Non fragmented */
4576 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4577
73d80deb 4578 skb_queue_tail(queue, skb);
1da177e4
LT
4579 } else {
4580 /* Fragmented */
4581 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4582
4583 skb_shinfo(skb)->frag_list = NULL;
4584
4585 /* Queue all fragments atomically */
af3e6359 4586 spin_lock(&queue->lock);
1da177e4 4587
73d80deb 4588 __skb_queue_tail(queue, skb);
e702112f
AE
4589
4590 flags &= ~ACL_START;
4591 flags |= ACL_CONT;
1da177e4
LT
4592 do {
4593 skb = list; list = list->next;
8e87d142 4594
0d48d939 4595 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 4596 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
4597
4598 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4599
73d80deb 4600 __skb_queue_tail(queue, skb);
1da177e4
LT
4601 } while (list);
4602
af3e6359 4603 spin_unlock(&queue->lock);
1da177e4 4604 }
73d80deb
LAD
4605}
4606
4607void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4608{
ee22be7e 4609 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 4610
f0e09510 4611 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb 4612
ee22be7e 4613 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 4614
3eff45ea 4615 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4616}
1da177e4
LT
4617
4618/* Send SCO data */
0d861d8b 4619void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
4620{
4621 struct hci_dev *hdev = conn->hdev;
4622 struct hci_sco_hdr hdr;
4623
4624 BT_DBG("%s len %d", hdev->name, skb->len);
4625
aca3192c 4626 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
4627 hdr.dlen = skb->len;
4628
badff6d0
ACM
4629 skb_push(skb, HCI_SCO_HDR_SIZE);
4630 skb_reset_transport_header(skb);
9c70220b 4631 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4 4632
0d48d939 4633 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 4634
1da177e4 4635 skb_queue_tail(&conn->data_q, skb);
3eff45ea 4636 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4637}
1da177e4
LT
4638
4639/* ---- HCI TX task (outgoing data) ---- */
4640
4641/* HCI Connection scheduler */
6039aa73
GP
4642static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4643 int *quote)
1da177e4
LT
4644{
4645 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4646 struct hci_conn *conn = NULL, *c;
abc5de8f 4647 unsigned int num = 0, min = ~0;
1da177e4 4648
8e87d142 4649 /* We don't have to lock the device here. Connections are always
1da177e4 4650 * added and removed with the TX task disabled. */
bf4c6325
GP
4651
4652 rcu_read_lock();
4653
4654 list_for_each_entry_rcu(c, &h->list, list) {
769be974 4655 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 4656 continue;
769be974
MH
4657
4658 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4659 continue;
4660
1da177e4
LT
4661 num++;
4662
4663 if (c->sent < min) {
4664 min = c->sent;
4665 conn = c;
4666 }
52087a79
LAD
4667
4668 if (hci_conn_num(hdev, type) == num)
4669 break;
1da177e4
LT
4670 }
4671
bf4c6325
GP
4672 rcu_read_unlock();
4673
1da177e4 4674 if (conn) {
6ed58ec5
VT
4675 int cnt, q;
4676
4677 switch (conn->type) {
4678 case ACL_LINK:
4679 cnt = hdev->acl_cnt;
4680 break;
4681 case SCO_LINK:
4682 case ESCO_LINK:
4683 cnt = hdev->sco_cnt;
4684 break;
4685 case LE_LINK:
4686 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4687 break;
4688 default:
4689 cnt = 0;
4690 BT_ERR("Unknown link type");
4691 }
4692
4693 q = cnt / num;
1da177e4
LT
4694 *quote = q ? q : 1;
4695 } else
4696 *quote = 0;
4697
4698 BT_DBG("conn %p quote %d", conn, *quote);
4699 return conn;
4700}
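/* Worked example: with hdev->acl_cnt == 8 free ACL buffers shared by
 * num == 3 busy ACL connections, the least-used connection gets a
 * quote of 8 / 3 == 2 packets this round; "q ? q : 1" guarantees at
 * least one packet when cnt < num.
 */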
4701
6039aa73 4702static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
4703{
4704 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4705 struct hci_conn *c;
1da177e4 4706
bae1f5d9 4707 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 4708
bf4c6325
GP
4709 rcu_read_lock();
4710
1da177e4 4711 /* Kill stalled connections */
bf4c6325 4712 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 4713 if (c->type == type && c->sent) {
6ed93dc6
AE
4714 BT_ERR("%s killing stalled connection %pMR",
4715 hdev->name, &c->dst);
bed71748 4716 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
4717 }
4718 }
bf4c6325
GP
4719
4720 rcu_read_unlock();
1da177e4
LT
4721}
4722
6039aa73
GP
4723static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4724 int *quote)
1da177e4 4725{
73d80deb
LAD
4726 struct hci_conn_hash *h = &hdev->conn_hash;
4727 struct hci_chan *chan = NULL;
abc5de8f 4728 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 4729 struct hci_conn *conn;
73d80deb
LAD
4730 int cnt, q, conn_num = 0;
4731
4732 BT_DBG("%s", hdev->name);
4733
bf4c6325
GP
4734 rcu_read_lock();
4735
4736 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
4737 struct hci_chan *tmp;
4738
4739 if (conn->type != type)
4740 continue;
4741
4742 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4743 continue;
4744
4745 conn_num++;
4746
8192edef 4747 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
4748 struct sk_buff *skb;
4749
4750 if (skb_queue_empty(&tmp->data_q))
4751 continue;
4752
4753 skb = skb_peek(&tmp->data_q);
4754 if (skb->priority < cur_prio)
4755 continue;
4756
4757 if (skb->priority > cur_prio) {
4758 num = 0;
4759 min = ~0;
4760 cur_prio = skb->priority;
4761 }
4762
4763 num++;
4764
4765 if (conn->sent < min) {
4766 min = conn->sent;
4767 chan = tmp;
4768 }
4769 }
4770
4771 if (hci_conn_num(hdev, type) == conn_num)
4772 break;
4773 }
4774
bf4c6325
GP
4775 rcu_read_unlock();
4776
73d80deb
LAD
4777 if (!chan)
4778 return NULL;
4779
4780 switch (chan->conn->type) {
4781 case ACL_LINK:
4782 cnt = hdev->acl_cnt;
4783 break;
bd1eb66b
AE
4784 case AMP_LINK:
4785 cnt = hdev->block_cnt;
4786 break;
73d80deb
LAD
4787 case SCO_LINK:
4788 case ESCO_LINK:
4789 cnt = hdev->sco_cnt;
4790 break;
4791 case LE_LINK:
4792 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4793 break;
4794 default:
4795 cnt = 0;
4796 BT_ERR("Unknown link type");
4797 }
4798
4799 q = cnt / num;
4800 *quote = q ? q : 1;
4801 BT_DBG("chan %p quote %d", chan, *quote);
4802 return chan;
4803}
4804
02b20f0b
LAD
4805static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4806{
4807 struct hci_conn_hash *h = &hdev->conn_hash;
4808 struct hci_conn *conn;
4809 int num = 0;
4810
4811 BT_DBG("%s", hdev->name);
4812
bf4c6325
GP
4813 rcu_read_lock();
4814
4815 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
4816 struct hci_chan *chan;
4817
4818 if (conn->type != type)
4819 continue;
4820
4821 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4822 continue;
4823
4824 num++;
4825
8192edef 4826 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
4827 struct sk_buff *skb;
4828
4829 if (chan->sent) {
4830 chan->sent = 0;
4831 continue;
4832 }
4833
4834 if (skb_queue_empty(&chan->data_q))
4835 continue;
4836
4837 skb = skb_peek(&chan->data_q);
4838 if (skb->priority >= HCI_PRIO_MAX - 1)
4839 continue;
4840
4841 skb->priority = HCI_PRIO_MAX - 1;
4842
4843 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 4844 skb->priority);
02b20f0b
LAD
4845 }
4846
4847 if (hci_conn_num(hdev, type) == num)
4848 break;
4849 }
bf4c6325
GP
4850
4851 rcu_read_unlock();
4852
02b20f0b
LAD
4853}
4854
b71d385a
AE
4855static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4856{
4857 /* Calculate count of blocks used by this packet */
4858 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4859}
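/* Worked example: with hdev->block_len == 64, a 260-byte ACL skb
 * carries 260 - HCI_ACL_HDR_SIZE(4) == 256 payload bytes, i.e.
 * DIV_ROUND_UP(256, 64) == 4 controller blocks.
 */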
4860
6039aa73 4861static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 4862{
4a964404 4863 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1da177e4
LT
 4864 /* ACL tx timeout must be longer than the maximum
 4865 * link supervision timeout (40.9 seconds) */
63d2bc1b 4866 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 4867 HCI_ACL_TX_TIMEOUT))
bae1f5d9 4868 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 4869 }
63d2bc1b 4870}
1da177e4 4871
6039aa73 4872static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
4873{
4874 unsigned int cnt = hdev->acl_cnt;
4875 struct hci_chan *chan;
4876 struct sk_buff *skb;
4877 int quote;
4878
4879 __check_timeout(hdev, cnt);
04837f64 4880
73d80deb 4881 while (hdev->acl_cnt &&
a8c5fb1a 4882 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
4883 u32 priority = (skb_peek(&chan->data_q))->priority;
4884 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4885 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4886 skb->len, skb->priority);
73d80deb 4887
ec1cce24
LAD
4888 /* Stop if priority has changed */
4889 if (skb->priority < priority)
4890 break;
4891
4892 skb = skb_dequeue(&chan->data_q);
4893
73d80deb 4894 hci_conn_enter_active_mode(chan->conn,
04124681 4895 bt_cb(skb)->force_active);
04837f64 4896
57d17d70 4897 hci_send_frame(hdev, skb);
1da177e4
LT
4898 hdev->acl_last_tx = jiffies;
4899
4900 hdev->acl_cnt--;
73d80deb
LAD
4901 chan->sent++;
4902 chan->conn->sent++;
1da177e4
LT
4903 }
4904 }
02b20f0b
LAD
4905
4906 if (cnt != hdev->acl_cnt)
4907 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
4908}
4909
6039aa73 4910static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 4911{
63d2bc1b 4912 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
4913 struct hci_chan *chan;
4914 struct sk_buff *skb;
4915 int quote;
bd1eb66b 4916 u8 type;
b71d385a 4917
63d2bc1b 4918 __check_timeout(hdev, cnt);
b71d385a 4919
bd1eb66b
AE
4920 BT_DBG("%s", hdev->name);
4921
4922 if (hdev->dev_type == HCI_AMP)
4923 type = AMP_LINK;
4924 else
4925 type = ACL_LINK;
4926
b71d385a 4927 while (hdev->block_cnt > 0 &&
bd1eb66b 4928 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
4929 u32 priority = (skb_peek(&chan->data_q))->priority;
4930 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4931 int blocks;
4932
4933 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4934 skb->len, skb->priority);
b71d385a
AE
4935
4936 /* Stop if priority has changed */
4937 if (skb->priority < priority)
4938 break;
4939
4940 skb = skb_dequeue(&chan->data_q);
4941
4942 blocks = __get_blocks(hdev, skb);
4943 if (blocks > hdev->block_cnt)
4944 return;
4945
4946 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 4947 bt_cb(skb)->force_active);
b71d385a 4948
57d17d70 4949 hci_send_frame(hdev, skb);
b71d385a
AE
4950 hdev->acl_last_tx = jiffies;
4951
4952 hdev->block_cnt -= blocks;
4953 quote -= blocks;
4954
4955 chan->sent += blocks;
4956 chan->conn->sent += blocks;
4957 }
4958 }
4959
4960 if (cnt != hdev->block_cnt)
bd1eb66b 4961 hci_prio_recalculate(hdev, type);
b71d385a
AE
4962}
4963
6039aa73 4964static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
4965{
4966 BT_DBG("%s", hdev->name);
4967
bd1eb66b
AE
4968 /* No ACL link over BR/EDR controller */
4969 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4970 return;
4971
4972 /* No AMP link over AMP controller */
4973 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
4974 return;
4975
4976 switch (hdev->flow_ctl_mode) {
4977 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4978 hci_sched_acl_pkt(hdev);
4979 break;
4980
4981 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4982 hci_sched_acl_blk(hdev);
4983 break;
4984 }
4985}
4986
1da177e4 4987/* Schedule SCO */
6039aa73 4988static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
4989{
4990 struct hci_conn *conn;
4991 struct sk_buff *skb;
4992 int quote;
4993
4994 BT_DBG("%s", hdev->name);
4995
52087a79
LAD
4996 if (!hci_conn_num(hdev, SCO_LINK))
4997 return;
4998
1da177e4
LT
4999 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5000 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5001 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 5002 hci_send_frame(hdev, skb);
1da177e4
LT
5003
5004 conn->sent++;
5005 if (conn->sent == ~0)
5006 conn->sent = 0;
5007 }
5008 }
5009}
5010
6039aa73 5011static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
5012{
5013 struct hci_conn *conn;
5014 struct sk_buff *skb;
5015 int quote;
5016
5017 BT_DBG("%s", hdev->name);
5018
52087a79
LAD
5019 if (!hci_conn_num(hdev, ESCO_LINK))
5020 return;
5021
8fc9ced3
GP
5022 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5023 &quote))) {
b6a0dc82
MH
5024 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5025 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 5026 hci_send_frame(hdev, skb);
b6a0dc82
MH
5027
5028 conn->sent++;
5029 if (conn->sent == ~0)
5030 conn->sent = 0;
5031 }
5032 }
5033}
5034
6039aa73 5035static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 5036{
73d80deb 5037 struct hci_chan *chan;
6ed58ec5 5038 struct sk_buff *skb;
02b20f0b 5039 int quote, cnt, tmp;
6ed58ec5
VT
5040
5041 BT_DBG("%s", hdev->name);
5042
52087a79
LAD
5043 if (!hci_conn_num(hdev, LE_LINK))
5044 return;
5045
4a964404 5046 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
6ed58ec5
VT
5047 /* LE tx timeout must be longer than maximum
5048 * link supervision timeout (40.9 seconds) */
bae1f5d9 5049 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 5050 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 5051 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
5052 }
5053
5054 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 5055 tmp = cnt;
73d80deb 5056 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
5057 u32 priority = (skb_peek(&chan->data_q))->priority;
5058 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 5059 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 5060 skb->len, skb->priority);
6ed58ec5 5061
ec1cce24
LAD
5062 /* Stop if priority has changed */
5063 if (skb->priority < priority)
5064 break;
5065
5066 skb = skb_dequeue(&chan->data_q);
5067
57d17d70 5068 hci_send_frame(hdev, skb);
6ed58ec5
VT
5069 hdev->le_last_tx = jiffies;
5070
5071 cnt--;
73d80deb
LAD
5072 chan->sent++;
5073 chan->conn->sent++;
6ed58ec5
VT
5074 }
5075 }
73d80deb 5076
6ed58ec5
VT
5077 if (hdev->le_pkts)
5078 hdev->le_cnt = cnt;
5079 else
5080 hdev->acl_cnt = cnt;
02b20f0b
LAD
5081
5082 if (cnt != tmp)
5083 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
5084}
5085
3eff45ea 5086static void hci_tx_work(struct work_struct *work)
1da177e4 5087{
3eff45ea 5088 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
5089 struct sk_buff *skb;
5090
6ed58ec5 5091 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 5092 hdev->sco_cnt, hdev->le_cnt);
1da177e4 5093
52de599e
MH
5094 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5095 /* Schedule queues and send stuff to HCI driver */
5096 hci_sched_acl(hdev);
5097 hci_sched_sco(hdev);
5098 hci_sched_esco(hdev);
5099 hci_sched_le(hdev);
5100 }
6ed58ec5 5101
1da177e4
LT
5102 /* Send next queued raw (unknown type) packet */
5103 while ((skb = skb_dequeue(&hdev->raw_q)))
57d17d70 5104 hci_send_frame(hdev, skb);
1da177e4
LT
5105}
5106
25985edc 5107/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
5108
5109/* ACL data packet */
6039aa73 5110static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
5111{
5112 struct hci_acl_hdr *hdr = (void *) skb->data;
5113 struct hci_conn *conn;
5114 __u16 handle, flags;
5115
5116 skb_pull(skb, HCI_ACL_HDR_SIZE);
5117
5118 handle = __le16_to_cpu(hdr->handle);
5119 flags = hci_flags(handle);
5120 handle = hci_handle(handle);
5121
f0e09510 5122 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 5123 handle, flags);
1da177e4
LT
5124
5125 hdev->stat.acl_rx++;
5126
5127 hci_dev_lock(hdev);
5128 conn = hci_conn_hash_lookup_handle(hdev, handle);
5129 hci_dev_unlock(hdev);
8e87d142 5130
1da177e4 5131 if (conn) {
65983fc7 5132 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 5133
1da177e4 5134 /* Send to upper protocol */
686ebf28
UF
5135 l2cap_recv_acldata(conn, skb, flags);
5136 return;
1da177e4 5137 } else {
8e87d142 5138 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 5139 hdev->name, handle);
1da177e4
LT
5140 }
5141
5142 kfree_skb(skb);
5143}
5144
5145/* SCO data packet */
6039aa73 5146static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
5147{
5148 struct hci_sco_hdr *hdr = (void *) skb->data;
5149 struct hci_conn *conn;
5150 __u16 handle;
5151
5152 skb_pull(skb, HCI_SCO_HDR_SIZE);
5153
5154 handle = __le16_to_cpu(hdr->handle);
5155
f0e09510 5156 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
5157
5158 hdev->stat.sco_rx++;
5159
5160 hci_dev_lock(hdev);
5161 conn = hci_conn_hash_lookup_handle(hdev, handle);
5162 hci_dev_unlock(hdev);
5163
5164 if (conn) {
1da177e4 5165 /* Send to upper protocol */
686ebf28
UF
5166 sco_recv_scodata(conn, skb);
5167 return;
1da177e4 5168 } else {
8e87d142 5169 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 5170 hdev->name, handle);
1da177e4
LT
5171 }
5172
5173 kfree_skb(skb);
5174}
5175
9238f36a
JH
5176static bool hci_req_is_complete(struct hci_dev *hdev)
5177{
5178 struct sk_buff *skb;
5179
5180 skb = skb_peek(&hdev->cmd_q);
5181 if (!skb)
5182 return true;
5183
5184 return bt_cb(skb)->req.start;
5185}
5186
42c6b129
JH
5187static void hci_resend_last(struct hci_dev *hdev)
5188{
5189 struct hci_command_hdr *sent;
5190 struct sk_buff *skb;
5191 u16 opcode;
5192
5193 if (!hdev->sent_cmd)
5194 return;
5195
5196 sent = (void *) hdev->sent_cmd->data;
5197 opcode = __le16_to_cpu(sent->opcode);
5198 if (opcode == HCI_OP_RESET)
5199 return;
5200
5201 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5202 if (!skb)
5203 return;
5204
5205 skb_queue_head(&hdev->cmd_q, skb);
5206 queue_work(hdev->workqueue, &hdev->cmd_work);
5207}
5208
9238f36a
JH
5209void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5210{
5211 hci_req_complete_t req_complete = NULL;
5212 struct sk_buff *skb;
5213 unsigned long flags;
5214
5215 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5216
42c6b129
JH
5217 /* If the completed command doesn't match the last one that was
 5218 * sent, we need to handle it specially.
9238f36a 5219 */
42c6b129
JH
5220 if (!hci_sent_cmd_data(hdev, opcode)) {
5221 /* Some CSR based controllers generate a spontaneous
5222 * reset complete event during init and any pending
5223 * command will never be completed. In such a case we
5224 * need to resend whatever was the last sent
5225 * command.
5226 */
5227 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5228 hci_resend_last(hdev);
5229
9238f36a 5230 return;
42c6b129 5231 }
9238f36a
JH
5232
 5233 /* If the command succeeded and there are still more commands in
 5234 * this request, the request is not yet complete.
5235 */
5236 if (!status && !hci_req_is_complete(hdev))
5237 return;
5238
5239 /* If this was the last command in a request the complete
5240 * callback would be found in hdev->sent_cmd instead of the
5241 * command queue (hdev->cmd_q).
5242 */
5243 if (hdev->sent_cmd) {
5244 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
53e21fbc
JH
5245
5246 if (req_complete) {
5247 /* We must set the complete callback to NULL to
5248 * avoid calling the callback more than once if
5249 * this function gets called again.
5250 */
5251 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5252
9238f36a 5253 goto call_complete;
53e21fbc 5254 }
9238f36a
JH
5255 }
5256
5257 /* Remove all pending commands belonging to this request */
5258 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5259 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5260 if (bt_cb(skb)->req.start) {
5261 __skb_queue_head(&hdev->cmd_q, skb);
5262 break;
5263 }
5264
5265 req_complete = bt_cb(skb)->req.complete;
5266 kfree_skb(skb);
5267 }
5268 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5269
5270call_complete:
5271 if (req_complete)
5272 req_complete(hdev, status);
5273}
5274
b78752cc 5275static void hci_rx_work(struct work_struct *work)
1da177e4 5276{
b78752cc 5277 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
5278 struct sk_buff *skb;
5279
5280 BT_DBG("%s", hdev->name);
5281
1da177e4 5282 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
5283 /* Send copy to monitor */
5284 hci_send_to_monitor(hdev, skb);
5285
1da177e4
LT
5286 if (atomic_read(&hdev->promisc)) {
5287 /* Send copy to the sockets */
470fe1b5 5288 hci_send_to_sock(hdev, skb);
1da177e4
LT
5289 }
5290
fee746b0 5291 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1da177e4
LT
5292 kfree_skb(skb);
5293 continue;
5294 }
5295
5296 if (test_bit(HCI_INIT, &hdev->flags)) {
 5297 /* Don't process data packets in this state. */
0d48d939 5298 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
5299 case HCI_ACLDATA_PKT:
5300 case HCI_SCODATA_PKT:
5301 kfree_skb(skb);
5302 continue;
3ff50b79 5303 }
1da177e4
LT
5304 }
5305
5306 /* Process frame */
0d48d939 5307 switch (bt_cb(skb)->pkt_type) {
1da177e4 5308 case HCI_EVENT_PKT:
b78752cc 5309 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
5310 hci_event_packet(hdev, skb);
5311 break;
5312
5313 case HCI_ACLDATA_PKT:
5314 BT_DBG("%s ACL data packet", hdev->name);
5315 hci_acldata_packet(hdev, skb);
5316 break;
5317
5318 case HCI_SCODATA_PKT:
5319 BT_DBG("%s SCO data packet", hdev->name);
5320 hci_scodata_packet(hdev, skb);
5321 break;
5322
5323 default:
5324 kfree_skb(skb);
5325 break;
5326 }
5327 }
1da177e4
LT
5328}
5329
c347b765 5330static void hci_cmd_work(struct work_struct *work)
1da177e4 5331{
c347b765 5332 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
5333 struct sk_buff *skb;
5334
2104786b
AE
5335 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5336 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 5337
1da177e4 5338 /* Send queued commands */
5a08ecce
AE
5339 if (atomic_read(&hdev->cmd_cnt)) {
5340 skb = skb_dequeue(&hdev->cmd_q);
5341 if (!skb)
5342 return;
5343
7585b97a 5344 kfree_skb(hdev->sent_cmd);
1da177e4 5345
a675d7f1 5346 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 5347 if (hdev->sent_cmd) {
1da177e4 5348 atomic_dec(&hdev->cmd_cnt);
57d17d70 5349 hci_send_frame(hdev, skb);
7bdb8a5c 5350 if (test_bit(HCI_RESET, &hdev->flags))
65cc2b49 5351 cancel_delayed_work(&hdev->cmd_timer);
7bdb8a5c 5352 else
65cc2b49
MH
5353 schedule_delayed_work(&hdev->cmd_timer,
5354 HCI_CMD_TIMEOUT);
1da177e4
LT
5355 } else {
5356 skb_queue_head(&hdev->cmd_q, skb);
c347b765 5357 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
5358 }
5359 }
5360}
b1efcc28
AG
5361
5362void hci_req_add_le_scan_disable(struct hci_request *req)
5363{
5364 struct hci_cp_le_set_scan_enable cp;
5365
5366 memset(&cp, 0, sizeof(cp));
5367 cp.enable = LE_SCAN_DISABLE;
5368 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5369}
a4790dbd 5370
8ef30fd3
AG
5371void hci_req_add_le_passive_scan(struct hci_request *req)
5372{
5373 struct hci_cp_le_set_scan_param param_cp;
5374 struct hci_cp_le_set_scan_enable enable_cp;
5375 struct hci_dev *hdev = req->hdev;
5376 u8 own_addr_type;
5377
6ab535a7
MH
 5378 /* Set require_privacy to false since no SCAN_REQ are sent
5379 * during passive scanning. Not using an unresolvable address
5380 * here is important so that peer devices using direct
5381 * advertising with our address will be correctly reported
5382 * by the controller.
8ef30fd3 5383 */
6ab535a7 5384 if (hci_update_random_address(req, false, &own_addr_type))
8ef30fd3
AG
5385 return;
5386
5387 memset(&param_cp, 0, sizeof(param_cp));
5388 param_cp.type = LE_SCAN_PASSIVE;
5389 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5390 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5391 param_cp.own_address_type = own_addr_type;
5392 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5393 &param_cp);
5394
5395 memset(&enable_cp, 0, sizeof(enable_cp));
5396 enable_cp.enable = LE_SCAN_ENABLE;
4340a124 5397 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
8ef30fd3
AG
5398 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5399 &enable_cp);
5400}
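/* Worked example: with the defaults from hci_alloc_dev() above
 * (interval 0x0060, window 0x0030, both in 0.625 ms units), the
 * controller scans for 30 ms out of every 60 ms, a 50% duty cycle.
 */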
5401
a4790dbd
AG
5402static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5403{
5404 if (status)
5405 BT_DBG("HCI request failed to update background scanning: "
5406 "status 0x%2.2x", status);
5407}
5408
5409/* This function controls the background scanning based on hdev->pend_le_conns
 5410 * list. If there are pending LE connections we start the background scanning,
5411 * otherwise we stop it.
5412 *
5413 * This function requires the caller holds hdev->lock.
5414 */
5415void hci_update_background_scan(struct hci_dev *hdev)
5416{
a4790dbd
AG
5417 struct hci_request req;
5418 struct hci_conn *conn;
5419 int err;
5420
c20c02d5
MH
5421 if (!test_bit(HCI_UP, &hdev->flags) ||
5422 test_bit(HCI_INIT, &hdev->flags) ||
5423 test_bit(HCI_SETUP, &hdev->dev_flags) ||
d603b76b 5424 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
b8221770 5425 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
c20c02d5 5426 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
1c1697c0
MH
5427 return;
5428
a70f4b5f
JH
5429 /* No point in doing scanning if LE support hasn't been enabled */
5430 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5431 return;
5432
ae23ada4
JH
5433 /* If discovery is active don't interfere with it */
5434 if (hdev->discovery.state != DISCOVERY_STOPPED)
5435 return;
5436
a4790dbd
AG
5437 hci_req_init(&req, hdev);
5438
2b7be33e
JH
5439 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
5440 list_empty(&hdev->pend_le_conns) &&
66f8455a 5441 list_empty(&hdev->pend_le_reports)) {
0d2bf134
JH
 5442 /* If there are no pending LE connections or devices
5443 * to be scanned for, we should stop the background
5444 * scanning.
a4790dbd
AG
5445 */
5446
5447 /* If controller is not scanning we are done. */
5448 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5449 return;
5450
5451 hci_req_add_le_scan_disable(&req);
5452
5453 BT_DBG("%s stopping background scanning", hdev->name);
5454 } else {
a4790dbd
AG
5455 /* If there is at least one pending LE connection, we should
5456 * keep the background scan running.
5457 */
5458
a4790dbd
AG
5459 /* If controller is connecting, we should not start scanning
5460 * since some controllers are not able to scan and connect at
5461 * the same time.
5462 */
5463 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5464 if (conn)
5465 return;
5466
4340a124
AG
5467 /* If controller is currently scanning, we stop it to ensure we
5468 * don't miss any advertising (due to duplicates filter).
5469 */
5470 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5471 hci_req_add_le_scan_disable(&req);
5472
8ef30fd3 5473 hci_req_add_le_passive_scan(&req);
a4790dbd
AG
5474
5475 BT_DBG("%s starting background scanning", hdev->name);
5476 }
5477
5478 err = hci_req_run(&req, update_background_scan_complete);
5479 if (err)
5480 BT_ERR("Failed to run HCI request: err %d", err);
5481}