/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dev_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
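
/* A usage sketch from userspace (assuming debugfs is mounted at
 * /sys/kernel/debug and the controller is hci0):
 *
 *	cat /sys/kernel/debug/bluetooth/hci0/dut_mode    -> "N"
 *	echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * Writing 'Y' while the device is up sends HCI_OP_ENABLE_DUT_MODE to the
 * controller; writing 'N' resets it. The path follows from the
 * debugfs_create_file("dut_mode", ...) call in __hci_init() below.
 */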

static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
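		/* For example, the 16-bit Audio Sink UUID 0x110b, which
		 * expands to 0000110b-0000-1000-8000-00805f9b34fb, is
		 * stored as fb 34 9b 5f 80 00 00 80 00 10 00 00 0b 11 00 00
		 * and is printed below in its canonical expanded form.
		 */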
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");
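
/* DEFINE_SIMPLE_ATTRIBUTE() generates the file_operations (open, read,
 * write, llseek) for a single u64 value from the given get and set
 * callbacks plus a printf format; passing NULL for the set callback, as
 * above, makes the resulting debugfs file read-only.
 */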

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static int ssp_debug_mode_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;
	struct sk_buff *skb;
	__u8 mode;
	int err;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	hci_req_lock(hdev);
	mode = val;
	skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
			     &mode, HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	hci_dev_lock(hdev);
	hdev->ssp_debug_mode = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->ssp_debug_mode;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
			ssp_debug_mode_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t *addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		addr = &hdev->static_addr;
		addr_type = ADDR_LE_DEV_RANDOM;
	} else {
		addr = &hdev->bdaddr;
		addr_type = ADDR_LE_DEV_PUBLIC;
	}

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
		struct smp_irk *irk = list_entry(p, struct smp_irk, list);
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %*phN %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   8, ltk->rand, 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");
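
/* The value stored here is the LE advertising channel map bitmask: bit 0
 * enables channel 37, bit 1 channel 38 and bit 2 channel 39, hence the
 * accepted range of 0x01 (a single channel) to 0x07 (all three).
 */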

static ssize_t lowpan_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
			    size_t count, loff_t *position)
{
	struct hci_dev *hdev = fp->private_data;
	bool enable;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));

	if (copy_from_user(buf, user_buffer, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

	if (strtobool(buf, &enable) < 0)
		return -EINVAL;

	if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);

	return count;
}

static const struct file_operations lowpan_debugfs_fops = {
	.open		= simple_open,
	.read		= lowpan_read,
	.write		= lowpan_write,
	.llseek		= default_llseek,
};

static int le_auto_conn_show(struct seq_file *sf, void *ptr)
{
	struct hci_dev *hdev = sf->private;
	struct hci_conn_params *p;

	hci_dev_lock(hdev);

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(sf, "%pMR %u %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int le_auto_conn_open(struct inode *inode, struct file *file)
{
	return single_open(file, le_auto_conn_show, inode->i_private);
}

static ssize_t le_auto_conn_write(struct file *file, const char __user *data,
				  size_t count, loff_t *offset)
{
	struct seq_file *sf = file->private_data;
	struct hci_dev *hdev = sf->private;
	u8 auto_connect = 0;
	bdaddr_t addr;
	u8 addr_type;
	char *buf;
	int err = 0;
	int n;

	/* Don't allow partial write */
	if (*offset != 0)
		return -EINVAL;

	if (count < 3)
		return -EINVAL;

	buf = kzalloc(count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, data, count)) {
		err = -EFAULT;
		goto done;
	}

	if (memcmp(buf, "add", 3) == 0) {
		n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu",
			   &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
			   &addr.b[1], &addr.b[0], &addr_type,
			   &auto_connect);

		if (n < 7) {
			err = -EINVAL;
			goto done;
		}

		hci_dev_lock(hdev);
		err = hci_conn_params_add(hdev, &addr, addr_type, auto_connect,
					  hdev->le_conn_min_interval,
					  hdev->le_conn_max_interval);
		hci_dev_unlock(hdev);

		if (err)
			goto done;
	} else if (memcmp(buf, "del", 3) == 0) {
		n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
			   &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
			   &addr.b[1], &addr.b[0], &addr_type);

		if (n < 7) {
			err = -EINVAL;
			goto done;
		}

		hci_dev_lock(hdev);
		hci_conn_params_del(hdev, &addr, addr_type);
		hci_dev_unlock(hdev);
	} else if (memcmp(buf, "clr", 3) == 0) {
		hci_dev_lock(hdev);
		hci_conn_params_clear(hdev);
		hci_pend_le_conns_clear(hdev);
		hci_update_background_scan(hdev);
		hci_dev_unlock(hdev);
	} else {
		err = -EINVAL;
	}

done:
	kfree(buf);

	if (err)
		return err;
	else
		return count;
}

static const struct file_operations le_auto_conn_fops = {
	.open		= le_auto_conn_open,
	.read		= seq_read,
	.write		= le_auto_conn_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};
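
/* Write formats accepted by le_auto_conn_write() above (the address and
 * numeric values are illustrative):
 *
 *	add 00:11:22:33:44:55 <addr_type> [auto_connect]
 *	del 00:11:22:33:44:55 <addr_type>
 *	clr
 *
 * The trailing auto_connect field of "add" is optional; the sscanf()
 * checks only insist on the six address bytes plus the address type.
 */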

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
1031
77a63e0a
FW
1032static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
1033 u8 event)
75e84b7c
JH
1034{
1035 struct hci_ev_cmd_complete *ev;
1036 struct hci_event_hdr *hdr;
1037 struct sk_buff *skb;
1038
1039 hci_dev_lock(hdev);
1040
1041 skb = hdev->recv_evt;
1042 hdev->recv_evt = NULL;
1043
1044 hci_dev_unlock(hdev);
1045
1046 if (!skb)
1047 return ERR_PTR(-ENODATA);
1048
1049 if (skb->len < sizeof(*hdr)) {
1050 BT_ERR("Too short HCI event");
1051 goto failed;
1052 }
1053
1054 hdr = (void *) skb->data;
1055 skb_pull(skb, HCI_EVENT_HDR_SIZE);
1056
7b1abbbe
JH
1057 if (event) {
1058 if (hdr->evt != event)
1059 goto failed;
1060 return skb;
1061 }
1062
75e84b7c
JH
1063 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
1064 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
1065 goto failed;
1066 }
1067
1068 if (skb->len < sizeof(*ev)) {
1069 BT_ERR("Too short cmd_complete event");
1070 goto failed;
1071 }
1072
1073 ev = (void *) skb->data;
1074 skb_pull(skb, sizeof(*ev));
1075
1076 if (opcode == __le16_to_cpu(ev->opcode))
1077 return skb;
1078
1079 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
1080 __le16_to_cpu(ev->opcode));
1081
1082failed:
1083 kfree_skb(skb);
1084 return ERR_PTR(-ENODATA);
1085}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
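
/* A minimal caller sketch for __hci_cmd_sync(); this mirrors how
 * dut_mode_write() above uses it. The returned skb carries the command's
 * return parameters, starting with the HCI status byte:
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	err = -bt_to_errno(skb->data[0]);
 *	kfree_skb(skb);
 */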

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}
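
/* The value returned above feeds HCI_OP_WRITE_INQUIRY_MODE: 0x00 selects
 * standard inquiry results, 0x01 inquiry results with RSSI and 0x02 the
 * extended inquiry result format. The manufacturer/revision special cases
 * cover controllers that support RSSI reporting without advertising it in
 * their feature bits.
 */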

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev))
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev))
		hci_set_le_support(req);

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
				    hdev, &ssp_debug_mode_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_file("identity", 0400, hdev->debugfs,
				    hdev, &identity_fops);
		debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
				    hdev, &rpa_timeout_fops);
		debugfs_create_file("random_address", 0444, hdev->debugfs,
				    hdev, &random_address_fops);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

		/* For controllers with a public address, provide a debug
		 * option to force the usage of the configured static
		 * address. By default the public address is used.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			debugfs_create_file("force_static_address", 0644,
					    hdev->debugfs, hdev,
					    &force_static_address_fops);

		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("identity_resolving_keys", 0400,
				    hdev->debugfs, hdev,
				    &identity_resolving_keys_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
				    hdev, &adv_channel_map_fops);
		debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
				    &lowpan_debugfs_fops);
		debugfs_create_file("le_auto_conn", 0644, hdev->debugfs, hdev,
				    &le_auto_conn_fops);
	}

	return 0;
}
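
/* To summarize the staged bring-up above: hci_init1_req resets the
 * controller and reads its basic identity (features, version, address),
 * hci_init2_req performs BR/EDR and LE transport setup plus the event
 * mask, hci_init3_req handles stored link keys, link policy, LE host
 * support and extended feature pages, and hci_init4_req covers event mask
 * page 2, synchronization train parameters and Secure Connections. The
 * debugfs entries are created only once, while HCI_SETUP is set.
 */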

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
1918
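/* For orientation: the switch above implements the usual progression
 *
 *   DISCOVERY_STOPPED -> DISCOVERY_STARTING -> DISCOVERY_FINDING
 *     [-> DISCOVERY_RESOLVING] -> DISCOVERY_STOPPING -> DISCOVERY_STOPPED
 *
 * with mgmt_discovering() signalled only on the FINDING and STOPPED
 * transitions.
 */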
1f9b9a5d 1919void hci_inquiry_cache_flush(struct hci_dev *hdev)
1da177e4 1920{
30883512 1921 struct discovery_state *cache = &hdev->discovery;
b57c1a56 1922 struct inquiry_entry *p, *n;
1da177e4 1923
561aafbc
JH
1924 list_for_each_entry_safe(p, n, &cache->all, all) {
1925 list_del(&p->all);
b57c1a56 1926 kfree(p);
1da177e4 1927 }
561aafbc
JH
1928
1929 INIT_LIST_HEAD(&cache->unknown);
1930 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
1931}
1932
a8c5fb1a
GP
1933struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1934 bdaddr_t *bdaddr)
1da177e4 1935{
30883512 1936 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
1937 struct inquiry_entry *e;
1938
6ed93dc6 1939 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 1940
561aafbc
JH
1941 list_for_each_entry(e, &cache->all, all) {
1942 if (!bacmp(&e->data.bdaddr, bdaddr))
1943 return e;
1944 }
1945
1946 return NULL;
1947}
1948
1949struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 1950 bdaddr_t *bdaddr)
561aafbc 1951{
30883512 1952 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
1953 struct inquiry_entry *e;
1954
6ed93dc6 1955 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
1956
1957 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 1958 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
1959 return e;
1960 }
1961
1962 return NULL;
1da177e4
LT
1963}
1964
30dc78e1 1965struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
1966 bdaddr_t *bdaddr,
1967 int state)
30dc78e1
JH
1968{
1969 struct discovery_state *cache = &hdev->discovery;
1970 struct inquiry_entry *e;
1971
6ed93dc6 1972 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
1973
1974 list_for_each_entry(e, &cache->resolve, list) {
1975 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1976 return e;
1977 if (!bacmp(&e->data.bdaddr, bdaddr))
1978 return e;
1979 }
1980
1981 return NULL;
1982}
1983
a3d4e20a 1984void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
04124681 1985 struct inquiry_entry *ie)
a3d4e20a
JH
1986{
1987 struct discovery_state *cache = &hdev->discovery;
1988 struct list_head *pos = &cache->resolve;
1989 struct inquiry_entry *p;
1990
1991 list_del(&ie->list);
1992
1993 list_for_each_entry(p, &cache->resolve, list) {
1994 if (p->name_state != NAME_PENDING &&
a8c5fb1a 1995 abs(p->data.rssi) >= abs(ie->data.rssi))
a3d4e20a 1996 break;
1997 pos = &p->list;
1998 }
1999
2000 list_add(&ie->list, pos);
2001}
2002
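/* Worked example: the resolve list is kept sorted by ascending
 * abs(rssi), i.e. strongest signal first. With entries at RSSI -40,
 * -60 and -75, re-inserting an entry whose RSSI changed to -55 places
 * it between -40 and -60, so its name is resolved before the weaker
 * devices.
 */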
3175405b 2003bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
04124681 2004 bool name_known, bool *ssp)
1da177e4 2005{
30883512 2006 struct discovery_state *cache = &hdev->discovery;
70f23020 2007 struct inquiry_entry *ie;
1da177e4 2008
6ed93dc6 2009 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1da177e4 2010
2b2fec4d
SJ
2011 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2012
388fc8fa
JH
2013 if (ssp)
2014 *ssp = data->ssp_mode;
2015
70f23020 2016 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
a3d4e20a 2017 if (ie) {
388fc8fa
JH
2018 if (ie->data.ssp_mode && ssp)
2019 *ssp = true;
2020
a3d4e20a 2021 if (ie->name_state == NAME_NEEDED &&
a8c5fb1a 2022 data->rssi != ie->data.rssi) {
a3d4e20a
JH
2023 ie->data.rssi = data->rssi;
2024 hci_inquiry_cache_update_resolve(hdev, ie);
2025 }
2026
561aafbc 2027 goto update;
a3d4e20a 2028 }
561aafbc
JH
2029
2030 /* Entry not in the cache. Add new one. */
2031 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2032 if (!ie)
3175405b 2033 return false;
561aafbc
JH
2034
2035 list_add(&ie->all, &cache->all);
2036
2037 if (name_known) {
2038 ie->name_state = NAME_KNOWN;
2039 } else {
2040 ie->name_state = NAME_NOT_KNOWN;
2041 list_add(&ie->list, &cache->unknown);
2042 }
70f23020 2043
561aafbc
JH
2044update:
2045 if (name_known && ie->name_state != NAME_KNOWN &&
a8c5fb1a 2046 ie->name_state != NAME_PENDING) {
561aafbc
JH
2047 ie->name_state = NAME_KNOWN;
2048 list_del(&ie->list);
1da177e4
LT
2049 }
2050
70f23020
AE
2051 memcpy(&ie->data, data, sizeof(*data));
2052 ie->timestamp = jiffies;
1da177e4 2053 cache->timestamp = jiffies;
3175405b
JH
2054
2055 if (ie->name_state == NAME_NOT_KNOWN)
2056 return false;
2057
2058 return true;
1da177e4
LT
2059}
2060
2061static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2062{
30883512 2063 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
2064 struct inquiry_info *info = (struct inquiry_info *) buf;
2065 struct inquiry_entry *e;
2066 int copied = 0;
2067
561aafbc 2068 list_for_each_entry(e, &cache->all, all) {
1da177e4 2069 struct inquiry_data *data = &e->data;
b57c1a56
JH
2070
2071 if (copied >= num)
2072 break;
2073
1da177e4
LT
2074 bacpy(&info->bdaddr, &data->bdaddr);
2075 info->pscan_rep_mode = data->pscan_rep_mode;
2076 info->pscan_period_mode = data->pscan_period_mode;
2077 info->pscan_mode = data->pscan_mode;
2078 memcpy(info->dev_class, data->dev_class, 3);
2079 info->clock_offset = data->clock_offset;
b57c1a56 2080
1da177e4 2081 info++;
b57c1a56 2082 copied++;
1da177e4
LT
2083 }
2084
2085 BT_DBG("cache %p, copied %d", cache, copied);
2086 return copied;
2087}
2088
42c6b129 2089static void hci_inq_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
2090{
2091 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
42c6b129 2092 struct hci_dev *hdev = req->hdev;
1da177e4
LT
2093 struct hci_cp_inquiry cp;
2094
2095 BT_DBG("%s", hdev->name);
2096
2097 if (test_bit(HCI_INQUIRY, &hdev->flags))
2098 return;
2099
2100 /* Start Inquiry */
2101 memcpy(&cp.lap, &ir->lap, 3);
2102 cp.length = ir->length;
2103 cp.num_rsp = ir->num_rsp;
42c6b129 2104 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
2105}
2106
3e13fa1e
AG
2107static int wait_inquiry(void *word)
2108{
2109 schedule();
2110 return signal_pending(current);
2111}
2112
1da177e4
LT
2113int hci_inquiry(void __user *arg)
2114{
2115 __u8 __user *ptr = arg;
2116 struct hci_inquiry_req ir;
2117 struct hci_dev *hdev;
2118 int err = 0, do_inquiry = 0, max_rsp;
2119 long timeo;
2120 __u8 *buf;
2121
2122 if (copy_from_user(&ir, ptr, sizeof(ir)))
2123 return -EFAULT;
2124
5a08ecce
AE
2125 hdev = hci_dev_get(ir.dev_id);
2126 if (!hdev)
1da177e4
LT
2127 return -ENODEV;
2128
0736cfa8
MH
2129 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2130 err = -EBUSY;
2131 goto done;
2132 }
2133
5b69bef5
MH
2134 if (hdev->dev_type != HCI_BREDR) {
2135 err = -EOPNOTSUPP;
2136 goto done;
2137 }
2138
56f87901
JH
2139 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2140 err = -EOPNOTSUPP;
2141 goto done;
2142 }
2143
09fd0de5 2144 hci_dev_lock(hdev);
8e87d142 2145 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 2146 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1f9b9a5d 2147 hci_inquiry_cache_flush(hdev);
1da177e4
LT
2148 do_inquiry = 1;
2149 }
09fd0de5 2150 hci_dev_unlock(hdev);
1da177e4 2151
04837f64 2152 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
2153
2154 if (do_inquiry) {
01178cd4
JH
2155 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2156 timeo);
70f23020
AE
2157 if (err < 0)
2158 goto done;
3e13fa1e
AG
2159
2160 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2161 * cleared). If it is interrupted by a signal, return -EINTR.
2162 */
2163 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2164 TASK_INTERRUPTIBLE))
2165 return -EINTR;
70f23020 2166 }
1da177e4 2167
8fc9ced3 2168 /* For an unlimited number of responses we use a buffer with
2169 * 255 entries.
2170 */
1da177e4
LT
2171 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2172
2173 /* cache_dump can't sleep. Therefore we allocate a temp buffer and
2174 * then copy it to user space.
2175 */
01df8c31 2176 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 2177 if (!buf) {
1da177e4
LT
2178 err = -ENOMEM;
2179 goto done;
2180 }
2181
09fd0de5 2182 hci_dev_lock(hdev);
1da177e4 2183 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 2184 hci_dev_unlock(hdev);
1da177e4
LT
2185
2186 BT_DBG("num_rsp %d", ir.num_rsp);
2187
2188 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2189 ptr += sizeof(ir);
2190 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 2191 ir.num_rsp))
1da177e4 2192 err = -EFAULT;
8e87d142 2193 } else
1da177e4
LT
2194 err = -EFAULT;
2195
2196 kfree(buf);
2197
2198done:
2199 hci_dev_put(hdev);
2200 return err;
2201}
2202
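/* Userspace counterpart, a minimal sketch assuming the matching BlueZ
 * definitions from <bluetooth/bluetooth.h> and <bluetooth/hci.h>
 * (shown as a comment since it is not kernel code):
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[255];
 *	} buf;
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	memset(&buf, 0, sizeof(buf));
 *	buf.ir.dev_id  = 0;                // hci0
 *	buf.ir.flags   = IREQ_CACHE_FLUSH; // drop the cached results
 *	buf.ir.lap[0]  = 0x33;             // GIAC 0x9e8b33
 *	buf.ir.lap[1]  = 0x8b;
 *	buf.ir.lap[2]  = 0x9e;
 *	buf.ir.length  = 8;                // inquiry length, 1.28s units
 *	buf.ir.num_rsp = 255;              // 0 also means "up to 255"
 *
 *	if (ioctl(dd, HCIINQUIRY, &buf) < 0)
 *		perror("HCIINQUIRY");
 */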
cbed0ca1 2203static int hci_dev_do_open(struct hci_dev *hdev)
1da177e4 2204{
1da177e4
LT
2205 int ret = 0;
2206
1da177e4
LT
2207 BT_DBG("%s %p", hdev->name, hdev);
2208
2209 hci_req_lock(hdev);
2210
94324962
JH
2211 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2212 ret = -ENODEV;
2213 goto done;
2214 }
2215
a5c8f270
MH
2216 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
2217 /* Check for rfkill but allow the HCI setup stage to
2218 * proceed (which in itself doesn't cause any RF activity).
2219 */
2220 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2221 ret = -ERFKILL;
2222 goto done;
2223 }
2224
2225 /* Check for a valid public address or a configured static
2226 * random address, but let the HCI setup proceed to
2227 * be able to determine if there is a public address
2228 * or not.
2229 *
c6beca0e 2230 * In case of user channel usage, it is not important
2231 * if a public address or static random address is
2232 * available.
2233 *
a5c8f270 2234 * This check is only valid for BR/EDR controllers
2235 * since AMP controllers do not have an address.
2236 */
c6beca0e 2237 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2238 hdev->dev_type == HCI_BREDR &&
a5c8f270 2239 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2240 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2241 ret = -EADDRNOTAVAIL;
2242 goto done;
2243 }
611b30f7
MH
2244 }
2245
1da177e4
LT
2246 if (test_bit(HCI_UP, &hdev->flags)) {
2247 ret = -EALREADY;
2248 goto done;
2249 }
2250
1da177e4
LT
2251 if (hdev->open(hdev)) {
2252 ret = -EIO;
2253 goto done;
2254 }
2255
f41c70c4
MH
2256 atomic_set(&hdev->cmd_cnt, 1);
2257 set_bit(HCI_INIT, &hdev->flags);
2258
2259 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
2260 ret = hdev->setup(hdev);
2261
2262 if (!ret) {
f41c70c4
MH
2263 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2264 set_bit(HCI_RAW, &hdev->flags);
2265
0736cfa8
MH
2266 if (!test_bit(HCI_RAW, &hdev->flags) &&
2267 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
f41c70c4 2268 ret = __hci_init(hdev);
1da177e4
LT
2269 }
2270
f41c70c4
MH
2271 clear_bit(HCI_INIT, &hdev->flags);
2272
1da177e4
LT
2273 if (!ret) {
2274 hci_dev_hold(hdev);
d6bfd59c 2275 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1da177e4
LT
2276 set_bit(HCI_UP, &hdev->flags);
2277 hci_notify(hdev, HCI_DEV_UP);
bb4b2a9a 2278 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
0736cfa8 2279 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1514b892 2280 hdev->dev_type == HCI_BREDR) {
09fd0de5 2281 hci_dev_lock(hdev);
744cf19e 2282 mgmt_powered(hdev, 1);
09fd0de5 2283 hci_dev_unlock(hdev);
56e5cb86 2284 }
8e87d142 2285 } else {
1da177e4 2286 /* Init failed, cleanup */
3eff45ea 2287 flush_work(&hdev->tx_work);
c347b765 2288 flush_work(&hdev->cmd_work);
b78752cc 2289 flush_work(&hdev->rx_work);
1da177e4
LT
2290
2291 skb_queue_purge(&hdev->cmd_q);
2292 skb_queue_purge(&hdev->rx_q);
2293
2294 if (hdev->flush)
2295 hdev->flush(hdev);
2296
2297 if (hdev->sent_cmd) {
2298 kfree_skb(hdev->sent_cmd);
2299 hdev->sent_cmd = NULL;
2300 }
2301
2302 hdev->close(hdev);
2303 hdev->flags = 0;
2304 }
2305
2306done:
2307 hci_req_unlock(hdev);
1da177e4
LT
2308 return ret;
2309}
2310
cbed0ca1
JH
2311/* ---- HCI ioctl helpers ---- */
2312
2313int hci_dev_open(__u16 dev)
2314{
2315 struct hci_dev *hdev;
2316 int err;
2317
2318 hdev = hci_dev_get(dev);
2319 if (!hdev)
2320 return -ENODEV;
2321
e1d08f40
JH
2322 /* We need to ensure that no other power on/off work is pending
2323 * before proceeding to call hci_dev_do_open. This is
2324 * particularly important if the setup procedure has not yet
2325 * completed.
2326 */
2327 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2328 cancel_delayed_work(&hdev->power_off);
2329
a5c8f270
MH
2330 /* After this call it is guaranteed that the setup procedure
2331 * has finished. This means that error conditions like RFKILL
2332 * or no valid public or static random address apply.
2333 */
e1d08f40
JH
2334 flush_workqueue(hdev->req_workqueue);
2335
cbed0ca1
JH
2336 err = hci_dev_do_open(hdev);
2337
2338 hci_dev_put(hdev);
2339
2340 return err;
2341}
2342
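/* Userspace counterpart, a minimal sketch assuming the BlueZ ioctl
 * definitions (as a comment since it is not kernel code):
 *
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	if (ioctl(dd, HCIDEVUP, 0) < 0 && errno != EALREADY)
 *		perror("bringing up hci0");
 */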
1da177e4
LT
2343static int hci_dev_do_close(struct hci_dev *hdev)
2344{
2345 BT_DBG("%s %p", hdev->name, hdev);
2346
78c04c0b
VCG
2347 cancel_delayed_work(&hdev->power_off);
2348
1da177e4
LT
2349 hci_req_cancel(hdev, ENODEV);
2350 hci_req_lock(hdev);
2351
2352 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
b79f44c1 2353 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
2354 hci_req_unlock(hdev);
2355 return 0;
2356 }
2357
3eff45ea
GP
2358 /* Flush RX and TX works */
2359 flush_work(&hdev->tx_work);
b78752cc 2360 flush_work(&hdev->rx_work);
1da177e4 2361
16ab91ab 2362 if (hdev->discov_timeout > 0) {
e0f9309f 2363 cancel_delayed_work(&hdev->discov_off);
16ab91ab 2364 hdev->discov_timeout = 0;
5e5282bb 2365 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
310a3d48 2366 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
16ab91ab
JH
2367 }
2368
a8b2d5c2 2369 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
7d78525d
JH
2370 cancel_delayed_work(&hdev->service_cache);
2371
7ba8b4be 2372 cancel_delayed_work_sync(&hdev->le_scan_disable);
4518bb0f
JH
2373
2374 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2375 cancel_delayed_work_sync(&hdev->rpa_expired);
7ba8b4be 2376
09fd0de5 2377 hci_dev_lock(hdev);
1f9b9a5d 2378 hci_inquiry_cache_flush(hdev);
1da177e4 2379 hci_conn_hash_flush(hdev);
6046dc3e 2380 hci_pend_le_conns_clear(hdev);
09fd0de5 2381 hci_dev_unlock(hdev);
1da177e4
LT
2382
2383 hci_notify(hdev, HCI_DEV_DOWN);
2384
2385 if (hdev->flush)
2386 hdev->flush(hdev);
2387
2388 /* Reset device */
2389 skb_queue_purge(&hdev->cmd_q);
2390 atomic_set(&hdev->cmd_cnt, 1);
8af59467 2391 if (!test_bit(HCI_RAW, &hdev->flags) &&
3a6afbd2 2392 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
a6c511c6 2393 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1da177e4 2394 set_bit(HCI_INIT, &hdev->flags);
01178cd4 2395 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1da177e4
LT
2396 clear_bit(HCI_INIT, &hdev->flags);
2397 }
2398
c347b765
GP
2399 /* flush cmd work */
2400 flush_work(&hdev->cmd_work);
1da177e4
LT
2401
2402 /* Drop queues */
2403 skb_queue_purge(&hdev->rx_q);
2404 skb_queue_purge(&hdev->cmd_q);
2405 skb_queue_purge(&hdev->raw_q);
2406
2407 /* Drop last sent command */
2408 if (hdev->sent_cmd) {
b79f44c1 2409 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
2410 kfree_skb(hdev->sent_cmd);
2411 hdev->sent_cmd = NULL;
2412 }
2413
b6ddb638
JH
2414 kfree_skb(hdev->recv_evt);
2415 hdev->recv_evt = NULL;
2416
1da177e4
LT
2417 /* After this point our queues are empty
2418 * and no tasks are scheduled. */
2419 hdev->close(hdev);
2420
35b973c9
JH
2421 /* Clear flags */
2422 hdev->flags = 0;
2423 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2424
93c311a0
MH
2425 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2426 if (hdev->dev_type == HCI_BREDR) {
2427 hci_dev_lock(hdev);
2428 mgmt_powered(hdev, 0);
2429 hci_dev_unlock(hdev);
2430 }
8ee56540 2431 }
5add6af8 2432
ced5c338 2433 /* Controller radio is available but is currently powered down */
536619e8 2434 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
ced5c338 2435
e59fda8d 2436 memset(hdev->eir, 0, sizeof(hdev->eir));
09b3c3fb 2437 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
7a4cd51d 2438 bacpy(&hdev->random_addr, BDADDR_ANY);
e59fda8d 2439
1da177e4
LT
2440 hci_req_unlock(hdev);
2441
2442 hci_dev_put(hdev);
2443 return 0;
2444}
2445
2446int hci_dev_close(__u16 dev)
2447{
2448 struct hci_dev *hdev;
2449 int err;
2450
70f23020
AE
2451 hdev = hci_dev_get(dev);
2452 if (!hdev)
1da177e4 2453 return -ENODEV;
8ee56540 2454
0736cfa8
MH
2455 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2456 err = -EBUSY;
2457 goto done;
2458 }
2459
8ee56540
MH
2460 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2461 cancel_delayed_work(&hdev->power_off);
2462
1da177e4 2463 err = hci_dev_do_close(hdev);
8ee56540 2464
0736cfa8 2465done:
1da177e4
LT
2466 hci_dev_put(hdev);
2467 return err;
2468}
2469
2470int hci_dev_reset(__u16 dev)
2471{
2472 struct hci_dev *hdev;
2473 int ret = 0;
2474
70f23020
AE
2475 hdev = hci_dev_get(dev);
2476 if (!hdev)
1da177e4
LT
2477 return -ENODEV;
2478
2479 hci_req_lock(hdev);
1da177e4 2480
808a049e
MH
2481 if (!test_bit(HCI_UP, &hdev->flags)) {
2482 ret = -ENETDOWN;
1da177e4 2483 goto done;
808a049e 2484 }
1da177e4 2485
0736cfa8
MH
2486 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2487 ret = -EBUSY;
2488 goto done;
2489 }
2490
1da177e4
LT
2491 /* Drop queues */
2492 skb_queue_purge(&hdev->rx_q);
2493 skb_queue_purge(&hdev->cmd_q);
2494
09fd0de5 2495 hci_dev_lock(hdev);
1f9b9a5d 2496 hci_inquiry_cache_flush(hdev);
1da177e4 2497 hci_conn_hash_flush(hdev);
09fd0de5 2498 hci_dev_unlock(hdev);
1da177e4
LT
2499
2500 if (hdev->flush)
2501 hdev->flush(hdev);
2502
8e87d142 2503 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 2504 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4
LT
2505
2506 if (!test_bit(HCI_RAW, &hdev->flags))
01178cd4 2507 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1da177e4
LT
2508
2509done:
1da177e4
LT
2510 hci_req_unlock(hdev);
2511 hci_dev_put(hdev);
2512 return ret;
2513}
2514
2515int hci_dev_reset_stat(__u16 dev)
2516{
2517 struct hci_dev *hdev;
2518 int ret = 0;
2519
70f23020
AE
2520 hdev = hci_dev_get(dev);
2521 if (!hdev)
1da177e4
LT
2522 return -ENODEV;
2523
0736cfa8
MH
2524 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2525 ret = -EBUSY;
2526 goto done;
2527 }
2528
1da177e4
LT
2529 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2530
0736cfa8 2531done:
1da177e4 2532 hci_dev_put(hdev);
1da177e4
LT
2533 return ret;
2534}
2535
2536int hci_dev_cmd(unsigned int cmd, void __user *arg)
2537{
2538 struct hci_dev *hdev;
2539 struct hci_dev_req dr;
2540 int err = 0;
2541
2542 if (copy_from_user(&dr, arg, sizeof(dr)))
2543 return -EFAULT;
2544
70f23020
AE
2545 hdev = hci_dev_get(dr.dev_id);
2546 if (!hdev)
1da177e4
LT
2547 return -ENODEV;
2548
0736cfa8
MH
2549 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2550 err = -EBUSY;
2551 goto done;
2552 }
2553
5b69bef5
MH
2554 if (hdev->dev_type != HCI_BREDR) {
2555 err = -EOPNOTSUPP;
2556 goto done;
2557 }
2558
56f87901
JH
2559 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2560 err = -EOPNOTSUPP;
2561 goto done;
2562 }
2563
1da177e4
LT
2564 switch (cmd) {
2565 case HCISETAUTH:
01178cd4
JH
2566 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2567 HCI_INIT_TIMEOUT);
1da177e4
LT
2568 break;
2569
2570 case HCISETENCRYPT:
2571 if (!lmp_encrypt_capable(hdev)) {
2572 err = -EOPNOTSUPP;
2573 break;
2574 }
2575
2576 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2577 /* Auth must be enabled first */
01178cd4
JH
2578 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2579 HCI_INIT_TIMEOUT);
1da177e4
LT
2580 if (err)
2581 break;
2582 }
2583
01178cd4
JH
2584 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2585 HCI_INIT_TIMEOUT);
1da177e4
LT
2586 break;
2587
2588 case HCISETSCAN:
01178cd4
JH
2589 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2590 HCI_INIT_TIMEOUT);
1da177e4
LT
2591 break;
2592
1da177e4 2593 case HCISETLINKPOL:
01178cd4
JH
2594 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2595 HCI_INIT_TIMEOUT);
1da177e4
LT
2596 break;
2597
2598 case HCISETLINKMODE:
e4e8e37c
MH
2599 hdev->link_mode = ((__u16) dr.dev_opt) &
2600 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2601 break;
2602
2603 case HCISETPTYPE:
2604 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
2605 break;
2606
2607 case HCISETACLMTU:
e4e8e37c
MH
2608 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2609 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
2610 break;
2611
2612 case HCISETSCOMTU:
e4e8e37c
MH
2613 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2614 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
2615 break;
2616
2617 default:
2618 err = -EINVAL;
2619 break;
2620 }
e4e8e37c 2621
0736cfa8 2622done:
1da177e4
LT
2623 hci_dev_put(hdev);
2624 return err;
2625}
2626
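/* Note on HCISETACLMTU/HCISETSCOMTU above: dev_opt packs two __u16
 * values which are read back through a host-endian pointer cast. On a
 * little-endian host a caller would therefore encode, say, an ACL MTU
 * of 1021 with 8 packets as:
 *
 *	dr.dev_opt = (1021 << 16) | 8;	// high half MTU, low half pkts
 */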
2627int hci_get_dev_list(void __user *arg)
2628{
8035ded4 2629 struct hci_dev *hdev;
1da177e4
LT
2630 struct hci_dev_list_req *dl;
2631 struct hci_dev_req *dr;
1da177e4
LT
2632 int n = 0, size, err;
2633 __u16 dev_num;
2634
2635 if (get_user(dev_num, (__u16 __user *) arg))
2636 return -EFAULT;
2637
2638 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2639 return -EINVAL;
2640
2641 size = sizeof(*dl) + dev_num * sizeof(*dr);
2642
70f23020
AE
2643 dl = kzalloc(size, GFP_KERNEL);
2644 if (!dl)
1da177e4
LT
2645 return -ENOMEM;
2646
2647 dr = dl->dev_req;
2648
f20d09d5 2649 read_lock(&hci_dev_list_lock);
8035ded4 2650 list_for_each_entry(hdev, &hci_dev_list, list) {
a8b2d5c2 2651 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
e0f9309f 2652 cancel_delayed_work(&hdev->power_off);
c542a06c 2653
a8b2d5c2
JH
2654 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2655 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 2656
1da177e4
LT
2657 (dr + n)->dev_id = hdev->id;
2658 (dr + n)->dev_opt = hdev->flags;
c542a06c 2659
1da177e4
LT
2660 if (++n >= dev_num)
2661 break;
2662 }
f20d09d5 2663 read_unlock(&hci_dev_list_lock);
1da177e4
LT
2664
2665 dl->dev_num = n;
2666 size = sizeof(*dl) + n * sizeof(*dr);
2667
2668 err = copy_to_user(arg, dl, size);
2669 kfree(dl);
2670
2671 return err ? -EFAULT : 0;
2672}
2673
2674int hci_get_dev_info(void __user *arg)
2675{
2676 struct hci_dev *hdev;
2677 struct hci_dev_info di;
2678 int err = 0;
2679
2680 if (copy_from_user(&di, arg, sizeof(di)))
2681 return -EFAULT;
2682
70f23020
AE
2683 hdev = hci_dev_get(di.dev_id);
2684 if (!hdev)
1da177e4
LT
2685 return -ENODEV;
2686
a8b2d5c2 2687 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
3243553f 2688 cancel_delayed_work_sync(&hdev->power_off);
ab81cbf9 2689
a8b2d5c2
JH
2690 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2691 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 2692
1da177e4
LT
2693 strcpy(di.name, hdev->name);
2694 di.bdaddr = hdev->bdaddr;
60f2a3ed 2695 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
1da177e4
LT
2696 di.flags = hdev->flags;
2697 di.pkt_type = hdev->pkt_type;
572c7f84
JH
2698 if (lmp_bredr_capable(hdev)) {
2699 di.acl_mtu = hdev->acl_mtu;
2700 di.acl_pkts = hdev->acl_pkts;
2701 di.sco_mtu = hdev->sco_mtu;
2702 di.sco_pkts = hdev->sco_pkts;
2703 } else {
2704 di.acl_mtu = hdev->le_mtu;
2705 di.acl_pkts = hdev->le_pkts;
2706 di.sco_mtu = 0;
2707 di.sco_pkts = 0;
2708 }
1da177e4
LT
2709 di.link_policy = hdev->link_policy;
2710 di.link_mode = hdev->link_mode;
2711
2712 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2713 memcpy(&di.features, &hdev->features, sizeof(di.features));
2714
2715 if (copy_to_user(arg, &di, sizeof(di)))
2716 err = -EFAULT;
2717
2718 hci_dev_put(hdev);
2719
2720 return err;
2721}
2722
2723/* ---- Interface to HCI drivers ---- */
2724
611b30f7
MH
2725static int hci_rfkill_set_block(void *data, bool blocked)
2726{
2727 struct hci_dev *hdev = data;
2728
2729 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2730
0736cfa8
MH
2731 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2732 return -EBUSY;
2733
5e130367
JH
2734 if (blocked) {
2735 set_bit(HCI_RFKILLED, &hdev->dev_flags);
bf543036
JH
2736 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2737 hci_dev_do_close(hdev);
5e130367
JH
2738 } else {
2739 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
1025c04c 2740 }
611b30f7
MH
2741
2742 return 0;
2743}
2744
2745static const struct rfkill_ops hci_rfkill_ops = {
2746 .set_block = hci_rfkill_set_block,
2747};
2748
ab81cbf9
JH
2749static void hci_power_on(struct work_struct *work)
2750{
2751 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
96570ffc 2752 int err;
ab81cbf9
JH
2753
2754 BT_DBG("%s", hdev->name);
2755
cbed0ca1 2756 err = hci_dev_do_open(hdev);
96570ffc
JH
2757 if (err < 0) {
2758 mgmt_set_powered_failed(hdev, err);
ab81cbf9 2759 return;
96570ffc 2760 }
ab81cbf9 2761
a5c8f270
MH
2762 /* During the HCI setup phase, a few error conditions are
2763 * ignored and they need to be checked now. If they are still
2764 * valid, it is important to turn the device back off.
2765 */
2766 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2767 (hdev->dev_type == HCI_BREDR &&
2768 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2769 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
bf543036
JH
2770 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2771 hci_dev_do_close(hdev);
2772 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
19202573
JH
2773 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2774 HCI_AUTO_OFF_TIMEOUT);
bf543036 2775 }
ab81cbf9 2776
a8b2d5c2 2777 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
744cf19e 2778 mgmt_index_added(hdev);
ab81cbf9
JH
2779}
2780
2781static void hci_power_off(struct work_struct *work)
2782{
3243553f 2783 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 2784 power_off.work);
ab81cbf9
JH
2785
2786 BT_DBG("%s", hdev->name);
2787
8ee56540 2788 hci_dev_do_close(hdev);
ab81cbf9
JH
2789}
2790
16ab91ab
JH
2791static void hci_discov_off(struct work_struct *work)
2792{
2793 struct hci_dev *hdev;
16ab91ab
JH
2794
2795 hdev = container_of(work, struct hci_dev, discov_off.work);
2796
2797 BT_DBG("%s", hdev->name);
2798
d1967ff8 2799 mgmt_discoverable_timeout(hdev);
16ab91ab
JH
2800}
2801
35f7498a 2802void hci_uuids_clear(struct hci_dev *hdev)
2aeb9a1a 2803{
4821002c 2804 struct bt_uuid *uuid, *tmp;
2aeb9a1a 2805
4821002c
JH
2806 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2807 list_del(&uuid->list);
2aeb9a1a
JH
2808 kfree(uuid);
2809 }
2aeb9a1a
JH
2810}
2811
35f7498a 2812void hci_link_keys_clear(struct hci_dev *hdev)
55ed8ca1
JH
2813{
2814 struct list_head *p, *n;
2815
2816 list_for_each_safe(p, n, &hdev->link_keys) {
2817 struct link_key *key;
2818
2819 key = list_entry(p, struct link_key, list);
2820
2821 list_del(p);
2822 kfree(key);
2823 }
55ed8ca1
JH
2824}
2825
35f7498a 2826void hci_smp_ltks_clear(struct hci_dev *hdev)
b899efaf
VCG
2827{
2828 struct smp_ltk *k, *tmp;
2829
2830 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2831 list_del(&k->list);
2832 kfree(k);
2833 }
b899efaf
VCG
2834}
2835
970c4e46
JH
2836void hci_smp_irks_clear(struct hci_dev *hdev)
2837{
2838 struct smp_irk *k, *tmp;
2839
2840 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2841 list_del(&k->list);
2842 kfree(k);
2843 }
2844}
2845
55ed8ca1
JH
2846struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2847{
8035ded4 2848 struct link_key *k;
55ed8ca1 2849
8035ded4 2850 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
2851 if (bacmp(bdaddr, &k->bdaddr) == 0)
2852 return k;
55ed8ca1
JH
2853
2854 return NULL;
2855}
2856
745c0ce3 2857static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 2858 u8 key_type, u8 old_key_type)
d25e28ab
JH
2859{
2860 /* Legacy key */
2861 if (key_type < 0x03)
745c0ce3 2862 return true;
d25e28ab
JH
2863
2864 /* Debug keys are insecure so don't store them persistently */
2865 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 2866 return false;
d25e28ab
JH
2867
2868 /* Changed combination key and there's no previous one */
2869 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 2870 return false;
d25e28ab
JH
2871
2872 /* Security mode 3 case */
2873 if (!conn)
745c0ce3 2874 return true;
d25e28ab
JH
2875
2876 /* Neither the local nor the remote side had no-bonding as a requirement */
2877 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 2878 return true;
d25e28ab
JH
2879
2880 /* Local side had dedicated bonding as requirement */
2881 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 2882 return true;
d25e28ab
JH
2883
2884 /* Remote side had dedicated bonding as requirement */
2885 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 2886 return true;
d25e28ab
JH
2887
2888 /* If none of the above criteria match, then don't store the key
2889 * persistently */
745c0ce3 2890 return false;
d25e28ab
JH
2891}
2892
98a0b845
JH
2893static bool ltk_type_master(u8 type)
2894{
2895 if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
2896 return true;
2897
2898 return false;
2899}
2900
2901struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8],
2902 bool master)
75d262c2 2903{
c9839a11 2904 struct smp_ltk *k;
75d262c2 2905
c9839a11
VCG
2906 list_for_each_entry(k, &hdev->long_term_keys, list) {
2907 if (k->ediv != ediv ||
a8c5fb1a 2908 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
2909 continue;
2910
98a0b845
JH
2911 if (ltk_type_master(k->type) != master)
2912 continue;
2913
c9839a11 2914 return k;
75d262c2
VCG
2915 }
2916
2917 return NULL;
2918}
75d262c2 2919
c9839a11 2920struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
98a0b845 2921 u8 addr_type, bool master)
75d262c2 2922{
c9839a11 2923 struct smp_ltk *k;
75d262c2 2924
c9839a11
VCG
2925 list_for_each_entry(k, &hdev->long_term_keys, list)
2926 if (addr_type == k->bdaddr_type &&
98a0b845
JH
2927 bacmp(bdaddr, &k->bdaddr) == 0 &&
2928 ltk_type_master(k->type) == master)
75d262c2
VCG
2929 return k;
2930
2931 return NULL;
2932}
75d262c2 2933
970c4e46
JH
2934struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2935{
2936 struct smp_irk *irk;
2937
2938 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2939 if (!bacmp(&irk->rpa, rpa))
2940 return irk;
2941 }
2942
2943 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2944 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
2945 bacpy(&irk->rpa, rpa);
2946 return irk;
2947 }
2948 }
2949
2950 return NULL;
2951}
2952
2953struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2954 u8 addr_type)
2955{
2956 struct smp_irk *irk;
2957
6cfc9988
JH
2958 /* Identity Address must be public or static random */
2959 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2960 return NULL;
2961
970c4e46
JH
2962 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2963 if (addr_type == irk->addr_type &&
2964 bacmp(bdaddr, &irk->bdaddr) == 0)
2965 return irk;
2966 }
2967
2968 return NULL;
2969}
2970
d25e28ab 2971int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
04124681 2972 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
55ed8ca1
JH
2973{
2974 struct link_key *key, *old_key;
745c0ce3
VA
2975 u8 old_key_type;
2976 bool persistent;
55ed8ca1
JH
2977
2978 old_key = hci_find_link_key(hdev, bdaddr);
2979 if (old_key) {
2980 old_key_type = old_key->type;
2981 key = old_key;
2982 } else {
12adcf3a 2983 old_key_type = conn ? conn->key_type : 0xff;
0a14ab41 2984 key = kzalloc(sizeof(*key), GFP_KERNEL);
55ed8ca1
JH
2985 if (!key)
2986 return -ENOMEM;
2987 list_add(&key->list, &hdev->link_keys);
2988 }
2989
6ed93dc6 2990 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 2991
d25e28ab
JH
2992 /* Some buggy controller combinations generate a changed
2993 * combination key for legacy pairing even when there's no
2994 * previous key */
2995 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 2996 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 2997 type = HCI_LK_COMBINATION;
655fe6ec
JH
2998 if (conn)
2999 conn->key_type = type;
3000 }
d25e28ab 3001
55ed8ca1 3002 bacpy(&key->bdaddr, bdaddr);
9b3b4460 3003 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
3004 key->pin_len = pin_len;
3005
b6020ba0 3006 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 3007 key->type = old_key_type;
4748fed2
JH
3008 else
3009 key->type = type;
3010
4df378a1
JH
3011 if (!new_key)
3012 return 0;
3013
3014 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
3015
744cf19e 3016 mgmt_new_link_key(hdev, key, persistent);
4df378a1 3017
6ec5bcad
VA
3018 if (conn)
3019 conn->flush_key = !persistent;
55ed8ca1
JH
3020
3021 return 0;
3022}
3023
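/* Typical caller, roughly as in the Link Key Notification event
 * handler in hci_event.c (a sketch from memory, not verbatim):
 *
 *	hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
 *			 ev->key_type, conn ? conn->pin_length : 0);
 */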
ca9142b8 3024struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
35d70271
JH
3025 u8 addr_type, u8 type, u8 authenticated,
3026 u8 tk[16], u8 enc_size, __le16 ediv, u8 rand[8])
75d262c2 3027{
c9839a11 3028 struct smp_ltk *key, *old_key;
98a0b845 3029 bool master = ltk_type_master(type);
75d262c2 3030
98a0b845 3031 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
c9839a11 3032 if (old_key)
75d262c2 3033 key = old_key;
c9839a11 3034 else {
0a14ab41 3035 key = kzalloc(sizeof(*key), GFP_KERNEL);
75d262c2 3036 if (!key)
ca9142b8 3037 return NULL;
c9839a11 3038 list_add(&key->list, &hdev->long_term_keys);
75d262c2
VCG
3039 }
3040
75d262c2 3041 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
3042 key->bdaddr_type = addr_type;
3043 memcpy(key->val, tk, sizeof(key->val));
3044 key->authenticated = authenticated;
3045 key->ediv = ediv;
3046 key->enc_size = enc_size;
3047 key->type = type;
3048 memcpy(key->rand, rand, sizeof(key->rand));
75d262c2 3049
ca9142b8 3050 return key;
75d262c2
VCG
3051}
3052
ca9142b8
JH
3053struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3054 u8 addr_type, u8 val[16], bdaddr_t *rpa)
970c4e46
JH
3055{
3056 struct smp_irk *irk;
3057
3058 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3059 if (!irk) {
3060 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3061 if (!irk)
ca9142b8 3062 return NULL;
970c4e46
JH
3063
3064 bacpy(&irk->bdaddr, bdaddr);
3065 irk->addr_type = addr_type;
3066
3067 list_add(&irk->list, &hdev->identity_resolving_keys);
3068 }
3069
3070 memcpy(irk->val, val, 16);
3071 bacpy(&irk->rpa, rpa);
3072
ca9142b8 3073 return irk;
970c4e46
JH
3074}
3075
55ed8ca1
JH
3076int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3077{
3078 struct link_key *key;
3079
3080 key = hci_find_link_key(hdev, bdaddr);
3081 if (!key)
3082 return -ENOENT;
3083
6ed93dc6 3084 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
3085
3086 list_del(&key->list);
3087 kfree(key);
3088
3089 return 0;
3090}
3091
e0b2b27e 3092int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
b899efaf
VCG
3093{
3094 struct smp_ltk *k, *tmp;
c51ffa0b 3095 int removed = 0;
b899efaf
VCG
3096
3097 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
e0b2b27e 3098 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
b899efaf
VCG
3099 continue;
3100
6ed93dc6 3101 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf
VCG
3102
3103 list_del(&k->list);
3104 kfree(k);
c51ffa0b 3105 removed++;
b899efaf
VCG
3106 }
3107
c51ffa0b 3108 return removed ? 0 : -ENOENT;
b899efaf
VCG
3109}
3110
a7ec7338
JH
3111void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3112{
3113 struct smp_irk *k, *tmp;
3114
668b7b19 3115 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
a7ec7338
JH
3116 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3117 continue;
3118
3119 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3120
3121 list_del(&k->list);
3122 kfree(k);
3123 }
3124}
3125
6bd32326 3126/* HCI command timer function */
bda4f23a 3127static void hci_cmd_timeout(unsigned long arg)
6bd32326
VT
3128{
3129 struct hci_dev *hdev = (void *) arg;
3130
bda4f23a
AE
3131 if (hdev->sent_cmd) {
3132 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3133 u16 opcode = __le16_to_cpu(sent->opcode);
3134
3135 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3136 } else {
3137 BT_ERR("%s command tx timeout", hdev->name);
3138 }
3139
6bd32326 3140 atomic_set(&hdev->cmd_cnt, 1);
c347b765 3141 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
3142}
3143
2763eda6 3144struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 3145 bdaddr_t *bdaddr)
2763eda6
SJ
3146{
3147 struct oob_data *data;
3148
3149 list_for_each_entry(data, &hdev->remote_oob_data, list)
3150 if (bacmp(bdaddr, &data->bdaddr) == 0)
3151 return data;
3152
3153 return NULL;
3154}
3155
3156int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3157{
3158 struct oob_data *data;
3159
3160 data = hci_find_remote_oob_data(hdev, bdaddr);
3161 if (!data)
3162 return -ENOENT;
3163
6ed93dc6 3164 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
3165
3166 list_del(&data->list);
3167 kfree(data);
3168
3169 return 0;
3170}
3171
35f7498a 3172void hci_remote_oob_data_clear(struct hci_dev *hdev)
2763eda6
SJ
3173{
3174 struct oob_data *data, *n;
3175
3176 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3177 list_del(&data->list);
3178 kfree(data);
3179 }
2763eda6
SJ
3180}
3181
0798872e
MH
3182int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3183 u8 *hash, u8 *randomizer)
2763eda6
SJ
3184{
3185 struct oob_data *data;
3186
3187 data = hci_find_remote_oob_data(hdev, bdaddr);
2763eda6 3188 if (!data) {
0a14ab41 3189 data = kmalloc(sizeof(*data), GFP_KERNEL);
2763eda6
SJ
3190 if (!data)
3191 return -ENOMEM;
3192
3193 bacpy(&data->bdaddr, bdaddr);
3194 list_add(&data->list, &hdev->remote_oob_data);
3195 }
3196
519ca9d0
MH
3197 memcpy(data->hash192, hash, sizeof(data->hash192));
3198 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
2763eda6 3199
0798872e
MH
3200 memset(data->hash256, 0, sizeof(data->hash256));
3201 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3202
3203 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3204
3205 return 0;
3206}
3207
3208int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3209 u8 *hash192, u8 *randomizer192,
3210 u8 *hash256, u8 *randomizer256)
3211{
3212 struct oob_data *data;
3213
3214 data = hci_find_remote_oob_data(hdev, bdaddr);
3215 if (!data) {
0a14ab41 3216 data = kmalloc(sizeof(*data), GFP_KERNEL);
0798872e
MH
3217 if (!data)
3218 return -ENOMEM;
3219
3220 bacpy(&data->bdaddr, bdaddr);
3221 list_add(&data->list, &hdev->remote_oob_data);
3222 }
3223
3224 memcpy(data->hash192, hash192, sizeof(data->hash192));
3225 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3226
3227 memcpy(data->hash256, hash256, sizeof(data->hash256));
3228 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3229
6ed93dc6 3230 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
3231
3232 return 0;
3233}
3234
b9ee0a78
MH
3235struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3236 bdaddr_t *bdaddr, u8 type)
b2a66aad 3237{
8035ded4 3238 struct bdaddr_list *b;
b2a66aad 3239
b9ee0a78
MH
3240 list_for_each_entry(b, &hdev->blacklist, list) {
3241 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 3242 return b;
b9ee0a78 3243 }
b2a66aad
AJ
3244
3245 return NULL;
3246}
3247
35f7498a 3248void hci_blacklist_clear(struct hci_dev *hdev)
b2a66aad
AJ
3249{
3250 struct list_head *p, *n;
3251
3252 list_for_each_safe(p, n, &hdev->blacklist) {
b9ee0a78 3253 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
b2a66aad
AJ
3254
3255 list_del(p);
3256 kfree(b);
3257 }
b2a66aad
AJ
3258}
3259
88c1fe4b 3260int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3261{
3262 struct bdaddr_list *entry;
b2a66aad 3263
b9ee0a78 3264 if (!bacmp(bdaddr, BDADDR_ANY))
b2a66aad
AJ
3265 return -EBADF;
3266
b9ee0a78 3267 if (hci_blacklist_lookup(hdev, bdaddr, type))
5e762444 3268 return -EEXIST;
b2a66aad
AJ
3269
3270 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
3271 if (!entry)
3272 return -ENOMEM;
b2a66aad
AJ
3273
3274 bacpy(&entry->bdaddr, bdaddr);
b9ee0a78 3275 entry->bdaddr_type = type;
b2a66aad
AJ
3276
3277 list_add(&entry->list, &hdev->blacklist);
3278
88c1fe4b 3279 return mgmt_device_blocked(hdev, bdaddr, type);
b2a66aad
AJ
3280}
3281
88c1fe4b 3282int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3283{
3284 struct bdaddr_list *entry;
b2a66aad 3285
35f7498a
JH
3286 if (!bacmp(bdaddr, BDADDR_ANY)) {
3287 hci_blacklist_clear(hdev);
3288 return 0;
3289 }
b2a66aad 3290
b9ee0a78 3291 entry = hci_blacklist_lookup(hdev, bdaddr, type);
1ec918ce 3292 if (!entry)
5e762444 3293 return -ENOENT;
b2a66aad
AJ
3294
3295 list_del(&entry->list);
3296 kfree(entry);
3297
88c1fe4b 3298 return mgmt_device_unblocked(hdev, bdaddr, type);
b2a66aad
AJ
3299}
3300
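/* Typical caller, a sketch along the lines of mgmt's Block Device
 * command handler:
 *
 *	err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
 *
 * where the type is one of BDADDR_BREDR, BDADDR_LE_PUBLIC or
 * BDADDR_LE_RANDOM.
 */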
15819a70
AG
3301/* This function requires the caller holds hdev->lock */
3302struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3303 bdaddr_t *addr, u8 addr_type)
3304{
3305 struct hci_conn_params *params;
3306
3307 list_for_each_entry(params, &hdev->le_conn_params, list) {
3308 if (bacmp(&params->addr, addr) == 0 &&
3309 params->addr_type == addr_type) {
3310 return params;
3311 }
3312 }
3313
3314 return NULL;
3315}
3316
cef952ce
AG
3317static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3318{
3319 struct hci_conn *conn;
3320
3321 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3322 if (!conn)
3323 return false;
3324
3325 if (conn->dst_type != type)
3326 return false;
3327
3328 if (conn->state != BT_CONNECTED)
3329 return false;
3330
3331 return true;
3332}
3333
a9b0a04c
AG
3334static bool is_identity_address(bdaddr_t *addr, u8 addr_type)
3335{
3336 if (addr_type == ADDR_LE_DEV_PUBLIC)
3337 return true;
3338
3339 /* Check for Random Static address type */
3340 if ((addr->b[5] & 0xc0) == 0xc0)
3341 return true;
3342
3343 return false;
3344}
3345
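/* Example: a random address is "static" when the two most significant
 * bits of its most significant byte are 11b, so C0:00:00:00:00:01 up
 * to FF:FF:FF:FF:FF:FF qualify, while resolvable (01b) and
 * non-resolvable (00b) private addresses do not.
 */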
15819a70 3346/* This function requires the caller holds hdev->lock */
a9b0a04c
AG
3347int hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3348 u8 auto_connect, u16 conn_min_interval,
3349 u16 conn_max_interval)
15819a70
AG
3350{
3351 struct hci_conn_params *params;
3352
a9b0a04c
AG
3353 if (!is_identity_address(addr, addr_type))
3354 return -EINVAL;
3355
15819a70 3356 params = hci_conn_params_lookup(hdev, addr, addr_type);
cef952ce
AG
3357 if (params)
3358 goto update;
15819a70
AG
3359
3360 params = kzalloc(sizeof(*params), GFP_KERNEL);
3361 if (!params) {
3362 BT_ERR("Out of memory");
a9b0a04c 3363 return -ENOMEM;
15819a70
AG
3364 }
3365
3366 bacpy(&params->addr, addr);
3367 params->addr_type = addr_type;
cef952ce
AG
3368
3369 list_add(&params->list, &hdev->le_conn_params);
3370
3371update:
15819a70
AG
3372 params->conn_min_interval = conn_min_interval;
3373 params->conn_max_interval = conn_max_interval;
9fcb18ef 3374 params->auto_connect = auto_connect;
15819a70 3375
cef952ce
AG
3376 switch (auto_connect) {
3377 case HCI_AUTO_CONN_DISABLED:
3378 case HCI_AUTO_CONN_LINK_LOSS:
3379 hci_pend_le_conn_del(hdev, addr, addr_type);
3380 break;
3381 case HCI_AUTO_CONN_ALWAYS:
3382 if (!is_connected(hdev, addr, addr_type))
3383 hci_pend_le_conn_add(hdev, addr, addr_type);
3384 break;
3385 }
15819a70 3386
9fcb18ef
AG
3387 BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x "
3388 "conn_max_interval 0x%.4x", addr, addr_type, auto_connect,
3389 conn_min_interval, conn_max_interval);
a9b0a04c
AG
3390
3391 return 0;
15819a70
AG
3392}
3393
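/* A minimal usage sketch (with hdev->lock held): auto-connect a known
 * LE device using the default connection interval range from
 * hci_alloc_dev() below, 0x0028..0x0038 units of 1.25 ms (50-70 ms):
 *
 *	hci_conn_params_add(hdev, &addr, ADDR_LE_DEV_PUBLIC,
 *			    HCI_AUTO_CONN_ALWAYS, 0x0028, 0x0038);
 */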
3394/* This function requires the caller holds hdev->lock */
3395void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3396{
3397 struct hci_conn_params *params;
3398
3399 params = hci_conn_params_lookup(hdev, addr, addr_type);
3400 if (!params)
3401 return;
3402
cef952ce
AG
3403 hci_pend_le_conn_del(hdev, addr, addr_type);
3404
15819a70
AG
3405 list_del(&params->list);
3406 kfree(params);
3407
3408 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3409}
3410
3411/* This function requires the caller holds hdev->lock */
3412void hci_conn_params_clear(struct hci_dev *hdev)
3413{
3414 struct hci_conn_params *params, *tmp;
3415
3416 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3417 list_del(&params->list);
3418 kfree(params);
3419 }
3420
3421 BT_DBG("All LE connection parameters were removed");
3422}
3423
77a77a30
AG
3424/* This function requires the caller holds hdev->lock */
3425struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
3426 bdaddr_t *addr, u8 addr_type)
3427{
3428 struct bdaddr_list *entry;
3429
3430 list_for_each_entry(entry, &hdev->pend_le_conns, list) {
3431 if (bacmp(&entry->bdaddr, addr) == 0 &&
3432 entry->bdaddr_type == addr_type)
3433 return entry;
3434 }
3435
3436 return NULL;
3437}
3438
3439/* This function requires the caller holds hdev->lock */
3440void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3441{
3442 struct bdaddr_list *entry;
3443
3444 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3445 if (entry)
a4790dbd 3446 goto done;
77a77a30
AG
3447
3448 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3449 if (!entry) {
3450 BT_ERR("Out of memory");
3451 return;
3452 }
3453
3454 bacpy(&entry->bdaddr, addr);
3455 entry->bdaddr_type = addr_type;
3456
3457 list_add(&entry->list, &hdev->pend_le_conns);
3458
3459 BT_DBG("addr %pMR (type %u)", addr, addr_type);
a4790dbd
AG
3460
3461done:
3462 hci_update_background_scan(hdev);
77a77a30
AG
3463}
3464
3465/* This function requires the caller holds hdev->lock */
3466void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3467{
3468 struct bdaddr_list *entry;
3469
3470 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3471 if (!entry)
a4790dbd 3472 goto done;
77a77a30
AG
3473
3474 list_del(&entry->list);
3475 kfree(entry);
3476
3477 BT_DBG("addr %pMR (type %u)", addr, addr_type);
a4790dbd
AG
3478
3479done:
3480 hci_update_background_scan(hdev);
77a77a30
AG
3481}
3482
3483/* This function requires the caller holds hdev->lock */
3484void hci_pend_le_conns_clear(struct hci_dev *hdev)
3485{
3486 struct bdaddr_list *entry, *tmp;
3487
3488 list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
3489 list_del(&entry->list);
3490 kfree(entry);
3491 }
3492
3493 BT_DBG("All LE pending connections cleared");
3494}
3495
4c87eaab 3496static void inquiry_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3497{
4c87eaab
AG
3498 if (status) {
3499 BT_ERR("Failed to start inquiry: status %d", status);
7ba8b4be 3500
4c87eaab
AG
3501 hci_dev_lock(hdev);
3502 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3503 hci_dev_unlock(hdev);
3504 return;
3505 }
7ba8b4be
AG
3506}
3507
4c87eaab 3508static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3509{
4c87eaab
AG
3510 /* General inquiry access code (GIAC) */
3511 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3512 struct hci_request req;
3513 struct hci_cp_inquiry cp;
7ba8b4be
AG
3514 int err;
3515
4c87eaab
AG
3516 if (status) {
3517 BT_ERR("Failed to disable LE scanning: status %d", status);
3518 return;
3519 }
7ba8b4be 3520
4c87eaab
AG
3521 switch (hdev->discovery.type) {
3522 case DISCOV_TYPE_LE:
3523 hci_dev_lock(hdev);
3524 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3525 hci_dev_unlock(hdev);
3526 break;
7ba8b4be 3527
4c87eaab
AG
3528 case DISCOV_TYPE_INTERLEAVED:
3529 hci_req_init(&req, hdev);
7ba8b4be 3530
4c87eaab
AG
3531 memset(&cp, 0, sizeof(cp));
3532 memcpy(&cp.lap, lap, sizeof(cp.lap));
3533 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3534 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
7ba8b4be 3535
4c87eaab 3536 hci_dev_lock(hdev);
7dbfac1d 3537
4c87eaab 3538 hci_inquiry_cache_flush(hdev);
7dbfac1d 3539
4c87eaab
AG
3540 err = hci_req_run(&req, inquiry_complete);
3541 if (err) {
3542 BT_ERR("Inquiry request failed: err %d", err);
3543 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3544 }
7dbfac1d 3545
4c87eaab
AG
3546 hci_dev_unlock(hdev);
3547 break;
7dbfac1d 3548 }
7dbfac1d
AG
3549}
3550
7ba8b4be
AG
3551static void le_scan_disable_work(struct work_struct *work)
3552{
3553 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 3554 le_scan_disable.work);
4c87eaab
AG
3555 struct hci_request req;
3556 int err;
7ba8b4be
AG
3557
3558 BT_DBG("%s", hdev->name);
3559
4c87eaab 3560 hci_req_init(&req, hdev);
28b75a89 3561
b1efcc28 3562 hci_req_add_le_scan_disable(&req);
28b75a89 3563
4c87eaab
AG
3564 err = hci_req_run(&req, le_scan_disable_work_complete);
3565 if (err)
3566 BT_ERR("Disable LE scanning request failed: err %d", err);
28b75a89
AG
3567}
3568
94b1fc92
MH
3569int hci_update_random_address(struct hci_request *req, bool require_privacy,
3570 u8 *own_addr_type)
ebd3a747
JH
3571{
3572 struct hci_dev *hdev = req->hdev;
3573 int err;
3574
3575 /* If privacy is enabled use a resolvable private address. If
2b5224dc 3576 * the current RPA has expired or something other than the
3577 * current RPA is in use, then generate a new one.
ebd3a747 3578 */
3579 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
ebd3a747
JH
3580 int to;
3581
3582 *own_addr_type = ADDR_LE_DEV_RANDOM;
3583
3584 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
2b5224dc 3585 !bacmp(&hdev->random_addr, &hdev->rpa))
ebd3a747
JH
3586 return 0;
3587
2b5224dc 3588 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
ebd3a747
JH
3589 if (err < 0) {
3590 BT_ERR("%s failed to generate new RPA", hdev->name);
3591 return err;
3592 }
3593
2b5224dc 3594 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, &hdev->rpa);
ebd3a747
JH
3595
3596 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3597 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3598
3599 return 0;
94b1fc92
MH
3600 }
3601
3602 /* In case of required privacy without resolvable private address,
3603 * use an unresolvable private address. This is useful for active
3604 * scanning and non-connectable advertising.
3605 */
3606 if (require_privacy) {
3607 bdaddr_t urpa;
3608
3609 get_random_bytes(&urpa, 6);
3610 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3611
3612 *own_addr_type = ADDR_LE_DEV_RANDOM;
3613 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, &urpa);
3614 return 0;
ebd3a747
JH
3615 }
3616
3617 /* If forcing static address is in use or there is no public
3618 * address, use the static address as the random address (but
3619 * skip the HCI command if the current random address is already
3620 * the static one).
3621 */
3622 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
3623 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3624 *own_addr_type = ADDR_LE_DEV_RANDOM;
3625 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3626 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3627 &hdev->static_addr);
3628 return 0;
3629 }
3630
3631 /* Neither privacy nor static address is being used so use a
3632 * public address.
3633 */
3634 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3635
3636 return 0;
3637}
3638
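/* Summary of the precedence implemented above:
 *
 *   1. HCI_PRIVACY set           -> resolvable private address (RPA)
 *   2. require_privacy, no RPA   -> fresh unresolvable private address
 *   3. forced static / no public -> configured static random address
 *   4. otherwise                 -> public address
 */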
9be0dab7
DH
3639/* Alloc HCI device */
3640struct hci_dev *hci_alloc_dev(void)
3641{
3642 struct hci_dev *hdev;
3643
3644 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3645 if (!hdev)
3646 return NULL;
3647
b1b813d4
DH
3648 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3649 hdev->esco_type = (ESCO_HV1);
3650 hdev->link_mode = (HCI_LM_ACCEPT);
b4cb9fb2
MH
3651 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3652 hdev->io_capability = 0x03; /* No Input No Output */
bbaf444a
JH
3653 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3654 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
b1b813d4 3655
b1b813d4
DH
3656 hdev->sniff_max_interval = 800;
3657 hdev->sniff_min_interval = 80;
3658
3f959d46 3659 hdev->le_adv_channel_map = 0x07;
bef64738
MH
3660 hdev->le_scan_interval = 0x0060;
3661 hdev->le_scan_window = 0x0030;
4e70c7e7
MH
3662 hdev->le_conn_min_interval = 0x0028;
3663 hdev->le_conn_max_interval = 0x0038;
bef64738 3664
d6bfd59c
JH
3665 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3666
b1b813d4
DH
3667 mutex_init(&hdev->lock);
3668 mutex_init(&hdev->req_lock);
3669
3670 INIT_LIST_HEAD(&hdev->mgmt_pending);
3671 INIT_LIST_HEAD(&hdev->blacklist);
3672 INIT_LIST_HEAD(&hdev->uuids);
3673 INIT_LIST_HEAD(&hdev->link_keys);
3674 INIT_LIST_HEAD(&hdev->long_term_keys);
970c4e46 3675 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
b1b813d4 3676 INIT_LIST_HEAD(&hdev->remote_oob_data);
15819a70 3677 INIT_LIST_HEAD(&hdev->le_conn_params);
77a77a30 3678 INIT_LIST_HEAD(&hdev->pend_le_conns);
6b536b5e 3679 INIT_LIST_HEAD(&hdev->conn_hash.list);
b1b813d4
DH
3680
3681 INIT_WORK(&hdev->rx_work, hci_rx_work);
3682 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3683 INIT_WORK(&hdev->tx_work, hci_tx_work);
3684 INIT_WORK(&hdev->power_on, hci_power_on);
b1b813d4 3685
b1b813d4
DH
3686 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3687 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3688 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3689
b1b813d4
DH
3690 skb_queue_head_init(&hdev->rx_q);
3691 skb_queue_head_init(&hdev->cmd_q);
3692 skb_queue_head_init(&hdev->raw_q);
3693
3694 init_waitqueue_head(&hdev->req_wait_q);
3695
bda4f23a 3696 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
b1b813d4 3697
b1b813d4
DH
3698 hci_init_sysfs(hdev);
3699 discovery_init(hdev);
9be0dab7
DH
3700
3701 return hdev;
3702}
3703EXPORT_SYMBOL(hci_alloc_dev);
3704
3705/* Free HCI device */
3706void hci_free_dev(struct hci_dev *hdev)
3707{
9be0dab7
DH
3708 /* will free via device release */
3709 put_device(&hdev->dev);
3710}
3711EXPORT_SYMBOL(hci_free_dev);
3712
1da177e4
LT
3713/* Register HCI device */
3714int hci_register_dev(struct hci_dev *hdev)
3715{
b1b813d4 3716 int id, error;
1da177e4 3717
010666a1 3718 if (!hdev->open || !hdev->close)
1da177e4
LT
3719 return -EINVAL;
3720
08add513
MM
3721 /* Do not allow HCI_AMP devices to register at index 0,
3722 * so the index can be used as the AMP controller ID.
3723 */
3df92b31
SL
3724 switch (hdev->dev_type) {
3725 case HCI_BREDR:
3726 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3727 break;
3728 case HCI_AMP:
3729 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3730 break;
3731 default:
3732 return -EINVAL;
1da177e4 3733 }
8e87d142 3734
3df92b31
SL
3735 if (id < 0)
3736 return id;
3737
1da177e4
LT
3738 sprintf(hdev->name, "hci%d", id);
3739 hdev->id = id;
2d8b3a11
AE
3740
3741 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3742
d8537548
KC
3743 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3744 WQ_MEM_RECLAIM, 1, hdev->name);
33ca954d
DH
3745 if (!hdev->workqueue) {
3746 error = -ENOMEM;
3747 goto err;
3748 }
f48fd9c8 3749
d8537548
KC
3750 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3751 WQ_MEM_RECLAIM, 1, hdev->name);
6ead1bbc
JH
3752 if (!hdev->req_workqueue) {
3753 destroy_workqueue(hdev->workqueue);
3754 error = -ENOMEM;
3755 goto err;
3756 }
3757
0153e2ec
MH
3758 if (!IS_ERR_OR_NULL(bt_debugfs))
3759 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3760
bdc3e0f1
MH
3761 dev_set_name(&hdev->dev, "%s", hdev->name);
3762
99780a7b
JH
3763 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3764 CRYPTO_ALG_ASYNC);
3765 if (IS_ERR(hdev->tfm_aes)) {
3766 BT_ERR("Unable to create crypto context");
3767 error = PTR_ERR(hdev->tfm_aes);
3768 hdev->tfm_aes = NULL;
3769 goto err_wqueue;
3770 }
3771
bdc3e0f1 3772 error = device_add(&hdev->dev);
33ca954d 3773 if (error < 0)
99780a7b 3774 goto err_tfm;
1da177e4 3775
611b30f7 3776 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
3777 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3778 hdev);
611b30f7
MH
3779 if (hdev->rfkill) {
3780 if (rfkill_register(hdev->rfkill) < 0) {
3781 rfkill_destroy(hdev->rfkill);
3782 hdev->rfkill = NULL;
3783 }
3784 }
3785
5e130367
JH
3786 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3787 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3788
a8b2d5c2 3789 set_bit(HCI_SETUP, &hdev->dev_flags);
004b0258 3790 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
ce2be9ac 3791
01cd3404 3792 if (hdev->dev_type == HCI_BREDR) {
56f87901
JH
3793 /* Assume BR/EDR support until proven otherwise (such as
3794 * through reading supported features during init.
3795 */
3796 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3797 }
ce2be9ac 3798
fcee3377
GP
3799 write_lock(&hci_dev_list_lock);
3800 list_add(&hdev->list, &hci_dev_list);
3801 write_unlock(&hci_dev_list_lock);
3802
1da177e4 3803 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 3804 hci_dev_hold(hdev);
1da177e4 3805
19202573 3806 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 3807
1da177e4 3808 return id;
f48fd9c8 3809
99780a7b
JH
3810err_tfm:
3811 crypto_free_blkcipher(hdev->tfm_aes);
33ca954d
DH
3812err_wqueue:
3813 destroy_workqueue(hdev->workqueue);
6ead1bbc 3814 destroy_workqueue(hdev->req_workqueue);
33ca954d 3815err:
3df92b31 3816 ida_simple_remove(&hci_index_ida, hdev->id);
f48fd9c8 3817
33ca954d 3818 return error;
1da177e4
LT
3819}
3820EXPORT_SYMBOL(hci_register_dev);
3821
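/* Driver-side sketch, loosely modelled on transport drivers such as
 * btusb; the my_* callbacks are hypothetical stubs standing in for a
 * real driver's open/close/send implementations.
 */
static int my_open(struct hci_dev *hdev) { return 0; }
static int my_close(struct hci_dev *hdev) { return 0; }
static int my_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	kfree_skb(skb);	/* a real driver would queue this to hardware */
	return 0;
}

static int __maybe_unused example_probe(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus   = HCI_USB;
	hdev->open  = my_open;
	hdev->close = my_close;
	hdev->send  = my_send;

	err = hci_register_dev(hdev);
	if (err < 0)
		hci_free_dev(hdev);

	return err;
}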
3822/* Unregister HCI device */
59735631 3823void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 3824{
3df92b31 3825 int i, id;
ef222013 3826
c13854ce 3827 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 3828
94324962
JH
3829 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3830
3df92b31
SL
3831 id = hdev->id;
3832
f20d09d5 3833 write_lock(&hci_dev_list_lock);
1da177e4 3834 list_del(&hdev->list);
f20d09d5 3835 write_unlock(&hci_dev_list_lock);
1da177e4
LT
3836
3837 hci_dev_do_close(hdev);
3838
cd4c5391 3839 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
3840 kfree_skb(hdev->reassembly[i]);
3841
b9b5ef18
GP
3842 cancel_work_sync(&hdev->power_on);
3843
ab81cbf9 3844 if (!test_bit(HCI_INIT, &hdev->flags) &&
a8c5fb1a 3845 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
09fd0de5 3846 hci_dev_lock(hdev);
744cf19e 3847 mgmt_index_removed(hdev);
09fd0de5 3848 hci_dev_unlock(hdev);
56e5cb86 3849 }
ab81cbf9 3850
2e58ef3e
JH
3851 /* mgmt_index_removed should take care of emptying the
3852 * pending list */
3853 BUG_ON(!list_empty(&hdev->mgmt_pending));
3854
1da177e4
LT
3855 hci_notify(hdev, HCI_DEV_UNREG);
3856
611b30f7
MH
3857 if (hdev->rfkill) {
3858 rfkill_unregister(hdev->rfkill);
3859 rfkill_destroy(hdev->rfkill);
3860 }
3861
99780a7b
JH
3862 if (hdev->tfm_aes)
3863 crypto_free_blkcipher(hdev->tfm_aes);
3864
bdc3e0f1 3865 device_del(&hdev->dev);
147e2d59 3866
0153e2ec
MH
3867 debugfs_remove_recursive(hdev->debugfs);
3868
f48fd9c8 3869 destroy_workqueue(hdev->workqueue);
6ead1bbc 3870 destroy_workqueue(hdev->req_workqueue);
f48fd9c8 3871
09fd0de5 3872 hci_dev_lock(hdev);
e2e0cacb 3873 hci_blacklist_clear(hdev);
2aeb9a1a 3874 hci_uuids_clear(hdev);
55ed8ca1 3875 hci_link_keys_clear(hdev);
b899efaf 3876 hci_smp_ltks_clear(hdev);
970c4e46 3877 hci_smp_irks_clear(hdev);
2763eda6 3878 hci_remote_oob_data_clear(hdev);
15819a70 3879 hci_conn_params_clear(hdev);
77a77a30 3880 hci_pend_le_conns_clear(hdev);
09fd0de5 3881 hci_dev_unlock(hdev);
e2e0cacb 3882
dc946bd8 3883 hci_dev_put(hdev);
3df92b31
SL
3884
3885 ida_simple_remove(&hci_index_ida, id);
1da177e4
LT
3886}
3887EXPORT_SYMBOL(hci_unregister_dev);
3888
3889/* Suspend HCI device */
3890int hci_suspend_dev(struct hci_dev *hdev)
3891{
3892 hci_notify(hdev, HCI_DEV_SUSPEND);
3893 return 0;
3894}
3895EXPORT_SYMBOL(hci_suspend_dev);
3896
3897/* Resume HCI device */
3898int hci_resume_dev(struct hci_dev *hdev)
3899{
3900 hci_notify(hdev, HCI_DEV_RESUME);
3901 return 0;
3902}
3903EXPORT_SYMBOL(hci_resume_dev);
3904
76bca880 3905/* Receive frame from HCI drivers */
e1a26170 3906int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 3907{
76bca880 3908 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 3909 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
3910 kfree_skb(skb);
3911 return -ENXIO;
3912 }
3913
d82603c6 3914 /* Incoming skb */
76bca880
MH
3915 bt_cb(skb)->incoming = 1;
3916
3917 /* Time stamp */
3918 __net_timestamp(skb);
3919
76bca880 3920 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 3921 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 3922
76bca880
MH
3923 return 0;
3924}
3925EXPORT_SYMBOL(hci_recv_frame);
3926
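/* Illustrative sketch, not part of hci_core.c: a driver delivering one
 * complete event packet to the core. my_deliver_event() and the raw
 * buffer are hypothetical; bt_skb_alloc(), bt_cb() and hci_recv_frame()
 * are the real APIs used above.
 */
static void my_deliver_event(struct hci_dev *hdev, const void *buf, int len)
{
	struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);

	if (!skb)
		return;

	memcpy(skb_put(skb, len), buf, len);
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;	/* tag before handing over */
	hci_recv_frame(hdev, skb);		/* consumes the skb */
}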
33e882a5 3927static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 3928 int count, __u8 index)
33e882a5
SS
3929{
3930 int len = 0;
3931 int hlen = 0;
3932 int remain = count;
3933 struct sk_buff *skb;
3934 struct bt_skb_cb *scb;
3935
3936 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 3937 index >= NUM_REASSEMBLY)
33e882a5
SS
3938 return -EILSEQ;
3939
3940 skb = hdev->reassembly[index];
3941
3942 if (!skb) {
3943 switch (type) {
3944 case HCI_ACLDATA_PKT:
3945 len = HCI_MAX_FRAME_SIZE;
3946 hlen = HCI_ACL_HDR_SIZE;
3947 break;
3948 case HCI_EVENT_PKT:
3949 len = HCI_MAX_EVENT_SIZE;
3950 hlen = HCI_EVENT_HDR_SIZE;
3951 break;
3952 case HCI_SCODATA_PKT:
3953 len = HCI_MAX_SCO_SIZE;
3954 hlen = HCI_SCO_HDR_SIZE;
3955 break;
3956 }
3957
1e429f38 3958 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
3959 if (!skb)
3960 return -ENOMEM;
3961
3962 scb = (void *) skb->cb;
3963 scb->expect = hlen;
3964 scb->pkt_type = type;
3965
33e882a5
SS
3966 hdev->reassembly[index] = skb;
3967 }
3968
3969 while (count) {
3970 scb = (void *) skb->cb;
89bb46d0 3971 len = min_t(uint, scb->expect, count);
33e882a5
SS
3972
3973 memcpy(skb_put(skb, len), data, len);
3974
3975 count -= len;
3976 data += len;
3977 scb->expect -= len;
3978 remain = count;
3979
3980 switch (type) {
3981 case HCI_EVENT_PKT:
3982 if (skb->len == HCI_EVENT_HDR_SIZE) {
3983 struct hci_event_hdr *h = hci_event_hdr(skb);
3984 scb->expect = h->plen;
3985
3986 if (skb_tailroom(skb) < scb->expect) {
3987 kfree_skb(skb);
3988 hdev->reassembly[index] = NULL;
3989 return -ENOMEM;
3990 }
3991 }
3992 break;
3993
3994 case HCI_ACLDATA_PKT:
3995 if (skb->len == HCI_ACL_HDR_SIZE) {
3996 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3997 scb->expect = __le16_to_cpu(h->dlen);
3998
3999 if (skb_tailroom(skb) < scb->expect) {
4000 kfree_skb(skb);
4001 hdev->reassembly[index] = NULL;
4002 return -ENOMEM;
4003 }
4004 }
4005 break;
4006
4007 case HCI_SCODATA_PKT:
4008 if (skb->len == HCI_SCO_HDR_SIZE) {
4009 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4010 scb->expect = h->dlen;
4011
4012 if (skb_tailroom(skb) < scb->expect) {
4013 kfree_skb(skb);
4014 hdev->reassembly[index] = NULL;
4015 return -ENOMEM;
4016 }
4017 }
4018 break;
4019 }
4020
4021 if (scb->expect == 0) {
4022 /* Complete frame */
4023
4024 bt_cb(skb)->pkt_type = type;
e1a26170 4025 hci_recv_frame(hdev, skb);
33e882a5
SS
4026
4027 hdev->reassembly[index] = NULL;
4028 return remain;
4029 }
4030 }
4031
4032 return remain;
4033}
4034
ef222013
MH
4035int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4036{
f39a3c06
SS
4037 int rem = 0;
4038
ef222013
MH
4039 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4040 return -EILSEQ;
4041
da5f6c37 4042 while (count) {
1e429f38 4043 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
4044 if (rem < 0)
4045 return rem;
ef222013 4046
f39a3c06
SS
4047 data += (count - rem);
4048 count = rem;
f81c6224 4049 }
ef222013 4050
f39a3c06 4051 return rem;
ef222013
MH
4052}
4053EXPORT_SYMBOL(hci_recv_fragment);
4054
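/* Illustrative sketch, not part of hci_core.c: feeding typed fragments,
 * e.g. from a bus that yields partial HCI packets. my_rx_chunk() is
 * hypothetical; the core buffers partial data per packet type in
 * hdev->reassembly[] and calls hci_recv_frame() once a packet completes.
 */
static void my_rx_chunk(struct hci_dev *hdev, void *buf, int len)
{
	int err = hci_recv_fragment(hdev, HCI_ACLDATA_PKT, buf, len);

	if (err < 0)
		BT_ERR("%s reassembly failed: %d", hdev->name, err);
}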
99811510
SS
4055#define STREAM_REASSEMBLY 0
4056
4057int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4058{
4059 int type;
4060 int rem = 0;
4061
da5f6c37 4062 while (count) {
99811510
SS
4063 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4064
4065 if (!skb) {
4066 struct { char type; } *pkt;
4067
4068 /* Start of the frame */
4069 pkt = data;
4070 type = pkt->type;
4071
4072 data++;
4073 count--;
4074 } else
4075 type = bt_cb(skb)->pkt_type;
4076
1e429f38 4077 rem = hci_reassembly(hdev, type, data, count,
a8c5fb1a 4078 STREAM_REASSEMBLY);
99811510
SS
4079 if (rem < 0)
4080 return rem;
4081
4082 data += (count - rem);
4083 count = rem;
f81c6224 4084 }
99811510
SS
4085
4086 return rem;
4087}
4088EXPORT_SYMBOL(hci_recv_stream_fragment);
4089
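/* Illustrative sketch, not part of hci_core.c: a UART-style driver
 * feeding a raw byte stream in which every packet is preceded by its
 * type byte. my_tty_receive() is hypothetical; the core strips the type
 * byte and reassembles across calls via the single STREAM_REASSEMBLY slot.
 */
static void my_tty_receive(struct hci_dev *hdev, u8 *data, int count)
{
	if (hci_recv_stream_fragment(hdev, data, count) < 0)
		BT_ERR("%s stream reassembly failed", hdev->name);
}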
1da177e4
LT
4090/* ---- Interface to upper protocols ---- */
4091
1da177e4
LT
4092int hci_register_cb(struct hci_cb *cb)
4093{
4094 BT_DBG("%p name %s", cb, cb->name);
4095
f20d09d5 4096 write_lock(&hci_cb_list_lock);
1da177e4 4097 list_add(&cb->list, &hci_cb_list);
f20d09d5 4098 write_unlock(&hci_cb_list_lock);
1da177e4
LT
4099
4100 return 0;
4101}
4102EXPORT_SYMBOL(hci_register_cb);
4103
4104int hci_unregister_cb(struct hci_cb *cb)
4105{
4106 BT_DBG("%p name %s", cb, cb->name);
4107
f20d09d5 4108 write_lock(&hci_cb_list_lock);
1da177e4 4109 list_del(&cb->list);
f20d09d5 4110 write_unlock(&hci_cb_list_lock);
1da177e4
LT
4111
4112 return 0;
4113}
4114EXPORT_SYMBOL(hci_unregister_cb);
4115
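/* Illustrative sketch, not part of hci_core.c: an upper protocol hooking
 * the callback list above. The my_proto_* names are hypothetical, and the
 * assumption here is that struct hci_cb in this kernel carries the
 * security_cfm confirmation hook among its callbacks.
 */
static void my_proto_security_cfm(struct hci_conn *conn, __u8 status,
				  __u8 encrypt)
{
	BT_DBG("conn %p status 0x%2.2x encrypt 0x%2.2x", conn, status, encrypt);
}

static struct hci_cb my_proto_cb = {
	.name		= "my_proto",
	.security_cfm	= my_proto_security_cfm,
};

static int __init my_proto_init(void)
{
	return hci_register_cb(&my_proto_cb);
}

static void __exit my_proto_exit(void)
{
	hci_unregister_cb(&my_proto_cb);
}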
51086991 4116static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4 4117{
0d48d939 4118 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 4119
cd82e61c
MH
4120 /* Time stamp */
4121 __net_timestamp(skb);
1da177e4 4122
cd82e61c
MH
4123 /* Send copy to monitor */
4124 hci_send_to_monitor(hdev, skb);
4125
4126 if (atomic_read(&hdev->promisc)) {
4127 /* Send copy to the sockets */
470fe1b5 4128 hci_send_to_sock(hdev, skb);
1da177e4
LT
4129 }
4130
4131 /* Get rid of skb owner, prior to sending to the driver. */
4132 skb_orphan(skb);
4133
7bd8f09f 4134 if (hdev->send(hdev, skb) < 0)
51086991 4135 BT_ERR("%s sending frame failed", hdev->name);
1da177e4
LT
4136}
4137
3119ae95
JH
4138void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4139{
4140 skb_queue_head_init(&req->cmd_q);
4141 req->hdev = hdev;
5d73e034 4142 req->err = 0;
3119ae95
JH
4143}
4144
4145int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4146{
4147 struct hci_dev *hdev = req->hdev;
4148 struct sk_buff *skb;
4149 unsigned long flags;
4150
4151 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4152
5d73e034
AG
 4153 /* If an error occurred during request building, remove all HCI
4154 * commands queued on the HCI request queue.
4155 */
4156 if (req->err) {
4157 skb_queue_purge(&req->cmd_q);
4158 return req->err;
4159 }
4160
3119ae95
JH
4161 /* Do not allow empty requests */
4162 if (skb_queue_empty(&req->cmd_q))
382b0c39 4163 return -ENODATA;
3119ae95
JH
4164
4165 skb = skb_peek_tail(&req->cmd_q);
4166 bt_cb(skb)->req.complete = complete;
4167
4168 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4169 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4170 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4171
4172 queue_work(hdev->workqueue, &hdev->cmd_work);
4173
4174 return 0;
4175}
4176
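/* Illustrative sketch, not part of hci_core.c: batching commands into one
 * asynchronous request with the API above. my_req_complete() and
 * my_enable_scan() are hypothetical; the request travels through the same
 * cmd_q/cmd_work path as single commands.
 */
static void my_req_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s request done, status 0x%2.2x", hdev->name, status);
}

static int my_enable_scan(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 scan = SCAN_PAGE | SCAN_INQUIRY;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);

	/* The completion callback fires once for the whole request */
	return hci_req_run(&req, my_req_complete);
}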
1ca3a9d0 4177static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
07dc93dd 4178 u32 plen, const void *param)
1da177e4
LT
4179{
4180 int len = HCI_COMMAND_HDR_SIZE + plen;
4181 struct hci_command_hdr *hdr;
4182 struct sk_buff *skb;
4183
1da177e4 4184 skb = bt_skb_alloc(len, GFP_ATOMIC);
1ca3a9d0
JH
4185 if (!skb)
4186 return NULL;
1da177e4
LT
4187
4188 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 4189 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
4190 hdr->plen = plen;
4191
4192 if (plen)
4193 memcpy(skb_put(skb, plen), param, plen);
4194
4195 BT_DBG("skb len %d", skb->len);
4196
0d48d939 4197 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
c78ae283 4198
1ca3a9d0
JH
4199 return skb;
4200}
4201
4202/* Send HCI command */
07dc93dd
JH
4203int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4204 const void *param)
1ca3a9d0
JH
4205{
4206 struct sk_buff *skb;
4207
4208 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4209
4210 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4211 if (!skb) {
4212 BT_ERR("%s no memory for command", hdev->name);
4213 return -ENOMEM;
4214 }
4215
11714b3d
JH
 4216 /* Stand-alone HCI commands must be flagged as
4217 * single-command requests.
4218 */
4219 bt_cb(skb)->req.start = true;
4220
1da177e4 4221 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 4222 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
4223
4224 return 0;
4225}
1da177e4 4226
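/* Illustrative aside, not part of hci_core.c: a stand-alone command needs
 * no request object; e.g. a parameterless controller reset is just
 *
 *	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 *
 * The reply is consumed by the event handler, not returned here.
 */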
71c76a17 4227/* Queue a command to an asynchronous HCI request */
07dc93dd
JH
4228void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4229 const void *param, u8 event)
71c76a17
JH
4230{
4231 struct hci_dev *hdev = req->hdev;
4232 struct sk_buff *skb;
4233
4234 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4235
34739c1e
AG
 4236 /* If an error occurred during request building, there is no point in
4237 * queueing the HCI command. We can simply return.
4238 */
4239 if (req->err)
4240 return;
4241
71c76a17
JH
4242 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4243 if (!skb) {
5d73e034
AG
4244 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4245 hdev->name, opcode);
4246 req->err = -ENOMEM;
e348fe6b 4247 return;
71c76a17
JH
4248 }
4249
4250 if (skb_queue_empty(&req->cmd_q))
4251 bt_cb(skb)->req.start = true;
4252
02350a72
JH
4253 bt_cb(skb)->req.event = event;
4254
71c76a17 4255 skb_queue_tail(&req->cmd_q, skb);
71c76a17
JH
4256}
4257
07dc93dd
JH
4258void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4259 const void *param)
02350a72
JH
4260{
4261 hci_req_add_ev(req, opcode, plen, param, 0);
4262}
4263
1da177e4 4264/* Get data from the previously sent command */
a9de9248 4265void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
4266{
4267 struct hci_command_hdr *hdr;
4268
4269 if (!hdev->sent_cmd)
4270 return NULL;
4271
4272 hdr = (void *) hdev->sent_cmd->data;
4273
a9de9248 4274 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
4275 return NULL;
4276
f0e09510 4277 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
4278
4279 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4280}
4281
4282/* Send ACL data */
4283static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4284{
4285 struct hci_acl_hdr *hdr;
4286 int len = skb->len;
4287
badff6d0
ACM
4288 skb_push(skb, HCI_ACL_HDR_SIZE);
4289 skb_reset_transport_header(skb);
9c70220b 4290 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
4291 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4292 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
4293}
4294
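/* Worked example for the header above: hci_handle_pack(handle, flags)
 * puts the 12-bit connection handle in bits 0-11 and the packet
 * boundary/broadcast flags in bits 12-15. Handle 0x002a with ACL_START
 * (0x02) packs to 0x202a, sent little endian as the bytes 2a 20.
 */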
ee22be7e 4295static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 4296 struct sk_buff *skb, __u16 flags)
1da177e4 4297{
ee22be7e 4298 struct hci_conn *conn = chan->conn;
1da177e4
LT
4299 struct hci_dev *hdev = conn->hdev;
4300 struct sk_buff *list;
4301
087bfd99
GP
4302 skb->len = skb_headlen(skb);
4303 skb->data_len = 0;
4304
4305 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
4306
4307 switch (hdev->dev_type) {
4308 case HCI_BREDR:
4309 hci_add_acl_hdr(skb, conn->handle, flags);
4310 break;
4311 case HCI_AMP:
4312 hci_add_acl_hdr(skb, chan->handle, flags);
4313 break;
4314 default:
4315 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4316 return;
4317 }
087bfd99 4318
70f23020
AE
4319 list = skb_shinfo(skb)->frag_list;
4320 if (!list) {
1da177e4
LT
 4321 /* Non-fragmented */
4322 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4323
73d80deb 4324 skb_queue_tail(queue, skb);
1da177e4
LT
4325 } else {
4326 /* Fragmented */
4327 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4328
4329 skb_shinfo(skb)->frag_list = NULL;
4330
4331 /* Queue all fragments atomically */
af3e6359 4332 spin_lock(&queue->lock);
1da177e4 4333
73d80deb 4334 __skb_queue_tail(queue, skb);
e702112f
AE
4335
4336 flags &= ~ACL_START;
4337 flags |= ACL_CONT;
1da177e4
LT
4338 do {
4339 skb = list; list = list->next;
8e87d142 4340
0d48d939 4341 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 4342 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
4343
4344 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4345
73d80deb 4346 __skb_queue_tail(queue, skb);
1da177e4
LT
4347 } while (list);
4348
af3e6359 4349 spin_unlock(&queue->lock);
1da177e4 4350 }
73d80deb
LAD
4351}
4352
4353void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4354{
ee22be7e 4355 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 4356
f0e09510 4357 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb 4358
ee22be7e 4359 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 4360
3eff45ea 4361 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4362}
1da177e4
LT
4363
4364/* Send SCO data */
0d861d8b 4365void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
4366{
4367 struct hci_dev *hdev = conn->hdev;
4368 struct hci_sco_hdr hdr;
4369
4370 BT_DBG("%s len %d", hdev->name, skb->len);
4371
aca3192c 4372 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
4373 hdr.dlen = skb->len;
4374
badff6d0
ACM
4375 skb_push(skb, HCI_SCO_HDR_SIZE);
4376 skb_reset_transport_header(skb);
9c70220b 4377 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4 4378
0d48d939 4379 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 4380
1da177e4 4381 skb_queue_tail(&conn->data_q, skb);
3eff45ea 4382 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4383}
1da177e4
LT
4384
4385/* ---- HCI TX task (outgoing data) ---- */
4386
4387/* HCI Connection scheduler */
6039aa73
GP
4388static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4389 int *quote)
1da177e4
LT
4390{
4391 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4392 struct hci_conn *conn = NULL, *c;
abc5de8f 4393 unsigned int num = 0, min = ~0;
1da177e4 4394
8e87d142 4395 /* We don't have to lock the device here. Connections are always
1da177e4 4396 * added and removed with TX task disabled. */
bf4c6325
GP
4397
4398 rcu_read_lock();
4399
4400 list_for_each_entry_rcu(c, &h->list, list) {
769be974 4401 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 4402 continue;
769be974
MH
4403
4404 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4405 continue;
4406
1da177e4
LT
4407 num++;
4408
4409 if (c->sent < min) {
4410 min = c->sent;
4411 conn = c;
4412 }
52087a79
LAD
4413
4414 if (hci_conn_num(hdev, type) == num)
4415 break;
1da177e4
LT
4416 }
4417
bf4c6325
GP
4418 rcu_read_unlock();
4419
1da177e4 4420 if (conn) {
6ed58ec5
VT
4421 int cnt, q;
4422
4423 switch (conn->type) {
4424 case ACL_LINK:
4425 cnt = hdev->acl_cnt;
4426 break;
4427 case SCO_LINK:
4428 case ESCO_LINK:
4429 cnt = hdev->sco_cnt;
4430 break;
4431 case LE_LINK:
4432 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4433 break;
4434 default:
4435 cnt = 0;
4436 BT_ERR("Unknown link type");
4437 }
4438
4439 q = cnt / num;
1da177e4
LT
4440 *quote = q ? q : 1;
4441 } else
4442 *quote = 0;
4443
4444 BT_DBG("conn %p quote %d", conn, *quote);
4445 return conn;
4446}
4447
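/* Worked example for the quota above: with hdev->acl_cnt == 8 and three
 * connections holding queued data, the least-used connection gets
 * quote = 8 / 3 = 2 packets this round; a result of 0 is rounded up to 1
 * so a busy link is never starved outright.
 */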
6039aa73 4448static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
4449{
4450 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4451 struct hci_conn *c;
1da177e4 4452
bae1f5d9 4453 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 4454
bf4c6325
GP
4455 rcu_read_lock();
4456
1da177e4 4457 /* Kill stalled connections */
bf4c6325 4458 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 4459 if (c->type == type && c->sent) {
6ed93dc6
AE
4460 BT_ERR("%s killing stalled connection %pMR",
4461 hdev->name, &c->dst);
bed71748 4462 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
4463 }
4464 }
bf4c6325
GP
4465
4466 rcu_read_unlock();
1da177e4
LT
4467}
4468
6039aa73
GP
4469static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4470 int *quote)
1da177e4 4471{
73d80deb
LAD
4472 struct hci_conn_hash *h = &hdev->conn_hash;
4473 struct hci_chan *chan = NULL;
abc5de8f 4474 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 4475 struct hci_conn *conn;
73d80deb
LAD
4476 int cnt, q, conn_num = 0;
4477
4478 BT_DBG("%s", hdev->name);
4479
bf4c6325
GP
4480 rcu_read_lock();
4481
4482 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
4483 struct hci_chan *tmp;
4484
4485 if (conn->type != type)
4486 continue;
4487
4488 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4489 continue;
4490
4491 conn_num++;
4492
8192edef 4493 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
4494 struct sk_buff *skb;
4495
4496 if (skb_queue_empty(&tmp->data_q))
4497 continue;
4498
4499 skb = skb_peek(&tmp->data_q);
4500 if (skb->priority < cur_prio)
4501 continue;
4502
4503 if (skb->priority > cur_prio) {
4504 num = 0;
4505 min = ~0;
4506 cur_prio = skb->priority;
4507 }
4508
4509 num++;
4510
4511 if (conn->sent < min) {
4512 min = conn->sent;
4513 chan = tmp;
4514 }
4515 }
4516
4517 if (hci_conn_num(hdev, type) == conn_num)
4518 break;
4519 }
4520
bf4c6325
GP
4521 rcu_read_unlock();
4522
73d80deb
LAD
4523 if (!chan)
4524 return NULL;
4525
4526 switch (chan->conn->type) {
4527 case ACL_LINK:
4528 cnt = hdev->acl_cnt;
4529 break;
bd1eb66b
AE
4530 case AMP_LINK:
4531 cnt = hdev->block_cnt;
4532 break;
73d80deb
LAD
4533 case SCO_LINK:
4534 case ESCO_LINK:
4535 cnt = hdev->sco_cnt;
4536 break;
4537 case LE_LINK:
4538 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4539 break;
4540 default:
4541 cnt = 0;
4542 BT_ERR("Unknown link type");
4543 }
4544
4545 q = cnt / num;
4546 *quote = q ? q : 1;
4547 BT_DBG("chan %p quote %d", chan, *quote);
4548 return chan;
4549}
4550
02b20f0b
LAD
4551static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4552{
4553 struct hci_conn_hash *h = &hdev->conn_hash;
4554 struct hci_conn *conn;
4555 int num = 0;
4556
4557 BT_DBG("%s", hdev->name);
4558
bf4c6325
GP
4559 rcu_read_lock();
4560
4561 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
4562 struct hci_chan *chan;
4563
4564 if (conn->type != type)
4565 continue;
4566
4567 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4568 continue;
4569
4570 num++;
4571
8192edef 4572 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
4573 struct sk_buff *skb;
4574
4575 if (chan->sent) {
4576 chan->sent = 0;
4577 continue;
4578 }
4579
4580 if (skb_queue_empty(&chan->data_q))
4581 continue;
4582
4583 skb = skb_peek(&chan->data_q);
4584 if (skb->priority >= HCI_PRIO_MAX - 1)
4585 continue;
4586
4587 skb->priority = HCI_PRIO_MAX - 1;
4588
4589 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 4590 skb->priority);
02b20f0b
LAD
4591 }
4592
4593 if (hci_conn_num(hdev, type) == num)
4594 break;
4595 }
bf4c6325
GP
4596
4597 rcu_read_unlock();
4598
02b20f0b
LAD
4599}
4600
b71d385a
AE
4601static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4602{
4603 /* Calculate count of blocks used by this packet */
4604 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4605}
4606
6039aa73 4607static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 4608{
1da177e4
LT
4609 if (!test_bit(HCI_RAW, &hdev->flags)) {
4610 /* ACL tx timeout must be longer than maximum
4611 * link supervision timeout (40.9 seconds) */
63d2bc1b 4612 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 4613 HCI_ACL_TX_TIMEOUT))
bae1f5d9 4614 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 4615 }
63d2bc1b 4616}
1da177e4 4617
6039aa73 4618static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
4619{
4620 unsigned int cnt = hdev->acl_cnt;
4621 struct hci_chan *chan;
4622 struct sk_buff *skb;
4623 int quote;
4624
4625 __check_timeout(hdev, cnt);
04837f64 4626
73d80deb 4627 while (hdev->acl_cnt &&
a8c5fb1a 4628 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
4629 u32 priority = (skb_peek(&chan->data_q))->priority;
4630 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4631 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4632 skb->len, skb->priority);
73d80deb 4633
ec1cce24
LAD
4634 /* Stop if priority has changed */
4635 if (skb->priority < priority)
4636 break;
4637
4638 skb = skb_dequeue(&chan->data_q);
4639
73d80deb 4640 hci_conn_enter_active_mode(chan->conn,
04124681 4641 bt_cb(skb)->force_active);
04837f64 4642
57d17d70 4643 hci_send_frame(hdev, skb);
1da177e4
LT
4644 hdev->acl_last_tx = jiffies;
4645
4646 hdev->acl_cnt--;
73d80deb
LAD
4647 chan->sent++;
4648 chan->conn->sent++;
1da177e4
LT
4649 }
4650 }
02b20f0b
LAD
4651
4652 if (cnt != hdev->acl_cnt)
4653 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
4654}
4655
6039aa73 4656static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 4657{
63d2bc1b 4658 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
4659 struct hci_chan *chan;
4660 struct sk_buff *skb;
4661 int quote;
bd1eb66b 4662 u8 type;
b71d385a 4663
63d2bc1b 4664 __check_timeout(hdev, cnt);
b71d385a 4665
bd1eb66b
AE
4666 BT_DBG("%s", hdev->name);
4667
4668 if (hdev->dev_type == HCI_AMP)
4669 type = AMP_LINK;
4670 else
4671 type = ACL_LINK;
4672
b71d385a 4673 while (hdev->block_cnt > 0 &&
bd1eb66b 4674 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
4675 u32 priority = (skb_peek(&chan->data_q))->priority;
4676 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4677 int blocks;
4678
4679 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4680 skb->len, skb->priority);
b71d385a
AE
4681
4682 /* Stop if priority has changed */
4683 if (skb->priority < priority)
4684 break;
4685
4686 skb = skb_dequeue(&chan->data_q);
4687
4688 blocks = __get_blocks(hdev, skb);
4689 if (blocks > hdev->block_cnt)
4690 return;
4691
4692 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 4693 bt_cb(skb)->force_active);
b71d385a 4694
57d17d70 4695 hci_send_frame(hdev, skb);
b71d385a
AE
4696 hdev->acl_last_tx = jiffies;
4697
4698 hdev->block_cnt -= blocks;
4699 quote -= blocks;
4700
4701 chan->sent += blocks;
4702 chan->conn->sent += blocks;
4703 }
4704 }
4705
4706 if (cnt != hdev->block_cnt)
bd1eb66b 4707 hci_prio_recalculate(hdev, type);
b71d385a
AE
4708}
4709
6039aa73 4710static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
4711{
4712 BT_DBG("%s", hdev->name);
4713
bd1eb66b
AE
4714 /* No ACL link over BR/EDR controller */
4715 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4716 return;
4717
4718 /* No AMP link over AMP controller */
4719 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
4720 return;
4721
4722 switch (hdev->flow_ctl_mode) {
4723 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4724 hci_sched_acl_pkt(hdev);
4725 break;
4726
4727 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4728 hci_sched_acl_blk(hdev);
4729 break;
4730 }
4731}
4732
1da177e4 4733/* Schedule SCO */
6039aa73 4734static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
4735{
4736 struct hci_conn *conn;
4737 struct sk_buff *skb;
4738 int quote;
4739
4740 BT_DBG("%s", hdev->name);
4741
52087a79
LAD
4742 if (!hci_conn_num(hdev, SCO_LINK))
4743 return;
4744
1da177e4
LT
4745 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4746 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4747 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4748 hci_send_frame(hdev, skb);
1da177e4
LT
4749
4750 conn->sent++;
4751 if (conn->sent == ~0)
4752 conn->sent = 0;
4753 }
4754 }
4755}
4756
6039aa73 4757static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
4758{
4759 struct hci_conn *conn;
4760 struct sk_buff *skb;
4761 int quote;
4762
4763 BT_DBG("%s", hdev->name);
4764
52087a79
LAD
4765 if (!hci_conn_num(hdev, ESCO_LINK))
4766 return;
4767
8fc9ced3
GP
4768 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4769 &quote))) {
b6a0dc82
MH
4770 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4771 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4772 hci_send_frame(hdev, skb);
b6a0dc82
MH
4773
4774 conn->sent++;
4775 if (conn->sent == ~0)
4776 conn->sent = 0;
4777 }
4778 }
4779}
4780
6039aa73 4781static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 4782{
73d80deb 4783 struct hci_chan *chan;
6ed58ec5 4784 struct sk_buff *skb;
02b20f0b 4785 int quote, cnt, tmp;
6ed58ec5
VT
4786
4787 BT_DBG("%s", hdev->name);
4788
52087a79
LAD
4789 if (!hci_conn_num(hdev, LE_LINK))
4790 return;
4791
6ed58ec5
VT
4792 if (!test_bit(HCI_RAW, &hdev->flags)) {
4793 /* LE tx timeout must be longer than maximum
4794 * link supervision timeout (40.9 seconds) */
bae1f5d9 4795 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 4796 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 4797 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
4798 }
4799
4800 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 4801 tmp = cnt;
73d80deb 4802 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
4803 u32 priority = (skb_peek(&chan->data_q))->priority;
4804 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4805 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4806 skb->len, skb->priority);
6ed58ec5 4807
ec1cce24
LAD
4808 /* Stop if priority has changed */
4809 if (skb->priority < priority)
4810 break;
4811
4812 skb = skb_dequeue(&chan->data_q);
4813
57d17d70 4814 hci_send_frame(hdev, skb);
6ed58ec5
VT
4815 hdev->le_last_tx = jiffies;
4816
4817 cnt--;
73d80deb
LAD
4818 chan->sent++;
4819 chan->conn->sent++;
6ed58ec5
VT
4820 }
4821 }
73d80deb 4822
6ed58ec5
VT
4823 if (hdev->le_pkts)
4824 hdev->le_cnt = cnt;
4825 else
4826 hdev->acl_cnt = cnt;
02b20f0b
LAD
4827
4828 if (cnt != tmp)
4829 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
4830}
4831
3eff45ea 4832static void hci_tx_work(struct work_struct *work)
1da177e4 4833{
3eff45ea 4834 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
4835 struct sk_buff *skb;
4836
6ed58ec5 4837 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 4838 hdev->sco_cnt, hdev->le_cnt);
1da177e4 4839
52de599e
MH
4840 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4841 /* Schedule queues and send stuff to HCI driver */
4842 hci_sched_acl(hdev);
4843 hci_sched_sco(hdev);
4844 hci_sched_esco(hdev);
4845 hci_sched_le(hdev);
4846 }
6ed58ec5 4847
1da177e4
LT
4848 /* Send next queued raw (unknown type) packet */
4849 while ((skb = skb_dequeue(&hdev->raw_q)))
57d17d70 4850 hci_send_frame(hdev, skb);
1da177e4
LT
4851}
4852
25985edc 4853/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
4854
4855/* ACL data packet */
6039aa73 4856static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
4857{
4858 struct hci_acl_hdr *hdr = (void *) skb->data;
4859 struct hci_conn *conn;
4860 __u16 handle, flags;
4861
4862 skb_pull(skb, HCI_ACL_HDR_SIZE);
4863
4864 handle = __le16_to_cpu(hdr->handle);
4865 flags = hci_flags(handle);
4866 handle = hci_handle(handle);
4867
f0e09510 4868 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 4869 handle, flags);
1da177e4
LT
4870
4871 hdev->stat.acl_rx++;
4872
4873 hci_dev_lock(hdev);
4874 conn = hci_conn_hash_lookup_handle(hdev, handle);
4875 hci_dev_unlock(hdev);
8e87d142 4876
1da177e4 4877 if (conn) {
65983fc7 4878 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 4879
1da177e4 4880 /* Send to upper protocol */
686ebf28
UF
4881 l2cap_recv_acldata(conn, skb, flags);
4882 return;
1da177e4 4883 } else {
8e87d142 4884 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 4885 hdev->name, handle);
1da177e4
LT
4886 }
4887
4888 kfree_skb(skb);
4889}
4890
4891/* SCO data packet */
6039aa73 4892static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
4893{
4894 struct hci_sco_hdr *hdr = (void *) skb->data;
4895 struct hci_conn *conn;
4896 __u16 handle;
4897
4898 skb_pull(skb, HCI_SCO_HDR_SIZE);
4899
4900 handle = __le16_to_cpu(hdr->handle);
4901
f0e09510 4902 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
4903
4904 hdev->stat.sco_rx++;
4905
4906 hci_dev_lock(hdev);
4907 conn = hci_conn_hash_lookup_handle(hdev, handle);
4908 hci_dev_unlock(hdev);
4909
4910 if (conn) {
1da177e4 4911 /* Send to upper protocol */
686ebf28
UF
4912 sco_recv_scodata(conn, skb);
4913 return;
1da177e4 4914 } else {
8e87d142 4915 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 4916 hdev->name, handle);
1da177e4
LT
4917 }
4918
4919 kfree_skb(skb);
4920}
4921
9238f36a
JH
4922static bool hci_req_is_complete(struct hci_dev *hdev)
4923{
4924 struct sk_buff *skb;
4925
4926 skb = skb_peek(&hdev->cmd_q);
4927 if (!skb)
4928 return true;
4929
4930 return bt_cb(skb)->req.start;
4931}
4932
42c6b129
JH
4933static void hci_resend_last(struct hci_dev *hdev)
4934{
4935 struct hci_command_hdr *sent;
4936 struct sk_buff *skb;
4937 u16 opcode;
4938
4939 if (!hdev->sent_cmd)
4940 return;
4941
4942 sent = (void *) hdev->sent_cmd->data;
4943 opcode = __le16_to_cpu(sent->opcode);
4944 if (opcode == HCI_OP_RESET)
4945 return;
4946
4947 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4948 if (!skb)
4949 return;
4950
4951 skb_queue_head(&hdev->cmd_q, skb);
4952 queue_work(hdev->workqueue, &hdev->cmd_work);
4953}
4954
9238f36a
JH
4955void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4956{
4957 hci_req_complete_t req_complete = NULL;
4958 struct sk_buff *skb;
4959 unsigned long flags;
4960
4961 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4962
42c6b129
JH
4963 /* If the completed command doesn't match the last one that was
4964 * sent we need to do special handling of it.
9238f36a 4965 */
42c6b129
JH
4966 if (!hci_sent_cmd_data(hdev, opcode)) {
4967 /* Some CSR based controllers generate a spontaneous
4968 * reset complete event during init and any pending
4969 * command will never be completed. In such a case we
4970 * need to resend whatever was the last sent
4971 * command.
4972 */
4973 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4974 hci_resend_last(hdev);
4975
9238f36a 4976 return;
42c6b129 4977 }
9238f36a
JH
4978
4979 /* If the command succeeded and there's still more commands in
4980 * this request the request is not yet complete.
4981 */
4982 if (!status && !hci_req_is_complete(hdev))
4983 return;
4984
4985 /* If this was the last command in a request the complete
4986 * callback would be found in hdev->sent_cmd instead of the
4987 * command queue (hdev->cmd_q).
4988 */
4989 if (hdev->sent_cmd) {
4990 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
53e21fbc
JH
4991
4992 if (req_complete) {
4993 /* We must set the complete callback to NULL to
4994 * avoid calling the callback more than once if
4995 * this function gets called again.
4996 */
4997 bt_cb(hdev->sent_cmd)->req.complete = NULL;
4998
9238f36a 4999 goto call_complete;
53e21fbc 5000 }
9238f36a
JH
5001 }
5002
5003 /* Remove all pending commands belonging to this request */
5004 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5005 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5006 if (bt_cb(skb)->req.start) {
5007 __skb_queue_head(&hdev->cmd_q, skb);
5008 break;
5009 }
5010
5011 req_complete = bt_cb(skb)->req.complete;
5012 kfree_skb(skb);
5013 }
5014 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5015
5016call_complete:
5017 if (req_complete)
5018 req_complete(hdev, status);
5019}
5020
b78752cc 5021static void hci_rx_work(struct work_struct *work)
1da177e4 5022{
b78752cc 5023 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
5024 struct sk_buff *skb;
5025
5026 BT_DBG("%s", hdev->name);
5027
1da177e4 5028 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
5029 /* Send copy to monitor */
5030 hci_send_to_monitor(hdev, skb);
5031
1da177e4
LT
5032 if (atomic_read(&hdev->promisc)) {
5033 /* Send copy to the sockets */
470fe1b5 5034 hci_send_to_sock(hdev, skb);
1da177e4
LT
5035 }
5036
0736cfa8
MH
5037 if (test_bit(HCI_RAW, &hdev->flags) ||
5038 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1da177e4
LT
5039 kfree_skb(skb);
5040 continue;
5041 }
5042
5043 if (test_bit(HCI_INIT, &hdev->flags)) {
 5044 /* Don't process data packets in this state. */
0d48d939 5045 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
5046 case HCI_ACLDATA_PKT:
5047 case HCI_SCODATA_PKT:
5048 kfree_skb(skb);
5049 continue;
3ff50b79 5050 }
1da177e4
LT
5051 }
5052
5053 /* Process frame */
0d48d939 5054 switch (bt_cb(skb)->pkt_type) {
1da177e4 5055 case HCI_EVENT_PKT:
b78752cc 5056 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
5057 hci_event_packet(hdev, skb);
5058 break;
5059
5060 case HCI_ACLDATA_PKT:
5061 BT_DBG("%s ACL data packet", hdev->name);
5062 hci_acldata_packet(hdev, skb);
5063 break;
5064
5065 case HCI_SCODATA_PKT:
5066 BT_DBG("%s SCO data packet", hdev->name);
5067 hci_scodata_packet(hdev, skb);
5068 break;
5069
5070 default:
5071 kfree_skb(skb);
5072 break;
5073 }
5074 }
1da177e4
LT
5075}
5076
c347b765 5077static void hci_cmd_work(struct work_struct *work)
1da177e4 5078{
c347b765 5079 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
5080 struct sk_buff *skb;
5081
2104786b
AE
5082 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5083 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 5084
1da177e4 5085 /* Send queued commands */
5a08ecce
AE
5086 if (atomic_read(&hdev->cmd_cnt)) {
5087 skb = skb_dequeue(&hdev->cmd_q);
5088 if (!skb)
5089 return;
5090
7585b97a 5091 kfree_skb(hdev->sent_cmd);
1da177e4 5092
a675d7f1 5093 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 5094 if (hdev->sent_cmd) {
1da177e4 5095 atomic_dec(&hdev->cmd_cnt);
57d17d70 5096 hci_send_frame(hdev, skb);
7bdb8a5c
SJ
5097 if (test_bit(HCI_RESET, &hdev->flags))
5098 del_timer(&hdev->cmd_timer);
5099 else
5100 mod_timer(&hdev->cmd_timer,
5f246e89 5101 jiffies + HCI_CMD_TIMEOUT);
1da177e4
LT
5102 } else {
5103 skb_queue_head(&hdev->cmd_q, skb);
c347b765 5104 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
5105 }
5106 }
5107}
b1efcc28
AG
5108
5109void hci_req_add_le_scan_disable(struct hci_request *req)
5110{
5111 struct hci_cp_le_set_scan_enable cp;
5112
5113 memset(&cp, 0, sizeof(cp));
5114 cp.enable = LE_SCAN_DISABLE;
5115 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5116}
a4790dbd 5117
8ef30fd3
AG
5118void hci_req_add_le_passive_scan(struct hci_request *req)
5119{
5120 struct hci_cp_le_set_scan_param param_cp;
5121 struct hci_cp_le_set_scan_enable enable_cp;
5122 struct hci_dev *hdev = req->hdev;
5123 u8 own_addr_type;
5124
5125 /* Set require_privacy to true to avoid identification from
5126 * unknown peer devices. Since this is passive scanning, no
5127 * SCAN_REQ using the local identity should be sent. Mandating
5128 * privacy is just an extra precaution.
5129 */
5130 if (hci_update_random_address(req, true, &own_addr_type))
5131 return;
5132
5133 memset(&param_cp, 0, sizeof(param_cp));
5134 param_cp.type = LE_SCAN_PASSIVE;
5135 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5136 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5137 param_cp.own_address_type = own_addr_type;
5138 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5139 &param_cp);
5140
5141 memset(&enable_cp, 0, sizeof(enable_cp));
5142 enable_cp.enable = LE_SCAN_ENABLE;
5143 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
5144 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5145 &enable_cp);
5146}
5147
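/* Illustrative aside, not part of hci_core.c: the two helpers above are
 * meant to be composed into one request, as hci_update_background_scan()
 * below does. Re-programming a running scan would look like (my_complete
 * is hypothetical):
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add_le_scan_disable(&req);
 *	hci_req_add_le_passive_scan(&req);
 *	hci_req_run(&req, my_complete);
 */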
a4790dbd
AG
5148static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5149{
5150 if (status)
5151 BT_DBG("HCI request failed to update background scanning: "
5152 "status 0x%2.2x", status);
5153}
5154
5155/* This function controls the background scanning based on hdev->pend_le_conns
 5156 * list. If there are pending LE connections we start the background scanning,
5157 * otherwise we stop it.
5158 *
5159 * This function requires the caller holds hdev->lock.
5160 */
5161void hci_update_background_scan(struct hci_dev *hdev)
5162{
a4790dbd
AG
5163 struct hci_request req;
5164 struct hci_conn *conn;
5165 int err;
5166
5167 hci_req_init(&req, hdev);
5168
5169 if (list_empty(&hdev->pend_le_conns)) {
 5170 /* If there are no pending LE connections, we should stop
5171 * the background scanning.
5172 */
5173
5174 /* If controller is not scanning we are done. */
5175 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5176 return;
5177
5178 hci_req_add_le_scan_disable(&req);
5179
5180 BT_DBG("%s stopping background scanning", hdev->name);
5181 } else {
a4790dbd
AG
5182 /* If there is at least one pending LE connection, we should
5183 * keep the background scan running.
5184 */
5185
5186 /* If controller is already scanning we are done. */
5187 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5188 return;
5189
5190 /* If controller is connecting, we should not start scanning
5191 * since some controllers are not able to scan and connect at
5192 * the same time.
5193 */
5194 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5195 if (conn)
5196 return;
5197
8ef30fd3 5198 hci_req_add_le_passive_scan(&req);
a4790dbd
AG
5199
5200 BT_DBG("%s starting background scanning", hdev->name);
5201 }
5202
5203 err = hci_req_run(&req, update_background_scan_complete);
5204 if (err)
5205 BT_ERR("Failed to run HCI request: err %d", err);
5206}