/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dev_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

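/* Note: dut_mode_write() above illustrates the pattern shared by the
 * writable boolean debugfs entries in this file: copy a 'Y'/'N' (or 0/1)
 * string from userspace, parse it with strtobool(), bail out with
 * -EALREADY when nothing would change, and only then touch the hardware
 * or the flag bit. DUT mode additionally has to round-trip through the
 * controller, which is why it issues HCI_OP_ENABLE_DUT_MODE (or a reset
 * to leave the mode) under hci_req_lock() before flipping HCI_DUT_MODE.
 */
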
static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

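/* Note: features_fops is the first of several read-only entries that
 * follow the standard single_open() seq_file idiom: a *_show() callback
 * prints the state under hci_dev_lock(), a *_open() wrapper binds it to
 * inode->i_private (the hci_dev), and seq_read/seq_lseek/single_release
 * do the rest. The blacklist, uuids, inquiry_cache, link_keys and
 * dev_class entries below only differ in their show() body.
 */
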
static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

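/* Note: DEFINE_SIMPLE_ATTRIBUTE() generates the file_operations for a
 * debugfs attribute from a get()/set() pair and a printf-style format
 * for the u64 value; passing NULL for set(), as voice_setting does here,
 * yields a read-only attribute. A minimal sketch of another read-only
 * attribute built the same way (the "page_scan" name is hypothetical):
 *
 *	static int page_scan_get(void *data, u64 *val)
 *	{
 *		struct hci_dev *hdev = data;
 *
 *		*val = hdev->page_scan_type;
 *		return 0;
 *	}
 *
 *	DEFINE_SIMPLE_ATTRIBUTE(page_scan_fops, page_scan_get,
 *				NULL, "0x%2.2llx\n");
 */
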
static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static int ssp_debug_mode_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;
	struct sk_buff *skb;
	__u8 mode;
	int err;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	hci_req_lock(hdev);
	mode = val;
	skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
			     &mode, HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	hci_dev_lock(hdev);
	hdev->ssp_debug_mode = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->ssp_debug_mode;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
			ssp_debug_mode_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
		struct smp_irk *irk = list_entry(p, struct smp_irk, list);
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %*phN %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   8, ltk->rand, 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

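/* Note: the 0x0006-0x0c80 bounds checked above come from the LE
 * connection interval definition in the Bluetooth specification:
 * interval values are in units of 1.25 ms, so the allowed range is
 * 7.5 ms to 4 s, and the minimum may never exceed the maximum.
 */
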
static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");

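/* Note: the 0x01-0x07 range enforced above is the LE advertising
 * channel map bitfield: bit 0 enables channel 37, bit 1 channel 38 and
 * bit 2 channel 39, so 0x07 advertises on all three channels and 0x00
 * (no channels at all) is rejected.
 */
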
static ssize_t lowpan_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
			    size_t count, loff_t *position)
{
	struct hci_dev *hdev = fp->private_data;
	bool enable;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));

	if (copy_from_user(buf, user_buffer, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

	if (strtobool(buf, &enable) < 0)
		return -EINVAL;

	if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);

	return count;
}

static const struct file_operations lowpan_debugfs_fops = {
	.open		= simple_open,
	.read		= lowpan_read,
	.write		= lowpan_write,
	.llseek		= default_llseek,
};

static int le_auto_conn_show(struct seq_file *sf, void *ptr)
{
	struct hci_dev *hdev = sf->private;
	struct hci_conn_params *p;

	hci_dev_lock(hdev);

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(sf, "%pMR %u %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int le_auto_conn_open(struct inode *inode, struct file *file)
{
	return single_open(file, le_auto_conn_show, inode->i_private);
}

static ssize_t le_auto_conn_write(struct file *file, const char __user *data,
				  size_t count, loff_t *offset)
{
	struct seq_file *sf = file->private_data;
	struct hci_dev *hdev = sf->private;
	u8 auto_connect = 0;
	bdaddr_t addr;
	u8 addr_type;
	char *buf;
	int err = 0;
	int n;

	/* Don't allow partial write */
	if (*offset != 0)
		return -EINVAL;

	if (count < 3)
		return -EINVAL;

	/* Allocate one extra byte so the buffer is always NUL-terminated
	 * before it is handed to sscanf() below.
	 */
	buf = kzalloc(count + 1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, data, count)) {
		err = -EFAULT;
		goto done;
	}

	if (memcmp(buf, "add", 3) == 0) {
		n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu",
			   &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
			   &addr.b[1], &addr.b[0], &addr_type,
			   &auto_connect);

		if (n < 7) {
			err = -EINVAL;
			goto done;
		}

		hci_dev_lock(hdev);
		err = hci_conn_params_add(hdev, &addr, addr_type, auto_connect,
					  hdev->le_conn_min_interval,
					  hdev->le_conn_max_interval);
		hci_dev_unlock(hdev);

		if (err)
			goto done;
	} else if (memcmp(buf, "del", 3) == 0) {
		n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
			   &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
			   &addr.b[1], &addr.b[0], &addr_type);

		if (n < 7) {
			err = -EINVAL;
			goto done;
		}

		hci_dev_lock(hdev);
		hci_conn_params_del(hdev, &addr, addr_type);
		hci_dev_unlock(hdev);
	} else if (memcmp(buf, "clr", 3) == 0) {
		hci_dev_lock(hdev);
		hci_conn_params_clear(hdev);
		hci_pend_le_conns_clear(hdev);
		hci_update_background_scan(hdev);
		hci_dev_unlock(hdev);
	} else {
		err = -EINVAL;
	}

done:
	kfree(buf);

	if (err)
		return err;
	else
		return count;
}

static const struct file_operations le_auto_conn_fops = {
	.open		= le_auto_conn_open,
	.read		= seq_read,
	.write		= le_auto_conn_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};

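/* Note: le_auto_conn_write() parses its input with sscanf(), so the
 * accepted commands are plain text. An illustrative session (address
 * and values made up) might write the following strings to the file:
 *
 *	"add 11:22:33:44:55:66 1 2"   add params for a random (type 1)
 *	                              address with auto_connect 2
 *	"del 11:22:33:44:55:66 1"     remove that entry again
 *	"clr"                         drop all connection parameters
 *
 * The address is scanned most-significant byte first into
 * addr.b[5]..addr.b[0], matching the %pMR output of le_auto_conn_show().
 */
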
/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

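/* Note: __hci_cmd_sync() gives callers a blocking "send command, wait
 * for the Command Complete event" primitive. A minimal sketch of a
 * caller (error handling trimmed; the caller holds the request lock,
 * as dut_mode_write() above does):
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	// skb->data now holds the command's return parameters,
 *	// typically starting with the status byte
 *	kfree_skb(skb);
 *
 * The returned skb is the event payload with the headers pulled off;
 * the caller owns it and must free it with kfree_skb().
 */
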
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev))
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev))
		hci_set_le_support(req);

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}

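/* Controller initialization runs in up to four synchronous stages:
 * stage 1 (hci_init1_req) resets the controller and reads the basic
 * identity information that later stages depend on, stage 2 configures
 * BR/EDR and LE basics plus the event mask, and stages 3 and 4 only
 * send commands whose support was discovered in the earlier stages
 * (supported-commands bits, extended feature pages). AMP controllers
 * stop after stage 1.
 */
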
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
				    hdev, &ssp_debug_mode_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_file("identity", 0400, hdev->debugfs,
				    hdev, &identity_fops);
		debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
				    hdev, &rpa_timeout_fops);
		debugfs_create_file("random_address", 0444, hdev->debugfs,
				    hdev, &random_address_fops);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

		/* For controllers with a public address, provide a debug
		 * option to force the usage of the configured static
		 * address. By default the public address is used.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			debugfs_create_file("force_static_address", 0644,
					    hdev->debugfs, hdev,
					    &force_static_address_fops);

		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("identity_resolving_keys", 0400,
				    hdev->debugfs, hdev,
				    &identity_resolving_keys_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
				    hdev, &adv_channel_map_fops);
		debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
				    &lowpan_debugfs_fops);
		debugfs_create_file("le_auto_conn", 0644, hdev->debugfs, hdev,
				    &le_auto_conn_fops);
	}

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

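/* Note: these small request builders are meant to be handed to
 * hci_req_sync(), which serializes them against other requests and
 * blocks until the controller answers. An illustrative call (the ioctl
 * handlers elsewhere in this file use the same shape):
 *
 *	err = hci_req_sync(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *			   HCI_INIT_TIMEOUT);
 */
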
/* Get HCI device by index.
 * Device is held on return.
 */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

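/* Note: hci_dev_get() takes a reference via hci_dev_hold(), so every
 * successful lookup must eventually be balanced by hci_dev_put():
 *
 *	struct hci_dev *hdev = hci_dev_get(index);
 *	if (!hdev)
 *		return -ENODEV;
 *	...
 *	hci_dev_put(hdev);
 */
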
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

1911
1f9b9a5d 1912void hci_inquiry_cache_flush(struct hci_dev *hdev)
1da177e4 1913{
30883512 1914 struct discovery_state *cache = &hdev->discovery;
b57c1a56 1915 struct inquiry_entry *p, *n;
1da177e4 1916
561aafbc
JH
1917 list_for_each_entry_safe(p, n, &cache->all, all) {
1918 list_del(&p->all);
b57c1a56 1919 kfree(p);
1da177e4 1920 }
561aafbc
JH
1921
1922 INIT_LIST_HEAD(&cache->unknown);
1923 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
1924}
1925
a8c5fb1a
GP
1926struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1927 bdaddr_t *bdaddr)
1da177e4 1928{
30883512 1929 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
1930 struct inquiry_entry *e;
1931
6ed93dc6 1932 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 1933
561aafbc
JH
1934 list_for_each_entry(e, &cache->all, all) {
1935 if (!bacmp(&e->data.bdaddr, bdaddr))
1936 return e;
1937 }
1938
1939 return NULL;
1940}
1941
1942struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 1943 bdaddr_t *bdaddr)
561aafbc 1944{
30883512 1945 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
1946 struct inquiry_entry *e;
1947
6ed93dc6 1948 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
1949
1950 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 1951 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
1952 return e;
1953 }
1954
1955 return NULL;
1da177e4
LT
1956}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
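
/* Editor's note (added for clarity): the boolean result above tells the
 * event handlers whether the remote name is already known; a false return
 * means the entry sits on the "unknown" list and a remote name request
 * round is still outstanding before discovery can complete.
 */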

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}
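
/* Editor's note: wait_inquiry() is the bit-wait action handed to
 * wait_on_bit() below; it sleeps until the bit owner wakes us and returns
 * non-zero when a signal is pending, which is what turns an interrupted
 * inquiry into -EINTR for the caller.
 */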

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses, use a buffer with room
	 * for 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep, so allocate a temporary buffer and copy
	 * it to user space afterwards.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
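
/* Illustrative sketch (editor's addition, not part of the original file):
 * hci_inquiry() backs the HCIINQUIRY ioctl. A userspace caller would do
 * roughly the following (error handling omitted; GIAC inquiry, length 8):
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[255];
 *	} buf = { 0 };
 *
 *	buf.ir.dev_id  = 0;
 *	buf.ir.length  = 8;
 *	buf.ir.num_rsp = 255;
 *	buf.ir.flags   = IREQ_CACHE_FLUSH;
 *	memcpy(buf.ir.lap, (__u8 []){ 0x33, 0x8b, 0x9e }, 3);
 *
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	ioctl(dd, HCIINQUIRY, &buf);
 *
 * On return, buf.ir.num_rsp holds the number of inquiry_info records
 * placed directly after the request header.
 */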

static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for a valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_pend_le_conns_clear(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
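
/* Editor's note (illustrative, not part of the original file): for
 * HCISETACLMTU and HCISETSCOMTU the 32-bit dev_opt carries two 16-bit
 * values which the code above pulls apart with pointer arithmetic. On a
 * little-endian machine the packet count sits in the low half and the MTU
 * in the high half, so a caller packing acl_mtu 1021 / acl_pkts 8 would do:
 *
 *	struct hci_dev_req dr = { .dev_id = 0 };
 *
 *	dr.dev_opt = (1021 << 16) | 8;
 *	ioctl(dd, HCISETACLMTU, &dr);
 */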

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		list_del(&k->list);
		kfree(k);
	}
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither the local nor the remote side requested no-bonding */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
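
/* Editor's summary (added for clarity): in rough terms, a link key is kept
 * persistently if it is a legacy key, if it was created without a
 * connection (security mode 3), or if at least one side required some form
 * of bonding; debug keys and changed-combination keys with no prior key
 * are never stored.
 */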

static bool ltk_type_master(u8 type)
{
	if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
		return true;

	return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8],
			     bool master)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		if (ltk_type_master(k->type) != master)
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type, bool master)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0 &&
		    ltk_type_master(k->type) == master)
			return k;

	return NULL;
}

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa))
			return irk;
	}

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			return irk;
		}
	}

	return NULL;
}
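
/* Editor's note: the lookup above is deliberately two-pass. The first pass
 * is a cheap comparison against the RPA cached in each IRK entry; only if
 * that misses does the second pass run the AES-based smp_irk_matches()
 * check, caching the resolved RPA on a hit so the next lookup for the same
 * address takes the fast path.
 */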

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0)
			return irk;
	}

	return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;
	bool master = ltk_type_master(type);

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k, *tmp;
	int removed = 0;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 *hash, u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));

	memset(data->hash256, 0, sizeof(data->hash256));
	memset(data->randomizer256, 0, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
				u8 *hash192, u8 *randomizer192,
				u8 *hash256, u8 *randomizer256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash192, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));

	memcpy(data->hash256, hash256, sizeof(data->hash256));
	memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
					 bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

static void hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_blacklist_clear(hdev);
		return 0;
	}

	entry = hci_blacklist_lookup(hdev, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}

AG
3294/* This function requires the caller holds hdev->lock */
3295struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3296 bdaddr_t *addr, u8 addr_type)
3297{
3298 struct hci_conn_params *params;
3299
3300 list_for_each_entry(params, &hdev->le_conn_params, list) {
3301 if (bacmp(&params->addr, addr) == 0 &&
3302 params->addr_type == addr_type) {
3303 return params;
3304 }
3305 }
3306
3307 return NULL;
3308}
3309
cef952ce
AG
3310static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3311{
3312 struct hci_conn *conn;
3313
3314 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3315 if (!conn)
3316 return false;
3317
3318 if (conn->dst_type != type)
3319 return false;
3320
3321 if (conn->state != BT_CONNECTED)
3322 return false;
3323
3324 return true;
3325}
3326
a9b0a04c
AG
3327static bool is_identity_address(bdaddr_t *addr, u8 addr_type)
3328{
3329 if (addr_type == ADDR_LE_DEV_PUBLIC)
3330 return true;
3331
3332 /* Check for Random Static address type */
3333 if ((addr->b[5] & 0xc0) == 0xc0)
3334 return true;
3335
3336 return false;
3337}
3338
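/* Editor's note (added for clarity): random static addresses are marked by
 * the two most significant bits of the address being 11, i.e. a first
 * octet of 0xC0-0xFF on air. bdaddr_t stores the address in little-endian
 * order, so b[5] is that most significant octet, hence the
 * (addr->b[5] & 0xc0) == 0xc0 test above.
 */
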
/* This function requires the caller holds hdev->lock */
int hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
			u8 auto_connect, u16 conn_min_interval,
			u16 conn_max_interval)
{
	struct hci_conn_params *params;

	if (!is_identity_address(addr, addr_type))
		return -EINVAL;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		goto update;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return -ENOMEM;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);

update:
	params->conn_min_interval = conn_min_interval;
	params->conn_max_interval = conn_max_interval;
	params->auto_connect = auto_connect;

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		hci_pend_le_conn_del(hdev, addr, addr_type);
		break;
	case HCI_AUTO_CONN_ALWAYS:
		if (!is_connected(hdev, addr, addr_type))
			hci_pend_le_conn_add(hdev, addr, addr_type);
		break;
	}

	BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x "
	       "conn_max_interval 0x%.4x", addr, addr_type, auto_connect,
	       conn_min_interval, conn_max_interval);

	return 0;
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_pend_le_conn_del(hdev, addr, addr_type);

	list_del(&params->list);
	kfree(params);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE connection parameters were removed");
}

/* This function requires the caller holds hdev->lock */
struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct bdaddr_list *entry;

	list_for_each_entry(entry, &hdev->pend_le_conns, list) {
		if (bacmp(&entry->bdaddr, addr) == 0 &&
		    entry->bdaddr_type == addr_type)
			return entry;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct bdaddr_list *entry;

	entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
	if (entry)
		goto done;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		BT_ERR("Out of memory");
		return;
	}

	bacpy(&entry->bdaddr, addr);
	entry->bdaddr_type = addr_type;

	list_add(&entry->list, &hdev->pend_le_conns);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

done:
	hci_update_background_scan(hdev);
}

/* This function requires the caller holds hdev->lock */
void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct bdaddr_list *entry;

	entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
	if (!entry)
		goto done;

	list_del(&entry->list);
	kfree(entry);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

done:
	hci_update_background_scan(hdev);
}

/* This function requires the caller holds hdev->lock */
void hci_pend_le_conns_clear(struct hci_dev *hdev)
{
	struct bdaddr_list *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	BT_DBG("All LE pending connections cleared");
}

static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}

int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * the current RPA has expired or there is something else than
	 * the current RPA in use, then generate a new one.
	 */
	if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use an unresolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t urpa;

		get_random_bytes(&urpa, 6);
		urpa.b[5] &= 0x3f;	/* Clear two most significant bits */

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, &urpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 */
	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
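
/* Editor's summary (added for clarity): the selection order above is, in
 * priority: (1) a resolvable private address when privacy is enabled, (2) a
 * freshly generated non-resolvable private address when privacy is required
 * but unavailable, (3) the static random address when forced or when no
 * public address exists, and (4) the public address otherwise.
 */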

/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_adv_channel_map = 0x07;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

1da177e4
LT
3728/* Register HCI device */
3729int hci_register_dev(struct hci_dev *hdev)
3730{
b1b813d4 3731 int id, error;
1da177e4 3732
010666a1 3733 if (!hdev->open || !hdev->close)
1da177e4
LT
3734 return -EINVAL;
3735
08add513
MM
3736 /* Do not allow HCI_AMP devices to register at index 0,
3737 * so the index can be used as the AMP controller ID.
3738 */
3df92b31
SL
3739 switch (hdev->dev_type) {
3740 case HCI_BREDR:
3741 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3742 break;
3743 case HCI_AMP:
3744 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3745 break;
3746 default:
3747 return -EINVAL;
1da177e4 3748 }
8e87d142 3749
3df92b31
SL
3750 if (id < 0)
3751 return id;
3752
1da177e4
LT
3753 sprintf(hdev->name, "hci%d", id);
3754 hdev->id = id;
2d8b3a11
AE
3755
3756 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3757
d8537548
KC
3758 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3759 WQ_MEM_RECLAIM, 1, hdev->name);
33ca954d
DH
3760 if (!hdev->workqueue) {
3761 error = -ENOMEM;
3762 goto err;
3763 }
f48fd9c8 3764
d8537548
KC
3765 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3766 WQ_MEM_RECLAIM, 1, hdev->name);
6ead1bbc
JH
3767 if (!hdev->req_workqueue) {
3768 destroy_workqueue(hdev->workqueue);
3769 error = -ENOMEM;
3770 goto err;
3771 }
3772
0153e2ec
MH
3773 if (!IS_ERR_OR_NULL(bt_debugfs))
3774 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3775
bdc3e0f1
MH
3776 dev_set_name(&hdev->dev, "%s", hdev->name);
3777
99780a7b
JH
3778 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3779 CRYPTO_ALG_ASYNC);
3780 if (IS_ERR(hdev->tfm_aes)) {
3781 BT_ERR("Unable to create crypto context");
3782 error = PTR_ERR(hdev->tfm_aes);
3783 hdev->tfm_aes = NULL;
3784 goto err_wqueue;
3785 }
3786
bdc3e0f1 3787 error = device_add(&hdev->dev);
33ca954d 3788 if (error < 0)
99780a7b 3789 goto err_tfm;
1da177e4 3790
611b30f7 3791 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
3792 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3793 hdev);
611b30f7
MH
3794 if (hdev->rfkill) {
3795 if (rfkill_register(hdev->rfkill) < 0) {
3796 rfkill_destroy(hdev->rfkill);
3797 hdev->rfkill = NULL;
3798 }
3799 }
3800
5e130367
JH
3801 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3802 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3803
a8b2d5c2 3804 set_bit(HCI_SETUP, &hdev->dev_flags);
004b0258 3805 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
ce2be9ac 3806
01cd3404 3807 if (hdev->dev_type == HCI_BREDR) {
56f87901
JH
3808 /* Assume BR/EDR support until proven otherwise (such as
3809 * through reading supported features during init.
3810 */
3811 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3812 }
ce2be9ac 3813
fcee3377
GP
3814 write_lock(&hci_dev_list_lock);
3815 list_add(&hdev->list, &hci_dev_list);
3816 write_unlock(&hci_dev_list_lock);
3817
1da177e4 3818 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 3819 hci_dev_hold(hdev);
1da177e4 3820
19202573 3821 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 3822
1da177e4 3823 return id;
f48fd9c8 3824
99780a7b
JH
3825err_tfm:
3826 crypto_free_blkcipher(hdev->tfm_aes);
33ca954d
DH
3827err_wqueue:
3828 destroy_workqueue(hdev->workqueue);
6ead1bbc 3829 destroy_workqueue(hdev->req_workqueue);
33ca954d 3830err:
3df92b31 3831 ida_simple_remove(&hci_index_ida, hdev->id);
f48fd9c8 3832
33ca954d 3833 return error;
1da177e4
LT
3834}
3835EXPORT_SYMBOL(hci_register_dev);
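/* A minimal sketch (not from this file) of how a transport driver is
 * expected to use the API above: the core rejects an hdev without
 * ->open and ->close, and ->send is what hci_send_frame() calls into.
 * my_probe/my_open/my_close/my_send and the HCI_USB bus value are
 * illustrative assumptions.
 *
 *	static int my_probe(void)
 *	{
 *		struct hci_dev *hdev;
 *		int err;
 *
 *		hdev = hci_alloc_dev();
 *		if (!hdev)
 *			return -ENOMEM;
 *
 *		hdev->bus = HCI_USB;
 *		hdev->open = my_open;
 *		hdev->close = my_close;
 *		hdev->send = my_send;
 *
 *		err = hci_register_dev(hdev);
 *		if (err < 0)
 *			hci_free_dev(hdev);
 *
 *		return err;
 *	}
 */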
3836
3837/* Unregister HCI device */
59735631 3838void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 3839{
3df92b31 3840 int i, id;
ef222013 3841
c13854ce 3842 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 3843
94324962
JH
3844 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3845
3df92b31
SL
3846 id = hdev->id;
3847
f20d09d5 3848 write_lock(&hci_dev_list_lock);
1da177e4 3849 list_del(&hdev->list);
f20d09d5 3850 write_unlock(&hci_dev_list_lock);
1da177e4
LT
3851
3852 hci_dev_do_close(hdev);
3853
cd4c5391 3854 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
3855 kfree_skb(hdev->reassembly[i]);
3856
b9b5ef18
GP
3857 cancel_work_sync(&hdev->power_on);
3858
ab81cbf9 3859 if (!test_bit(HCI_INIT, &hdev->flags) &&
a8c5fb1a 3860 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
09fd0de5 3861 hci_dev_lock(hdev);
744cf19e 3862 mgmt_index_removed(hdev);
09fd0de5 3863 hci_dev_unlock(hdev);
56e5cb86 3864 }
ab81cbf9 3865
2e58ef3e
JH
3866 /* mgmt_index_removed() should take care of emptying the
3867 * pending list. */
3868 BUG_ON(!list_empty(&hdev->mgmt_pending));
3869
1da177e4
LT
3870 hci_notify(hdev, HCI_DEV_UNREG);
3871
611b30f7
MH
3872 if (hdev->rfkill) {
3873 rfkill_unregister(hdev->rfkill);
3874 rfkill_destroy(hdev->rfkill);
3875 }
3876
99780a7b
JH
3877 if (hdev->tfm_aes)
3878 crypto_free_blkcipher(hdev->tfm_aes);
3879
bdc3e0f1 3880 device_del(&hdev->dev);
147e2d59 3881
0153e2ec
MH
3882 debugfs_remove_recursive(hdev->debugfs);
3883
f48fd9c8 3884 destroy_workqueue(hdev->workqueue);
6ead1bbc 3885 destroy_workqueue(hdev->req_workqueue);
f48fd9c8 3886
09fd0de5 3887 hci_dev_lock(hdev);
e2e0cacb 3888 hci_blacklist_clear(hdev);
2aeb9a1a 3889 hci_uuids_clear(hdev);
55ed8ca1 3890 hci_link_keys_clear(hdev);
b899efaf 3891 hci_smp_ltks_clear(hdev);
970c4e46 3892 hci_smp_irks_clear(hdev);
2763eda6 3893 hci_remote_oob_data_clear(hdev);
15819a70 3894 hci_conn_params_clear(hdev);
77a77a30 3895 hci_pend_le_conns_clear(hdev);
09fd0de5 3896 hci_dev_unlock(hdev);
e2e0cacb 3897
dc946bd8 3898 hci_dev_put(hdev);
3df92b31
SL
3899
3900 ida_simple_remove(&hci_index_ida, id);
1da177e4
LT
3901}
3902EXPORT_SYMBOL(hci_unregister_dev);
3903
3904/* Suspend HCI device */
3905int hci_suspend_dev(struct hci_dev *hdev)
3906{
3907 hci_notify(hdev, HCI_DEV_SUSPEND);
3908 return 0;
3909}
3910EXPORT_SYMBOL(hci_suspend_dev);
3911
3912/* Resume HCI device */
3913int hci_resume_dev(struct hci_dev *hdev)
3914{
3915 hci_notify(hdev, HCI_DEV_RESUME);
3916 return 0;
3917}
3918EXPORT_SYMBOL(hci_resume_dev);
3919
76bca880 3920/* Receive frame from HCI drivers */
e1a26170 3921int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 3922{
76bca880 3923 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 3924 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
3925 kfree_skb(skb);
3926 return -ENXIO;
3927 }
3928
d82603c6 3929 /* Incoming skb */
76bca880
MH
3930 bt_cb(skb)->incoming = 1;
3931
3932 /* Time stamp */
3933 __net_timestamp(skb);
3934
76bca880 3935 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 3936 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 3937
76bca880
MH
3938 return 0;
3939}
3940EXPORT_SYMBOL(hci_recv_frame);
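/* Sketch: a driver handing one complete frame to the core. The packet
 * type must be set in the skb control block first, exactly as
 * hci_reassembly() below does for reassembled frames. Note that
 * hci_recv_frame() consumes the skb even on error.
 *
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	err = hci_recv_frame(hdev, skb);
 */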
3941
33e882a5 3942static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 3943 int count, __u8 index)
33e882a5
SS
3944{
3945 int len = 0;
3946 int hlen = 0;
3947 int remain = count;
3948 struct sk_buff *skb;
3949 struct bt_skb_cb *scb;
3950
3951 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 3952 index >= NUM_REASSEMBLY)
33e882a5
SS
3953 return -EILSEQ;
3954
3955 skb = hdev->reassembly[index];
3956
3957 if (!skb) {
3958 switch (type) {
3959 case HCI_ACLDATA_PKT:
3960 len = HCI_MAX_FRAME_SIZE;
3961 hlen = HCI_ACL_HDR_SIZE;
3962 break;
3963 case HCI_EVENT_PKT:
3964 len = HCI_MAX_EVENT_SIZE;
3965 hlen = HCI_EVENT_HDR_SIZE;
3966 break;
3967 case HCI_SCODATA_PKT:
3968 len = HCI_MAX_SCO_SIZE;
3969 hlen = HCI_SCO_HDR_SIZE;
3970 break;
3971 }
3972
1e429f38 3973 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
3974 if (!skb)
3975 return -ENOMEM;
3976
3977 scb = (void *) skb->cb;
3978 scb->expect = hlen;
3979 scb->pkt_type = type;
3980
33e882a5
SS
3981 hdev->reassembly[index] = skb;
3982 }
3983
3984 while (count) {
3985 scb = (void *) skb->cb;
89bb46d0 3986 len = min_t(uint, scb->expect, count);
33e882a5
SS
3987
3988 memcpy(skb_put(skb, len), data, len);
3989
3990 count -= len;
3991 data += len;
3992 scb->expect -= len;
3993 remain = count;
3994
3995 switch (type) {
3996 case HCI_EVENT_PKT:
3997 if (skb->len == HCI_EVENT_HDR_SIZE) {
3998 struct hci_event_hdr *h = hci_event_hdr(skb);
3999 scb->expect = h->plen;
4000
4001 if (skb_tailroom(skb) < scb->expect) {
4002 kfree_skb(skb);
4003 hdev->reassembly[index] = NULL;
4004 return -ENOMEM;
4005 }
4006 }
4007 break;
4008
4009 case HCI_ACLDATA_PKT:
4010 if (skb->len == HCI_ACL_HDR_SIZE) {
4011 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4012 scb->expect = __le16_to_cpu(h->dlen);
4013
4014 if (skb_tailroom(skb) < scb->expect) {
4015 kfree_skb(skb);
4016 hdev->reassembly[index] = NULL;
4017 return -ENOMEM;
4018 }
4019 }
4020 break;
4021
4022 case HCI_SCODATA_PKT:
4023 if (skb->len == HCI_SCO_HDR_SIZE) {
4024 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4025 scb->expect = h->dlen;
4026
4027 if (skb_tailroom(skb) < scb->expect) {
4028 kfree_skb(skb);
4029 hdev->reassembly[index] = NULL;
4030 return -ENOMEM;
4031 }
4032 }
4033 break;
4034 }
4035
4036 if (scb->expect == 0) {
4037 /* Complete frame */
4038
4039 bt_cb(skb)->pkt_type = type;
e1a26170 4040 hci_recv_frame(hdev, skb);
33e882a5
SS
4041
4042 hdev->reassembly[index] = NULL;
4043 return remain;
4044 }
4045 }
4046
4047 return remain;
4048}
4049
ef222013
MH
4050int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4051{
f39a3c06
SS
4052 int rem = 0;
4053
ef222013
MH
4054 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4055 return -EILSEQ;
4056
da5f6c37 4057 while (count) {
1e429f38 4058 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
4059 if (rem < 0)
4060 return rem;
ef222013 4061
f39a3c06
SS
4062 data += (count - rem);
4063 count = rem;
f81c6224 4064 }
ef222013 4065
f39a3c06 4066 return rem;
ef222013
MH
4067}
4068EXPORT_SYMBOL(hci_recv_fragment);
4069
99811510
SS
4070#define STREAM_REASSEMBLY 0
4071
4072int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4073{
4074 int type;
4075 int rem = 0;
4076
da5f6c37 4077 while (count) {
99811510
SS
4078 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4079
4080 if (!skb) {
4081 struct { char type; } *pkt;
4082
4083 /* Start of the frame */
4084 pkt = data;
4085 type = pkt->type;
4086
4087 data++;
4088 count--;
4089 } else
4090 type = bt_cb(skb)->pkt_type;
4091
1e429f38 4092 rem = hci_reassembly(hdev, type, data, count,
a8c5fb1a 4093 STREAM_REASSEMBLY);
99811510
SS
4094 if (rem < 0)
4095 return rem;
4096
4097 data += (count - rem);
4098 count = rem;
f81c6224 4099 }
99811510
SS
4100
4101 return rem;
4102}
4103EXPORT_SYMBOL(hci_recv_stream_fragment);
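/* Sketch: a byte-stream transport (a UART driver, say) can feed
 * arbitrary chunks into the stream reassembler above; the first byte
 * of each frame selects the packet type. buf and len are assumed to
 * come from the transport.
 *
 *	int err = hci_recv_stream_fragment(hdev, buf, len);
 *	if (err < 0)
 *		BT_ERR("stream reassembly failed: %d", err);
 */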
4104
1da177e4
LT
4105/* ---- Interface to upper protocols ---- */
4106
1da177e4
LT
4107int hci_register_cb(struct hci_cb *cb)
4108{
4109 BT_DBG("%p name %s", cb, cb->name);
4110
f20d09d5 4111 write_lock(&hci_cb_list_lock);
1da177e4 4112 list_add(&cb->list, &hci_cb_list);
f20d09d5 4113 write_unlock(&hci_cb_list_lock);
1da177e4
LT
4114
4115 return 0;
4116}
4117EXPORT_SYMBOL(hci_register_cb);
4118
4119int hci_unregister_cb(struct hci_cb *cb)
4120{
4121 BT_DBG("%p name %s", cb, cb->name);
4122
f20d09d5 4123 write_lock(&hci_cb_list_lock);
1da177e4 4124 list_del(&cb->list);
f20d09d5 4125 write_unlock(&hci_cb_list_lock);
1da177e4
LT
4126
4127 return 0;
4128}
4129EXPORT_SYMBOL(hci_unregister_cb);
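/* Sketch: an upper protocol registering itself with the core. Only
 * the ->name member of struct hci_cb is visible in this file; any
 * other hooks the protocol fills in are outside this sketch.
 *
 *	static struct hci_cb my_proto_cb = {
 *		.name = "my_proto",
 *	};
 *
 *	hci_register_cb(&my_proto_cb);
 *	...
 *	hci_unregister_cb(&my_proto_cb);
 */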
4130
51086991 4131static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4 4132{
0d48d939 4133 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 4134
cd82e61c
MH
4135 /* Time stamp */
4136 __net_timestamp(skb);
1da177e4 4137
cd82e61c
MH
4138 /* Send copy to monitor */
4139 hci_send_to_monitor(hdev, skb);
4140
4141 if (atomic_read(&hdev->promisc)) {
4142 /* Send copy to the sockets */
470fe1b5 4143 hci_send_to_sock(hdev, skb);
1da177e4
LT
4144 }
4145
4146 /* Get rid of skb owner, prior to sending to the driver. */
4147 skb_orphan(skb);
4148
7bd8f09f 4149 if (hdev->send(hdev, skb) < 0)
51086991 4150 BT_ERR("%s sending frame failed", hdev->name);
1da177e4
LT
4151}
4152
3119ae95
JH
4153void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4154{
4155 skb_queue_head_init(&req->cmd_q);
4156 req->hdev = hdev;
5d73e034 4157 req->err = 0;
3119ae95
JH
4158}
4159
4160int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4161{
4162 struct hci_dev *hdev = req->hdev;
4163 struct sk_buff *skb;
4164 unsigned long flags;
4165
4166 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4167
5d73e034
AG
4168 /* If an error occurred during request building, remove all HCI
4169 * commands queued on the HCI request queue.
4170 */
4171 if (req->err) {
4172 skb_queue_purge(&req->cmd_q);
4173 return req->err;
4174 }
4175
3119ae95
JH
4176 /* Do not allow empty requests */
4177 if (skb_queue_empty(&req->cmd_q))
382b0c39 4178 return -ENODATA;
3119ae95
JH
4179
4180 skb = skb_peek_tail(&req->cmd_q);
4181 bt_cb(skb)->req.complete = complete;
4182
4183 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4184 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4185 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4186
4187 queue_work(hdev->workqueue, &hdev->cmd_work);
4188
4189 return 0;
4190}
4191
1ca3a9d0 4192static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
07dc93dd 4193 u32 plen, const void *param)
1da177e4
LT
4194{
4195 int len = HCI_COMMAND_HDR_SIZE + plen;
4196 struct hci_command_hdr *hdr;
4197 struct sk_buff *skb;
4198
1da177e4 4199 skb = bt_skb_alloc(len, GFP_ATOMIC);
1ca3a9d0
JH
4200 if (!skb)
4201 return NULL;
1da177e4
LT
4202
4203 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 4204 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
4205 hdr->plen = plen;
4206
4207 if (plen)
4208 memcpy(skb_put(skb, plen), param, plen);
4209
4210 BT_DBG("skb len %d", skb->len);
4211
0d48d939 4212 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
c78ae283 4213
1ca3a9d0
JH
4214 return skb;
4215}
4216
4217/* Send HCI command */
07dc93dd
JH
4218int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4219 const void *param)
1ca3a9d0
JH
4220{
4221 struct sk_buff *skb;
4222
4223 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4224
4225 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4226 if (!skb) {
4227 BT_ERR("%s no memory for command", hdev->name);
4228 return -ENOMEM;
4229 }
4230
11714b3d
JH
4231 /* Stand-alone HCI commands must be flagged as
4232 * single-command requests.
4233 */
4234 bt_cb(skb)->req.start = true;
4235
1da177e4 4236 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 4237 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
4238
4239 return 0;
4240}
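/* Sketch: sending a stand-alone command; the parameter pointer may be
 * NULL when plen is 0. HCI_OP_RESET is used purely as an example
 * opcode.
 *
 *	err = hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 */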
1da177e4 4241
71c76a17 4242/* Queue a command to an asynchronous HCI request */
07dc93dd
JH
4243void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4244 const void *param, u8 event)
71c76a17
JH
4245{
4246 struct hci_dev *hdev = req->hdev;
4247 struct sk_buff *skb;
4248
4249 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4250
34739c1e
AG
4251 /* If an error occurred during request building, there is no point in
4252 * queueing the HCI command. We can simply return.
4253 */
4254 if (req->err)
4255 return;
4256
71c76a17
JH
4257 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4258 if (!skb) {
5d73e034
AG
4259 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4260 hdev->name, opcode);
4261 req->err = -ENOMEM;
e348fe6b 4262 return;
71c76a17
JH
4263 }
4264
4265 if (skb_queue_empty(&req->cmd_q))
4266 bt_cb(skb)->req.start = true;
4267
02350a72
JH
4268 bt_cb(skb)->req.event = event;
4269
71c76a17 4270 skb_queue_tail(&req->cmd_q, skb);
71c76a17
JH
4271}
4272
07dc93dd
JH
4273void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4274 const void *param)
02350a72
JH
4275{
4276 hci_req_add_ev(req, opcode, plen, param, 0);
4277}
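/* Sketch: building and running an asynchronous request with the
 * helpers above. Commands queued with hci_req_add() only reach
 * hdev->cmd_q once hci_req_run() splices them in, and the complete
 * callback fires once for the request as a whole. my_complete is an
 * assumed hci_req_complete_t.
 *
 *	struct hci_request req;
 *	int err;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_RESET, 0, NULL);
 *	err = hci_req_run(&req, my_complete);
 *	if (err)
 *		BT_ERR("request failed: %d", err);
 */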
4278
1da177e4 4279/* Get data from the previously sent command */
a9de9248 4280void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
4281{
4282 struct hci_command_hdr *hdr;
4283
4284 if (!hdev->sent_cmd)
4285 return NULL;
4286
4287 hdr = (void *) hdev->sent_cmd->data;
4288
a9de9248 4289 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
4290 return NULL;
4291
f0e09510 4292 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
4293
4294 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4295}
4296
4297/* Send ACL data */
4298static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4299{
4300 struct hci_acl_hdr *hdr;
4301 int len = skb->len;
4302
badff6d0
ACM
4303 skb_push(skb, HCI_ACL_HDR_SIZE);
4304 skb_reset_transport_header(skb);
9c70220b 4305 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
4306 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4307 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
4308}
4309
ee22be7e 4310static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 4311 struct sk_buff *skb, __u16 flags)
1da177e4 4312{
ee22be7e 4313 struct hci_conn *conn = chan->conn;
1da177e4
LT
4314 struct hci_dev *hdev = conn->hdev;
4315 struct sk_buff *list;
4316
087bfd99
GP
4317 skb->len = skb_headlen(skb);
4318 skb->data_len = 0;
4319
4320 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
4321
4322 switch (hdev->dev_type) {
4323 case HCI_BREDR:
4324 hci_add_acl_hdr(skb, conn->handle, flags);
4325 break;
4326 case HCI_AMP:
4327 hci_add_acl_hdr(skb, chan->handle, flags);
4328 break;
4329 default:
4330 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4331 return;
4332 }
087bfd99 4333
70f23020
AE
4334 list = skb_shinfo(skb)->frag_list;
4335 if (!list) {
1da177e4
LT
4336 /* Non-fragmented */
4337 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4338
73d80deb 4339 skb_queue_tail(queue, skb);
1da177e4
LT
4340 } else {
4341 /* Fragmented */
4342 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4343
4344 skb_shinfo(skb)->frag_list = NULL;
4345
4346 /* Queue all fragments atomically */
af3e6359 4347 spin_lock(&queue->lock);
1da177e4 4348
73d80deb 4349 __skb_queue_tail(queue, skb);
e702112f
AE
4350
4351 flags &= ~ACL_START;
4352 flags |= ACL_CONT;
1da177e4
LT
4353 do {
4354 skb = list; list = list->next;
8e87d142 4355
0d48d939 4356 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 4357 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
4358
4359 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4360
73d80deb 4361 __skb_queue_tail(queue, skb);
1da177e4
LT
4362 } while (list);
4363
af3e6359 4364 spin_unlock(&queue->lock);
1da177e4 4365 }
73d80deb
LAD
4366}
4367
4368void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4369{
ee22be7e 4370 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 4371
f0e09510 4372 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb 4373
ee22be7e 4374 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 4375
3eff45ea 4376 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4377}
1da177e4
LT
4378
4379/* Send SCO data */
0d861d8b 4380void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
4381{
4382 struct hci_dev *hdev = conn->hdev;
4383 struct hci_sco_hdr hdr;
4384
4385 BT_DBG("%s len %d", hdev->name, skb->len);
4386
aca3192c 4387 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
4388 hdr.dlen = skb->len;
4389
badff6d0
ACM
4390 skb_push(skb, HCI_SCO_HDR_SIZE);
4391 skb_reset_transport_header(skb);
9c70220b 4392 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4 4393
0d48d939 4394 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 4395
1da177e4 4396 skb_queue_tail(&conn->data_q, skb);
3eff45ea 4397 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4398}
1da177e4
LT
4399
4400/* ---- HCI TX task (outgoing data) ---- */
4401
4402/* HCI Connection scheduler */
6039aa73
GP
4403static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4404 int *quote)
1da177e4
LT
4405{
4406 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4407 struct hci_conn *conn = NULL, *c;
abc5de8f 4408 unsigned int num = 0, min = ~0;
1da177e4 4409
8e87d142 4410 /* We don't have to lock device here. Connections are always
1da177e4 4411 * added and removed with TX task disabled. */
bf4c6325
GP
4412
4413 rcu_read_lock();
4414
4415 list_for_each_entry_rcu(c, &h->list, list) {
769be974 4416 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 4417 continue;
769be974
MH
4418
4419 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4420 continue;
4421
1da177e4
LT
4422 num++;
4423
4424 if (c->sent < min) {
4425 min = c->sent;
4426 conn = c;
4427 }
52087a79
LAD
4428
4429 if (hci_conn_num(hdev, type) == num)
4430 break;
1da177e4
LT
4431 }
4432
bf4c6325
GP
4433 rcu_read_unlock();
4434
1da177e4 4435 if (conn) {
6ed58ec5
VT
4436 int cnt, q;
4437
4438 switch (conn->type) {
4439 case ACL_LINK:
4440 cnt = hdev->acl_cnt;
4441 break;
4442 case SCO_LINK:
4443 case ESCO_LINK:
4444 cnt = hdev->sco_cnt;
4445 break;
4446 case LE_LINK:
4447 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4448 break;
4449 default:
4450 cnt = 0;
4451 BT_ERR("Unknown link type");
4452 }
4453
4454 q = cnt / num;
1da177e4
LT
4455 *quote = q ? q : 1;
4456 } else
4457 *quote = 0;
4458
4459 BT_DBG("conn %p quote %d", conn, *quote);
4460 return conn;
4461}
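/* Worked example: with hdev->acl_cnt = 10 and three ACL connections
 * holding queued data, the least-used connection is selected and gets
 * a quota of 10 / 3 = 3 packets; the quota never drops below 1. */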
4462
6039aa73 4463static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
4464{
4465 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4466 struct hci_conn *c;
1da177e4 4467
bae1f5d9 4468 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 4469
bf4c6325
GP
4470 rcu_read_lock();
4471
1da177e4 4472 /* Kill stalled connections */
bf4c6325 4473 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 4474 if (c->type == type && c->sent) {
6ed93dc6
AE
4475 BT_ERR("%s killing stalled connection %pMR",
4476 hdev->name, &c->dst);
bed71748 4477 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
4478 }
4479 }
bf4c6325
GP
4480
4481 rcu_read_unlock();
1da177e4
LT
4482}
4483
6039aa73
GP
4484static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4485 int *quote)
1da177e4 4486{
73d80deb
LAD
4487 struct hci_conn_hash *h = &hdev->conn_hash;
4488 struct hci_chan *chan = NULL;
abc5de8f 4489 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 4490 struct hci_conn *conn;
73d80deb
LAD
4491 int cnt, q, conn_num = 0;
4492
4493 BT_DBG("%s", hdev->name);
4494
bf4c6325
GP
4495 rcu_read_lock();
4496
4497 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
4498 struct hci_chan *tmp;
4499
4500 if (conn->type != type)
4501 continue;
4502
4503 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4504 continue;
4505
4506 conn_num++;
4507
8192edef 4508 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
4509 struct sk_buff *skb;
4510
4511 if (skb_queue_empty(&tmp->data_q))
4512 continue;
4513
4514 skb = skb_peek(&tmp->data_q);
4515 if (skb->priority < cur_prio)
4516 continue;
4517
4518 if (skb->priority > cur_prio) {
4519 num = 0;
4520 min = ~0;
4521 cur_prio = skb->priority;
4522 }
4523
4524 num++;
4525
4526 if (conn->sent < min) {
4527 min = conn->sent;
4528 chan = tmp;
4529 }
4530 }
4531
4532 if (hci_conn_num(hdev, type) == conn_num)
4533 break;
4534 }
4535
bf4c6325
GP
4536 rcu_read_unlock();
4537
73d80deb
LAD
4538 if (!chan)
4539 return NULL;
4540
4541 switch (chan->conn->type) {
4542 case ACL_LINK:
4543 cnt = hdev->acl_cnt;
4544 break;
bd1eb66b
AE
4545 case AMP_LINK:
4546 cnt = hdev->block_cnt;
4547 break;
73d80deb
LAD
4548 case SCO_LINK:
4549 case ESCO_LINK:
4550 cnt = hdev->sco_cnt;
4551 break;
4552 case LE_LINK:
4553 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4554 break;
4555 default:
4556 cnt = 0;
4557 BT_ERR("Unknown link type");
4558 }
4559
4560 q = cnt / num;
4561 *quote = q ? q : 1;
4562 BT_DBG("chan %p quote %d", chan, *quote);
4563 return chan;
4564}
4565
02b20f0b
LAD
4566static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4567{
4568 struct hci_conn_hash *h = &hdev->conn_hash;
4569 struct hci_conn *conn;
4570 int num = 0;
4571
4572 BT_DBG("%s", hdev->name);
4573
bf4c6325
GP
4574 rcu_read_lock();
4575
4576 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
4577 struct hci_chan *chan;
4578
4579 if (conn->type != type)
4580 continue;
4581
4582 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4583 continue;
4584
4585 num++;
4586
8192edef 4587 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
4588 struct sk_buff *skb;
4589
4590 if (chan->sent) {
4591 chan->sent = 0;
4592 continue;
4593 }
4594
4595 if (skb_queue_empty(&chan->data_q))
4596 continue;
4597
4598 skb = skb_peek(&chan->data_q);
4599 if (skb->priority >= HCI_PRIO_MAX - 1)
4600 continue;
4601
4602 skb->priority = HCI_PRIO_MAX - 1;
4603
4604 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 4605 skb->priority);
02b20f0b
LAD
4606 }
4607
4608 if (hci_conn_num(hdev, type) == num)
4609 break;
4610 }
bf4c6325
GP
4611
4612 rcu_read_unlock();
4613
02b20f0b
LAD
4614}
4615
b71d385a
AE
4616static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4617{
4618 /* Calculate count of blocks used by this packet */
4619 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4620}
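/* Worked example: with hdev->block_len = 256 and a 1000-byte skb
 * (996 bytes of payload after the 4-byte ACL header),
 * DIV_ROUND_UP(996, 256) = 4 blocks are consumed. */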
4621
6039aa73 4622static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 4623{
1da177e4
LT
4624 if (!test_bit(HCI_RAW, &hdev->flags)) {
4625 /* ACL tx timeout must be longer than maximum
4626 * link supervision timeout (40.9 seconds) */
63d2bc1b 4627 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 4628 HCI_ACL_TX_TIMEOUT))
bae1f5d9 4629 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 4630 }
63d2bc1b 4631}
1da177e4 4632
6039aa73 4633static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
4634{
4635 unsigned int cnt = hdev->acl_cnt;
4636 struct hci_chan *chan;
4637 struct sk_buff *skb;
4638 int quote;
4639
4640 __check_timeout(hdev, cnt);
04837f64 4641
73d80deb 4642 while (hdev->acl_cnt &&
a8c5fb1a 4643 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
4644 u32 priority = (skb_peek(&chan->data_q))->priority;
4645 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4646 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4647 skb->len, skb->priority);
73d80deb 4648
ec1cce24
LAD
4649 /* Stop if priority has changed */
4650 if (skb->priority < priority)
4651 break;
4652
4653 skb = skb_dequeue(&chan->data_q);
4654
73d80deb 4655 hci_conn_enter_active_mode(chan->conn,
04124681 4656 bt_cb(skb)->force_active);
04837f64 4657
57d17d70 4658 hci_send_frame(hdev, skb);
1da177e4
LT
4659 hdev->acl_last_tx = jiffies;
4660
4661 hdev->acl_cnt--;
73d80deb
LAD
4662 chan->sent++;
4663 chan->conn->sent++;
1da177e4
LT
4664 }
4665 }
02b20f0b
LAD
4666
4667 if (cnt != hdev->acl_cnt)
4668 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
4669}
4670
6039aa73 4671static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 4672{
63d2bc1b 4673 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
4674 struct hci_chan *chan;
4675 struct sk_buff *skb;
4676 int quote;
bd1eb66b 4677 u8 type;
b71d385a 4678
63d2bc1b 4679 __check_timeout(hdev, cnt);
b71d385a 4680
bd1eb66b
AE
4681 BT_DBG("%s", hdev->name);
4682
4683 if (hdev->dev_type == HCI_AMP)
4684 type = AMP_LINK;
4685 else
4686 type = ACL_LINK;
4687
b71d385a 4688 while (hdev->block_cnt > 0 &&
bd1eb66b 4689 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
4690 u32 priority = (skb_peek(&chan->data_q))->priority;
4691 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4692 int blocks;
4693
4694 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4695 skb->len, skb->priority);
b71d385a
AE
4696
4697 /* Stop if priority has changed */
4698 if (skb->priority < priority)
4699 break;
4700
4701 skb = skb_dequeue(&chan->data_q);
4702
4703 blocks = __get_blocks(hdev, skb);
4704 if (blocks > hdev->block_cnt)
4705 return;
4706
4707 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 4708 bt_cb(skb)->force_active);
b71d385a 4709
57d17d70 4710 hci_send_frame(hdev, skb);
b71d385a
AE
4711 hdev->acl_last_tx = jiffies;
4712
4713 hdev->block_cnt -= blocks;
4714 quote -= blocks;
4715
4716 chan->sent += blocks;
4717 chan->conn->sent += blocks;
4718 }
4719 }
4720
4721 if (cnt != hdev->block_cnt)
bd1eb66b 4722 hci_prio_recalculate(hdev, type);
b71d385a
AE
4723}
4724
6039aa73 4725static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
4726{
4727 BT_DBG("%s", hdev->name);
4728
bd1eb66b
AE
4729 /* No ACL link over BR/EDR controller */
4730 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4731 return;
4732
4733 /* No AMP link over AMP controller */
4734 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
4735 return;
4736
4737 switch (hdev->flow_ctl_mode) {
4738 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4739 hci_sched_acl_pkt(hdev);
4740 break;
4741
4742 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4743 hci_sched_acl_blk(hdev);
4744 break;
4745 }
4746}
4747
1da177e4 4748/* Schedule SCO */
6039aa73 4749static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
4750{
4751 struct hci_conn *conn;
4752 struct sk_buff *skb;
4753 int quote;
4754
4755 BT_DBG("%s", hdev->name);
4756
52087a79
LAD
4757 if (!hci_conn_num(hdev, SCO_LINK))
4758 return;
4759
1da177e4
LT
4760 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4761 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4762 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4763 hci_send_frame(hdev, skb);
1da177e4
LT
4764
4765 conn->sent++;
4766 if (conn->sent == ~0)
4767 conn->sent = 0;
4768 }
4769 }
4770}
4771
6039aa73 4772static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
4773{
4774 struct hci_conn *conn;
4775 struct sk_buff *skb;
4776 int quote;
4777
4778 BT_DBG("%s", hdev->name);
4779
52087a79
LAD
4780 if (!hci_conn_num(hdev, ESCO_LINK))
4781 return;
4782
8fc9ced3
GP
4783 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4784 &quote))) {
b6a0dc82
MH
4785 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4786 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4787 hci_send_frame(hdev, skb);
b6a0dc82
MH
4788
4789 conn->sent++;
4790 if (conn->sent == ~0)
4791 conn->sent = 0;
4792 }
4793 }
4794}
4795
6039aa73 4796static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 4797{
73d80deb 4798 struct hci_chan *chan;
6ed58ec5 4799 struct sk_buff *skb;
02b20f0b 4800 int quote, cnt, tmp;
6ed58ec5
VT
4801
4802 BT_DBG("%s", hdev->name);
4803
52087a79
LAD
4804 if (!hci_conn_num(hdev, LE_LINK))
4805 return;
4806
6ed58ec5
VT
4807 if (!test_bit(HCI_RAW, &hdev->flags)) {
4808 /* LE tx timeout must be longer than maximum
4809 * link supervision timeout (40.9 seconds) */
bae1f5d9 4810 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 4811 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 4812 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
4813 }
4814
4815 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 4816 tmp = cnt;
73d80deb 4817 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
4818 u32 priority = (skb_peek(&chan->data_q))->priority;
4819 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4820 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4821 skb->len, skb->priority);
6ed58ec5 4822
ec1cce24
LAD
4823 /* Stop if priority has changed */
4824 if (skb->priority < priority)
4825 break;
4826
4827 skb = skb_dequeue(&chan->data_q);
4828
57d17d70 4829 hci_send_frame(hdev, skb);
6ed58ec5
VT
4830 hdev->le_last_tx = jiffies;
4831
4832 cnt--;
73d80deb
LAD
4833 chan->sent++;
4834 chan->conn->sent++;
6ed58ec5
VT
4835 }
4836 }
73d80deb 4837
6ed58ec5
VT
4838 if (hdev->le_pkts)
4839 hdev->le_cnt = cnt;
4840 else
4841 hdev->acl_cnt = cnt;
02b20f0b
LAD
4842
4843 if (cnt != tmp)
4844 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
4845}
4846
3eff45ea 4847static void hci_tx_work(struct work_struct *work)
1da177e4 4848{
3eff45ea 4849 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
4850 struct sk_buff *skb;
4851
6ed58ec5 4852 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 4853 hdev->sco_cnt, hdev->le_cnt);
1da177e4 4854
52de599e
MH
4855 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4856 /* Schedule queues and send stuff to HCI driver */
4857 hci_sched_acl(hdev);
4858 hci_sched_sco(hdev);
4859 hci_sched_esco(hdev);
4860 hci_sched_le(hdev);
4861 }
6ed58ec5 4862
1da177e4
LT
4863 /* Send next queued raw (unknown type) packet */
4864 while ((skb = skb_dequeue(&hdev->raw_q)))
57d17d70 4865 hci_send_frame(hdev, skb);
1da177e4
LT
4866}
4867
25985edc 4868/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
4869
4870/* ACL data packet */
6039aa73 4871static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
4872{
4873 struct hci_acl_hdr *hdr = (void *) skb->data;
4874 struct hci_conn *conn;
4875 __u16 handle, flags;
4876
4877 skb_pull(skb, HCI_ACL_HDR_SIZE);
4878
4879 handle = __le16_to_cpu(hdr->handle);
4880 flags = hci_flags(handle);
4881 handle = hci_handle(handle);
4882
f0e09510 4883 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 4884 handle, flags);
1da177e4
LT
4885
4886 hdev->stat.acl_rx++;
4887
4888 hci_dev_lock(hdev);
4889 conn = hci_conn_hash_lookup_handle(hdev, handle);
4890 hci_dev_unlock(hdev);
8e87d142 4891
1da177e4 4892 if (conn) {
65983fc7 4893 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 4894
1da177e4 4895 /* Send to upper protocol */
686ebf28
UF
4896 l2cap_recv_acldata(conn, skb, flags);
4897 return;
1da177e4 4898 } else {
8e87d142 4899 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 4900 hdev->name, handle);
1da177e4
LT
4901 }
4902
4903 kfree_skb(skb);
4904}
4905
4906/* SCO data packet */
6039aa73 4907static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
4908{
4909 struct hci_sco_hdr *hdr = (void *) skb->data;
4910 struct hci_conn *conn;
4911 __u16 handle;
4912
4913 skb_pull(skb, HCI_SCO_HDR_SIZE);
4914
4915 handle = __le16_to_cpu(hdr->handle);
4916
f0e09510 4917 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
4918
4919 hdev->stat.sco_rx++;
4920
4921 hci_dev_lock(hdev);
4922 conn = hci_conn_hash_lookup_handle(hdev, handle);
4923 hci_dev_unlock(hdev);
4924
4925 if (conn) {
1da177e4 4926 /* Send to upper protocol */
686ebf28
UF
4927 sco_recv_scodata(conn, skb);
4928 return;
1da177e4 4929 } else {
8e87d142 4930 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 4931 hdev->name, handle);
1da177e4
LT
4932 }
4933
4934 kfree_skb(skb);
4935}
4936
9238f36a
JH
4937static bool hci_req_is_complete(struct hci_dev *hdev)
4938{
4939 struct sk_buff *skb;
4940
4941 skb = skb_peek(&hdev->cmd_q);
4942 if (!skb)
4943 return true;
4944
4945 return bt_cb(skb)->req.start;
4946}
4947
42c6b129
JH
4948static void hci_resend_last(struct hci_dev *hdev)
4949{
4950 struct hci_command_hdr *sent;
4951 struct sk_buff *skb;
4952 u16 opcode;
4953
4954 if (!hdev->sent_cmd)
4955 return;
4956
4957 sent = (void *) hdev->sent_cmd->data;
4958 opcode = __le16_to_cpu(sent->opcode);
4959 if (opcode == HCI_OP_RESET)
4960 return;
4961
4962 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4963 if (!skb)
4964 return;
4965
4966 skb_queue_head(&hdev->cmd_q, skb);
4967 queue_work(hdev->workqueue, &hdev->cmd_work);
4968}
4969
9238f36a
JH
4970void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4971{
4972 hci_req_complete_t req_complete = NULL;
4973 struct sk_buff *skb;
4974 unsigned long flags;
4975
4976 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4977
42c6b129
JH
4978 /* If the completed command doesn't match the last one that was
4979 * sent we need to do special handling of it.
9238f36a 4980 */
42c6b129
JH
4981 if (!hci_sent_cmd_data(hdev, opcode)) {
4982 /* Some CSR-based controllers generate a spontaneous
4983 * reset complete event during init and any pending
4984 * command will never be completed. In such a case we
4985 * need to resend whatever was the last sent
4986 * command.
4987 */
4988 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4989 hci_resend_last(hdev);
4990
9238f36a 4991 return;
42c6b129 4992 }
9238f36a
JH
4993
4994 /* If the command succeeded and there's still more commands in
4995 * this request the request is not yet complete.
4996 */
4997 if (!status && !hci_req_is_complete(hdev))
4998 return;
4999
5000 /* If this was the last command in a request the complete
5001 * callback would be found in hdev->sent_cmd instead of the
5002 * command queue (hdev->cmd_q).
5003 */
5004 if (hdev->sent_cmd) {
5005 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
53e21fbc
JH
5006
5007 if (req_complete) {
5008 /* We must set the complete callback to NULL to
5009 * avoid calling the callback more than once if
5010 * this function gets called again.
5011 */
5012 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5013
9238f36a 5014 goto call_complete;
53e21fbc 5015 }
9238f36a
JH
5016 }
5017
5018 /* Remove all pending commands belonging to this request */
5019 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5020 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5021 if (bt_cb(skb)->req.start) {
5022 __skb_queue_head(&hdev->cmd_q, skb);
5023 break;
5024 }
5025
5026 req_complete = bt_cb(skb)->req.complete;
5027 kfree_skb(skb);
5028 }
5029 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5030
5031call_complete:
5032 if (req_complete)
5033 req_complete(hdev, status);
5034}
5035
b78752cc 5036static void hci_rx_work(struct work_struct *work)
1da177e4 5037{
b78752cc 5038 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
5039 struct sk_buff *skb;
5040
5041 BT_DBG("%s", hdev->name);
5042
1da177e4 5043 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
5044 /* Send copy to monitor */
5045 hci_send_to_monitor(hdev, skb);
5046
1da177e4
LT
5047 if (atomic_read(&hdev->promisc)) {
5048 /* Send copy to the sockets */
470fe1b5 5049 hci_send_to_sock(hdev, skb);
1da177e4
LT
5050 }
5051
0736cfa8
MH
5052 if (test_bit(HCI_RAW, &hdev->flags) ||
5053 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1da177e4
LT
5054 kfree_skb(skb);
5055 continue;
5056 }
5057
5058 if (test_bit(HCI_INIT, &hdev->flags)) {
5059 /* Don't process data packets in this state. */
0d48d939 5060 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
5061 case HCI_ACLDATA_PKT:
5062 case HCI_SCODATA_PKT:
5063 kfree_skb(skb);
5064 continue;
3ff50b79 5065 }
1da177e4
LT
5066 }
5067
5068 /* Process frame */
0d48d939 5069 switch (bt_cb(skb)->pkt_type) {
1da177e4 5070 case HCI_EVENT_PKT:
b78752cc 5071 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
5072 hci_event_packet(hdev, skb);
5073 break;
5074
5075 case HCI_ACLDATA_PKT:
5076 BT_DBG("%s ACL data packet", hdev->name);
5077 hci_acldata_packet(hdev, skb);
5078 break;
5079
5080 case HCI_SCODATA_PKT:
5081 BT_DBG("%s SCO data packet", hdev->name);
5082 hci_scodata_packet(hdev, skb);
5083 break;
5084
5085 default:
5086 kfree_skb(skb);
5087 break;
5088 }
5089 }
1da177e4
LT
5090}
5091
c347b765 5092static void hci_cmd_work(struct work_struct *work)
1da177e4 5093{
c347b765 5094 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
5095 struct sk_buff *skb;
5096
2104786b
AE
5097 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5098 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 5099
1da177e4 5100 /* Send queued commands */
5a08ecce
AE
5101 if (atomic_read(&hdev->cmd_cnt)) {
5102 skb = skb_dequeue(&hdev->cmd_q);
5103 if (!skb)
5104 return;
5105
7585b97a 5106 kfree_skb(hdev->sent_cmd);
1da177e4 5107
a675d7f1 5108 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 5109 if (hdev->sent_cmd) {
1da177e4 5110 atomic_dec(&hdev->cmd_cnt);
57d17d70 5111 hci_send_frame(hdev, skb);
7bdb8a5c
SJ
5112 if (test_bit(HCI_RESET, &hdev->flags))
5113 del_timer(&hdev->cmd_timer);
5114 else
5115 mod_timer(&hdev->cmd_timer,
5f246e89 5116 jiffies + HCI_CMD_TIMEOUT);
1da177e4
LT
5117 } else {
5118 skb_queue_head(&hdev->cmd_q, skb);
c347b765 5119 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
5120 }
5121 }
5122}
b1efcc28
AG
5123
5124void hci_req_add_le_scan_disable(struct hci_request *req)
5125{
5126 struct hci_cp_le_set_scan_enable cp;
5127
5128 memset(&cp, 0, sizeof(cp));
5129 cp.enable = LE_SCAN_DISABLE;
5130 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5131}
a4790dbd 5132
8ef30fd3
AG
5133void hci_req_add_le_passive_scan(struct hci_request *req)
5134{
5135 struct hci_cp_le_set_scan_param param_cp;
5136 struct hci_cp_le_set_scan_enable enable_cp;
5137 struct hci_dev *hdev = req->hdev;
5138 u8 own_addr_type;
5139
5140 /* Set require_privacy to true to avoid identification from
5141 * unknown peer devices. Since this is passive scanning, no
5142 * SCAN_REQ using the local identity should be sent. Mandating
5143 * privacy is just an extra precaution.
5144 */
5145 if (hci_update_random_address(req, true, &own_addr_type))
5146 return;
5147
5148 memset(&param_cp, 0, sizeof(param_cp));
5149 param_cp.type = LE_SCAN_PASSIVE;
5150 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5151 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5152 param_cp.own_address_type = own_addr_type;
5153 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5154 &param_cp);
5155
5156 memset(&enable_cp, 0, sizeof(enable_cp));
5157 enable_cp.enable = LE_SCAN_ENABLE;
5158 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
5159 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5160 &enable_cp);
5161}
5162
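/* Sketch: how these helpers compose into a request, much as
 * hci_update_background_scan() below does. A NULL complete callback
 * is acceptable to hci_req_run().
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add_le_passive_scan(&req);
 *	hci_req_run(&req, NULL);
 */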
a4790dbd
AG
5163static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5164{
5165 if (status)
5166 BT_DBG("HCI request failed to update background scanning: "
5167 "status 0x%2.2x", status);
5168}
5169
5170/* This function controls the background scanning based on hdev->pend_le_conns
5171 * list. If there are pending LE connection we start the background scanning,
5172 * otherwise we stop it.
5173 *
5174 * This function requires the caller holds hdev->lock.
5175 */
5176void hci_update_background_scan(struct hci_dev *hdev)
5177{
a4790dbd
AG
5178 struct hci_request req;
5179 struct hci_conn *conn;
5180 int err;
5181
5182 hci_req_init(&req, hdev);
5183
5184 if (list_empty(&hdev->pend_le_conns)) {
5185 /* If there are no pending LE connections, we should stop
5186 * the background scanning.
5187 */
5188
5189 /* If controller is not scanning we are done. */
5190 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5191 return;
5192
5193 hci_req_add_le_scan_disable(&req);
5194
5195 BT_DBG("%s stopping background scanning", hdev->name);
5196 } else {
a4790dbd
AG
5197 /* If there is at least one pending LE connection, we should
5198 * keep the background scan running.
5199 */
5200
5201 /* If controller is already scanning we are done. */
5202 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5203 return;
5204
5205 /* If controller is connecting, we should not start scanning
5206 * since some controllers are not able to scan and connect at
5207 * the same time.
5208 */
5209 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5210 if (conn)
5211 return;
5212
8ef30fd3 5213 hci_req_add_le_passive_scan(&req);
a4790dbd
AG
5214
5215 BT_DBG("%s starting background scanning", hdev->name);
5216 }
5217
5218 err = hci_req_run(&req, update_background_scan_complete);
5219 if (err)
5220 BT_ERR("Failed to run HCI request: err %d", err);
5221}
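/* As noted above, callers must hold hdev->lock; a typical call site
 * would look like:
 *
 *	hci_dev_lock(hdev);
 *	hci_update_background_scan(hdev);
 *	hci_dev_unlock(hdev);
 */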