git.proxmox.com Git - mirror_ubuntu-artful-kernel.git / net/bluetooth/hci_core.c
(blame view at commit "Bluetooth: Add support for Get Clock Info mgmt command")

/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

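/* The dut_mode attribute exposes the Device Under Test mode as a
 * boolean 'Y'/'N' value; a write sends the enable (or reset) HCI
 * command synchronously and flips the flag on success.
 */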
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open = simple_open,
        .read = dut_mode_read,
        .write = dut_mode_write,
        .llseek = default_llseek,
};

static int features_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        u8 p;

        hci_dev_lock(hdev);
        for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
                           hdev->features[p][0], hdev->features[p][1],
                           hdev->features[p][2], hdev->features[p][3],
                           hdev->features[p][4], hdev->features[p][5],
                           hdev->features[p][6], hdev->features[p][7]);
        }
        if (lmp_le_capable(hdev))
                seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
                           hdev->le_features[0], hdev->le_features[1],
                           hdev->le_features[2], hdev->le_features[3],
                           hdev->le_features[4], hdev->le_features[5],
                           hdev->le_features[6], hdev->le_features[7]);
        hci_dev_unlock(hdev);

        return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
        return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
        .open = features_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
        return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
        .open = blacklist_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bt_uuid *uuid;

        hci_dev_lock(hdev);
        list_for_each_entry(uuid, &hdev->uuids, list) {
                u8 i, val[16];

                /* The Bluetooth UUID values are stored in big endian,
                 * but with reversed byte order. So convert them into
                 * the right order for the %pUb modifier.
                 */
                for (i = 0; i < 16; i++)
                        val[i] = uuid->uuid[15 - i];

                seq_printf(f, "%pUb\n", val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
        return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
        .open = uuids_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open = inquiry_cache_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key = list_entry(p, struct link_key, list);
                seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
                           HCI_LINK_KEY_SIZE, key->val, key->pin_len);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
        .open = link_keys_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
                   hdev->dev_class[1], hdev->dev_class[0]);
        hci_dev_unlock(hdev);

        return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
        return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
        .open = dev_class_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
                                     size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
                                      const char __user *user_buf,
                                      size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_sc_support_fops = {
        .open = simple_open,
        .read = force_sc_support_read,
        .write = force_sc_support_write,
        .llseek = default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
        .open = simple_open,
        .read = sc_only_mode_read,
        .llseek = default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && (val < 500 || val > 3600000))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->idle_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->idle_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        /* Require the RPA timeout to be at least 30 seconds and at most
         * 24 hours.
         */
        if (val < 30 || val > (60 * 60 * 24))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->rpa_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->rpa_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
                        rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
                        sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val > hdev->conn_info_max_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_min_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_min_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
                        conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val < hdev->conn_info_min_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_max_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_max_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
                        conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        bdaddr_t addr;
        u8 addr_type;

        hci_dev_lock(hdev);

        hci_copy_identity_address(hdev, &addr, &addr_type);

        seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
                   16, hdev->irk, &hdev->rpa);

        hci_dev_unlock(hdev);

        return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
        .open = identity_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->random_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
        .open = random_address_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open = static_address_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static ssize_t force_static_address_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
                                          const char __user *user_buf,
                                          size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_static_address_fops = {
        .open = simple_open,
        .read = force_static_address_read,
        .write = force_static_address_write,
        .llseek = default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->le_white_list, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
        .open = white_list_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
                struct smp_irk *irk = list_entry(p, struct smp_irk, list);
                seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
                           &irk->bdaddr, irk->addr_type,
                           16, irk->val, &irk->rpa);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_resolving_keys_show,
                           inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
        .open = identity_resolving_keys_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->long_term_keys) {
                struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
                seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
                           &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
                           ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
                           __le64_to_cpu(ltk->rand), 16, ltk->val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
        .open = long_term_keys_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
                        conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
                        conn_max_interval_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x01 || val > 0x07)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_channel_map = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_channel_map;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
                        adv_channel_map_set, "%llu\n");

static int le_auto_conn_show(struct seq_file *sf, void *ptr)
{
        struct hci_dev *hdev = sf->private;
        struct hci_conn_params *p;

        hci_dev_lock(hdev);

        list_for_each_entry(p, &hdev->le_conn_params, list) {
                seq_printf(sf, "%pMR %u %u\n", &p->addr, p->addr_type,
                           p->auto_connect);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int le_auto_conn_open(struct inode *inode, struct file *file)
{
        return single_open(file, le_auto_conn_show, inode->i_private);
}

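/* Accepted commands are "add <bdaddr> <addr_type> <auto_connect>",
 * "del <bdaddr> <addr_type>" and "clr"; partial writes are rejected.
 */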
static ssize_t le_auto_conn_write(struct file *file, const char __user *data,
                                  size_t count, loff_t *offset)
{
        struct seq_file *sf = file->private_data;
        struct hci_dev *hdev = sf->private;
        u8 auto_connect = 0;
        bdaddr_t addr;
        u8 addr_type;
        char *buf;
        int err = 0;
        int n;

        /* Don't allow partial write */
        if (*offset != 0)
                return -EINVAL;

        if (count < 3)
                return -EINVAL;

        buf = memdup_user(data, count);
        if (IS_ERR(buf))
                return PTR_ERR(buf);

        if (memcmp(buf, "add", 3) == 0) {
                n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu",
                           &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
                           &addr.b[1], &addr.b[0], &addr_type,
                           &auto_connect);

                if (n < 7) {
                        err = -EINVAL;
                        goto done;
                }

                hci_dev_lock(hdev);
                err = hci_conn_params_add(hdev, &addr, addr_type, auto_connect,
                                          hdev->le_conn_min_interval,
                                          hdev->le_conn_max_interval);
                hci_dev_unlock(hdev);

                if (err)
                        goto done;
        } else if (memcmp(buf, "del", 3) == 0) {
                n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
                           &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
                           &addr.b[1], &addr.b[0], &addr_type);

                if (n < 7) {
                        err = -EINVAL;
                        goto done;
                }

                hci_dev_lock(hdev);
                hci_conn_params_del(hdev, &addr, addr_type);
                hci_dev_unlock(hdev);
        } else if (memcmp(buf, "clr", 3) == 0) {
                hci_dev_lock(hdev);
                hci_conn_params_clear(hdev);
                hci_pend_le_conns_clear(hdev);
                hci_update_background_scan(hdev);
                hci_dev_unlock(hdev);
        } else {
                err = -EINVAL;
        }

done:
        kfree(buf);

        if (err)
                return err;
        else
                return count;
}

static const struct file_operations le_auto_conn_fops = {
        .open = le_auto_conn_open,
        .read = seq_read,
        .write = le_auto_conn_write,
        .llseek = seq_lseek,
        .release = single_release,
};

/* ---- HCI requests ---- */

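/* Record the result of a finished request and wake up the task that is
 * sleeping in __hci_req_sync() or __hci_cmd_sync_ev().
 */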
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

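/* Take the last received event skb off the hdev and return it if it
 * matches the expected event code, or the Command Complete event for
 * the given opcode. On any mismatch the skb is freed and
 * ERR_PTR(-ENODATA) is returned.
 */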
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

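/* Send a single HCI command and sleep until the matching event (or the
 * Command Complete for the opcode) arrives, the timeout expires or a
 * signal is pending. Returns the event skb on success and an ERR_PTR
 * otherwise.
 */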
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

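/* Stage one init for BR/EDR controllers: read the basic controller
 * information (features, version, BD_ADDR) needed by later stages.
 */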
static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

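/* Stage one init for AMP controllers, which use block-based flow
 * control and need the AMP info and data block size read out.
 */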
static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

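/* Pick the best inquiry mode the controller supports: 0x02 for
 * extended inquiry results, 0x01 for inquiry results with RSSI and
 * 0x00 for standard inquiry results. A few controllers support RSSI
 * reporting without advertising it in their feature bits, so they are
 * matched by manufacturer and revision instead.
 */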
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

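/* Build the page one event mask from the controller's feature bits so
 * that only events the controller can actually generate get unmasked.
 */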
static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[0] |= 0x80; /* Encryption Change */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */
                events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01; /* IO Capability Request */
                events[6] |= 0x02; /* IO Capability Response */
                events[6] |= 0x04; /* User Confirmation Request */
                events[6] |= 0x08; /* User Passkey Request */
                events[6] |= 0x10; /* Remote OOB Data Request */
                events[6] |= 0x20; /* Simple Pairing Complete */
                events[7] |= 0x04; /* User Passkey Notification */
                events[7] |= 0x08; /* Keypress Notification */
                events[7] |= 0x10; /* Remote Host Supported
                                    * Features Notification
                                    */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20; /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, then the host features page
                 * should also be available as well. However some
                 * controllers list the max_page as 0 as long as SSP
                 * has not been enabled. To achieve proper debugging
                 * output, force the minimum max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

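/* Advertise the default link policy (role switch, hold, sniff, park)
 * based on what the controller's LMP features support.
 */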
static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40; /* Triggered Clock Capture */
                events[1] |= 0x80; /* Synchronization Train Complete */
                events[2] |= 0x10; /* Slave Page Response Timeout */
                events[2] |= 0x20; /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01; /* Synchronization Train Received */
                events[2] |= 0x02; /* CSB Receive */
                events[2] |= 0x04; /* CSB Timeout */
                events[2] |= 0x08; /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev))
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev))
                hci_set_le_support(req);

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if ((lmp_sc_capable(hdev) ||
             test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
            test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
                u8 support = 0x01;
                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }
}

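/* Run the staged controller init: stage one for all controller types,
 * stages two to four only for BR/EDR/LE controllers, and create the
 * debugfs entries during the initial setup phase.
 */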
static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* The Device Under Test (DUT) mode is special and available for
         * all controller types. So just create it early on.
         */
        if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
                debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                                    &dut_mode_fops);
        }

        /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
         * BR/EDR/LE type controllers. AMP controllers only need the
         * first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* Only create debugfs entries during the initial setup
         * phase and not every time the controller gets powered on.
         */
        if (!test_bit(HCI_SETUP, &hdev->dev_flags))
                return 0;

        debugfs_create_file("features", 0444, hdev->debugfs, hdev,
                            &features_fops);
        debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
                           &hdev->manufacturer);
        debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
        debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
        debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
                            &blacklist_fops);
        debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

        debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
                            &conn_info_min_age_fops);
        debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
                            &conn_info_max_age_fops);

        if (lmp_bredr_capable(hdev)) {
                debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
                                    hdev, &inquiry_cache_fops);
                debugfs_create_file("link_keys", 0400, hdev->debugfs,
                                    hdev, &link_keys_fops);
                debugfs_create_file("dev_class", 0444, hdev->debugfs,
                                    hdev, &dev_class_fops);
                debugfs_create_file("voice_setting", 0444, hdev->debugfs,
                                    hdev, &voice_setting_fops);
        }

        if (lmp_ssp_capable(hdev)) {
                debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
                                    hdev, &auto_accept_delay_fops);
                debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
                                    hdev, &force_sc_support_fops);
                debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
                                    hdev, &sc_only_mode_fops);
        }

        if (lmp_sniff_capable(hdev)) {
                debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
                                    hdev, &idle_timeout_fops);
                debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
                                    hdev, &sniff_min_interval_fops);
                debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
                                    hdev, &sniff_max_interval_fops);
        }

        if (lmp_le_capable(hdev)) {
                debugfs_create_file("identity", 0400, hdev->debugfs,
                                    hdev, &identity_fops);
                debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
                                    hdev, &rpa_timeout_fops);
                debugfs_create_file("random_address", 0444, hdev->debugfs,
                                    hdev, &random_address_fops);
                debugfs_create_file("static_address", 0444, hdev->debugfs,
                                    hdev, &static_address_fops);

                /* For controllers with a public address, provide a debug
                 * option to force the usage of the configured static
                 * address. By default the public address is used.
                 */
                if (bacmp(&hdev->bdaddr, BDADDR_ANY))
                        debugfs_create_file("force_static_address", 0644,
                                            hdev->debugfs, hdev,
                                            &force_static_address_fops);

                debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
                                  &hdev->le_white_list_size);
                debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
                                    &white_list_fops);
                debugfs_create_file("identity_resolving_keys", 0400,
                                    hdev->debugfs, hdev,
                                    &identity_resolving_keys_fops);
                debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
                                    hdev, &long_term_keys_fops);
                debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
                                    hdev, &conn_min_interval_fops);
                debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
                                    hdev, &conn_max_interval_fops);
                debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
                                    hdev, &adv_channel_map_fops);
                debugfs_create_file("le_auto_conn", 0644, hdev->debugfs, hdev,
                                    &le_auto_conn_fops);
                debugfs_create_u16("discov_interleaved_timeout", 0644,
                                   hdev->debugfs,
                                   &hdev->discov_interleaved_timeout);
        }

        return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

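/* Discovery counts as active while devices are being found or their
 * names resolved.
 */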
1864bool hci_discovery_active(struct hci_dev *hdev)
1865{
1866 struct discovery_state *discov = &hdev->discovery;
1867
6fbe195d 1868 switch (discov->state) {
343f935b 1869 case DISCOVERY_FINDING:
6fbe195d 1870 case DISCOVERY_RESOLVING:
30dc78e1
JH
1871 return true;
1872
6fbe195d
AG
1873 default:
1874 return false;
1875 }
30dc78e1
JH
1876}
1877
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

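/* Illustrative walk through the state machine above (a sketch, not part
 * of the original file): mgmt only learns about discovery when FINDING
 * is entered and when STOPPED is reached from anything but STARTING.
 *
 *	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
 *	hci_discovery_set_state(hdev, DISCOVERY_FINDING);   // mgmt_discovering(hdev, 1)
 *	hci_discovery_set_state(hdev, DISCOVERY_RESOLVING); // no mgmt event
 *	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);   // mgmt_discovering(hdev, 0)
 */
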
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

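/* Ordering sketch (illustrative, not part of the original file): the
 * resolve list stays sorted by signal-strength magnitude so that names
 * are resolved for the strongest devices first. With cached entries at
 * RSSI -40 and -70, an updated entry at RSSI -55 is re-inserted between
 * them, since abs(-40) < abs(-55) < abs(-70) and smaller magnitudes
 * sort first; NAME_PENDING entries are skipped over during insertion.
 */
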
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode    = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode        = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset      = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

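/* Userspace usage sketch (illustrative, not part of the original file):
 * hci_inquiry() backs the HCIINQUIRY ioctl on an HCI socket. A caller
 * hands in a struct hci_inquiry_req followed by room for the responses;
 * length is in units of 1.28 s per the HCI specification:
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[8];
 *	} buf = {
 *		.ir = {
 *			.dev_id  = 0,			// hci0, hypothetical
 *			.flags   = IREQ_CACHE_FLUSH,
 *			.lap     = { 0x33, 0x8b, 0x9e },	// GIAC
 *			.length  = 8,
 *			.num_rsp = 8,
 *		},
 *	};
 *
 *	ioctl(hci_sock_fd, HCIINQUIRY, &buf);
 *	// on success, buf.ir.num_rsp entries of buf.info[] are valid
 */
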
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_pend_le_conns_clear(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		list_del(&k->list);
		kfree(k);
	}
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}

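/* Decision sketch (illustrative, not part of the original file; constant
 * names as in this kernel's hci.h). With a dedicated-bonding local side
 * the key is kept even if the remote requested no bonding:
 *
 *	conn->auth_type = 0x02;		// local: dedicated bonding
 *	conn->remote_auth = 0x00;	// remote: no bonding
 *	hci_persistent_key(hdev, conn,
 *			   HCI_LK_UNAUTH_COMBINATION, 0xff);	// -> true
 *
 * whereas a debug combination key is always rejected:
 *
 *	hci_persistent_key(hdev, conn,
 *			   HCI_LK_DEBUG_COMBINATION, 0xff);	// -> false
 */
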
static bool ltk_type_master(u8 type)
{
	return (type == SMP_LTK);
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
			     bool master)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv || k->rand != rand)
			continue;

		if (ltk_type_master(k->type) != master)
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type, bool master)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0 &&
		    ltk_type_master(k->type) == master)
			return k;

	return NULL;
}

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa))
			return irk;
	}

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			return irk;
		}
	}

	return NULL;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0)
			return irk;
	}

	return NULL;
}

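/* Address-format sketch (illustrative, not part of the original file):
 * bdaddr_t stores the least significant byte first, so b[5] is the most
 * significant address byte. A static random identity address must have
 * its two top bits set ((b[5] & 0xc0) == 0xc0), e.g. C0:11:22:33:44:55,
 * while a resolvable private address carries the pattern 01 in those
 * bits (e.g. 4A:...) and is only matched above via smp_irk_matches().
 */
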
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	bool master = ltk_type_master(type);

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k, *tmp;
	int removed = 0;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}
}

/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 *hash, u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));

	memset(data->hash256, 0, sizeof(data->hash256));
	memset(data->randomizer256, 0, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
				u8 *hash192, u8 *randomizer192,
				u8 *hash256, u8 *randomizer256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash192, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));

	memcpy(data->hash256, hash256, sizeof(data->hash256));
	memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
					 bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

static void hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_blacklist_clear(hdev);
		return 0;
	}

	entry = hci_blacklist_lookup(hdev, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}

struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->le_white_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

void hci_white_list_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->le_white_list) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}
}

int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, &hdev->le_white_list);

	return 0;
}

int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	entry = hci_white_list_lookup(hdev, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
			return params;
		}
	}

	return NULL;
}

static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
	if (!conn)
		return false;

	if (conn->dst_type != type)
		return false;

	if (conn->state != BT_CONNECTED)
		return false;

	return true;
}

static bool is_identity_address(bdaddr_t *addr, u8 addr_type)
{
	if (addr_type == ADDR_LE_DEV_PUBLIC)
		return true;

	/* Check for Random Static address type */
	if ((addr->b[5] & 0xc0) == 0xc0)
		return true;

	return false;
}

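/* Example (illustrative, not part of the original file): only identity
 * addresses may key the connection-parameter list, since resolvable
 * private addresses change over time.
 *
 *	ADDR_LE_DEV_PUBLIC, any value		-> true
 *	ADDR_LE_DEV_RANDOM, b[5] = 0xc3		-> true  (static random)
 *	ADDR_LE_DEV_RANDOM, b[5] = 0x4a		-> false (resolvable private)
 */
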
/* This function requires the caller holds hdev->lock */
int hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
			u8 auto_connect, u16 conn_min_interval,
			u16 conn_max_interval)
{
	struct hci_conn_params *params;

	if (!is_identity_address(addr, addr_type))
		return -EINVAL;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		goto update;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return -ENOMEM;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);

update:
	params->conn_min_interval = conn_min_interval;
	params->conn_max_interval = conn_max_interval;
	params->auto_connect = auto_connect;

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		hci_pend_le_conn_del(hdev, addr, addr_type);
		break;
	case HCI_AUTO_CONN_ALWAYS:
		if (!is_connected(hdev, addr, addr_type))
			hci_pend_le_conn_add(hdev, addr, addr_type);
		break;
	}

	BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x "
	       "conn_max_interval 0x%.4x", addr, addr_type, auto_connect,
	       conn_min_interval, conn_max_interval);

	return 0;
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_pend_le_conn_del(hdev, addr, addr_type);

	list_del(&params->list);
	kfree(params);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE connection parameters were removed");
}

/* This function requires the caller holds hdev->lock */
struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct bdaddr_list *entry;

	list_for_each_entry(entry, &hdev->pend_le_conns, list) {
		if (bacmp(&entry->bdaddr, addr) == 0 &&
		    entry->bdaddr_type == addr_type)
			return entry;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct bdaddr_list *entry;

	entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
	if (entry)
		goto done;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		BT_ERR("Out of memory");
		return;
	}

	bacpy(&entry->bdaddr, addr);
	entry->bdaddr_type = addr_type;

	list_add(&entry->list, &hdev->pend_le_conns);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

done:
	hci_update_background_scan(hdev);
}

/* This function requires the caller holds hdev->lock */
void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct bdaddr_list *entry;

	entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
	if (!entry)
		goto done;

	list_del(&entry->list);
	kfree(entry);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

done:
	hci_update_background_scan(hdev);
}

/* This function requires the caller holds hdev->lock */
void hci_pend_le_conns_clear(struct hci_dev *hdev)
{
	struct bdaddr_list *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	BT_DBG("All LE pending connections cleared");
}

static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}

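/* Flow sketch (illustrative, not part of the original file): for
 * DISCOV_TYPE_INTERLEAVED the LE scan phase and the BR/EDR inquiry
 * phase are chained through the request callbacks above:
 *
 *	le_scan_disable_work()			// delayed work fired
 *	  -> HCI_OP_LE_SET_SCAN_ENABLE (off)
 *	  -> le_scan_disable_work_complete()
 *	       -> HCI_OP_INQUIRY (GIAC)
 *	       -> inquiry_complete()		// only acts on failure
 */
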
static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
	    hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
		BT_DBG("Deferring random address update");
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired or there is something else than
	 * the current RPA in use, then generate a new one.
	 */
	if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use an unresolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t urpa;

		get_random_bytes(&urpa, 6);
		urpa.b[5] &= 0x3f;	/* Clear two most significant bits */

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &urpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 */
	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

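/* Caller sketch (illustrative, not part of the original file; loosely
 * modeled on the advertising setup in mgmt.c). The helper above decides
 * which own address type an LE request should use and, if needed,
 * prepends the HCI command that updates the random address:
 */
static void __maybe_unused adv_own_addr_example(struct hci_request *req)
{
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type;

	/* no privacy required for connectable advertising */
	if (hci_update_random_address(req, false, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.own_address_type = own_addr_type;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
}
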
/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_adv_channel_map = 0x07;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

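/* Driver-side sketch (illustrative, not part of the original file; the
 * callback names are hypothetical). A transport driver pairs these two
 * helpers with hci_register_dev()/hci_unregister_dev() below:
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;		// hypothetical driver callbacks
 *	hdev->close = my_close;
 *	hdev->flush = my_flush;
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */
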
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
					       CRYPTO_ALG_ASYNC);
	if (IS_ERR(hdev->tfm_aes)) {
		BT_ERR("Unable to create crypto context");
		error = PTR_ERR(hdev->tfm_aes);
		hdev->tfm_aes = NULL;
		goto err_wqueue;
	}

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_tfm;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_tfm:
	crypto_free_blkcipher(hdev->tfm_aes);
err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	if (hdev->tfm_aes)
		crypto_free_blkcipher(hdev->tfm_aes);

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_white_list_clear(hdev);
	hci_conn_params_clear(hdev);
	hci_pend_le_conns_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
		      !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

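/* Illustrative sketch (editor's addition; buffer handling simplified): a
 * transport driver that already receives complete packets hands them to
 * the core by tagging the packet type first.
 *
 *	struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	memcpy(skb_put(skb, len), buf, len);
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	hci_recv_frame(hdev, skb);
 */
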
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

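/* Illustrative sketch (editor's addition): a UART-style driver with no
 * framing of its own can feed raw bytes straight from its receive path,
 * since the packet type is taken from the first byte of each frame:
 *
 *	hci_recv_stream_fragment(hdev, buf, len);
 */
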
/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

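/* Illustrative sketch (editor's addition; the completion callback name is
 * hypothetical): building and running a request mirrors what
 * hci_update_background_scan() does at the end of this file.
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_RESET, 0, NULL);
 *	err = hci_req_run(&req, my_complete);
 */
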
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;

	return skb;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}

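/* Worked example (editor's addition; assumes the usual layout of
 * hci_handle_pack(), a 12-bit handle with the flags in the upper nibble):
 * packing handle 0x002A with a flags value of 0x2 yields 0x202A, stored
 * little-endian as the bytes 2A 20, followed by the 16-bit data length.
 */
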
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

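/* Worked example (editor's addition): with hdev->acl_cnt = 8 free ACL
 * slots and num = 3 busy ACL connections, q = 8 / 3 = 2, so the
 * least-used connection may send up to two packets in this scheduling
 * round. A zero quotient is rounded up to 1 so the chosen connection
 * still makes progress.
 */
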
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

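/* Worked example (editor's addition; assumes the usual 4-byte ACL header):
 * an skb of 260 bytes carries 256 bytes of payload, so with a controller
 * block_len of 64 this is DIV_ROUND_UP(256, 64) = 4 blocks, while a
 * 257-byte payload would round up to 5.
 */
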
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}

static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;

	/* Set require_privacy to true to avoid identification from
	 * unknown peer devices. Since this is passive scanning, no
	 * SCAN_REQ using the local identity should be sent. Mandating
	 * privacy is just an extra precaution.
	 */
	if (hci_update_random_address(req, true, &own_addr_type))
		return;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_PASSIVE;
	param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
	param_cp.window = cpu_to_le16(hdev->le_scan_window);
	param_cp.own_address_type = own_addr_type;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);
}

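/* Illustrative note (editor's addition; assumes the Core specification's
 * units of 0.625 ms for the scan interval and window): the defaults set
 * in hci_alloc_dev() above, 0x0060 and 0x0030, correspond to scanning
 * for 30 ms out of every 60 ms.
 */
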
static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
{
	if (status)
		BT_DBG("HCI request failed to update background scanning: "
		       "status 0x%2.2x", status);
}

/* This function controls the background scanning based on the
 * hdev->pend_le_conns list. If there are pending LE connections, we start
 * the background scanning, otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
void hci_update_background_scan(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	hci_req_init(&req, hdev);

	if (list_empty(&hdev->pend_le_conns)) {
		/* If there are no pending LE connections, we should stop
		 * the background scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			return;

		hci_req_add_le_scan_disable(&req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		hci_req_add_le_passive_scan(&req);

		BT_DBG("%s starting background scanning", hdev->name);
	}

	err = hci_req_run(&req, update_background_scan_complete);
	if (err)
		BT_ERR("Failed to run HCI request: err %d", err);
}