/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)         mutex_lock(&d->req_lock)
#define hci_req_unlock(d)       mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

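/* The dut_mode attribute toggles the controller's Device Under Test
 * mode: reads report Y/N, and writing a boolean either sends
 * HCI_OP_ENABLE_DUT_MODE or resets the controller to leave the mode
 * again.
 */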
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        /* The first parameter byte of the Command Complete event is the
         * HCI status code; map it to an errno.
         */
        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};

static int features_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        u8 p;

        hci_dev_lock(hdev);
        for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
                           hdev->features[p][0], hdev->features[p][1],
                           hdev->features[p][2], hdev->features[p][3],
                           hdev->features[p][4], hdev->features[p][5],
                           hdev->features[p][6], hdev->features[p][7]);
        }
        if (lmp_le_capable(hdev))
                seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
                           hdev->le_features[0], hdev->le_features[1],
                           hdev->le_features[2], hdev->le_features[3],
                           hdev->le_features[4], hdev->le_features[5],
                           hdev->le_features[6], hdev->le_features[7]);
        hci_dev_unlock(hdev);

        return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
        return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
        .open           = features_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
        return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
        .open           = blacklist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bt_uuid *uuid;

        hci_dev_lock(hdev);
        list_for_each_entry(uuid, &hdev->uuids, list) {
                u8 i, val[16];

                /* The Bluetooth UUID values are stored in big endian,
                 * but with reversed byte order. So convert them into
                 * the right order for the %pUb modifier.
                 */
                for (i = 0; i < 16; i++)
                        val[i] = uuid->uuid[15 - i];

                seq_printf(f, "%pUb\n", val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
        return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
        .open           = uuids_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open           = inquiry_cache_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct link_key *key;

        rcu_read_lock();
        list_for_each_entry_rcu(key, &hdev->link_keys, list)
                seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
                           HCI_LINK_KEY_SIZE, key->val, key->pin_len);
        rcu_read_unlock();

        return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
        .open           = link_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
                   hdev->dev_class[1], hdev->dev_class[0]);
        hci_dev_unlock(hdev);

        return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
        return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
        .open           = dev_class_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

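/* The u64-valued attributes below rely on DEFINE_SIMPLE_ATTRIBUTE(),
 * which generates the debugfs file_operations around a get and an
 * optional set handler using the given printf format string.
 */
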
static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
                                     size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
                                      const char __user *user_buf,
                                      size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_sc_support_fops = {
        .open           = simple_open,
        .read           = force_sc_support_read,
        .write          = force_sc_support_write,
        .llseek         = default_llseek,
};

static ssize_t force_lesc_support_read(struct file *file, char __user *user_buf,
                                       size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_LESC, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_lesc_support_write(struct file *file,
                                        const char __user *user_buf,
                                        size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_LESC, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_LESC, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_lesc_support_fops = {
        .open           = simple_open,
        .read           = force_lesc_support_read,
        .write          = force_lesc_support_write,
        .llseek         = default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
        .open           = simple_open,
        .read           = sc_only_mode_read,
        .llseek         = default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        /* Accept 0 (disabled) or a timeout between 500 and 3600000 ms */
        if (val != 0 && (val < 500 || val > 3600000))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->idle_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->idle_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        /* Require the RPA timeout to be at least 30 seconds and at most
         * 24 hours.
         */
        if (val < 30 || val > (60 * 60 * 24))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->rpa_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->rpa_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
                        rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
                        sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val > hdev->conn_info_max_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_min_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_min_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
                        conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val < hdev->conn_info_min_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_max_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_max_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
                        conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        bdaddr_t addr;
        u8 addr_type;

        hci_dev_lock(hdev);

        hci_copy_identity_address(hdev, &addr, &addr_type);

        seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
                   16, hdev->irk, &hdev->rpa);

        hci_dev_unlock(hdev);

        return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
        .open           = identity_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->random_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
        .open           = random_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open           = static_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static ssize_t force_static_address_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
                                          const char __user *user_buf,
                                          size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_static_address_fops = {
        .open           = simple_open,
        .read           = force_static_address_read,
        .write          = force_static_address_write,
        .llseek         = default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->le_white_list, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
        .open           = white_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct smp_irk *irk;

        rcu_read_lock();
        list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
                seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
                           &irk->bdaddr, irk->addr_type,
                           16, irk->val, &irk->rpa);
        }
        rcu_read_unlock();

        return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_resolving_keys_show,
                           inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
        .open           = identity_resolving_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct smp_ltk *ltk;

        rcu_read_lock();
        list_for_each_entry_rcu(ltk, &hdev->long_term_keys, list)
                seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
                           &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
                           ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
                           __le64_to_cpu(ltk->rand), 16, ltk->val);
        rcu_read_unlock();

        return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
        .open           = long_term_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        /* LE connection interval: 0x0006 to 0x0c80, in units of 1.25 ms */
        if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
                        conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
                        conn_max_interval_set, "%llu\n");

static int conn_latency_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        /* 0x01f3 is the largest connection latency the spec allows */
        if (val > 0x01f3)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_latency = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_latency;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
                        conn_latency_set, "%llu\n");

static int supervision_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        /* LE supervision timeout: 0x000a to 0x0c80, in units of 10 ms */
        if (val < 0x000a || val > 0x0c80)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_supv_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_supv_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
                        supervision_timeout_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        /* Bitmap of advertising channels 37, 38 and 39; at least one
         * channel must be enabled.
         */
        if (val < 0x01 || val > 0x07)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_channel_map = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_channel_map;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
                        adv_channel_map_set, "%llu\n");

static int adv_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
                        adv_min_interval_set, "%llu\n");

static int adv_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
                        adv_max_interval_set, "%llu\n");

static int device_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct hci_conn_params *p;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->whitelist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        list_for_each_entry(p, &hdev->le_conn_params, list) {
                seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type,
                           p->auto_connect);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
        .open           = device_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

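/* Retrieve the last received event (hdev->recv_evt) if it matches the
 * expected event code or, when @event is zero, a Command Complete for
 * @opcode. Returns the skb with its headers pulled, or an ERR_PTR when
 * nothing suitable was captured.
 */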
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

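/* Send a single HCI command and sleep until the matching Command
 * Complete (or the explicitly requested event) arrives, or the timeout
 * expires. Callers serialize through hci_req_lock(); see
 * dut_mode_write() above for a typical synchronous use, e.g. a reset:
 *
 *      skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *      kfree_skb(skb);
 */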
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

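/* The request helpers below run a whole hci_request (potentially
 * several commands queued by the func() callback) synchronously;
 * hci_req_sync_complete() wakes the waiter once the final command in
 * the request finishes.
 */
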
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

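/* The *_req() functions below only build requests: they queue commands
 * on a struct hci_request and are executed later via __hci_req_sync().
 */
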
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

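/* Pick the inquiry result mode: 0x02 for inquiry results with extended
 * inquiry response, 0x01 for results with RSSI, 0x00 for the standard
 * format. A few controllers are matched by manufacturer, revision and
 * LMP subversion and use the RSSI format even though they do not
 * advertise the corresponding feature bit.
 */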
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

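/* Build the 8-byte event mask sent with HCI_OP_SET_EVENT_MASK: each
 * bit enables delivery of one HCI event, with byte/bit positions
 * defined by the Bluetooth Core specification (annotated inline).
 */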
static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01; /* IO Capability Request */
                events[6] |= 0x02; /* IO Capability Response */
                events[6] |= 0x04; /* User Confirmation Request */
                events[6] |= 0x08; /* User Passkey Request */
                events[6] |= 0x10; /* Remote OOB Data Request */
                events[6] |= 0x20; /* Simple Pairing Complete */
                events[7] |= 0x04; /* User Passkey Notification */
                events[7] |= 0x08; /* Keypress Notification */
                events[7] |= 0x10; /* Remote Host Supported
                                    * Features Notification
                                    */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20; /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, the host features page should
                 * be available as well. However some controllers list
                 * max_page as 0 as long as SSP has not been enabled. To
                 * achieve proper debugging output, force max_page to a
                 * minimum of 1.
                 */
                hdev->max_page = 0x01;

                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

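/* Page 2 of the event mask covers Connectionless Slave Broadcast and
 * the Authenticated Payload Timeout Expired event. It is only written
 * when the controller supports the Set Event Mask Page 2 command (see
 * the hdev->commands[22] check in hci_init4_req() below).
 */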
static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));
                events[0] = 0x0f;

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10;      /* LE Long Term Key Request */

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20;      /* LE Remote Connection
                                                 * Parameter Request
                                                 */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                if (hdev->commands[25] & 0x40) {
                        /* Read LE Advertising Channel TX Power */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

1758
5d4e7e8d
JH
1759static void hci_init4_req(struct hci_request *req, unsigned long opt)
1760{
1761 struct hci_dev *hdev = req->hdev;
1762
d62e6d67
JH
1763 /* Set event mask page 2 if the HCI command for it is supported */
1764 if (hdev->commands[22] & 0x04)
1765 hci_set_event_mask_page_2(req);
1766
109e3191
MH
1767 /* Read local codec list if the HCI command is supported */
1768 if (hdev->commands[29] & 0x20)
1769 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
1770
f4fe73ed
MH
1771 /* Get MWS transport configuration if the HCI command is supported */
1772 if (hdev->commands[30] & 0x08)
1773 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
1774
5d4e7e8d 1775 /* Check for Synchronization Train support */
53b834d2 1776 if (lmp_sync_train_capable(hdev))
5d4e7e8d 1777 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
a6d0d690
MH
1778
1779 /* Enable Secure Connections if supported and configured */
710f11c0 1780 if (bredr_sc_enabled(hdev)) {
a6d0d690
MH
1781 u8 support = 0x01;
1782 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1783 sizeof(support), &support);
1784 }
5d4e7e8d
JH
1785}
1786
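/* Full controller bring-up runs up to four request stages: init1
 * resets the controller and reads basic information, init2 does the
 * BR/EDR and LE base setup, init3 configures event masks and link
 * policy, and init4 covers optional features. AMP controllers stop
 * after the first stage.
 */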
static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* The Device Under Test (DUT) mode is special and available for
         * all controller types. So just create it early on.
         */
        if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
                debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                                    &dut_mode_fops);
        }

        /* The HCI_BREDR device type covers single-mode LE, single-mode
         * BR/EDR and dual-mode BR/EDR/LE controllers. AMP controllers
         * only need the first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* Only create debugfs entries during the initial setup
         * phase and not every time the controller gets powered on.
         */
        if (!test_bit(HCI_SETUP, &hdev->dev_flags))
                return 0;

        debugfs_create_file("features", 0444, hdev->debugfs, hdev,
                            &features_fops);
        debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
                           &hdev->manufacturer);
        debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
        debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
        debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
                            &device_list_fops);
        debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
                            &blacklist_fops);
        debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

        debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
                            &conn_info_min_age_fops);
        debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
                            &conn_info_max_age_fops);

        if (lmp_bredr_capable(hdev)) {
                debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
                                    hdev, &inquiry_cache_fops);
                debugfs_create_file("link_keys", 0400, hdev->debugfs,
                                    hdev, &link_keys_fops);
                debugfs_create_file("dev_class", 0444, hdev->debugfs,
                                    hdev, &dev_class_fops);
                debugfs_create_file("voice_setting", 0444, hdev->debugfs,
                                    hdev, &voice_setting_fops);
        }

        if (lmp_ssp_capable(hdev)) {
                debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
                                    hdev, &auto_accept_delay_fops);
                debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
                                    hdev, &force_sc_support_fops);
                debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
                                    hdev, &sc_only_mode_fops);
                if (lmp_le_capable(hdev))
                        debugfs_create_file("force_lesc_support", 0644,
                                            hdev->debugfs, hdev,
                                            &force_lesc_support_fops);
        }

        if (lmp_sniff_capable(hdev)) {
                debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
                                    hdev, &idle_timeout_fops);
                debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
                                    hdev, &sniff_min_interval_fops);
                debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
                                    hdev, &sniff_max_interval_fops);
        }

        if (lmp_le_capable(hdev)) {
                debugfs_create_file("identity", 0400, hdev->debugfs,
                                    hdev, &identity_fops);
                debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
                                    hdev, &rpa_timeout_fops);
                debugfs_create_file("random_address", 0444, hdev->debugfs,
                                    hdev, &random_address_fops);
                debugfs_create_file("static_address", 0444, hdev->debugfs,
                                    hdev, &static_address_fops);

                /* For controllers with a public address, provide a debug
                 * option to force the usage of the configured static
                 * address. By default the public address is used.
                 */
                if (bacmp(&hdev->bdaddr, BDADDR_ANY))
                        debugfs_create_file("force_static_address", 0644,
                                            hdev->debugfs, hdev,
                                            &force_static_address_fops);

                debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
                                  &hdev->le_white_list_size);
                debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
                                    &white_list_fops);
                debugfs_create_file("identity_resolving_keys", 0400,
                                    hdev->debugfs, hdev,
                                    &identity_resolving_keys_fops);
                debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
                                    hdev, &long_term_keys_fops);
                debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
                                    hdev, &conn_min_interval_fops);
                debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
                                    hdev, &conn_max_interval_fops);
                debugfs_create_file("conn_latency", 0644, hdev->debugfs,
                                    hdev, &conn_latency_fops);
                debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
                                    hdev, &supervision_timeout_fops);
                debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
                                    hdev, &adv_channel_map_fops);
                debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
                                    hdev, &adv_min_interval_fops);
                debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
                                    hdev, &adv_max_interval_fops);
                debugfs_create_u16("discov_interleaved_timeout", 0644,
                                   hdev->debugfs,
                                   &hdev->discov_interleaved_timeout);

                smp_register(hdev);
        }

        return 0;
}

0ebca7d6
MH
1930static void hci_init0_req(struct hci_request *req, unsigned long opt)
1931{
1932 struct hci_dev *hdev = req->hdev;
1933
1934 BT_DBG("%s %ld", hdev->name, opt);
1935
1936 /* Reset */
1937 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1938 hci_reset_req(req, 0);
1939
1940 /* Read Local Version */
1941 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1942
1943 /* Read BD Address */
1944 if (hdev->set_bdaddr)
1945 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1946}
1947
1948static int __hci_unconf_init(struct hci_dev *hdev)
1949{
1950 int err;
1951
cc78b44b
MH
1952 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1953 return 0;
1954
0ebca7d6
MH
1955 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1956 if (err < 0)
1957 return err;
1958
1959 return 0;
1960}
1961
42c6b129 1962static void hci_scan_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1963{
1964 __u8 scan = opt;
1965
42c6b129 1966 BT_DBG("%s %x", req->hdev->name, scan);
1da177e4
LT
1967
1968 /* Inquiry and Page scans */
42c6b129 1969 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
1970}
1971
42c6b129 1972static void hci_auth_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1973{
1974 __u8 auth = opt;
1975
42c6b129 1976 BT_DBG("%s %x", req->hdev->name, auth);
1da177e4
LT
1977
1978 /* Authentication */
42c6b129 1979 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
1980}
1981
42c6b129 1982static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1983{
1984 __u8 encrypt = opt;
1985
42c6b129 1986 BT_DBG("%s %x", req->hdev->name, encrypt);
1da177e4 1987
e4e8e37c 1988 /* Encryption */
42c6b129 1989 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
1990}
1991
42c6b129 1992static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
e4e8e37c
MH
1993{
1994 __le16 policy = cpu_to_le16(opt);
1995
42c6b129 1996 BT_DBG("%s %x", req->hdev->name, policy);
e4e8e37c
MH
1997
1998 /* Default link policy */
42c6b129 1999 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
e4e8e37c
MH
2000}
2001
8e87d142 2002/* Get HCI device by index.
1da177e4
LT
2003 * Device is held on return. */
2004struct hci_dev *hci_dev_get(int index)
2005{
8035ded4 2006 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
2007
2008 BT_DBG("%d", index);
2009
2010 if (index < 0)
2011 return NULL;
2012
2013 read_lock(&hci_dev_list_lock);
8035ded4 2014 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
2015 if (d->id == index) {
2016 hdev = hci_dev_hold(d);
2017 break;
2018 }
2019 }
2020 read_unlock(&hci_dev_list_lock);
2021 return hdev;
2022}
1da177e4
LT
2023
2024/* ---- Inquiry support ---- */
ff9ef578 2025
30dc78e1
JH
2026bool hci_discovery_active(struct hci_dev *hdev)
2027{
2028 struct discovery_state *discov = &hdev->discovery;
2029
6fbe195d 2030 switch (discov->state) {
343f935b 2031 case DISCOVERY_FINDING:
6fbe195d 2032 case DISCOVERY_RESOLVING:
30dc78e1
JH
2033 return true;
2034
6fbe195d
AG
2035 default:
2036 return false;
2037 }
30dc78e1
JH
2038}
2039
ff9ef578
JH
2040void hci_discovery_set_state(struct hci_dev *hdev, int state)
2041{
bb3e0a33
JH
2042 int old_state = hdev->discovery.state;
2043
ff9ef578
JH
2044 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
2045
bb3e0a33 2046 if (old_state == state)
ff9ef578
JH
2047 return;
2048
bb3e0a33
JH
2049 hdev->discovery.state = state;
2050
ff9ef578
JH
2051 switch (state) {
2052 case DISCOVERY_STOPPED:
c54c3860
AG
2053 hci_update_background_scan(hdev);
2054
bb3e0a33 2055 if (old_state != DISCOVERY_STARTING)
7b99b659 2056 mgmt_discovering(hdev, 0);
ff9ef578
JH
2057 break;
2058 case DISCOVERY_STARTING:
2059 break;
343f935b 2060 case DISCOVERY_FINDING:
ff9ef578
JH
2061 mgmt_discovering(hdev, 1);
2062 break;
30dc78e1
JH
2063 case DISCOVERY_RESOLVING:
2064 break;
ff9ef578
JH
2065 case DISCOVERY_STOPPING:
2066 break;
2067 }
ff9ef578
JH
2068}
2069
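/* Illustrative summary of the usual discovery state flow:
 *
 *   STOPPED -> STARTING -> FINDING -> RESOLVING -> STOPPING -> STOPPED
 *
 * mgmt_discovering(hdev, 1) is emitted on entering FINDING and
 * mgmt_discovering(hdev, 0) on falling back to STOPPED, unless the
 * procedure never got past STARTING. Note that hci_discovery_active()
 * treats only FINDING and RESOLVING as active states.
 */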
1f9b9a5d 2070void hci_inquiry_cache_flush(struct hci_dev *hdev)
1da177e4 2071{
30883512 2072 struct discovery_state *cache = &hdev->discovery;
b57c1a56 2073 struct inquiry_entry *p, *n;
1da177e4 2074
561aafbc
JH
2075 list_for_each_entry_safe(p, n, &cache->all, all) {
2076 list_del(&p->all);
b57c1a56 2077 kfree(p);
1da177e4 2078 }
561aafbc
JH
2079
2080 INIT_LIST_HEAD(&cache->unknown);
2081 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
2082}
2083
a8c5fb1a
GP
2084struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2085 bdaddr_t *bdaddr)
1da177e4 2086{
30883512 2087 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
2088 struct inquiry_entry *e;
2089
6ed93dc6 2090 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 2091
561aafbc
JH
2092 list_for_each_entry(e, &cache->all, all) {
2093 if (!bacmp(&e->data.bdaddr, bdaddr))
2094 return e;
2095 }
2096
2097 return NULL;
2098}
2099
2100struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 2101 bdaddr_t *bdaddr)
561aafbc 2102{
30883512 2103 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
2104 struct inquiry_entry *e;
2105
6ed93dc6 2106 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
2107
2108 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 2109 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
2110 return e;
2111 }
2112
2113 return NULL;
1da177e4
LT
2114}
2115
30dc78e1 2116struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
2117 bdaddr_t *bdaddr,
2118 int state)
30dc78e1
JH
2119{
2120 struct discovery_state *cache = &hdev->discovery;
2121 struct inquiry_entry *e;
2122
6ed93dc6 2123 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
2124
2125 list_for_each_entry(e, &cache->resolve, list) {
2126 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2127 return e;
2128 if (!bacmp(&e->data.bdaddr, bdaddr))
2129 return e;
2130 }
2131
2132 return NULL;
2133}
2134
a3d4e20a 2135void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
04124681 2136 struct inquiry_entry *ie)
a3d4e20a
JH
2137{
2138 struct discovery_state *cache = &hdev->discovery;
2139 struct list_head *pos = &cache->resolve;
2140 struct inquiry_entry *p;
2141
2142 list_del(&ie->list);
2143
2144 list_for_each_entry(p, &cache->resolve, list) {
2145 if (p->name_state != NAME_PENDING &&
a8c5fb1a 2146 abs(p->data.rssi) >= abs(ie->data.rssi))
a3d4e20a
JH
2147 break;
2148 pos = &p->list;
2149 }
2150
2151 list_add(&ie->list, pos);
2152}
2153
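/* Worked example for the insertion above: with a resolve list holding
 * entries at RSSI -40, -60 and -75 (sorted by ascending abs(rssi),
 * i.e. strongest signal first), an entry re-inserted with RSSI -50
 * lands between -40 and -60. Name resolution therefore proceeds from
 * the strongest device outwards, and NAME_PENDING entries are skipped
 * by the comparison so an in-flight lookup keeps its position.
 */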
af58925c
MH
2154u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2155 bool name_known)
1da177e4 2156{
30883512 2157 struct discovery_state *cache = &hdev->discovery;
70f23020 2158 struct inquiry_entry *ie;
af58925c 2159 u32 flags = 0;
1da177e4 2160
6ed93dc6 2161 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1da177e4 2162
2b2fec4d
SJ
2163 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2164
af58925c
MH
2165 if (!data->ssp_mode)
2166 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
388fc8fa 2167
70f23020 2168 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
a3d4e20a 2169 if (ie) {
af58925c
MH
2170 if (!ie->data.ssp_mode)
2171 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
388fc8fa 2172
a3d4e20a 2173 if (ie->name_state == NAME_NEEDED &&
a8c5fb1a 2174 data->rssi != ie->data.rssi) {
a3d4e20a
JH
2175 ie->data.rssi = data->rssi;
2176 hci_inquiry_cache_update_resolve(hdev, ie);
2177 }
2178
561aafbc 2179 goto update;
a3d4e20a 2180 }
561aafbc
JH
2181
2182 /* Entry not in the cache. Add new one. */
27f70f3e 2183 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
af58925c
MH
2184 if (!ie) {
2185 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2186 goto done;
2187 }
561aafbc
JH
2188
2189 list_add(&ie->all, &cache->all);
2190
2191 if (name_known) {
2192 ie->name_state = NAME_KNOWN;
2193 } else {
2194 ie->name_state = NAME_NOT_KNOWN;
2195 list_add(&ie->list, &cache->unknown);
2196 }
70f23020 2197
561aafbc
JH
2198update:
2199 if (name_known && ie->name_state != NAME_KNOWN &&
a8c5fb1a 2200 ie->name_state != NAME_PENDING) {
561aafbc
JH
2201 ie->name_state = NAME_KNOWN;
2202 list_del(&ie->list);
1da177e4
LT
2203 }
2204
70f23020
AE
2205 memcpy(&ie->data, data, sizeof(*data));
2206 ie->timestamp = jiffies;
1da177e4 2207 cache->timestamp = jiffies;
3175405b
JH
2208
2209 if (ie->name_state == NAME_NOT_KNOWN)
af58925c 2210 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
3175405b 2211
af58925c
MH
2212done:
2213 return flags;
1da177e4
LT
2214}
2215
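/* A hedged note on the returned flags (they are consumed by the mgmt
 * layer, not in this file): MGMT_DEV_FOUND_LEGACY_PAIRING marks a
 * remote without SSP support, while MGMT_DEV_FOUND_CONFIRM_NAME asks
 * userspace to confirm the remote name, e.g. when the entry could not
 * be cached or the name is not yet known.
 */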
2216static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2217{
30883512 2218 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
2219 struct inquiry_info *info = (struct inquiry_info *) buf;
2220 struct inquiry_entry *e;
2221 int copied = 0;
2222
561aafbc 2223 list_for_each_entry(e, &cache->all, all) {
1da177e4 2224 struct inquiry_data *data = &e->data;
b57c1a56
JH
2225
2226 if (copied >= num)
2227 break;
2228
1da177e4
LT
2229 bacpy(&info->bdaddr, &data->bdaddr);
2230 info->pscan_rep_mode = data->pscan_rep_mode;
2231 info->pscan_period_mode = data->pscan_period_mode;
2232 info->pscan_mode = data->pscan_mode;
2233 memcpy(info->dev_class, data->dev_class, 3);
2234 info->clock_offset = data->clock_offset;
b57c1a56 2235
1da177e4 2236 info++;
b57c1a56 2237 copied++;
1da177e4
LT
2238 }
2239
2240 BT_DBG("cache %p, copied %d", cache, copied);
2241 return copied;
2242}
2243
42c6b129 2244static void hci_inq_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
2245{
2246 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
42c6b129 2247 struct hci_dev *hdev = req->hdev;
1da177e4
LT
2248 struct hci_cp_inquiry cp;
2249
2250 BT_DBG("%s", hdev->name);
2251
2252 if (test_bit(HCI_INQUIRY, &hdev->flags))
2253 return;
2254
2255 /* Start Inquiry */
2256 memcpy(&cp.lap, &ir->lap, 3);
2257 cp.length = ir->length;
2258 cp.num_rsp = ir->num_rsp;
42c6b129 2259 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
2260}
2261
2262int hci_inquiry(void __user *arg)
2263{
2264 __u8 __user *ptr = arg;
2265 struct hci_inquiry_req ir;
2266 struct hci_dev *hdev;
2267 int err = 0, do_inquiry = 0, max_rsp;
2268 long timeo;
2269 __u8 *buf;
2270
2271 if (copy_from_user(&ir, ptr, sizeof(ir)))
2272 return -EFAULT;
2273
5a08ecce
AE
2274 hdev = hci_dev_get(ir.dev_id);
2275 if (!hdev)
1da177e4
LT
2276 return -ENODEV;
2277
0736cfa8
MH
2278 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2279 err = -EBUSY;
2280 goto done;
2281 }
2282
4a964404 2283 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
2284 err = -EOPNOTSUPP;
2285 goto done;
2286 }
2287
5b69bef5
MH
2288 if (hdev->dev_type != HCI_BREDR) {
2289 err = -EOPNOTSUPP;
2290 goto done;
2291 }
2292
56f87901
JH
2293 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2294 err = -EOPNOTSUPP;
2295 goto done;
2296 }
2297
09fd0de5 2298 hci_dev_lock(hdev);
8e87d142 2299 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 2300 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1f9b9a5d 2301 hci_inquiry_cache_flush(hdev);
1da177e4
LT
2302 do_inquiry = 1;
2303 }
09fd0de5 2304 hci_dev_unlock(hdev);
1da177e4 2305
04837f64 2306 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
2307
2308 if (do_inquiry) {
01178cd4
JH
2309 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2310 timeo);
70f23020
AE
2311 if (err < 0)
2312 goto done;
3e13fa1e
AG
2313
2314 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2315 * cleared). If it is interrupted by a signal, return -EINTR.
2316 */
74316201 2317 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
3e13fa1e
AG
2318 TASK_INTERRUPTIBLE))
2319 return -EINTR;
70f23020 2320 }
1da177e4 2321
8fc9ced3
GP
2322 /* For an unlimited number of responses we will use a buffer with
2323 * 255 entries
2324 */
1da177e4
LT
2325 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2326
2327 /* cache_dump can't sleep. Therefore we allocate a temp buffer and then
2328 * copy it to the user space.
2329 */
01df8c31 2330 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 2331 if (!buf) {
1da177e4
LT
2332 err = -ENOMEM;
2333 goto done;
2334 }
2335
09fd0de5 2336 hci_dev_lock(hdev);
1da177e4 2337 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 2338 hci_dev_unlock(hdev);
1da177e4
LT
2339
2340 BT_DBG("num_rsp %d", ir.num_rsp);
2341
2342 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2343 ptr += sizeof(ir);
2344 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 2345 ir.num_rsp))
1da177e4 2346 err = -EFAULT;
8e87d142 2347 } else
1da177e4
LT
2348 err = -EFAULT;
2349
2350 kfree(buf);
2351
2352done:
2353 hci_dev_put(hdev);
2354 return err;
2355}
2356
cbed0ca1 2357static int hci_dev_do_open(struct hci_dev *hdev)
1da177e4 2358{
1da177e4
LT
2359 int ret = 0;
2360
1da177e4
LT
2361 BT_DBG("%s %p", hdev->name, hdev);
2362
2363 hci_req_lock(hdev);
2364
94324962
JH
2365 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2366 ret = -ENODEV;
2367 goto done;
2368 }
2369
d603b76b
MH
2370 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2371 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
a5c8f270
MH
2372 /* Check for rfkill but allow the HCI setup stage to
2373 * proceed (which in itself doesn't cause any RF activity).
2374 */
2375 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2376 ret = -ERFKILL;
2377 goto done;
2378 }
2379
2380 /* Check for valid public address or a configured static
2381 * random address, but let the HCI setup proceed to
2382 * be able to determine if there is a public address
2383 * or not.
2384 *
c6beca0e
MH
2385 * In case of user channel usage, it is not important
2386 * whether a public address or static random address is
2387 * available.
2388 *
a5c8f270
MH
2389 * This check is only valid for BR/EDR controllers
2390 * since AMP controllers do not have an address.
2391 */
c6beca0e
MH
2392 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2393 hdev->dev_type == HCI_BREDR &&
a5c8f270
MH
2394 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2395 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2396 ret = -EADDRNOTAVAIL;
2397 goto done;
2398 }
611b30f7
MH
2399 }
2400
1da177e4
LT
2401 if (test_bit(HCI_UP, &hdev->flags)) {
2402 ret = -EALREADY;
2403 goto done;
2404 }
2405
1da177e4
LT
2406 if (hdev->open(hdev)) {
2407 ret = -EIO;
2408 goto done;
2409 }
2410
f41c70c4
MH
2411 atomic_set(&hdev->cmd_cnt, 1);
2412 set_bit(HCI_INIT, &hdev->flags);
2413
af202f84
MH
2414 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2415 if (hdev->setup)
2416 ret = hdev->setup(hdev);
f41c70c4 2417
af202f84
MH
2418 /* The transport driver can set these quirks before
2419 * creating the HCI device or in its setup callback.
2420 *
2421 * In case any of them is set, the controller has to
2422 * start up as unconfigured.
2423 */
eb1904f4
MH
2424 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2425 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
89bc22d2 2426 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
f41c70c4 2427
0ebca7d6
MH
2428 /* For an unconfigured controller it is required to
2429 * read at least the version information provided by
2430 * the Read Local Version Information command.
2431 *
2432 * If the set_bdaddr driver callback is provided, then
2433 * also the original Bluetooth public device address
2434 * will be read using the Read BD Address command.
2435 */
2436 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2437 ret = __hci_unconf_init(hdev);
89bc22d2
MH
2438 }
2439
9713c17b
MH
2440 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2441 /* If public address change is configured, ensure that
2442 * the address gets programmed. If the driver does not
2443 * support changing the public address, fail the power
2444 * on procedure.
2445 */
2446 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2447 hdev->set_bdaddr)
24c457e2
MH
2448 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2449 else
2450 ret = -EADDRNOTAVAIL;
2451 }
2452
f41c70c4 2453 if (!ret) {
4a964404 2454 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
0736cfa8 2455 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
f41c70c4 2456 ret = __hci_init(hdev);
1da177e4
LT
2457 }
2458
f41c70c4
MH
2459 clear_bit(HCI_INIT, &hdev->flags);
2460
1da177e4
LT
2461 if (!ret) {
2462 hci_dev_hold(hdev);
d6bfd59c 2463 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1da177e4
LT
2464 set_bit(HCI_UP, &hdev->flags);
2465 hci_notify(hdev, HCI_DEV_UP);
bb4b2a9a 2466 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
d603b76b 2467 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
4a964404 2468 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
0736cfa8 2469 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1514b892 2470 hdev->dev_type == HCI_BREDR) {
09fd0de5 2471 hci_dev_lock(hdev);
744cf19e 2472 mgmt_powered(hdev, 1);
09fd0de5 2473 hci_dev_unlock(hdev);
56e5cb86 2474 }
8e87d142 2475 } else {
1da177e4 2476 /* Init failed, cleanup */
3eff45ea 2477 flush_work(&hdev->tx_work);
c347b765 2478 flush_work(&hdev->cmd_work);
b78752cc 2479 flush_work(&hdev->rx_work);
1da177e4
LT
2480
2481 skb_queue_purge(&hdev->cmd_q);
2482 skb_queue_purge(&hdev->rx_q);
2483
2484 if (hdev->flush)
2485 hdev->flush(hdev);
2486
2487 if (hdev->sent_cmd) {
2488 kfree_skb(hdev->sent_cmd);
2489 hdev->sent_cmd = NULL;
2490 }
2491
2492 hdev->close(hdev);
fee746b0 2493 hdev->flags &= BIT(HCI_RAW);
1da177e4
LT
2494 }
2495
2496done:
2497 hci_req_unlock(hdev);
1da177e4
LT
2498 return ret;
2499}
2500
cbed0ca1
JH
2501/* ---- HCI ioctl helpers ---- */
2502
2503int hci_dev_open(__u16 dev)
2504{
2505 struct hci_dev *hdev;
2506 int err;
2507
2508 hdev = hci_dev_get(dev);
2509 if (!hdev)
2510 return -ENODEV;
2511
4a964404 2512 /* Devices that are marked as unconfigured can only be powered
fee746b0
MH
2513 * up as user channel. Trying to bring them up as normal devices
2514 * will result in a failure. Only user channel operation is
2515 * possible.
2516 *
2517 * When this function is called for a user channel, the flag
2518 * HCI_USER_CHANNEL will be set first before attempting to
2519 * open the device.
2520 */
4a964404 2521 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
fee746b0
MH
2522 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2523 err = -EOPNOTSUPP;
2524 goto done;
2525 }
2526
e1d08f40
JH
2527 /* We need to ensure that no other power on/off work is pending
2528 * before proceeding to call hci_dev_do_open. This is
2529 * particularly important if the setup procedure has not yet
2530 * completed.
2531 */
2532 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2533 cancel_delayed_work(&hdev->power_off);
2534
a5c8f270
MH
2535 /* After this call it is guaranteed that the setup procedure
2536 * has finished. This means that error conditions like RFKILL
2537 * or no valid public or static random address apply.
2538 */
e1d08f40
JH
2539 flush_workqueue(hdev->req_workqueue);
2540
12aa4f0a 2541 /* For controllers not using the management interface and that
b6ae8457 2542 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
12aa4f0a
MH
2543 * so that pairing works for them. Once the management interface
2544 * is in use this bit will be cleared again and userspace has
2545 * to explicitly enable it.
2546 */
2547 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2548 !test_bit(HCI_MGMT, &hdev->dev_flags))
b6ae8457 2549 set_bit(HCI_BONDABLE, &hdev->dev_flags);
12aa4f0a 2550
cbed0ca1
JH
2551 err = hci_dev_do_open(hdev);
2552
fee746b0 2553done:
cbed0ca1 2554 hci_dev_put(hdev);
cbed0ca1
JH
2555 return err;
2556}
2557
d7347f3c
JH
2558/* This function requires the caller holds hdev->lock */
2559static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2560{
2561 struct hci_conn_params *p;
2562
f161dd41
JH
2563 list_for_each_entry(p, &hdev->le_conn_params, list) {
2564 if (p->conn) {
2565 hci_conn_drop(p->conn);
f8aaf9b6 2566 hci_conn_put(p->conn);
f161dd41
JH
2567 p->conn = NULL;
2568 }
d7347f3c 2569 list_del_init(&p->action);
f161dd41 2570 }
d7347f3c
JH
2571
2572 BT_DBG("All LE pending actions cleared");
2573}
2574
1da177e4
LT
2575static int hci_dev_do_close(struct hci_dev *hdev)
2576{
2577 BT_DBG("%s %p", hdev->name, hdev);
2578
78c04c0b
VCG
2579 cancel_delayed_work(&hdev->power_off);
2580
1da177e4
LT
2581 hci_req_cancel(hdev, ENODEV);
2582 hci_req_lock(hdev);
2583
2584 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
65cc2b49 2585 cancel_delayed_work_sync(&hdev->cmd_timer);
1da177e4
LT
2586 hci_req_unlock(hdev);
2587 return 0;
2588 }
2589
3eff45ea
GP
2590 /* Flush RX and TX works */
2591 flush_work(&hdev->tx_work);
b78752cc 2592 flush_work(&hdev->rx_work);
1da177e4 2593
16ab91ab 2594 if (hdev->discov_timeout > 0) {
e0f9309f 2595 cancel_delayed_work(&hdev->discov_off);
16ab91ab 2596 hdev->discov_timeout = 0;
5e5282bb 2597 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
310a3d48 2598 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
16ab91ab
JH
2599 }
2600
a8b2d5c2 2601 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
7d78525d
JH
2602 cancel_delayed_work(&hdev->service_cache);
2603
7ba8b4be 2604 cancel_delayed_work_sync(&hdev->le_scan_disable);
4518bb0f
JH
2605
2606 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2607 cancel_delayed_work_sync(&hdev->rpa_expired);
7ba8b4be 2608
76727c02
JH
2609 /* Avoid potential lockdep warnings from the *_flush() calls by
2610 * ensuring the workqueue is empty up front.
2611 */
2612 drain_workqueue(hdev->workqueue);
2613
09fd0de5 2614 hci_dev_lock(hdev);
1f9b9a5d 2615 hci_inquiry_cache_flush(hdev);
d7347f3c 2616 hci_pend_le_actions_clear(hdev);
f161dd41 2617 hci_conn_hash_flush(hdev);
09fd0de5 2618 hci_dev_unlock(hdev);
1da177e4
LT
2619
2620 hci_notify(hdev, HCI_DEV_DOWN);
2621
2622 if (hdev->flush)
2623 hdev->flush(hdev);
2624
2625 /* Reset device */
2626 skb_queue_purge(&hdev->cmd_q);
2627 atomic_set(&hdev->cmd_cnt, 1);
4a964404
MH
2628 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2629 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
a6c511c6 2630 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1da177e4 2631 set_bit(HCI_INIT, &hdev->flags);
01178cd4 2632 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1da177e4
LT
2633 clear_bit(HCI_INIT, &hdev->flags);
2634 }
2635
c347b765
GP
2636 /* flush cmd work */
2637 flush_work(&hdev->cmd_work);
1da177e4
LT
2638
2639 /* Drop queues */
2640 skb_queue_purge(&hdev->rx_q);
2641 skb_queue_purge(&hdev->cmd_q);
2642 skb_queue_purge(&hdev->raw_q);
2643
2644 /* Drop last sent command */
2645 if (hdev->sent_cmd) {
65cc2b49 2646 cancel_delayed_work_sync(&hdev->cmd_timer);
1da177e4
LT
2647 kfree_skb(hdev->sent_cmd);
2648 hdev->sent_cmd = NULL;
2649 }
2650
b6ddb638
JH
2651 kfree_skb(hdev->recv_evt);
2652 hdev->recv_evt = NULL;
2653
1da177e4
LT
2654 /* After this point our queues are empty
2655 * and no tasks are scheduled. */
2656 hdev->close(hdev);
2657
35b973c9 2658 /* Clear flags */
fee746b0 2659 hdev->flags &= BIT(HCI_RAW);
35b973c9
JH
2660 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2661
93c311a0
MH
2662 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2663 if (hdev->dev_type == HCI_BREDR) {
2664 hci_dev_lock(hdev);
2665 mgmt_powered(hdev, 0);
2666 hci_dev_unlock(hdev);
2667 }
8ee56540 2668 }
5add6af8 2669
ced5c338 2670 /* Controller radio is available but is currently powered down */
536619e8 2671 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
ced5c338 2672
e59fda8d 2673 memset(hdev->eir, 0, sizeof(hdev->eir));
09b3c3fb 2674 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
7a4cd51d 2675 bacpy(&hdev->random_addr, BDADDR_ANY);
e59fda8d 2676
1da177e4
LT
2677 hci_req_unlock(hdev);
2678
2679 hci_dev_put(hdev);
2680 return 0;
2681}
2682
2683int hci_dev_close(__u16 dev)
2684{
2685 struct hci_dev *hdev;
2686 int err;
2687
70f23020
AE
2688 hdev = hci_dev_get(dev);
2689 if (!hdev)
1da177e4 2690 return -ENODEV;
8ee56540 2691
0736cfa8
MH
2692 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2693 err = -EBUSY;
2694 goto done;
2695 }
2696
8ee56540
MH
2697 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2698 cancel_delayed_work(&hdev->power_off);
2699
1da177e4 2700 err = hci_dev_do_close(hdev);
8ee56540 2701
0736cfa8 2702done:
1da177e4
LT
2703 hci_dev_put(hdev);
2704 return err;
2705}
2706
2707int hci_dev_reset(__u16 dev)
2708{
2709 struct hci_dev *hdev;
2710 int ret = 0;
2711
70f23020
AE
2712 hdev = hci_dev_get(dev);
2713 if (!hdev)
1da177e4
LT
2714 return -ENODEV;
2715
2716 hci_req_lock(hdev);
1da177e4 2717
808a049e
MH
2718 if (!test_bit(HCI_UP, &hdev->flags)) {
2719 ret = -ENETDOWN;
1da177e4 2720 goto done;
808a049e 2721 }
1da177e4 2722
0736cfa8
MH
2723 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2724 ret = -EBUSY;
2725 goto done;
2726 }
2727
4a964404 2728 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
2729 ret = -EOPNOTSUPP;
2730 goto done;
2731 }
2732
1da177e4
LT
2733 /* Drop queues */
2734 skb_queue_purge(&hdev->rx_q);
2735 skb_queue_purge(&hdev->cmd_q);
2736
76727c02
JH
2737 /* Avoid potential lockdep warnings from the *_flush() calls by
2738 * ensuring the workqueue is empty up front.
2739 */
2740 drain_workqueue(hdev->workqueue);
2741
09fd0de5 2742 hci_dev_lock(hdev);
1f9b9a5d 2743 hci_inquiry_cache_flush(hdev);
1da177e4 2744 hci_conn_hash_flush(hdev);
09fd0de5 2745 hci_dev_unlock(hdev);
1da177e4
LT
2746
2747 if (hdev->flush)
2748 hdev->flush(hdev);
2749
8e87d142 2750 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 2751 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4 2752
fee746b0 2753 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1da177e4
LT
2754
2755done:
1da177e4
LT
2756 hci_req_unlock(hdev);
2757 hci_dev_put(hdev);
2758 return ret;
2759}
2760
2761int hci_dev_reset_stat(__u16 dev)
2762{
2763 struct hci_dev *hdev;
2764 int ret = 0;
2765
70f23020
AE
2766 hdev = hci_dev_get(dev);
2767 if (!hdev)
1da177e4
LT
2768 return -ENODEV;
2769
0736cfa8
MH
2770 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2771 ret = -EBUSY;
2772 goto done;
2773 }
2774
4a964404 2775 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
2776 ret = -EOPNOTSUPP;
2777 goto done;
2778 }
2779
1da177e4
LT
2780 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2781
0736cfa8 2782done:
1da177e4 2783 hci_dev_put(hdev);
1da177e4
LT
2784 return ret;
2785}
2786
123abc08
JH
2787static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2788{
bc6d2d04 2789 bool conn_changed, discov_changed;
123abc08
JH
2790
2791 BT_DBG("%s scan 0x%02x", hdev->name, scan);
2792
2793 if ((scan & SCAN_PAGE))
2794 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2795 &hdev->dev_flags);
2796 else
2797 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2798 &hdev->dev_flags);
2799
bc6d2d04
JH
2800 if ((scan & SCAN_INQUIRY)) {
2801 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2802 &hdev->dev_flags);
2803 } else {
2804 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2805 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2806 &hdev->dev_flags);
2807 }
2808
123abc08
JH
2809 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2810 return;
2811
bc6d2d04
JH
2812 if (conn_changed || discov_changed) {
2813 /* In case this was disabled through mgmt */
2814 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2815
2816 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2817 mgmt_update_adv_data(hdev);
2818
123abc08 2819 mgmt_new_settings(hdev);
bc6d2d04 2820 }
123abc08
JH
2821}
2822
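/* Worked example, assuming the usual scan enable constants
 * (SCAN_INQUIRY 0x01, SCAN_PAGE 0x02): HCISETSCAN with dev_opt 0x03
 * sets both HCI_CONNECTABLE and HCI_DISCOVERABLE, while dev_opt 0x00
 * clears them together with HCI_LIMITED_DISCOVERABLE; in the mgmt case
 * mgmt_new_settings() then reports the externally triggered change.
 */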
1da177e4
LT
2823int hci_dev_cmd(unsigned int cmd, void __user *arg)
2824{
2825 struct hci_dev *hdev;
2826 struct hci_dev_req dr;
2827 int err = 0;
2828
2829 if (copy_from_user(&dr, arg, sizeof(dr)))
2830 return -EFAULT;
2831
70f23020
AE
2832 hdev = hci_dev_get(dr.dev_id);
2833 if (!hdev)
1da177e4
LT
2834 return -ENODEV;
2835
0736cfa8
MH
2836 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2837 err = -EBUSY;
2838 goto done;
2839 }
2840
4a964404 2841 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
2842 err = -EOPNOTSUPP;
2843 goto done;
2844 }
2845
5b69bef5
MH
2846 if (hdev->dev_type != HCI_BREDR) {
2847 err = -EOPNOTSUPP;
2848 goto done;
2849 }
2850
56f87901
JH
2851 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2852 err = -EOPNOTSUPP;
2853 goto done;
2854 }
2855
1da177e4
LT
2856 switch (cmd) {
2857 case HCISETAUTH:
01178cd4
JH
2858 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2859 HCI_INIT_TIMEOUT);
1da177e4
LT
2860 break;
2861
2862 case HCISETENCRYPT:
2863 if (!lmp_encrypt_capable(hdev)) {
2864 err = -EOPNOTSUPP;
2865 break;
2866 }
2867
2868 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2869 /* Auth must be enabled first */
01178cd4
JH
2870 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2871 HCI_INIT_TIMEOUT);
1da177e4
LT
2872 if (err)
2873 break;
2874 }
2875
01178cd4
JH
2876 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2877 HCI_INIT_TIMEOUT);
1da177e4
LT
2878 break;
2879
2880 case HCISETSCAN:
01178cd4
JH
2881 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2882 HCI_INIT_TIMEOUT);
91a668b0 2883
bc6d2d04
JH
2884 /* Ensure that the connectable and discoverable states
2885 * get correctly modified as this was a non-mgmt change.
91a668b0 2886 */
123abc08
JH
2887 if (!err)
2888 hci_update_scan_state(hdev, dr.dev_opt);
1da177e4
LT
2889 break;
2890
1da177e4 2891 case HCISETLINKPOL:
01178cd4
JH
2892 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2893 HCI_INIT_TIMEOUT);
1da177e4
LT
2894 break;
2895
2896 case HCISETLINKMODE:
e4e8e37c
MH
2897 hdev->link_mode = ((__u16) dr.dev_opt) &
2898 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2899 break;
2900
2901 case HCISETPTYPE:
2902 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
2903 break;
2904
2905 case HCISETACLMTU:
e4e8e37c
MH
2906 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2907 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
2908 break;
2909
2910 case HCISETSCOMTU:
e4e8e37c
MH
2911 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2912 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
2913 break;
2914
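/* Note on the pointer arithmetic above (an assumption that holds on
 * little-endian hosts): dev_opt is read as two __u16 halves, so
 * HCISETACLMTU and HCISETSCOMTU expect dev_opt = (mtu << 16) | pkts.
 */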
2915 default:
2916 err = -EINVAL;
2917 break;
2918 }
e4e8e37c 2919
0736cfa8 2920done:
1da177e4
LT
2921 hci_dev_put(hdev);
2922 return err;
2923}
2924
2925int hci_get_dev_list(void __user *arg)
2926{
8035ded4 2927 struct hci_dev *hdev;
1da177e4
LT
2928 struct hci_dev_list_req *dl;
2929 struct hci_dev_req *dr;
1da177e4
LT
2930 int n = 0, size, err;
2931 __u16 dev_num;
2932
2933 if (get_user(dev_num, (__u16 __user *) arg))
2934 return -EFAULT;
2935
2936 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2937 return -EINVAL;
2938
2939 size = sizeof(*dl) + dev_num * sizeof(*dr);
2940
70f23020
AE
2941 dl = kzalloc(size, GFP_KERNEL);
2942 if (!dl)
1da177e4
LT
2943 return -ENOMEM;
2944
2945 dr = dl->dev_req;
2946
f20d09d5 2947 read_lock(&hci_dev_list_lock);
8035ded4 2948 list_for_each_entry(hdev, &hci_dev_list, list) {
2e84d8db 2949 unsigned long flags = hdev->flags;
c542a06c 2950
2e84d8db
MH
2951 /* When the auto-off is configured it means the transport
2952 * is running, but in that case still indicate that the
2953 * device is actually down.
2954 */
2955 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2956 flags &= ~BIT(HCI_UP);
c542a06c 2957
1da177e4 2958 (dr + n)->dev_id = hdev->id;
2e84d8db 2959 (dr + n)->dev_opt = flags;
c542a06c 2960
1da177e4
LT
2961 if (++n >= dev_num)
2962 break;
2963 }
f20d09d5 2964 read_unlock(&hci_dev_list_lock);
1da177e4
LT
2965
2966 dl->dev_num = n;
2967 size = sizeof(*dl) + n * sizeof(*dr);
2968
2969 err = copy_to_user(arg, dl, size);
2970 kfree(dl);
2971
2972 return err ? -EFAULT : 0;
2973}
2974
2975int hci_get_dev_info(void __user *arg)
2976{
2977 struct hci_dev *hdev;
2978 struct hci_dev_info di;
2e84d8db 2979 unsigned long flags;
1da177e4
LT
2980 int err = 0;
2981
2982 if (copy_from_user(&di, arg, sizeof(di)))
2983 return -EFAULT;
2984
70f23020
AE
2985 hdev = hci_dev_get(di.dev_id);
2986 if (!hdev)
1da177e4
LT
2987 return -ENODEV;
2988
2e84d8db
MH
2989 /* When the auto-off is configured it means the transport
2990 * is running, but in that case still indicate that the
2991 * device is actually down.
2992 */
2993 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2994 flags = hdev->flags & ~BIT(HCI_UP);
2995 else
2996 flags = hdev->flags;
c542a06c 2997
1da177e4
LT
2998 strcpy(di.name, hdev->name);
2999 di.bdaddr = hdev->bdaddr;
60f2a3ed 3000 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2e84d8db 3001 di.flags = flags;
1da177e4 3002 di.pkt_type = hdev->pkt_type;
572c7f84
JH
3003 if (lmp_bredr_capable(hdev)) {
3004 di.acl_mtu = hdev->acl_mtu;
3005 di.acl_pkts = hdev->acl_pkts;
3006 di.sco_mtu = hdev->sco_mtu;
3007 di.sco_pkts = hdev->sco_pkts;
3008 } else {
3009 di.acl_mtu = hdev->le_mtu;
3010 di.acl_pkts = hdev->le_pkts;
3011 di.sco_mtu = 0;
3012 di.sco_pkts = 0;
3013 }
1da177e4
LT
3014 di.link_policy = hdev->link_policy;
3015 di.link_mode = hdev->link_mode;
3016
3017 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
3018 memcpy(&di.features, &hdev->features, sizeof(di.features));
3019
3020 if (copy_to_user(arg, &di, sizeof(di)))
3021 err = -EFAULT;
3022
3023 hci_dev_put(hdev);
3024
3025 return err;
3026}
3027
3028/* ---- Interface to HCI drivers ---- */
3029
611b30f7
MH
3030static int hci_rfkill_set_block(void *data, bool blocked)
3031{
3032 struct hci_dev *hdev = data;
3033
3034 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
3035
0736cfa8
MH
3036 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
3037 return -EBUSY;
3038
5e130367
JH
3039 if (blocked) {
3040 set_bit(HCI_RFKILLED, &hdev->dev_flags);
d603b76b
MH
3041 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
3042 !test_bit(HCI_CONFIG, &hdev->dev_flags))
bf543036 3043 hci_dev_do_close(hdev);
5e130367
JH
3044 } else {
3045 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
1025c04c 3046 }
611b30f7
MH
3047
3048 return 0;
3049}
3050
3051static const struct rfkill_ops hci_rfkill_ops = {
3052 .set_block = hci_rfkill_set_block,
3053};
3054
ab81cbf9
JH
3055static void hci_power_on(struct work_struct *work)
3056{
3057 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
96570ffc 3058 int err;
ab81cbf9
JH
3059
3060 BT_DBG("%s", hdev->name);
3061
cbed0ca1 3062 err = hci_dev_do_open(hdev);
96570ffc
JH
3063 if (err < 0) {
3064 mgmt_set_powered_failed(hdev, err);
ab81cbf9 3065 return;
96570ffc 3066 }
ab81cbf9 3067
a5c8f270
MH
3068 /* During the HCI setup phase, a few error conditions are
3069 * ignored and they need to be checked now. If they are still
3070 * valid, it is important to turn the device back off.
3071 */
3072 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
4a964404 3073 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
a5c8f270
MH
3074 (hdev->dev_type == HCI_BREDR &&
3075 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
3076 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
bf543036
JH
3077 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3078 hci_dev_do_close(hdev);
3079 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
19202573
JH
3080 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
3081 HCI_AUTO_OFF_TIMEOUT);
bf543036 3082 }
ab81cbf9 3083
fee746b0 3084 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
4a964404
MH
3085 /* For unconfigured devices, set the HCI_RAW flag
3086 * so that userspace can easily identify them.
4a964404
MH
3087 */
3088 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3089 set_bit(HCI_RAW, &hdev->flags);
0602a8ad
MH
3090
3091 /* For fully configured devices, this will send
3092 * the Index Added event. For unconfigured devices,
3093 * it will send the Unconfigured Index Added event.
3094 *
3095 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
3096 * and no event will be sent.
3097 */
3098 mgmt_index_added(hdev);
d603b76b 3099 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
5ea234d3
MH
3100 /* Now that the controller is configured, it is
3101 * important to clear the HCI_RAW flag.
3102 */
3103 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3104 clear_bit(HCI_RAW, &hdev->flags);
3105
d603b76b
MH
3106 /* Powering on the controller with HCI_CONFIG set only
3107 * happens with the transition from unconfigured to
3108 * configured. This will send the Index Added event.
3109 */
744cf19e 3110 mgmt_index_added(hdev);
fee746b0 3111 }
ab81cbf9
JH
3112}
3113
3114static void hci_power_off(struct work_struct *work)
3115{
3243553f 3116 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 3117 power_off.work);
ab81cbf9
JH
3118
3119 BT_DBG("%s", hdev->name);
3120
8ee56540 3121 hci_dev_do_close(hdev);
ab81cbf9
JH
3122}
3123
16ab91ab
JH
3124static void hci_discov_off(struct work_struct *work)
3125{
3126 struct hci_dev *hdev;
16ab91ab
JH
3127
3128 hdev = container_of(work, struct hci_dev, discov_off.work);
3129
3130 BT_DBG("%s", hdev->name);
3131
d1967ff8 3132 mgmt_discoverable_timeout(hdev);
16ab91ab
JH
3133}
3134
35f7498a 3135void hci_uuids_clear(struct hci_dev *hdev)
2aeb9a1a 3136{
4821002c 3137 struct bt_uuid *uuid, *tmp;
2aeb9a1a 3138
4821002c
JH
3139 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3140 list_del(&uuid->list);
2aeb9a1a
JH
3141 kfree(uuid);
3142 }
2aeb9a1a
JH
3143}
3144
35f7498a 3145void hci_link_keys_clear(struct hci_dev *hdev)
55ed8ca1 3146{
0378b597 3147 struct link_key *key;
55ed8ca1 3148
0378b597
JH
3149 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
3150 list_del_rcu(&key->list);
3151 kfree_rcu(key, rcu);
55ed8ca1 3152 }
55ed8ca1
JH
3153}
3154
35f7498a 3155void hci_smp_ltks_clear(struct hci_dev *hdev)
b899efaf 3156{
970d0f1b 3157 struct smp_ltk *k;
b899efaf 3158
970d0f1b
JH
3159 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
3160 list_del_rcu(&k->list);
3161 kfree_rcu(k, rcu);
b899efaf 3162 }
b899efaf
VCG
3163}
3164
970c4e46
JH
3165void hci_smp_irks_clear(struct hci_dev *hdev)
3166{
adae20cb 3167 struct smp_irk *k;
970c4e46 3168
adae20cb
JH
3169 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
3170 list_del_rcu(&k->list);
3171 kfree_rcu(k, rcu);
970c4e46
JH
3172 }
3173}
3174
55ed8ca1
JH
3175struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3176{
8035ded4 3177 struct link_key *k;
55ed8ca1 3178
0378b597
JH
3179 rcu_read_lock();
3180 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
3181 if (bacmp(bdaddr, &k->bdaddr) == 0) {
3182 rcu_read_unlock();
55ed8ca1 3183 return k;
0378b597
JH
3184 }
3185 }
3186 rcu_read_unlock();
55ed8ca1
JH
3187
3188 return NULL;
3189}
3190
745c0ce3 3191static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 3192 u8 key_type, u8 old_key_type)
d25e28ab
JH
3193{
3194 /* Legacy key */
3195 if (key_type < 0x03)
745c0ce3 3196 return true;
d25e28ab
JH
3197
3198 /* Debug keys are insecure so don't store them persistently */
3199 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 3200 return false;
d25e28ab
JH
3201
3202 /* Changed combination key and there's no previous one */
3203 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 3204 return false;
d25e28ab
JH
3205
3206 /* Security mode 3 case */
3207 if (!conn)
745c0ce3 3208 return true;
d25e28ab 3209
e3befab9
JH
3210 /* BR/EDR key derived using SC from an LE link */
3211 if (conn->type == LE_LINK)
3212 return true;
3213
d25e28ab
JH
3214 /* Neither local nor remote side had no-bonding as a requirement */
3215 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 3216 return true;
d25e28ab
JH
3217
3218 /* Local side had dedicated bonding as requirement */
3219 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 3220 return true;
d25e28ab
JH
3221
3222 /* Remote side had dedicated bonding as requirement */
3223 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 3224 return true;
d25e28ab
JH
3225
3226 /* If none of the above criteria match, then don't store the key
3227 * persistently */
745c0ce3 3228 return false;
d25e28ab
JH
3229}
3230
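/* Summary of the decision logic above:
 *
 *   legacy key (type < 0x03)                 -> store
 *   HCI_LK_DEBUG_COMBINATION                 -> never store
 *   changed combination without previous key -> don't store
 *   no connection (security mode 3)          -> store
 *   BR/EDR key derived from an LE link (SC)  -> store
 *   neither side allowed no-bonding          -> store
 *   either side used dedicated bonding       -> store
 *   anything else                            -> don't store
 */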
e804d25d 3231static u8 ltk_role(u8 type)
98a0b845 3232{
e804d25d
JH
3233 if (type == SMP_LTK)
3234 return HCI_ROLE_MASTER;
98a0b845 3235
e804d25d 3236 return HCI_ROLE_SLAVE;
98a0b845
JH
3237}
3238
f3a73d97
JH
3239struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3240 u8 addr_type, u8 role)
75d262c2 3241{
c9839a11 3242 struct smp_ltk *k;
75d262c2 3243
970d0f1b
JH
3244 rcu_read_lock();
3245 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
5378bc56
JH
3246 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
3247 continue;
3248
3249 if (smp_ltk_is_sc(k)) {
3250 if (k->type == SMP_LTK_P256_DEBUG &&
3251 !test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
3252 continue;
3253 rcu_read_unlock();
3254 return k;
3255 }
3256
3257 if (ltk_role(k->type) == role) {
970d0f1b 3258 rcu_read_unlock();
75d262c2 3259 return k;
970d0f1b
JH
3260 }
3261 }
3262 rcu_read_unlock();
75d262c2
VCG
3263
3264 return NULL;
3265}
75d262c2 3266
970c4e46
JH
3267struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3268{
3269 struct smp_irk *irk;
3270
adae20cb
JH
3271 rcu_read_lock();
3272 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3273 if (!bacmp(&irk->rpa, rpa)) {
3274 rcu_read_unlock();
970c4e46 3275 return irk;
adae20cb 3276 }
970c4e46
JH
3277 }
3278
adae20cb 3279 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
defce9e8 3280 if (smp_irk_matches(hdev, irk->val, rpa)) {
970c4e46 3281 bacpy(&irk->rpa, rpa);
adae20cb 3282 rcu_read_unlock();
970c4e46
JH
3283 return irk;
3284 }
3285 }
adae20cb 3286 rcu_read_unlock();
970c4e46
JH
3287
3288 return NULL;
3289}
3290
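/* Background, paraphrasing the Bluetooth Core specification: a
 * resolvable private address is prand (upper 24 bits, top two bits
 * 0b01) concatenated with hash = ah(IRK, prand), where ah() is an
 * AES-128 based function. smp_irk_matches() recomputes that hash for
 * a candidate IRK; the first loop above merely short-circuits via the
 * cached rpa of an earlier match.
 */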
3291struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3292 u8 addr_type)
3293{
3294 struct smp_irk *irk;
3295
6cfc9988
JH
3296 /* Identity Address must be public or static random */
3297 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3298 return NULL;
3299
adae20cb
JH
3300 rcu_read_lock();
3301 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
970c4e46 3302 if (addr_type == irk->addr_type &&
adae20cb
JH
3303 bacmp(bdaddr, &irk->bdaddr) == 0) {
3304 rcu_read_unlock();
970c4e46 3305 return irk;
adae20cb 3306 }
970c4e46 3307 }
adae20cb 3308 rcu_read_unlock();
970c4e46
JH
3309
3310 return NULL;
3311}
3312
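/* The (bdaddr->b[5] & 0xc0) != 0xc0 check above relies on bdaddr_t
 * being stored little-endian: b[5] is the most significant byte, and
 * a static random address must have its two top bits set (0b11).
 * Public addresses carry no such constraint, hence the early return
 * applies only to ADDR_LE_DEV_RANDOM.
 */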
567fa2aa 3313struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
7652ff6a
JH
3314 bdaddr_t *bdaddr, u8 *val, u8 type,
3315 u8 pin_len, bool *persistent)
55ed8ca1
JH
3316{
3317 struct link_key *key, *old_key;
745c0ce3 3318 u8 old_key_type;
55ed8ca1
JH
3319
3320 old_key = hci_find_link_key(hdev, bdaddr);
3321 if (old_key) {
3322 old_key_type = old_key->type;
3323 key = old_key;
3324 } else {
12adcf3a 3325 old_key_type = conn ? conn->key_type : 0xff;
0a14ab41 3326 key = kzalloc(sizeof(*key), GFP_KERNEL);
55ed8ca1 3327 if (!key)
567fa2aa 3328 return NULL;
0378b597 3329 list_add_rcu(&key->list, &hdev->link_keys);
55ed8ca1
JH
3330 }
3331
6ed93dc6 3332 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 3333
d25e28ab
JH
3334 /* Some buggy controller combinations generate a changed
3335 * combination key for legacy pairing even when there's no
3336 * previous key */
3337 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 3338 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 3339 type = HCI_LK_COMBINATION;
655fe6ec
JH
3340 if (conn)
3341 conn->key_type = type;
3342 }
d25e28ab 3343
55ed8ca1 3344 bacpy(&key->bdaddr, bdaddr);
9b3b4460 3345 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
3346 key->pin_len = pin_len;
3347
b6020ba0 3348 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 3349 key->type = old_key_type;
4748fed2
JH
3350 else
3351 key->type = type;
3352
7652ff6a
JH
3353 if (persistent)
3354 *persistent = hci_persistent_key(hdev, conn, type,
3355 old_key_type);
4df378a1 3356
567fa2aa 3357 return key;
55ed8ca1
JH
3358}
3359
ca9142b8 3360struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
35d70271 3361 u8 addr_type, u8 type, u8 authenticated,
fe39c7b2 3362 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
75d262c2 3363{
c9839a11 3364 struct smp_ltk *key, *old_key;
e804d25d 3365 u8 role = ltk_role(type);
75d262c2 3366
f3a73d97 3367 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
c9839a11 3368 if (old_key)
75d262c2 3369 key = old_key;
c9839a11 3370 else {
0a14ab41 3371 key = kzalloc(sizeof(*key), GFP_KERNEL);
75d262c2 3372 if (!key)
ca9142b8 3373 return NULL;
970d0f1b 3374 list_add_rcu(&key->list, &hdev->long_term_keys);
75d262c2
VCG
3375 }
3376
75d262c2 3377 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
3378 key->bdaddr_type = addr_type;
3379 memcpy(key->val, tk, sizeof(key->val));
3380 key->authenticated = authenticated;
3381 key->ediv = ediv;
fe39c7b2 3382 key->rand = rand;
c9839a11
VCG
3383 key->enc_size = enc_size;
3384 key->type = type;
75d262c2 3385
ca9142b8 3386 return key;
75d262c2
VCG
3387}
3388
ca9142b8
JH
3389struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3390 u8 addr_type, u8 val[16], bdaddr_t *rpa)
970c4e46
JH
3391{
3392 struct smp_irk *irk;
3393
3394 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3395 if (!irk) {
3396 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3397 if (!irk)
ca9142b8 3398 return NULL;
970c4e46
JH
3399
3400 bacpy(&irk->bdaddr, bdaddr);
3401 irk->addr_type = addr_type;
3402
adae20cb 3403 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
970c4e46
JH
3404 }
3405
3406 memcpy(irk->val, val, 16);
3407 bacpy(&irk->rpa, rpa);
3408
ca9142b8 3409 return irk;
970c4e46
JH
3410}
3411
55ed8ca1
JH
3412int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3413{
3414 struct link_key *key;
3415
3416 key = hci_find_link_key(hdev, bdaddr);
3417 if (!key)
3418 return -ENOENT;
3419
6ed93dc6 3420 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1 3421
0378b597
JH
3422 list_del_rcu(&key->list);
3423 kfree_rcu(key, rcu);
55ed8ca1
JH
3424
3425 return 0;
3426}
3427
e0b2b27e 3428int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
b899efaf 3429{
970d0f1b 3430 struct smp_ltk *k;
c51ffa0b 3431 int removed = 0;
b899efaf 3432
970d0f1b 3433 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
e0b2b27e 3434 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
b899efaf
VCG
3435 continue;
3436
6ed93dc6 3437 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf 3438
970d0f1b
JH
3439 list_del_rcu(&k->list);
3440 kfree_rcu(k, rcu);
c51ffa0b 3441 removed++;
b899efaf
VCG
3442 }
3443
c51ffa0b 3444 return removed ? 0 : -ENOENT;
b899efaf
VCG
3445}
3446
a7ec7338
JH
3447void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3448{
adae20cb 3449 struct smp_irk *k;
a7ec7338 3450
adae20cb 3451 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
a7ec7338
JH
3452 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3453 continue;
3454
3455 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3456
adae20cb
JH
3457 list_del_rcu(&k->list);
3458 kfree_rcu(k, rcu);
a7ec7338
JH
3459 }
3460}
3461
6bd32326 3462/* HCI command timer function */
65cc2b49 3463static void hci_cmd_timeout(struct work_struct *work)
6bd32326 3464{
65cc2b49
MH
3465 struct hci_dev *hdev = container_of(work, struct hci_dev,
3466 cmd_timer.work);
6bd32326 3467
bda4f23a
AE
3468 if (hdev->sent_cmd) {
3469 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3470 u16 opcode = __le16_to_cpu(sent->opcode);
3471
3472 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3473 } else {
3474 BT_ERR("%s command tx timeout", hdev->name);
3475 }
3476
6bd32326 3477 atomic_set(&hdev->cmd_cnt, 1);
c347b765 3478 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
3479}
3480
2763eda6 3481struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 3482 bdaddr_t *bdaddr)
2763eda6
SJ
3483{
3484 struct oob_data *data;
3485
3486 list_for_each_entry(data, &hdev->remote_oob_data, list)
3487 if (bacmp(bdaddr, &data->bdaddr) == 0)
3488 return data;
3489
3490 return NULL;
3491}
3492
3493int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3494{
3495 struct oob_data *data;
3496
3497 data = hci_find_remote_oob_data(hdev, bdaddr);
3498 if (!data)
3499 return -ENOENT;
3500
6ed93dc6 3501 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
3502
3503 list_del(&data->list);
3504 kfree(data);
3505
3506 return 0;
3507}
3508
35f7498a 3509void hci_remote_oob_data_clear(struct hci_dev *hdev)
2763eda6
SJ
3510{
3511 struct oob_data *data, *n;
3512
3513 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3514 list_del(&data->list);
3515 kfree(data);
3516 }
2763eda6
SJ
3517}
3518
0798872e 3519int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
81328d5c
JH
3520 u8 *hash192, u8 *rand192,
3521 u8 *hash256, u8 *rand256)
2763eda6
SJ
3522{
3523 struct oob_data *data;
3524
3525 data = hci_find_remote_oob_data(hdev, bdaddr);
2763eda6 3526 if (!data) {
0a14ab41 3527 data = kmalloc(sizeof(*data), GFP_KERNEL);
2763eda6
SJ
3528 if (!data)
3529 return -ENOMEM;
3530
3531 bacpy(&data->bdaddr, bdaddr);
3532 list_add(&data->list, &hdev->remote_oob_data);
3533 }
3534
81328d5c
JH
3535 if (hash192 && rand192) {
3536 memcpy(data->hash192, hash192, sizeof(data->hash192));
3537 memcpy(data->rand192, rand192, sizeof(data->rand192));
3538 } else {
3539 memset(data->hash192, 0, sizeof(data->hash192));
3540 memset(data->rand192, 0, sizeof(data->rand192));
0798872e
MH
3541 }
3542
81328d5c
JH
3543 if (hash256 && rand256) {
3544 memcpy(data->hash256, hash256, sizeof(data->hash256));
3545 memcpy(data->rand256, rand256, sizeof(data->rand256));
3546 } else {
3547 memset(data->hash256, 0, sizeof(data->hash256));
3548 memset(data->rand256, 0, sizeof(data->rand256));
3549 }
0798872e 3550
6ed93dc6 3551 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
3552
3553 return 0;
3554}
3555
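/* With the unified API above, callers pass NULL for the variant they
 * do not have: legacy SSP OOB provides only hash192/rand192, while
 * Secure Connections OOB may add hash256/rand256. Absent values are
 * explicitly zeroed so stale data never survives an update.
 */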
dcc36c16 3556struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
b9ee0a78 3557 bdaddr_t *bdaddr, u8 type)
b2a66aad 3558{
8035ded4 3559 struct bdaddr_list *b;
b2a66aad 3560
dcc36c16 3561 list_for_each_entry(b, bdaddr_list, list) {
b9ee0a78 3562 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 3563 return b;
b9ee0a78 3564 }
b2a66aad
AJ
3565
3566 return NULL;
3567}
3568
dcc36c16 3569void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
b2a66aad
AJ
3570{
3571 struct list_head *p, *n;
3572
dcc36c16 3573 list_for_each_safe(p, n, bdaddr_list) {
b9ee0a78 3574 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
b2a66aad
AJ
3575
3576 list_del(p);
3577 kfree(b);
3578 }
b2a66aad
AJ
3579}
3580
dcc36c16 3581int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3582{
3583 struct bdaddr_list *entry;
b2a66aad 3584
b9ee0a78 3585 if (!bacmp(bdaddr, BDADDR_ANY))
b2a66aad
AJ
3586 return -EBADF;
3587
dcc36c16 3588 if (hci_bdaddr_list_lookup(list, bdaddr, type))
5e762444 3589 return -EEXIST;
b2a66aad 3590
27f70f3e 3591 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
5e762444
AJ
3592 if (!entry)
3593 return -ENOMEM;
b2a66aad
AJ
3594
3595 bacpy(&entry->bdaddr, bdaddr);
b9ee0a78 3596 entry->bdaddr_type = type;
b2a66aad 3597
dcc36c16 3598 list_add(&entry->list, list);
b2a66aad 3599
2a8357f2 3600 return 0;
b2a66aad
AJ
3601}
3602
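/* Minimal usage sketch (hypothetical caller; bdaddr and type are
 * assumed to come from the caller's context):
 *
 *	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);
 *	if (err == -EEXIST)
 *		... address was already on the list ...
 *
 * -EBADF is returned for BDADDR_ANY and -ENOMEM on allocation failure.
 */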
dcc36c16 3603int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3604{
3605 struct bdaddr_list *entry;
b2a66aad 3606
35f7498a 3607 if (!bacmp(bdaddr, BDADDR_ANY)) {
dcc36c16 3608 hci_bdaddr_list_clear(list);
35f7498a
JH
3609 return 0;
3610 }
b2a66aad 3611
dcc36c16 3612 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
d2ab0ac1
MH
3613 if (!entry)
3614 return -ENOENT;
3615
3616 list_del(&entry->list);
3617 kfree(entry);
3618
3619 return 0;
3620}
3621
15819a70
AG
3622/* This function requires the caller holds hdev->lock */
3623struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3624 bdaddr_t *addr, u8 addr_type)
3625{
3626 struct hci_conn_params *params;
3627
738f6185
JH
3628 /* The conn params list only contains identity addresses */
3629 if (!hci_is_identity_address(addr, addr_type))
3630 return NULL;
3631
15819a70
AG
3632 list_for_each_entry(params, &hdev->le_conn_params, list) {
3633 if (bacmp(&params->addr, addr) == 0 &&
3634 params->addr_type == addr_type) {
3635 return params;
3636 }
3637 }
3638
3639 return NULL;
3640}
3641
cef952ce
AG
3642static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3643{
3644 struct hci_conn *conn;
3645
3646 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3647 if (!conn)
3648 return false;
3649
3650 if (conn->dst_type != type)
3651 return false;
3652
3653 if (conn->state != BT_CONNECTED)
3654 return false;
3655
3656 return true;
3657}
3658
4b10966f 3659/* This function requires the caller holds hdev->lock */
501f8827
JH
3660struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3661 bdaddr_t *addr, u8 addr_type)
a9b0a04c 3662{
912b42ef 3663 struct hci_conn_params *param;
a9b0a04c 3664
738f6185
JH
3665 /* The list only contains identity addresses */
3666 if (!hci_is_identity_address(addr, addr_type))
3667 return NULL;
a9b0a04c 3668
501f8827 3669 list_for_each_entry(param, list, action) {
912b42ef
JH
3670 if (bacmp(&param->addr, addr) == 0 &&
3671 param->addr_type == addr_type)
3672 return param;
4b10966f
MH
3673 }
3674
3675 return NULL;
a9b0a04c
AG
3676}
3677
15819a70 3678/* This function requires the caller holds hdev->lock */
51d167c0
MH
3679struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3680 bdaddr_t *addr, u8 addr_type)
15819a70
AG
3681{
3682 struct hci_conn_params *params;
3683
c46245b3 3684 if (!hci_is_identity_address(addr, addr_type))
51d167c0 3685 return NULL;
a9b0a04c 3686
15819a70 3687 params = hci_conn_params_lookup(hdev, addr, addr_type);
cef952ce 3688 if (params)
51d167c0 3689 return params;
15819a70
AG
3690
3691 params = kzalloc(sizeof(*params), GFP_KERNEL);
3692 if (!params) {
3693 BT_ERR("Out of memory");
51d167c0 3694 return NULL;
15819a70
AG
3695 }
3696
3697 bacpy(&params->addr, addr);
3698 params->addr_type = addr_type;
cef952ce
AG
3699
3700 list_add(&params->list, &hdev->le_conn_params);
93450c75 3701 INIT_LIST_HEAD(&params->action);
cef952ce 3702
bf5b3c8b
MH
3703 params->conn_min_interval = hdev->le_conn_min_interval;
3704 params->conn_max_interval = hdev->le_conn_max_interval;
3705 params->conn_latency = hdev->le_conn_latency;
3706 params->supervision_timeout = hdev->le_supv_timeout;
3707 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3708
3709 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3710
51d167c0 3711 return params;
bf5b3c8b
MH
3712}
3713
3714/* This function requires the caller holds hdev->lock */
3715int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
d06b50ce 3716 u8 auto_connect)
15819a70
AG
3717{
3718 struct hci_conn_params *params;
3719
8c87aae1
MH
3720 params = hci_conn_params_add(hdev, addr, addr_type);
3721 if (!params)
3722 return -EIO;
cef952ce 3723
42ce26de
JH
3724 if (params->auto_connect == auto_connect)
3725 return 0;
3726
95305baa 3727 list_del_init(&params->action);
15819a70 3728
cef952ce
AG
3729 switch (auto_connect) {
3730 case HCI_AUTO_CONN_DISABLED:
3731 case HCI_AUTO_CONN_LINK_LOSS:
95305baa 3732 hci_update_background_scan(hdev);
cef952ce 3733 break;
851efca8 3734 case HCI_AUTO_CONN_REPORT:
95305baa
JH
3735 list_add(&params->action, &hdev->pend_le_reports);
3736 hci_update_background_scan(hdev);
cef952ce 3737 break;
4b9e7e75 3738 case HCI_AUTO_CONN_DIRECT:
cef952ce 3739 case HCI_AUTO_CONN_ALWAYS:
95305baa
JH
3740 if (!is_connected(hdev, addr, addr_type)) {
3741 list_add(&params->action, &hdev->pend_le_conns);
3742 hci_update_background_scan(hdev);
3743 }
cef952ce
AG
3744 break;
3745 }
15819a70 3746
851efca8
JH
3747 params->auto_connect = auto_connect;
3748
d06b50ce
MH
3749 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3750 auto_connect);
a9b0a04c
AG
3751
3752 return 0;
15819a70
AG
3753}
3754
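/* Example (sketch): management code that wants a device connected
 * automatically might, while holding hdev->lock, do:
 *
 *	err = hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
 *				  HCI_AUTO_CONN_ALWAYS);
 *	if (err)
 *		goto unlock;
 *
 * cp and addr_type stand in for the caller's request data.
 */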
f6c63249 3755static void hci_conn_params_free(struct hci_conn_params *params)
15819a70 3756{
f8aaf9b6 3757 if (params->conn) {
f161dd41 3758 hci_conn_drop(params->conn);
f8aaf9b6
JH
3759 hci_conn_put(params->conn);
3760 }
f161dd41 3761
95305baa 3762 list_del(&params->action);
15819a70
AG
3763 list_del(&params->list);
3764 kfree(params);
f6c63249
JH
3765}
3766
3767/* This function requires the caller holds hdev->lock */
3768void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3769{
3770 struct hci_conn_params *params;
3771
3772 params = hci_conn_params_lookup(hdev, addr, addr_type);
3773 if (!params)
3774 return;
3775
3776 hci_conn_params_free(params);
15819a70 3777
95305baa
JH
3778 hci_update_background_scan(hdev);
3779
15819a70
AG
3780 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3781}
3782
3783/* This function requires the caller holds hdev->lock */
55af49a8 3784void hci_conn_params_clear_disabled(struct hci_dev *hdev)
15819a70
AG
3785{
3786 struct hci_conn_params *params, *tmp;
3787
3788 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
55af49a8
JH
3789 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3790 continue;
15819a70
AG
3791 list_del(&params->list);
3792 kfree(params);
3793 }
3794
55af49a8 3795 BT_DBG("All disabled LE connection parameters were removed");
77a77a30
AG
3796}
3797
3798/* This function requires the caller holds hdev->lock */
373110c5 3799void hci_conn_params_clear_all(struct hci_dev *hdev)
77a77a30 3800{
15819a70 3801 struct hci_conn_params *params, *tmp;
77a77a30 3802
f6c63249
JH
3803 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3804 hci_conn_params_free(params);
77a77a30 3805
a4790dbd 3806 hci_update_background_scan(hdev);
77a77a30 3807
15819a70 3808 BT_DBG("All LE connection parameters were removed");
77a77a30
AG
3809}
3810
4c87eaab 3811static void inquiry_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3812{
4c87eaab
AG
3813 if (status) {
3814 BT_ERR("Failed to start inquiry: status %d", status);
7ba8b4be 3815
4c87eaab
AG
3816 hci_dev_lock(hdev);
3817 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3818 hci_dev_unlock(hdev);
3819 return;
3820 }
7ba8b4be
AG
3821}
3822
4c87eaab 3823static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3824{
4c87eaab
AG
3825 /* General inquiry access code (GIAC) */
3826 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3827 struct hci_request req;
3828 struct hci_cp_inquiry cp;
7ba8b4be
AG
3829 int err;
3830
4c87eaab
AG
3831 if (status) {
3832 BT_ERR("Failed to disable LE scanning: status %d", status);
3833 return;
3834 }
7ba8b4be 3835
4c87eaab
AG
3836 switch (hdev->discovery.type) {
3837 case DISCOV_TYPE_LE:
3838 hci_dev_lock(hdev);
3839 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3840 hci_dev_unlock(hdev);
3841 break;
7ba8b4be 3842
4c87eaab
AG
3843 case DISCOV_TYPE_INTERLEAVED:
3844 hci_req_init(&req, hdev);
7ba8b4be 3845
4c87eaab
AG
3846 memset(&cp, 0, sizeof(cp));
3847 memcpy(&cp.lap, lap, sizeof(cp.lap));
3848 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3849 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
7ba8b4be 3850
4c87eaab 3851 hci_dev_lock(hdev);
7dbfac1d 3852
4c87eaab 3853 hci_inquiry_cache_flush(hdev);
7dbfac1d 3854
4c87eaab
AG
3855 err = hci_req_run(&req, inquiry_complete);
3856 if (err) {
3857 BT_ERR("Inquiry request failed: err %d", err);
3858 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3859 }
7dbfac1d 3860
4c87eaab
AG
3861 hci_dev_unlock(hdev);
3862 break;
7dbfac1d 3863 }
7dbfac1d
AG
3864}
3865
7ba8b4be
AG
3866static void le_scan_disable_work(struct work_struct *work)
3867{
3868 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 3869 le_scan_disable.work);
4c87eaab
AG
3870 struct hci_request req;
3871 int err;
7ba8b4be
AG
3872
3873 BT_DBG("%s", hdev->name);
3874
4c87eaab 3875 hci_req_init(&req, hdev);
28b75a89 3876
b1efcc28 3877 hci_req_add_le_scan_disable(&req);
28b75a89 3878
4c87eaab
AG
3879 err = hci_req_run(&req, le_scan_disable_work_complete);
3880 if (err)
3881 BT_ERR("Disable LE scanning request failed: err %d", err);
28b75a89
AG
3882}
3883
8d97250e
JH
3884static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3885{
3886 struct hci_dev *hdev = req->hdev;
3887
3888 /* If we're advertising or initiating an LE connection we can't
3889 * go ahead and change the random address at this time. This is
3890 * because the eventual initiator address used for the
3891 * subsequently created connection will be undefined (some
3892 * controllers use the new address and others the one we had
3893 * when the operation started).
3894 *
3895 * In this kind of scenario skip the update and let the random
3896 * address be updated at the next cycle.
3897 */
5ce194c4 3898 if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
8d97250e
JH
3899 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3900 BT_DBG("Deferring random address update");
9a783a13 3901 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
8d97250e
JH
3902 return;
3903 }
3904
3905 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3906}
3907
94b1fc92
MH
3908int hci_update_random_address(struct hci_request *req, bool require_privacy,
3909 u8 *own_addr_type)
ebd3a747
JH
3910{
3911 struct hci_dev *hdev = req->hdev;
3912 int err;
3913
3914 /* If privacy is enabled use a resolvable private address. If
2b5224dc
MH
3915 * current RPA has expired or there is something other than
3916 * the current RPA in use, then generate a new one.
ebd3a747
JH
3917 */
3918 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
ebd3a747
JH
3919 int to;
3920
3921 *own_addr_type = ADDR_LE_DEV_RANDOM;
3922
3923 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
2b5224dc 3924 !bacmp(&hdev->random_addr, &hdev->rpa))
ebd3a747
JH
3925 return 0;
3926
defce9e8 3927 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
ebd3a747
JH
3928 if (err < 0) {
3929 BT_ERR("%s failed to generate new RPA", hdev->name);
3930 return err;
3931 }
3932
8d97250e 3933 set_random_addr(req, &hdev->rpa);
ebd3a747
JH
3934
3935 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3936 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3937
3938 return 0;
94b1fc92
MH
3939 }
3940
3941 /* In case of required privacy without a resolvable private address,
3942 * use an unresolvable private address. This is useful for active
3943 * scanning and non-connectable advertising.
3944 */
3945 if (require_privacy) {
3946 bdaddr_t urpa;
3947
3948 get_random_bytes(&urpa, 6);
3949 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3950
3951 *own_addr_type = ADDR_LE_DEV_RANDOM;
8d97250e 3952 set_random_addr(req, &urpa);
94b1fc92 3953 return 0;
ebd3a747
JH
3954 }
3955
3956 /* If forcing static address is in use or there is no public
3957 * address, use the static address as the random address (but skip
3958 * the HCI command if the current random address is already the
3959 * static one).
3960 */
111902f7 3961 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
ebd3a747
JH
3962 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3963 *own_addr_type = ADDR_LE_DEV_RANDOM;
3964 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3965 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3966 &hdev->static_addr);
3967 return 0;
3968 }
3969
3970 /* Neither privacy nor static address is being used, so use a
3971 * public address.
3972 */
3973 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3974
3975 return 0;
3976}
3977
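/* To summarize the selection logic above, own_addr_type ends up as:
 *
 *	privacy enabled          -> RANDOM, cached or freshly generated RPA
 *	require_privacy == true  -> RANDOM, new unresolvable address
 *	static address forced or
 *	no public address        -> RANDOM, hdev->static_addr
 *	otherwise                -> PUBLIC, hdev->bdaddr
 */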
a1f4c318
JH
3978/* Copy the Identity Address of the controller.
3979 *
3980 * If the controller has a public BD_ADDR, then by default use that one.
3981 * If this is an LE-only controller without a public address, default to
3982 * the static random address.
3983 *
3984 * For debugging purposes it is possible to force controllers with a
3985 * public address to use the static random address instead.
3986 */
3987void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3988 u8 *bdaddr_type)
3989{
111902f7 3990 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
a1f4c318
JH
3991 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3992 bacpy(bdaddr, &hdev->static_addr);
3993 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3994 } else {
3995 bacpy(bdaddr, &hdev->bdaddr);
3996 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3997 }
3998}
3999
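/* Example (sketch): filling an event or response with the identity
 * address:
 *
 *	bdaddr_t id_addr;
 *	u8 id_addr_type;
 *
 *	hci_copy_identity_address(hdev, &id_addr, &id_addr_type);
 *
 * id_addr and id_addr_type are illustrative locals.
 */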
9be0dab7
DH
4000/* Alloc HCI device */
4001struct hci_dev *hci_alloc_dev(void)
4002{
4003 struct hci_dev *hdev;
4004
27f70f3e 4005 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
9be0dab7
DH
4006 if (!hdev)
4007 return NULL;
4008
b1b813d4
DH
4009 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
4010 hdev->esco_type = (ESCO_HV1);
4011 hdev->link_mode = (HCI_LM_ACCEPT);
b4cb9fb2
MH
4012 hdev->num_iac = 0x01; /* One IAC support is mandatory */
4013 hdev->io_capability = 0x03; /* No Input No Output */
96c2103a 4014 hdev->manufacturer = 0xffff; /* Default to internal use */
bbaf444a
JH
4015 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
4016 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
b1b813d4 4017
b1b813d4
DH
4018 hdev->sniff_max_interval = 800;
4019 hdev->sniff_min_interval = 80;
4020
3f959d46 4021 hdev->le_adv_channel_map = 0x07;
628531c9
GL
4022 hdev->le_adv_min_interval = 0x0800;
4023 hdev->le_adv_max_interval = 0x0800;
bef64738
MH
4024 hdev->le_scan_interval = 0x0060;
4025 hdev->le_scan_window = 0x0030;
4e70c7e7
MH
4026 hdev->le_conn_min_interval = 0x0028;
4027 hdev->le_conn_max_interval = 0x0038;
04fb7d90
MH
4028 hdev->le_conn_latency = 0x0000;
4029 hdev->le_supv_timeout = 0x002a;
bef64738 4030
d6bfd59c 4031 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
b9a7a61e 4032 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
31ad1691
AK
4033 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
4034 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
d6bfd59c 4035
b1b813d4
DH
4036 mutex_init(&hdev->lock);
4037 mutex_init(&hdev->req_lock);
4038
4039 INIT_LIST_HEAD(&hdev->mgmt_pending);
4040 INIT_LIST_HEAD(&hdev->blacklist);
6659358e 4041 INIT_LIST_HEAD(&hdev->whitelist);
b1b813d4
DH
4042 INIT_LIST_HEAD(&hdev->uuids);
4043 INIT_LIST_HEAD(&hdev->link_keys);
4044 INIT_LIST_HEAD(&hdev->long_term_keys);
970c4e46 4045 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
b1b813d4 4046 INIT_LIST_HEAD(&hdev->remote_oob_data);
d2ab0ac1 4047 INIT_LIST_HEAD(&hdev->le_white_list);
15819a70 4048 INIT_LIST_HEAD(&hdev->le_conn_params);
77a77a30 4049 INIT_LIST_HEAD(&hdev->pend_le_conns);
66f8455a 4050 INIT_LIST_HEAD(&hdev->pend_le_reports);
6b536b5e 4051 INIT_LIST_HEAD(&hdev->conn_hash.list);
b1b813d4
DH
4052
4053 INIT_WORK(&hdev->rx_work, hci_rx_work);
4054 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
4055 INIT_WORK(&hdev->tx_work, hci_tx_work);
4056 INIT_WORK(&hdev->power_on, hci_power_on);
b1b813d4 4057
b1b813d4
DH
4058 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
4059 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
4060 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
4061
b1b813d4
DH
4062 skb_queue_head_init(&hdev->rx_q);
4063 skb_queue_head_init(&hdev->cmd_q);
4064 skb_queue_head_init(&hdev->raw_q);
4065
4066 init_waitqueue_head(&hdev->req_wait_q);
4067
65cc2b49 4068 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
b1b813d4 4069
b1b813d4
DH
4070 hci_init_sysfs(hdev);
4071 discovery_init(hdev);
9be0dab7
DH
4072
4073 return hdev;
4074}
4075EXPORT_SYMBOL(hci_alloc_dev);
4076
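/* Example (sketch): a transport driver pairs the alloc, register and
 * free calls like this in its probe path, with my_open/my_close/
 * my_send as placeholder driver callbacks:
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0) {
 *		hci_free_dev(hdev);
 *		return err;
 *	}
 *
 * hci_register_dev() below rejects an hdev that leaves any of these
 * three callbacks unset.
 */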
4077/* Free HCI device */
4078void hci_free_dev(struct hci_dev *hdev)
4079{
9be0dab7
DH
4080 /* will free via device release */
4081 put_device(&hdev->dev);
4082}
4083EXPORT_SYMBOL(hci_free_dev);
4084
1da177e4
LT
4085/* Register HCI device */
4086int hci_register_dev(struct hci_dev *hdev)
4087{
b1b813d4 4088 int id, error;
1da177e4 4089
74292d5a 4090 if (!hdev->open || !hdev->close || !hdev->send)
1da177e4
LT
4091 return -EINVAL;
4092
08add513
MM
4093 /* Do not allow HCI_AMP devices to register at index 0,
4094 * so the index can be used as the AMP controller ID.
4095 */
3df92b31
SL
4096 switch (hdev->dev_type) {
4097 case HCI_BREDR:
4098 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
4099 break;
4100 case HCI_AMP:
4101 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
4102 break;
4103 default:
4104 return -EINVAL;
1da177e4 4105 }
8e87d142 4106
3df92b31
SL
4107 if (id < 0)
4108 return id;
4109
1da177e4
LT
4110 sprintf(hdev->name, "hci%d", id);
4111 hdev->id = id;
2d8b3a11
AE
4112
4113 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4114
d8537548
KC
4115 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4116 WQ_MEM_RECLAIM, 1, hdev->name);
33ca954d
DH
4117 if (!hdev->workqueue) {
4118 error = -ENOMEM;
4119 goto err;
4120 }
f48fd9c8 4121
d8537548
KC
4122 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4123 WQ_MEM_RECLAIM, 1, hdev->name);
6ead1bbc
JH
4124 if (!hdev->req_workqueue) {
4125 destroy_workqueue(hdev->workqueue);
4126 error = -ENOMEM;
4127 goto err;
4128 }
4129
0153e2ec
MH
4130 if (!IS_ERR_OR_NULL(bt_debugfs))
4131 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4132
bdc3e0f1
MH
4133 dev_set_name(&hdev->dev, "%s", hdev->name);
4134
4135 error = device_add(&hdev->dev);
33ca954d 4136 if (error < 0)
54506918 4137 goto err_wqueue;
1da177e4 4138
611b30f7 4139 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
4140 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4141 hdev);
611b30f7
MH
4142 if (hdev->rfkill) {
4143 if (rfkill_register(hdev->rfkill) < 0) {
4144 rfkill_destroy(hdev->rfkill);
4145 hdev->rfkill = NULL;
4146 }
4147 }
4148
5e130367
JH
4149 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4150 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4151
a8b2d5c2 4152 set_bit(HCI_SETUP, &hdev->dev_flags);
004b0258 4153 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
ce2be9ac 4154
01cd3404 4155 if (hdev->dev_type == HCI_BREDR) {
56f87901
JH
4156 /* Assume BR/EDR support until proven otherwise (such as
4157 * through reading supported features during init).
4158 */
4159 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4160 }
ce2be9ac 4161
fcee3377
GP
4162 write_lock(&hci_dev_list_lock);
4163 list_add(&hdev->list, &hci_dev_list);
4164 write_unlock(&hci_dev_list_lock);
4165
4a964404
MH
4166 /* Devices that are marked for raw-only usage are unconfigured
4167 * and should not be included in normal operation.
fee746b0
MH
4168 */
4169 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4a964404 4170 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
fee746b0 4171
1da177e4 4172 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 4173 hci_dev_hold(hdev);
1da177e4 4174
19202573 4175 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 4176
1da177e4 4177 return id;
f48fd9c8 4178
33ca954d
DH
4179err_wqueue:
4180 destroy_workqueue(hdev->workqueue);
6ead1bbc 4181 destroy_workqueue(hdev->req_workqueue);
33ca954d 4182err:
3df92b31 4183 ida_simple_remove(&hci_index_ida, hdev->id);
f48fd9c8 4184
33ca954d 4185 return error;
1da177e4
LT
4186}
4187EXPORT_SYMBOL(hci_register_dev);
4188
4189/* Unregister HCI device */
59735631 4190void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 4191{
3df92b31 4192 int i, id;
ef222013 4193
c13854ce 4194 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 4195
94324962
JH
4196 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4197
3df92b31
SL
4198 id = hdev->id;
4199
f20d09d5 4200 write_lock(&hci_dev_list_lock);
1da177e4 4201 list_del(&hdev->list);
f20d09d5 4202 write_unlock(&hci_dev_list_lock);
1da177e4
LT
4203
4204 hci_dev_do_close(hdev);
4205
cd4c5391 4206 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
4207 kfree_skb(hdev->reassembly[i]);
4208
b9b5ef18
GP
4209 cancel_work_sync(&hdev->power_on);
4210
ab81cbf9 4211 if (!test_bit(HCI_INIT, &hdev->flags) &&
d603b76b
MH
4212 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4213 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
09fd0de5 4214 hci_dev_lock(hdev);
744cf19e 4215 mgmt_index_removed(hdev);
09fd0de5 4216 hci_dev_unlock(hdev);
56e5cb86 4217 }
ab81cbf9 4218
2e58ef3e
JH
4219 /* mgmt_index_removed should take care of emptying the
4220 * pending list */
4221 BUG_ON(!list_empty(&hdev->mgmt_pending));
4222
1da177e4
LT
4223 hci_notify(hdev, HCI_DEV_UNREG);
4224
611b30f7
MH
4225 if (hdev->rfkill) {
4226 rfkill_unregister(hdev->rfkill);
4227 rfkill_destroy(hdev->rfkill);
4228 }
4229
711eafe3 4230 smp_unregister(hdev);
99780a7b 4231
bdc3e0f1 4232 device_del(&hdev->dev);
147e2d59 4233
0153e2ec
MH
4234 debugfs_remove_recursive(hdev->debugfs);
4235
f48fd9c8 4236 destroy_workqueue(hdev->workqueue);
6ead1bbc 4237 destroy_workqueue(hdev->req_workqueue);
f48fd9c8 4238
09fd0de5 4239 hci_dev_lock(hdev);
dcc36c16 4240 hci_bdaddr_list_clear(&hdev->blacklist);
6659358e 4241 hci_bdaddr_list_clear(&hdev->whitelist);
2aeb9a1a 4242 hci_uuids_clear(hdev);
55ed8ca1 4243 hci_link_keys_clear(hdev);
b899efaf 4244 hci_smp_ltks_clear(hdev);
970c4e46 4245 hci_smp_irks_clear(hdev);
2763eda6 4246 hci_remote_oob_data_clear(hdev);
dcc36c16 4247 hci_bdaddr_list_clear(&hdev->le_white_list);
373110c5 4248 hci_conn_params_clear_all(hdev);
09fd0de5 4249 hci_dev_unlock(hdev);
e2e0cacb 4250
dc946bd8 4251 hci_dev_put(hdev);
3df92b31
SL
4252
4253 ida_simple_remove(&hci_index_ida, id);
1da177e4
LT
4254}
4255EXPORT_SYMBOL(hci_unregister_dev);
4256
4257/* Suspend HCI device */
4258int hci_suspend_dev(struct hci_dev *hdev)
4259{
4260 hci_notify(hdev, HCI_DEV_SUSPEND);
4261 return 0;
4262}
4263EXPORT_SYMBOL(hci_suspend_dev);
4264
4265/* Resume HCI device */
4266int hci_resume_dev(struct hci_dev *hdev)
4267{
4268 hci_notify(hdev, HCI_DEV_RESUME);
4269 return 0;
4270}
4271EXPORT_SYMBOL(hci_resume_dev);
4272
75e0569f
MH
4273/* Reset HCI device */
4274int hci_reset_dev(struct hci_dev *hdev)
4275{
4276 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4277 struct sk_buff *skb;
4278
4279 skb = bt_skb_alloc(3, GFP_ATOMIC);
4280 if (!skb)
4281 return -ENOMEM;
4282
4283 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
4284 memcpy(skb_put(skb, 3), hw_err, 3);
4285
4286 /* Send Hardware Error to upper stack */
4287 return hci_recv_frame(hdev, skb);
4288}
4289EXPORT_SYMBOL(hci_reset_dev);
4290
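/* Example (sketch): a driver that detects an unrecoverable controller
 * state can ask the core to handle it as a hardware error:
 *
 *	if (firmware_wedged(my_dev))
 *		hci_reset_dev(my_dev->hdev);
 *
 * firmware_wedged() and my_dev are placeholders for driver state.
 */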
76bca880 4291/* Receive frame from HCI drivers */
e1a26170 4292int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 4293{
76bca880 4294 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 4295 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
4296 kfree_skb(skb);
4297 return -ENXIO;
4298 }
4299
d82603c6 4300 /* Incoming skb */
76bca880
MH
4301 bt_cb(skb)->incoming = 1;
4302
4303 /* Time stamp */
4304 __net_timestamp(skb);
4305
76bca880 4306 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 4307 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 4308
76bca880
MH
4309 return 0;
4310}
4311EXPORT_SYMBOL(hci_recv_frame);
4312
33e882a5 4313static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 4314 int count, __u8 index)
33e882a5
SS
4315{
4316 int len = 0;
4317 int hlen = 0;
4318 int remain = count;
4319 struct sk_buff *skb;
4320 struct bt_skb_cb *scb;
4321
4322 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 4323 index >= NUM_REASSEMBLY)
33e882a5
SS
4324 return -EILSEQ;
4325
4326 skb = hdev->reassembly[index];
4327
4328 if (!skb) {
4329 switch (type) {
4330 case HCI_ACLDATA_PKT:
4331 len = HCI_MAX_FRAME_SIZE;
4332 hlen = HCI_ACL_HDR_SIZE;
4333 break;
4334 case HCI_EVENT_PKT:
4335 len = HCI_MAX_EVENT_SIZE;
4336 hlen = HCI_EVENT_HDR_SIZE;
4337 break;
4338 case HCI_SCODATA_PKT:
4339 len = HCI_MAX_SCO_SIZE;
4340 hlen = HCI_SCO_HDR_SIZE;
4341 break;
4342 }
4343
1e429f38 4344 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
4345 if (!skb)
4346 return -ENOMEM;
4347
4348 scb = (void *) skb->cb;
4349 scb->expect = hlen;
4350 scb->pkt_type = type;
4351
33e882a5
SS
4352 hdev->reassembly[index] = skb;
4353 }
4354
4355 while (count) {
4356 scb = (void *) skb->cb;
89bb46d0 4357 len = min_t(uint, scb->expect, count);
33e882a5
SS
4358
4359 memcpy(skb_put(skb, len), data, len);
4360
4361 count -= len;
4362 data += len;
4363 scb->expect -= len;
4364 remain = count;
4365
4366 switch (type) {
4367 case HCI_EVENT_PKT:
4368 if (skb->len == HCI_EVENT_HDR_SIZE) {
4369 struct hci_event_hdr *h = hci_event_hdr(skb);
4370 scb->expect = h->plen;
4371
4372 if (skb_tailroom(skb) < scb->expect) {
4373 kfree_skb(skb);
4374 hdev->reassembly[index] = NULL;
4375 return -ENOMEM;
4376 }
4377 }
4378 break;
4379
4380 case HCI_ACLDATA_PKT:
4381 if (skb->len == HCI_ACL_HDR_SIZE) {
4382 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4383 scb->expect = __le16_to_cpu(h->dlen);
4384
4385 if (skb_tailroom(skb) < scb->expect) {
4386 kfree_skb(skb);
4387 hdev->reassembly[index] = NULL;
4388 return -ENOMEM;
4389 }
4390 }
4391 break;
4392
4393 case HCI_SCODATA_PKT:
4394 if (skb->len == HCI_SCO_HDR_SIZE) {
4395 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4396 scb->expect = h->dlen;
4397
4398 if (skb_tailroom(skb) < scb->expect) {
4399 kfree_skb(skb);
4400 hdev->reassembly[index] = NULL;
4401 return -ENOMEM;
4402 }
4403 }
4404 break;
4405 }
4406
4407 if (scb->expect == 0) {
4408 /* Complete frame */
4409
4410 bt_cb(skb)->pkt_type = type;
e1a26170 4411 hci_recv_frame(hdev, skb);
33e882a5
SS
4412
4413 hdev->reassembly[index] = NULL;
4414 return remain;
4415 }
4416 }
4417
4418 return remain;
4419}
4420
99811510
SS
4421#define STREAM_REASSEMBLY 0
4422
4423int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4424{
4425 int type;
4426 int rem = 0;
4427
da5f6c37 4428 while (count) {
99811510
SS
4429 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4430
4431 if (!skb) {
4432 struct { char type; } *pkt;
4433
4434 /* Start of the frame */
4435 pkt = data;
4436 type = pkt->type;
4437
4438 data++;
4439 count--;
4440 } else
4441 type = bt_cb(skb)->pkt_type;
4442
1e429f38 4443 rem = hci_reassembly(hdev, type, data, count,
a8c5fb1a 4444 STREAM_REASSEMBLY);
99811510
SS
4445 if (rem < 0)
4446 return rem;
4447
4448 data += (count - rem);
4449 count = rem;
f81c6224 4450 }
99811510
SS
4451
4452 return rem;
4453}
4454EXPORT_SYMBOL(hci_recv_stream_fragment);
4455
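/* Example (sketch): a UART style driver can push raw received bytes
 * into the core and let it carve out complete packets:
 *
 *	ret = hci_recv_stream_fragment(hdev, buf, len);
 *	if (ret < 0)
 *		BT_ERR("Frame reassembly failed (%d)", ret);
 *
 * buf/len describe the received byte buffer; the first byte of each
 * frame is expected to be the HCI packet type indicator.
 */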
1da177e4
LT
4456/* ---- Interface to upper protocols ---- */
4457
1da177e4
LT
4458int hci_register_cb(struct hci_cb *cb)
4459{
4460 BT_DBG("%p name %s", cb, cb->name);
4461
f20d09d5 4462 write_lock(&hci_cb_list_lock);
1da177e4 4463 list_add(&cb->list, &hci_cb_list);
f20d09d5 4464 write_unlock(&hci_cb_list_lock);
1da177e4
LT
4465
4466 return 0;
4467}
4468EXPORT_SYMBOL(hci_register_cb);
4469
4470int hci_unregister_cb(struct hci_cb *cb)
4471{
4472 BT_DBG("%p name %s", cb, cb->name);
4473
f20d09d5 4474 write_lock(&hci_cb_list_lock);
1da177e4 4475 list_del(&cb->list);
f20d09d5 4476 write_unlock(&hci_cb_list_lock);
1da177e4
LT
4477
4478 return 0;
4479}
4480EXPORT_SYMBOL(hci_unregister_cb);
4481
51086991 4482static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4 4483{
cdc52faa
MH
4484 int err;
4485
0d48d939 4486 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 4487
cd82e61c
MH
4488 /* Time stamp */
4489 __net_timestamp(skb);
1da177e4 4490
cd82e61c
MH
4491 /* Send copy to monitor */
4492 hci_send_to_monitor(hdev, skb);
4493
4494 if (atomic_read(&hdev->promisc)) {
4495 /* Send copy to the sockets */
470fe1b5 4496 hci_send_to_sock(hdev, skb);
1da177e4
LT
4497 }
4498
4499 /* Get rid of skb owner, prior to sending to the driver. */
4500 skb_orphan(skb);
4501
cdc52faa
MH
4502 err = hdev->send(hdev, skb);
4503 if (err < 0) {
4504 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4505 kfree_skb(skb);
4506 }
1da177e4
LT
4507}
4508
3119ae95
JH
4509void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4510{
4511 skb_queue_head_init(&req->cmd_q);
4512 req->hdev = hdev;
5d73e034 4513 req->err = 0;
3119ae95
JH
4514}
4515
4516int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4517{
4518 struct hci_dev *hdev = req->hdev;
4519 struct sk_buff *skb;
4520 unsigned long flags;
4521
4522 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4523
49c922bb 4524 /* If an error occurred during request building, remove all HCI
5d73e034
AG
4525 * commands queued on the HCI request queue.
4526 */
4527 if (req->err) {
4528 skb_queue_purge(&req->cmd_q);
4529 return req->err;
4530 }
4531
3119ae95
JH
4532 /* Do not allow empty requests */
4533 if (skb_queue_empty(&req->cmd_q))
382b0c39 4534 return -ENODATA;
3119ae95
JH
4535
4536 skb = skb_peek_tail(&req->cmd_q);
4537 bt_cb(skb)->req.complete = complete;
4538
4539 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4540 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4541 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4542
4543 queue_work(hdev->workqueue, &hdev->cmd_work);
4544
4545 return 0;
4546}
4547
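/* Example (sketch): a request is built and fired like this, mirroring
 * le_scan_disable_work() above; cp is a prepared parameter struct and
 * my_complete a placeholder hci_req_complete_t callback:
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
 *	err = hci_req_run(&req, my_complete);
 *
 * hci_req_run() returns -ENODATA for an empty request and req->err
 * if building any of the queued commands failed.
 */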
899de765
MH
4548bool hci_req_pending(struct hci_dev *hdev)
4549{
4550 return (hdev->req_status == HCI_REQ_PEND);
4551}
4552
1ca3a9d0 4553static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
07dc93dd 4554 u32 plen, const void *param)
1da177e4
LT
4555{
4556 int len = HCI_COMMAND_HDR_SIZE + plen;
4557 struct hci_command_hdr *hdr;
4558 struct sk_buff *skb;
4559
1da177e4 4560 skb = bt_skb_alloc(len, GFP_ATOMIC);
1ca3a9d0
JH
4561 if (!skb)
4562 return NULL;
1da177e4
LT
4563
4564 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 4565 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
4566 hdr->plen = plen;
4567
4568 if (plen)
4569 memcpy(skb_put(skb, plen), param, plen);
4570
4571 BT_DBG("skb len %d", skb->len);
4572
0d48d939 4573 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
43e73e4e 4574 bt_cb(skb)->opcode = opcode;
c78ae283 4575
1ca3a9d0
JH
4576 return skb;
4577}
4578
4579/* Send HCI command */
07dc93dd
JH
4580int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4581 const void *param)
1ca3a9d0
JH
4582{
4583 struct sk_buff *skb;
4584
4585 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4586
4587 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4588 if (!skb) {
4589 BT_ERR("%s no memory for command", hdev->name);
4590 return -ENOMEM;
4591 }
4592
49c922bb 4593 /* Stand-alone HCI commands must be flagged as
11714b3d
JH
4594 * single-command requests.
4595 */
4596 bt_cb(skb)->req.start = true;
4597
1da177e4 4598 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 4599 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
4600
4601 return 0;
4602}
1da177e4 4603
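/* Example (sketch): a stand-alone command with no parameters, such as
 * a controller reset, is sent as:
 *
 *	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 *
 * Completion is reported asynchronously via the event path.
 */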
71c76a17 4604/* Queue a command to an asynchronous HCI request */
07dc93dd
JH
4605void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4606 const void *param, u8 event)
71c76a17
JH
4607{
4608 struct hci_dev *hdev = req->hdev;
4609 struct sk_buff *skb;
4610
4611 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4612
49c922bb 4613 /* If an error occurred during request building, there is no point in
34739c1e
AG
4614 * queueing the HCI command. We can simply return.
4615 */
4616 if (req->err)
4617 return;
4618
71c76a17
JH
4619 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4620 if (!skb) {
5d73e034
AG
4621 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4622 hdev->name, opcode);
4623 req->err = -ENOMEM;
e348fe6b 4624 return;
71c76a17
JH
4625 }
4626
4627 if (skb_queue_empty(&req->cmd_q))
4628 bt_cb(skb)->req.start = true;
4629
02350a72
JH
4630 bt_cb(skb)->req.event = event;
4631
71c76a17 4632 skb_queue_tail(&req->cmd_q, skb);
71c76a17
JH
4633}
4634
07dc93dd
JH
4635void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4636 const void *param)
02350a72
JH
4637{
4638 hci_req_add_ev(req, opcode, plen, param, 0);
4639}
4640
1da177e4 4641/* Get data from the previously sent command */
a9de9248 4642void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
4643{
4644 struct hci_command_hdr *hdr;
4645
4646 if (!hdev->sent_cmd)
4647 return NULL;
4648
4649 hdr = (void *) hdev->sent_cmd->data;
4650
a9de9248 4651 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
4652 return NULL;
4653
f0e09510 4654 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
4655
4656 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4657}
4658
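/* Example (sketch): an event handler can recover the parameters of
 * the command that triggered it:
 *
 *	struct hci_cp_le_set_scan_enable *cp;
 *
 *	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
 *	if (!cp)
 *		return;
 *
 * A NULL return means the last sent command had a different opcode.
 */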
4659/* Send ACL data */
4660static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4661{
4662 struct hci_acl_hdr *hdr;
4663 int len = skb->len;
4664
badff6d0
ACM
4665 skb_push(skb, HCI_ACL_HDR_SIZE);
4666 skb_reset_transport_header(skb);
9c70220b 4667 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
4668 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4669 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
4670}
4671
ee22be7e 4672static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 4673 struct sk_buff *skb, __u16 flags)
1da177e4 4674{
ee22be7e 4675 struct hci_conn *conn = chan->conn;
1da177e4
LT
4676 struct hci_dev *hdev = conn->hdev;
4677 struct sk_buff *list;
4678
087bfd99
GP
4679 skb->len = skb_headlen(skb);
4680 skb->data_len = 0;
4681
4682 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
4683
4684 switch (hdev->dev_type) {
4685 case HCI_BREDR:
4686 hci_add_acl_hdr(skb, conn->handle, flags);
4687 break;
4688 case HCI_AMP:
4689 hci_add_acl_hdr(skb, chan->handle, flags);
4690 break;
4691 default:
4692 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4693 return;
4694 }
087bfd99 4695
70f23020
AE
4696 list = skb_shinfo(skb)->frag_list;
4697 if (!list) {
1da177e4
LT
4698 /* Non-fragmented */
4699 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4700
73d80deb 4701 skb_queue_tail(queue, skb);
1da177e4
LT
4702 } else {
4703 /* Fragmented */
4704 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4705
4706 skb_shinfo(skb)->frag_list = NULL;
4707
9cfd5a23
JR
4708 /* Queue all fragments atomically. We need to use spin_lock_bh
4709 * here because of 6LoWPAN links, where this function is
4710 * called from softirq context and using a normal spin lock could cause
4711 * deadlocks.
4712 */
4713 spin_lock_bh(&queue->lock);
1da177e4 4714
73d80deb 4715 __skb_queue_tail(queue, skb);
e702112f
AE
4716
4717 flags &= ~ACL_START;
4718 flags |= ACL_CONT;
1da177e4
LT
4719 do {
4720 skb = list; list = list->next;
8e87d142 4721
0d48d939 4722 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 4723 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
4724
4725 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4726
73d80deb 4727 __skb_queue_tail(queue, skb);
1da177e4
LT
4728 } while (list);
4729
9cfd5a23 4730 spin_unlock_bh(&queue->lock);
1da177e4 4731 }
73d80deb
LAD
4732}
4733
4734void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4735{
ee22be7e 4736 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 4737
f0e09510 4738 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb 4739
ee22be7e 4740 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 4741
3eff45ea 4742 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4743}
1da177e4
LT
4744
4745/* Send SCO data */
0d861d8b 4746void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
4747{
4748 struct hci_dev *hdev = conn->hdev;
4749 struct hci_sco_hdr hdr;
4750
4751 BT_DBG("%s len %d", hdev->name, skb->len);
4752
aca3192c 4753 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
4754 hdr.dlen = skb->len;
4755
badff6d0
ACM
4756 skb_push(skb, HCI_SCO_HDR_SIZE);
4757 skb_reset_transport_header(skb);
9c70220b 4758 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4 4759
0d48d939 4760 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 4761
1da177e4 4762 skb_queue_tail(&conn->data_q, skb);
3eff45ea 4763 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4764}
1da177e4
LT
4765
4766/* ---- HCI TX task (outgoing data) ---- */
4767
4768/* HCI Connection scheduler */
6039aa73
GP
4769static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4770 int *quote)
1da177e4
LT
4771{
4772 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4773 struct hci_conn *conn = NULL, *c;
abc5de8f 4774 unsigned int num = 0, min = ~0;
1da177e4 4775
8e87d142 4776 /* We don't have to lock the device here. Connections are always
1da177e4 4777 * added and removed with TX task disabled. */
bf4c6325
GP
4778
4779 rcu_read_lock();
4780
4781 list_for_each_entry_rcu(c, &h->list, list) {
769be974 4782 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 4783 continue;
769be974
MH
4784
4785 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4786 continue;
4787
1da177e4
LT
4788 num++;
4789
4790 if (c->sent < min) {
4791 min = c->sent;
4792 conn = c;
4793 }
52087a79
LAD
4794
4795 if (hci_conn_num(hdev, type) == num)
4796 break;
1da177e4
LT
4797 }
4798
bf4c6325
GP
4799 rcu_read_unlock();
4800
1da177e4 4801 if (conn) {
6ed58ec5
VT
4802 int cnt, q;
4803
4804 switch (conn->type) {
4805 case ACL_LINK:
4806 cnt = hdev->acl_cnt;
4807 break;
4808 case SCO_LINK:
4809 case ESCO_LINK:
4810 cnt = hdev->sco_cnt;
4811 break;
4812 case LE_LINK:
4813 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4814 break;
4815 default:
4816 cnt = 0;
4817 BT_ERR("Unknown link type");
4818 }
4819
4820 q = cnt / num;
1da177e4
LT
4821 *quote = q ? q : 1;
4822 } else
4823 *quote = 0;
4824
4825 BT_DBG("conn %p quote %d", conn, *quote);
4826 return conn;
4827}
4828
6039aa73 4829static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
4830{
4831 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4832 struct hci_conn *c;
1da177e4 4833
bae1f5d9 4834 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 4835
bf4c6325
GP
4836 rcu_read_lock();
4837
1da177e4 4838 /* Kill stalled connections */
bf4c6325 4839 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 4840 if (c->type == type && c->sent) {
6ed93dc6
AE
4841 BT_ERR("%s killing stalled connection %pMR",
4842 hdev->name, &c->dst);
bed71748 4843 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
4844 }
4845 }
bf4c6325
GP
4846
4847 rcu_read_unlock();
1da177e4
LT
4848}
4849
6039aa73
GP
4850static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4851 int *quote)
1da177e4 4852{
73d80deb
LAD
4853 struct hci_conn_hash *h = &hdev->conn_hash;
4854 struct hci_chan *chan = NULL;
abc5de8f 4855 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 4856 struct hci_conn *conn;
73d80deb
LAD
4857 int cnt, q, conn_num = 0;
4858
4859 BT_DBG("%s", hdev->name);
4860
bf4c6325
GP
4861 rcu_read_lock();
4862
4863 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
4864 struct hci_chan *tmp;
4865
4866 if (conn->type != type)
4867 continue;
4868
4869 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4870 continue;
4871
4872 conn_num++;
4873
8192edef 4874 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
4875 struct sk_buff *skb;
4876
4877 if (skb_queue_empty(&tmp->data_q))
4878 continue;
4879
4880 skb = skb_peek(&tmp->data_q);
4881 if (skb->priority < cur_prio)
4882 continue;
4883
4884 if (skb->priority > cur_prio) {
4885 num = 0;
4886 min = ~0;
4887 cur_prio = skb->priority;
4888 }
4889
4890 num++;
4891
4892 if (conn->sent < min) {
4893 min = conn->sent;
4894 chan = tmp;
4895 }
4896 }
4897
4898 if (hci_conn_num(hdev, type) == conn_num)
4899 break;
4900 }
4901
bf4c6325
GP
4902 rcu_read_unlock();
4903
73d80deb
LAD
4904 if (!chan)
4905 return NULL;
4906
4907 switch (chan->conn->type) {
4908 case ACL_LINK:
4909 cnt = hdev->acl_cnt;
4910 break;
bd1eb66b
AE
4911 case AMP_LINK:
4912 cnt = hdev->block_cnt;
4913 break;
73d80deb
LAD
4914 case SCO_LINK:
4915 case ESCO_LINK:
4916 cnt = hdev->sco_cnt;
4917 break;
4918 case LE_LINK:
4919 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4920 break;
4921 default:
4922 cnt = 0;
4923 BT_ERR("Unknown link type");
4924 }
4925
4926 q = cnt / num;
4927 *quote = q ? q : 1;
4928 BT_DBG("chan %p quote %d", chan, *quote);
4929 return chan;
4930}
4931
02b20f0b
LAD
4932static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4933{
4934 struct hci_conn_hash *h = &hdev->conn_hash;
4935 struct hci_conn *conn;
4936 int num = 0;
4937
4938 BT_DBG("%s", hdev->name);
4939
bf4c6325
GP
4940 rcu_read_lock();
4941
4942 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
4943 struct hci_chan *chan;
4944
4945 if (conn->type != type)
4946 continue;
4947
4948 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4949 continue;
4950
4951 num++;
4952
8192edef 4953 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
4954 struct sk_buff *skb;
4955
4956 if (chan->sent) {
4957 chan->sent = 0;
4958 continue;
4959 }
4960
4961 if (skb_queue_empty(&chan->data_q))
4962 continue;
4963
4964 skb = skb_peek(&chan->data_q);
4965 if (skb->priority >= HCI_PRIO_MAX - 1)
4966 continue;
4967
4968 skb->priority = HCI_PRIO_MAX - 1;
4969
4970 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 4971 skb->priority);
02b20f0b
LAD
4972 }
4973
4974 if (hci_conn_num(hdev, type) == num)
4975 break;
4976 }
bf4c6325
GP
4977
4978 rcu_read_unlock();
4979
02b20f0b
LAD
4980}
4981
b71d385a
AE
4982static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4983{
4984 /* Calculate count of blocks used by this packet */
4985 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4986}
4987
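/* Worked example (with an assumed hdev->block_len of 339): an ACL skb
 * of 1021 bytes carries 1021 - HCI_ACL_HDR_SIZE = 1017 payload bytes,
 * so __get_blocks() returns DIV_ROUND_UP(1017, 339) = 3 blocks.
 */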
6039aa73 4988static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 4989{
4a964404 4990 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1da177e4
LT
4991 /* ACL tx timeout must be longer than the maximum
4992 * link supervision timeout (40.9 seconds) */
63d2bc1b 4993 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 4994 HCI_ACL_TX_TIMEOUT))
bae1f5d9 4995 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 4996 }
63d2bc1b 4997}
1da177e4 4998
6039aa73 4999static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
5000{
5001 unsigned int cnt = hdev->acl_cnt;
5002 struct hci_chan *chan;
5003 struct sk_buff *skb;
5004 int quote;
5005
5006 __check_timeout(hdev, cnt);
04837f64 5007
73d80deb 5008 while (hdev->acl_cnt &&
a8c5fb1a 5009 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
5010 u32 priority = (skb_peek(&chan->data_q))->priority;
5011 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 5012 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 5013 skb->len, skb->priority);
73d80deb 5014
ec1cce24
LAD
5015 /* Stop if priority has changed */
5016 if (skb->priority < priority)
5017 break;
5018
5019 skb = skb_dequeue(&chan->data_q);
5020
73d80deb 5021 hci_conn_enter_active_mode(chan->conn,
04124681 5022 bt_cb(skb)->force_active);
04837f64 5023
57d17d70 5024 hci_send_frame(hdev, skb);
1da177e4
LT
5025 hdev->acl_last_tx = jiffies;
5026
5027 hdev->acl_cnt--;
73d80deb
LAD
5028 chan->sent++;
5029 chan->conn->sent++;
1da177e4
LT
5030 }
5031 }
02b20f0b
LAD
5032
5033 if (cnt != hdev->acl_cnt)
5034 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
5035}
5036
6039aa73 5037static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 5038{
63d2bc1b 5039 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
5040 struct hci_chan *chan;
5041 struct sk_buff *skb;
5042 int quote;
bd1eb66b 5043 u8 type;
b71d385a 5044
63d2bc1b 5045 __check_timeout(hdev, cnt);
b71d385a 5046
bd1eb66b
AE
5047 BT_DBG("%s", hdev->name);
5048
5049 if (hdev->dev_type == HCI_AMP)
5050 type = AMP_LINK;
5051 else
5052 type = ACL_LINK;
5053
b71d385a 5054 while (hdev->block_cnt > 0 &&
bd1eb66b 5055 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
5056 u32 priority = (skb_peek(&chan->data_q))->priority;
5057 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
5058 int blocks;
5059
5060 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 5061 skb->len, skb->priority);
b71d385a
AE
5062
5063 /* Stop if priority has changed */
5064 if (skb->priority < priority)
5065 break;
5066
5067 skb = skb_dequeue(&chan->data_q);
5068
5069 blocks = __get_blocks(hdev, skb);
5070 if (blocks > hdev->block_cnt)
5071 return;
5072
5073 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 5074 bt_cb(skb)->force_active);
b71d385a 5075
57d17d70 5076 hci_send_frame(hdev, skb);
b71d385a
AE
5077 hdev->acl_last_tx = jiffies;
5078
5079 hdev->block_cnt -= blocks;
5080 quote -= blocks;
5081
5082 chan->sent += blocks;
5083 chan->conn->sent += blocks;
5084 }
5085 }
5086
5087 if (cnt != hdev->block_cnt)
bd1eb66b 5088 hci_prio_recalculate(hdev, type);
b71d385a
AE
5089}
5090
6039aa73 5091static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
5092{
5093 BT_DBG("%s", hdev->name);
5094
bd1eb66b
AE
5095 /* No ACL link over BR/EDR controller */
5096 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
5097 return;
5098
5099 /* No AMP link over AMP controller */
5100 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
5101 return;
5102
5103 switch (hdev->flow_ctl_mode) {
5104 case HCI_FLOW_CTL_MODE_PACKET_BASED:
5105 hci_sched_acl_pkt(hdev);
5106 break;
5107
5108 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5109 hci_sched_acl_blk(hdev);
5110 break;
5111 }
5112}
5113
1da177e4 5114/* Schedule SCO */
6039aa73 5115static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
5116{
5117 struct hci_conn *conn;
5118 struct sk_buff *skb;
5119 int quote;
5120
5121 BT_DBG("%s", hdev->name);
5122
52087a79
LAD
5123 if (!hci_conn_num(hdev, SCO_LINK))
5124 return;
5125
1da177e4
LT
5126 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5127 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5128 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 5129 hci_send_frame(hdev, skb);
1da177e4
LT
5130
5131 conn->sent++;
5132 if (conn->sent == ~0)
5133 conn->sent = 0;
5134 }
5135 }
5136}
5137
6039aa73 5138static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
5139{
5140 struct hci_conn *conn;
5141 struct sk_buff *skb;
5142 int quote;
5143
5144 BT_DBG("%s", hdev->name);
5145
52087a79
LAD
5146 if (!hci_conn_num(hdev, ESCO_LINK))
5147 return;
5148
8fc9ced3
GP
5149 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5150 &quote))) {
b6a0dc82
MH
5151 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5152 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 5153 hci_send_frame(hdev, skb);
b6a0dc82
MH
5154
5155 conn->sent++;
5156 if (conn->sent == ~0)
5157 conn->sent = 0;
5158 }
5159 }
5160}
5161
6039aa73 5162static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 5163{
73d80deb 5164 struct hci_chan *chan;
6ed58ec5 5165 struct sk_buff *skb;
02b20f0b 5166 int quote, cnt, tmp;
6ed58ec5
VT
5167
5168 BT_DBG("%s", hdev->name);
5169
52087a79
LAD
5170 if (!hci_conn_num(hdev, LE_LINK))
5171 return;
5172
4a964404 5173 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
6ed58ec5
VT
5174 /* LE tx timeout must be longer than the maximum
5175 * link supervision timeout (40.9 seconds) */
bae1f5d9 5176 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 5177 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 5178 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
5179 }
5180
5181 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 5182 tmp = cnt;
73d80deb 5183 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
5184 u32 priority = (skb_peek(&chan->data_q))->priority;
5185 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 5186 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 5187 skb->len, skb->priority);
6ed58ec5 5188
ec1cce24
LAD
5189 /* Stop if priority has changed */
5190 if (skb->priority < priority)
5191 break;
5192
5193 skb = skb_dequeue(&chan->data_q);
5194
57d17d70 5195 hci_send_frame(hdev, skb);
6ed58ec5
VT
5196 hdev->le_last_tx = jiffies;
5197
5198 cnt--;
73d80deb
LAD
5199 chan->sent++;
5200 chan->conn->sent++;
6ed58ec5
VT
5201 }
5202 }
73d80deb 5203
6ed58ec5
VT
5204 if (hdev->le_pkts)
5205 hdev->le_cnt = cnt;
5206 else
5207 hdev->acl_cnt = cnt;
02b20f0b
LAD
5208
5209 if (cnt != tmp)
5210 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
5211}
5212
3eff45ea 5213static void hci_tx_work(struct work_struct *work)
1da177e4 5214{
3eff45ea 5215 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
5216 struct sk_buff *skb;
5217
6ed58ec5 5218 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 5219 hdev->sco_cnt, hdev->le_cnt);
1da177e4 5220
52de599e
MH
5221 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5222 /* Schedule queues and send stuff to HCI driver */
5223 hci_sched_acl(hdev);
5224 hci_sched_sco(hdev);
5225 hci_sched_esco(hdev);
5226 hci_sched_le(hdev);
5227 }
6ed58ec5 5228
1da177e4
LT
5229 /* Send next queued raw (unknown type) packet */
5230 while ((skb = skb_dequeue(&hdev->raw_q)))
57d17d70 5231 hci_send_frame(hdev, skb);
1da177e4
LT
5232}
5233
25985edc 5234/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
5235
5236/* ACL data packet */
6039aa73 5237static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
5238{
5239 struct hci_acl_hdr *hdr = (void *) skb->data;
5240 struct hci_conn *conn;
5241 __u16 handle, flags;
5242
5243 skb_pull(skb, HCI_ACL_HDR_SIZE);
5244
5245 handle = __le16_to_cpu(hdr->handle);
5246 flags = hci_flags(handle);
5247 handle = hci_handle(handle);
5248
f0e09510 5249 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 5250 handle, flags);
1da177e4
LT
5251
5252 hdev->stat.acl_rx++;
5253
5254 hci_dev_lock(hdev);
5255 conn = hci_conn_hash_lookup_handle(hdev, handle);
5256 hci_dev_unlock(hdev);
8e87d142 5257
1da177e4 5258 if (conn) {
65983fc7 5259 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 5260
1da177e4 5261 /* Send to upper protocol */
686ebf28
UF
5262 l2cap_recv_acldata(conn, skb, flags);
5263 return;
1da177e4 5264 } else {
8e87d142 5265 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 5266 hdev->name, handle);
1da177e4
LT
5267 }
5268
5269 kfree_skb(skb);
5270}
5271
5272/* SCO data packet */
6039aa73 5273static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
5274{
5275 struct hci_sco_hdr *hdr = (void *) skb->data;
5276 struct hci_conn *conn;
5277 __u16 handle;
5278
5279 skb_pull(skb, HCI_SCO_HDR_SIZE);
5280
5281 handle = __le16_to_cpu(hdr->handle);
5282
f0e09510 5283 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
5284
5285 hdev->stat.sco_rx++;
5286
5287 hci_dev_lock(hdev);
5288 conn = hci_conn_hash_lookup_handle(hdev, handle);
5289 hci_dev_unlock(hdev);
5290
5291 if (conn) {
1da177e4 5292 /* Send to upper protocol */
686ebf28
UF
5293 sco_recv_scodata(conn, skb);
5294 return;
1da177e4 5295 } else {
8e87d142 5296 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 5297 hdev->name, handle);
1da177e4
LT
5298 }
5299
5300 kfree_skb(skb);
5301}
5302
9238f36a
JH
5303static bool hci_req_is_complete(struct hci_dev *hdev)
5304{
5305 struct sk_buff *skb;
5306
5307 skb = skb_peek(&hdev->cmd_q);
5308 if (!skb)
5309 return true;
5310
5311 return bt_cb(skb)->req.start;
5312}
5313
42c6b129
JH
5314static void hci_resend_last(struct hci_dev *hdev)
5315{
5316 struct hci_command_hdr *sent;
5317 struct sk_buff *skb;
5318 u16 opcode;
5319
5320 if (!hdev->sent_cmd)
5321 return;
5322
5323 sent = (void *) hdev->sent_cmd->data;
5324 opcode = __le16_to_cpu(sent->opcode);
5325 if (opcode == HCI_OP_RESET)
5326 return;
5327
5328 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5329 if (!skb)
5330 return;
5331
5332 skb_queue_head(&hdev->cmd_q, skb);
5333 queue_work(hdev->workqueue, &hdev->cmd_work);
5334}
5335
9238f36a
JH
5336void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5337{
5338 hci_req_complete_t req_complete = NULL;
5339 struct sk_buff *skb;
5340 unsigned long flags;
5341
5342 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5343
42c6b129
JH
5344 /* If the completed command doesn't match the last one that was
5345 * sent we need to do special handling of it.
9238f36a 5346 */
42c6b129
JH
5347 if (!hci_sent_cmd_data(hdev, opcode)) {
5348 /* Some CSR based controllers generate a spontaneous
5349 * reset complete event during init and any pending
5350 * command will never be completed. In such a case we
5351 * need to resend whatever was the last sent
5352 * command.
5353 */
5354 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5355 hci_resend_last(hdev);
5356
9238f36a 5357 return;
42c6b129 5358 }
9238f36a
JH
5359
5360 /* If the command succeeded and there's still more commands in
5361 * this request the request is not yet complete.
5362 */
5363 if (!status && !hci_req_is_complete(hdev))
5364 return;
5365
5366 /* If this was the last command in a request the complete
5367 * callback would be found in hdev->sent_cmd instead of the
5368 * command queue (hdev->cmd_q).
5369 */
5370 if (hdev->sent_cmd) {
5371 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
53e21fbc
JH
5372
5373 if (req_complete) {
5374 /* We must set the complete callback to NULL to
5375 * avoid calling the callback more than once if
5376 * this function gets called again.
5377 */
5378 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5379
9238f36a 5380 goto call_complete;
53e21fbc 5381 }
9238f36a
JH
5382 }
5383
5384 /* Remove all pending commands belonging to this request */
5385 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5386 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5387 if (bt_cb(skb)->req.start) {
5388 __skb_queue_head(&hdev->cmd_q, skb);
5389 break;
5390 }
5391
5392 req_complete = bt_cb(skb)->req.complete;
5393 kfree_skb(skb);
5394 }
5395 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5396
5397call_complete:
5398 if (req_complete)
5399 req_complete(hdev, status);
5400}
5401
b78752cc 5402static void hci_rx_work(struct work_struct *work)
1da177e4 5403{
b78752cc 5404 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
5405 struct sk_buff *skb;
5406
5407 BT_DBG("%s", hdev->name);
5408
1da177e4 5409 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
5410 /* Send copy to monitor */
5411 hci_send_to_monitor(hdev, skb);
5412
1da177e4
LT
5413 if (atomic_read(&hdev->promisc)) {
5414 /* Send copy to the sockets */
470fe1b5 5415 hci_send_to_sock(hdev, skb);
1da177e4
LT
5416 }
5417
fee746b0 5418 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1da177e4
LT
5419 kfree_skb(skb);
5420 continue;
5421 }
5422
5423 if (test_bit(HCI_INIT, &hdev->flags)) {
5424 /* Don't process data packets in this state. */
0d48d939 5425 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
5426 case HCI_ACLDATA_PKT:
5427 case HCI_SCODATA_PKT:
5428 kfree_skb(skb);
5429 continue;
3ff50b79 5430 }
1da177e4
LT
5431 }
5432
5433 /* Process frame */
0d48d939 5434 switch (bt_cb(skb)->pkt_type) {
1da177e4 5435 case HCI_EVENT_PKT:
b78752cc 5436 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
5437 hci_event_packet(hdev, skb);
5438 break;
5439
5440 case HCI_ACLDATA_PKT:
5441 BT_DBG("%s ACL data packet", hdev->name);
5442 hci_acldata_packet(hdev, skb);
5443 break;
5444
5445 case HCI_SCODATA_PKT:
5446 BT_DBG("%s SCO data packet", hdev->name);
5447 hci_scodata_packet(hdev, skb);
5448 break;
5449
5450 default:
5451 kfree_skb(skb);
5452 break;
5453 }
5454 }
1da177e4
LT
5455}
5456
c347b765 5457static void hci_cmd_work(struct work_struct *work)
1da177e4 5458{
c347b765 5459 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
5460 struct sk_buff *skb;
5461
2104786b
AE
5462 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5463 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 5464
1da177e4 5465 /* Send queued commands */
5a08ecce
AE
5466 if (atomic_read(&hdev->cmd_cnt)) {
5467 skb = skb_dequeue(&hdev->cmd_q);
5468 if (!skb)
5469 return;
5470
7585b97a 5471 kfree_skb(hdev->sent_cmd);
1da177e4 5472
a675d7f1 5473 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 5474 if (hdev->sent_cmd) {
1da177e4 5475 atomic_dec(&hdev->cmd_cnt);
57d17d70 5476 hci_send_frame(hdev, skb);
7bdb8a5c 5477 if (test_bit(HCI_RESET, &hdev->flags))
65cc2b49 5478 cancel_delayed_work(&hdev->cmd_timer);
7bdb8a5c 5479 else
65cc2b49
MH
5480 schedule_delayed_work(&hdev->cmd_timer,
5481 HCI_CMD_TIMEOUT);
1da177e4
LT
5482 } else {
5483 skb_queue_head(&hdev->cmd_q, skb);
c347b765 5484 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
5485 }
5486 }
5487}
b1efcc28
AG
5488
5489void hci_req_add_le_scan_disable(struct hci_request *req)
5490{
5491 struct hci_cp_le_set_scan_enable cp;
5492
5493 memset(&cp, 0, sizeof(cp));
5494 cp.enable = LE_SCAN_DISABLE;
5495 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5496}
a4790dbd 5497
8540f6c0
MH
5498static void add_to_white_list(struct hci_request *req,
5499 struct hci_conn_params *params)
5500{
5501 struct hci_cp_le_add_to_white_list cp;
5502
5503 cp.bdaddr_type = params->addr_type;
5504 bacpy(&cp.bdaddr, &params->addr);
5505
5506 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
5507}
5508
5509static u8 update_white_list(struct hci_request *req)
5510{
5511 struct hci_dev *hdev = req->hdev;
5512 struct hci_conn_params *params;
5513 struct bdaddr_list *b;
5514 uint8_t white_list_entries = 0;
5515
5516 /* Go through the current white list programmed into the
5517 * controller one by one and check if that address is still
5518 * in the list of pending connections or list of devices to
5519 * report. If not present in either list, then queue the
5520 * command to remove it from the controller.
5521 */
5522 list_for_each_entry(b, &hdev->le_white_list, list) {
5523 struct hci_cp_le_del_from_white_list cp;
5524
5525 if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
5526 &b->bdaddr, b->bdaddr_type) ||
5527 hci_pend_le_action_lookup(&hdev->pend_le_reports,
5528 &b->bdaddr, b->bdaddr_type)) {
5529 white_list_entries++;
5530 continue;
5531 }
5532
5533 cp.bdaddr_type = b->bdaddr_type;
5534 bacpy(&cp.bdaddr, &b->bdaddr);
5535
5536 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
5537 sizeof(cp), &cp);
5538 }
5539
5540 /* Since all no-longer-valid white list entries have been
5541 * removed, walk through the list of pending connections
5542 * and ensure that any new device gets programmed into
5543 * the controller.
5544 *
5545 * If the list of devices is larger than the number of
5546 * available white list entries in the controller, then
5547 * just abort and return a filter policy value that does not
5548 * use the white list.
5549 */
5550 list_for_each_entry(params, &hdev->pend_le_conns, action) {
5551 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5552 &params->addr, params->addr_type))
5553 continue;
5554
5555 if (white_list_entries >= hdev->le_white_list_size) {
5556 /* Select filter policy to accept all advertising */
5557 return 0x00;
5558 }
5559
66d8e837
MH
5560 if (hci_find_irk_by_addr(hdev, &params->addr,
5561 params->addr_type)) {
5562 /* White list can not be used with RPAs */
5563 return 0x00;
5564 }
5565
8540f6c0
MH
5566 white_list_entries++;
5567 add_to_white_list(req, params);
5568 }
5569
5570 /* After adding all new pending connections, walk through
5571 * the list of pending reports and also add these to the
5572 * white list if there is still space.
5573 */
5574 list_for_each_entry(params, &hdev->pend_le_reports, action) {
5575 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5576 &params->addr, params->addr_type))
5577 continue;
5578
5579 if (white_list_entries >= hdev->le_white_list_size) {
5580 /* Select filter policy to accept all advertising */
5581 return 0x00;
5582 }
5583
66d8e837
MH
5584 if (hci_find_irk_by_addr(hdev, &params->addr,
5585 params->addr_type)) {
5586 /* White list can not be used with RPAs */
5587 return 0x00;
5588 }
5589
8540f6c0
MH
5590 white_list_entries++;
5591 add_to_white_list(req, params);
5592 }
5593
5594 /* Select filter policy to use white list */
5595 return 0x01;
5596}
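/* The value returned above becomes param_cp.filter_policy in
 * hci_req_add_le_passive_scan() below: 0x00 accepts all advertising
 * packets, while 0x01 restricts reports to white list entries.
 */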
5597
8ef30fd3
AG
5598void hci_req_add_le_passive_scan(struct hci_request *req)
5599{
5600 struct hci_cp_le_set_scan_param param_cp;
5601 struct hci_cp_le_set_scan_enable enable_cp;
5602 struct hci_dev *hdev = req->hdev;
5603 u8 own_addr_type;
8540f6c0 5604 u8 filter_policy;
8ef30fd3 5605
6ab535a7
MH
5606 /* Set require_privacy to false since no SCAN_REQ are sent
5607 * during passive scanning. Not using an unresolvable address
5608 * here is important so that peer devices using direct
5609 * advertising with our address will be correctly reported
5610 * by the controller.
8ef30fd3 5611 */
6ab535a7 5612 if (hci_update_random_address(req, false, &own_addr_type))
8ef30fd3
AG
5613 return;
5614
8540f6c0
MH
5615 /* Adding or removing entries from the white list must
5616 * happen before enabling scanning. The controller does
5617 * not allow white list modification while scanning.
5618 */
5619 filter_policy = update_white_list(req);
5620
8ef30fd3 5621	memset(&param_cp, 0, sizeof(param_cp));
5622 param_cp.type = LE_SCAN_PASSIVE;
5623 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5624 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5625 param_cp.own_address_type = own_addr_type;
8540f6c0 5626 param_cp.filter_policy = filter_policy;
8ef30fd3 5627	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5628 &param_cp);
5629
5630 memset(&enable_cp, 0, sizeof(enable_cp));
5631 enable_cp.enable = LE_SCAN_ENABLE;
4340a124 5632 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
8ef30fd3 5633	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5634 &enable_cp);
5635}
5636
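/* Note that hdev->le_scan_interval and hdev->le_scan_window used above are
 * expressed in units of 0.625 ms, per the LE Set Scan Parameters command.
 * A hypothetical debugging helper (not part of this file) for converting
 * such a value to milliseconds:
 *
 *	static inline u32 le_scan_units_to_ms(u16 units)
 *	{
 *		return ((u32)units * 625) / 1000;
 *	}
 *
 * For example, an interval of 0x0060 (96 units) corresponds to 60 ms.
 */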
a4790dbd 5637static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5638{
5639 if (status)
5640 BT_DBG("HCI request failed to update background scanning: "
5641 "status 0x%2.2x", status);
5642}
5643
5644/* This function controls the background scanning based on the
5645 * hdev->pend_le_conns list. If there are pending LE connections we start
5646 * the background scanning, otherwise we stop it.
5647 *
5648 * This function requires that the caller hold hdev->lock.
5649 */
5650void hci_update_background_scan(struct hci_dev *hdev)
5651{
a4790dbd 5652	struct hci_request req;
5653 struct hci_conn *conn;
5654 int err;
5655
c20c02d5 5656	if (!test_bit(HCI_UP, &hdev->flags) ||
5657 test_bit(HCI_INIT, &hdev->flags) ||
5658 test_bit(HCI_SETUP, &hdev->dev_flags) ||
d603b76b 5659 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
b8221770 5660 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
c20c02d5 5661 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
1c1697c0 5662		return;
5663
a70f4b5f 5664	/* No point in doing scanning if LE support hasn't been enabled */
5665 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5666 return;
5667
ae23ada4 5668	/* If discovery is active, don't interfere with it */
5669 if (hdev->discovery.state != DISCOVERY_STOPPED)
5670 return;
5671
a4790dbd 5672	hci_req_init(&req, hdev);
5673
d1d588c1 5674 if (list_empty(&hdev->pend_le_conns) &&
66f8455a 5675 list_empty(&hdev->pend_le_reports)) {
0d2bf134 5676		/* If there are no pending LE connections or devices
5677		 * to be scanned for, we should stop the background
5678		 * scanning.
a4790dbd 5679		 */
5680
5681 /* If controller is not scanning we are done. */
5682 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5683 return;
5684
5685 hci_req_add_le_scan_disable(&req);
5686
5687 BT_DBG("%s stopping background scanning", hdev->name);
5688 } else {
a4790dbd 5689		/* If there is at least one pending LE connection, we should
5690 * keep the background scan running.
5691 */
5692
a4790dbd 5693		/* If controller is connecting, we should not start scanning
5694 * since some controllers are not able to scan and connect at
5695 * the same time.
5696 */
5697 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5698 if (conn)
5699 return;
5700
4340a124 5701		/* If controller is currently scanning, we stop it to ensure we
5702		 * don't miss any advertising (due to the duplicates filter).
5703 */
5704 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5705 hci_req_add_le_scan_disable(&req);
5706
8ef30fd3 5707 hci_req_add_le_passive_scan(&req);
a4790dbd 5708
5709 BT_DBG("%s starting background scanning", hdev->name);
5710 }
5711
5712 err = hci_req_run(&req, update_background_scan_complete);
5713 if (err)
5714 BT_ERR("Failed to run HCI request: err %d", err);
5715}
432df05e 5716
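/* Since hci_update_background_scan() must be called with hdev->lock held,
 * an illustrative (hypothetical) call site looks like:
 *
 *	hci_dev_lock(hdev);
 *	hci_update_background_scan(hdev);
 *	hci_dev_unlock(hdev);
 */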
22f433dc 5717static bool disconnected_whitelist_entries(struct hci_dev *hdev)
5718{
5719 struct bdaddr_list *b;
5720
5721 list_for_each_entry(b, &hdev->whitelist, list) {
5722 struct hci_conn *conn;
5723
5724 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
5725 if (!conn)
5726 return true;
5727
5728 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
5729 return true;
5730 }
5731
5732 return false;
5733}
5734
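/* Entries on hdev->whitelist are managed outside this helper, e.g. from the
 * management interface. A sketch of how one would be added, assuming the
 * generic hci_bdaddr_list_add() helper from earlier in this file:
 *
 *	hci_bdaddr_list_add(&hdev->whitelist, &bdaddr, BDADDR_BREDR);
 *	hci_update_page_scan(hdev, NULL);
 */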
432df05e 5735void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req)
5736{
5737 u8 scan;
5738
5739 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5740 return;
5741
5742 if (!hdev_is_powered(hdev))
5743 return;
5744
5745 if (mgmt_powering_down(hdev))
5746 return;
5747
5748 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
22f433dc 5749 disconnected_whitelist_entries(hdev))
432df05e 5750		scan = SCAN_PAGE;
5751 else
5752 scan = SCAN_DISABLED;
5753
5754 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
5755 return;
5756
5757 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
5758 scan |= SCAN_INQUIRY;
5759
5760 if (req)
5761 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5762 else
5763 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5764}
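/* For reference, the scan-enable bits written above are defined in
 * include/net/bluetooth/hci.h:
 *
 *	SCAN_DISABLED	0x00
 *	SCAN_INQUIRY	0x01
 *	SCAN_PAGE	0x02
 *
 * so a device that is both connectable and discoverable writes
 * SCAN_PAGE | SCAN_INQUIRY (0x03) to the controller.
 */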