]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - net/bluetooth/hci_core.c
ieee802154: iface: move multiple node type check
[mirror_ubuntu-artful-kernel.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
8c520a59 28#include <linux/export.h>
3df92b31 29#include <linux/idr.h>
8c520a59 30#include <linux/rfkill.h>
baf27f6e 31#include <linux/debugfs.h>
99780a7b 32#include <linux/crypto.h>
47219839 33#include <asm/unaligned.h>
1da177e4
LT
34
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
4bc58f51 37#include <net/bluetooth/l2cap.h>
af58925c 38#include <net/bluetooth/mgmt.h>
1da177e4 39
0857dd3b 40#include "hci_request.h"
970c4e46
JH
41#include "smp.h"
42
b78752cc 43static void hci_rx_work(struct work_struct *work);
c347b765 44static void hci_cmd_work(struct work_struct *work);
3eff45ea 45static void hci_tx_work(struct work_struct *work);
1da177e4 46
1da177e4
LT
47/* HCI device list */
48LIST_HEAD(hci_dev_list);
49DEFINE_RWLOCK(hci_dev_list_lock);
50
51/* HCI callback list */
52LIST_HEAD(hci_cb_list);
53DEFINE_RWLOCK(hci_cb_list_lock);
54
3df92b31
SL
55/* HCI ID Numbering */
56static DEFINE_IDA(hci_index_ida);
57
899de765
MH
58/* ----- HCI requests ----- */
59
60#define HCI_REQ_DONE 0
61#define HCI_REQ_PEND 1
62#define HCI_REQ_CANCELED 2
63
64#define hci_req_lock(d) mutex_lock(&d->req_lock)
65#define hci_req_unlock(d) mutex_unlock(&d->req_lock)
66
1da177e4
LT
67/* ---- HCI notifications ---- */
68
6516455d 69static void hci_notify(struct hci_dev *hdev, int event)
1da177e4 70{
040030ef 71 hci_sock_dev_event(hdev, event);
1da177e4
LT
72}
73
baf27f6e
MH
74/* ---- HCI debugfs entries ---- */
75
4b4148e9
MH
76static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
77 size_t count, loff_t *ppos)
78{
79 struct hci_dev *hdev = file->private_data;
80 char buf[3];
81
111902f7 82 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
4b4148e9
MH
83 buf[1] = '\n';
84 buf[2] = '\0';
85 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
86}
87
88static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
89 size_t count, loff_t *ppos)
90{
91 struct hci_dev *hdev = file->private_data;
92 struct sk_buff *skb;
93 char buf[32];
94 size_t buf_size = min(count, (sizeof(buf)-1));
95 bool enable;
96 int err;
97
98 if (!test_bit(HCI_UP, &hdev->flags))
99 return -ENETDOWN;
100
101 if (copy_from_user(buf, user_buf, buf_size))
102 return -EFAULT;
103
104 buf[buf_size] = '\0';
105 if (strtobool(buf, &enable))
106 return -EINVAL;
107
111902f7 108 if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
4b4148e9
MH
109 return -EALREADY;
110
111 hci_req_lock(hdev);
112 if (enable)
113 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
114 HCI_CMD_TIMEOUT);
115 else
116 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
117 HCI_CMD_TIMEOUT);
118 hci_req_unlock(hdev);
119
120 if (IS_ERR(skb))
121 return PTR_ERR(skb);
122
123 err = -bt_to_errno(skb->data[0]);
124 kfree_skb(skb);
125
126 if (err < 0)
127 return err;
128
111902f7 129 change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
4b4148e9
MH
130
131 return count;
132}
133
134static const struct file_operations dut_mode_fops = {
135 .open = simple_open,
136 .read = dut_mode_read,
137 .write = dut_mode_write,
138 .llseek = default_llseek,
139};
140
dfb826a8
MH
141static int features_show(struct seq_file *f, void *ptr)
142{
143 struct hci_dev *hdev = f->private;
144 u8 p;
145
146 hci_dev_lock(hdev);
147 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
cfbb2b5b 148 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
dfb826a8
MH
149 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
150 hdev->features[p][0], hdev->features[p][1],
151 hdev->features[p][2], hdev->features[p][3],
152 hdev->features[p][4], hdev->features[p][5],
153 hdev->features[p][6], hdev->features[p][7]);
154 }
cfbb2b5b
MH
155 if (lmp_le_capable(hdev))
156 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
157 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
158 hdev->le_features[0], hdev->le_features[1],
159 hdev->le_features[2], hdev->le_features[3],
160 hdev->le_features[4], hdev->le_features[5],
161 hdev->le_features[6], hdev->le_features[7]);
dfb826a8
MH
162 hci_dev_unlock(hdev);
163
164 return 0;
165}
166
167static int features_open(struct inode *inode, struct file *file)
168{
169 return single_open(file, features_show, inode->i_private);
170}
171
172static const struct file_operations features_fops = {
173 .open = features_open,
174 .read = seq_read,
175 .llseek = seq_lseek,
176 .release = single_release,
177};
178
70afe0b8
MH
179static int blacklist_show(struct seq_file *f, void *p)
180{
181 struct hci_dev *hdev = f->private;
182 struct bdaddr_list *b;
183
184 hci_dev_lock(hdev);
185 list_for_each_entry(b, &hdev->blacklist, list)
b25f0785 186 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
70afe0b8
MH
187 hci_dev_unlock(hdev);
188
189 return 0;
190}
191
192static int blacklist_open(struct inode *inode, struct file *file)
193{
194 return single_open(file, blacklist_show, inode->i_private);
195}
196
197static const struct file_operations blacklist_fops = {
198 .open = blacklist_open,
199 .read = seq_read,
200 .llseek = seq_lseek,
201 .release = single_release,
202};
203
47219839
MH
204static int uuids_show(struct seq_file *f, void *p)
205{
206 struct hci_dev *hdev = f->private;
207 struct bt_uuid *uuid;
208
209 hci_dev_lock(hdev);
210 list_for_each_entry(uuid, &hdev->uuids, list) {
58f01aa9
MH
211 u8 i, val[16];
212
213 /* The Bluetooth UUID values are stored in big endian,
214 * but with reversed byte order. So convert them into
215 * the right order for the %pUb modifier.
216 */
217 for (i = 0; i < 16; i++)
218 val[i] = uuid->uuid[15 - i];
219
220 seq_printf(f, "%pUb\n", val);
47219839
MH
221 }
222 hci_dev_unlock(hdev);
223
224 return 0;
225}
226
227static int uuids_open(struct inode *inode, struct file *file)
228{
229 return single_open(file, uuids_show, inode->i_private);
230}
231
232static const struct file_operations uuids_fops = {
233 .open = uuids_open,
234 .read = seq_read,
235 .llseek = seq_lseek,
236 .release = single_release,
237};
238
baf27f6e
MH
239static int inquiry_cache_show(struct seq_file *f, void *p)
240{
241 struct hci_dev *hdev = f->private;
242 struct discovery_state *cache = &hdev->discovery;
243 struct inquiry_entry *e;
244
245 hci_dev_lock(hdev);
246
247 list_for_each_entry(e, &cache->all, all) {
248 struct inquiry_data *data = &e->data;
249 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
250 &data->bdaddr,
251 data->pscan_rep_mode, data->pscan_period_mode,
252 data->pscan_mode, data->dev_class[2],
253 data->dev_class[1], data->dev_class[0],
254 __le16_to_cpu(data->clock_offset),
255 data->rssi, data->ssp_mode, e->timestamp);
256 }
257
258 hci_dev_unlock(hdev);
259
260 return 0;
261}
262
263static int inquiry_cache_open(struct inode *inode, struct file *file)
264{
265 return single_open(file, inquiry_cache_show, inode->i_private);
266}
267
268static const struct file_operations inquiry_cache_fops = {
269 .open = inquiry_cache_open,
270 .read = seq_read,
271 .llseek = seq_lseek,
272 .release = single_release,
273};
274
02d08d15
MH
275static int link_keys_show(struct seq_file *f, void *ptr)
276{
277 struct hci_dev *hdev = f->private;
0378b597 278 struct link_key *key;
02d08d15 279
0378b597
JH
280 rcu_read_lock();
281 list_for_each_entry_rcu(key, &hdev->link_keys, list)
02d08d15
MH
282 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
283 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
0378b597 284 rcu_read_unlock();
02d08d15
MH
285
286 return 0;
287}
288
289static int link_keys_open(struct inode *inode, struct file *file)
290{
291 return single_open(file, link_keys_show, inode->i_private);
292}
293
294static const struct file_operations link_keys_fops = {
295 .open = link_keys_open,
296 .read = seq_read,
297 .llseek = seq_lseek,
298 .release = single_release,
299};
300
babdbb3c
MH
301static int dev_class_show(struct seq_file *f, void *ptr)
302{
303 struct hci_dev *hdev = f->private;
304
305 hci_dev_lock(hdev);
306 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
307 hdev->dev_class[1], hdev->dev_class[0]);
308 hci_dev_unlock(hdev);
309
310 return 0;
311}
312
313static int dev_class_open(struct inode *inode, struct file *file)
314{
315 return single_open(file, dev_class_show, inode->i_private);
316}
317
318static const struct file_operations dev_class_fops = {
319 .open = dev_class_open,
320 .read = seq_read,
321 .llseek = seq_lseek,
322 .release = single_release,
323};
324
041000b9
MH
325static int voice_setting_get(void *data, u64 *val)
326{
327 struct hci_dev *hdev = data;
328
329 hci_dev_lock(hdev);
330 *val = hdev->voice_setting;
331 hci_dev_unlock(hdev);
332
333 return 0;
334}
335
336DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
337 NULL, "0x%4.4llx\n");
338
ebd1e33b
MH
339static int auto_accept_delay_set(void *data, u64 val)
340{
341 struct hci_dev *hdev = data;
342
343 hci_dev_lock(hdev);
344 hdev->auto_accept_delay = val;
345 hci_dev_unlock(hdev);
346
347 return 0;
348}
349
350static int auto_accept_delay_get(void *data, u64 *val)
351{
352 struct hci_dev *hdev = data;
353
354 hci_dev_lock(hdev);
355 *val = hdev->auto_accept_delay;
356 hci_dev_unlock(hdev);
357
358 return 0;
359}
360
361DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
362 auto_accept_delay_set, "%llu\n");
363
5afeac14
MH
364static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
365 size_t count, loff_t *ppos)
366{
367 struct hci_dev *hdev = file->private_data;
368 char buf[3];
369
111902f7 370 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
5afeac14
MH
371 buf[1] = '\n';
372 buf[2] = '\0';
373 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
374}
375
376static ssize_t force_sc_support_write(struct file *file,
377 const char __user *user_buf,
378 size_t count, loff_t *ppos)
379{
380 struct hci_dev *hdev = file->private_data;
381 char buf[32];
382 size_t buf_size = min(count, (sizeof(buf)-1));
383 bool enable;
384
385 if (test_bit(HCI_UP, &hdev->flags))
386 return -EBUSY;
387
388 if (copy_from_user(buf, user_buf, buf_size))
389 return -EFAULT;
390
391 buf[buf_size] = '\0';
392 if (strtobool(buf, &enable))
393 return -EINVAL;
394
111902f7 395 if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
5afeac14
MH
396 return -EALREADY;
397
111902f7 398 change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
5afeac14
MH
399
400 return count;
401}
402
403static const struct file_operations force_sc_support_fops = {
404 .open = simple_open,
405 .read = force_sc_support_read,
406 .write = force_sc_support_write,
407 .llseek = default_llseek,
408};
409
858cdc78
JH
410static ssize_t force_lesc_support_read(struct file *file, char __user *user_buf,
411 size_t count, loff_t *ppos)
412{
413 struct hci_dev *hdev = file->private_data;
414 char buf[3];
415
416 buf[0] = test_bit(HCI_FORCE_LESC, &hdev->dbg_flags) ? 'Y': 'N';
417 buf[1] = '\n';
418 buf[2] = '\0';
419 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
420}
421
422static ssize_t force_lesc_support_write(struct file *file,
423 const char __user *user_buf,
424 size_t count, loff_t *ppos)
425{
426 struct hci_dev *hdev = file->private_data;
427 char buf[32];
428 size_t buf_size = min(count, (sizeof(buf)-1));
429 bool enable;
430
431 if (copy_from_user(buf, user_buf, buf_size))
432 return -EFAULT;
433
434 buf[buf_size] = '\0';
435 if (strtobool(buf, &enable))
436 return -EINVAL;
437
438 if (enable == test_bit(HCI_FORCE_LESC, &hdev->dbg_flags))
439 return -EALREADY;
440
441 change_bit(HCI_FORCE_LESC, &hdev->dbg_flags);
442
443 return count;
444}
445
446static const struct file_operations force_lesc_support_fops = {
447 .open = simple_open,
448 .read = force_lesc_support_read,
449 .write = force_lesc_support_write,
450 .llseek = default_llseek,
451};
452
134c2a89
MH
453static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
454 size_t count, loff_t *ppos)
455{
456 struct hci_dev *hdev = file->private_data;
457 char buf[3];
458
459 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
460 buf[1] = '\n';
461 buf[2] = '\0';
462 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
463}
464
465static const struct file_operations sc_only_mode_fops = {
466 .open = simple_open,
467 .read = sc_only_mode_read,
468 .llseek = default_llseek,
469};
470
2bfa3531
MH
471static int idle_timeout_set(void *data, u64 val)
472{
473 struct hci_dev *hdev = data;
474
475 if (val != 0 && (val < 500 || val > 3600000))
476 return -EINVAL;
477
478 hci_dev_lock(hdev);
2be48b65 479 hdev->idle_timeout = val;
2bfa3531
MH
480 hci_dev_unlock(hdev);
481
482 return 0;
483}
484
485static int idle_timeout_get(void *data, u64 *val)
486{
487 struct hci_dev *hdev = data;
488
489 hci_dev_lock(hdev);
490 *val = hdev->idle_timeout;
491 hci_dev_unlock(hdev);
492
493 return 0;
494}
495
496DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
497 idle_timeout_set, "%llu\n");
498
c982b2ea
JH
499static int rpa_timeout_set(void *data, u64 val)
500{
501 struct hci_dev *hdev = data;
502
503 /* Require the RPA timeout to be at least 30 seconds and at most
504 * 24 hours.
505 */
506 if (val < 30 || val > (60 * 60 * 24))
507 return -EINVAL;
508
509 hci_dev_lock(hdev);
510 hdev->rpa_timeout = val;
511 hci_dev_unlock(hdev);
512
513 return 0;
514}
515
516static int rpa_timeout_get(void *data, u64 *val)
517{
518 struct hci_dev *hdev = data;
519
520 hci_dev_lock(hdev);
521 *val = hdev->rpa_timeout;
522 hci_dev_unlock(hdev);
523
524 return 0;
525}
526
527DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
528 rpa_timeout_set, "%llu\n");
529
2bfa3531
MH
530static int sniff_min_interval_set(void *data, u64 val)
531{
532 struct hci_dev *hdev = data;
533
534 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
535 return -EINVAL;
536
537 hci_dev_lock(hdev);
2be48b65 538 hdev->sniff_min_interval = val;
2bfa3531
MH
539 hci_dev_unlock(hdev);
540
541 return 0;
542}
543
544static int sniff_min_interval_get(void *data, u64 *val)
545{
546 struct hci_dev *hdev = data;
547
548 hci_dev_lock(hdev);
549 *val = hdev->sniff_min_interval;
550 hci_dev_unlock(hdev);
551
552 return 0;
553}
554
555DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
556 sniff_min_interval_set, "%llu\n");
557
558static int sniff_max_interval_set(void *data, u64 val)
559{
560 struct hci_dev *hdev = data;
561
562 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
563 return -EINVAL;
564
565 hci_dev_lock(hdev);
2be48b65 566 hdev->sniff_max_interval = val;
2bfa3531
MH
567 hci_dev_unlock(hdev);
568
569 return 0;
570}
571
572static int sniff_max_interval_get(void *data, u64 *val)
573{
574 struct hci_dev *hdev = data;
575
576 hci_dev_lock(hdev);
577 *val = hdev->sniff_max_interval;
578 hci_dev_unlock(hdev);
579
580 return 0;
581}
582
583DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
584 sniff_max_interval_set, "%llu\n");
585
31ad1691
AK
586static int conn_info_min_age_set(void *data, u64 val)
587{
588 struct hci_dev *hdev = data;
589
590 if (val == 0 || val > hdev->conn_info_max_age)
591 return -EINVAL;
592
593 hci_dev_lock(hdev);
594 hdev->conn_info_min_age = val;
595 hci_dev_unlock(hdev);
596
597 return 0;
598}
599
600static int conn_info_min_age_get(void *data, u64 *val)
601{
602 struct hci_dev *hdev = data;
603
604 hci_dev_lock(hdev);
605 *val = hdev->conn_info_min_age;
606 hci_dev_unlock(hdev);
607
608 return 0;
609}
610
611DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
612 conn_info_min_age_set, "%llu\n");
613
614static int conn_info_max_age_set(void *data, u64 val)
615{
616 struct hci_dev *hdev = data;
617
618 if (val == 0 || val < hdev->conn_info_min_age)
619 return -EINVAL;
620
621 hci_dev_lock(hdev);
622 hdev->conn_info_max_age = val;
623 hci_dev_unlock(hdev);
624
625 return 0;
626}
627
628static int conn_info_max_age_get(void *data, u64 *val)
629{
630 struct hci_dev *hdev = data;
631
632 hci_dev_lock(hdev);
633 *val = hdev->conn_info_max_age;
634 hci_dev_unlock(hdev);
635
636 return 0;
637}
638
639DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
640 conn_info_max_age_set, "%llu\n");
641
ac345813
MH
642static int identity_show(struct seq_file *f, void *p)
643{
644 struct hci_dev *hdev = f->private;
a1f4c318 645 bdaddr_t addr;
ac345813
MH
646 u8 addr_type;
647
648 hci_dev_lock(hdev);
649
a1f4c318 650 hci_copy_identity_address(hdev, &addr, &addr_type);
ac345813 651
a1f4c318 652 seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
473deef2 653 16, hdev->irk, &hdev->rpa);
ac345813
MH
654
655 hci_dev_unlock(hdev);
656
657 return 0;
658}
659
660static int identity_open(struct inode *inode, struct file *file)
661{
662 return single_open(file, identity_show, inode->i_private);
663}
664
665static const struct file_operations identity_fops = {
666 .open = identity_open,
667 .read = seq_read,
668 .llseek = seq_lseek,
669 .release = single_release,
670};
671
7a4cd51d
MH
672static int random_address_show(struct seq_file *f, void *p)
673{
674 struct hci_dev *hdev = f->private;
675
676 hci_dev_lock(hdev);
677 seq_printf(f, "%pMR\n", &hdev->random_addr);
678 hci_dev_unlock(hdev);
679
680 return 0;
681}
682
683static int random_address_open(struct inode *inode, struct file *file)
684{
685 return single_open(file, random_address_show, inode->i_private);
686}
687
688static const struct file_operations random_address_fops = {
689 .open = random_address_open,
690 .read = seq_read,
691 .llseek = seq_lseek,
692 .release = single_release,
693};
694
e7b8fc92
MH
695static int static_address_show(struct seq_file *f, void *p)
696{
697 struct hci_dev *hdev = f->private;
698
699 hci_dev_lock(hdev);
700 seq_printf(f, "%pMR\n", &hdev->static_addr);
701 hci_dev_unlock(hdev);
702
703 return 0;
704}
705
706static int static_address_open(struct inode *inode, struct file *file)
707{
708 return single_open(file, static_address_show, inode->i_private);
709}
710
711static const struct file_operations static_address_fops = {
712 .open = static_address_open,
713 .read = seq_read,
714 .llseek = seq_lseek,
715 .release = single_release,
716};
717
b32bba6c
MH
718static ssize_t force_static_address_read(struct file *file,
719 char __user *user_buf,
720 size_t count, loff_t *ppos)
92202185 721{
b32bba6c
MH
722 struct hci_dev *hdev = file->private_data;
723 char buf[3];
92202185 724
111902f7 725 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
b32bba6c
MH
726 buf[1] = '\n';
727 buf[2] = '\0';
728 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
92202185
MH
729}
730
b32bba6c
MH
731static ssize_t force_static_address_write(struct file *file,
732 const char __user *user_buf,
733 size_t count, loff_t *ppos)
92202185 734{
b32bba6c
MH
735 struct hci_dev *hdev = file->private_data;
736 char buf[32];
737 size_t buf_size = min(count, (sizeof(buf)-1));
738 bool enable;
92202185 739
b32bba6c
MH
740 if (test_bit(HCI_UP, &hdev->flags))
741 return -EBUSY;
92202185 742
b32bba6c
MH
743 if (copy_from_user(buf, user_buf, buf_size))
744 return -EFAULT;
745
746 buf[buf_size] = '\0';
747 if (strtobool(buf, &enable))
748 return -EINVAL;
749
111902f7 750 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
b32bba6c
MH
751 return -EALREADY;
752
111902f7 753 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
b32bba6c
MH
754
755 return count;
92202185
MH
756}
757
b32bba6c
MH
758static const struct file_operations force_static_address_fops = {
759 .open = simple_open,
760 .read = force_static_address_read,
761 .write = force_static_address_write,
762 .llseek = default_llseek,
763};
92202185 764
d2ab0ac1
MH
765static int white_list_show(struct seq_file *f, void *ptr)
766{
767 struct hci_dev *hdev = f->private;
768 struct bdaddr_list *b;
769
770 hci_dev_lock(hdev);
771 list_for_each_entry(b, &hdev->le_white_list, list)
772 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
773 hci_dev_unlock(hdev);
774
775 return 0;
776}
777
778static int white_list_open(struct inode *inode, struct file *file)
779{
780 return single_open(file, white_list_show, inode->i_private);
781}
782
783static const struct file_operations white_list_fops = {
784 .open = white_list_open,
785 .read = seq_read,
786 .llseek = seq_lseek,
787 .release = single_release,
788};
789
3698d704
MH
790static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
791{
792 struct hci_dev *hdev = f->private;
adae20cb 793 struct smp_irk *irk;
3698d704 794
adae20cb
JH
795 rcu_read_lock();
796 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3698d704
MH
797 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
798 &irk->bdaddr, irk->addr_type,
799 16, irk->val, &irk->rpa);
800 }
adae20cb 801 rcu_read_unlock();
3698d704
MH
802
803 return 0;
804}
805
806static int identity_resolving_keys_open(struct inode *inode, struct file *file)
807{
808 return single_open(file, identity_resolving_keys_show,
809 inode->i_private);
810}
811
812static const struct file_operations identity_resolving_keys_fops = {
813 .open = identity_resolving_keys_open,
814 .read = seq_read,
815 .llseek = seq_lseek,
816 .release = single_release,
817};
818
8f8625cd
MH
819static int long_term_keys_show(struct seq_file *f, void *ptr)
820{
821 struct hci_dev *hdev = f->private;
970d0f1b 822 struct smp_ltk *ltk;
8f8625cd 823
970d0f1b
JH
824 rcu_read_lock();
825 list_for_each_entry_rcu(ltk, &hdev->long_term_keys, list)
fe39c7b2 826 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
8f8625cd
MH
827 &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
828 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
fe39c7b2 829 __le64_to_cpu(ltk->rand), 16, ltk->val);
970d0f1b 830 rcu_read_unlock();
8f8625cd
MH
831
832 return 0;
833}
834
835static int long_term_keys_open(struct inode *inode, struct file *file)
836{
837 return single_open(file, long_term_keys_show, inode->i_private);
838}
839
840static const struct file_operations long_term_keys_fops = {
841 .open = long_term_keys_open,
842 .read = seq_read,
843 .llseek = seq_lseek,
844 .release = single_release,
845};
846
4e70c7e7
MH
847static int conn_min_interval_set(void *data, u64 val)
848{
849 struct hci_dev *hdev = data;
850
851 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
852 return -EINVAL;
853
854 hci_dev_lock(hdev);
2be48b65 855 hdev->le_conn_min_interval = val;
4e70c7e7
MH
856 hci_dev_unlock(hdev);
857
858 return 0;
859}
860
861static int conn_min_interval_get(void *data, u64 *val)
862{
863 struct hci_dev *hdev = data;
864
865 hci_dev_lock(hdev);
866 *val = hdev->le_conn_min_interval;
867 hci_dev_unlock(hdev);
868
869 return 0;
870}
871
872DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
873 conn_min_interval_set, "%llu\n");
874
875static int conn_max_interval_set(void *data, u64 val)
876{
877 struct hci_dev *hdev = data;
878
879 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
880 return -EINVAL;
881
882 hci_dev_lock(hdev);
2be48b65 883 hdev->le_conn_max_interval = val;
4e70c7e7
MH
884 hci_dev_unlock(hdev);
885
886 return 0;
887}
888
889static int conn_max_interval_get(void *data, u64 *val)
890{
891 struct hci_dev *hdev = data;
892
893 hci_dev_lock(hdev);
894 *val = hdev->le_conn_max_interval;
895 hci_dev_unlock(hdev);
896
897 return 0;
898}
899
900DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
901 conn_max_interval_set, "%llu\n");
902
816a93d1 903static int conn_latency_set(void *data, u64 val)
3f959d46
MH
904{
905 struct hci_dev *hdev = data;
906
816a93d1 907 if (val > 0x01f3)
3f959d46
MH
908 return -EINVAL;
909
910 hci_dev_lock(hdev);
816a93d1 911 hdev->le_conn_latency = val;
3f959d46
MH
912 hci_dev_unlock(hdev);
913
914 return 0;
915}
916
816a93d1 917static int conn_latency_get(void *data, u64 *val)
3f959d46
MH
918{
919 struct hci_dev *hdev = data;
920
921 hci_dev_lock(hdev);
816a93d1 922 *val = hdev->le_conn_latency;
3f959d46
MH
923 hci_dev_unlock(hdev);
924
925 return 0;
926}
927
816a93d1
MH
928DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
929 conn_latency_set, "%llu\n");
3f959d46 930
f1649577 931static int supervision_timeout_set(void *data, u64 val)
89863109 932{
f1649577 933 struct hci_dev *hdev = data;
89863109 934
f1649577
MH
935 if (val < 0x000a || val > 0x0c80)
936 return -EINVAL;
937
938 hci_dev_lock(hdev);
939 hdev->le_supv_timeout = val;
940 hci_dev_unlock(hdev);
941
942 return 0;
89863109
JR
943}
944
f1649577 945static int supervision_timeout_get(void *data, u64 *val)
89863109 946{
f1649577 947 struct hci_dev *hdev = data;
89863109 948
f1649577
MH
949 hci_dev_lock(hdev);
950 *val = hdev->le_supv_timeout;
951 hci_dev_unlock(hdev);
89863109 952
f1649577
MH
953 return 0;
954}
89863109 955
f1649577
MH
956DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
957 supervision_timeout_set, "%llu\n");
89863109 958
3f959d46
MH
959static int adv_channel_map_set(void *data, u64 val)
960{
961 struct hci_dev *hdev = data;
89863109 962
3f959d46
MH
963 if (val < 0x01 || val > 0x07)
964 return -EINVAL;
89863109 965
3f959d46
MH
966 hci_dev_lock(hdev);
967 hdev->le_adv_channel_map = val;
968 hci_dev_unlock(hdev);
89863109 969
3f959d46
MH
970 return 0;
971}
89863109 972
3f959d46 973static int adv_channel_map_get(void *data, u64 *val)
7d474e06 974{
3f959d46 975 struct hci_dev *hdev = data;
7d474e06
AG
976
977 hci_dev_lock(hdev);
3f959d46
MH
978 *val = hdev->le_adv_channel_map;
979 hci_dev_unlock(hdev);
7d474e06 980
3f959d46
MH
981 return 0;
982}
983
984DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
985 adv_channel_map_set, "%llu\n");
7d474e06 986
729a1051
GL
987static int adv_min_interval_set(void *data, u64 val)
988{
989 struct hci_dev *hdev = data;
990
991 if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
992 return -EINVAL;
993
994 hci_dev_lock(hdev);
995 hdev->le_adv_min_interval = val;
7d474e06
AG
996 hci_dev_unlock(hdev);
997
998 return 0;
999}
1000
729a1051 1001static int adv_min_interval_get(void *data, u64 *val)
7d474e06 1002{
729a1051
GL
1003 struct hci_dev *hdev = data;
1004
1005 hci_dev_lock(hdev);
1006 *val = hdev->le_adv_min_interval;
1007 hci_dev_unlock(hdev);
1008
1009 return 0;
7d474e06
AG
1010}
1011
729a1051
GL
1012DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
1013 adv_min_interval_set, "%llu\n");
1014
1015static int adv_max_interval_set(void *data, u64 val)
7d474e06 1016{
729a1051 1017 struct hci_dev *hdev = data;
7d474e06 1018
729a1051 1019 if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
7d474e06
AG
1020 return -EINVAL;
1021
729a1051
GL
1022 hci_dev_lock(hdev);
1023 hdev->le_adv_max_interval = val;
1024 hci_dev_unlock(hdev);
7d474e06 1025
729a1051
GL
1026 return 0;
1027}
7d474e06 1028
729a1051
GL
1029static int adv_max_interval_get(void *data, u64 *val)
1030{
1031 struct hci_dev *hdev = data;
7d474e06 1032
729a1051
GL
1033 hci_dev_lock(hdev);
1034 *val = hdev->le_adv_max_interval;
1035 hci_dev_unlock(hdev);
7d474e06 1036
729a1051
GL
1037 return 0;
1038}
7d474e06 1039
729a1051
GL
1040DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
1041 adv_max_interval_set, "%llu\n");
7d474e06 1042
0b3c7d37 1043static int device_list_show(struct seq_file *f, void *ptr)
7d474e06 1044{
0b3c7d37 1045 struct hci_dev *hdev = f->private;
7d474e06 1046 struct hci_conn_params *p;
40f4938a 1047 struct bdaddr_list *b;
7d474e06 1048
7d474e06 1049 hci_dev_lock(hdev);
40f4938a
MH
1050 list_for_each_entry(b, &hdev->whitelist, list)
1051 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
7d474e06 1052 list_for_each_entry(p, &hdev->le_conn_params, list) {
40f4938a 1053 seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type,
7d474e06 1054 p->auto_connect);
7d474e06 1055 }
7d474e06 1056 hci_dev_unlock(hdev);
7d474e06 1057
7d474e06
AG
1058 return 0;
1059}
7d474e06 1060
0b3c7d37 1061static int device_list_open(struct inode *inode, struct file *file)
7d474e06 1062{
0b3c7d37 1063 return single_open(file, device_list_show, inode->i_private);
7d474e06
AG
1064}
1065
0b3c7d37
MH
1066static const struct file_operations device_list_fops = {
1067 .open = device_list_open,
7d474e06 1068 .read = seq_read,
7d474e06
AG
1069 .llseek = seq_lseek,
1070 .release = single_release,
1071};
1072
1da177e4
LT
1073/* ---- HCI requests ---- */
1074
42c6b129 1075static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
1da177e4 1076{
42c6b129 1077 BT_DBG("%s result 0x%2.2x", hdev->name, result);
1da177e4
LT
1078
1079 if (hdev->req_status == HCI_REQ_PEND) {
1080 hdev->req_result = result;
1081 hdev->req_status = HCI_REQ_DONE;
1082 wake_up_interruptible(&hdev->req_wait_q);
1083 }
1084}
1085
1086static void hci_req_cancel(struct hci_dev *hdev, int err)
1087{
1088 BT_DBG("%s err 0x%2.2x", hdev->name, err);
1089
1090 if (hdev->req_status == HCI_REQ_PEND) {
1091 hdev->req_result = err;
1092 hdev->req_status = HCI_REQ_CANCELED;
1093 wake_up_interruptible(&hdev->req_wait_q);
1094 }
1095}
1096
77a63e0a
FW
1097static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
1098 u8 event)
75e84b7c
JH
1099{
1100 struct hci_ev_cmd_complete *ev;
1101 struct hci_event_hdr *hdr;
1102 struct sk_buff *skb;
1103
1104 hci_dev_lock(hdev);
1105
1106 skb = hdev->recv_evt;
1107 hdev->recv_evt = NULL;
1108
1109 hci_dev_unlock(hdev);
1110
1111 if (!skb)
1112 return ERR_PTR(-ENODATA);
1113
1114 if (skb->len < sizeof(*hdr)) {
1115 BT_ERR("Too short HCI event");
1116 goto failed;
1117 }
1118
1119 hdr = (void *) skb->data;
1120 skb_pull(skb, HCI_EVENT_HDR_SIZE);
1121
7b1abbbe
JH
1122 if (event) {
1123 if (hdr->evt != event)
1124 goto failed;
1125 return skb;
1126 }
1127
75e84b7c
JH
1128 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
1129 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
1130 goto failed;
1131 }
1132
1133 if (skb->len < sizeof(*ev)) {
1134 BT_ERR("Too short cmd_complete event");
1135 goto failed;
1136 }
1137
1138 ev = (void *) skb->data;
1139 skb_pull(skb, sizeof(*ev));
1140
1141 if (opcode == __le16_to_cpu(ev->opcode))
1142 return skb;
1143
1144 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
1145 __le16_to_cpu(ev->opcode));
1146
1147failed:
1148 kfree_skb(skb);
1149 return ERR_PTR(-ENODATA);
1150}
1151
/* Send a single HCI command and sleep until it completes.
 *
 * @event: non-zero to wait for a specific event instead of the normal
 *         Command Complete; @timeout is in jiffies.
 *
 * Caller must hold the request lock (hci_req_lock) — NOTE(review):
 * implied by the "__" prefix and the sibling hci_req_sync(); confirm
 * against callers. Returns the resulting event skb (caller frees) or
 * an ERR_PTR on failure/timeout/signal.
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	/* Queue ourselves before running the request so the completion
	 * wake-up cannot be missed.
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Controller status code -> negative errno */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* hci_req_cancel() stored a positive errno */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
1208
/* Convenience wrapper around __hci_cmd_sync_ev() that waits for the
 * default Command Complete event (event == 0).
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
1215
/* Execute request and wait for completion.
 *
 * @func builds the request (may add zero or more commands); the thread
 * then sleeps on req_wait_q until hci_req_sync_complete() fires, the
 * @timeout (jiffies) expires, or a signal arrives. Caller must already
 * hold the request lock; hci_req_sync() is the locking wrapper.
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	/* Let the caller populate the request */
	func(&req, opt);

	/* Register on the wait queue before running the request so the
	 * completion wake-up cannot race with us going to sleep.
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Translate the HCI status code into a negative errno */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* Cancellation stored a positive errno in req_result */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
1282
/* Locked wrapper around __hci_req_sync(): refuses to run while the
 * device is down and serializes all synchronous requests through the
 * per-device request lock.
 */
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
1300
/* Request builder: queue an HCI_Reset. The HCI_RESET flag marks that a
 * reset is in flight; it is consumed elsewhere when the command
 * completes.
 */
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}
1309
/* Stage-1 init for BR/EDR controllers: packet-based flow control and
 * the basic identity reads (features, version, BD address).
 */
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
1323
/* Stage-1 init for AMP controllers: block-based flow control plus the
 * AMP-specific capability reads.
 */
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
1349
/* Stage-1 initialization: optional reset, then dispatch to the
 * transport-specific init based on the controller type.
 */
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset, unless the driver quirk says the device resets on close */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
1374
/* Stage-2 BR/EDR setup: read basic controller state, clear event
 * filters and set the connection accept timeout.
 */
static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 baseband slots) */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}
1406
/* Stage-2 LE setup: read LE capabilities and reset the white list.
 * LE-only controllers get HCI_LE_ENABLED set implicitly since they
 * have no way to toggle LE support.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
1430
1431static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1432{
1433 if (lmp_ext_inq_capable(hdev))
1434 return 0x02;
1435
1436 if (lmp_inq_rssi_capable(hdev))
1437 return 0x01;
1438
1439 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1440 hdev->lmp_subver == 0x0757)
1441 return 0x01;
1442
1443 if (hdev->manufacturer == 15) {
1444 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1445 return 0x01;
1446 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1447 return 0x01;
1448 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1449 return 0x01;
1450 }
1451
1452 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1453 hdev->lmp_subver == 0x1805)
1454 return 0x01;
1455
1456 return 0x00;
1457}
1458
/* Queue a Write Inquiry Mode command using the best mode the
 * controller supports (see hci_get_inquiry_mode()).
 */
static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}
1467
/* Build and queue the Set Event Mask command, enabling only the events
 * the controller's feature set can actually generate.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
1544
/* Stage-2 initialization: transport-specific setup plus the feature
 * dependent commands (SSP/EIR, inquiry mode, extended features,
 * authentication).
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			/* SSP capable but disabled: clear the EIR data */
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
1606
/* Queue a Write Default Link Policy command with all policies the
 * controller's LMP features allow (role switch, hold, sniff, park).
 */
static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}
1625
/* Sync the controller's LE Host Supported setting with the host-side
 * HCI_LE_ENABLED flag, but only when the two actually differ.
 */
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	/* Only issue the command when the setting needs to change */
	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
1646
/* Build and queue Set Event Mask Page 2, enabling the CSB and ping
 * related events the controller supports.
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
1678
/* Stage-3 initialization: event masks, stored-link-key handling, page
 * scan reads, the LE event mask, and extended feature pages.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		/* Basic LE meta events (connection/advertising related) */
		events[0] = 0x0f;

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10; /* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20; /* LE Remote Connection
					    * Parameter Request
					    */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the correspondig event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04; /* LE Direct Advertising
					    * Report
					    */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80; /* LE Read Local P-256
					    * Public Key Complete
					    */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01; /* LE Generate DHKey Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
1781
/* Stage-4 initialization: optional feature commands gated on the
 * supported-commands bitmap and LMP features.
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (bredr_sc_enabled(hdev)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
1809
/* Full controller initialization: runs the four init stages
 * synchronously and, during the initial HCI_SETUP phase only, creates
 * the debugfs entries matching the controller's capabilities.
 * Returns 0 on success or a negative errno from a failed stage.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	/* Generic entries, valid for every controller */
	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
			    &device_list_fops);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
			    &conn_info_min_age_fops);
	debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
			    &conn_info_max_age_fops);

	/* BR/EDR-only entries */
	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	/* Secure Simple Pairing related entries */
	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
		if (lmp_le_capable(hdev))
			debugfs_create_file("force_lesc_support", 0644,
					    hdev->debugfs, hdev,
					    &force_lesc_support_fops);
	}

	/* Sniff mode tunables */
	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	/* LE-only entries; also registers SMP at the end */
	if (lmp_le_capable(hdev)) {
		debugfs_create_file("identity", 0400, hdev->debugfs,
				    hdev, &identity_fops);
		debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
				    hdev, &rpa_timeout_fops);
		debugfs_create_file("random_address", 0444, hdev->debugfs,
				    hdev, &random_address_fops);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

		/* For controllers with a public address, provide a debug
		 * option to force the usage of the configured static
		 * address. By default the public address is used.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			debugfs_create_file("force_static_address", 0644,
					    hdev->debugfs, hdev,
					    &force_static_address_fops);

		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
				    &white_list_fops);
		debugfs_create_file("identity_resolving_keys", 0400,
				    hdev->debugfs, hdev,
				    &identity_resolving_keys_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("conn_latency", 0644, hdev->debugfs,
				    hdev, &conn_latency_fops);
		debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
				    hdev, &supervision_timeout_fops);
		debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
				    hdev, &adv_channel_map_fops);
		debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
				    hdev, &adv_min_interval_fops);
		debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
				    hdev, &adv_max_interval_fops);
		debugfs_create_u16("discov_interleaved_timeout", 0644,
				   hdev->debugfs,
				   &hdev->discov_interleaved_timeout);

		smp_register(hdev);
	}

	return 0;
}
1952
/* Minimal init for unconfigured controllers: optional reset, read the
 * local version, and read the BD address only when the driver can
 * actually set one (set_bdaddr callback present).
 */
static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
1970
/* Run the unconfigured-controller init sequence. Raw devices skip it
 * entirely. Returns 0 on success or a negative errno.
 */
static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return 0;
}
1984
/* Request builder: write the scan enable setting (inquiry/page scan
 * bits) passed in @opt.
 */
static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
1994
/* Request builder: write the authentication enable setting from @opt. */
static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
2004
/* Request builder: write the encryption mode setting from @opt. */
static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
2014
/* Request builder: write the default link policy, passed in @opt as a
 * host-endian u16 and converted to little endian for the wire.
 */
static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
2024
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	/* Walk the global device list under the read lock; take a
	 * reference on the match before dropping the lock so the device
	 * cannot disappear. Caller must drop it with hci_dev_put().
	 */
	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
1da177e4
LT
2046
2047/* ---- Inquiry support ---- */
ff9ef578 2048
30dc78e1
JH
2049bool hci_discovery_active(struct hci_dev *hdev)
2050{
2051 struct discovery_state *discov = &hdev->discovery;
2052
6fbe195d 2053 switch (discov->state) {
343f935b 2054 case DISCOVERY_FINDING:
6fbe195d 2055 case DISCOVERY_RESOLVING:
30dc78e1
JH
2056 return true;
2057
6fbe195d
AG
2058 default:
2059 return false;
2060 }
30dc78e1
JH
2061}
2062
/* Transition the discovery state machine and emit the matching mgmt
 * "discovering" notifications. A transition to the current state is a
 * no-op.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		/* STARTING -> STOPPED means discovery never actually
		 * began, so no "stopped discovering" event is sent.
		 */
		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}
2092
/* Free every entry in the inquiry cache and reinitialize the unknown
 * and resolve sub-lists. Entries are linked on all three lists through
 * the "all" list head, so freeing via "all" covers everything.
 */
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}
2106
a8c5fb1a
GP
2107struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2108 bdaddr_t *bdaddr)
1da177e4 2109{
30883512 2110 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
2111 struct inquiry_entry *e;
2112
6ed93dc6 2113 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 2114
561aafbc
JH
2115 list_for_each_entry(e, &cache->all, all) {
2116 if (!bacmp(&e->data.bdaddr, bdaddr))
2117 return e;
2118 }
2119
2120 return NULL;
2121}
2122
2123struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 2124 bdaddr_t *bdaddr)
561aafbc 2125{
30883512 2126 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
2127 struct inquiry_entry *e;
2128
6ed93dc6 2129 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
2130
2131 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 2132 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
2133 return e;
2134 }
2135
2136 return NULL;
1da177e4
LT
2137}
2138
30dc78e1 2139struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
2140 bdaddr_t *bdaddr,
2141 int state)
30dc78e1
JH
2142{
2143 struct discovery_state *cache = &hdev->discovery;
2144 struct inquiry_entry *e;
2145
6ed93dc6 2146 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
2147
2148 list_for_each_entry(e, &cache->resolve, list) {
2149 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2150 return e;
2151 if (!bacmp(&e->data.bdaddr, bdaddr))
2152 return e;
2153 }
2154
2155 return NULL;
2156}
2157
a3d4e20a 2158void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
04124681 2159 struct inquiry_entry *ie)
a3d4e20a
JH
2160{
2161 struct discovery_state *cache = &hdev->discovery;
2162 struct list_head *pos = &cache->resolve;
2163 struct inquiry_entry *p;
2164
2165 list_del(&ie->list);
2166
2167 list_for_each_entry(p, &cache->resolve, list) {
2168 if (p->name_state != NAME_PENDING &&
a8c5fb1a 2169 abs(p->data.rssi) >= abs(ie->data.rssi))
a3d4e20a
JH
2170 break;
2171 pos = &p->list;
2172 }
2173
2174 list_add(&ie->list, pos);
2175}
2176
af58925c
MH
2177u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2178 bool name_known)
1da177e4 2179{
30883512 2180 struct discovery_state *cache = &hdev->discovery;
70f23020 2181 struct inquiry_entry *ie;
af58925c 2182 u32 flags = 0;
1da177e4 2183
6ed93dc6 2184 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1da177e4 2185
6928a924 2186 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
2b2fec4d 2187
af58925c
MH
2188 if (!data->ssp_mode)
2189 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
388fc8fa 2190
70f23020 2191 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
a3d4e20a 2192 if (ie) {
af58925c
MH
2193 if (!ie->data.ssp_mode)
2194 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
388fc8fa 2195
a3d4e20a 2196 if (ie->name_state == NAME_NEEDED &&
a8c5fb1a 2197 data->rssi != ie->data.rssi) {
a3d4e20a
JH
2198 ie->data.rssi = data->rssi;
2199 hci_inquiry_cache_update_resolve(hdev, ie);
2200 }
2201
561aafbc 2202 goto update;
a3d4e20a 2203 }
561aafbc
JH
2204
2205 /* Entry not in the cache. Add new one. */
27f70f3e 2206 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
af58925c
MH
2207 if (!ie) {
2208 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2209 goto done;
2210 }
561aafbc
JH
2211
2212 list_add(&ie->all, &cache->all);
2213
2214 if (name_known) {
2215 ie->name_state = NAME_KNOWN;
2216 } else {
2217 ie->name_state = NAME_NOT_KNOWN;
2218 list_add(&ie->list, &cache->unknown);
2219 }
70f23020 2220
561aafbc
JH
2221update:
2222 if (name_known && ie->name_state != NAME_KNOWN &&
a8c5fb1a 2223 ie->name_state != NAME_PENDING) {
561aafbc
JH
2224 ie->name_state = NAME_KNOWN;
2225 list_del(&ie->list);
1da177e4
LT
2226 }
2227
70f23020
AE
2228 memcpy(&ie->data, data, sizeof(*data));
2229 ie->timestamp = jiffies;
1da177e4 2230 cache->timestamp = jiffies;
3175405b
JH
2231
2232 if (ie->name_state == NAME_NOT_KNOWN)
af58925c 2233 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
3175405b 2234
af58925c
MH
2235done:
2236 return flags;
1da177e4
LT
2237}
2238
2239static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2240{
30883512 2241 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
2242 struct inquiry_info *info = (struct inquiry_info *) buf;
2243 struct inquiry_entry *e;
2244 int copied = 0;
2245
561aafbc 2246 list_for_each_entry(e, &cache->all, all) {
1da177e4 2247 struct inquiry_data *data = &e->data;
b57c1a56
JH
2248
2249 if (copied >= num)
2250 break;
2251
1da177e4
LT
2252 bacpy(&info->bdaddr, &data->bdaddr);
2253 info->pscan_rep_mode = data->pscan_rep_mode;
2254 info->pscan_period_mode = data->pscan_period_mode;
2255 info->pscan_mode = data->pscan_mode;
2256 memcpy(info->dev_class, data->dev_class, 3);
2257 info->clock_offset = data->clock_offset;
b57c1a56 2258
1da177e4 2259 info++;
b57c1a56 2260 copied++;
1da177e4
LT
2261 }
2262
2263 BT_DBG("cache %p, copied %d", cache, copied);
2264 return copied;
2265}
2266
42c6b129 2267static void hci_inq_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
2268{
2269 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
42c6b129 2270 struct hci_dev *hdev = req->hdev;
1da177e4
LT
2271 struct hci_cp_inquiry cp;
2272
2273 BT_DBG("%s", hdev->name);
2274
2275 if (test_bit(HCI_INQUIRY, &hdev->flags))
2276 return;
2277
2278 /* Start Inquiry */
2279 memcpy(&cp.lap, &ir->lap, 3);
2280 cp.length = ir->length;
2281 cp.num_rsp = ir->num_rsp;
42c6b129 2282 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
2283}
2284
2285int hci_inquiry(void __user *arg)
2286{
2287 __u8 __user *ptr = arg;
2288 struct hci_inquiry_req ir;
2289 struct hci_dev *hdev;
2290 int err = 0, do_inquiry = 0, max_rsp;
2291 long timeo;
2292 __u8 *buf;
2293
2294 if (copy_from_user(&ir, ptr, sizeof(ir)))
2295 return -EFAULT;
2296
5a08ecce
AE
2297 hdev = hci_dev_get(ir.dev_id);
2298 if (!hdev)
1da177e4
LT
2299 return -ENODEV;
2300
0736cfa8
MH
2301 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2302 err = -EBUSY;
2303 goto done;
2304 }
2305
4a964404 2306 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
2307 err = -EOPNOTSUPP;
2308 goto done;
2309 }
2310
5b69bef5
MH
2311 if (hdev->dev_type != HCI_BREDR) {
2312 err = -EOPNOTSUPP;
2313 goto done;
2314 }
2315
56f87901
JH
2316 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2317 err = -EOPNOTSUPP;
2318 goto done;
2319 }
2320
09fd0de5 2321 hci_dev_lock(hdev);
8e87d142 2322 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 2323 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1f9b9a5d 2324 hci_inquiry_cache_flush(hdev);
1da177e4
LT
2325 do_inquiry = 1;
2326 }
09fd0de5 2327 hci_dev_unlock(hdev);
1da177e4 2328
04837f64 2329 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
2330
2331 if (do_inquiry) {
01178cd4
JH
2332 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2333 timeo);
70f23020
AE
2334 if (err < 0)
2335 goto done;
3e13fa1e
AG
2336
2337 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2338 * cleared). If it is interrupted by a signal, return -EINTR.
2339 */
74316201 2340 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
3e13fa1e
AG
2341 TASK_INTERRUPTIBLE))
2342 return -EINTR;
70f23020 2343 }
1da177e4 2344
8fc9ced3
GP
2345 /* for unlimited number of responses we will use buffer with
2346 * 255 entries
2347 */
1da177e4
LT
2348 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2349
2350 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2351 * copy it to the user space.
2352 */
01df8c31 2353 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 2354 if (!buf) {
1da177e4
LT
2355 err = -ENOMEM;
2356 goto done;
2357 }
2358
09fd0de5 2359 hci_dev_lock(hdev);
1da177e4 2360 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 2361 hci_dev_unlock(hdev);
1da177e4
LT
2362
2363 BT_DBG("num_rsp %d", ir.num_rsp);
2364
2365 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2366 ptr += sizeof(ir);
2367 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 2368 ir.num_rsp))
1da177e4 2369 err = -EFAULT;
8e87d142 2370 } else
1da177e4
LT
2371 err = -EFAULT;
2372
2373 kfree(buf);
2374
2375done:
2376 hci_dev_put(hdev);
2377 return err;
2378}
2379
cbed0ca1 2380static int hci_dev_do_open(struct hci_dev *hdev)
1da177e4 2381{
1da177e4
LT
2382 int ret = 0;
2383
1da177e4
LT
2384 BT_DBG("%s %p", hdev->name, hdev);
2385
2386 hci_req_lock(hdev);
2387
94324962
JH
2388 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2389 ret = -ENODEV;
2390 goto done;
2391 }
2392
d603b76b
MH
2393 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2394 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
a5c8f270
MH
2395 /* Check for rfkill but allow the HCI setup stage to
2396 * proceed (which in itself doesn't cause any RF activity).
2397 */
2398 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2399 ret = -ERFKILL;
2400 goto done;
2401 }
2402
2403 /* Check for valid public address or a configured static
2404 * random adddress, but let the HCI setup proceed to
2405 * be able to determine if there is a public address
2406 * or not.
2407 *
c6beca0e
MH
2408 * In case of user channel usage, it is not important
2409 * if a public address or static random address is
2410 * available.
2411 *
a5c8f270
MH
2412 * This check is only valid for BR/EDR controllers
2413 * since AMP controllers do not have an address.
2414 */
c6beca0e
MH
2415 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2416 hdev->dev_type == HCI_BREDR &&
a5c8f270
MH
2417 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2418 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2419 ret = -EADDRNOTAVAIL;
2420 goto done;
2421 }
611b30f7
MH
2422 }
2423
1da177e4
LT
2424 if (test_bit(HCI_UP, &hdev->flags)) {
2425 ret = -EALREADY;
2426 goto done;
2427 }
2428
1da177e4
LT
2429 if (hdev->open(hdev)) {
2430 ret = -EIO;
2431 goto done;
2432 }
2433
f41c70c4
MH
2434 atomic_set(&hdev->cmd_cnt, 1);
2435 set_bit(HCI_INIT, &hdev->flags);
2436
af202f84
MH
2437 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2438 if (hdev->setup)
2439 ret = hdev->setup(hdev);
f41c70c4 2440
af202f84
MH
2441 /* The transport driver can set these quirks before
2442 * creating the HCI device or in its setup callback.
2443 *
2444 * In case any of them is set, the controller has to
2445 * start up as unconfigured.
2446 */
eb1904f4
MH
2447 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2448 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
89bc22d2 2449 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
f41c70c4 2450
0ebca7d6
MH
2451 /* For an unconfigured controller it is required to
2452 * read at least the version information provided by
2453 * the Read Local Version Information command.
2454 *
2455 * If the set_bdaddr driver callback is provided, then
2456 * also the original Bluetooth public device address
2457 * will be read using the Read BD Address command.
2458 */
2459 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2460 ret = __hci_unconf_init(hdev);
89bc22d2
MH
2461 }
2462
9713c17b
MH
2463 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2464 /* If public address change is configured, ensure that
2465 * the address gets programmed. If the driver does not
2466 * support changing the public address, fail the power
2467 * on procedure.
2468 */
2469 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2470 hdev->set_bdaddr)
24c457e2
MH
2471 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2472 else
2473 ret = -EADDRNOTAVAIL;
2474 }
2475
f41c70c4 2476 if (!ret) {
4a964404 2477 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
0736cfa8 2478 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
f41c70c4 2479 ret = __hci_init(hdev);
1da177e4
LT
2480 }
2481
f41c70c4
MH
2482 clear_bit(HCI_INIT, &hdev->flags);
2483
1da177e4
LT
2484 if (!ret) {
2485 hci_dev_hold(hdev);
d6bfd59c 2486 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1da177e4
LT
2487 set_bit(HCI_UP, &hdev->flags);
2488 hci_notify(hdev, HCI_DEV_UP);
bb4b2a9a 2489 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
d603b76b 2490 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
4a964404 2491 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
0736cfa8 2492 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1514b892 2493 hdev->dev_type == HCI_BREDR) {
09fd0de5 2494 hci_dev_lock(hdev);
744cf19e 2495 mgmt_powered(hdev, 1);
09fd0de5 2496 hci_dev_unlock(hdev);
56e5cb86 2497 }
8e87d142 2498 } else {
1da177e4 2499 /* Init failed, cleanup */
3eff45ea 2500 flush_work(&hdev->tx_work);
c347b765 2501 flush_work(&hdev->cmd_work);
b78752cc 2502 flush_work(&hdev->rx_work);
1da177e4
LT
2503
2504 skb_queue_purge(&hdev->cmd_q);
2505 skb_queue_purge(&hdev->rx_q);
2506
2507 if (hdev->flush)
2508 hdev->flush(hdev);
2509
2510 if (hdev->sent_cmd) {
2511 kfree_skb(hdev->sent_cmd);
2512 hdev->sent_cmd = NULL;
2513 }
2514
2515 hdev->close(hdev);
fee746b0 2516 hdev->flags &= BIT(HCI_RAW);
1da177e4
LT
2517 }
2518
2519done:
2520 hci_req_unlock(hdev);
1da177e4
LT
2521 return ret;
2522}
2523
cbed0ca1
JH
2524/* ---- HCI ioctl helpers ---- */
2525
2526int hci_dev_open(__u16 dev)
2527{
2528 struct hci_dev *hdev;
2529 int err;
2530
2531 hdev = hci_dev_get(dev);
2532 if (!hdev)
2533 return -ENODEV;
2534
4a964404 2535 /* Devices that are marked as unconfigured can only be powered
fee746b0
MH
2536 * up as user channel. Trying to bring them up as normal devices
2537 * will result into a failure. Only user channel operation is
2538 * possible.
2539 *
2540 * When this function is called for a user channel, the flag
2541 * HCI_USER_CHANNEL will be set first before attempting to
2542 * open the device.
2543 */
4a964404 2544 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
fee746b0
MH
2545 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2546 err = -EOPNOTSUPP;
2547 goto done;
2548 }
2549
e1d08f40
JH
2550 /* We need to ensure that no other power on/off work is pending
2551 * before proceeding to call hci_dev_do_open. This is
2552 * particularly important if the setup procedure has not yet
2553 * completed.
2554 */
2555 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2556 cancel_delayed_work(&hdev->power_off);
2557
a5c8f270
MH
2558 /* After this call it is guaranteed that the setup procedure
2559 * has finished. This means that error conditions like RFKILL
2560 * or no valid public or static random address apply.
2561 */
e1d08f40
JH
2562 flush_workqueue(hdev->req_workqueue);
2563
12aa4f0a 2564 /* For controllers not using the management interface and that
b6ae8457 2565 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
12aa4f0a
MH
2566 * so that pairing works for them. Once the management interface
2567 * is in use this bit will be cleared again and userspace has
2568 * to explicitly enable it.
2569 */
2570 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2571 !test_bit(HCI_MGMT, &hdev->dev_flags))
b6ae8457 2572 set_bit(HCI_BONDABLE, &hdev->dev_flags);
12aa4f0a 2573
cbed0ca1
JH
2574 err = hci_dev_do_open(hdev);
2575
fee746b0 2576done:
cbed0ca1 2577 hci_dev_put(hdev);
cbed0ca1
JH
2578 return err;
2579}
2580
d7347f3c
JH
2581/* This function requires the caller holds hdev->lock */
2582static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2583{
2584 struct hci_conn_params *p;
2585
f161dd41
JH
2586 list_for_each_entry(p, &hdev->le_conn_params, list) {
2587 if (p->conn) {
2588 hci_conn_drop(p->conn);
f8aaf9b6 2589 hci_conn_put(p->conn);
f161dd41
JH
2590 p->conn = NULL;
2591 }
d7347f3c 2592 list_del_init(&p->action);
f161dd41 2593 }
d7347f3c
JH
2594
2595 BT_DBG("All LE pending actions cleared");
2596}
2597
1da177e4
LT
2598static int hci_dev_do_close(struct hci_dev *hdev)
2599{
2600 BT_DBG("%s %p", hdev->name, hdev);
2601
78c04c0b
VCG
2602 cancel_delayed_work(&hdev->power_off);
2603
1da177e4
LT
2604 hci_req_cancel(hdev, ENODEV);
2605 hci_req_lock(hdev);
2606
2607 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
65cc2b49 2608 cancel_delayed_work_sync(&hdev->cmd_timer);
1da177e4
LT
2609 hci_req_unlock(hdev);
2610 return 0;
2611 }
2612
3eff45ea
GP
2613 /* Flush RX and TX works */
2614 flush_work(&hdev->tx_work);
b78752cc 2615 flush_work(&hdev->rx_work);
1da177e4 2616
16ab91ab 2617 if (hdev->discov_timeout > 0) {
e0f9309f 2618 cancel_delayed_work(&hdev->discov_off);
16ab91ab 2619 hdev->discov_timeout = 0;
5e5282bb 2620 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
310a3d48 2621 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
16ab91ab
JH
2622 }
2623
a8b2d5c2 2624 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
7d78525d
JH
2625 cancel_delayed_work(&hdev->service_cache);
2626
7ba8b4be 2627 cancel_delayed_work_sync(&hdev->le_scan_disable);
4518bb0f
JH
2628
2629 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2630 cancel_delayed_work_sync(&hdev->rpa_expired);
7ba8b4be 2631
76727c02
JH
2632 /* Avoid potential lockdep warnings from the *_flush() calls by
2633 * ensuring the workqueue is empty up front.
2634 */
2635 drain_workqueue(hdev->workqueue);
2636
09fd0de5 2637 hci_dev_lock(hdev);
1aeb9c65
JH
2638
2639 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2640 if (hdev->dev_type == HCI_BREDR)
2641 mgmt_powered(hdev, 0);
2642 }
2643
1f9b9a5d 2644 hci_inquiry_cache_flush(hdev);
d7347f3c 2645 hci_pend_le_actions_clear(hdev);
f161dd41 2646 hci_conn_hash_flush(hdev);
09fd0de5 2647 hci_dev_unlock(hdev);
1da177e4
LT
2648
2649 hci_notify(hdev, HCI_DEV_DOWN);
2650
2651 if (hdev->flush)
2652 hdev->flush(hdev);
2653
2654 /* Reset device */
2655 skb_queue_purge(&hdev->cmd_q);
2656 atomic_set(&hdev->cmd_cnt, 1);
4a964404
MH
2657 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2658 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
a6c511c6 2659 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1da177e4 2660 set_bit(HCI_INIT, &hdev->flags);
01178cd4 2661 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1da177e4
LT
2662 clear_bit(HCI_INIT, &hdev->flags);
2663 }
2664
c347b765
GP
2665 /* flush cmd work */
2666 flush_work(&hdev->cmd_work);
1da177e4
LT
2667
2668 /* Drop queues */
2669 skb_queue_purge(&hdev->rx_q);
2670 skb_queue_purge(&hdev->cmd_q);
2671 skb_queue_purge(&hdev->raw_q);
2672
2673 /* Drop last sent command */
2674 if (hdev->sent_cmd) {
65cc2b49 2675 cancel_delayed_work_sync(&hdev->cmd_timer);
1da177e4
LT
2676 kfree_skb(hdev->sent_cmd);
2677 hdev->sent_cmd = NULL;
2678 }
2679
b6ddb638
JH
2680 kfree_skb(hdev->recv_evt);
2681 hdev->recv_evt = NULL;
2682
1da177e4
LT
2683 /* After this point our queues are empty
2684 * and no tasks are scheduled. */
2685 hdev->close(hdev);
2686
35b973c9 2687 /* Clear flags */
fee746b0 2688 hdev->flags &= BIT(HCI_RAW);
35b973c9
JH
2689 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2690
ced5c338 2691 /* Controller radio is available but is currently powered down */
536619e8 2692 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
ced5c338 2693
e59fda8d 2694 memset(hdev->eir, 0, sizeof(hdev->eir));
09b3c3fb 2695 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
7a4cd51d 2696 bacpy(&hdev->random_addr, BDADDR_ANY);
e59fda8d 2697
1da177e4
LT
2698 hci_req_unlock(hdev);
2699
2700 hci_dev_put(hdev);
2701 return 0;
2702}
2703
2704int hci_dev_close(__u16 dev)
2705{
2706 struct hci_dev *hdev;
2707 int err;
2708
70f23020
AE
2709 hdev = hci_dev_get(dev);
2710 if (!hdev)
1da177e4 2711 return -ENODEV;
8ee56540 2712
0736cfa8
MH
2713 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2714 err = -EBUSY;
2715 goto done;
2716 }
2717
8ee56540
MH
2718 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2719 cancel_delayed_work(&hdev->power_off);
2720
1da177e4 2721 err = hci_dev_do_close(hdev);
8ee56540 2722
0736cfa8 2723done:
1da177e4
LT
2724 hci_dev_put(hdev);
2725 return err;
2726}
2727
2728int hci_dev_reset(__u16 dev)
2729{
2730 struct hci_dev *hdev;
2731 int ret = 0;
2732
70f23020
AE
2733 hdev = hci_dev_get(dev);
2734 if (!hdev)
1da177e4
LT
2735 return -ENODEV;
2736
2737 hci_req_lock(hdev);
1da177e4 2738
808a049e
MH
2739 if (!test_bit(HCI_UP, &hdev->flags)) {
2740 ret = -ENETDOWN;
1da177e4 2741 goto done;
808a049e 2742 }
1da177e4 2743
0736cfa8
MH
2744 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2745 ret = -EBUSY;
2746 goto done;
2747 }
2748
4a964404 2749 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
2750 ret = -EOPNOTSUPP;
2751 goto done;
2752 }
2753
1da177e4
LT
2754 /* Drop queues */
2755 skb_queue_purge(&hdev->rx_q);
2756 skb_queue_purge(&hdev->cmd_q);
2757
76727c02
JH
2758 /* Avoid potential lockdep warnings from the *_flush() calls by
2759 * ensuring the workqueue is empty up front.
2760 */
2761 drain_workqueue(hdev->workqueue);
2762
09fd0de5 2763 hci_dev_lock(hdev);
1f9b9a5d 2764 hci_inquiry_cache_flush(hdev);
1da177e4 2765 hci_conn_hash_flush(hdev);
09fd0de5 2766 hci_dev_unlock(hdev);
1da177e4
LT
2767
2768 if (hdev->flush)
2769 hdev->flush(hdev);
2770
8e87d142 2771 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 2772 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4 2773
fee746b0 2774 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1da177e4
LT
2775
2776done:
1da177e4
LT
2777 hci_req_unlock(hdev);
2778 hci_dev_put(hdev);
2779 return ret;
2780}
2781
2782int hci_dev_reset_stat(__u16 dev)
2783{
2784 struct hci_dev *hdev;
2785 int ret = 0;
2786
70f23020
AE
2787 hdev = hci_dev_get(dev);
2788 if (!hdev)
1da177e4
LT
2789 return -ENODEV;
2790
0736cfa8
MH
2791 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2792 ret = -EBUSY;
2793 goto done;
2794 }
2795
4a964404 2796 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
2797 ret = -EOPNOTSUPP;
2798 goto done;
2799 }
2800
1da177e4
LT
2801 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2802
0736cfa8 2803done:
1da177e4 2804 hci_dev_put(hdev);
1da177e4
LT
2805 return ret;
2806}
2807
123abc08
JH
2808static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2809{
bc6d2d04 2810 bool conn_changed, discov_changed;
123abc08
JH
2811
2812 BT_DBG("%s scan 0x%02x", hdev->name, scan);
2813
2814 if ((scan & SCAN_PAGE))
2815 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2816 &hdev->dev_flags);
2817 else
2818 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2819 &hdev->dev_flags);
2820
bc6d2d04
JH
2821 if ((scan & SCAN_INQUIRY)) {
2822 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2823 &hdev->dev_flags);
2824 } else {
2825 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2826 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2827 &hdev->dev_flags);
2828 }
2829
123abc08
JH
2830 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2831 return;
2832
bc6d2d04
JH
2833 if (conn_changed || discov_changed) {
2834 /* In case this was disabled through mgmt */
2835 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2836
2837 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2838 mgmt_update_adv_data(hdev);
2839
123abc08 2840 mgmt_new_settings(hdev);
bc6d2d04 2841 }
123abc08
JH
2842}
2843
1da177e4
LT
2844int hci_dev_cmd(unsigned int cmd, void __user *arg)
2845{
2846 struct hci_dev *hdev;
2847 struct hci_dev_req dr;
2848 int err = 0;
2849
2850 if (copy_from_user(&dr, arg, sizeof(dr)))
2851 return -EFAULT;
2852
70f23020
AE
2853 hdev = hci_dev_get(dr.dev_id);
2854 if (!hdev)
1da177e4
LT
2855 return -ENODEV;
2856
0736cfa8
MH
2857 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2858 err = -EBUSY;
2859 goto done;
2860 }
2861
4a964404 2862 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
fee746b0
MH
2863 err = -EOPNOTSUPP;
2864 goto done;
2865 }
2866
5b69bef5
MH
2867 if (hdev->dev_type != HCI_BREDR) {
2868 err = -EOPNOTSUPP;
2869 goto done;
2870 }
2871
56f87901
JH
2872 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2873 err = -EOPNOTSUPP;
2874 goto done;
2875 }
2876
1da177e4
LT
2877 switch (cmd) {
2878 case HCISETAUTH:
01178cd4
JH
2879 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2880 HCI_INIT_TIMEOUT);
1da177e4
LT
2881 break;
2882
2883 case HCISETENCRYPT:
2884 if (!lmp_encrypt_capable(hdev)) {
2885 err = -EOPNOTSUPP;
2886 break;
2887 }
2888
2889 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2890 /* Auth must be enabled first */
01178cd4
JH
2891 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2892 HCI_INIT_TIMEOUT);
1da177e4
LT
2893 if (err)
2894 break;
2895 }
2896
01178cd4
JH
2897 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2898 HCI_INIT_TIMEOUT);
1da177e4
LT
2899 break;
2900
2901 case HCISETSCAN:
01178cd4
JH
2902 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2903 HCI_INIT_TIMEOUT);
91a668b0 2904
bc6d2d04
JH
2905 /* Ensure that the connectable and discoverable states
2906 * get correctly modified as this was a non-mgmt change.
91a668b0 2907 */
123abc08
JH
2908 if (!err)
2909 hci_update_scan_state(hdev, dr.dev_opt);
1da177e4
LT
2910 break;
2911
1da177e4 2912 case HCISETLINKPOL:
01178cd4
JH
2913 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2914 HCI_INIT_TIMEOUT);
1da177e4
LT
2915 break;
2916
2917 case HCISETLINKMODE:
e4e8e37c
MH
2918 hdev->link_mode = ((__u16) dr.dev_opt) &
2919 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2920 break;
2921
2922 case HCISETPTYPE:
2923 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
2924 break;
2925
2926 case HCISETACLMTU:
e4e8e37c
MH
2927 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2928 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
2929 break;
2930
2931 case HCISETSCOMTU:
e4e8e37c
MH
2932 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2933 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
2934 break;
2935
2936 default:
2937 err = -EINVAL;
2938 break;
2939 }
e4e8e37c 2940
0736cfa8 2941done:
1da177e4
LT
2942 hci_dev_put(hdev);
2943 return err;
2944}
2945
2946int hci_get_dev_list(void __user *arg)
2947{
8035ded4 2948 struct hci_dev *hdev;
1da177e4
LT
2949 struct hci_dev_list_req *dl;
2950 struct hci_dev_req *dr;
1da177e4
LT
2951 int n = 0, size, err;
2952 __u16 dev_num;
2953
2954 if (get_user(dev_num, (__u16 __user *) arg))
2955 return -EFAULT;
2956
2957 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2958 return -EINVAL;
2959
2960 size = sizeof(*dl) + dev_num * sizeof(*dr);
2961
70f23020
AE
2962 dl = kzalloc(size, GFP_KERNEL);
2963 if (!dl)
1da177e4
LT
2964 return -ENOMEM;
2965
2966 dr = dl->dev_req;
2967
f20d09d5 2968 read_lock(&hci_dev_list_lock);
8035ded4 2969 list_for_each_entry(hdev, &hci_dev_list, list) {
2e84d8db 2970 unsigned long flags = hdev->flags;
c542a06c 2971
2e84d8db
MH
2972 /* When the auto-off is configured it means the transport
2973 * is running, but in that case still indicate that the
2974 * device is actually down.
2975 */
2976 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2977 flags &= ~BIT(HCI_UP);
c542a06c 2978
1da177e4 2979 (dr + n)->dev_id = hdev->id;
2e84d8db 2980 (dr + n)->dev_opt = flags;
c542a06c 2981
1da177e4
LT
2982 if (++n >= dev_num)
2983 break;
2984 }
f20d09d5 2985 read_unlock(&hci_dev_list_lock);
1da177e4
LT
2986
2987 dl->dev_num = n;
2988 size = sizeof(*dl) + n * sizeof(*dr);
2989
2990 err = copy_to_user(arg, dl, size);
2991 kfree(dl);
2992
2993 return err ? -EFAULT : 0;
2994}
2995
2996int hci_get_dev_info(void __user *arg)
2997{
2998 struct hci_dev *hdev;
2999 struct hci_dev_info di;
2e84d8db 3000 unsigned long flags;
1da177e4
LT
3001 int err = 0;
3002
3003 if (copy_from_user(&di, arg, sizeof(di)))
3004 return -EFAULT;
3005
70f23020
AE
3006 hdev = hci_dev_get(di.dev_id);
3007 if (!hdev)
1da177e4
LT
3008 return -ENODEV;
3009
2e84d8db
MH
3010 /* When the auto-off is configured it means the transport
3011 * is running, but in that case still indicate that the
3012 * device is actually down.
3013 */
3014 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
3015 flags = hdev->flags & ~BIT(HCI_UP);
3016 else
3017 flags = hdev->flags;
c542a06c 3018
1da177e4
LT
3019 strcpy(di.name, hdev->name);
3020 di.bdaddr = hdev->bdaddr;
60f2a3ed 3021 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2e84d8db 3022 di.flags = flags;
1da177e4 3023 di.pkt_type = hdev->pkt_type;
572c7f84
JH
3024 if (lmp_bredr_capable(hdev)) {
3025 di.acl_mtu = hdev->acl_mtu;
3026 di.acl_pkts = hdev->acl_pkts;
3027 di.sco_mtu = hdev->sco_mtu;
3028 di.sco_pkts = hdev->sco_pkts;
3029 } else {
3030 di.acl_mtu = hdev->le_mtu;
3031 di.acl_pkts = hdev->le_pkts;
3032 di.sco_mtu = 0;
3033 di.sco_pkts = 0;
3034 }
1da177e4
LT
3035 di.link_policy = hdev->link_policy;
3036 di.link_mode = hdev->link_mode;
3037
3038 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
3039 memcpy(&di.features, &hdev->features, sizeof(di.features));
3040
3041 if (copy_to_user(arg, &di, sizeof(di)))
3042 err = -EFAULT;
3043
3044 hci_dev_put(hdev);
3045
3046 return err;
3047}
3048
3049/* ---- Interface to HCI drivers ---- */
3050
611b30f7
MH
3051static int hci_rfkill_set_block(void *data, bool blocked)
3052{
3053 struct hci_dev *hdev = data;
3054
3055 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
3056
0736cfa8
MH
3057 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
3058 return -EBUSY;
3059
5e130367
JH
3060 if (blocked) {
3061 set_bit(HCI_RFKILLED, &hdev->dev_flags);
d603b76b
MH
3062 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
3063 !test_bit(HCI_CONFIG, &hdev->dev_flags))
bf543036 3064 hci_dev_do_close(hdev);
5e130367
JH
3065 } else {
3066 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
1025c04c 3067 }
611b30f7
MH
3068
3069 return 0;
3070}
3071
3072static const struct rfkill_ops hci_rfkill_ops = {
3073 .set_block = hci_rfkill_set_block,
3074};
3075
ab81cbf9
JH
3076static void hci_power_on(struct work_struct *work)
3077{
3078 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
96570ffc 3079 int err;
ab81cbf9
JH
3080
3081 BT_DBG("%s", hdev->name);
3082
cbed0ca1 3083 err = hci_dev_do_open(hdev);
96570ffc 3084 if (err < 0) {
3ad67582 3085 hci_dev_lock(hdev);
96570ffc 3086 mgmt_set_powered_failed(hdev, err);
3ad67582 3087 hci_dev_unlock(hdev);
ab81cbf9 3088 return;
96570ffc 3089 }
ab81cbf9 3090
a5c8f270
MH
3091 /* During the HCI setup phase, a few error conditions are
3092 * ignored and they need to be checked now. If they are still
3093 * valid, it is important to turn the device back off.
3094 */
3095 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
4a964404 3096 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
a5c8f270
MH
3097 (hdev->dev_type == HCI_BREDR &&
3098 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
3099 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
bf543036
JH
3100 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3101 hci_dev_do_close(hdev);
3102 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
19202573
JH
3103 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
3104 HCI_AUTO_OFF_TIMEOUT);
bf543036 3105 }
ab81cbf9 3106
fee746b0 3107 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
4a964404
MH
3108 /* For unconfigured devices, set the HCI_RAW flag
3109 * so that userspace can easily identify them.
4a964404
MH
3110 */
3111 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3112 set_bit(HCI_RAW, &hdev->flags);
0602a8ad
MH
3113
3114 /* For fully configured devices, this will send
3115 * the Index Added event. For unconfigured devices,
3116 * it will send Unconfigued Index Added event.
3117 *
3118 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
3119 * and no event will be send.
3120 */
3121 mgmt_index_added(hdev);
d603b76b 3122 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
5ea234d3
MH
3123 /* When the controller is now configured, then it
3124 * is important to clear the HCI_RAW flag.
3125 */
3126 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3127 clear_bit(HCI_RAW, &hdev->flags);
3128
d603b76b
MH
3129 /* Powering on the controller with HCI_CONFIG set only
3130 * happens with the transition from unconfigured to
3131 * configured. This will send the Index Added event.
3132 */
744cf19e 3133 mgmt_index_added(hdev);
fee746b0 3134 }
ab81cbf9
JH
3135}
3136
/* Deferred power-off work item, run from hdev->req_workqueue.
 *
 * Queued by the HCI_AUTO_OFF timeout in hci_power_on(); simply brings
 * the controller down again.
 */
static void hci_power_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            power_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_do_close(hdev);
}
3146
16ab91ab
JH
3147static void hci_discov_off(struct work_struct *work)
3148{
3149 struct hci_dev *hdev;
16ab91ab
JH
3150
3151 hdev = container_of(work, struct hci_dev, discov_off.work);
3152
3153 BT_DBG("%s", hdev->name);
3154
d1967ff8 3155 mgmt_discoverable_timeout(hdev);
16ab91ab
JH
3156}
3157
/* Remove and free every service UUID registered for @hdev.
 *
 * NOTE(review): list is modified without RCU helpers, so the caller is
 * presumably expected to hold hdev->lock — confirm against call sites.
 */
void hci_uuids_clear(struct hci_dev *hdev)
{
        struct bt_uuid *uuid, *tmp;

        list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
                list_del(&uuid->list);
                kfree(uuid);
        }
}
3167
/* Drop all stored BR/EDR link keys for @hdev.
 *
 * Entries are unlinked with list_del_rcu() and freed via kfree_rcu(),
 * so concurrent RCU readers (e.g. hci_find_link_key()) remain safe.
 */
void hci_link_keys_clear(struct hci_dev *hdev)
{
        struct link_key *key;

        list_for_each_entry_rcu(key, &hdev->link_keys, list) {
                list_del_rcu(&key->list);
                kfree_rcu(key, rcu);
        }
}
3177
/* Drop all stored SMP Long Term Keys for @hdev.
 *
 * RCU-deferred freeing keeps lockless readers such as hci_find_ltk()
 * safe while entries disappear.
 */
void hci_smp_ltks_clear(struct hci_dev *hdev)
{
        struct smp_ltk *k;

        list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
                list_del_rcu(&k->list);
                kfree_rcu(k, rcu);
        }
}
3187
970c4e46
JH
/* Drop all stored SMP Identity Resolving Keys for @hdev.
 *
 * Same RCU unlink + deferred free pattern as the LTK/link-key variants.
 */
void hci_smp_irks_clear(struct hci_dev *hdev)
{
        struct smp_irk *k;

        list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
                list_del_rcu(&k->list);
                kfree_rcu(k, rcu);
        }
}
3197
55ed8ca1
JH
/* Look up the stored BR/EDR link key for @bdaddr.
 *
 * Walks the RCU-protected key list; returns the matching entry or NULL.
 * NOTE(review): the pointer is returned after rcu_read_unlock(), so the
 * caller presumably relies on hdev->lock (or similar) to keep the entry
 * alive — confirm against call sites.
 */
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *k;

        rcu_read_lock();
        list_for_each_entry_rcu(k, &hdev->link_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr) == 0) {
                        rcu_read_unlock();
                        return k;
                }
        }
        rcu_read_unlock();

        return NULL;
}
3213
/* Decide whether a newly created link key should be stored persistently.
 *
 * Returns true when the key should survive (be reported as persistent to
 * userspace), false for keys that must be treated as temporary.  The
 * checks below are ordered; each early return short-circuits the rest.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
                               u8 key_type, u8 old_key_type)
{
        /* Legacy key */
        if (key_type < 0x03)
                return true;

        /* Debug keys are insecure so don't store them persistently */
        if (key_type == HCI_LK_DEBUG_COMBINATION)
                return false;

        /* Changed combination key and there's no previous one */
        if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
                return false;

        /* Security mode 3 case */
        if (!conn)
                return true;

        /* BR/EDR key derived using SC from an LE link */
        if (conn->type == LE_LINK)
                return true;

        /* Neither local nor remote side had no-bonding as requirement */
        if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
                return true;

        /* Local side had dedicated bonding as requirement */
        if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
                return true;

        /* Remote side had dedicated bonding as requirement */
        if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
                return true;

        /* If none of the above criteria match, then don't store the key
         * persistently */
        return false;
}
3253
e804d25d 3254static u8 ltk_role(u8 type)
98a0b845 3255{
e804d25d
JH
3256 if (type == SMP_LTK)
3257 return HCI_ROLE_MASTER;
98a0b845 3258
e804d25d 3259 return HCI_ROLE_SLAVE;
98a0b845
JH
3260}
3261
/* Look up a Long Term Key matching address, address type and role.
 *
 * A key matches when its identity address agrees and it is either an SC
 * key (usable in both roles, see smp_ltk_is_sc()) or its type maps to
 * the requested @role via ltk_role().  Returns the entry or NULL.
 */
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
                             u8 addr_type, u8 role)
{
        struct smp_ltk *k;

        rcu_read_lock();
        list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
                if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
                        continue;

                if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
                        rcu_read_unlock();
                        return k;
                }
        }
        rcu_read_unlock();

        return NULL;
}
75d262c2 3281
970c4e46
JH
/* Resolve a Resolvable Private Address to its Identity Resolving Key.
 *
 * Two passes under one RCU read section: first a cheap match against the
 * cached ->rpa of each IRK, then a cryptographic check via
 * smp_irk_matches() for every key.  On a crypto match the RPA is cached
 * in the entry so the next lookup hits the fast path.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
        struct smp_irk *irk;

        rcu_read_lock();
        list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
                if (!bacmp(&irk->rpa, rpa)) {
                        rcu_read_unlock();
                        return irk;
                }
        }

        list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
                if (smp_irk_matches(hdev, irk->val, rpa)) {
                        /* Cache the resolved RPA for the fast path above */
                        bacpy(&irk->rpa, rpa);
                        rcu_read_unlock();
                        return irk;
                }
        }
        rcu_read_unlock();

        return NULL;
}
3305
/* Look up an IRK by its identity address and address type.
 *
 * Rejects random addresses that are not static (top two bits must be
 * 0b11 for static random per the address layout check below), since
 * only public or static random addresses can be identity addresses.
 */
struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                     u8 addr_type)
{
        struct smp_irk *irk;

        /* Identity Address must be public or static random */
        if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
                return NULL;

        rcu_read_lock();
        list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
                if (addr_type == irk->addr_type &&
                    bacmp(bdaddr, &irk->bdaddr) == 0) {
                        rcu_read_unlock();
                        return irk;
                }
        }
        rcu_read_unlock();

        return NULL;
}
3327
/* Store (or update in place) a BR/EDR link key for @bdaddr.
 *
 * If a key for the address already exists it is overwritten; otherwise a
 * new entry is allocated and added to hdev->link_keys.  When @persistent
 * is non-NULL it receives the hci_persistent_key() verdict.  Returns the
 * stored entry, or NULL on allocation failure.
 */
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
                                  bdaddr_t *bdaddr, u8 *val, u8 type,
                                  u8 pin_len, bool *persistent)
{
        struct link_key *key, *old_key;
        u8 old_key_type;

        old_key = hci_find_link_key(hdev, bdaddr);
        if (old_key) {
                old_key_type = old_key->type;
                key = old_key;
        } else {
                /* 0xff marks "no previous key type known" */
                old_key_type = conn ? conn->key_type : 0xff;
                key = kzalloc(sizeof(*key), GFP_KERNEL);
                if (!key)
                        return NULL;
                list_add_rcu(&key->list, &hdev->link_keys);
        }

        BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

        /* Some buggy controller combinations generate a changed
         * combination key for legacy pairing even when there's no
         * previous key */
        if (type == HCI_LK_CHANGED_COMBINATION &&
            (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
                type = HCI_LK_COMBINATION;
                if (conn)
                        conn->key_type = type;
        }

        bacpy(&key->bdaddr, bdaddr);
        memcpy(key->val, val, HCI_LINK_KEY_SIZE);
        key->pin_len = pin_len;

        /* A "changed combination" key keeps reporting the previous type */
        if (type == HCI_LK_CHANGED_COMBINATION)
                key->type = old_key_type;
        else
                key->type = type;

        if (persistent)
                *persistent = hci_persistent_key(hdev, conn, type,
                                                 old_key_type);

        return key;
}
3374
/* Store (or update in place) an SMP Long Term Key.
 *
 * An existing key matching address, address type and the role implied by
 * @type is reused; otherwise a new entry is allocated and linked in.
 * Returns the stored entry, or NULL on allocation failure.
 */
struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
                            u8 addr_type, u8 type, u8 authenticated,
                            u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
        struct smp_ltk *key, *old_key;
        u8 role = ltk_role(type);

        old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
        if (old_key)
                key = old_key;
        else {
                key = kzalloc(sizeof(*key), GFP_KERNEL);
                if (!key)
                        return NULL;
                list_add_rcu(&key->list, &hdev->long_term_keys);
        }

        bacpy(&key->bdaddr, bdaddr);
        key->bdaddr_type = addr_type;
        memcpy(key->val, tk, sizeof(key->val));
        key->authenticated = authenticated;
        key->ediv = ediv;
        key->rand = rand;
        key->enc_size = enc_size;
        key->type = type;

        return key;
}
3403
ca9142b8
JH
/* Store (or refresh) an Identity Resolving Key for an identity address.
 *
 * Reuses an existing entry for the same identity address when present;
 * otherwise allocates a new one.  The IRK value and the currently known
 * RPA are (re)written in either case.  Returns the entry or NULL on
 * allocation failure.
 */
struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
                            u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
        struct smp_irk *irk;

        irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
        if (!irk) {
                irk = kzalloc(sizeof(*irk), GFP_KERNEL);
                if (!irk)
                        return NULL;

                bacpy(&irk->bdaddr, bdaddr);
                irk->addr_type = addr_type;

                list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
        }

        memcpy(irk->val, val, 16);
        bacpy(&irk->rpa, rpa);

        return irk;
}
3426
55ed8ca1
JH
/* Remove the stored BR/EDR link key for @bdaddr.
 *
 * Returns 0 on success, -ENOENT when no key is stored for the address.
 * Freeing is RCU-deferred so concurrent lockless readers stay safe.
 */
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *key;

        key = hci_find_link_key(hdev, bdaddr);
        if (!key)
                return -ENOENT;

        BT_DBG("%s removing %pMR", hdev->name, bdaddr);

        list_del_rcu(&key->list);
        kfree_rcu(key, rcu);

        return 0;
}
3442
/* Remove all LTKs stored for the given identity address/type.
 *
 * Multiple entries can match (e.g. master and slave keys), hence the
 * counter.  Returns 0 when at least one key was removed, else -ENOENT.
 */
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
        struct smp_ltk *k;
        int removed = 0;

        list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
                        continue;

                BT_DBG("%s removing %pMR", hdev->name, bdaddr);

                list_del_rcu(&k->list);
                kfree_rcu(k, rcu);
                removed++;
        }

        return removed ? 0 : -ENOENT;
}
3461
a7ec7338
JH
/* Remove the IRK(s) stored for the given identity address/type.
 *
 * Silently does nothing when no entry matches.
 */
void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
        struct smp_irk *k;

        list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
                        continue;

                BT_DBG("%s removing %pMR", hdev->name, bdaddr);

                list_del_rcu(&k->list);
                kfree_rcu(k, rcu);
        }
}
3476
/* HCI command timer function.
 *
 * Fires when the controller failed to answer the last command in time.
 * Logs the stuck opcode (if the sent command is still around), then
 * restores one command credit and kicks cmd_work so the queue can make
 * progress again.
 */
static void hci_cmd_timeout(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            cmd_timer.work);

        if (hdev->sent_cmd) {
                struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
                u16 opcode = __le16_to_cpu(sent->opcode);

                BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
        } else {
                BT_ERR("%s command tx timeout", hdev->name);
        }

        /* Grant one command credit so the next queued command can go out */
        atomic_set(&hdev->cmd_cnt, 1);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}
3495
2763eda6 3496struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
6928a924 3497 bdaddr_t *bdaddr, u8 bdaddr_type)
2763eda6
SJ
3498{
3499 struct oob_data *data;
3500
6928a924
JH
3501 list_for_each_entry(data, &hdev->remote_oob_data, list) {
3502 if (bacmp(bdaddr, &data->bdaddr) != 0)
3503 continue;
3504 if (data->bdaddr_type != bdaddr_type)
3505 continue;
3506 return data;
3507 }
2763eda6
SJ
3508
3509 return NULL;
3510}
3511
6928a924
JH
/* Remove stored remote OOB data for the given address/type.
 *
 * Returns 0 on success, -ENOENT when no entry exists.
 */
int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
                               u8 bdaddr_type)
{
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
        if (!data)
                return -ENOENT;

        BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

        list_del(&data->list);
        kfree(data);

        return 0;
}
3528
/* Remove and free all stored remote OOB data entries for @hdev. */
void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
        struct oob_data *data, *n;

        list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
                list_del(&data->list);
                kfree(data);
        }
}
3538
/* Store (or update) remote OOB pairing data for an address.
 *
 * Either hash/randomizer pair may be NULL-ish; a pair is copied only
 * when both halves are provided, otherwise that pair is zeroed so stale
 * values never survive an update.  Returns 0 or -ENOMEM.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
                            u8 bdaddr_type, u8 *hash192, u8 *rand192,
                            u8 *hash256, u8 *rand256)
{
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
        if (!data) {
                data = kmalloc(sizeof(*data), GFP_KERNEL);
                if (!data)
                        return -ENOMEM;

                bacpy(&data->bdaddr, bdaddr);
                data->bdaddr_type = bdaddr_type;
                list_add(&data->list, &hdev->remote_oob_data);
        }

        if (hash192 && rand192) {
                memcpy(data->hash192, hash192, sizeof(data->hash192));
                memcpy(data->rand192, rand192, sizeof(data->rand192));
        } else {
                memset(data->hash192, 0, sizeof(data->hash192));
                memset(data->rand192, 0, sizeof(data->rand192));
        }

        if (hash256 && rand256) {
                memcpy(data->hash256, hash256, sizeof(data->hash256));
                memcpy(data->rand256, rand256, sizeof(data->rand256));
        } else {
                memset(data->hash256, 0, sizeof(data->hash256));
                memset(data->rand256, 0, sizeof(data->rand256));
        }

        BT_DBG("%s for %pMR", hdev->name, bdaddr);

        return 0;
}
3576
/* Find an entry in a generic bdaddr list (blacklist, whitelist, ...)
 * matching both address and address type.  Returns it or NULL.
 */
struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
                                           bdaddr_t *bdaddr, u8 type)
{
        struct bdaddr_list *b;

        list_for_each_entry(b, bdaddr_list, list) {
                if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
                        return b;
        }

        return NULL;
}
3589
dcc36c16 3590void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
b2a66aad
AJ
3591{
3592 struct list_head *p, *n;
3593
dcc36c16 3594 list_for_each_safe(p, n, bdaddr_list) {
b9ee0a78 3595 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
b2a66aad
AJ
3596
3597 list_del(p);
3598 kfree(b);
3599 }
b2a66aad
AJ
3600}
3601
/* Add an address/type pair to a generic bdaddr list.
 *
 * Returns 0 on success, -EBADF for the invalid BDADDR_ANY wildcard, and
 * -EEXIST when the pair is already on the list; -ENOMEM on allocation
 * failure.
 */
int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
        struct bdaddr_list *entry;

        if (!bacmp(bdaddr, BDADDR_ANY))
                return -EBADF;

        if (hci_bdaddr_list_lookup(list, bdaddr, type))
                return -EEXIST;

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        bacpy(&entry->bdaddr, bdaddr);
        entry->bdaddr_type = type;

        list_add(&entry->list, list);

        return 0;
}
3623
/* Remove an address/type pair from a generic bdaddr list.
 *
 * Passing BDADDR_ANY clears the whole list.  Returns 0 on success or
 * -ENOENT when the pair is not on the list.
 */
int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
        struct bdaddr_list *entry;

        if (!bacmp(bdaddr, BDADDR_ANY)) {
                hci_bdaddr_list_clear(list);
                return 0;
        }

        entry = hci_bdaddr_list_lookup(list, bdaddr, type);
        if (!entry)
                return -ENOENT;

        list_del(&entry->list);
        kfree(entry);

        return 0;
}
3642
15819a70
AG
/* This function requires the caller holds hdev->lock */
/* Look up stored LE connection parameters for an identity address.
 * Non-identity addresses are rejected up front since the list only
 * holds identity addresses.  Returns the entry or NULL.
 */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
                                               bdaddr_t *addr, u8 addr_type)
{
        struct hci_conn_params *params;

        /* The conn params list only contains identity addresses */
        if (!hci_is_identity_address(addr, addr_type))
                return NULL;

        list_for_each_entry(params, &hdev->le_conn_params, list) {
                if (bacmp(&params->addr, addr) == 0 &&
                    params->addr_type == addr_type) {
                        return params;
                }
        }

        return NULL;
}
3662
/* This function requires the caller holds hdev->lock */
/* Look up connection parameters on an "action" list (pend_le_conns or
 * pend_le_reports); iteration is over the ->action member rather than
 * the main ->list.  Returns the entry or NULL.
 */
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
                                                  bdaddr_t *addr, u8 addr_type)
{
        struct hci_conn_params *param;

        /* The list only contains identity addresses */
        if (!hci_is_identity_address(addr, addr_type))
                return NULL;

        list_for_each_entry(param, list, action) {
                if (bacmp(&param->addr, addr) == 0 &&
                    param->addr_type == addr_type)
                        return param;
        }

        return NULL;
}
3681
/* This function requires the caller holds hdev->lock */
/* Get-or-create LE connection parameters for an identity address.
 * An existing entry is returned as-is; a new one is initialised from the
 * controller-wide defaults with auto-connect disabled.  Returns NULL for
 * non-identity addresses or on allocation failure.
 */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
                                            bdaddr_t *addr, u8 addr_type)
{
        struct hci_conn_params *params;

        if (!hci_is_identity_address(addr, addr_type))
                return NULL;

        params = hci_conn_params_lookup(hdev, addr, addr_type);
        if (params)
                return params;

        params = kzalloc(sizeof(*params), GFP_KERNEL);
        if (!params) {
                BT_ERR("Out of memory");
                return NULL;
        }

        bacpy(&params->addr, addr);
        params->addr_type = addr_type;

        list_add(&params->list, &hdev->le_conn_params);
        INIT_LIST_HEAD(&params->action);

        /* Seed per-connection parameters from the controller defaults */
        params->conn_min_interval = hdev->le_conn_min_interval;
        params->conn_max_interval = hdev->le_conn_max_interval;
        params->conn_latency = hdev->le_conn_latency;
        params->supervision_timeout = hdev->le_supv_timeout;
        params->auto_connect = HCI_AUTO_CONN_DISABLED;

        BT_DBG("addr %pMR (type %u)", addr, addr_type);

        return params;
}
3717
/* Unlink and free one hci_conn_params entry.
 *
 * Drops the reference(s) held on an attached connection before removing
 * the entry from both the action list and the main list.
 */
static void hci_conn_params_free(struct hci_conn_params *params)
{
        if (params->conn) {
                hci_conn_drop(params->conn);
                hci_conn_put(params->conn);
        }

        list_del(&params->action);
        list_del(&params->list);
        kfree(params);
}
3729
/* This function requires the caller holds hdev->lock */
/* Delete the connection parameters for an address and re-evaluate the
 * background scan, since the removed entry may have been driving it.
 */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
        struct hci_conn_params *params;

        params = hci_conn_params_lookup(hdev, addr, addr_type);
        if (!params)
                return;

        hci_conn_params_free(params);

        hci_update_background_scan(hdev);

        BT_DBG("addr %pMR (type %u)", addr, addr_type);
}
3745
/* This function requires the caller holds hdev->lock */
/* Remove only those connection parameter entries whose auto-connect
 * policy is HCI_AUTO_CONN_DISABLED.  Note these are freed directly (not
 * via hci_conn_params_free()); disabled entries are presumably never on
 * an action list nor attached to a connection — confirm at call sites.
 */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
        struct hci_conn_params *params, *tmp;

        list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
                if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
                        continue;
                list_del(&params->list);
                kfree(params);
        }

        BT_DBG("All LE disabled connection parameters were removed");
}
3760
/* This function requires the caller holds hdev->lock */
/* Remove every stored LE connection parameter entry and re-evaluate the
 * background scan afterwards.
 */
void hci_conn_params_clear_all(struct hci_dev *hdev)
{
        struct hci_conn_params *params, *tmp;

        list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
                hci_conn_params_free(params);

        hci_update_background_scan(hdev);

        BT_DBG("All LE connection parameters were removed");
}
3773
4c87eaab 3774static void inquiry_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3775{
4c87eaab
AG
3776 if (status) {
3777 BT_ERR("Failed to start inquiry: status %d", status);
7ba8b4be 3778
4c87eaab
AG
3779 hci_dev_lock(hdev);
3780 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3781 hci_dev_unlock(hdev);
3782 return;
3783 }
7ba8b4be
AG
3784}
3785
/* Completion callback after LE scanning has been disabled.
 *
 * For LE-only discovery this simply marks discovery as stopped.  For
 * interleaved discovery it starts the BR/EDR inquiry phase (GIAC).
 * Other discovery types require no follow-up here.
 */
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
        /* General inquiry access code (GIAC) */
        u8 lap[3] = { 0x33, 0x8b, 0x9e };
        struct hci_request req;
        struct hci_cp_inquiry cp;
        int err;

        if (status) {
                BT_ERR("Failed to disable LE scanning: status %d", status);
                return;
        }

        switch (hdev->discovery.type) {
        case DISCOV_TYPE_LE:
                hci_dev_lock(hdev);
                hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
                hci_dev_unlock(hdev);
                break;

        case DISCOV_TYPE_INTERLEAVED:
                hci_req_init(&req, hdev);

                memset(&cp, 0, sizeof(cp));
                memcpy(&cp.lap, lap, sizeof(cp.lap));
                cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
                hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

                hci_dev_lock(hdev);

                /* Old inquiry results would otherwise leak into the new
                 * inquiry phase.
                 */
                hci_inquiry_cache_flush(hdev);

                err = hci_req_run(&req, inquiry_complete);
                if (err) {
                        BT_ERR("Inquiry request failed: err %d", err);
                        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
                }

                hci_dev_unlock(hdev);
                break;
        }
}
3828
7ba8b4be
AG
/* Delayed work that turns off LE scanning when the scan duration
 * expires.  Builds and runs a scan-disable request; the follow-up
 * (stop discovery or start inquiry) happens in the completion callback.
 */
static void le_scan_disable_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            le_scan_disable.work);
        struct hci_request req;
        int err;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_le_scan_disable(&req);

        err = hci_req_run(&req, le_scan_disable_work_complete);
        if (err)
                BT_ERR("Disable LE scanning request failed: err %d", err);
}
3846
a1f4c318
JH
/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead
 * (HCI_FORCE_STATIC_ADDR debug flag).
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
                               u8 *bdaddr_type)
{
        if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
            !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
                bacpy(bdaddr, &hdev->static_addr);
                *bdaddr_type = ADDR_LE_DEV_RANDOM;
        } else {
                bacpy(bdaddr, &hdev->bdaddr);
                *bdaddr_type = ADDR_LE_DEV_PUBLIC;
        }
}
3868
9be0dab7
DH
/* Alloc HCI device.
 *
 * Allocates and fully initialises a struct hci_dev: protocol defaults,
 * LE parameter defaults, all key/parameter lists, work items, packet
 * queues and sysfs/discovery state.  Returns the new device or NULL.
 * The caller registers it with hci_register_dev() and releases it with
 * hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
        struct hci_dev *hdev;

        hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
        if (!hdev)
                return NULL;

        hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->esco_type = (ESCO_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);
        hdev->num_iac = 0x01;		/* One IAC support is mandatory */
        hdev->io_capability = 0x03;	/* No Input No Output */
        hdev->manufacturer = 0xffff;	/* Default to internal use */
        hdev->inq_tx_power = HCI_TX_POWER_INVALID;
        hdev->adv_tx_power = HCI_TX_POWER_INVALID;

        /* Sniff interval bounds; presumably in baseband slots (0.625 ms)
         * per the HCI sniff-mode command units — TODO confirm.
         */
        hdev->sniff_max_interval = 800;
        hdev->sniff_min_interval = 80;

        /* LE defaults (advertising/scan/connection parameters) */
        hdev->le_adv_channel_map = 0x07;
        hdev->le_adv_min_interval = 0x0800;
        hdev->le_adv_max_interval = 0x0800;
        hdev->le_scan_interval = 0x0060;
        hdev->le_scan_window = 0x0030;
        hdev->le_conn_min_interval = 0x0028;
        hdev->le_conn_max_interval = 0x0038;
        hdev->le_conn_latency = 0x0000;
        hdev->le_supv_timeout = 0x002a;

        hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
        hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
        hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
        hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

        mutex_init(&hdev->lock);
        mutex_init(&hdev->req_lock);

        INIT_LIST_HEAD(&hdev->mgmt_pending);
        INIT_LIST_HEAD(&hdev->blacklist);
        INIT_LIST_HEAD(&hdev->whitelist);
        INIT_LIST_HEAD(&hdev->uuids);
        INIT_LIST_HEAD(&hdev->link_keys);
        INIT_LIST_HEAD(&hdev->long_term_keys);
        INIT_LIST_HEAD(&hdev->identity_resolving_keys);
        INIT_LIST_HEAD(&hdev->remote_oob_data);
        INIT_LIST_HEAD(&hdev->le_white_list);
        INIT_LIST_HEAD(&hdev->le_conn_params);
        INIT_LIST_HEAD(&hdev->pend_le_conns);
        INIT_LIST_HEAD(&hdev->pend_le_reports);
        INIT_LIST_HEAD(&hdev->conn_hash.list);

        INIT_WORK(&hdev->rx_work, hci_rx_work);
        INIT_WORK(&hdev->cmd_work, hci_cmd_work);
        INIT_WORK(&hdev->tx_work, hci_tx_work);
        INIT_WORK(&hdev->power_on, hci_power_on);

        INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
        INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
        INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

        skb_queue_head_init(&hdev->rx_q);
        skb_queue_head_init(&hdev->cmd_q);
        skb_queue_head_init(&hdev->raw_q);

        init_waitqueue_head(&hdev->req_wait_q);

        INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

        hci_init_sysfs(hdev);
        discovery_init(hdev);

        return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
3945
/* Free HCI device.
 *
 * Drops the final device reference; the actual memory is released by the
 * device's release callback.
 */
void hci_free_dev(struct hci_dev *hdev)
{
        /* will free via device release */
        put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
3953
1da177e4
LT
/* Register HCI device.
 *
 * Validates the mandatory driver callbacks, allocates an index, creates
 * the per-device workqueues, sysfs/debugfs entries and rfkill hook, adds
 * the device to the global list and queues the initial power-on work.
 * Returns the assigned index on success or a negative errno; on failure
 * all resources acquired so far are unwound in reverse order.
 */
int hci_register_dev(struct hci_dev *hdev)
{
        int id, error;

        /* open/close/send are the minimum a transport driver must provide */
        if (!hdev->open || !hdev->close || !hdev->send)
                return -EINVAL;

        /* Do not allow HCI_AMP devices to register at index 0,
         * so the index can be used as the AMP controller ID.
         */
        switch (hdev->dev_type) {
        case HCI_BREDR:
                id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
                break;
        case HCI_AMP:
                id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
                break;
        default:
                return -EINVAL;
        }

        if (id < 0)
                return id;

        sprintf(hdev->name, "hci%d", id);
        hdev->id = id;

        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

        hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
                                          WQ_MEM_RECLAIM, 1, hdev->name);
        if (!hdev->workqueue) {
                error = -ENOMEM;
                goto err;
        }

        hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
                                              WQ_MEM_RECLAIM, 1, hdev->name);
        if (!hdev->req_workqueue) {
                destroy_workqueue(hdev->workqueue);
                error = -ENOMEM;
                goto err;
        }

        if (!IS_ERR_OR_NULL(bt_debugfs))
                hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

        dev_set_name(&hdev->dev, "%s", hdev->name);

        error = device_add(&hdev->dev);
        if (error < 0)
                goto err_wqueue;

        /* rfkill is best-effort: registration failure just disables it */
        hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
                                    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
                                    hdev);
        if (hdev->rfkill) {
                if (rfkill_register(hdev->rfkill) < 0) {
                        rfkill_destroy(hdev->rfkill);
                        hdev->rfkill = NULL;
                }
        }

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
                set_bit(HCI_RFKILLED, &hdev->dev_flags);

        set_bit(HCI_SETUP, &hdev->dev_flags);
        set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

        if (hdev->dev_type == HCI_BREDR) {
                /* Assume BR/EDR support until proven otherwise (such as
                 * through reading supported features during init.
                 */
                set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
        }

        write_lock(&hci_dev_list_lock);
        list_add(&hdev->list, &hci_dev_list);
        write_unlock(&hci_dev_list_lock);

        /* Devices that are marked for raw-only usage are unconfigured
         * and should not be included in normal operation.
         */
        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

        hci_notify(hdev, HCI_DEV_REG);
        hci_dev_hold(hdev);

        queue_work(hdev->req_workqueue, &hdev->power_on);

        return id;

err_wqueue:
        destroy_workqueue(hdev->workqueue);
        destroy_workqueue(hdev->req_workqueue);
err:
        ida_simple_remove(&hci_index_ida, hdev->id);

        return error;
}
EXPORT_SYMBOL(hci_register_dev);
4057
/* Unregister HCI device.
 *
 * Reverses hci_register_dev(): removes the device from the global list,
 * closes it, tears down mgmt/rfkill/smp/sysfs/debugfs state, empties all
 * key and parameter lists, and finally drops the registration reference
 * and releases the index.  Ordering matters throughout.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
        int i, id;

        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

        set_bit(HCI_UNREGISTER, &hdev->dev_flags);

        id = hdev->id;

        write_lock(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock(&hci_dev_list_lock);

        hci_dev_do_close(hdev);

        for (i = 0; i < NUM_REASSEMBLY; i++)
                kfree_skb(hdev->reassembly[i]);

        cancel_work_sync(&hdev->power_on);

        /* Skip the mgmt notification while still in setup/config phases */
        if (!test_bit(HCI_INIT, &hdev->flags) &&
            !test_bit(HCI_SETUP, &hdev->dev_flags) &&
            !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
                hci_dev_lock(hdev);
                mgmt_index_removed(hdev);
                hci_dev_unlock(hdev);
        }

        /* mgmt_index_removed should take care of emptying the
         * pending list */
        BUG_ON(!list_empty(&hdev->mgmt_pending));

        hci_notify(hdev, HCI_DEV_UNREG);

        if (hdev->rfkill) {
                rfkill_unregister(hdev->rfkill);
                rfkill_destroy(hdev->rfkill);
        }

        smp_unregister(hdev);

        device_del(&hdev->dev);

        debugfs_remove_recursive(hdev->debugfs);

        destroy_workqueue(hdev->workqueue);
        destroy_workqueue(hdev->req_workqueue);

        hci_dev_lock(hdev);
        hci_bdaddr_list_clear(&hdev->blacklist);
        hci_bdaddr_list_clear(&hdev->whitelist);
        hci_uuids_clear(hdev);
        hci_link_keys_clear(hdev);
        hci_smp_ltks_clear(hdev);
        hci_smp_irks_clear(hdev);
        hci_remote_oob_data_clear(hdev);
        hci_bdaddr_list_clear(&hdev->le_white_list);
        hci_conn_params_clear_all(hdev);
        hci_discovery_filter_clear(hdev);
        hci_dev_unlock(hdev);

        hci_dev_put(hdev);

        ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
4126
/* Suspend HCI device: notify registered listeners; always succeeds. */
int hci_suspend_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_SUSPEND);
        return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
4134
/* Resume HCI device: notify registered listeners; always succeeds. */
int hci_resume_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_RESUME);
        return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
4142
75e0569f
MH
4143/* Reset HCI device */
4144int hci_reset_dev(struct hci_dev *hdev)
4145{
4146 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4147 struct sk_buff *skb;
4148
4149 skb = bt_skb_alloc(3, GFP_ATOMIC);
4150 if (!skb)
4151 return -ENOMEM;
4152
4153 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
4154 memcpy(skb_put(skb, 3), hw_err, 3);
4155
4156 /* Send Hardware Error to upper stack */
4157 return hci_recv_frame(hdev, skb);
4158}
4159EXPORT_SYMBOL(hci_reset_dev);
4160
76bca880 4161/* Receive frame from HCI drivers */
e1a26170 4162int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 4163{
76bca880 4164 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 4165 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
4166 kfree_skb(skb);
4167 return -ENXIO;
4168 }
4169
d82603c6 4170 /* Incoming skb */
76bca880
MH
4171 bt_cb(skb)->incoming = 1;
4172
4173 /* Time stamp */
4174 __net_timestamp(skb);
4175
76bca880 4176 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 4177 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 4178
76bca880
MH
4179 return 0;
4180}
4181EXPORT_SYMBOL(hci_recv_frame);
4182
33e882a5 4183static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 4184 int count, __u8 index)
33e882a5
SS
4185{
4186 int len = 0;
4187 int hlen = 0;
4188 int remain = count;
4189 struct sk_buff *skb;
4190 struct bt_skb_cb *scb;
4191
4192 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 4193 index >= NUM_REASSEMBLY)
33e882a5
SS
4194 return -EILSEQ;
4195
4196 skb = hdev->reassembly[index];
4197
4198 if (!skb) {
4199 switch (type) {
4200 case HCI_ACLDATA_PKT:
4201 len = HCI_MAX_FRAME_SIZE;
4202 hlen = HCI_ACL_HDR_SIZE;
4203 break;
4204 case HCI_EVENT_PKT:
4205 len = HCI_MAX_EVENT_SIZE;
4206 hlen = HCI_EVENT_HDR_SIZE;
4207 break;
4208 case HCI_SCODATA_PKT:
4209 len = HCI_MAX_SCO_SIZE;
4210 hlen = HCI_SCO_HDR_SIZE;
4211 break;
4212 }
4213
1e429f38 4214 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
4215 if (!skb)
4216 return -ENOMEM;
4217
4218 scb = (void *) skb->cb;
4219 scb->expect = hlen;
4220 scb->pkt_type = type;
4221
33e882a5
SS
4222 hdev->reassembly[index] = skb;
4223 }
4224
4225 while (count) {
4226 scb = (void *) skb->cb;
89bb46d0 4227 len = min_t(uint, scb->expect, count);
33e882a5
SS
4228
4229 memcpy(skb_put(skb, len), data, len);
4230
4231 count -= len;
4232 data += len;
4233 scb->expect -= len;
4234 remain = count;
4235
4236 switch (type) {
4237 case HCI_EVENT_PKT:
4238 if (skb->len == HCI_EVENT_HDR_SIZE) {
4239 struct hci_event_hdr *h = hci_event_hdr(skb);
4240 scb->expect = h->plen;
4241
4242 if (skb_tailroom(skb) < scb->expect) {
4243 kfree_skb(skb);
4244 hdev->reassembly[index] = NULL;
4245 return -ENOMEM;
4246 }
4247 }
4248 break;
4249
4250 case HCI_ACLDATA_PKT:
4251 if (skb->len == HCI_ACL_HDR_SIZE) {
4252 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4253 scb->expect = __le16_to_cpu(h->dlen);
4254
4255 if (skb_tailroom(skb) < scb->expect) {
4256 kfree_skb(skb);
4257 hdev->reassembly[index] = NULL;
4258 return -ENOMEM;
4259 }
4260 }
4261 break;
4262
4263 case HCI_SCODATA_PKT:
4264 if (skb->len == HCI_SCO_HDR_SIZE) {
4265 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4266 scb->expect = h->dlen;
4267
4268 if (skb_tailroom(skb) < scb->expect) {
4269 kfree_skb(skb);
4270 hdev->reassembly[index] = NULL;
4271 return -ENOMEM;
4272 }
4273 }
4274 break;
4275 }
4276
4277 if (scb->expect == 0) {
4278 /* Complete frame */
4279
4280 bt_cb(skb)->pkt_type = type;
e1a26170 4281 hci_recv_frame(hdev, skb);
33e882a5
SS
4282
4283 hdev->reassembly[index] = NULL;
4284 return remain;
4285 }
4286 }
4287
4288 return remain;
4289}
4290
99811510
SS
4291#define STREAM_REASSEMBLY 0
4292
4293int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4294{
4295 int type;
4296 int rem = 0;
4297
da5f6c37 4298 while (count) {
99811510
SS
4299 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4300
4301 if (!skb) {
4302 struct { char type; } *pkt;
4303
4304 /* Start of the frame */
4305 pkt = data;
4306 type = pkt->type;
4307
4308 data++;
4309 count--;
4310 } else
4311 type = bt_cb(skb)->pkt_type;
4312
1e429f38 4313 rem = hci_reassembly(hdev, type, data, count,
a8c5fb1a 4314 STREAM_REASSEMBLY);
99811510
SS
4315 if (rem < 0)
4316 return rem;
4317
4318 data += (count - rem);
4319 count = rem;
f81c6224 4320 }
99811510
SS
4321
4322 return rem;
4323}
4324EXPORT_SYMBOL(hci_recv_stream_fragment);
4325
1da177e4
LT
4326/* ---- Interface to upper protocols ---- */
4327
1da177e4
LT
4328int hci_register_cb(struct hci_cb *cb)
4329{
4330 BT_DBG("%p name %s", cb, cb->name);
4331
f20d09d5 4332 write_lock(&hci_cb_list_lock);
1da177e4 4333 list_add(&cb->list, &hci_cb_list);
f20d09d5 4334 write_unlock(&hci_cb_list_lock);
1da177e4
LT
4335
4336 return 0;
4337}
4338EXPORT_SYMBOL(hci_register_cb);
4339
4340int hci_unregister_cb(struct hci_cb *cb)
4341{
4342 BT_DBG("%p name %s", cb, cb->name);
4343
f20d09d5 4344 write_lock(&hci_cb_list_lock);
1da177e4 4345 list_del(&cb->list);
f20d09d5 4346 write_unlock(&hci_cb_list_lock);
1da177e4
LT
4347
4348 return 0;
4349}
4350EXPORT_SYMBOL(hci_unregister_cb);
4351
51086991 4352static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4 4353{
cdc52faa
MH
4354 int err;
4355
0d48d939 4356 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 4357
cd82e61c
MH
4358 /* Time stamp */
4359 __net_timestamp(skb);
1da177e4 4360
cd82e61c
MH
4361 /* Send copy to monitor */
4362 hci_send_to_monitor(hdev, skb);
4363
4364 if (atomic_read(&hdev->promisc)) {
4365 /* Send copy to the sockets */
470fe1b5 4366 hci_send_to_sock(hdev, skb);
1da177e4
LT
4367 }
4368
4369 /* Get rid of skb owner, prior to sending to the driver. */
4370 skb_orphan(skb);
4371
cdc52faa
MH
4372 err = hdev->send(hdev, skb);
4373 if (err < 0) {
4374 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4375 kfree_skb(skb);
4376 }
1da177e4
LT
4377}
4378
899de765
MH
4379bool hci_req_pending(struct hci_dev *hdev)
4380{
4381 return (hdev->req_status == HCI_REQ_PEND);
4382}
4383
1ca3a9d0 4384/* Send HCI command */
07dc93dd
JH
4385int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4386 const void *param)
1ca3a9d0
JH
4387{
4388 struct sk_buff *skb;
4389
4390 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4391
4392 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4393 if (!skb) {
4394 BT_ERR("%s no memory for command", hdev->name);
4395 return -ENOMEM;
4396 }
4397
49c922bb 4398 /* Stand-alone HCI commands must be flagged as
11714b3d
JH
4399 * single-command requests.
4400 */
4401 bt_cb(skb)->req.start = true;
4402
1da177e4 4403 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 4404 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
4405
4406 return 0;
4407}
1da177e4
LT
4408
4409/* Get data from the previously sent command */
a9de9248 4410void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
4411{
4412 struct hci_command_hdr *hdr;
4413
4414 if (!hdev->sent_cmd)
4415 return NULL;
4416
4417 hdr = (void *) hdev->sent_cmd->data;
4418
a9de9248 4419 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
4420 return NULL;
4421
f0e09510 4422 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
4423
4424 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4425}
4426
4427/* Send ACL data */
4428static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4429{
4430 struct hci_acl_hdr *hdr;
4431 int len = skb->len;
4432
badff6d0
ACM
4433 skb_push(skb, HCI_ACL_HDR_SIZE);
4434 skb_reset_transport_header(skb);
9c70220b 4435 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
4436 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4437 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
4438}
4439
ee22be7e 4440static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 4441 struct sk_buff *skb, __u16 flags)
1da177e4 4442{
ee22be7e 4443 struct hci_conn *conn = chan->conn;
1da177e4
LT
4444 struct hci_dev *hdev = conn->hdev;
4445 struct sk_buff *list;
4446
087bfd99
GP
4447 skb->len = skb_headlen(skb);
4448 skb->data_len = 0;
4449
4450 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
4451
4452 switch (hdev->dev_type) {
4453 case HCI_BREDR:
4454 hci_add_acl_hdr(skb, conn->handle, flags);
4455 break;
4456 case HCI_AMP:
4457 hci_add_acl_hdr(skb, chan->handle, flags);
4458 break;
4459 default:
4460 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4461 return;
4462 }
087bfd99 4463
70f23020
AE
4464 list = skb_shinfo(skb)->frag_list;
4465 if (!list) {
1da177e4
LT
4466 /* Non fragmented */
4467 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4468
73d80deb 4469 skb_queue_tail(queue, skb);
1da177e4
LT
4470 } else {
4471 /* Fragmented */
4472 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4473
4474 skb_shinfo(skb)->frag_list = NULL;
4475
9cfd5a23
JR
4476 /* Queue all fragments atomically. We need to use spin_lock_bh
4477 * here because of 6LoWPAN links, as there this function is
4478 * called from softirq and using normal spin lock could cause
4479 * deadlocks.
4480 */
4481 spin_lock_bh(&queue->lock);
1da177e4 4482
73d80deb 4483 __skb_queue_tail(queue, skb);
e702112f
AE
4484
4485 flags &= ~ACL_START;
4486 flags |= ACL_CONT;
1da177e4
LT
4487 do {
4488 skb = list; list = list->next;
8e87d142 4489
0d48d939 4490 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 4491 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
4492
4493 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4494
73d80deb 4495 __skb_queue_tail(queue, skb);
1da177e4
LT
4496 } while (list);
4497
9cfd5a23 4498 spin_unlock_bh(&queue->lock);
1da177e4 4499 }
73d80deb
LAD
4500}
4501
4502void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4503{
ee22be7e 4504 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 4505
f0e09510 4506 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb 4507
ee22be7e 4508 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 4509
3eff45ea 4510 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4511}
1da177e4
LT
4512
4513/* Send SCO data */
0d861d8b 4514void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
4515{
4516 struct hci_dev *hdev = conn->hdev;
4517 struct hci_sco_hdr hdr;
4518
4519 BT_DBG("%s len %d", hdev->name, skb->len);
4520
aca3192c 4521 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
4522 hdr.dlen = skb->len;
4523
badff6d0
ACM
4524 skb_push(skb, HCI_SCO_HDR_SIZE);
4525 skb_reset_transport_header(skb);
9c70220b 4526 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4 4527
0d48d939 4528 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 4529
1da177e4 4530 skb_queue_tail(&conn->data_q, skb);
3eff45ea 4531 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4532}
1da177e4
LT
4533
4534/* ---- HCI TX task (outgoing data) ---- */
4535
4536/* HCI Connection scheduler */
6039aa73
GP
4537static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4538 int *quote)
1da177e4
LT
4539{
4540 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4541 struct hci_conn *conn = NULL, *c;
abc5de8f 4542 unsigned int num = 0, min = ~0;
1da177e4 4543
8e87d142 4544 /* We don't have to lock device here. Connections are always
1da177e4 4545 * added and removed with TX task disabled. */
bf4c6325
GP
4546
4547 rcu_read_lock();
4548
4549 list_for_each_entry_rcu(c, &h->list, list) {
769be974 4550 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 4551 continue;
769be974
MH
4552
4553 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4554 continue;
4555
1da177e4
LT
4556 num++;
4557
4558 if (c->sent < min) {
4559 min = c->sent;
4560 conn = c;
4561 }
52087a79
LAD
4562
4563 if (hci_conn_num(hdev, type) == num)
4564 break;
1da177e4
LT
4565 }
4566
bf4c6325
GP
4567 rcu_read_unlock();
4568
1da177e4 4569 if (conn) {
6ed58ec5
VT
4570 int cnt, q;
4571
4572 switch (conn->type) {
4573 case ACL_LINK:
4574 cnt = hdev->acl_cnt;
4575 break;
4576 case SCO_LINK:
4577 case ESCO_LINK:
4578 cnt = hdev->sco_cnt;
4579 break;
4580 case LE_LINK:
4581 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4582 break;
4583 default:
4584 cnt = 0;
4585 BT_ERR("Unknown link type");
4586 }
4587
4588 q = cnt / num;
1da177e4
LT
4589 *quote = q ? q : 1;
4590 } else
4591 *quote = 0;
4592
4593 BT_DBG("conn %p quote %d", conn, *quote);
4594 return conn;
4595}
4596
6039aa73 4597static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
4598{
4599 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4600 struct hci_conn *c;
1da177e4 4601
bae1f5d9 4602 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 4603
bf4c6325
GP
4604 rcu_read_lock();
4605
1da177e4 4606 /* Kill stalled connections */
bf4c6325 4607 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 4608 if (c->type == type && c->sent) {
6ed93dc6
AE
4609 BT_ERR("%s killing stalled connection %pMR",
4610 hdev->name, &c->dst);
bed71748 4611 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
4612 }
4613 }
bf4c6325
GP
4614
4615 rcu_read_unlock();
1da177e4
LT
4616}
4617
6039aa73
GP
4618static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4619 int *quote)
1da177e4 4620{
73d80deb
LAD
4621 struct hci_conn_hash *h = &hdev->conn_hash;
4622 struct hci_chan *chan = NULL;
abc5de8f 4623 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 4624 struct hci_conn *conn;
73d80deb
LAD
4625 int cnt, q, conn_num = 0;
4626
4627 BT_DBG("%s", hdev->name);
4628
bf4c6325
GP
4629 rcu_read_lock();
4630
4631 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
4632 struct hci_chan *tmp;
4633
4634 if (conn->type != type)
4635 continue;
4636
4637 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4638 continue;
4639
4640 conn_num++;
4641
8192edef 4642 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
4643 struct sk_buff *skb;
4644
4645 if (skb_queue_empty(&tmp->data_q))
4646 continue;
4647
4648 skb = skb_peek(&tmp->data_q);
4649 if (skb->priority < cur_prio)
4650 continue;
4651
4652 if (skb->priority > cur_prio) {
4653 num = 0;
4654 min = ~0;
4655 cur_prio = skb->priority;
4656 }
4657
4658 num++;
4659
4660 if (conn->sent < min) {
4661 min = conn->sent;
4662 chan = tmp;
4663 }
4664 }
4665
4666 if (hci_conn_num(hdev, type) == conn_num)
4667 break;
4668 }
4669
bf4c6325
GP
4670 rcu_read_unlock();
4671
73d80deb
LAD
4672 if (!chan)
4673 return NULL;
4674
4675 switch (chan->conn->type) {
4676 case ACL_LINK:
4677 cnt = hdev->acl_cnt;
4678 break;
bd1eb66b
AE
4679 case AMP_LINK:
4680 cnt = hdev->block_cnt;
4681 break;
73d80deb
LAD
4682 case SCO_LINK:
4683 case ESCO_LINK:
4684 cnt = hdev->sco_cnt;
4685 break;
4686 case LE_LINK:
4687 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4688 break;
4689 default:
4690 cnt = 0;
4691 BT_ERR("Unknown link type");
4692 }
4693
4694 q = cnt / num;
4695 *quote = q ? q : 1;
4696 BT_DBG("chan %p quote %d", chan, *quote);
4697 return chan;
4698}
4699
02b20f0b
LAD
4700static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4701{
4702 struct hci_conn_hash *h = &hdev->conn_hash;
4703 struct hci_conn *conn;
4704 int num = 0;
4705
4706 BT_DBG("%s", hdev->name);
4707
bf4c6325
GP
4708 rcu_read_lock();
4709
4710 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
4711 struct hci_chan *chan;
4712
4713 if (conn->type != type)
4714 continue;
4715
4716 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4717 continue;
4718
4719 num++;
4720
8192edef 4721 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
4722 struct sk_buff *skb;
4723
4724 if (chan->sent) {
4725 chan->sent = 0;
4726 continue;
4727 }
4728
4729 if (skb_queue_empty(&chan->data_q))
4730 continue;
4731
4732 skb = skb_peek(&chan->data_q);
4733 if (skb->priority >= HCI_PRIO_MAX - 1)
4734 continue;
4735
4736 skb->priority = HCI_PRIO_MAX - 1;
4737
4738 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 4739 skb->priority);
02b20f0b
LAD
4740 }
4741
4742 if (hci_conn_num(hdev, type) == num)
4743 break;
4744 }
bf4c6325
GP
4745
4746 rcu_read_unlock();
4747
02b20f0b
LAD
4748}
4749
b71d385a
AE
4750static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4751{
4752 /* Calculate count of blocks used by this packet */
4753 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4754}
4755
6039aa73 4756static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 4757{
4a964404 4758 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1da177e4
LT
4759 /* ACL tx timeout must be longer than maximum
4760 * link supervision timeout (40.9 seconds) */
63d2bc1b 4761 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 4762 HCI_ACL_TX_TIMEOUT))
bae1f5d9 4763 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 4764 }
63d2bc1b 4765}
1da177e4 4766
6039aa73 4767static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
4768{
4769 unsigned int cnt = hdev->acl_cnt;
4770 struct hci_chan *chan;
4771 struct sk_buff *skb;
4772 int quote;
4773
4774 __check_timeout(hdev, cnt);
04837f64 4775
73d80deb 4776 while (hdev->acl_cnt &&
a8c5fb1a 4777 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
4778 u32 priority = (skb_peek(&chan->data_q))->priority;
4779 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4780 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4781 skb->len, skb->priority);
73d80deb 4782
ec1cce24
LAD
4783 /* Stop if priority has changed */
4784 if (skb->priority < priority)
4785 break;
4786
4787 skb = skb_dequeue(&chan->data_q);
4788
73d80deb 4789 hci_conn_enter_active_mode(chan->conn,
04124681 4790 bt_cb(skb)->force_active);
04837f64 4791
57d17d70 4792 hci_send_frame(hdev, skb);
1da177e4
LT
4793 hdev->acl_last_tx = jiffies;
4794
4795 hdev->acl_cnt--;
73d80deb
LAD
4796 chan->sent++;
4797 chan->conn->sent++;
1da177e4
LT
4798 }
4799 }
02b20f0b
LAD
4800
4801 if (cnt != hdev->acl_cnt)
4802 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
4803}
4804
6039aa73 4805static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 4806{
63d2bc1b 4807 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
4808 struct hci_chan *chan;
4809 struct sk_buff *skb;
4810 int quote;
bd1eb66b 4811 u8 type;
b71d385a 4812
63d2bc1b 4813 __check_timeout(hdev, cnt);
b71d385a 4814
bd1eb66b
AE
4815 BT_DBG("%s", hdev->name);
4816
4817 if (hdev->dev_type == HCI_AMP)
4818 type = AMP_LINK;
4819 else
4820 type = ACL_LINK;
4821
b71d385a 4822 while (hdev->block_cnt > 0 &&
bd1eb66b 4823 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
4824 u32 priority = (skb_peek(&chan->data_q))->priority;
4825 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4826 int blocks;
4827
4828 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4829 skb->len, skb->priority);
b71d385a
AE
4830
4831 /* Stop if priority has changed */
4832 if (skb->priority < priority)
4833 break;
4834
4835 skb = skb_dequeue(&chan->data_q);
4836
4837 blocks = __get_blocks(hdev, skb);
4838 if (blocks > hdev->block_cnt)
4839 return;
4840
4841 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 4842 bt_cb(skb)->force_active);
b71d385a 4843
57d17d70 4844 hci_send_frame(hdev, skb);
b71d385a
AE
4845 hdev->acl_last_tx = jiffies;
4846
4847 hdev->block_cnt -= blocks;
4848 quote -= blocks;
4849
4850 chan->sent += blocks;
4851 chan->conn->sent += blocks;
4852 }
4853 }
4854
4855 if (cnt != hdev->block_cnt)
bd1eb66b 4856 hci_prio_recalculate(hdev, type);
b71d385a
AE
4857}
4858
6039aa73 4859static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
4860{
4861 BT_DBG("%s", hdev->name);
4862
bd1eb66b
AE
4863 /* No ACL link over BR/EDR controller */
4864 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4865 return;
4866
4867 /* No AMP link over AMP controller */
4868 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
4869 return;
4870
4871 switch (hdev->flow_ctl_mode) {
4872 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4873 hci_sched_acl_pkt(hdev);
4874 break;
4875
4876 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4877 hci_sched_acl_blk(hdev);
4878 break;
4879 }
4880}
4881
1da177e4 4882/* Schedule SCO */
6039aa73 4883static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
4884{
4885 struct hci_conn *conn;
4886 struct sk_buff *skb;
4887 int quote;
4888
4889 BT_DBG("%s", hdev->name);
4890
52087a79
LAD
4891 if (!hci_conn_num(hdev, SCO_LINK))
4892 return;
4893
1da177e4
LT
4894 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4895 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4896 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4897 hci_send_frame(hdev, skb);
1da177e4
LT
4898
4899 conn->sent++;
4900 if (conn->sent == ~0)
4901 conn->sent = 0;
4902 }
4903 }
4904}
4905
6039aa73 4906static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
4907{
4908 struct hci_conn *conn;
4909 struct sk_buff *skb;
4910 int quote;
4911
4912 BT_DBG("%s", hdev->name);
4913
52087a79
LAD
4914 if (!hci_conn_num(hdev, ESCO_LINK))
4915 return;
4916
8fc9ced3
GP
4917 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4918 &quote))) {
b6a0dc82
MH
4919 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4920 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4921 hci_send_frame(hdev, skb);
b6a0dc82
MH
4922
4923 conn->sent++;
4924 if (conn->sent == ~0)
4925 conn->sent = 0;
4926 }
4927 }
4928}
4929
6039aa73 4930static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 4931{
73d80deb 4932 struct hci_chan *chan;
6ed58ec5 4933 struct sk_buff *skb;
02b20f0b 4934 int quote, cnt, tmp;
6ed58ec5
VT
4935
4936 BT_DBG("%s", hdev->name);
4937
52087a79
LAD
4938 if (!hci_conn_num(hdev, LE_LINK))
4939 return;
4940
4a964404 4941 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
6ed58ec5
VT
4942 /* LE tx timeout must be longer than maximum
4943 * link supervision timeout (40.9 seconds) */
bae1f5d9 4944 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 4945 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 4946 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
4947 }
4948
4949 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 4950 tmp = cnt;
73d80deb 4951 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
4952 u32 priority = (skb_peek(&chan->data_q))->priority;
4953 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4954 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4955 skb->len, skb->priority);
6ed58ec5 4956
ec1cce24
LAD
4957 /* Stop if priority has changed */
4958 if (skb->priority < priority)
4959 break;
4960
4961 skb = skb_dequeue(&chan->data_q);
4962
57d17d70 4963 hci_send_frame(hdev, skb);
6ed58ec5
VT
4964 hdev->le_last_tx = jiffies;
4965
4966 cnt--;
73d80deb
LAD
4967 chan->sent++;
4968 chan->conn->sent++;
6ed58ec5
VT
4969 }
4970 }
73d80deb 4971
6ed58ec5
VT
4972 if (hdev->le_pkts)
4973 hdev->le_cnt = cnt;
4974 else
4975 hdev->acl_cnt = cnt;
02b20f0b
LAD
4976
4977 if (cnt != tmp)
4978 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
4979}
4980
3eff45ea 4981static void hci_tx_work(struct work_struct *work)
1da177e4 4982{
3eff45ea 4983 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
4984 struct sk_buff *skb;
4985
6ed58ec5 4986 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 4987 hdev->sco_cnt, hdev->le_cnt);
1da177e4 4988
52de599e
MH
4989 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4990 /* Schedule queues and send stuff to HCI driver */
4991 hci_sched_acl(hdev);
4992 hci_sched_sco(hdev);
4993 hci_sched_esco(hdev);
4994 hci_sched_le(hdev);
4995 }
6ed58ec5 4996
1da177e4
LT
4997 /* Send next queued raw (unknown type) packet */
4998 while ((skb = skb_dequeue(&hdev->raw_q)))
57d17d70 4999 hci_send_frame(hdev, skb);
1da177e4
LT
5000}
5001
25985edc 5002/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
5003
5004/* ACL data packet */
6039aa73 5005static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
5006{
5007 struct hci_acl_hdr *hdr = (void *) skb->data;
5008 struct hci_conn *conn;
5009 __u16 handle, flags;
5010
5011 skb_pull(skb, HCI_ACL_HDR_SIZE);
5012
5013 handle = __le16_to_cpu(hdr->handle);
5014 flags = hci_flags(handle);
5015 handle = hci_handle(handle);
5016
f0e09510 5017 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 5018 handle, flags);
1da177e4
LT
5019
5020 hdev->stat.acl_rx++;
5021
5022 hci_dev_lock(hdev);
5023 conn = hci_conn_hash_lookup_handle(hdev, handle);
5024 hci_dev_unlock(hdev);
8e87d142 5025
1da177e4 5026 if (conn) {
65983fc7 5027 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 5028
1da177e4 5029 /* Send to upper protocol */
686ebf28
UF
5030 l2cap_recv_acldata(conn, skb, flags);
5031 return;
1da177e4 5032 } else {
8e87d142 5033 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 5034 hdev->name, handle);
1da177e4
LT
5035 }
5036
5037 kfree_skb(skb);
5038}
5039
5040/* SCO data packet */
6039aa73 5041static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
5042{
5043 struct hci_sco_hdr *hdr = (void *) skb->data;
5044 struct hci_conn *conn;
5045 __u16 handle;
5046
5047 skb_pull(skb, HCI_SCO_HDR_SIZE);
5048
5049 handle = __le16_to_cpu(hdr->handle);
5050
f0e09510 5051 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
5052
5053 hdev->stat.sco_rx++;
5054
5055 hci_dev_lock(hdev);
5056 conn = hci_conn_hash_lookup_handle(hdev, handle);
5057 hci_dev_unlock(hdev);
5058
5059 if (conn) {
1da177e4 5060 /* Send to upper protocol */
686ebf28
UF
5061 sco_recv_scodata(conn, skb);
5062 return;
1da177e4 5063 } else {
8e87d142 5064 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 5065 hdev->name, handle);
1da177e4
LT
5066 }
5067
5068 kfree_skb(skb);
5069}
5070
9238f36a
JH
5071static bool hci_req_is_complete(struct hci_dev *hdev)
5072{
5073 struct sk_buff *skb;
5074
5075 skb = skb_peek(&hdev->cmd_q);
5076 if (!skb)
5077 return true;
5078
5079 return bt_cb(skb)->req.start;
5080}
5081
42c6b129
JH
5082static void hci_resend_last(struct hci_dev *hdev)
5083{
5084 struct hci_command_hdr *sent;
5085 struct sk_buff *skb;
5086 u16 opcode;
5087
5088 if (!hdev->sent_cmd)
5089 return;
5090
5091 sent = (void *) hdev->sent_cmd->data;
5092 opcode = __le16_to_cpu(sent->opcode);
5093 if (opcode == HCI_OP_RESET)
5094 return;
5095
5096 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5097 if (!skb)
5098 return;
5099
5100 skb_queue_head(&hdev->cmd_q, skb);
5101 queue_work(hdev->workqueue, &hdev->cmd_work);
5102}
5103
9238f36a
JH
5104void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5105{
5106 hci_req_complete_t req_complete = NULL;
5107 struct sk_buff *skb;
5108 unsigned long flags;
5109
5110 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5111
42c6b129
JH
5112 /* If the completed command doesn't match the last one that was
5113 * sent we need to do special handling of it.
9238f36a 5114 */
42c6b129
JH
5115 if (!hci_sent_cmd_data(hdev, opcode)) {
5116 /* Some CSR based controllers generate a spontaneous
5117 * reset complete event during init and any pending
5118 * command will never be completed. In such a case we
5119 * need to resend whatever was the last sent
5120 * command.
5121 */
5122 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5123 hci_resend_last(hdev);
5124
9238f36a 5125 return;
42c6b129 5126 }
9238f36a
JH
5127
5128 /* If the command succeeded and there's still more commands in
5129 * this request the request is not yet complete.
5130 */
5131 if (!status && !hci_req_is_complete(hdev))
5132 return;
5133
5134 /* If this was the last command in a request the complete
5135 * callback would be found in hdev->sent_cmd instead of the
5136 * command queue (hdev->cmd_q).
5137 */
5138 if (hdev->sent_cmd) {
5139 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
53e21fbc
JH
5140
5141 if (req_complete) {
5142 /* We must set the complete callback to NULL to
5143 * avoid calling the callback more than once if
5144 * this function gets called again.
5145 */
5146 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5147
9238f36a 5148 goto call_complete;
53e21fbc 5149 }
9238f36a
JH
5150 }
5151
5152 /* Remove all pending commands belonging to this request */
5153 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5154 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5155 if (bt_cb(skb)->req.start) {
5156 __skb_queue_head(&hdev->cmd_q, skb);
5157 break;
5158 }
5159
5160 req_complete = bt_cb(skb)->req.complete;
5161 kfree_skb(skb);
5162 }
5163 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5164
5165call_complete:
5166 if (req_complete)
5167 req_complete(hdev, status);
5168}
5169
b78752cc 5170static void hci_rx_work(struct work_struct *work)
1da177e4 5171{
b78752cc 5172 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
5173 struct sk_buff *skb;
5174
5175 BT_DBG("%s", hdev->name);
5176
1da177e4 5177 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
5178 /* Send copy to monitor */
5179 hci_send_to_monitor(hdev, skb);
5180
1da177e4
LT
5181 if (atomic_read(&hdev->promisc)) {
5182 /* Send copy to the sockets */
470fe1b5 5183 hci_send_to_sock(hdev, skb);
1da177e4
LT
5184 }
5185
fee746b0 5186 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1da177e4
LT
5187 kfree_skb(skb);
5188 continue;
5189 }
5190
5191 if (test_bit(HCI_INIT, &hdev->flags)) {
5192 /* Don't process data packets in this states. */
0d48d939 5193 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
5194 case HCI_ACLDATA_PKT:
5195 case HCI_SCODATA_PKT:
5196 kfree_skb(skb);
5197 continue;
3ff50b79 5198 }
1da177e4
LT
5199 }
5200
5201 /* Process frame */
0d48d939 5202 switch (bt_cb(skb)->pkt_type) {
1da177e4 5203 case HCI_EVENT_PKT:
b78752cc 5204 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
5205 hci_event_packet(hdev, skb);
5206 break;
5207
5208 case HCI_ACLDATA_PKT:
5209 BT_DBG("%s ACL data packet", hdev->name);
5210 hci_acldata_packet(hdev, skb);
5211 break;
5212
5213 case HCI_SCODATA_PKT:
5214 BT_DBG("%s SCO data packet", hdev->name);
5215 hci_scodata_packet(hdev, skb);
5216 break;
5217
5218 default:
5219 kfree_skb(skb);
5220 break;
5221 }
5222 }
1da177e4
LT
5223}
5224
c347b765 5225static void hci_cmd_work(struct work_struct *work)
1da177e4 5226{
c347b765 5227 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
5228 struct sk_buff *skb;
5229
2104786b
AE
5230 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5231 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 5232
1da177e4 5233 /* Send queued commands */
5a08ecce
AE
5234 if (atomic_read(&hdev->cmd_cnt)) {
5235 skb = skb_dequeue(&hdev->cmd_q);
5236 if (!skb)
5237 return;
5238
7585b97a 5239 kfree_skb(hdev->sent_cmd);
1da177e4 5240
a675d7f1 5241 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 5242 if (hdev->sent_cmd) {
1da177e4 5243 atomic_dec(&hdev->cmd_cnt);
57d17d70 5244 hci_send_frame(hdev, skb);
7bdb8a5c 5245 if (test_bit(HCI_RESET, &hdev->flags))
65cc2b49 5246 cancel_delayed_work(&hdev->cmd_timer);
7bdb8a5c 5247 else
65cc2b49
MH
5248 schedule_delayed_work(&hdev->cmd_timer,
5249 HCI_CMD_TIMEOUT);
1da177e4
LT
5250 } else {
5251 skb_queue_head(&hdev->cmd_q, skb);
c347b765 5252 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
5253 }
5254 }
5255}