]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - net/bluetooth/hci_core.c
Bluetooth: Track discovery type
[mirror_ubuntu-bionic-kernel.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
82453021 28#include <linux/jiffies.h>
1da177e4
LT
29#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
1da177e4
LT
35#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
f48fd9c8 41#include <linux/workqueue.h>
1da177e4
LT
42#include <linux/interrupt.h>
43#include <linux/notifier.h>
611b30f7 44#include <linux/rfkill.h>
6bd32326 45#include <linux/timer.h>
3a0259bb 46#include <linux/crypto.h>
1da177e4
LT
47#include <net/sock.h>
48
49#include <asm/system.h>
70f23020 50#include <linux/uaccess.h>
1da177e4
LT
51#include <asm/unaligned.h>
52
53#include <net/bluetooth/bluetooth.h>
54#include <net/bluetooth/hci_core.h>
55
ab81cbf9
JH
56#define AUTO_OFF_TIMEOUT 2000
57
8b281b9c 58bool enable_hs;
7784d78f 59
b78752cc 60static void hci_rx_work(struct work_struct *work);
c347b765 61static void hci_cmd_work(struct work_struct *work);
3eff45ea 62static void hci_tx_work(struct work_struct *work);
1da177e4 63
1da177e4
LT
64/* HCI device list */
65LIST_HEAD(hci_dev_list);
66DEFINE_RWLOCK(hci_dev_list_lock);
67
68/* HCI callback list */
69LIST_HEAD(hci_cb_list);
70DEFINE_RWLOCK(hci_cb_list_lock);
71
1da177e4 72/* HCI notifiers list */
e041c683 73static ATOMIC_NOTIFIER_HEAD(hci_notifier);
1da177e4
LT
74
75/* ---- HCI notifications ---- */
76
77int hci_register_notifier(struct notifier_block *nb)
78{
e041c683 79 return atomic_notifier_chain_register(&hci_notifier, nb);
1da177e4
LT
80}
81
82int hci_unregister_notifier(struct notifier_block *nb)
83{
e041c683 84 return atomic_notifier_chain_unregister(&hci_notifier, nb);
1da177e4
LT
85}
86
6516455d 87static void hci_notify(struct hci_dev *hdev, int event)
1da177e4 88{
e041c683 89 atomic_notifier_call_chain(&hci_notifier, event, hdev);
1da177e4
LT
90}
91
92/* ---- HCI requests ---- */
93
/* Completion hook for synchronous HCI requests: records the result and
 * wakes the waiter sleeping in __hci_request(). */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	/* Only hand over the result while a request is actually pending;
	 * stale completions are ignored. */
	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
110
111static void hci_req_cancel(struct hci_dev *hdev, int err)
112{
113 BT_DBG("%s err 0x%2.2x", hdev->name, err);
114
115 if (hdev->req_status == HCI_REQ_PEND) {
116 hdev->req_result = err;
117 hdev->req_status = HCI_REQ_CANCELED;
118 wake_up_interruptible(&hdev->req_wait_q);
119 }
120}
121
/* Execute request and wait for completion.
 *
 * Runs @req (which queues HCI commands) and then sleeps interruptibly
 * until hci_req_complete()/hci_req_cancel() flips req_status, the
 * timeout expires, or a signal arrives.  Caller must hold the request
 * lock (see hci_request()).  Returns 0 or a negative errno. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Register on the wait queue BEFORE issuing the request so a fast
	 * completion cannot be missed. */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Controller status code mapped to a negative errno. */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		/* Still HCI_REQ_PEND: schedule_timeout() ran out. */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
164
165static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
01df8c31 166 unsigned long opt, __u32 timeout)
1da177e4
LT
167{
168 int ret;
169
7c6a329e
MH
170 if (!test_bit(HCI_UP, &hdev->flags))
171 return -ENETDOWN;
172
1da177e4
LT
173 /* Serialize all requests */
174 hci_req_lock(hdev);
175 ret = __hci_request(hdev, req, opt, timeout);
176 hci_req_unlock(hdev);
177
178 return ret;
179}
180
181static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
182{
183 BT_DBG("%s %ld", hdev->name, opt);
184
185 /* Reset device */
f630cf0d 186 set_bit(HCI_RESET, &hdev->flags);
a9de9248 187 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
1da177e4
LT
188}
189
/* Queue the BR/EDR controller bring-up command sequence.  The command
 * order follows the HCI initialization flow; completions are matched up
 * by the event handler. */
static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Mandatory initialization */

	/* Reset (skipped for controllers that cannot handle it) */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 * 0.625ms slots) */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Drop all stored link keys so the host-side store is authoritative. */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
241
e61ef499
AE
242static void amp_init(struct hci_dev *hdev)
243{
2455a3ea
AE
244 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
245
e61ef499
AE
246 /* Reset */
247 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
248
249 /* Read Local Version */
250 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
251}
252
/* Request callback for device init: flush any driver-supplied setup
 * commands first, then run the type-specific init sequence. */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands queued by the transport driver before open. */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}

}
286
6ed58ec5
VT
287static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
288{
289 BT_DBG("%s", hdev->name);
290
291 /* Read LE buffer size */
292 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
293}
294
1da177e4
LT
295static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
296{
297 __u8 scan = opt;
298
299 BT_DBG("%s %x", hdev->name, scan);
300
301 /* Inquiry and Page scans */
a9de9248 302 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
303}
304
305static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
306{
307 __u8 auth = opt;
308
309 BT_DBG("%s %x", hdev->name, auth);
310
311 /* Authentication */
a9de9248 312 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
313}
314
315static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
316{
317 __u8 encrypt = opt;
318
319 BT_DBG("%s %x", hdev->name, encrypt);
320
e4e8e37c 321 /* Encryption */
a9de9248 322 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
323}
324
e4e8e37c
MH
325static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
326{
327 __le16 policy = cpu_to_le16(opt);
328
a418b893 329 BT_DBG("%s %x", hdev->name, policy);
e4e8e37c
MH
330
331 /* Default link policy */
332 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
333}
334
8e87d142 335/* Get HCI device by index.
1da177e4
LT
336 * Device is held on return. */
337struct hci_dev *hci_dev_get(int index)
338{
8035ded4 339 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
340
341 BT_DBG("%d", index);
342
343 if (index < 0)
344 return NULL;
345
346 read_lock(&hci_dev_list_lock);
8035ded4 347 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
348 if (d->id == index) {
349 hdev = hci_dev_hold(d);
350 break;
351 }
352 }
353 read_unlock(&hci_dev_list_lock);
354 return hdev;
355}
1da177e4
LT
356
357/* ---- Inquiry support ---- */
ff9ef578 358
30dc78e1
JH
359bool hci_discovery_active(struct hci_dev *hdev)
360{
361 struct discovery_state *discov = &hdev->discovery;
362
6fbe195d
AG
363 switch (discov->state) {
364 case DISCOVERY_INQUIRY:
365 case DISCOVERY_LE_SCAN:
366 case DISCOVERY_RESOLVING:
30dc78e1
JH
367 return true;
368
6fbe195d
AG
369 default:
370 return false;
371 }
30dc78e1
JH
372}
373
/* Advance the discovery state machine and emit the matching mgmt
 * "discovering" events.  No-op if the state is unchanged. */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		/* Reset the discovery type for the next session. */
		hdev->discovery.type = 0;

		/* STARTING -> STOPPED means discovery never actually began,
		 * so userspace was never told it started. */
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_INQUIRY:
	case DISCOVERY_LE_SCAN:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
402
1da177e4
LT
403static void inquiry_cache_flush(struct hci_dev *hdev)
404{
30883512 405 struct discovery_state *cache = &hdev->discovery;
b57c1a56 406 struct inquiry_entry *p, *n;
1da177e4 407
561aafbc
JH
408 list_for_each_entry_safe(p, n, &cache->all, all) {
409 list_del(&p->all);
b57c1a56 410 kfree(p);
1da177e4 411 }
561aafbc
JH
412
413 INIT_LIST_HEAD(&cache->unknown);
414 INIT_LIST_HEAD(&cache->resolve);
ff9ef578 415 cache->state = DISCOVERY_STOPPED;
1da177e4
LT
416}
417
418struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
419{
30883512 420 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
421 struct inquiry_entry *e;
422
423 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
424
561aafbc
JH
425 list_for_each_entry(e, &cache->all, all) {
426 if (!bacmp(&e->data.bdaddr, bdaddr))
427 return e;
428 }
429
430 return NULL;
431}
432
433struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
434 bdaddr_t *bdaddr)
435{
30883512 436 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
437 struct inquiry_entry *e;
438
439 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
440
441 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 442 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
443 return e;
444 }
445
446 return NULL;
1da177e4
LT
447}
448
30dc78e1
JH
449struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
450 bdaddr_t *bdaddr,
451 int state)
452{
453 struct discovery_state *cache = &hdev->discovery;
454 struct inquiry_entry *e;
455
456 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
457
458 list_for_each_entry(e, &cache->resolve, list) {
459 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
460 return e;
461 if (!bacmp(&e->data.bdaddr, bdaddr))
462 return e;
463 }
464
465 return NULL;
466}
467
/* Re-insert @ie into the resolve list keeping it sorted by descending
 * RSSI magnitude, so the strongest devices get their names resolved
 * first.  Entries already NAME_PENDING stay at the front. */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
						struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	/* Walk until we find the first weaker (or pending) entry; insert
	 * after the last stronger one. */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
				abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
486
/* Insert or refresh an inquiry result in the cache.
 *
 * @name_known: whether the remote name came with this result.
 * Returns true if the entry's name is (now) known, false if a remote
 * name request is still needed. */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
						bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* RSSI changed for an entry awaiting resolution: re-sort it
		 * so resolution order tracks signal strength. */
		if (ie->name_state == NAME_NEEDED &&
						data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name just became known: drop the entry from whichever
	 * unknown/resolve list it was on. */
	if (name_known && ie->name_state != NAME_KNOWN &&
					ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
536
537static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
538{
30883512 539 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
540 struct inquiry_info *info = (struct inquiry_info *) buf;
541 struct inquiry_entry *e;
542 int copied = 0;
543
561aafbc 544 list_for_each_entry(e, &cache->all, all) {
1da177e4 545 struct inquiry_data *data = &e->data;
b57c1a56
JH
546
547 if (copied >= num)
548 break;
549
1da177e4
LT
550 bacpy(&info->bdaddr, &data->bdaddr);
551 info->pscan_rep_mode = data->pscan_rep_mode;
552 info->pscan_period_mode = data->pscan_period_mode;
553 info->pscan_mode = data->pscan_mode;
554 memcpy(info->dev_class, data->dev_class, 3);
555 info->clock_offset = data->clock_offset;
b57c1a56 556
1da177e4 557 info++;
b57c1a56 558 copied++;
1da177e4
LT
559 }
560
561 BT_DBG("cache %p, copied %d", cache, copied);
562 return copied;
563}
564
565static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
566{
567 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
568 struct hci_cp_inquiry cp;
569
570 BT_DBG("%s", hdev->name);
571
572 if (test_bit(HCI_INQUIRY, &hdev->flags))
573 return;
574
575 /* Start Inquiry */
576 memcpy(&cp.lap, &ir->lap, 3);
577 cp.length = ir->length;
578 cp.num_rsp = ir->num_rsp;
a9de9248 579 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
580}
581
/* HCIINQUIRY ioctl handler: run an inquiry (if the cache is stale or a
 * flush was requested) and copy the cached results back to userspace. */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Decide under the lock whether the cache can be reused. */
	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in 1.28s units; budget ~2s per unit for the wait. */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the updated request header followed by the results. */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
647
648/* ---- HCI ioctl helpers ---- */
649
/* Bring up the HCI device with index @dev: open the transport, run the
 * init request sequence (unless raw), and announce HCI_DEV_UP.  On init
 * failure the device is fully torn down again.  Returns 0 or -errno. */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Refuse to power up while rfkill-blocked. */
	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		/* LE-capable controllers need the extra LE init step. */
		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* During HCI_SETUP mgmt announces power state itself. */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
736
/* Shut down @hdev: cancel pending work and requests, flush queues,
 * reset the controller (unless raw), close the transport and clear all
 * runtime flags.  Safe to call on an already-down device. */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	/* Abort any synchronous request still waiting. */
	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
				test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	hci_dev_lock(hdev);
	mgmt_powered(hdev, 0);
	hci_dev_unlock(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
821
822int hci_dev_close(__u16 dev)
823{
824 struct hci_dev *hdev;
825 int err;
826
70f23020
AE
827 hdev = hci_dev_get(dev);
828 if (!hdev)
1da177e4
LT
829 return -ENODEV;
830 err = hci_dev_do_close(hdev);
831 hci_dev_put(hdev);
832 return err;
833}
834
/* HCIDEVRESET ioctl: flush all queues/caches/connections and send an
 * HCI_Reset (unless the device is raw).  Only acts on an up device. */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restart command processing with fresh credit counts. */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
873
874int hci_dev_reset_stat(__u16 dev)
875{
876 struct hci_dev *hdev;
877 int ret = 0;
878
70f23020
AE
879 hdev = hci_dev_get(dev);
880 if (!hdev)
1da177e4
LT
881 return -ENODEV;
882
883 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
884
885 hci_dev_put(hdev);
886
887 return ret;
888}
889
/* Dispatcher for the per-device HCISET* ioctls.  Each case either runs
 * a synchronous HCI request or updates a cached host-side setting. */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	/* dev_opt packs two __u16 values: low half = packet count,
	 * high half = MTU. */
	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
964
/* HCIGETDEVLIST ioctl: return up to dev_num {id, flags} pairs for the
 * registered HCI devices. */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the allocation to a sane size. */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Legacy ioctl access keeps auto-powered devices on. */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1011
/* HCIGETDEVINFO ioctl: fill a struct hci_dev_info snapshot for one
 * device and copy it back to userspace. */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Legacy ioctl access keeps auto-powered devices on. */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: transport bus; high nibble: controller type. */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1053
1054/* ---- Interface to HCI drivers ---- */
1055
611b30f7
MH
1056static int hci_rfkill_set_block(void *data, bool blocked)
1057{
1058 struct hci_dev *hdev = data;
1059
1060 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1061
1062 if (!blocked)
1063 return 0;
1064
1065 hci_dev_do_close(hdev);
1066
1067 return 0;
1068}
1069
1070static const struct rfkill_ops hci_rfkill_ops = {
1071 .set_block = hci_rfkill_set_block,
1072};
1073
1da177e4
LT
1074/* Alloc HCI device */
1075struct hci_dev *hci_alloc_dev(void)
1076{
1077 struct hci_dev *hdev;
1078
25ea6db0 1079 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1da177e4
LT
1080 if (!hdev)
1081 return NULL;
1082
0ac7e700 1083 hci_init_sysfs(hdev);
1da177e4
LT
1084 skb_queue_head_init(&hdev->driver_init);
1085
1086 return hdev;
1087}
1088EXPORT_SYMBOL(hci_alloc_dev);
1089
1090/* Free HCI device */
1091void hci_free_dev(struct hci_dev *hdev)
1092{
1093 skb_queue_purge(&hdev->driver_init);
1094
a91f2e39
MH
1095 /* will free via device release */
1096 put_device(&hdev->dev);
1da177e4
LT
1097}
1098EXPORT_SYMBOL(hci_free_dev);
1099
/* Deferred power-on worker (scheduled by mgmt): opens the device, arms
 * the auto-power-off timer, and finishes the setup phase. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	/* Auto-off unless userspace claims the device in time. */
	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1116
1117static void hci_power_off(struct work_struct *work)
1118{
3243553f
JH
1119 struct hci_dev *hdev = container_of(work, struct hci_dev,
1120 power_off.work);
ab81cbf9
JH
1121
1122 BT_DBG("%s", hdev->name);
1123
a8b2d5c2 1124 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
ab81cbf9 1125
3243553f 1126 hci_dev_close(hdev->id);
ab81cbf9
JH
1127}
1128
16ab91ab
JH
1129static void hci_discov_off(struct work_struct *work)
1130{
1131 struct hci_dev *hdev;
1132 u8 scan = SCAN_PAGE;
1133
1134 hdev = container_of(work, struct hci_dev, discov_off.work);
1135
1136 BT_DBG("%s", hdev->name);
1137
09fd0de5 1138 hci_dev_lock(hdev);
16ab91ab
JH
1139
1140 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1141
1142 hdev->discov_timeout = 0;
1143
09fd0de5 1144 hci_dev_unlock(hdev);
16ab91ab
JH
1145}
1146
2aeb9a1a
JH
1147int hci_uuids_clear(struct hci_dev *hdev)
1148{
1149 struct list_head *p, *n;
1150
1151 list_for_each_safe(p, n, &hdev->uuids) {
1152 struct bt_uuid *uuid;
1153
1154 uuid = list_entry(p, struct bt_uuid, list);
1155
1156 list_del(p);
1157 kfree(uuid);
1158 }
1159
1160 return 0;
1161}
1162
55ed8ca1
JH
1163int hci_link_keys_clear(struct hci_dev *hdev)
1164{
1165 struct list_head *p, *n;
1166
1167 list_for_each_safe(p, n, &hdev->link_keys) {
1168 struct link_key *key;
1169
1170 key = list_entry(p, struct link_key, list);
1171
1172 list_del(p);
1173 kfree(key);
1174 }
1175
1176 return 0;
1177}
1178
b899efaf
VCG
1179int hci_smp_ltks_clear(struct hci_dev *hdev)
1180{
1181 struct smp_ltk *k, *tmp;
1182
1183 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1184 list_del(&k->list);
1185 kfree(k);
1186 }
1187
1188 return 0;
1189}
1190
55ed8ca1
JH
1191struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1192{
8035ded4 1193 struct link_key *k;
55ed8ca1 1194
8035ded4 1195 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1196 if (bacmp(bdaddr, &k->bdaddr) == 0)
1197 return k;
55ed8ca1
JH
1198
1199 return NULL;
1200}
1201
/* Decide whether a newly created link key should be stored persistently.
 * Returns 1 when the key may be kept across connections, 0 when it is
 * only valid for the current connection.
 */
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
						u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
1237
c9839a11 1238struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
75d262c2 1239{
c9839a11 1240 struct smp_ltk *k;
75d262c2 1241
c9839a11
VCG
1242 list_for_each_entry(k, &hdev->long_term_keys, list) {
1243 if (k->ediv != ediv ||
1244 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
1245 continue;
1246
c9839a11 1247 return k;
75d262c2
VCG
1248 }
1249
1250 return NULL;
1251}
1252EXPORT_SYMBOL(hci_find_ltk);
1253
c9839a11
VCG
1254struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1255 u8 addr_type)
75d262c2 1256{
c9839a11 1257 struct smp_ltk *k;
75d262c2 1258
c9839a11
VCG
1259 list_for_each_entry(k, &hdev->long_term_keys, list)
1260 if (addr_type == k->bdaddr_type &&
1261 bacmp(bdaddr, &k->bdaddr) == 0)
75d262c2
VCG
1262 return k;
1263
1264 return NULL;
1265}
c9839a11 1266EXPORT_SYMBOL(hci_find_ltk_by_addr);
75d262c2 1267
/* Store (or update) the BR/EDR link key for @bdaddr.
 *
 * @new_key: non-zero when this key was just created by pairing (as
 *           opposed to being re-loaded); only then is user space
 *           notified and the persistence policy applied.
 * @type:    link key type as reported by the controller.
 *
 * Returns 0 on success or -ENOMEM if a new entry cannot be allocated.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	/* Reuse an existing entry for this address if we have one */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	/* Link keys are always 16 bytes (Bluetooth spec) */
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A "changed combination" event keeps the previous key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys were handed to user space above but must
	 * not remain in the kernel's list */
	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1322
/* Store (or update) an SMP long term key for @bdaddr/@addr_type.
 *
 * Only STK and LTK types are accepted; anything else is silently
 * ignored (returns 0).  When @new_key is set and the key is an LTK,
 * user space is notified via mgmt.  Returns -ENOMEM on allocation
 * failure, 0 otherwise.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
				int new_key, u8 authenticated, u8 tk[16],
				u8 enc_size, u16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Replace an existing key for the same address in place */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	/* STKs are short-lived; only real LTKs are announced to mgmt */
	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
55ed8ca1
JH
1360int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1361{
1362 struct link_key *key;
1363
1364 key = hci_find_link_key(hdev, bdaddr);
1365 if (!key)
1366 return -ENOENT;
1367
1368 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1369
1370 list_del(&key->list);
1371 kfree(key);
1372
1373 return 0;
1374}
1375
/* Delete every long term key stored for @bdaddr.
 *
 * NOTE(review): matches on address only — keys for the same address but
 * different bdaddr_type are all removed; confirm that is intended.
 * Always returns 0, even when nothing matched.
 */
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}
1392
/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	/* The controller never acked the outstanding command; restore one
	 * command credit and kick the command work so the queue can make
	 * progress again. */
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1402
2763eda6
SJ
1403struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1404 bdaddr_t *bdaddr)
1405{
1406 struct oob_data *data;
1407
1408 list_for_each_entry(data, &hdev->remote_oob_data, list)
1409 if (bacmp(bdaddr, &data->bdaddr) == 0)
1410 return data;
1411
1412 return NULL;
1413}
1414
1415int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1416{
1417 struct oob_data *data;
1418
1419 data = hci_find_remote_oob_data(hdev, bdaddr);
1420 if (!data)
1421 return -ENOENT;
1422
1423 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1424
1425 list_del(&data->list);
1426 kfree(data);
1427
1428 return 0;
1429}
1430
1431int hci_remote_oob_data_clear(struct hci_dev *hdev)
1432{
1433 struct oob_data *data, *n;
1434
1435 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1436 list_del(&data->list);
1437 kfree(data);
1438 }
1439
1440 return 0;
1441}
1442
/* Cache remote OOB pairing data (hash + randomizer) for @bdaddr.
 * Overwrites any existing entry for the same address.  Returns 0 on
 * success or -ENOMEM when a new entry cannot be allocated.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
								u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		/* GFP_ATOMIC: may be called from a non-sleeping context */
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
1466
b2a66aad
AJ
1467struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1468 bdaddr_t *bdaddr)
1469{
8035ded4 1470 struct bdaddr_list *b;
b2a66aad 1471
8035ded4 1472 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1473 if (bacmp(bdaddr, &b->bdaddr) == 0)
1474 return b;
b2a66aad
AJ
1475
1476 return NULL;
1477}
1478
1479int hci_blacklist_clear(struct hci_dev *hdev)
1480{
1481 struct list_head *p, *n;
1482
1483 list_for_each_safe(p, n, &hdev->blacklist) {
1484 struct bdaddr_list *b;
1485
1486 b = list_entry(p, struct bdaddr_list, list);
1487
1488 list_del(p);
1489 kfree(b);
1490 }
1491
1492 return 0;
1493}
1494
/* Add @bdaddr to the connection blacklist and notify mgmt.
 *
 * Returns -EBADF for the wildcard address, -EEXIST if already listed,
 * -ENOMEM on allocation failure, otherwise the result of
 * mgmt_device_blocked().
 */
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	/* BDADDR_ANY cannot be blacklisted */
	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}
1515
/* Remove @bdaddr from the blacklist and notify mgmt.
 *
 * Passing BDADDR_ANY clears the whole list.  Returns -ENOENT when the
 * address is not listed, otherwise the result of
 * mgmt_device_unblocked() (or hci_blacklist_clear() for the wildcard).
 */
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	/* Wildcard: flush the entire blacklist */
	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
1532
/* Delayed work item: expire the LE advertising cache under the device
 * lock.  Scheduled as hdev->adv_work.
 */
static void hci_clear_adv_cache(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							adv_work.work);

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}
1544
76c8686f
AG
1545int hci_adv_entries_clear(struct hci_dev *hdev)
1546{
1547 struct adv_entry *entry, *tmp;
1548
1549 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1550 list_del(&entry->list);
1551 kfree(entry);
1552 }
1553
1554 BT_DBG("%s adv cache cleared", hdev->name);
1555
1556 return 0;
1557}
1558
1559struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1560{
1561 struct adv_entry *entry;
1562
1563 list_for_each_entry(entry, &hdev->adv_entries, list)
1564 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1565 return entry;
1566
1567 return NULL;
1568}
1569
1570static inline int is_connectable_adv(u8 evt_type)
1571{
1572 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1573 return 1;
1574
1575 return 0;
1576}
1577
/* Cache the sender of a connectable LE advertising report.
 *
 * Returns -EINVAL for non-connectable event types, -ENOMEM on
 * allocation failure, 0 otherwise (including the already-cached case).
 */
int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
				batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
1605
/* __hci_request callback: send LE Set Scan Parameters built from the
 * le_scan_params passed through @opt.
 */
static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}
1618
/* __hci_request callback: enable LE scanning (@opt unused). */
static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1628
/* Synchronously start an LE scan: set parameters, enable scanning, then
 * arm the delayed work that disables it again after @timeout ms.
 *
 * Both HCI requests run under the request lock with a 3 s command
 * timeout each.  Returns -EINPROGRESS if a scan is already running,
 * a negative request error, or 0 on success.
 */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
						u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
									timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	/* Auto-stop the scan after the requested duration */
	schedule_delayed_work(&hdev->le_scan_disable,
						msecs_to_jiffies(timeout));

	return 0;
}
1662
/* Delayed work item: turn LE scanning off.  The zeroed command payload
 * carries enable = 0.
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
						le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1675
/* Work item: run the LE scan using parameters stashed in
 * hdev->le_scan_params by hci_le_scan().
 */
static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	hci_do_le_scan(hdev, param->type, param->interval,
						param->window, param->timeout);
}
1686
/* Kick off an asynchronous LE scan.  Parameters are stored on the
 * device and the actual (blocking) scan setup runs from le_scan_work on
 * the long-running system workqueue.  Returns -EINPROGRESS if a scan
 * work item is already pending or running, 0 otherwise.
 */
int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
								int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
1706
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* A driver must at least provide open and close callbacks */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	/* Keep hci_dev_list sorted by id */
	list_add_tail(&hdev->list, head);

	mutex_init(&hdev->lock);

	/* Default link policy and capabilities */
	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* Per-device work items for RX, command and TX processing */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);


	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	/* Watchdog for unacknowledged HCI commands */
	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	discovery_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
							WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill registration is best-effort; the device works without it */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* Power on for setup; hci_power_on clears HCI_SETUP when done */
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
	set_bit(HCI_SETUP, &hdev->dev_flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Undo the list insertion done above */
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1838
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Remove from the global list first so no new lookups find us */
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal for fully set-up controllers */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	/* Make sure the adv cache expiry work cannot run after free */
	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	/* Flush all per-device storage under the device lock */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	/* Drop the reference taken in hci_register_dev() */
	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1891
/* Suspend HCI device: notify registered listeners only. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1899
/* Resume HCI device: notify registered listeners only. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1907
/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	/* Drop frames for devices that are neither up nor initializing */
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1930
/* Reassemble a (possibly partial) HCI packet of @type from @count bytes
 * at @data, using the per-device reassembly slot @index.
 *
 * The in-progress skb's control block tracks how many bytes are still
 * expected; once a header is complete the expectation is extended to
 * the payload length it announces.  A complete frame is handed to
 * hci_recv_frame().
 *
 * Returns the number of input bytes NOT consumed (>= 0), -EILSEQ for a
 * bad type/index, or -ENOMEM on allocation failure or when the
 * announced payload would not fit the allocated skb.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: size the skb for the worst case
		 * of this packet type and expect the header first */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the fixed-size header is in, learn the payload
		 * length from it and verify it fits */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
2039
/* Feed @count bytes of a typed HCI packet fragment into the per-type
 * reassembly slot (slot index = type - 1).  Loops until all input is
 * consumed or an error occurs.  Returns the last hci_reassembly()
 * result: leftover byte count (normally 0) or a negative error.
 */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		/* Advance past what hci_reassembly() consumed */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
2059
/* Dedicated reassembly slot for byte-stream drivers (e.g. UART), where
 * the packet type indicator arrives in-band as the first byte */
#define STREAM_REASSEMBLY 0

/* Feed raw stream bytes into reassembly.  At the start of each packet
 * the first byte is the HCI packet type; subsequent calls continue the
 * packet remembered in the STREAM_REASSEMBLY slot.  Returns leftover
 * byte count or a negative error from hci_reassembly().
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2094
1da177e4
LT
2095/* ---- Interface to upper protocols ---- */
2096
1da177e4
LT
2097int hci_register_cb(struct hci_cb *cb)
2098{
2099 BT_DBG("%p name %s", cb, cb->name);
2100
f20d09d5 2101 write_lock(&hci_cb_list_lock);
1da177e4 2102 list_add(&cb->list, &hci_cb_list);
f20d09d5 2103 write_unlock(&hci_cb_list_lock);
1da177e4
LT
2104
2105 return 0;
2106}
2107EXPORT_SYMBOL(hci_register_cb);
2108
2109int hci_unregister_cb(struct hci_cb *cb)
2110{
2111 BT_DBG("%p name %s", cb, cb->name);
2112
f20d09d5 2113 write_lock(&hci_cb_list_lock);
1da177e4 2114 list_del(&cb->list);
f20d09d5 2115 write_unlock(&hci_cb_list_lock);
1da177e4
LT
2116
2117 return 0;
2118}
2119EXPORT_SYMBOL(hci_unregister_cb);
2120
/* Hand one outgoing frame to the driver.  In promiscuous mode a
 * timestamped copy is also delivered to monitoring sockets.  Returns
 * the driver's send result, or -ENODEV when the skb carries no device.
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Build the command header (little-endian opcode) + payload */
	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Remember the last command sent during controller init */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	/* Queue it; cmd_work sends when a command credit is available */
	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
1da177e4
LT
2180
2181/* Get data from the previously sent command */
a9de9248 2182void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
2183{
2184 struct hci_command_hdr *hdr;
2185
2186 if (!hdev->sent_cmd)
2187 return NULL;
2188
2189 hdr = (void *) hdev->sent_cmd->data;
2190
a9de9248 2191 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
2192 return NULL;
2193
a9de9248 2194 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1da177e4
LT
2195
2196 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2197}
2198
/* Send ACL data */
/* Prepend an ACL header (handle + packet-boundary flags, data length)
 * to @skb.  @handle and @flags are packed into one little-endian field.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
2211
/* Queue an ACL skb (and any frag_list continuation fragments) onto
 * @queue.  The head fragment already carries its ACL header; each
 * continuation fragment gets its own header with ACL_CONT set.  All
 * fragments of one PDU are queued atomically under the queue lock.
 */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
						struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments must not claim to start a PDU */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
/* Send ACL data on @chan: add the ACL header, queue onto the channel's
 * data queue and kick the TX work to schedule transmission.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_acl);
2269
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	/* Build the SCO header (handle little-endian, 8-bit length) */
	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	/* Queue on the connection and let tx_work transmit it */
	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_sco);
2292
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
/* Pick the connection of @type with queued data and the fewest
 * unacknowledged packets (fairness), and compute its quota from the
 * controller's free buffer count divided by the number of contenders.
 * *quote receives the quota (>= 1 when a connection is returned, 0
 * otherwise).  Connection list walked under RCU.
 */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen; stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Buffer pool depends on link type; LE falls back to the
		 * ACL pool when the controller has no dedicated LE buffers */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2354
bae1f5d9 2355static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
2356{
2357 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2358 struct hci_conn *c;
1da177e4 2359
bae1f5d9 2360 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 2361
bf4c6325
GP
2362 rcu_read_lock();
2363
1da177e4 2364 /* Kill stalled connections */
bf4c6325 2365 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9
VT
2366 if (c->type == type && c->sent) {
2367 BT_ERR("%s killing stalled connection %s",
1da177e4
LT
2368 hdev->name, batostr(&c->dst));
2369 hci_acl_disconn(c, 0x13);
2370 }
2371 }
bf4c6325
GP
2372
2373 rcu_read_unlock();
1da177e4
LT
2374}
2375
73d80deb
LAD
/* Channel scheduler: among all channels on @type links with queued
 * data, pick one carrying the highest head-of-queue priority; ties
 * are broken in favour of the channel whose connection has the fewest
 * unacked packets.  *@quote receives the TX quota (>= 1) for the
 * chosen channel.  Returns NULL when nothing is ready to send.
 */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the head of the queue matters: lower-priority
			 * channels are skipped entirely this round */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Found a strictly higher priority: restart the
			 * fairness bookkeeping at this new level */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Least-sent connection wins among equal priority */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* All connections of this type seen; stop early */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Pick the controller buffer pool matching the link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		/* No dedicated LE buffers means the ACL pool is shared */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Fair share of the buffers, but always at least one packet */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2454
/* Anti-starvation pass, run after a scheduling round that consumed
 * controller buffers: any channel of @type that queued data but sent
 * nothing this round gets its head packet promoted to just below the
 * maximum priority, so it cannot be locked out indefinitely by a
 * steady stream of higher-priority traffic.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel sent something this round: not starved;
			 * just reset its per-round counter */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			/* Already at (or above) the promotion ceiling */
			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		/* All connections of this type seen; stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
2504
b71d385a
AE
2505static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2506{
2507 /* Calculate count of blocks used by this packet */
2508 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2509}
2510
63d2bc1b 2511static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 2512{
1da177e4
LT
2513 if (!test_bit(HCI_RAW, &hdev->flags)) {
2514 /* ACL tx timeout must be longer than maximum
2515 * link supervision timeout (40.9 seconds) */
63d2bc1b 2516 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
cc48dc0a 2517 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
bae1f5d9 2518 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 2519 }
63d2bc1b 2520}
1da177e4 2521
/* ACL scheduler for packet-based flow control: drain per-channel
 * queues while the controller has free ACL buffers, respecting the
 * per-channel quota computed by hci_chan_sent(). */
static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		/* Priority of the head packet when the channel was picked;
		 * sending stops once a lower-priority packet surfaces */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* One controller buffer used; account on both the
			 * channel and its connection */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* If anything was sent, give starved channels a boost */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2559
b71d385a
AE
2560static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2561{
63d2bc1b 2562 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
2563 struct hci_chan *chan;
2564 struct sk_buff *skb;
2565 int quote;
b71d385a 2566
63d2bc1b 2567 __check_timeout(hdev, cnt);
b71d385a
AE
2568
2569 while (hdev->block_cnt > 0 &&
2570 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2571 u32 priority = (skb_peek(&chan->data_q))->priority;
2572 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2573 int blocks;
2574
2575 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2576 skb->len, skb->priority);
2577
2578 /* Stop if priority has changed */
2579 if (skb->priority < priority)
2580 break;
2581
2582 skb = skb_dequeue(&chan->data_q);
2583
2584 blocks = __get_blocks(hdev, skb);
2585 if (blocks > hdev->block_cnt)
2586 return;
2587
2588 hci_conn_enter_active_mode(chan->conn,
2589 bt_cb(skb)->force_active);
2590
2591 hci_send_frame(skb);
2592 hdev->acl_last_tx = jiffies;
2593
2594 hdev->block_cnt -= blocks;
2595 quote -= blocks;
2596
2597 chan->sent += blocks;
2598 chan->conn->sent += blocks;
2599 }
2600 }
2601
2602 if (cnt != hdev->block_cnt)
2603 hci_prio_recalculate(hdev, ACL_LINK);
2604}
2605
2606static inline void hci_sched_acl(struct hci_dev *hdev)
2607{
2608 BT_DBG("%s", hdev->name);
2609
2610 if (!hci_conn_num(hdev, ACL_LINK))
2611 return;
2612
2613 switch (hdev->flow_ctl_mode) {
2614 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2615 hci_sched_acl_pkt(hdev);
2616 break;
2617
2618 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2619 hci_sched_acl_blk(hdev);
2620 break;
2621 }
2622}
2623
1da177e4
LT
2624/* Schedule SCO */
2625static inline void hci_sched_sco(struct hci_dev *hdev)
2626{
2627 struct hci_conn *conn;
2628 struct sk_buff *skb;
2629 int quote;
2630
2631 BT_DBG("%s", hdev->name);
2632
52087a79
LAD
2633 if (!hci_conn_num(hdev, SCO_LINK))
2634 return;
2635
1da177e4
LT
2636 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2637 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2638 BT_DBG("skb %p len %d", skb, skb->len);
2639 hci_send_frame(skb);
2640
2641 conn->sent++;
2642 if (conn->sent == ~0)
2643 conn->sent = 0;
2644 }
2645 }
2646}
2647
b6a0dc82
MH
2648static inline void hci_sched_esco(struct hci_dev *hdev)
2649{
2650 struct hci_conn *conn;
2651 struct sk_buff *skb;
2652 int quote;
2653
2654 BT_DBG("%s", hdev->name);
2655
52087a79
LAD
2656 if (!hci_conn_num(hdev, ESCO_LINK))
2657 return;
2658
b6a0dc82
MH
2659 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2660 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2661 BT_DBG("skb %p len %d", skb, skb->len);
2662 hci_send_frame(skb);
2663
2664 conn->sent++;
2665 if (conn->sent == ~0)
2666 conn->sent = 0;
2667 }
2668 }
2669}
2670
/* LE scheduler: drain per-channel LE queues, falling back to the ACL
 * buffer pool on controllers without dedicated LE buffers
 * (le_pkts == 0). */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds)
		 * NOTE(review): hard-coded HZ * 45 here, while the ACL
		 * path uses msecs_to_jiffies(HCI_ACL_TX_TIMEOUT) —
		 * consider unifying; verify the constants agree. */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Use the dedicated LE pool if present, else share ACL buffers */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		/* Priority of the head packet when the channel was picked */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining budget back to whichever pool was used */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* If anything was sent, give starved channels a boost */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2721
3eff45ea 2722static void hci_tx_work(struct work_struct *work)
1da177e4 2723{
3eff45ea 2724 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
2725 struct sk_buff *skb;
2726
6ed58ec5
VT
2727 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2728 hdev->sco_cnt, hdev->le_cnt);
1da177e4
LT
2729
2730 /* Schedule queues and send stuff to HCI driver */
2731
2732 hci_sched_acl(hdev);
2733
2734 hci_sched_sco(hdev);
2735
b6a0dc82
MH
2736 hci_sched_esco(hdev);
2737
6ed58ec5
VT
2738 hci_sched_le(hdev);
2739
1da177e4
LT
2740 /* Send next queued raw (unknown type) packet */
2741 while ((skb = skb_dequeue(&hdev->raw_q)))
2742 hci_send_frame(skb);
1da177e4
LT
2743}
2744
25985edc 2745/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
2746
2747/* ACL data packet */
2748static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2749{
2750 struct hci_acl_hdr *hdr = (void *) skb->data;
2751 struct hci_conn *conn;
2752 __u16 handle, flags;
2753
2754 skb_pull(skb, HCI_ACL_HDR_SIZE);
2755
2756 handle = __le16_to_cpu(hdr->handle);
2757 flags = hci_flags(handle);
2758 handle = hci_handle(handle);
2759
2760 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2761
2762 hdev->stat.acl_rx++;
2763
2764 hci_dev_lock(hdev);
2765 conn = hci_conn_hash_lookup_handle(hdev, handle);
2766 hci_dev_unlock(hdev);
8e87d142 2767
1da177e4 2768 if (conn) {
65983fc7 2769 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 2770
1da177e4 2771 /* Send to upper protocol */
686ebf28
UF
2772 l2cap_recv_acldata(conn, skb, flags);
2773 return;
1da177e4 2774 } else {
8e87d142 2775 BT_ERR("%s ACL packet for unknown connection handle %d",
1da177e4
LT
2776 hdev->name, handle);
2777 }
2778
2779 kfree_skb(skb);
2780}
2781
/* SCO data packet: strip the SCO header, look up the connection by
 * handle and hand the payload to the SCO layer; frames for unknown
 * handles are dropped. */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol; SCO layer takes ownership of skb */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2812
b78752cc 2813static void hci_rx_work(struct work_struct *work)
1da177e4 2814{
b78752cc 2815 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
2816 struct sk_buff *skb;
2817
2818 BT_DBG("%s", hdev->name);
2819
1da177e4
LT
2820 while ((skb = skb_dequeue(&hdev->rx_q))) {
2821 if (atomic_read(&hdev->promisc)) {
2822 /* Send copy to the sockets */
eec8d2bc 2823 hci_send_to_sock(hdev, skb, NULL);
1da177e4
LT
2824 }
2825
2826 if (test_bit(HCI_RAW, &hdev->flags)) {
2827 kfree_skb(skb);
2828 continue;
2829 }
2830
2831 if (test_bit(HCI_INIT, &hdev->flags)) {
2832 /* Don't process data packets in this states. */
0d48d939 2833 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
2834 case HCI_ACLDATA_PKT:
2835 case HCI_SCODATA_PKT:
2836 kfree_skb(skb);
2837 continue;
3ff50b79 2838 }
1da177e4
LT
2839 }
2840
2841 /* Process frame */
0d48d939 2842 switch (bt_cb(skb)->pkt_type) {
1da177e4 2843 case HCI_EVENT_PKT:
b78752cc 2844 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
2845 hci_event_packet(hdev, skb);
2846 break;
2847
2848 case HCI_ACLDATA_PKT:
2849 BT_DBG("%s ACL data packet", hdev->name);
2850 hci_acldata_packet(hdev, skb);
2851 break;
2852
2853 case HCI_SCODATA_PKT:
2854 BT_DBG("%s SCO data packet", hdev->name);
2855 hci_scodata_packet(hdev, skb);
2856 break;
2857
2858 default:
2859 kfree_skb(skb);
2860 break;
2861 }
2862 }
1da177e4
LT
2863}
2864
c347b765 2865static void hci_cmd_work(struct work_struct *work)
1da177e4 2866{
c347b765 2867 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
2868 struct sk_buff *skb;
2869
2870 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2871
1da177e4 2872 /* Send queued commands */
5a08ecce
AE
2873 if (atomic_read(&hdev->cmd_cnt)) {
2874 skb = skb_dequeue(&hdev->cmd_q);
2875 if (!skb)
2876 return;
2877
7585b97a 2878 kfree_skb(hdev->sent_cmd);
1da177e4 2879
70f23020
AE
2880 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2881 if (hdev->sent_cmd) {
1da177e4
LT
2882 atomic_dec(&hdev->cmd_cnt);
2883 hci_send_frame(skb);
7bdb8a5c
SJ
2884 if (test_bit(HCI_RESET, &hdev->flags))
2885 del_timer(&hdev->cmd_timer);
2886 else
2887 mod_timer(&hdev->cmd_timer,
6bd32326 2888 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
1da177e4
LT
2889 } else {
2890 skb_queue_head(&hdev->cmd_q, skb);
c347b765 2891 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
2892 }
2893 }
2894}
2519a1fc
AG
2895
2896int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2897{
2898 /* General inquiry access code (GIAC) */
2899 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2900 struct hci_cp_inquiry cp;
2901
2902 BT_DBG("%s", hdev->name);
2903
2904 if (test_bit(HCI_INQUIRY, &hdev->flags))
2905 return -EINPROGRESS;
2906
4663262c
JH
2907 inquiry_cache_flush(hdev);
2908
2519a1fc
AG
2909 memset(&cp, 0, sizeof(cp));
2910 memcpy(&cp.lap, lap, sizeof(cp.lap));
2911 cp.length = length;
2912
2913 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2914}
023d5049
AG
2915
2916int hci_cancel_inquiry(struct hci_dev *hdev)
2917{
2918 BT_DBG("%s", hdev->name);
2919
2920 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2921 return -EPERM;
2922
2923 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2924}
/* Module parameter: allow toggling Bluetooth High Speed support at
 * module load time or via sysfs (mode 0644; enable_hs is declared
 * earlier in this file). */
module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed");