]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - net/bluetooth/hci_core.c
Bluetooth: Use RCU to manipulate chan_list
[mirror_ubuntu-artful-kernel.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
8e87d142
YH
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
22 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI core. */
26
82453021 27#include <linux/jiffies.h>
1da177e4
LT
28#include <linux/module.h>
29#include <linux/kmod.h>
30
31#include <linux/types.h>
32#include <linux/errno.h>
33#include <linux/kernel.h>
1da177e4
LT
34#include <linux/sched.h>
35#include <linux/slab.h>
36#include <linux/poll.h>
37#include <linux/fcntl.h>
38#include <linux/init.h>
39#include <linux/skbuff.h>
f48fd9c8 40#include <linux/workqueue.h>
1da177e4
LT
41#include <linux/interrupt.h>
42#include <linux/notifier.h>
611b30f7 43#include <linux/rfkill.h>
6bd32326 44#include <linux/timer.h>
3a0259bb 45#include <linux/crypto.h>
1da177e4
LT
46#include <net/sock.h>
47
48#include <asm/system.h>
70f23020 49#include <linux/uaccess.h>
1da177e4
LT
50#include <asm/unaligned.h>
51
52#include <net/bluetooth/bluetooth.h>
53#include <net/bluetooth/hci_core.h>
54
ab81cbf9
JH
55#define AUTO_OFF_TIMEOUT 2000
56
7784d78f
AE
57int enable_hs;
58
b78752cc 59static void hci_rx_work(struct work_struct *work);
1da177e4 60static void hci_cmd_task(unsigned long arg);
1da177e4 61static void hci_tx_task(unsigned long arg);
1da177e4 62
67d0dfb5 63static DEFINE_MUTEX(hci_task_lock);
1da177e4
LT
64
65/* HCI device list */
66LIST_HEAD(hci_dev_list);
67DEFINE_RWLOCK(hci_dev_list_lock);
68
69/* HCI callback list */
70LIST_HEAD(hci_cb_list);
71DEFINE_RWLOCK(hci_cb_list_lock);
72
73/* HCI protocols */
74#define HCI_MAX_PROTO 2
75struct hci_proto *hci_proto[HCI_MAX_PROTO];
76
77/* HCI notifiers list */
e041c683 78static ATOMIC_NOTIFIER_HEAD(hci_notifier);
1da177e4
LT
79
80/* ---- HCI notifications ---- */
81
/* Register a notifier block to receive HCI device events (up/down/reg/unreg).
 * Thin wrapper around the atomic notifier chain; safe from atomic context. */
int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}
86
/* Remove a previously registered HCI event notifier. */
int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}
91
/* Broadcast @event for @hdev to all registered notifiers. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
96
97/* ---- HCI requests ---- */
98
/* Complete a pending synchronous request: record @result for the waiter
 * blocked in __hci_request() and wake it. @cmd is the opcode that finished. */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	/* Only hand over the result if someone is actually waiting */
	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
115
/* Abort a pending synchronous request with error @err (positive errno);
 * the waiter in __hci_request() will return -@err via HCI_REQ_CANCELED. */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
126
127/* Execute request and wait for completion. */
/* Run @req and sleep until hci_req_complete()/hci_req_cancel() wakes us or
 * @timeout (jiffies) expires. Caller must hold the request lock.
 * Returns 0 on success, negative errno on failure/timeout/signal.
 * NOTE(review): on a signal we return -EINTR *without* resetting
 * req_status, so it stays HCI_REQ_PEND until the next completion —
 * verify this is intended before relying on it. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Queue ourselves before issuing the request so the wake-up
	 * from the completion handler cannot be missed. */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Controller status code mapped to a negative errno */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		/* Still HCI_REQ_PEND: nobody completed us in time */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
169
/* Public wrapper for __hci_request(): rejects a downed device and takes
 * the per-device request lock so requests are fully serialized. */
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
185
/* Request callback: issue HCI_OP_RESET. HCI_RESET is set so the event
 * handler can tell this reset apart from spontaneous ones. */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
194
/* Request callback: queue the full BR/EDR initialization sequence.
 * Driver-supplied commands go out first, then reset (unless quirked off),
 * mandatory reads, and optional setup. Commands are fired in order and
 * completion is tracked via init_last_cmd in hci_req_complete(). */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_delete_stored_link_key cp;
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands queued by the driver before open */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 * 0.625ms slots) */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Wipe any link keys the controller has stored itself */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
259
6ed58ec5
VT
/* Request callback: LE-specific init, run after BR/EDR init on
 * LE-capable hosts (see hci_dev_open). */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
267
1da177e4
LT
/* Request callback: set inquiry/page scan mode; @opt carries the
 * scan-enable bitmask. */
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
277
/* Request callback: enable/disable authentication; @opt is the
 * auth-enable value. */
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
287
/* Request callback: set the controller encryption mode; @opt is the
 * encrypt-mode value. */
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
297
e4e8e37c
MH
/* Request callback: set the default link policy; @opt is the policy
 * bitmask, sent little-endian on the wire. */
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
307
8e87d142 308/* Get HCI device by index.
1da177e4
LT
309 * Device is held on return. */
/* Get HCI device by index.
 * Device is held on return (caller must hci_dev_put()); NULL if no
 * device with that id exists. The hold is taken under the list lock
 * so the device cannot vanish between lookup and hold. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
1da177e4
LT
329
330/* ---- Inquiry support ---- */
331static void inquiry_cache_flush(struct hci_dev *hdev)
332{
333 struct inquiry_cache *cache = &hdev->inq_cache;
334 struct inquiry_entry *next = cache->list, *e;
335
336 BT_DBG("cache %p", cache);
337
338 cache->list = NULL;
339 while ((e = next)) {
340 next = e->next;
341 kfree(e);
342 }
343}
344
345struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
346{
347 struct inquiry_cache *cache = &hdev->inq_cache;
348 struct inquiry_entry *e;
349
350 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
351
352 for (e = cache->list; e; e = e->next)
353 if (!bacmp(&e->data.bdaddr, bdaddr))
354 break;
355 return e;
356}
357
/* Insert or refresh the inquiry cache entry for @data->bdaddr.
 * New entries are pushed on the front of the list; existing ones are
 * overwritten in place. Timestamps are refreshed on both the entry and
 * the cache. Allocation failure is silently ignored (best effort). */
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		ie->next = cache->list;
		cache->list = ie;
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}
380
/* Serialize up to @num cache entries into @buf as struct inquiry_info
 * records; returns the count written. Caller holds the device lock and
 * must size @buf for @num entries. */
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
402
/* Request callback: start an inquiry with the parameters passed via
 * @opt (a struct hci_inquiry_req). No-op if one is already running. */
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
419
/* HCIINQUIRY ioctl backend: optionally run a fresh inquiry (when the
 * cache is stale/empty or the caller asked for a flush), then copy the
 * cached results back to userspace. @arg points at a hci_inquiry_req
 * followed by space for the inquiry_info results.
 * Returns 0 or a negative errno. */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in 1.28s units; 2000ms per unit approximates that */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
485
486/* ---- HCI ioctl helpers ---- */
487
/* Bring up HCI device @dev: open the transport, run the HCI init
 * sequence (unless the device is raw), and announce HCI_DEV_UP.
 * On init failure, every resource acquired so far is torn back down
 * before returning. Returns 0 or a negative errno. */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Refuse to power up a device that rfkill has blocked */
	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		/* LE-capable hosts run an additional LE init pass */
		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
574
/* Power down @hdev: cancel pending work, flush connections and queues,
 * reset the controller (unless raw), and close the transport. The
 * teardown order matters: RX/TX are stopped before queues are purged,
 * and the cmd task is killed only after the reset request completes.
 * Always returns 0. */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->tx_task);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work(&hdev->power_off);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	hci_dev_lock(hdev);
	mgmt_powered(hdev, 0);
	hci_dev_unlock(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	/* Drop the reference taken by hci_dev_open() */
	hci_dev_put(hdev);
	return 0;
}
651
/* Ioctl-facing close: look up device @dev by id, close it, and drop
 * the lookup reference. Returns -ENODEV when the id is unknown. */
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}
664
/* HCIDEVRESET ioctl backend: drop pending traffic, flush connections
 * and the inquiry cache, reset flow-control counters and (for non-raw
 * devices) issue an HCI reset. TX is disabled for the duration so no
 * new packets race the purge. Returns 0 or a negative errno. */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset packet counters for ACL/SCO/LE flow control */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
705
/* HCIDEVRESTAT ioctl backend: zero the byte/error counters of device
 * @dev. Returns 0 or -ENODEV. */
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}
721
/* Dispatch the HCISET* family of ioctls. @arg is a user-space
 * struct hci_dev_req whose dev_opt field is interpreted per command.
 * Settings that need controller interaction go through hci_request();
 * the rest just update the hci_dev fields directly.
 * Returns 0 or a negative errno. */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	/* For the MTU commands dev_opt packs two __u16s: low half is the
	 * packet count, high half is the MTU. */
	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
796
/* HCIGETDEVLIST ioctl backend: copy up to dev_num (read from user
 * memory) id/flags pairs for the registered devices to @arg. Walking
 * the list also cancels pending auto-off work and marks non-mgmt
 * devices pairable, mirroring hci_get_dev_info(). */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the kernel buffer stays bounded */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
843
/* HCIGETDEVINFO ioctl backend: fill a struct hci_dev_info for the
 * device whose id is read from @arg and copy it back to userspace.
 * Also cancels pending auto-off and marks non-mgmt devices pairable. */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: bus type; high nibble: controller type */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
885
886/* ---- Interface to HCI drivers ---- */
887
611b30f7
MH
/* rfkill callback: close the device when it gets blocked; unblocking
 * is a no-op (the device is not automatically reopened). */
static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}
901
/* rfkill operations for HCI controllers; only block state is handled */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
905
1da177e4
LT
906/* Alloc HCI device */
/* Allocate and minimally initialize a new hci_dev for a driver.
 * Returns NULL on allocation failure; free with hci_free_dev(). */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hci_init_sysfs(hdev);
	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
920EXPORT_SYMBOL(hci_alloc_dev);
921
922/* Free HCI device */
/* Release a device obtained from hci_alloc_dev(). The hci_dev memory
 * itself is freed by the device-model release callback. */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
930EXPORT_SYMBOL(hci_free_dev);
931
ab81cbf9
JH
/* Deferred power-on work: open the device, schedule auto power-off if
 * armed, and tell mgmt about a newly set-up controller. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	/* Auto-off devices are shut down again after AUTO_OFF_TIMEOUT ms
	 * unless something clears HCI_AUTO_OFF in the meantime */
	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		queue_delayed_work(hdev->workqueue, &hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev);
}
948
/* Deferred power-off work: clear the auto-off marker and close the
 * device. Scheduled from hci_power_on(). */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							power_off.work);

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	hci_dev_close(hdev->id);
}
960
16ab91ab
JH
/* Deferred work: end the discoverable period by restoring page-scan
 * only mode and zeroing the timeout. */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
978
2aeb9a1a
JH
979int hci_uuids_clear(struct hci_dev *hdev)
980{
981 struct list_head *p, *n;
982
983 list_for_each_safe(p, n, &hdev->uuids) {
984 struct bt_uuid *uuid;
985
986 uuid = list_entry(p, struct bt_uuid, list);
987
988 list_del(p);
989 kfree(uuid);
990 }
991
992 return 0;
993}
994
55ed8ca1
JH
995int hci_link_keys_clear(struct hci_dev *hdev)
996{
997 struct list_head *p, *n;
998
999 list_for_each_safe(p, n, &hdev->link_keys) {
1000 struct link_key *key;
1001
1002 key = list_entry(p, struct link_key, list);
1003
1004 list_del(p);
1005 kfree(key);
1006 }
1007
1008 return 0;
1009}
1010
1011struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1012{
8035ded4 1013 struct link_key *k;
55ed8ca1 1014
8035ded4 1015 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1016 if (bacmp(bdaddr, &k->bdaddr) == 0)
1017 return k;
55ed8ca1
JH
1018
1019 return NULL;
1020}
1021
d25e28ab
JH
/* Decide whether a newly generated link key should be stored
 * persistently. Returns 1 to keep the key, 0 to drop it after use.
 * The checks mirror the pairing/bonding requirements: legacy keys and
 * any key produced under dedicated-bonding requirements are kept;
 * debug keys and no-bonding changed-combination keys are not. */
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
						u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
1057
75d262c2
VCG
/* Find the SMP Long Term Key matching the @ediv/@rand pair delivered
 * in an LE encryption request; NULL when no stored LTK matches. Only
 * entries of type HCI_LK_SMP_LTK with a key_master_id payload are
 * considered. */
struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list) {
		struct key_master_id *id;

		if (k->type != HCI_LK_SMP_LTK)
			continue;

		/* Skip entries whose payload isn't a key_master_id */
		if (k->dlen != sizeof(*id))
			continue;

		id = (void *) &k->data;
		if (id->ediv == ediv &&
				(memcmp(rand, id->rand, sizeof(id->rand)) == 0))
			return k;
	}

	return NULL;
}
1079EXPORT_SYMBOL(hci_find_ltk);
1080
1081struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1082 bdaddr_t *bdaddr, u8 type)
1083{
1084 struct link_key *k;
1085
1086 list_for_each_entry(k, &hdev->link_keys, list)
1087 if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1088 return k;
1089
1090 return NULL;
1091}
1092EXPORT_SYMBOL(hci_find_link_key_type);
1093
d25e28ab
JH
/* Store (or update) the link key for @bdaddr. @new_key is non-zero
 * when the controller just generated the key (as opposed to a reload),
 * in which case mgmt is notified and non-persistent keys are dropped
 * again after notification. Returns 0 or -ENOMEM. */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A changed-combination key keeps the previous key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are only forwarded to mgmt, not stored */
	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1148
/* Store (or update) an SMP Long Term Key for @bdaddr. The ediv/rand
 * master identification is stashed in the key's variable-length data.
 * Returns 0 or -ENOMEM.
 * NOTE(review): old_key_type is passed to mgmt_new_link_key() in the
 * parameter position hci_add_link_key() uses for the persistent flag —
 * confirm this mismatch is intentional. */
int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
			u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
{
	struct link_key *key, *old_key;
	struct key_master_id *id;
	u8 old_key_type;

	BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));

	old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
	if (old_key) {
		key = old_key;
		old_key_type = old_key->type;
	} else {
		/* Extra space after the key holds the key_master_id */
		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
		old_key_type = 0xff;
	}

	key->dlen = sizeof(*id);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, ltk, sizeof(key->val));
	key->type = HCI_LK_SMP_LTK;
	key->pin_len = key_size;

	id = (void *) &key->data;
	id->ediv = ediv;
	memcpy(id->rand, rand, sizeof(id->rand));

	if (new_key)
		mgmt_new_link_key(hdev, key, old_key_type);

	return 0;
}
1186
55ed8ca1
JH
1187int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1188{
1189 struct link_key *key;
1190
1191 key = hci_find_link_key(hdev, bdaddr);
1192 if (!key)
1193 return -ENOENT;
1194
1195 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1196
1197 list_del(&key->list);
1198 kfree(key);
1199
1200 return 0;
1201}
1202
6bd32326
VT
1203/* HCI command timer function */
1204static void hci_cmd_timer(unsigned long arg)
1205{
1206 struct hci_dev *hdev = (void *) arg;
1207
1208 BT_ERR("%s command tx timeout", hdev->name);
1209 atomic_set(&hdev->cmd_cnt, 1);
1210 tasklet_schedule(&hdev->cmd_task);
1211}
1212
2763eda6
SJ
1213struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1214 bdaddr_t *bdaddr)
1215{
1216 struct oob_data *data;
1217
1218 list_for_each_entry(data, &hdev->remote_oob_data, list)
1219 if (bacmp(bdaddr, &data->bdaddr) == 0)
1220 return data;
1221
1222 return NULL;
1223}
1224
1225int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1226{
1227 struct oob_data *data;
1228
1229 data = hci_find_remote_oob_data(hdev, bdaddr);
1230 if (!data)
1231 return -ENOENT;
1232
1233 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1234
1235 list_del(&data->list);
1236 kfree(data);
1237
1238 return 0;
1239}
1240
1241int hci_remote_oob_data_clear(struct hci_dev *hdev)
1242{
1243 struct oob_data *data, *n;
1244
1245 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1246 list_del(&data->list);
1247 kfree(data);
1248 }
1249
1250 return 0;
1251}
1252
1253int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1254 u8 *randomizer)
1255{
1256 struct oob_data *data;
1257
1258 data = hci_find_remote_oob_data(hdev, bdaddr);
1259
1260 if (!data) {
1261 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1262 if (!data)
1263 return -ENOMEM;
1264
1265 bacpy(&data->bdaddr, bdaddr);
1266 list_add(&data->list, &hdev->remote_oob_data);
1267 }
1268
1269 memcpy(data->hash, hash, sizeof(data->hash));
1270 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1271
1272 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1273
1274 return 0;
1275}
1276
b2a66aad
AJ
1277struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1278 bdaddr_t *bdaddr)
1279{
8035ded4 1280 struct bdaddr_list *b;
b2a66aad 1281
8035ded4 1282 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1283 if (bacmp(bdaddr, &b->bdaddr) == 0)
1284 return b;
b2a66aad
AJ
1285
1286 return NULL;
1287}
1288
1289int hci_blacklist_clear(struct hci_dev *hdev)
1290{
1291 struct list_head *p, *n;
1292
1293 list_for_each_safe(p, n, &hdev->blacklist) {
1294 struct bdaddr_list *b;
1295
1296 b = list_entry(p, struct bdaddr_list, list);
1297
1298 list_del(p);
1299 kfree(b);
1300 }
1301
1302 return 0;
1303}
1304
1305int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1306{
1307 struct bdaddr_list *entry;
b2a66aad
AJ
1308
1309 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1310 return -EBADF;
1311
5e762444
AJ
1312 if (hci_blacklist_lookup(hdev, bdaddr))
1313 return -EEXIST;
b2a66aad
AJ
1314
1315 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
1316 if (!entry)
1317 return -ENOMEM;
b2a66aad
AJ
1318
1319 bacpy(&entry->bdaddr, bdaddr);
1320
1321 list_add(&entry->list, &hdev->blacklist);
1322
744cf19e 1323 return mgmt_device_blocked(hdev, bdaddr);
b2a66aad
AJ
1324}
1325
1326int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1327{
1328 struct bdaddr_list *entry;
b2a66aad 1329
1ec918ce 1330 if (bacmp(bdaddr, BDADDR_ANY) == 0)
5e762444 1331 return hci_blacklist_clear(hdev);
b2a66aad
AJ
1332
1333 entry = hci_blacklist_lookup(hdev, bdaddr);
1ec918ce 1334 if (!entry)
5e762444 1335 return -ENOENT;
b2a66aad
AJ
1336
1337 list_del(&entry->list);
1338 kfree(entry);
1339
744cf19e 1340 return mgmt_device_unblocked(hdev, bdaddr);
b2a66aad
AJ
1341}
1342
db323f2f 1343static void hci_clear_adv_cache(struct work_struct *work)
35815085 1344{
db323f2f
GP
1345 struct hci_dev *hdev = container_of(work, struct hci_dev,
1346 adv_work.work);
35815085
AG
1347
1348 hci_dev_lock(hdev);
1349
1350 hci_adv_entries_clear(hdev);
1351
1352 hci_dev_unlock(hdev);
1353}
1354
76c8686f
AG
1355int hci_adv_entries_clear(struct hci_dev *hdev)
1356{
1357 struct adv_entry *entry, *tmp;
1358
1359 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1360 list_del(&entry->list);
1361 kfree(entry);
1362 }
1363
1364 BT_DBG("%s adv cache cleared", hdev->name);
1365
1366 return 0;
1367}
1368
1369struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1370{
1371 struct adv_entry *entry;
1372
1373 list_for_each_entry(entry, &hdev->adv_entries, list)
1374 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1375 return entry;
1376
1377 return NULL;
1378}
1379
1380static inline int is_connectable_adv(u8 evt_type)
1381{
1382 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1383 return 1;
1384
1385 return 0;
1386}
1387
1388int hci_add_adv_entry(struct hci_dev *hdev,
1389 struct hci_ev_le_advertising_info *ev)
1390{
1391 struct adv_entry *entry;
1392
1393 if (!is_connectable_adv(ev->evt_type))
1394 return -EINVAL;
1395
1396 /* Only new entries should be added to adv_entries. So, if
1397 * bdaddr was found, don't add it. */
1398 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1399 return 0;
1400
1401 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1402 if (!entry)
1403 return -ENOMEM;
1404
1405 bacpy(&entry->bdaddr, &ev->bdaddr);
1406 entry->bdaddr_type = ev->bdaddr_type;
1407
1408 list_add(&entry->list, &hdev->adv_entries);
1409
1410 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1411 batostr(&entry->bdaddr), entry->bdaddr_type);
1412
1413 return 0;
1414}
1415
1da177e4
LT
1416/* Register HCI device */
1417int hci_register_dev(struct hci_dev *hdev)
1418{
1419 struct list_head *head = &hci_dev_list, *p;
08add513 1420 int i, id, error;
1da177e4 1421
c13854ce
MH
1422 BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
1423 hdev->bus, hdev->owner);
1da177e4
LT
1424
1425 if (!hdev->open || !hdev->close || !hdev->destruct)
1426 return -EINVAL;
1427
08add513
MM
1428 /* Do not allow HCI_AMP devices to register at index 0,
1429 * so the index can be used as the AMP controller ID.
1430 */
1431 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1432
1da177e4
LT
1433 write_lock_bh(&hci_dev_list_lock);
1434
1435 /* Find first available device id */
1436 list_for_each(p, &hci_dev_list) {
1437 if (list_entry(p, struct hci_dev, list)->id != id)
1438 break;
1439 head = p; id++;
1440 }
8e87d142 1441
1da177e4
LT
1442 sprintf(hdev->name, "hci%d", id);
1443 hdev->id = id;
c6feeb28 1444 list_add_tail(&hdev->list, head);
1da177e4
LT
1445
1446 atomic_set(&hdev->refcnt, 1);
09fd0de5 1447 mutex_init(&hdev->lock);
1da177e4
LT
1448
1449 hdev->flags = 0;
d23264a8 1450 hdev->dev_flags = 0;
1da177e4 1451 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
5b7f9909 1452 hdev->esco_type = (ESCO_HV1);
1da177e4 1453 hdev->link_mode = (HCI_LM_ACCEPT);
17fa4b9d 1454 hdev->io_capability = 0x03; /* No Input No Output */
1da177e4 1455
04837f64
MH
1456 hdev->idle_timeout = 0;
1457 hdev->sniff_max_interval = 800;
1458 hdev->sniff_min_interval = 80;
1459
b78752cc
MH
1460 INIT_WORK(&hdev->rx_work, hci_rx_work);
1461
1462 tasklet_init(&hdev->cmd_task, hci_cmd_task,(unsigned long) hdev);
1da177e4
LT
1463 tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
1464
1465 skb_queue_head_init(&hdev->rx_q);
1466 skb_queue_head_init(&hdev->cmd_q);
1467 skb_queue_head_init(&hdev->raw_q);
1468
6bd32326
VT
1469 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1470
cd4c5391 1471 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
1472 hdev->reassembly[i] = NULL;
1473
1da177e4 1474 init_waitqueue_head(&hdev->req_wait_q);
a6a67efd 1475 mutex_init(&hdev->req_lock);
1da177e4
LT
1476
1477 inquiry_cache_init(hdev);
1478
1479 hci_conn_hash_init(hdev);
1480
2e58ef3e
JH
1481 INIT_LIST_HEAD(&hdev->mgmt_pending);
1482
ea4bd8ba 1483 INIT_LIST_HEAD(&hdev->blacklist);
f0358568 1484
2aeb9a1a
JH
1485 INIT_LIST_HEAD(&hdev->uuids);
1486
55ed8ca1
JH
1487 INIT_LIST_HEAD(&hdev->link_keys);
1488
2763eda6
SJ
1489 INIT_LIST_HEAD(&hdev->remote_oob_data);
1490
76c8686f
AG
1491 INIT_LIST_HEAD(&hdev->adv_entries);
1492
db323f2f 1493 INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
ab81cbf9 1494 INIT_WORK(&hdev->power_on, hci_power_on);
3243553f 1495 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
ab81cbf9 1496
16ab91ab
JH
1497 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1498
1da177e4
LT
1499 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1500
1501 atomic_set(&hdev->promisc, 0);
1502
1503 write_unlock_bh(&hci_dev_list_lock);
1504
f48fd9c8 1505 hdev->workqueue = create_singlethread_workqueue(hdev->name);
33ca954d
DH
1506 if (!hdev->workqueue) {
1507 error = -ENOMEM;
1508 goto err;
1509 }
f48fd9c8 1510
33ca954d
DH
1511 error = hci_add_sysfs(hdev);
1512 if (error < 0)
1513 goto err_wqueue;
1da177e4 1514
611b30f7
MH
1515 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1516 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1517 if (hdev->rfkill) {
1518 if (rfkill_register(hdev->rfkill) < 0) {
1519 rfkill_destroy(hdev->rfkill);
1520 hdev->rfkill = NULL;
1521 }
1522 }
1523
ab81cbf9
JH
1524 set_bit(HCI_AUTO_OFF, &hdev->flags);
1525 set_bit(HCI_SETUP, &hdev->flags);
1526 queue_work(hdev->workqueue, &hdev->power_on);
1527
1da177e4
LT
1528 hci_notify(hdev, HCI_DEV_REG);
1529
1530 return id;
f48fd9c8 1531
33ca954d
DH
1532err_wqueue:
1533 destroy_workqueue(hdev->workqueue);
1534err:
f48fd9c8
MH
1535 write_lock_bh(&hci_dev_list_lock);
1536 list_del(&hdev->list);
1537 write_unlock_bh(&hci_dev_list_lock);
1538
33ca954d 1539 return error;
1da177e4
LT
1540}
1541EXPORT_SYMBOL(hci_register_dev);
1542
1543/* Unregister HCI device */
59735631 1544void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 1545{
ef222013
MH
1546 int i;
1547
c13854ce 1548 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 1549
1da177e4
LT
1550 write_lock_bh(&hci_dev_list_lock);
1551 list_del(&hdev->list);
1552 write_unlock_bh(&hci_dev_list_lock);
1553
1554 hci_dev_do_close(hdev);
1555
cd4c5391 1556 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
1557 kfree_skb(hdev->reassembly[i]);
1558
ab81cbf9 1559 if (!test_bit(HCI_INIT, &hdev->flags) &&
56e5cb86 1560 !test_bit(HCI_SETUP, &hdev->flags)) {
09fd0de5 1561 hci_dev_lock(hdev);
744cf19e 1562 mgmt_index_removed(hdev);
09fd0de5 1563 hci_dev_unlock(hdev);
56e5cb86 1564 }
ab81cbf9 1565
2e58ef3e
JH
1566 /* mgmt_index_removed should take care of emptying the
1567 * pending list */
1568 BUG_ON(!list_empty(&hdev->mgmt_pending));
1569
1da177e4
LT
1570 hci_notify(hdev, HCI_DEV_UNREG);
1571
611b30f7
MH
1572 if (hdev->rfkill) {
1573 rfkill_unregister(hdev->rfkill);
1574 rfkill_destroy(hdev->rfkill);
1575 }
1576
ce242970 1577 hci_del_sysfs(hdev);
147e2d59 1578
db323f2f 1579 cancel_delayed_work_sync(&hdev->adv_work);
c6f3c5f7 1580
f48fd9c8
MH
1581 destroy_workqueue(hdev->workqueue);
1582
09fd0de5 1583 hci_dev_lock(hdev);
e2e0cacb 1584 hci_blacklist_clear(hdev);
2aeb9a1a 1585 hci_uuids_clear(hdev);
55ed8ca1 1586 hci_link_keys_clear(hdev);
2763eda6 1587 hci_remote_oob_data_clear(hdev);
76c8686f 1588 hci_adv_entries_clear(hdev);
09fd0de5 1589 hci_dev_unlock(hdev);
e2e0cacb 1590
1da177e4 1591 __hci_dev_put(hdev);
1da177e4
LT
1592}
1593EXPORT_SYMBOL(hci_unregister_dev);
1594
1595/* Suspend HCI device */
1596int hci_suspend_dev(struct hci_dev *hdev)
1597{
1598 hci_notify(hdev, HCI_DEV_SUSPEND);
1599 return 0;
1600}
1601EXPORT_SYMBOL(hci_suspend_dev);
1602
1603/* Resume HCI device */
1604int hci_resume_dev(struct hci_dev *hdev)
1605{
1606 hci_notify(hdev, HCI_DEV_RESUME);
1607 return 0;
1608}
1609EXPORT_SYMBOL(hci_resume_dev);
1610
76bca880
MH
1611/* Receive frame from HCI drivers */
1612int hci_recv_frame(struct sk_buff *skb)
1613{
1614 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1615 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1616 && !test_bit(HCI_INIT, &hdev->flags))) {
1617 kfree_skb(skb);
1618 return -ENXIO;
1619 }
1620
1621 /* Incomming skb */
1622 bt_cb(skb)->incoming = 1;
1623
1624 /* Time stamp */
1625 __net_timestamp(skb);
1626
76bca880 1627 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 1628 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 1629
76bca880
MH
1630 return 0;
1631}
1632EXPORT_SYMBOL(hci_recv_frame);
1633
33e882a5 1634static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1e429f38 1635 int count, __u8 index)
33e882a5
SS
1636{
1637 int len = 0;
1638 int hlen = 0;
1639 int remain = count;
1640 struct sk_buff *skb;
1641 struct bt_skb_cb *scb;
1642
1643 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1644 index >= NUM_REASSEMBLY)
1645 return -EILSEQ;
1646
1647 skb = hdev->reassembly[index];
1648
1649 if (!skb) {
1650 switch (type) {
1651 case HCI_ACLDATA_PKT:
1652 len = HCI_MAX_FRAME_SIZE;
1653 hlen = HCI_ACL_HDR_SIZE;
1654 break;
1655 case HCI_EVENT_PKT:
1656 len = HCI_MAX_EVENT_SIZE;
1657 hlen = HCI_EVENT_HDR_SIZE;
1658 break;
1659 case HCI_SCODATA_PKT:
1660 len = HCI_MAX_SCO_SIZE;
1661 hlen = HCI_SCO_HDR_SIZE;
1662 break;
1663 }
1664
1e429f38 1665 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
1666 if (!skb)
1667 return -ENOMEM;
1668
1669 scb = (void *) skb->cb;
1670 scb->expect = hlen;
1671 scb->pkt_type = type;
1672
1673 skb->dev = (void *) hdev;
1674 hdev->reassembly[index] = skb;
1675 }
1676
1677 while (count) {
1678 scb = (void *) skb->cb;
1679 len = min(scb->expect, (__u16)count);
1680
1681 memcpy(skb_put(skb, len), data, len);
1682
1683 count -= len;
1684 data += len;
1685 scb->expect -= len;
1686 remain = count;
1687
1688 switch (type) {
1689 case HCI_EVENT_PKT:
1690 if (skb->len == HCI_EVENT_HDR_SIZE) {
1691 struct hci_event_hdr *h = hci_event_hdr(skb);
1692 scb->expect = h->plen;
1693
1694 if (skb_tailroom(skb) < scb->expect) {
1695 kfree_skb(skb);
1696 hdev->reassembly[index] = NULL;
1697 return -ENOMEM;
1698 }
1699 }
1700 break;
1701
1702 case HCI_ACLDATA_PKT:
1703 if (skb->len == HCI_ACL_HDR_SIZE) {
1704 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1705 scb->expect = __le16_to_cpu(h->dlen);
1706
1707 if (skb_tailroom(skb) < scb->expect) {
1708 kfree_skb(skb);
1709 hdev->reassembly[index] = NULL;
1710 return -ENOMEM;
1711 }
1712 }
1713 break;
1714
1715 case HCI_SCODATA_PKT:
1716 if (skb->len == HCI_SCO_HDR_SIZE) {
1717 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1718 scb->expect = h->dlen;
1719
1720 if (skb_tailroom(skb) < scb->expect) {
1721 kfree_skb(skb);
1722 hdev->reassembly[index] = NULL;
1723 return -ENOMEM;
1724 }
1725 }
1726 break;
1727 }
1728
1729 if (scb->expect == 0) {
1730 /* Complete frame */
1731
1732 bt_cb(skb)->pkt_type = type;
1733 hci_recv_frame(skb);
1734
1735 hdev->reassembly[index] = NULL;
1736 return remain;
1737 }
1738 }
1739
1740 return remain;
1741}
1742
ef222013
MH
1743int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1744{
f39a3c06
SS
1745 int rem = 0;
1746
ef222013
MH
1747 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1748 return -EILSEQ;
1749
da5f6c37 1750 while (count) {
1e429f38 1751 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
1752 if (rem < 0)
1753 return rem;
ef222013 1754
f39a3c06
SS
1755 data += (count - rem);
1756 count = rem;
f81c6224 1757 }
ef222013 1758
f39a3c06 1759 return rem;
ef222013
MH
1760}
1761EXPORT_SYMBOL(hci_recv_fragment);
1762
99811510
SS
1763#define STREAM_REASSEMBLY 0
1764
1765int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1766{
1767 int type;
1768 int rem = 0;
1769
da5f6c37 1770 while (count) {
99811510
SS
1771 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1772
1773 if (!skb) {
1774 struct { char type; } *pkt;
1775
1776 /* Start of the frame */
1777 pkt = data;
1778 type = pkt->type;
1779
1780 data++;
1781 count--;
1782 } else
1783 type = bt_cb(skb)->pkt_type;
1784
1e429f38
GP
1785 rem = hci_reassembly(hdev, type, data, count,
1786 STREAM_REASSEMBLY);
99811510
SS
1787 if (rem < 0)
1788 return rem;
1789
1790 data += (count - rem);
1791 count = rem;
f81c6224 1792 }
99811510
SS
1793
1794 return rem;
1795}
1796EXPORT_SYMBOL(hci_recv_stream_fragment);
1797
1da177e4
LT
1798/* ---- Interface to upper protocols ---- */
1799
1800/* Register/Unregister protocols.
1801 * hci_task_lock is used to ensure that no tasks are running. */
1802int hci_register_proto(struct hci_proto *hp)
1803{
1804 int err = 0;
1805
1806 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1807
1808 if (hp->id >= HCI_MAX_PROTO)
1809 return -EINVAL;
1810
67d0dfb5 1811 mutex_lock(&hci_task_lock);
1da177e4
LT
1812
1813 if (!hci_proto[hp->id])
1814 hci_proto[hp->id] = hp;
1815 else
1816 err = -EEXIST;
1817
67d0dfb5 1818 mutex_unlock(&hci_task_lock);
1da177e4
LT
1819
1820 return err;
1821}
1822EXPORT_SYMBOL(hci_register_proto);
1823
1824int hci_unregister_proto(struct hci_proto *hp)
1825{
1826 int err = 0;
1827
1828 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1829
1830 if (hp->id >= HCI_MAX_PROTO)
1831 return -EINVAL;
1832
67d0dfb5 1833 mutex_lock(&hci_task_lock);
1da177e4
LT
1834
1835 if (hci_proto[hp->id])
1836 hci_proto[hp->id] = NULL;
1837 else
1838 err = -ENOENT;
1839
67d0dfb5 1840 mutex_unlock(&hci_task_lock);
1da177e4
LT
1841
1842 return err;
1843}
1844EXPORT_SYMBOL(hci_unregister_proto);
1845
1846int hci_register_cb(struct hci_cb *cb)
1847{
1848 BT_DBG("%p name %s", cb, cb->name);
1849
1850 write_lock_bh(&hci_cb_list_lock);
1851 list_add(&cb->list, &hci_cb_list);
1852 write_unlock_bh(&hci_cb_list_lock);
1853
1854 return 0;
1855}
1856EXPORT_SYMBOL(hci_register_cb);
1857
1858int hci_unregister_cb(struct hci_cb *cb)
1859{
1860 BT_DBG("%p name %s", cb, cb->name);
1861
1862 write_lock_bh(&hci_cb_list_lock);
1863 list_del(&cb->list);
1864 write_unlock_bh(&hci_cb_list_lock);
1865
1866 return 0;
1867}
1868EXPORT_SYMBOL(hci_unregister_cb);
1869
1870static int hci_send_frame(struct sk_buff *skb)
1871{
1872 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1873
1874 if (!hdev) {
1875 kfree_skb(skb);
1876 return -ENODEV;
1877 }
1878
0d48d939 1879 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4
LT
1880
1881 if (atomic_read(&hdev->promisc)) {
1882 /* Time stamp */
a61bbcf2 1883 __net_timestamp(skb);
1da177e4 1884
eec8d2bc 1885 hci_send_to_sock(hdev, skb, NULL);
1da177e4
LT
1886 }
1887
1888 /* Get rid of skb owner, prior to sending to the driver. */
1889 skb_orphan(skb);
1890
1891 return hdev->send(skb);
1892}
1893
1894/* Send HCI command */
a9de9248 1895int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1da177e4
LT
1896{
1897 int len = HCI_COMMAND_HDR_SIZE + plen;
1898 struct hci_command_hdr *hdr;
1899 struct sk_buff *skb;
1900
a9de9248 1901 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1da177e4
LT
1902
1903 skb = bt_skb_alloc(len, GFP_ATOMIC);
1904 if (!skb) {
ef222013 1905 BT_ERR("%s no memory for command", hdev->name);
1da177e4
LT
1906 return -ENOMEM;
1907 }
1908
1909 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 1910 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
1911 hdr->plen = plen;
1912
1913 if (plen)
1914 memcpy(skb_put(skb, plen), param, plen);
1915
1916 BT_DBG("skb len %d", skb->len);
1917
0d48d939 1918 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1da177e4 1919 skb->dev = (void *) hdev;
c78ae283 1920
a5040efa
JH
1921 if (test_bit(HCI_INIT, &hdev->flags))
1922 hdev->init_last_cmd = opcode;
1923
1da177e4 1924 skb_queue_tail(&hdev->cmd_q, skb);
c78ae283 1925 tasklet_schedule(&hdev->cmd_task);
1da177e4
LT
1926
1927 return 0;
1928}
1da177e4
LT
1929
1930/* Get data from the previously sent command */
a9de9248 1931void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
1932{
1933 struct hci_command_hdr *hdr;
1934
1935 if (!hdev->sent_cmd)
1936 return NULL;
1937
1938 hdr = (void *) hdev->sent_cmd->data;
1939
a9de9248 1940 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
1941 return NULL;
1942
a9de9248 1943 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1da177e4
LT
1944
1945 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1946}
1947
1948/* Send ACL data */
1949static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1950{
1951 struct hci_acl_hdr *hdr;
1952 int len = skb->len;
1953
badff6d0
ACM
1954 skb_push(skb, HCI_ACL_HDR_SIZE);
1955 skb_reset_transport_header(skb);
9c70220b 1956 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
1957 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1958 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
1959}
1960
73d80deb
LAD
1961static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
1962 struct sk_buff *skb, __u16 flags)
1da177e4
LT
1963{
1964 struct hci_dev *hdev = conn->hdev;
1965 struct sk_buff *list;
1966
70f23020
AE
1967 list = skb_shinfo(skb)->frag_list;
1968 if (!list) {
1da177e4
LT
1969 /* Non fragmented */
1970 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1971
73d80deb 1972 skb_queue_tail(queue, skb);
1da177e4
LT
1973 } else {
1974 /* Fragmented */
1975 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1976
1977 skb_shinfo(skb)->frag_list = NULL;
1978
1979 /* Queue all fragments atomically */
73d80deb 1980 spin_lock_bh(&queue->lock);
1da177e4 1981
73d80deb 1982 __skb_queue_tail(queue, skb);
e702112f
AE
1983
1984 flags &= ~ACL_START;
1985 flags |= ACL_CONT;
1da177e4
LT
1986 do {
1987 skb = list; list = list->next;
8e87d142 1988
1da177e4 1989 skb->dev = (void *) hdev;
0d48d939 1990 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 1991 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
1992
1993 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1994
73d80deb 1995 __skb_queue_tail(queue, skb);
1da177e4
LT
1996 } while (list);
1997
73d80deb 1998 spin_unlock_bh(&queue->lock);
1da177e4 1999 }
73d80deb
LAD
2000}
2001
2002void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2003{
2004 struct hci_conn *conn = chan->conn;
2005 struct hci_dev *hdev = conn->hdev;
2006
2007 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2008
2009 skb->dev = (void *) hdev;
2010 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2011 hci_add_acl_hdr(skb, conn->handle, flags);
2012
2013 hci_queue_acl(conn, &chan->data_q, skb, flags);
1da177e4 2014
c78ae283 2015 tasklet_schedule(&hdev->tx_task);
1da177e4
LT
2016}
2017EXPORT_SYMBOL(hci_send_acl);
2018
2019/* Send SCO data */
0d861d8b 2020void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
2021{
2022 struct hci_dev *hdev = conn->hdev;
2023 struct hci_sco_hdr hdr;
2024
2025 BT_DBG("%s len %d", hdev->name, skb->len);
2026
aca3192c 2027 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
2028 hdr.dlen = skb->len;
2029
badff6d0
ACM
2030 skb_push(skb, HCI_SCO_HDR_SIZE);
2031 skb_reset_transport_header(skb);
9c70220b 2032 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4
LT
2033
2034 skb->dev = (void *) hdev;
0d48d939 2035 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 2036
1da177e4 2037 skb_queue_tail(&conn->data_q, skb);
c78ae283 2038 tasklet_schedule(&hdev->tx_task);
1da177e4
LT
2039}
2040EXPORT_SYMBOL(hci_send_sco);
2041
2042/* ---- HCI TX task (outgoing data) ---- */
2043
2044/* HCI Connection scheduler */
2045static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2046{
2047 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2048 struct hci_conn *conn = NULL, *c;
1da177e4 2049 int num = 0, min = ~0;
1da177e4 2050
8e87d142 2051 /* We don't have to lock device here. Connections are always
1da177e4 2052 * added and removed with TX task disabled. */
8035ded4 2053 list_for_each_entry(c, &h->list, list) {
769be974 2054 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 2055 continue;
769be974
MH
2056
2057 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2058 continue;
2059
1da177e4
LT
2060 num++;
2061
2062 if (c->sent < min) {
2063 min = c->sent;
2064 conn = c;
2065 }
52087a79
LAD
2066
2067 if (hci_conn_num(hdev, type) == num)
2068 break;
1da177e4
LT
2069 }
2070
2071 if (conn) {
6ed58ec5
VT
2072 int cnt, q;
2073
2074 switch (conn->type) {
2075 case ACL_LINK:
2076 cnt = hdev->acl_cnt;
2077 break;
2078 case SCO_LINK:
2079 case ESCO_LINK:
2080 cnt = hdev->sco_cnt;
2081 break;
2082 case LE_LINK:
2083 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2084 break;
2085 default:
2086 cnt = 0;
2087 BT_ERR("Unknown link type");
2088 }
2089
2090 q = cnt / num;
1da177e4
LT
2091 *quote = q ? q : 1;
2092 } else
2093 *quote = 0;
2094
2095 BT_DBG("conn %p quote %d", conn, *quote);
2096 return conn;
2097}
2098
bae1f5d9 2099static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
2100{
2101 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2102 struct hci_conn *c;
1da177e4 2103
bae1f5d9 2104 BT_ERR("%s link tx timeout", hdev->name);
1da177e4
LT
2105
2106 /* Kill stalled connections */
8035ded4 2107 list_for_each_entry(c, &h->list, list) {
bae1f5d9
VT
2108 if (c->type == type && c->sent) {
2109 BT_ERR("%s killing stalled connection %s",
1da177e4
LT
2110 hdev->name, batostr(&c->dst));
2111 hci_acl_disconn(c, 0x13);
2112 }
2113 }
2114}
2115
73d80deb
LAD
2116static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2117 int *quote)
1da177e4 2118{
73d80deb
LAD
2119 struct hci_conn_hash *h = &hdev->conn_hash;
2120 struct hci_chan *chan = NULL;
2121 int num = 0, min = ~0, cur_prio = 0;
1da177e4 2122 struct hci_conn *conn;
73d80deb
LAD
2123 int cnt, q, conn_num = 0;
2124
2125 BT_DBG("%s", hdev->name);
2126
2127 list_for_each_entry(conn, &h->list, list) {
73d80deb
LAD
2128 struct hci_chan *tmp;
2129
2130 if (conn->type != type)
2131 continue;
2132
2133 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2134 continue;
2135
2136 conn_num++;
2137
8192edef
GP
2138 rcu_read_lock();
2139
2140 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
2141 struct sk_buff *skb;
2142
2143 if (skb_queue_empty(&tmp->data_q))
2144 continue;
2145
2146 skb = skb_peek(&tmp->data_q);
2147 if (skb->priority < cur_prio)
2148 continue;
2149
2150 if (skb->priority > cur_prio) {
2151 num = 0;
2152 min = ~0;
2153 cur_prio = skb->priority;
2154 }
2155
2156 num++;
2157
2158 if (conn->sent < min) {
2159 min = conn->sent;
2160 chan = tmp;
2161 }
2162 }
2163
8192edef
GP
2164 rcu_read_unlock();
2165
73d80deb
LAD
2166 if (hci_conn_num(hdev, type) == conn_num)
2167 break;
2168 }
2169
2170 if (!chan)
2171 return NULL;
2172
2173 switch (chan->conn->type) {
2174 case ACL_LINK:
2175 cnt = hdev->acl_cnt;
2176 break;
2177 case SCO_LINK:
2178 case ESCO_LINK:
2179 cnt = hdev->sco_cnt;
2180 break;
2181 case LE_LINK:
2182 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2183 break;
2184 default:
2185 cnt = 0;
2186 BT_ERR("Unknown link type");
2187 }
2188
2189 q = cnt / num;
2190 *quote = q ? q : 1;
2191 BT_DBG("chan %p quote %d", chan, *quote);
2192 return chan;
2193}
2194
02b20f0b
LAD
2195static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2196{
2197 struct hci_conn_hash *h = &hdev->conn_hash;
2198 struct hci_conn *conn;
2199 int num = 0;
2200
2201 BT_DBG("%s", hdev->name);
2202
2203 list_for_each_entry(conn, &h->list, list) {
02b20f0b
LAD
2204 struct hci_chan *chan;
2205
2206 if (conn->type != type)
2207 continue;
2208
2209 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2210 continue;
2211
2212 num++;
2213
8192edef
GP
2214 rcu_read_lock();
2215
2216 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
2217 struct sk_buff *skb;
2218
2219 if (chan->sent) {
2220 chan->sent = 0;
2221 continue;
2222 }
2223
2224 if (skb_queue_empty(&chan->data_q))
2225 continue;
2226
2227 skb = skb_peek(&chan->data_q);
2228 if (skb->priority >= HCI_PRIO_MAX - 1)
2229 continue;
2230
2231 skb->priority = HCI_PRIO_MAX - 1;
2232
2233 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2234 skb->priority);
2235 }
2236
8192edef
GP
2237 rcu_read_unlock();
2238
02b20f0b
LAD
2239 if (hci_conn_num(hdev, type) == num)
2240 break;
2241 }
2242}
2243
73d80deb
LAD
2244static inline void hci_sched_acl(struct hci_dev *hdev)
2245{
2246 struct hci_chan *chan;
1da177e4
LT
2247 struct sk_buff *skb;
2248 int quote;
73d80deb 2249 unsigned int cnt;
1da177e4
LT
2250
2251 BT_DBG("%s", hdev->name);
2252
52087a79
LAD
2253 if (!hci_conn_num(hdev, ACL_LINK))
2254 return;
2255
1da177e4
LT
2256 if (!test_bit(HCI_RAW, &hdev->flags)) {
2257 /* ACL tx timeout must be longer than maximum
2258 * link supervision timeout (40.9 seconds) */
82453021 2259 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
bae1f5d9 2260 hci_link_tx_to(hdev, ACL_LINK);
1da177e4
LT
2261 }
2262
73d80deb 2263 cnt = hdev->acl_cnt;
04837f64 2264
73d80deb
LAD
2265 while (hdev->acl_cnt &&
2266 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
2267 u32 priority = (skb_peek(&chan->data_q))->priority;
2268 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb
LAD
2269 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2270 skb->len, skb->priority);
2271
ec1cce24
LAD
2272 /* Stop if priority has changed */
2273 if (skb->priority < priority)
2274 break;
2275
2276 skb = skb_dequeue(&chan->data_q);
2277
73d80deb
LAD
2278 hci_conn_enter_active_mode(chan->conn,
2279 bt_cb(skb)->force_active);
04837f64 2280
1da177e4
LT
2281 hci_send_frame(skb);
2282 hdev->acl_last_tx = jiffies;
2283
2284 hdev->acl_cnt--;
73d80deb
LAD
2285 chan->sent++;
2286 chan->conn->sent++;
1da177e4
LT
2287 }
2288 }
02b20f0b
LAD
2289
2290 if (cnt != hdev->acl_cnt)
2291 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
2292}
2293
2294/* Schedule SCO */
2295static inline void hci_sched_sco(struct hci_dev *hdev)
2296{
2297 struct hci_conn *conn;
2298 struct sk_buff *skb;
2299 int quote;
2300
2301 BT_DBG("%s", hdev->name);
2302
52087a79
LAD
2303 if (!hci_conn_num(hdev, SCO_LINK))
2304 return;
2305
1da177e4
LT
2306 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2307 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2308 BT_DBG("skb %p len %d", skb, skb->len);
2309 hci_send_frame(skb);
2310
2311 conn->sent++;
2312 if (conn->sent == ~0)
2313 conn->sent = 0;
2314 }
2315 }
2316}
2317
b6a0dc82
MH
2318static inline void hci_sched_esco(struct hci_dev *hdev)
2319{
2320 struct hci_conn *conn;
2321 struct sk_buff *skb;
2322 int quote;
2323
2324 BT_DBG("%s", hdev->name);
2325
52087a79
LAD
2326 if (!hci_conn_num(hdev, ESCO_LINK))
2327 return;
2328
b6a0dc82
MH
2329 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2330 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2331 BT_DBG("skb %p len %d", skb, skb->len);
2332 hci_send_frame(skb);
2333
2334 conn->sent++;
2335 if (conn->sent == ~0)
2336 conn->sent = 0;
2337 }
2338 }
2339}
2340
6ed58ec5
VT
/* Schedule LE: transmit queued LE frames while buffer credits remain.
 * Controllers without a dedicated LE buffer pool (le_pkts == 0) borrow
 * credits from the ACL pool instead. */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Pick the credit pool: dedicated LE credits if the controller
	 * has them, otherwise the shared ACL credits. */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;	/* starting credits, to detect progress below */
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			/* Peeked skb is still queued; dequeue it now
			 * that we have committed to sending it. */
			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to whichever pool we used. */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* If anything was sent, let the priority logic rebalance. */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2391
1da177e4
LT
2392static void hci_tx_task(unsigned long arg)
2393{
2394 struct hci_dev *hdev = (struct hci_dev *) arg;
2395 struct sk_buff *skb;
2396
67d0dfb5 2397 mutex_lock(&hci_task_lock);
1da177e4 2398
6ed58ec5
VT
2399 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2400 hdev->sco_cnt, hdev->le_cnt);
1da177e4
LT
2401
2402 /* Schedule queues and send stuff to HCI driver */
2403
2404 hci_sched_acl(hdev);
2405
2406 hci_sched_sco(hdev);
2407
b6a0dc82
MH
2408 hci_sched_esco(hdev);
2409
6ed58ec5
VT
2410 hci_sched_le(hdev);
2411
1da177e4
LT
2412 /* Send next queued raw (unknown type) packet */
2413 while ((skb = skb_dequeue(&hdev->raw_q)))
2414 hci_send_frame(skb);
2415
67d0dfb5 2416 mutex_unlock(&hci_task_lock);
1da177e4
LT
2417}
2418
25985edc 2419/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
2420
2421/* ACL data packet */
2422static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2423{
2424 struct hci_acl_hdr *hdr = (void *) skb->data;
2425 struct hci_conn *conn;
2426 __u16 handle, flags;
2427
2428 skb_pull(skb, HCI_ACL_HDR_SIZE);
2429
2430 handle = __le16_to_cpu(hdr->handle);
2431 flags = hci_flags(handle);
2432 handle = hci_handle(handle);
2433
2434 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2435
2436 hdev->stat.acl_rx++;
2437
2438 hci_dev_lock(hdev);
2439 conn = hci_conn_hash_lookup_handle(hdev, handle);
2440 hci_dev_unlock(hdev);
8e87d142 2441
1da177e4
LT
2442 if (conn) {
2443 register struct hci_proto *hp;
2444
14b12d0b 2445 hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
04837f64 2446
1da177e4 2447 /* Send to upper protocol */
70f23020
AE
2448 hp = hci_proto[HCI_PROTO_L2CAP];
2449 if (hp && hp->recv_acldata) {
1da177e4
LT
2450 hp->recv_acldata(conn, skb, flags);
2451 return;
2452 }
2453 } else {
8e87d142 2454 BT_ERR("%s ACL packet for unknown connection handle %d",
1da177e4
LT
2455 hdev->name, handle);
2456 }
2457
2458 kfree_skb(skb);
2459}
2460
2461/* SCO data packet */
2462static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2463{
2464 struct hci_sco_hdr *hdr = (void *) skb->data;
2465 struct hci_conn *conn;
2466 __u16 handle;
2467
2468 skb_pull(skb, HCI_SCO_HDR_SIZE);
2469
2470 handle = __le16_to_cpu(hdr->handle);
2471
2472 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2473
2474 hdev->stat.sco_rx++;
2475
2476 hci_dev_lock(hdev);
2477 conn = hci_conn_hash_lookup_handle(hdev, handle);
2478 hci_dev_unlock(hdev);
2479
2480 if (conn) {
2481 register struct hci_proto *hp;
2482
2483 /* Send to upper protocol */
70f23020
AE
2484 hp = hci_proto[HCI_PROTO_SCO];
2485 if (hp && hp->recv_scodata) {
1da177e4
LT
2486 hp->recv_scodata(conn, skb);
2487 return;
2488 }
2489 } else {
8e87d142 2490 BT_ERR("%s SCO packet for unknown connection handle %d",
1da177e4
LT
2491 hdev->name, handle);
2492 }
2493
2494 kfree_skb(skb);
2495}
2496
b78752cc 2497static void hci_rx_work(struct work_struct *work)
1da177e4 2498{
b78752cc 2499 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
2500 struct sk_buff *skb;
2501
2502 BT_DBG("%s", hdev->name);
2503
67d0dfb5 2504 mutex_lock(&hci_task_lock);
1da177e4
LT
2505
2506 while ((skb = skb_dequeue(&hdev->rx_q))) {
2507 if (atomic_read(&hdev->promisc)) {
2508 /* Send copy to the sockets */
eec8d2bc 2509 hci_send_to_sock(hdev, skb, NULL);
1da177e4
LT
2510 }
2511
2512 if (test_bit(HCI_RAW, &hdev->flags)) {
2513 kfree_skb(skb);
2514 continue;
2515 }
2516
2517 if (test_bit(HCI_INIT, &hdev->flags)) {
2518 /* Don't process data packets in this states. */
0d48d939 2519 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
2520 case HCI_ACLDATA_PKT:
2521 case HCI_SCODATA_PKT:
2522 kfree_skb(skb);
2523 continue;
3ff50b79 2524 }
1da177e4
LT
2525 }
2526
2527 /* Process frame */
0d48d939 2528 switch (bt_cb(skb)->pkt_type) {
1da177e4 2529 case HCI_EVENT_PKT:
b78752cc 2530 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
2531 hci_event_packet(hdev, skb);
2532 break;
2533
2534 case HCI_ACLDATA_PKT:
2535 BT_DBG("%s ACL data packet", hdev->name);
2536 hci_acldata_packet(hdev, skb);
2537 break;
2538
2539 case HCI_SCODATA_PKT:
2540 BT_DBG("%s SCO data packet", hdev->name);
2541 hci_scodata_packet(hdev, skb);
2542 break;
2543
2544 default:
2545 kfree_skb(skb);
2546 break;
2547 }
2548 }
2549
67d0dfb5 2550 mutex_unlock(&hci_task_lock);
1da177e4
LT
2551}
2552
/* Command tasklet: send the next queued HCI command when the
 * controller's command credit count (cmd_cnt) permits. */
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previous in-flight command before storing a
		 * reference to the new one. */
		kfree_skb(hdev->sent_cmd);

		/* Keep a clone in hdev->sent_cmd — presumably so the
		 * completion/event path can refer to the command that
		 * is in flight (handled elsewhere in this file). */
		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* While an HCI_Reset is pending no command
			 * timeout is armed; otherwise (re)start the
			 * command timer. */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed (atomic allocation): put the
			 * command back and retry on the next run. */
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}
2519a1fc
AG
2583
2584int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2585{
2586 /* General inquiry access code (GIAC) */
2587 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2588 struct hci_cp_inquiry cp;
2589
2590 BT_DBG("%s", hdev->name);
2591
2592 if (test_bit(HCI_INQUIRY, &hdev->flags))
2593 return -EINPROGRESS;
2594
2595 memset(&cp, 0, sizeof(cp));
2596 memcpy(&cp.lap, lap, sizeof(cp.lap));
2597 cp.length = length;
2598
2599 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2600}
023d5049
AG
2601
2602int hci_cancel_inquiry(struct hci_dev *hdev)
2603{
2604 BT_DBG("%s", hdev->name);
2605
2606 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2607 return -EPERM;
2608
2609 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2610}
7784d78f
AE
2611
/* Expose enable_hs (defined earlier in this file, outside this chunk)
 * as a writable (0644) bool module parameter under /sys/module. */
module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed");