]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - net/bluetooth/hci_core.c
Bluetooth: Fix doing some useless casts when receiving MGMT commands
[mirror_ubuntu-bionic-kernel.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
82453021 28#include <linux/jiffies.h>
1da177e4
LT
29#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
1da177e4
LT
35#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
f48fd9c8 41#include <linux/workqueue.h>
1da177e4
LT
42#include <linux/interrupt.h>
43#include <linux/notifier.h>
611b30f7 44#include <linux/rfkill.h>
6bd32326 45#include <linux/timer.h>
3a0259bb 46#include <linux/crypto.h>
1da177e4
LT
47#include <net/sock.h>
48
49#include <asm/system.h>
70f23020 50#include <linux/uaccess.h>
1da177e4
LT
51#include <asm/unaligned.h>
52
53#include <net/bluetooth/bluetooth.h>
54#include <net/bluetooth/hci_core.h>
55
ab81cbf9
JH
56#define AUTO_OFF_TIMEOUT 2000
57
8b281b9c 58bool enable_hs;
7784d78f 59
b78752cc 60static void hci_rx_work(struct work_struct *work);
c347b765 61static void hci_cmd_work(struct work_struct *work);
3eff45ea 62static void hci_tx_work(struct work_struct *work);
1da177e4 63
1da177e4
LT
64/* HCI device list */
65LIST_HEAD(hci_dev_list);
66DEFINE_RWLOCK(hci_dev_list_lock);
67
68/* HCI callback list */
69LIST_HEAD(hci_cb_list);
70DEFINE_RWLOCK(hci_cb_list_lock);
71
1da177e4 72/* HCI notifiers list */
e041c683 73static ATOMIC_NOTIFIER_HEAD(hci_notifier);
1da177e4
LT
74
75/* ---- HCI notifications ---- */
76
/* Register @nb to receive HCI device events (e.g. HCI_DEV_UP/DOWN)
 * broadcast via hci_notify().  Returns 0 on success. */
int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}
81
/* Remove a notifier previously added with hci_register_notifier().
 * Returns 0 on success. */
int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}
86
/* Broadcast @event for @hdev to every registered HCI notifier. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
91
92/* ---- HCI requests ---- */
93
/* Complete a pending synchronous request started by __hci_request().
 * @cmd is the opcode of the command that finished, @result its status.
 * Wakes the waiter sleeping on req_wait_q. */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
110
/* Abort a pending synchronous request with error @err and wake the
 * waiter in __hci_request(), which will see HCI_REQ_CANCELED. */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
121
/* Execute request and wait for completion.
 *
 * Runs @req (which queues HCI commands) and sleeps up to @timeout
 * jiffies until hci_req_complete()/hci_req_cancel() settles the
 * request.  Caller must hold the request lock (see hci_request()).
 * Returns 0, a negative errno, -EINTR on signal or -ETIMEDOUT.
 */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Queue ourselves on the wait queue *before* issuing the request
	 * so a fast completion cannot slip past the sleep below. */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		/* Neither completed nor canceled within @timeout */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
164
/* Public wrapper around __hci_request(): rejects requests while the
 * device is down and serializes them under the request lock. */
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
180
/* Request callback: issue an HCI Reset.  @opt is unused. */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
189
/* Queue the BR/EDR controller bring-up command sequence: reset (unless
 * quirked off), read basic controller info, then optional setup such
 * as clearing event filters and deleting stored link keys. */
static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 baseband slots) */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Delete all stored link keys (BDADDR_ANY + delete_all flag) */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
241
e61ef499
AE
/* Queue the minimal init sequence for an AMP controller: reset and
 * read local version.  AMP controllers use block-based flow control. */
static void amp_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Reset */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}
252
/* Request callback run during device open: flush any driver-supplied
 * init commands to the command queue, then run the type-specific
 * (BR/EDR or AMP) init sequence.  @opt is unused. */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands queued by the driver before open */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}

}
286
6ed58ec5
VT
/* Request callback for LE-capable controllers: read the LE buffer
 * size.  @opt is unused. */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
294
1da177e4
LT
/* Request callback: write the scan enable setting.  @opt carries the
 * scan mode bits (inquiry/page scan). */
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
304
/* Request callback: write the authentication enable setting carried
 * in @opt. */
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
314
/* Request callback: write the encryption mode carried in @opt. */
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
324
e4e8e37c
MH
/* Request callback: write the default link policy carried in @opt
 * (converted to little endian on the wire). */
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
334
/* Get HCI device by index.
 * Device is held on return; caller must drop it with hci_dev_put().
 * Returns NULL for a negative or unknown index. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
1da177e4
LT
356
357/* ---- Inquiry support ---- */
ff9ef578 358
30dc78e1
JH
359bool hci_discovery_active(struct hci_dev *hdev)
360{
361 struct discovery_state *discov = &hdev->discovery;
362
363 if (discov->state == DISCOVERY_INQUIRY ||
364 discov->state == DISCOVERY_RESOLVING)
365 return true;
366
367 return false;
368}
369
ff9ef578
JH
/* Transition the discovery state machine to @state, emitting MGMT
 * "discovering" events on the STOPPED/INQUIRY edges.  A no-op when the
 * state is unchanged. */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_INQUIRY:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
394
1da177e4
LT
/* Free every inquiry cache entry and reset the discovery state to
 * STOPPED.  Caller holds the hdev lock. */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	/* The "all" list owns the entries; unknown/resolve only link them */
	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
	cache->state = DISCOVERY_STOPPED;
}
409
/* Find the cache entry for @bdaddr on the "all" list, or NULL. */
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
424
/* Find the entry for @bdaddr on the "unknown name" list, or NULL. */
struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
							bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
440
30dc78e1
JH
/* Find an entry on the name-resolve list.  With @bdaddr == BDADDR_ANY
 * the first entry whose name_state equals @state matches; otherwise
 * the entry with the given address matches.  Returns NULL if none. */
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
							bdaddr_t *bdaddr,
							int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
459
a3d4e20a
JH
/* Re-insert @ie into the resolve list keeping it sorted by descending
 * |RSSI| so the strongest signals are name-resolved first; entries
 * already NAME_PENDING stay at the front. */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
						struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
				abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
478
/* Add or refresh the cache entry for @data->bdaddr.
 * @name_known tells whether the remote name is already resolved.
 * Returns true when the entry's name is known (no resolution needed),
 * false when the name is still unknown or allocation failed. */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
				bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* RSSI changed while a name request is still needed:
		 * re-sort the resolve list so priorities stay correct. */
		if (ie->name_state == NAME_NEEDED &&
						data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name became known: drop the entry from unknown/resolve lists */
	if (name_known && ie->name_state != NAME_KNOWN &&
					ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
528
/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info.  Returns the number of entries written.
 * Must not sleep; caller holds the hdev lock. */
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
556
/* Request callback: start an inquiry with the LAP/length/num_rsp from
 * the hci_inquiry_req passed via @opt.  No-op if one is running. */
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
573
/* HCIINQUIRY ioctl handler.  @arg points to a struct hci_inquiry_req
 * followed by space for the results.  Runs a fresh inquiry when the
 * cache is stale/empty or IREQ_CACHE_FLUSH is set, then copies the
 * cached results back to user space.  Returns 0 or a negative errno. */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in 1.28s units; ~2000ms per unit as jiffies */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
639
640/* ---- HCI ioctl helpers ---- */
641
/* Bring up HCI device @dev: open the transport, run the HCI init
 * sequence (unless the device is raw) and mark it HCI_UP.  On init
 * failure the device is fully torn down again.
 * Returns 0 or a negative errno (-ENODEV, -ERFKILL, -EALREADY, -EIO,
 * or an error from the init request). */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
728
/* Shut down @hdev: cancel pending work, flush queues and connections,
 * reset the controller (unless raw) and close the transport.  Safe to
 * call on an already-down device (returns 0 after clearing the cmd
 * timer).  Always returns 0. */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	hci_dev_lock(hdev);
	mgmt_powered(hdev, 0);
	hci_dev_unlock(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
808
/* Public close entry point: look up device @dev by index, run the
 * shutdown path and drop the reference.  Returns 0 or -ENODEV. */
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}
821
/* HCIDEVRESET ioctl: drop all queued traffic and connections on
 * device @dev and (for non-raw devices) issue an HCI Reset.  A no-op
 * returning 0 if the device is not up.  Returns 0 or negative errno. */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore flow-control counters to their post-reset values */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
860
/* HCIDEVRESTAT ioctl: zero the byte/packet statistics of device @dev.
 * Returns 0 or -ENODEV. */
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}
876
/* Dispatcher for the HCISET* device ioctls.  @arg points to a struct
 * hci_dev_req whose dev_opt carries the per-command value.  Returns 0
 * or a negative errno. */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	/* For the MTU ioctls dev_opt packs two __u16 values: pkts in the
	 * low half, mtu in the high half (host-endian dependent layout
	 * preserved from the original ABI). */
	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
951
/* HCIGETDEVLIST ioctl: fill a struct hci_dev_list_req at @arg with the
 * id/flags of up to dev_num registered devices.  Also kicks devices
 * out of the auto-off state as a side effect of user-space access.
 * Returns 0 or a negative errno. */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Legacy (non-MGMT) user space expects pairable devices */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
998
/* HCIGETDEVINFO ioctl: copy a struct hci_dev_info snapshot of the
 * device named in @arg back to user space.  Returns 0 or negative
 * errno. */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Legacy (non-MGMT) user space expects pairable devices */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	/* low nibble: bus type, high nibble: device type */
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1040
1041/* ---- Interface to HCI drivers ---- */
1042
611b30f7
MH
/* rfkill callback: close the device when its switch is blocked.
 * Unblocking does not power the device back up. */
static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1060
1da177e4
LT
/* Alloc HCI device.
 * Returns a zeroed struct hci_dev with sysfs initialized and the
 * driver_init queue ready, or NULL on allocation failure.  Free with
 * hci_free_dev(). */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hci_init_sysfs(hdev);
	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
1076
/* Free HCI device.
 * Purges queued driver-init packets and drops the sysfs device
 * reference; the struct itself is freed by the device release
 * callback. */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
1086
ab81cbf9
JH
/* Workqueue handler: power on the device.  If it came up in auto-off
 * mode, schedule the delayed power-off; finishing setup notifies MGMT
 * that the index was added. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1103
/* Delayed-work handler: clear the auto-off flag and close the
 * device. */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							power_off.work);

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	hci_dev_close(hdev->id);
}
1115
16ab91ab
JH
/* Delayed-work handler: end the discoverable period by restricting
 * scanning to page scan only and zeroing the timeout. */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1133
2aeb9a1a
JH
1134int hci_uuids_clear(struct hci_dev *hdev)
1135{
1136 struct list_head *p, *n;
1137
1138 list_for_each_safe(p, n, &hdev->uuids) {
1139 struct bt_uuid *uuid;
1140
1141 uuid = list_entry(p, struct bt_uuid, list);
1142
1143 list_del(p);
1144 kfree(uuid);
1145 }
1146
1147 return 0;
1148}
1149
55ed8ca1
JH
1150int hci_link_keys_clear(struct hci_dev *hdev)
1151{
1152 struct list_head *p, *n;
1153
1154 list_for_each_safe(p, n, &hdev->link_keys) {
1155 struct link_key *key;
1156
1157 key = list_entry(p, struct link_key, list);
1158
1159 list_del(p);
1160 kfree(key);
1161 }
1162
1163 return 0;
1164}
1165
1166struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1167{
8035ded4 1168 struct link_key *k;
55ed8ca1 1169
8035ded4 1170 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1171 if (bacmp(bdaddr, &k->bdaddr) == 0)
1172 return k;
55ed8ca1
JH
1173
1174 return NULL;
1175}
1176
d25e28ab
JH
/* Decide whether a newly created link key should survive across power
 * cycles.  Returns 1 when the key must be stored persistently, 0 when
 * it should be dropped once the current connection ends. */
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
							u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
1212
75d262c2
VCG
/* Look up a stored SMP Long Term Key by its EDIV and 8-byte Rand
 * master identification.  Returns NULL when no matching LTK exists. */
struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list) {
		struct key_master_id *id;

		if (k->type != HCI_LK_SMP_LTK)
			continue;

		/* LTK entries carry a key_master_id in their payload */
		if (k->dlen != sizeof(*id))
			continue;

		id = (void *) &k->data;
		if (id->ediv == ediv &&
				(memcmp(rand, id->rand, sizeof(id->rand)) == 0))
			return k;
	}

	return NULL;
}
1234EXPORT_SYMBOL(hci_find_ltk);
1235
1236struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1237 bdaddr_t *bdaddr, u8 type)
1238{
1239 struct link_key *k;
1240
1241 list_for_each_entry(k, &hdev->link_keys, list)
1242 if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1243 return k;
1244
1245 return NULL;
1246}
1247EXPORT_SYMBOL(hci_find_link_key_type);
1248
d25e28ab
JH
/* Store (or update) the link key for @bdaddr.  @conn may be NULL
 * (security mode 3).  When @new_key is set, user space is notified
 * via mgmt and non-persistent keys are dropped again immediately
 * after notification.  Returns 0 or -ENOMEM. */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		/* Reuse the existing entry, remembering its old type for
		 * the persistence decision below */
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
				(!conn || conn->remote_auth == 0xff) &&
				old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A changed combination key keeps the previous key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are only reported, not kept */
	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1303
/* Store (or update) an SMP Long Term Key for @bdaddr.  The EDIV/Rand
 * master identification lives in the entry's variable-length payload.
 * When @new_key is set, mgmt is notified.  Returns 0 or -ENOMEM. */
int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
				u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
{
	struct link_key *key, *old_key;
	struct key_master_id *id;
	u8 old_key_type;

	BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));

	old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
	if (old_key) {
		key = old_key;
		old_key_type = old_key->type;
	} else {
		/* Allocate extra room for the key_master_id payload */
		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
		old_key_type = 0xff;
	}

	key->dlen = sizeof(*id);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, ltk, sizeof(key->val));
	key->type = HCI_LK_SMP_LTK;
	key->pin_len = key_size;	/* pin_len reused for encryption key size */

	id = (void *) &key->data;
	id->ediv = ediv;
	memcpy(id->rand, rand, sizeof(id->rand));

	if (new_key)
		mgmt_new_link_key(hdev, key, old_key_type);

	return 0;
}
1341
55ed8ca1
JH
1342int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1343{
1344 struct link_key *key;
1345
1346 key = hci_find_link_key(hdev, bdaddr);
1347 if (!key)
1348 return -ENOENT;
1349
1350 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1351
1352 list_del(&key->list);
1353 kfree(key);
1354
1355 return 0;
1356}
1357
6bd32326
VT
/* HCI command timer function: fires when the controller did not
 * acknowledge a sent command in time.  Restore one command credit so
 * the command work queue can make progress again. */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1367
2763eda6
SJ
1368struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1369 bdaddr_t *bdaddr)
1370{
1371 struct oob_data *data;
1372
1373 list_for_each_entry(data, &hdev->remote_oob_data, list)
1374 if (bacmp(bdaddr, &data->bdaddr) == 0)
1375 return data;
1376
1377 return NULL;
1378}
1379
1380int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1381{
1382 struct oob_data *data;
1383
1384 data = hci_find_remote_oob_data(hdev, bdaddr);
1385 if (!data)
1386 return -ENOENT;
1387
1388 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1389
1390 list_del(&data->list);
1391 kfree(data);
1392
1393 return 0;
1394}
1395
1396int hci_remote_oob_data_clear(struct hci_dev *hdev)
1397{
1398 struct oob_data *data, *n;
1399
1400 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1401 list_del(&data->list);
1402 kfree(data);
1403 }
1404
1405 return 0;
1406}
1407
/* Cache remote OOB pairing data (hash + randomizer) for @bdaddr,
 * creating a new entry when none exists yet.  Returns 0 or -ENOMEM. */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
								u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		/* kmalloc (no zeroing) is fine: every field is written below */
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
1431
b2a66aad
AJ
1432struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1433 bdaddr_t *bdaddr)
1434{
8035ded4 1435 struct bdaddr_list *b;
b2a66aad 1436
8035ded4 1437 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1438 if (bacmp(bdaddr, &b->bdaddr) == 0)
1439 return b;
b2a66aad
AJ
1440
1441 return NULL;
1442}
1443
1444int hci_blacklist_clear(struct hci_dev *hdev)
1445{
1446 struct list_head *p, *n;
1447
1448 list_for_each_safe(p, n, &hdev->blacklist) {
1449 struct bdaddr_list *b;
1450
1451 b = list_entry(p, struct bdaddr_list, list);
1452
1453 list_del(p);
1454 kfree(b);
1455 }
1456
1457 return 0;
1458}
1459
1460int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1461{
1462 struct bdaddr_list *entry;
b2a66aad
AJ
1463
1464 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1465 return -EBADF;
1466
5e762444
AJ
1467 if (hci_blacklist_lookup(hdev, bdaddr))
1468 return -EEXIST;
b2a66aad
AJ
1469
1470 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
1471 if (!entry)
1472 return -ENOMEM;
b2a66aad
AJ
1473
1474 bacpy(&entry->bdaddr, bdaddr);
1475
1476 list_add(&entry->list, &hdev->blacklist);
1477
744cf19e 1478 return mgmt_device_blocked(hdev, bdaddr);
b2a66aad
AJ
1479}
1480
/* Remove @bdaddr from the blacklist.  Passing BDADDR_ANY clears the
 * whole list (without a per-address mgmt notification); removing a
 * single address notifies mgmt.  Returns -ENOENT when not found. */
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr);
}
1497
/* Delayed work: flush the LE advertising cache under the device lock. */
static void hci_clear_adv_cache(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							adv_work.work);

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}
1509
76c8686f
AG
1510int hci_adv_entries_clear(struct hci_dev *hdev)
1511{
1512 struct adv_entry *entry, *tmp;
1513
1514 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1515 list_del(&entry->list);
1516 kfree(entry);
1517 }
1518
1519 BT_DBG("%s adv cache cleared", hdev->name);
1520
1521 return 0;
1522}
1523
1524struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1525{
1526 struct adv_entry *entry;
1527
1528 list_for_each_entry(entry, &hdev->adv_entries, list)
1529 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1530 return entry;
1531
1532 return NULL;
1533}
1534
1535static inline int is_connectable_adv(u8 evt_type)
1536{
1537 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1538 return 1;
1539
1540 return 0;
1541}
1542
/* Record a connectable LE advertiser in the adv cache.  Already-known
 * addresses are silently ignored; non-connectable advertising types
 * are rejected with -EINVAL. */
int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
				batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
1570
1da177e4
LT
/* Register HCI device.  Allocates the first free hciX index, fully
 * initializes the device structure (queues, work items, timers,
 * lists), creates the per-device workqueue and sysfs entries, hooks
 * up rfkill, and schedules the initial power-on.  Returns the new
 * device id on success or a negative error. */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add_tail(&hdev->list, head);

	mutex_init(&hdev->lock);

	/* Default controller settings before init */
	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);


	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	discovery_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock(&hci_dev_list_lock);

	/* Single-threaded, high-priority workqueue dedicated to this device */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
							WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is optional: registration failure just disables it */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* Mark the device as being set up and kick off deferred power-on */
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
	set_bit(HCI_SETUP, &hdev->dev_flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
1696EXPORT_SYMBOL(hci_register_dev);
1697
/* Unregister HCI device: reverse of hci_register_dev().  Removes the
 * device from the global list, closes it, tears down mgmt/rfkill/
 * sysfs/workqueue state and frees all cached data, then drops the
 * reference taken at registration time. */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Unlink first so no new users can find the device */
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal to mgmt for fully set-up devices */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	/* Drop the reference taken by hci_register_dev() */
	hci_dev_put(hdev);
}
1748EXPORT_SYMBOL(hci_unregister_dev);
1749
/* Suspend HCI device: only notifies registered listeners, no device
 * state is changed here.  Always returns 0. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
1756EXPORT_SYMBOL(hci_suspend_dev);
1757
/* Resume HCI device: only notifies registered listeners, no device
 * state is changed here.  Always returns 0. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
1764EXPORT_SYMBOL(hci_resume_dev);
1765
76bca880
MH
/* Receive frame from HCI drivers: timestamp the skb, mark it as
 * incoming and queue it for the RX work.  Frames are only accepted
 * while the device is up or still initializing. */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
1787EXPORT_SYMBOL(hci_recv_frame);
1788
/* Incrementally rebuild one HCI packet of @type from a raw byte
 * stream; @index selects the reassembly slot in hdev->reassembly.
 * Complete packets are handed to hci_recv_frame().  Returns the
 * number of input bytes NOT consumed, or a negative error. */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: allocate the maximum frame for
		 * this type and expect its header first */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		/* Copy at most the number of bytes still expected */
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, read the payload length
		 * from it and verify it fits the allocation */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1897
ef222013
MH
/* Feed a driver buffer containing (part of) one packet of @type into
 * the reassembler until consumed.  Returns leftover byte count (>= 0)
 * or a negative error. */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		/* Reassembly slot (type - 1) is dedicated to this type */
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
1916EXPORT_SYMBOL(hci_recv_fragment);
1917
99811510
SS
1918#define STREAM_REASSEMBLY 0
1919
/* Reassemble packets from a raw byte stream in which each frame is
 * preceded by a one-byte packet-type indicator.  Uses the dedicated
 * STREAM_REASSEMBLY slot.  Returns leftover bytes or an error. */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
1951EXPORT_SYMBOL(hci_recv_stream_fragment);
1952
1da177e4
LT
1953/* ---- Interface to upper protocols ---- */
1954
1da177e4
LT
/* Register an upper-protocol callback structure on the global cb
 * list.  Always returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
1965EXPORT_SYMBOL(hci_register_cb);
1966
/* Remove an upper-protocol callback structure from the global cb
 * list.  Always returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
1977EXPORT_SYMBOL(hci_unregister_cb);
1978
/* Hand one outgoing frame to the driver's send callback, mirroring it
 * to monitoring sockets first when someone holds the device in
 * promiscuous mode. */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2002
/* Send HCI command: build a command packet (header + @plen bytes of
 * @param), queue it on cmd_q and kick the command work.  Returns 0 or
 * -ENOMEM. */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Track the last command issued while still initializing */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
1da177e4
LT
2038
2039/* Get data from the previously sent command */
a9de9248 2040void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
2041{
2042 struct hci_command_hdr *hdr;
2043
2044 if (!hdev->sent_cmd)
2045 return NULL;
2046
2047 hdr = (void *) hdev->sent_cmd->data;
2048
a9de9248 2049 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
2050 return NULL;
2051
a9de9248 2052 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1da177e4
LT
2053
2054 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2055}
2056
/* Send ACL data */
/* Prepend an ACL header (packed handle+flags, little-endian payload
 * length) in front of the skb's current payload. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
2069
73d80deb
LAD
/* Queue an ACL skb (and any fragments on its frag_list) on @queue.
 * Fragments get their own ACL headers with ACL_CONT set, and the
 * whole chain is queued atomically under the queue lock. */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
						struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2110
/* Send ACL data on @chan: attach the ACL header, queue the skb on the
 * channel's data queue and kick the TX work. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
2126EXPORT_SYMBOL(hci_send_acl);
2127
/* Send SCO data */
/* Prepend the SCO header, queue the skb on the connection's data
 * queue and kick the TX work. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
2149EXPORT_SYMBOL(hci_send_sco);
2150
2151/* ---- HCI TX task (outgoing data) ---- */
2152
/* HCI Connection scheduler */
/* Pick the connection of @type with queued data that has the fewest
 * outstanding (sent but unacked) packets, and compute a fair share of
 * the controller's buffer credits for it in *quote. */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen: stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Credits depend on link type; LE falls back to the ACL
		 * pool when the controller has no dedicated LE buffers */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2212
/* TX timeout handling for links of @type: disconnect every connection
 * of that type that still has unacked packets outstanding. */
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			/* 0x13: remote user terminated connection */
			hci_acl_disconn(c, 0x13);
		}
	}

	rcu_read_unlock();
}
2233
73d80deb
LAD
/* Channel-level scheduler: among all channels of active connections
 * of @type, select the channel whose head skb has the highest
 * priority, preferring the least-served connection on a tie.  Stores
 * a fair per-round credit quota in *quote. */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				/* New highest priority: restart the
				 * fairness bookkeeping at this level */
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Split the available controller credits across the channels
	 * competing at the winning priority */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2312
02b20f0b
LAD
/* Anti-starvation pass over all channels of @type: channels that were
 * served this round have their counter reset; channels that were not
 * get their head skb promoted to HCI_PRIO_MAX - 1 so lower-priority
 * traffic is not starved indefinitely. */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Served this round: reset, no promotion needed */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2362
73d80deb
LAD
/* Transmit queued ACL data.
 *
 * Picks channels via hci_chan_sent() (highest-priority, least-recently
 * served first) and drains each one up to its fairness quote, stopping
 * early if a lower-priority skb surfaces at the head of the queue.
 * Detects a stalled controller (no credits returned for longer than the
 * ACL tx timeout) and kicks link-supervision recovery.
 */
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	unsigned int cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx +
					msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	/* Remember the starting credit count so we can tell below
	 * whether anything was actually sent this pass. */
	cnt = hdev->acl_cnt;

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			/* Peek succeeded and priority still matches:
			 * now actually remove the skb from the queue. */
			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something went out: rebalance priorities of starved channels. */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2413
2414/* Schedule SCO */
2415static inline void hci_sched_sco(struct hci_dev *hdev)
2416{
2417 struct hci_conn *conn;
2418 struct sk_buff *skb;
2419 int quote;
2420
2421 BT_DBG("%s", hdev->name);
2422
52087a79
LAD
2423 if (!hci_conn_num(hdev, SCO_LINK))
2424 return;
2425
1da177e4
LT
2426 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2427 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2428 BT_DBG("skb %p len %d", skb, skb->len);
2429 hci_send_frame(skb);
2430
2431 conn->sent++;
2432 if (conn->sent == ~0)
2433 conn->sent = 0;
2434 }
2435 }
2436}
2437
b6a0dc82
MH
2438static inline void hci_sched_esco(struct hci_dev *hdev)
2439{
2440 struct hci_conn *conn;
2441 struct sk_buff *skb;
2442 int quote;
2443
2444 BT_DBG("%s", hdev->name);
2445
52087a79
LAD
2446 if (!hci_conn_num(hdev, ESCO_LINK))
2447 return;
2448
b6a0dc82
MH
2449 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2450 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2451 BT_DBG("skb %p len %d", skb, skb->len);
2452 hci_send_frame(skb);
2453
2454 conn->sent++;
2455 if (conn->sent == ~0)
2456 conn->sent = 0;
2457 }
2458 }
2459}
2460
6ed58ec5
VT
/* Transmit queued LE data.
 *
 * Mirrors hci_sched_acl(): channels are served in priority order via
 * hci_chan_sent() up to their fairness quote.  Controllers without a
 * dedicated LE buffer pool (le_pkts == 0) share the ACL credits, so the
 * leftover count is written back to whichever pool was drawn from.
 */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		/* NOTE(review): hardcoded HZ * 45 here, while the ACL path
		 * uses msecs_to_jiffies(HCI_ACL_TX_TIMEOUT) — presumably
		 * equivalent on purpose; confirm before unifying. */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Use the LE credit pool if the controller has one, else ACL's. */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;	/* starting credits, to detect progress below */
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			/* Priority still matches: dequeue for real. */
			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Return unused credits to the pool they were taken from. */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something went out: rebalance priorities of starved channels. */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2511
3eff45ea 2512static void hci_tx_work(struct work_struct *work)
1da177e4 2513{
3eff45ea 2514 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
2515 struct sk_buff *skb;
2516
6ed58ec5
VT
2517 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2518 hdev->sco_cnt, hdev->le_cnt);
1da177e4
LT
2519
2520 /* Schedule queues and send stuff to HCI driver */
2521
2522 hci_sched_acl(hdev);
2523
2524 hci_sched_sco(hdev);
2525
b6a0dc82
MH
2526 hci_sched_esco(hdev);
2527
6ed58ec5
VT
2528 hci_sched_le(hdev);
2529
1da177e4
LT
2530 /* Send next queued raw (unknown type) packet */
2531 while ((skb = skb_dequeue(&hdev->raw_q)))
2532 hci_send_frame(skb);
1da177e4
LT
2533}
2534
25985edc 2535/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
2536
2537/* ACL data packet */
2538static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2539{
2540 struct hci_acl_hdr *hdr = (void *) skb->data;
2541 struct hci_conn *conn;
2542 __u16 handle, flags;
2543
2544 skb_pull(skb, HCI_ACL_HDR_SIZE);
2545
2546 handle = __le16_to_cpu(hdr->handle);
2547 flags = hci_flags(handle);
2548 handle = hci_handle(handle);
2549
2550 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2551
2552 hdev->stat.acl_rx++;
2553
2554 hci_dev_lock(hdev);
2555 conn = hci_conn_hash_lookup_handle(hdev, handle);
2556 hci_dev_unlock(hdev);
8e87d142 2557
1da177e4 2558 if (conn) {
65983fc7 2559 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 2560
1da177e4 2561 /* Send to upper protocol */
686ebf28
UF
2562 l2cap_recv_acldata(conn, skb, flags);
2563 return;
1da177e4 2564 } else {
8e87d142 2565 BT_ERR("%s ACL packet for unknown connection handle %d",
1da177e4
LT
2566 hdev->name, handle);
2567 }
2568
2569 kfree_skb(skb);
2570}
2571
2572/* SCO data packet */
2573static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2574{
2575 struct hci_sco_hdr *hdr = (void *) skb->data;
2576 struct hci_conn *conn;
2577 __u16 handle;
2578
2579 skb_pull(skb, HCI_SCO_HDR_SIZE);
2580
2581 handle = __le16_to_cpu(hdr->handle);
2582
2583 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2584
2585 hdev->stat.sco_rx++;
2586
2587 hci_dev_lock(hdev);
2588 conn = hci_conn_hash_lookup_handle(hdev, handle);
2589 hci_dev_unlock(hdev);
2590
2591 if (conn) {
1da177e4 2592 /* Send to upper protocol */
686ebf28
UF
2593 sco_recv_scodata(conn, skb);
2594 return;
1da177e4 2595 } else {
8e87d142 2596 BT_ERR("%s SCO packet for unknown connection handle %d",
1da177e4
LT
2597 hdev->name, handle);
2598 }
2599
2600 kfree_skb(skb);
2601}
2602
b78752cc 2603static void hci_rx_work(struct work_struct *work)
1da177e4 2604{
b78752cc 2605 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
2606 struct sk_buff *skb;
2607
2608 BT_DBG("%s", hdev->name);
2609
1da177e4
LT
2610 while ((skb = skb_dequeue(&hdev->rx_q))) {
2611 if (atomic_read(&hdev->promisc)) {
2612 /* Send copy to the sockets */
eec8d2bc 2613 hci_send_to_sock(hdev, skb, NULL);
1da177e4
LT
2614 }
2615
2616 if (test_bit(HCI_RAW, &hdev->flags)) {
2617 kfree_skb(skb);
2618 continue;
2619 }
2620
2621 if (test_bit(HCI_INIT, &hdev->flags)) {
2622 /* Don't process data packets in this states. */
0d48d939 2623 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
2624 case HCI_ACLDATA_PKT:
2625 case HCI_SCODATA_PKT:
2626 kfree_skb(skb);
2627 continue;
3ff50b79 2628 }
1da177e4
LT
2629 }
2630
2631 /* Process frame */
0d48d939 2632 switch (bt_cb(skb)->pkt_type) {
1da177e4 2633 case HCI_EVENT_PKT:
b78752cc 2634 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
2635 hci_event_packet(hdev, skb);
2636 break;
2637
2638 case HCI_ACLDATA_PKT:
2639 BT_DBG("%s ACL data packet", hdev->name);
2640 hci_acldata_packet(hdev, skb);
2641 break;
2642
2643 case HCI_SCODATA_PKT:
2644 BT_DBG("%s SCO data packet", hdev->name);
2645 hci_scodata_packet(hdev, skb);
2646 break;
2647
2648 default:
2649 kfree_skb(skb);
2650 break;
2651 }
2652 }
1da177e4
LT
2653}
2654
c347b765 2655static void hci_cmd_work(struct work_struct *work)
1da177e4 2656{
c347b765 2657 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
2658 struct sk_buff *skb;
2659
2660 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2661
1da177e4 2662 /* Send queued commands */
5a08ecce
AE
2663 if (atomic_read(&hdev->cmd_cnt)) {
2664 skb = skb_dequeue(&hdev->cmd_q);
2665 if (!skb)
2666 return;
2667
7585b97a 2668 kfree_skb(hdev->sent_cmd);
1da177e4 2669
70f23020
AE
2670 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2671 if (hdev->sent_cmd) {
1da177e4
LT
2672 atomic_dec(&hdev->cmd_cnt);
2673 hci_send_frame(skb);
7bdb8a5c
SJ
2674 if (test_bit(HCI_RESET, &hdev->flags))
2675 del_timer(&hdev->cmd_timer);
2676 else
2677 mod_timer(&hdev->cmd_timer,
6bd32326 2678 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
1da177e4
LT
2679 } else {
2680 skb_queue_head(&hdev->cmd_q, skb);
c347b765 2681 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
2682 }
2683 }
2684}
2519a1fc
AG
2685
2686int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2687{
2688 /* General inquiry access code (GIAC) */
2689 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2690 struct hci_cp_inquiry cp;
2691
2692 BT_DBG("%s", hdev->name);
2693
2694 if (test_bit(HCI_INQUIRY, &hdev->flags))
2695 return -EINPROGRESS;
2696
4663262c
JH
2697 inquiry_cache_flush(hdev);
2698
2519a1fc
AG
2699 memset(&cp, 0, sizeof(cp));
2700 memcpy(&cp.lap, lap, sizeof(cp.lap));
2701 cp.length = length;
2702
2703 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2704}
023d5049
AG
2705
2706int hci_cancel_inquiry(struct hci_dev *hdev)
2707{
2708 BT_DBG("%s", hdev->name);
2709
2710 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2711 return -EPERM;
2712
2713 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2714}
7784d78f
AE
2715
/* Runtime-writable (0644) module parameter toggling High Speed support. */
module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed");