]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - net/bluetooth/hci_core.c
Bluetooth: mgmt: Fix missing short_name in read_info
[mirror_ubuntu-bionic-kernel.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
82453021 28#include <linux/jiffies.h>
1da177e4
LT
29#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
1da177e4
LT
35#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
f48fd9c8 41#include <linux/workqueue.h>
1da177e4 42#include <linux/interrupt.h>
611b30f7 43#include <linux/rfkill.h>
6bd32326 44#include <linux/timer.h>
3a0259bb 45#include <linux/crypto.h>
1da177e4
LT
46#include <net/sock.h>
47
48#include <asm/system.h>
70f23020 49#include <linux/uaccess.h>
1da177e4
LT
50#include <asm/unaligned.h>
51
52#include <net/bluetooth/bluetooth.h>
53#include <net/bluetooth/hci_core.h>
54
/* Delay (ms) before a controller that was auto-powered-on is powered
 * back off again if nobody claimed it (see hci_power_on()). */
#define AUTO_OFF_TIMEOUT 2000

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);
68
1da177e4
LT
69/* ---- HCI notifications ---- */
70
/* ---- HCI notifications ---- */

/* Broadcast a device state event to all listening HCI sockets. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
75
76/* ---- HCI requests ---- */
77
23bb5763 78void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
1da177e4 79{
23bb5763
JH
80 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
81
a5040efa
JH
82 /* If this is the init phase check if the completed command matches
83 * the last init command, and if not just return.
84 */
85 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
23bb5763 86 return;
1da177e4
LT
87
88 if (hdev->req_status == HCI_REQ_PEND) {
89 hdev->req_result = result;
90 hdev->req_status = HCI_REQ_DONE;
91 wake_up_interruptible(&hdev->req_wait_q);
92 }
93}
94
95static void hci_req_cancel(struct hci_dev *hdev, int err)
96{
97 BT_DBG("%s err 0x%2.2x", hdev->name, err);
98
99 if (hdev->req_status == HCI_REQ_PEND) {
100 hdev->req_result = err;
101 hdev->req_status = HCI_REQ_CANCELED;
102 wake_up_interruptible(&hdev->req_wait_q);
103 }
104}
105
106/* Execute request and wait for completion. */
8e87d142 107static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
01df8c31 108 unsigned long opt, __u32 timeout)
1da177e4
LT
109{
110 DECLARE_WAITQUEUE(wait, current);
111 int err = 0;
112
113 BT_DBG("%s start", hdev->name);
114
115 hdev->req_status = HCI_REQ_PEND;
116
117 add_wait_queue(&hdev->req_wait_q, &wait);
118 set_current_state(TASK_INTERRUPTIBLE);
119
120 req(hdev, opt);
121 schedule_timeout(timeout);
122
123 remove_wait_queue(&hdev->req_wait_q, &wait);
124
125 if (signal_pending(current))
126 return -EINTR;
127
128 switch (hdev->req_status) {
129 case HCI_REQ_DONE:
e175072f 130 err = -bt_to_errno(hdev->req_result);
1da177e4
LT
131 break;
132
133 case HCI_REQ_CANCELED:
134 err = -hdev->req_result;
135 break;
136
137 default:
138 err = -ETIMEDOUT;
139 break;
3ff50b79 140 }
1da177e4 141
a5040efa 142 hdev->req_status = hdev->req_result = 0;
1da177e4
LT
143
144 BT_DBG("%s end: err %d", hdev->name, err);
145
146 return err;
147}
148
149static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
01df8c31 150 unsigned long opt, __u32 timeout)
1da177e4
LT
151{
152 int ret;
153
7c6a329e
MH
154 if (!test_bit(HCI_UP, &hdev->flags))
155 return -ENETDOWN;
156
1da177e4
LT
157 /* Serialize all requests */
158 hci_req_lock(hdev);
159 ret = __hci_request(hdev, req, opt, timeout);
160 hci_req_unlock(hdev);
161
162 return ret;
163}
164
165static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
166{
167 BT_DBG("%s %ld", hdev->name, opt);
168
169 /* Reset device */
f630cf0d 170 set_bit(HCI_RESET, &hdev->flags);
a9de9248 171 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
1da177e4
LT
172}
173
e61ef499 174static void bredr_init(struct hci_dev *hdev)
1da177e4 175{
b0916ea0 176 struct hci_cp_delete_stored_link_key cp;
1ebb9252 177 __le16 param;
89f2783d 178 __u8 flt_type;
1da177e4 179
2455a3ea
AE
180 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
181
1da177e4
LT
182 /* Mandatory initialization */
183
184 /* Reset */
f630cf0d 185 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
e61ef499
AE
186 set_bit(HCI_RESET, &hdev->flags);
187 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
f630cf0d 188 }
1da177e4
LT
189
190 /* Read Local Supported Features */
a9de9248 191 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1da177e4 192
1143e5a6 193 /* Read Local Version */
a9de9248 194 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1143e5a6 195
1da177e4 196 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
a9de9248 197 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1da177e4 198
1da177e4 199 /* Read BD Address */
a9de9248
MH
200 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
201
202 /* Read Class of Device */
203 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
204
205 /* Read Local Name */
206 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1da177e4
LT
207
208 /* Read Voice Setting */
a9de9248 209 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1da177e4
LT
210
211 /* Optional initialization */
212
213 /* Clear Event Filters */
89f2783d 214 flt_type = HCI_FLT_CLEAR_ALL;
a9de9248 215 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1da177e4 216
1da177e4 217 /* Connection accept timeout ~20 secs */
aca3192c 218 param = cpu_to_le16(0x7d00);
a9de9248 219 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
b0916ea0
JH
220
221 bacpy(&cp.bdaddr, BDADDR_ANY);
222 cp.delete_all = 1;
223 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
1da177e4
LT
224}
225
e61ef499
AE
226static void amp_init(struct hci_dev *hdev)
227{
2455a3ea
AE
228 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
229
e61ef499
AE
230 /* Reset */
231 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
232
233 /* Read Local Version */
234 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
235}
236
237static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
238{
239 struct sk_buff *skb;
240
241 BT_DBG("%s %ld", hdev->name, opt);
242
243 /* Driver initialization */
244
245 /* Special commands */
246 while ((skb = skb_dequeue(&hdev->driver_init))) {
247 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
248 skb->dev = (void *) hdev;
249
250 skb_queue_tail(&hdev->cmd_q, skb);
251 queue_work(hdev->workqueue, &hdev->cmd_work);
252 }
253 skb_queue_purge(&hdev->driver_init);
254
255 switch (hdev->dev_type) {
256 case HCI_BREDR:
257 bredr_init(hdev);
258 break;
259
260 case HCI_AMP:
261 amp_init(hdev);
262 break;
263
264 default:
265 BT_ERR("Unknown device type %d", hdev->dev_type);
266 break;
267 }
268
269}
270
6ed58ec5
VT
271static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
272{
273 BT_DBG("%s", hdev->name);
274
275 /* Read LE buffer size */
276 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
277}
278
1da177e4
LT
279static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
280{
281 __u8 scan = opt;
282
283 BT_DBG("%s %x", hdev->name, scan);
284
285 /* Inquiry and Page scans */
a9de9248 286 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
287}
288
289static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
290{
291 __u8 auth = opt;
292
293 BT_DBG("%s %x", hdev->name, auth);
294
295 /* Authentication */
a9de9248 296 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
297}
298
299static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
300{
301 __u8 encrypt = opt;
302
303 BT_DBG("%s %x", hdev->name, encrypt);
304
e4e8e37c 305 /* Encryption */
a9de9248 306 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
307}
308
e4e8e37c
MH
309static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
310{
311 __le16 policy = cpu_to_le16(opt);
312
a418b893 313 BT_DBG("%s %x", hdev->name, policy);
e4e8e37c
MH
314
315 /* Default link policy */
316 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
317}
318
8e87d142 319/* Get HCI device by index.
1da177e4
LT
320 * Device is held on return. */
321struct hci_dev *hci_dev_get(int index)
322{
8035ded4 323 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
324
325 BT_DBG("%d", index);
326
327 if (index < 0)
328 return NULL;
329
330 read_lock(&hci_dev_list_lock);
8035ded4 331 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
332 if (d->id == index) {
333 hdev = hci_dev_hold(d);
334 break;
335 }
336 }
337 read_unlock(&hci_dev_list_lock);
338 return hdev;
339}
1da177e4
LT
340
341/* ---- Inquiry support ---- */
ff9ef578 342
30dc78e1
JH
343bool hci_discovery_active(struct hci_dev *hdev)
344{
345 struct discovery_state *discov = &hdev->discovery;
346
6fbe195d 347 switch (discov->state) {
343f935b 348 case DISCOVERY_FINDING:
6fbe195d 349 case DISCOVERY_RESOLVING:
30dc78e1
JH
350 return true;
351
6fbe195d
AG
352 default:
353 return false;
354 }
30dc78e1
JH
355}
356
ff9ef578
JH
357void hci_discovery_set_state(struct hci_dev *hdev, int state)
358{
359 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
360
361 if (hdev->discovery.state == state)
362 return;
363
364 switch (state) {
365 case DISCOVERY_STOPPED:
7b99b659
AG
366 if (hdev->discovery.state != DISCOVERY_STARTING)
367 mgmt_discovering(hdev, 0);
f963e8e9 368 hdev->discovery.type = 0;
ff9ef578
JH
369 break;
370 case DISCOVERY_STARTING:
371 break;
343f935b 372 case DISCOVERY_FINDING:
ff9ef578
JH
373 mgmt_discovering(hdev, 1);
374 break;
30dc78e1
JH
375 case DISCOVERY_RESOLVING:
376 break;
ff9ef578
JH
377 case DISCOVERY_STOPPING:
378 break;
379 }
380
381 hdev->discovery.state = state;
382}
383
1da177e4
LT
384static void inquiry_cache_flush(struct hci_dev *hdev)
385{
30883512 386 struct discovery_state *cache = &hdev->discovery;
b57c1a56 387 struct inquiry_entry *p, *n;
1da177e4 388
561aafbc
JH
389 list_for_each_entry_safe(p, n, &cache->all, all) {
390 list_del(&p->all);
b57c1a56 391 kfree(p);
1da177e4 392 }
561aafbc
JH
393
394 INIT_LIST_HEAD(&cache->unknown);
395 INIT_LIST_HEAD(&cache->resolve);
ff9ef578 396 cache->state = DISCOVERY_STOPPED;
1da177e4
LT
397}
398
399struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
400{
30883512 401 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
402 struct inquiry_entry *e;
403
404 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
405
561aafbc
JH
406 list_for_each_entry(e, &cache->all, all) {
407 if (!bacmp(&e->data.bdaddr, bdaddr))
408 return e;
409 }
410
411 return NULL;
412}
413
414struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
415 bdaddr_t *bdaddr)
416{
30883512 417 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
418 struct inquiry_entry *e;
419
420 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
421
422 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 423 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
424 return e;
425 }
426
427 return NULL;
1da177e4
LT
428}
429
30dc78e1
JH
430struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
431 bdaddr_t *bdaddr,
432 int state)
433{
434 struct discovery_state *cache = &hdev->discovery;
435 struct inquiry_entry *e;
436
437 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
438
439 list_for_each_entry(e, &cache->resolve, list) {
440 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
441 return e;
442 if (!bacmp(&e->data.bdaddr, bdaddr))
443 return e;
444 }
445
446 return NULL;
447}
448
a3d4e20a
JH
449void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
450 struct inquiry_entry *ie)
451{
452 struct discovery_state *cache = &hdev->discovery;
453 struct list_head *pos = &cache->resolve;
454 struct inquiry_entry *p;
455
456 list_del(&ie->list);
457
458 list_for_each_entry(p, &cache->resolve, list) {
459 if (p->name_state != NAME_PENDING &&
460 abs(p->data.rssi) >= abs(ie->data.rssi))
461 break;
462 pos = &p->list;
463 }
464
465 list_add(&ie->list, pos);
466}
467
3175405b 468bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
561aafbc 469 bool name_known)
1da177e4 470{
30883512 471 struct discovery_state *cache = &hdev->discovery;
70f23020 472 struct inquiry_entry *ie;
1da177e4
LT
473
474 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
475
70f23020 476 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
a3d4e20a
JH
477 if (ie) {
478 if (ie->name_state == NAME_NEEDED &&
479 data->rssi != ie->data.rssi) {
480 ie->data.rssi = data->rssi;
481 hci_inquiry_cache_update_resolve(hdev, ie);
482 }
483
561aafbc 484 goto update;
a3d4e20a 485 }
561aafbc
JH
486
487 /* Entry not in the cache. Add new one. */
488 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
489 if (!ie)
3175405b 490 return false;
561aafbc
JH
491
492 list_add(&ie->all, &cache->all);
493
494 if (name_known) {
495 ie->name_state = NAME_KNOWN;
496 } else {
497 ie->name_state = NAME_NOT_KNOWN;
498 list_add(&ie->list, &cache->unknown);
499 }
70f23020 500
561aafbc
JH
501update:
502 if (name_known && ie->name_state != NAME_KNOWN &&
503 ie->name_state != NAME_PENDING) {
504 ie->name_state = NAME_KNOWN;
505 list_del(&ie->list);
1da177e4
LT
506 }
507
70f23020
AE
508 memcpy(&ie->data, data, sizeof(*data));
509 ie->timestamp = jiffies;
1da177e4 510 cache->timestamp = jiffies;
3175405b
JH
511
512 if (ie->name_state == NAME_NOT_KNOWN)
513 return false;
514
515 return true;
1da177e4
LT
516}
517
518static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
519{
30883512 520 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
521 struct inquiry_info *info = (struct inquiry_info *) buf;
522 struct inquiry_entry *e;
523 int copied = 0;
524
561aafbc 525 list_for_each_entry(e, &cache->all, all) {
1da177e4 526 struct inquiry_data *data = &e->data;
b57c1a56
JH
527
528 if (copied >= num)
529 break;
530
1da177e4
LT
531 bacpy(&info->bdaddr, &data->bdaddr);
532 info->pscan_rep_mode = data->pscan_rep_mode;
533 info->pscan_period_mode = data->pscan_period_mode;
534 info->pscan_mode = data->pscan_mode;
535 memcpy(info->dev_class, data->dev_class, 3);
536 info->clock_offset = data->clock_offset;
b57c1a56 537
1da177e4 538 info++;
b57c1a56 539 copied++;
1da177e4
LT
540 }
541
542 BT_DBG("cache %p, copied %d", cache, copied);
543 return copied;
544}
545
546static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
547{
548 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
549 struct hci_cp_inquiry cp;
550
551 BT_DBG("%s", hdev->name);
552
553 if (test_bit(HCI_INQUIRY, &hdev->flags))
554 return;
555
556 /* Start Inquiry */
557 memcpy(&cp.lap, &ir->lap, 3);
558 cp.length = ir->length;
559 cp.num_rsp = ir->num_rsp;
a9de9248 560 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
561}
562
563int hci_inquiry(void __user *arg)
564{
565 __u8 __user *ptr = arg;
566 struct hci_inquiry_req ir;
567 struct hci_dev *hdev;
568 int err = 0, do_inquiry = 0, max_rsp;
569 long timeo;
570 __u8 *buf;
571
572 if (copy_from_user(&ir, ptr, sizeof(ir)))
573 return -EFAULT;
574
5a08ecce
AE
575 hdev = hci_dev_get(ir.dev_id);
576 if (!hdev)
1da177e4
LT
577 return -ENODEV;
578
09fd0de5 579 hci_dev_lock(hdev);
8e87d142 580 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
70f23020
AE
581 inquiry_cache_empty(hdev) ||
582 ir.flags & IREQ_CACHE_FLUSH) {
1da177e4
LT
583 inquiry_cache_flush(hdev);
584 do_inquiry = 1;
585 }
09fd0de5 586 hci_dev_unlock(hdev);
1da177e4 587
04837f64 588 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
589
590 if (do_inquiry) {
591 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
592 if (err < 0)
593 goto done;
594 }
1da177e4
LT
595
596 /* for unlimited number of responses we will use buffer with 255 entries */
597 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
598
599 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
600 * copy it to the user space.
601 */
01df8c31 602 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 603 if (!buf) {
1da177e4
LT
604 err = -ENOMEM;
605 goto done;
606 }
607
09fd0de5 608 hci_dev_lock(hdev);
1da177e4 609 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 610 hci_dev_unlock(hdev);
1da177e4
LT
611
612 BT_DBG("num_rsp %d", ir.num_rsp);
613
614 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
615 ptr += sizeof(ir);
616 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
617 ir.num_rsp))
618 err = -EFAULT;
8e87d142 619 } else
1da177e4
LT
620 err = -EFAULT;
621
622 kfree(buf);
623
624done:
625 hci_dev_put(hdev);
626 return err;
627}
628
629/* ---- HCI ioctl helpers ---- */
630
631int hci_dev_open(__u16 dev)
632{
633 struct hci_dev *hdev;
634 int ret = 0;
635
5a08ecce
AE
636 hdev = hci_dev_get(dev);
637 if (!hdev)
1da177e4
LT
638 return -ENODEV;
639
640 BT_DBG("%s %p", hdev->name, hdev);
641
642 hci_req_lock(hdev);
643
611b30f7
MH
644 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
645 ret = -ERFKILL;
646 goto done;
647 }
648
1da177e4
LT
649 if (test_bit(HCI_UP, &hdev->flags)) {
650 ret = -EALREADY;
651 goto done;
652 }
653
654 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
655 set_bit(HCI_RAW, &hdev->flags);
656
07e3b94a
AE
657 /* Treat all non BR/EDR controllers as raw devices if
658 enable_hs is not set */
659 if (hdev->dev_type != HCI_BREDR && !enable_hs)
943da25d
MH
660 set_bit(HCI_RAW, &hdev->flags);
661
1da177e4
LT
662 if (hdev->open(hdev)) {
663 ret = -EIO;
664 goto done;
665 }
666
667 if (!test_bit(HCI_RAW, &hdev->flags)) {
668 atomic_set(&hdev->cmd_cnt, 1);
669 set_bit(HCI_INIT, &hdev->flags);
a5040efa 670 hdev->init_last_cmd = 0;
1da177e4 671
04837f64
MH
672 ret = __hci_request(hdev, hci_init_req, 0,
673 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4 674
eead27da 675 if (lmp_host_le_capable(hdev))
6ed58ec5
VT
676 ret = __hci_request(hdev, hci_le_init_req, 0,
677 msecs_to_jiffies(HCI_INIT_TIMEOUT));
678
1da177e4
LT
679 clear_bit(HCI_INIT, &hdev->flags);
680 }
681
682 if (!ret) {
683 hci_dev_hold(hdev);
684 set_bit(HCI_UP, &hdev->flags);
685 hci_notify(hdev, HCI_DEV_UP);
a8b2d5c2 686 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
09fd0de5 687 hci_dev_lock(hdev);
744cf19e 688 mgmt_powered(hdev, 1);
09fd0de5 689 hci_dev_unlock(hdev);
56e5cb86 690 }
8e87d142 691 } else {
1da177e4 692 /* Init failed, cleanup */
3eff45ea 693 flush_work(&hdev->tx_work);
c347b765 694 flush_work(&hdev->cmd_work);
b78752cc 695 flush_work(&hdev->rx_work);
1da177e4
LT
696
697 skb_queue_purge(&hdev->cmd_q);
698 skb_queue_purge(&hdev->rx_q);
699
700 if (hdev->flush)
701 hdev->flush(hdev);
702
703 if (hdev->sent_cmd) {
704 kfree_skb(hdev->sent_cmd);
705 hdev->sent_cmd = NULL;
706 }
707
708 hdev->close(hdev);
709 hdev->flags = 0;
710 }
711
712done:
713 hci_req_unlock(hdev);
714 hci_dev_put(hdev);
715 return ret;
716}
717
718static int hci_dev_do_close(struct hci_dev *hdev)
719{
720 BT_DBG("%s %p", hdev->name, hdev);
721
28b75a89
AG
722 cancel_work_sync(&hdev->le_scan);
723
1da177e4
LT
724 hci_req_cancel(hdev, ENODEV);
725 hci_req_lock(hdev);
726
727 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
b79f44c1 728 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
729 hci_req_unlock(hdev);
730 return 0;
731 }
732
3eff45ea
GP
733 /* Flush RX and TX works */
734 flush_work(&hdev->tx_work);
b78752cc 735 flush_work(&hdev->rx_work);
1da177e4 736
16ab91ab 737 if (hdev->discov_timeout > 0) {
e0f9309f 738 cancel_delayed_work(&hdev->discov_off);
16ab91ab 739 hdev->discov_timeout = 0;
5e5282bb 740 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
16ab91ab
JH
741 }
742
a8b2d5c2 743 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
7d78525d
JH
744 cancel_delayed_work(&hdev->service_cache);
745
7ba8b4be
AG
746 cancel_delayed_work_sync(&hdev->le_scan_disable);
747
09fd0de5 748 hci_dev_lock(hdev);
1da177e4
LT
749 inquiry_cache_flush(hdev);
750 hci_conn_hash_flush(hdev);
09fd0de5 751 hci_dev_unlock(hdev);
1da177e4
LT
752
753 hci_notify(hdev, HCI_DEV_DOWN);
754
755 if (hdev->flush)
756 hdev->flush(hdev);
757
758 /* Reset device */
759 skb_queue_purge(&hdev->cmd_q);
760 atomic_set(&hdev->cmd_cnt, 1);
8af59467
JH
761 if (!test_bit(HCI_RAW, &hdev->flags) &&
762 test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
1da177e4 763 set_bit(HCI_INIT, &hdev->flags);
04837f64 764 __hci_request(hdev, hci_reset_req, 0,
cad44c2b 765 msecs_to_jiffies(250));
1da177e4
LT
766 clear_bit(HCI_INIT, &hdev->flags);
767 }
768
c347b765
GP
769 /* flush cmd work */
770 flush_work(&hdev->cmd_work);
1da177e4
LT
771
772 /* Drop queues */
773 skb_queue_purge(&hdev->rx_q);
774 skb_queue_purge(&hdev->cmd_q);
775 skb_queue_purge(&hdev->raw_q);
776
777 /* Drop last sent command */
778 if (hdev->sent_cmd) {
b79f44c1 779 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
780 kfree_skb(hdev->sent_cmd);
781 hdev->sent_cmd = NULL;
782 }
783
784 /* After this point our queues are empty
785 * and no tasks are scheduled. */
786 hdev->close(hdev);
787
8ee56540
MH
788 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
789 hci_dev_lock(hdev);
790 mgmt_powered(hdev, 0);
791 hci_dev_unlock(hdev);
792 }
5add6af8 793
1da177e4
LT
794 /* Clear flags */
795 hdev->flags = 0;
796
e59fda8d
JH
797 memset(hdev->eir, 0, sizeof(hdev->eir));
798
1da177e4
LT
799 hci_req_unlock(hdev);
800
801 hci_dev_put(hdev);
802 return 0;
803}
804
805int hci_dev_close(__u16 dev)
806{
807 struct hci_dev *hdev;
808 int err;
809
70f23020
AE
810 hdev = hci_dev_get(dev);
811 if (!hdev)
1da177e4 812 return -ENODEV;
8ee56540
MH
813
814 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
815 cancel_delayed_work(&hdev->power_off);
816
1da177e4 817 err = hci_dev_do_close(hdev);
8ee56540 818
1da177e4
LT
819 hci_dev_put(hdev);
820 return err;
821}
822
823int hci_dev_reset(__u16 dev)
824{
825 struct hci_dev *hdev;
826 int ret = 0;
827
70f23020
AE
828 hdev = hci_dev_get(dev);
829 if (!hdev)
1da177e4
LT
830 return -ENODEV;
831
832 hci_req_lock(hdev);
1da177e4
LT
833
834 if (!test_bit(HCI_UP, &hdev->flags))
835 goto done;
836
837 /* Drop queues */
838 skb_queue_purge(&hdev->rx_q);
839 skb_queue_purge(&hdev->cmd_q);
840
09fd0de5 841 hci_dev_lock(hdev);
1da177e4
LT
842 inquiry_cache_flush(hdev);
843 hci_conn_hash_flush(hdev);
09fd0de5 844 hci_dev_unlock(hdev);
1da177e4
LT
845
846 if (hdev->flush)
847 hdev->flush(hdev);
848
8e87d142 849 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 850 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4
LT
851
852 if (!test_bit(HCI_RAW, &hdev->flags))
04837f64
MH
853 ret = __hci_request(hdev, hci_reset_req, 0,
854 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
855
856done:
1da177e4
LT
857 hci_req_unlock(hdev);
858 hci_dev_put(hdev);
859 return ret;
860}
861
862int hci_dev_reset_stat(__u16 dev)
863{
864 struct hci_dev *hdev;
865 int ret = 0;
866
70f23020
AE
867 hdev = hci_dev_get(dev);
868 if (!hdev)
1da177e4
LT
869 return -ENODEV;
870
871 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
872
873 hci_dev_put(hdev);
874
875 return ret;
876}
877
878int hci_dev_cmd(unsigned int cmd, void __user *arg)
879{
880 struct hci_dev *hdev;
881 struct hci_dev_req dr;
882 int err = 0;
883
884 if (copy_from_user(&dr, arg, sizeof(dr)))
885 return -EFAULT;
886
70f23020
AE
887 hdev = hci_dev_get(dr.dev_id);
888 if (!hdev)
1da177e4
LT
889 return -ENODEV;
890
891 switch (cmd) {
892 case HCISETAUTH:
04837f64
MH
893 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
894 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
895 break;
896
897 case HCISETENCRYPT:
898 if (!lmp_encrypt_capable(hdev)) {
899 err = -EOPNOTSUPP;
900 break;
901 }
902
903 if (!test_bit(HCI_AUTH, &hdev->flags)) {
904 /* Auth must be enabled first */
04837f64
MH
905 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
906 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
907 if (err)
908 break;
909 }
910
04837f64
MH
911 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
912 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
913 break;
914
915 case HCISETSCAN:
04837f64
MH
916 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
917 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
918 break;
919
1da177e4 920 case HCISETLINKPOL:
e4e8e37c
MH
921 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
922 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
923 break;
924
925 case HCISETLINKMODE:
e4e8e37c
MH
926 hdev->link_mode = ((__u16) dr.dev_opt) &
927 (HCI_LM_MASTER | HCI_LM_ACCEPT);
928 break;
929
930 case HCISETPTYPE:
931 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
932 break;
933
934 case HCISETACLMTU:
e4e8e37c
MH
935 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
936 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
937 break;
938
939 case HCISETSCOMTU:
e4e8e37c
MH
940 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
941 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
942 break;
943
944 default:
945 err = -EINVAL;
946 break;
947 }
e4e8e37c 948
1da177e4
LT
949 hci_dev_put(hdev);
950 return err;
951}
952
953int hci_get_dev_list(void __user *arg)
954{
8035ded4 955 struct hci_dev *hdev;
1da177e4
LT
956 struct hci_dev_list_req *dl;
957 struct hci_dev_req *dr;
1da177e4
LT
958 int n = 0, size, err;
959 __u16 dev_num;
960
961 if (get_user(dev_num, (__u16 __user *) arg))
962 return -EFAULT;
963
964 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
965 return -EINVAL;
966
967 size = sizeof(*dl) + dev_num * sizeof(*dr);
968
70f23020
AE
969 dl = kzalloc(size, GFP_KERNEL);
970 if (!dl)
1da177e4
LT
971 return -ENOMEM;
972
973 dr = dl->dev_req;
974
f20d09d5 975 read_lock(&hci_dev_list_lock);
8035ded4 976 list_for_each_entry(hdev, &hci_dev_list, list) {
a8b2d5c2 977 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
e0f9309f 978 cancel_delayed_work(&hdev->power_off);
c542a06c 979
a8b2d5c2
JH
980 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
981 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 982
1da177e4
LT
983 (dr + n)->dev_id = hdev->id;
984 (dr + n)->dev_opt = hdev->flags;
c542a06c 985
1da177e4
LT
986 if (++n >= dev_num)
987 break;
988 }
f20d09d5 989 read_unlock(&hci_dev_list_lock);
1da177e4
LT
990
991 dl->dev_num = n;
992 size = sizeof(*dl) + n * sizeof(*dr);
993
994 err = copy_to_user(arg, dl, size);
995 kfree(dl);
996
997 return err ? -EFAULT : 0;
998}
999
1000int hci_get_dev_info(void __user *arg)
1001{
1002 struct hci_dev *hdev;
1003 struct hci_dev_info di;
1004 int err = 0;
1005
1006 if (copy_from_user(&di, arg, sizeof(di)))
1007 return -EFAULT;
1008
70f23020
AE
1009 hdev = hci_dev_get(di.dev_id);
1010 if (!hdev)
1da177e4
LT
1011 return -ENODEV;
1012
a8b2d5c2 1013 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
3243553f 1014 cancel_delayed_work_sync(&hdev->power_off);
ab81cbf9 1015
a8b2d5c2
JH
1016 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1017 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 1018
1da177e4
LT
1019 strcpy(di.name, hdev->name);
1020 di.bdaddr = hdev->bdaddr;
943da25d 1021 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1da177e4
LT
1022 di.flags = hdev->flags;
1023 di.pkt_type = hdev->pkt_type;
1024 di.acl_mtu = hdev->acl_mtu;
1025 di.acl_pkts = hdev->acl_pkts;
1026 di.sco_mtu = hdev->sco_mtu;
1027 di.sco_pkts = hdev->sco_pkts;
1028 di.link_policy = hdev->link_policy;
1029 di.link_mode = hdev->link_mode;
1030
1031 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1032 memcpy(&di.features, &hdev->features, sizeof(di.features));
1033
1034 if (copy_to_user(arg, &di, sizeof(di)))
1035 err = -EFAULT;
1036
1037 hci_dev_put(hdev);
1038
1039 return err;
1040}
1041
1042/* ---- Interface to HCI drivers ---- */
1043
611b30f7
MH
1044static int hci_rfkill_set_block(void *data, bool blocked)
1045{
1046 struct hci_dev *hdev = data;
1047
1048 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1049
1050 if (!blocked)
1051 return 0;
1052
1053 hci_dev_do_close(hdev);
1054
1055 return 0;
1056}
1057
1058static const struct rfkill_ops hci_rfkill_ops = {
1059 .set_block = hci_rfkill_set_block,
1060};
1061
1da177e4
LT
1062/* Alloc HCI device */
1063struct hci_dev *hci_alloc_dev(void)
1064{
1065 struct hci_dev *hdev;
1066
25ea6db0 1067 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1da177e4
LT
1068 if (!hdev)
1069 return NULL;
1070
0ac7e700 1071 hci_init_sysfs(hdev);
1da177e4
LT
1072 skb_queue_head_init(&hdev->driver_init);
1073
1074 return hdev;
1075}
1076EXPORT_SYMBOL(hci_alloc_dev);
1077
1078/* Free HCI device */
1079void hci_free_dev(struct hci_dev *hdev)
1080{
1081 skb_queue_purge(&hdev->driver_init);
1082
a91f2e39
MH
1083 /* will free via device release */
1084 put_device(&hdev->dev);
1da177e4
LT
1085}
1086EXPORT_SYMBOL(hci_free_dev);
1087
ab81cbf9
JH
1088static void hci_power_on(struct work_struct *work)
1089{
1090 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1091
1092 BT_DBG("%s", hdev->name);
1093
1094 if (hci_dev_open(hdev->id) < 0)
1095 return;
1096
a8b2d5c2 1097 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
80b7ab33 1098 schedule_delayed_work(&hdev->power_off,
3243553f 1099 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
ab81cbf9 1100
a8b2d5c2 1101 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
744cf19e 1102 mgmt_index_added(hdev);
ab81cbf9
JH
1103}
1104
1105static void hci_power_off(struct work_struct *work)
1106{
3243553f
JH
1107 struct hci_dev *hdev = container_of(work, struct hci_dev,
1108 power_off.work);
ab81cbf9
JH
1109
1110 BT_DBG("%s", hdev->name);
1111
8ee56540 1112 hci_dev_do_close(hdev);
ab81cbf9
JH
1113}
1114
16ab91ab
JH
1115static void hci_discov_off(struct work_struct *work)
1116{
1117 struct hci_dev *hdev;
1118 u8 scan = SCAN_PAGE;
1119
1120 hdev = container_of(work, struct hci_dev, discov_off.work);
1121
1122 BT_DBG("%s", hdev->name);
1123
09fd0de5 1124 hci_dev_lock(hdev);
16ab91ab
JH
1125
1126 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1127
1128 hdev->discov_timeout = 0;
1129
09fd0de5 1130 hci_dev_unlock(hdev);
16ab91ab
JH
1131}
1132
2aeb9a1a
JH
1133int hci_uuids_clear(struct hci_dev *hdev)
1134{
1135 struct list_head *p, *n;
1136
1137 list_for_each_safe(p, n, &hdev->uuids) {
1138 struct bt_uuid *uuid;
1139
1140 uuid = list_entry(p, struct bt_uuid, list);
1141
1142 list_del(p);
1143 kfree(uuid);
1144 }
1145
1146 return 0;
1147}
1148
55ed8ca1
JH
1149int hci_link_keys_clear(struct hci_dev *hdev)
1150{
1151 struct list_head *p, *n;
1152
1153 list_for_each_safe(p, n, &hdev->link_keys) {
1154 struct link_key *key;
1155
1156 key = list_entry(p, struct link_key, list);
1157
1158 list_del(p);
1159 kfree(key);
1160 }
1161
1162 return 0;
1163}
1164
b899efaf
VCG
1165int hci_smp_ltks_clear(struct hci_dev *hdev)
1166{
1167 struct smp_ltk *k, *tmp;
1168
1169 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1170 list_del(&k->list);
1171 kfree(k);
1172 }
1173
1174 return 0;
1175}
1176
55ed8ca1
JH
1177struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1178{
8035ded4 1179 struct link_key *k;
55ed8ca1 1180
8035ded4 1181 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1182 if (bacmp(bdaddr, &k->bdaddr) == 0)
1183 return k;
55ed8ca1
JH
1184
1185 return NULL;
1186}
1187
d25e28ab
JH
1188static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1189 u8 key_type, u8 old_key_type)
1190{
1191 /* Legacy key */
1192 if (key_type < 0x03)
1193 return 1;
1194
1195 /* Debug keys are insecure so don't store them persistently */
1196 if (key_type == HCI_LK_DEBUG_COMBINATION)
1197 return 0;
1198
1199 /* Changed combination key and there's no previous one */
1200 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1201 return 0;
1202
1203 /* Security mode 3 case */
1204 if (!conn)
1205 return 1;
1206
1207 /* Neither local nor remote side had no-bonding as requirement */
1208 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1209 return 1;
1210
1211 /* Local side had dedicated bonding as requirement */
1212 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1213 return 1;
1214
1215 /* Remote side had dedicated bonding as requirement */
1216 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1217 return 1;
1218
1219 /* If none of the above criteria match, then don't store the key
1220 * persistently */
1221 return 0;
1222}
1223
c9839a11 1224struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
75d262c2 1225{
c9839a11 1226 struct smp_ltk *k;
75d262c2 1227
c9839a11
VCG
1228 list_for_each_entry(k, &hdev->long_term_keys, list) {
1229 if (k->ediv != ediv ||
1230 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
1231 continue;
1232
c9839a11 1233 return k;
75d262c2
VCG
1234 }
1235
1236 return NULL;
1237}
1238EXPORT_SYMBOL(hci_find_ltk);
1239
c9839a11
VCG
1240struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1241 u8 addr_type)
75d262c2 1242{
c9839a11 1243 struct smp_ltk *k;
75d262c2 1244
c9839a11
VCG
1245 list_for_each_entry(k, &hdev->long_term_keys, list)
1246 if (addr_type == k->bdaddr_type &&
1247 bacmp(bdaddr, &k->bdaddr) == 0)
75d262c2
VCG
1248 return k;
1249
1250 return NULL;
1251}
c9839a11 1252EXPORT_SYMBOL(hci_find_ltk_by_addr);
75d262c2 1253
d25e28ab
JH
1254int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1255 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
55ed8ca1
JH
1256{
1257 struct link_key *key, *old_key;
4df378a1 1258 u8 old_key_type, persistent;
55ed8ca1
JH
1259
1260 old_key = hci_find_link_key(hdev, bdaddr);
1261 if (old_key) {
1262 old_key_type = old_key->type;
1263 key = old_key;
1264 } else {
12adcf3a 1265 old_key_type = conn ? conn->key_type : 0xff;
55ed8ca1
JH
1266 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1267 if (!key)
1268 return -ENOMEM;
1269 list_add(&key->list, &hdev->link_keys);
1270 }
1271
1272 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1273
d25e28ab
JH
1274 /* Some buggy controller combinations generate a changed
1275 * combination key for legacy pairing even when there's no
1276 * previous key */
1277 if (type == HCI_LK_CHANGED_COMBINATION &&
1278 (!conn || conn->remote_auth == 0xff) &&
655fe6ec 1279 old_key_type == 0xff) {
d25e28ab 1280 type = HCI_LK_COMBINATION;
655fe6ec
JH
1281 if (conn)
1282 conn->key_type = type;
1283 }
d25e28ab 1284
55ed8ca1
JH
1285 bacpy(&key->bdaddr, bdaddr);
1286 memcpy(key->val, val, 16);
55ed8ca1
JH
1287 key->pin_len = pin_len;
1288
b6020ba0 1289 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 1290 key->type = old_key_type;
4748fed2
JH
1291 else
1292 key->type = type;
1293
4df378a1
JH
1294 if (!new_key)
1295 return 0;
1296
1297 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1298
744cf19e 1299 mgmt_new_link_key(hdev, key, persistent);
4df378a1
JH
1300
1301 if (!persistent) {
1302 list_del(&key->list);
1303 kfree(key);
1304 }
55ed8ca1
JH
1305
1306 return 0;
1307}
1308
c9839a11
VCG
1309int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1310 int new_key, u8 authenticated, u8 tk[16],
1311 u8 enc_size, u16 ediv, u8 rand[8])
75d262c2 1312{
c9839a11 1313 struct smp_ltk *key, *old_key;
75d262c2 1314
c9839a11
VCG
1315 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1316 return 0;
75d262c2 1317
c9839a11
VCG
1318 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1319 if (old_key)
75d262c2 1320 key = old_key;
c9839a11
VCG
1321 else {
1322 key = kzalloc(sizeof(*key), GFP_ATOMIC);
75d262c2
VCG
1323 if (!key)
1324 return -ENOMEM;
c9839a11 1325 list_add(&key->list, &hdev->long_term_keys);
75d262c2
VCG
1326 }
1327
75d262c2 1328 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
1329 key->bdaddr_type = addr_type;
1330 memcpy(key->val, tk, sizeof(key->val));
1331 key->authenticated = authenticated;
1332 key->ediv = ediv;
1333 key->enc_size = enc_size;
1334 key->type = type;
1335 memcpy(key->rand, rand, sizeof(key->rand));
75d262c2 1336
c9839a11
VCG
1337 if (!new_key)
1338 return 0;
75d262c2 1339
261cc5aa
VCG
1340 if (type & HCI_SMP_LTK)
1341 mgmt_new_ltk(hdev, key, 1);
1342
75d262c2
VCG
1343 return 0;
1344}
1345
55ed8ca1
JH
1346int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1347{
1348 struct link_key *key;
1349
1350 key = hci_find_link_key(hdev, bdaddr);
1351 if (!key)
1352 return -ENOENT;
1353
1354 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1355
1356 list_del(&key->list);
1357 kfree(key);
1358
1359 return 0;
1360}
1361
b899efaf
VCG
1362int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1363{
1364 struct smp_ltk *k, *tmp;
1365
1366 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1367 if (bacmp(bdaddr, &k->bdaddr))
1368 continue;
1369
1370 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1371
1372 list_del(&k->list);
1373 kfree(k);
1374 }
1375
1376 return 0;
1377}
1378
6bd32326
VT
1379/* HCI command timer function */
1380static void hci_cmd_timer(unsigned long arg)
1381{
1382 struct hci_dev *hdev = (void *) arg;
1383
1384 BT_ERR("%s command tx timeout", hdev->name);
1385 atomic_set(&hdev->cmd_cnt, 1);
c347b765 1386 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
1387}
1388
2763eda6
SJ
1389struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1390 bdaddr_t *bdaddr)
1391{
1392 struct oob_data *data;
1393
1394 list_for_each_entry(data, &hdev->remote_oob_data, list)
1395 if (bacmp(bdaddr, &data->bdaddr) == 0)
1396 return data;
1397
1398 return NULL;
1399}
1400
1401int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1402{
1403 struct oob_data *data;
1404
1405 data = hci_find_remote_oob_data(hdev, bdaddr);
1406 if (!data)
1407 return -ENOENT;
1408
1409 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1410
1411 list_del(&data->list);
1412 kfree(data);
1413
1414 return 0;
1415}
1416
1417int hci_remote_oob_data_clear(struct hci_dev *hdev)
1418{
1419 struct oob_data *data, *n;
1420
1421 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1422 list_del(&data->list);
1423 kfree(data);
1424 }
1425
1426 return 0;
1427}
1428
1429int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1430 u8 *randomizer)
1431{
1432 struct oob_data *data;
1433
1434 data = hci_find_remote_oob_data(hdev, bdaddr);
1435
1436 if (!data) {
1437 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1438 if (!data)
1439 return -ENOMEM;
1440
1441 bacpy(&data->bdaddr, bdaddr);
1442 list_add(&data->list, &hdev->remote_oob_data);
1443 }
1444
1445 memcpy(data->hash, hash, sizeof(data->hash));
1446 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1447
1448 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1449
1450 return 0;
1451}
1452
b2a66aad
AJ
1453struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1454 bdaddr_t *bdaddr)
1455{
8035ded4 1456 struct bdaddr_list *b;
b2a66aad 1457
8035ded4 1458 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1459 if (bacmp(bdaddr, &b->bdaddr) == 0)
1460 return b;
b2a66aad
AJ
1461
1462 return NULL;
1463}
1464
1465int hci_blacklist_clear(struct hci_dev *hdev)
1466{
1467 struct list_head *p, *n;
1468
1469 list_for_each_safe(p, n, &hdev->blacklist) {
1470 struct bdaddr_list *b;
1471
1472 b = list_entry(p, struct bdaddr_list, list);
1473
1474 list_del(p);
1475 kfree(b);
1476 }
1477
1478 return 0;
1479}
1480
88c1fe4b 1481int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1482{
1483 struct bdaddr_list *entry;
b2a66aad
AJ
1484
1485 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1486 return -EBADF;
1487
5e762444
AJ
1488 if (hci_blacklist_lookup(hdev, bdaddr))
1489 return -EEXIST;
b2a66aad
AJ
1490
1491 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
1492 if (!entry)
1493 return -ENOMEM;
b2a66aad
AJ
1494
1495 bacpy(&entry->bdaddr, bdaddr);
1496
1497 list_add(&entry->list, &hdev->blacklist);
1498
88c1fe4b 1499 return mgmt_device_blocked(hdev, bdaddr, type);
b2a66aad
AJ
1500}
1501
88c1fe4b 1502int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1503{
1504 struct bdaddr_list *entry;
b2a66aad 1505
1ec918ce 1506 if (bacmp(bdaddr, BDADDR_ANY) == 0)
5e762444 1507 return hci_blacklist_clear(hdev);
b2a66aad
AJ
1508
1509 entry = hci_blacklist_lookup(hdev, bdaddr);
1ec918ce 1510 if (!entry)
5e762444 1511 return -ENOENT;
b2a66aad
AJ
1512
1513 list_del(&entry->list);
1514 kfree(entry);
1515
88c1fe4b 1516 return mgmt_device_unblocked(hdev, bdaddr, type);
b2a66aad
AJ
1517}
1518
db323f2f 1519static void hci_clear_adv_cache(struct work_struct *work)
35815085 1520{
db323f2f
GP
1521 struct hci_dev *hdev = container_of(work, struct hci_dev,
1522 adv_work.work);
35815085
AG
1523
1524 hci_dev_lock(hdev);
1525
1526 hci_adv_entries_clear(hdev);
1527
1528 hci_dev_unlock(hdev);
1529}
1530
76c8686f
AG
1531int hci_adv_entries_clear(struct hci_dev *hdev)
1532{
1533 struct adv_entry *entry, *tmp;
1534
1535 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1536 list_del(&entry->list);
1537 kfree(entry);
1538 }
1539
1540 BT_DBG("%s adv cache cleared", hdev->name);
1541
1542 return 0;
1543}
1544
1545struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1546{
1547 struct adv_entry *entry;
1548
1549 list_for_each_entry(entry, &hdev->adv_entries, list)
1550 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1551 return entry;
1552
1553 return NULL;
1554}
1555
1556static inline int is_connectable_adv(u8 evt_type)
1557{
1558 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1559 return 1;
1560
1561 return 0;
1562}
1563
1564int hci_add_adv_entry(struct hci_dev *hdev,
1565 struct hci_ev_le_advertising_info *ev)
1566{
1567 struct adv_entry *entry;
1568
1569 if (!is_connectable_adv(ev->evt_type))
1570 return -EINVAL;
1571
1572 /* Only new entries should be added to adv_entries. So, if
1573 * bdaddr was found, don't add it. */
1574 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1575 return 0;
1576
4777bfde 1577 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
76c8686f
AG
1578 if (!entry)
1579 return -ENOMEM;
1580
1581 bacpy(&entry->bdaddr, &ev->bdaddr);
1582 entry->bdaddr_type = ev->bdaddr_type;
1583
1584 list_add(&entry->list, &hdev->adv_entries);
1585
1586 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1587 batostr(&entry->bdaddr), entry->bdaddr_type);
1588
1589 return 0;
1590}
1591
7ba8b4be
AG
1592static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1593{
1594 struct le_scan_params *param = (struct le_scan_params *) opt;
1595 struct hci_cp_le_set_scan_param cp;
1596
1597 memset(&cp, 0, sizeof(cp));
1598 cp.type = param->type;
1599 cp.interval = cpu_to_le16(param->interval);
1600 cp.window = cpu_to_le16(param->window);
1601
1602 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1603}
1604
1605static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1606{
1607 struct hci_cp_le_set_scan_enable cp;
1608
1609 memset(&cp, 0, sizeof(cp));
1610 cp.enable = 1;
1611
1612 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1613}
1614
1615static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1616 u16 window, int timeout)
1617{
1618 long timeo = msecs_to_jiffies(3000);
1619 struct le_scan_params param;
1620 int err;
1621
1622 BT_DBG("%s", hdev->name);
1623
1624 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1625 return -EINPROGRESS;
1626
1627 param.type = type;
1628 param.interval = interval;
1629 param.window = window;
1630
1631 hci_req_lock(hdev);
1632
1633 err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
1634 timeo);
1635 if (!err)
1636 err = __hci_request(hdev, le_scan_enable_req, 0, timeo);
1637
1638 hci_req_unlock(hdev);
1639
1640 if (err < 0)
1641 return err;
1642
1643 schedule_delayed_work(&hdev->le_scan_disable,
1644 msecs_to_jiffies(timeout));
1645
1646 return 0;
1647}
1648
1649static void le_scan_disable_work(struct work_struct *work)
1650{
1651 struct hci_dev *hdev = container_of(work, struct hci_dev,
1652 le_scan_disable.work);
1653 struct hci_cp_le_set_scan_enable cp;
1654
1655 BT_DBG("%s", hdev->name);
1656
1657 memset(&cp, 0, sizeof(cp));
1658
1659 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1660}
1661
28b75a89
AG
1662static void le_scan_work(struct work_struct *work)
1663{
1664 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1665 struct le_scan_params *param = &hdev->le_scan_params;
1666
1667 BT_DBG("%s", hdev->name);
1668
1669 hci_do_le_scan(hdev, param->type, param->interval,
1670 param->window, param->timeout);
1671}
1672
1673int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
1674 int timeout)
1675{
1676 struct le_scan_params *param = &hdev->le_scan_params;
1677
1678 BT_DBG("%s", hdev->name);
1679
1680 if (work_busy(&hdev->le_scan))
1681 return -EINPROGRESS;
1682
1683 param->type = type;
1684 param->interval = interval;
1685 param->window = window;
1686 param->timeout = timeout;
1687
1688 queue_work(system_long_wq, &hdev->le_scan);
1689
1690 return 0;
1691}
1692
1da177e4
LT
1693/* Register HCI device */
1694int hci_register_dev(struct hci_dev *hdev)
1695{
1696 struct list_head *head = &hci_dev_list, *p;
08add513 1697 int i, id, error;
1da177e4 1698
e9b9cfa1 1699 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 1700
010666a1 1701 if (!hdev->open || !hdev->close)
1da177e4
LT
1702 return -EINVAL;
1703
08add513
MM
1704 /* Do not allow HCI_AMP devices to register at index 0,
1705 * so the index can be used as the AMP controller ID.
1706 */
1707 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1708
f20d09d5 1709 write_lock(&hci_dev_list_lock);
1da177e4
LT
1710
1711 /* Find first available device id */
1712 list_for_each(p, &hci_dev_list) {
1713 if (list_entry(p, struct hci_dev, list)->id != id)
1714 break;
1715 head = p; id++;
1716 }
8e87d142 1717
1da177e4
LT
1718 sprintf(hdev->name, "hci%d", id);
1719 hdev->id = id;
c6feeb28 1720 list_add_tail(&hdev->list, head);
1da177e4 1721
09fd0de5 1722 mutex_init(&hdev->lock);
1da177e4
LT
1723
1724 hdev->flags = 0;
d23264a8 1725 hdev->dev_flags = 0;
1da177e4 1726 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
5b7f9909 1727 hdev->esco_type = (ESCO_HV1);
1da177e4 1728 hdev->link_mode = (HCI_LM_ACCEPT);
17fa4b9d 1729 hdev->io_capability = 0x03; /* No Input No Output */
1da177e4 1730
04837f64
MH
1731 hdev->idle_timeout = 0;
1732 hdev->sniff_max_interval = 800;
1733 hdev->sniff_min_interval = 80;
1734
b78752cc 1735 INIT_WORK(&hdev->rx_work, hci_rx_work);
c347b765 1736 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3eff45ea 1737 INIT_WORK(&hdev->tx_work, hci_tx_work);
b78752cc 1738
1da177e4
LT
1739
1740 skb_queue_head_init(&hdev->rx_q);
1741 skb_queue_head_init(&hdev->cmd_q);
1742 skb_queue_head_init(&hdev->raw_q);
1743
6bd32326
VT
1744 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1745
cd4c5391 1746 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
1747 hdev->reassembly[i] = NULL;
1748
1da177e4 1749 init_waitqueue_head(&hdev->req_wait_q);
a6a67efd 1750 mutex_init(&hdev->req_lock);
1da177e4 1751
30883512 1752 discovery_init(hdev);
1da177e4
LT
1753
1754 hci_conn_hash_init(hdev);
1755
2e58ef3e
JH
1756 INIT_LIST_HEAD(&hdev->mgmt_pending);
1757
ea4bd8ba 1758 INIT_LIST_HEAD(&hdev->blacklist);
f0358568 1759
2aeb9a1a
JH
1760 INIT_LIST_HEAD(&hdev->uuids);
1761
55ed8ca1 1762 INIT_LIST_HEAD(&hdev->link_keys);
b899efaf 1763 INIT_LIST_HEAD(&hdev->long_term_keys);
55ed8ca1 1764
2763eda6
SJ
1765 INIT_LIST_HEAD(&hdev->remote_oob_data);
1766
76c8686f
AG
1767 INIT_LIST_HEAD(&hdev->adv_entries);
1768
db323f2f 1769 INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
ab81cbf9 1770 INIT_WORK(&hdev->power_on, hci_power_on);
3243553f 1771 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
ab81cbf9 1772
16ab91ab
JH
1773 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1774
1da177e4
LT
1775 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1776
1777 atomic_set(&hdev->promisc, 0);
1778
28b75a89
AG
1779 INIT_WORK(&hdev->le_scan, le_scan_work);
1780
7ba8b4be
AG
1781 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1782
f20d09d5 1783 write_unlock(&hci_dev_list_lock);
1da177e4 1784
32845eb1
GP
1785 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1786 WQ_MEM_RECLAIM, 1);
33ca954d
DH
1787 if (!hdev->workqueue) {
1788 error = -ENOMEM;
1789 goto err;
1790 }
f48fd9c8 1791
33ca954d
DH
1792 error = hci_add_sysfs(hdev);
1793 if (error < 0)
1794 goto err_wqueue;
1da177e4 1795
611b30f7
MH
1796 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1797 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1798 if (hdev->rfkill) {
1799 if (rfkill_register(hdev->rfkill) < 0) {
1800 rfkill_destroy(hdev->rfkill);
1801 hdev->rfkill = NULL;
1802 }
1803 }
1804
a8b2d5c2
JH
1805 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1806 set_bit(HCI_SETUP, &hdev->dev_flags);
7f971041 1807 schedule_work(&hdev->power_on);
ab81cbf9 1808
1da177e4 1809 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 1810 hci_dev_hold(hdev);
1da177e4
LT
1811
1812 return id;
f48fd9c8 1813
33ca954d
DH
1814err_wqueue:
1815 destroy_workqueue(hdev->workqueue);
1816err:
f20d09d5 1817 write_lock(&hci_dev_list_lock);
f48fd9c8 1818 list_del(&hdev->list);
f20d09d5 1819 write_unlock(&hci_dev_list_lock);
f48fd9c8 1820
33ca954d 1821 return error;
1da177e4
LT
1822}
1823EXPORT_SYMBOL(hci_register_dev);
1824
1825/* Unregister HCI device */
59735631 1826void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 1827{
ef222013
MH
1828 int i;
1829
c13854ce 1830 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 1831
f20d09d5 1832 write_lock(&hci_dev_list_lock);
1da177e4 1833 list_del(&hdev->list);
f20d09d5 1834 write_unlock(&hci_dev_list_lock);
1da177e4
LT
1835
1836 hci_dev_do_close(hdev);
1837
cd4c5391 1838 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
1839 kfree_skb(hdev->reassembly[i]);
1840
ab81cbf9 1841 if (!test_bit(HCI_INIT, &hdev->flags) &&
a8b2d5c2 1842 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
09fd0de5 1843 hci_dev_lock(hdev);
744cf19e 1844 mgmt_index_removed(hdev);
09fd0de5 1845 hci_dev_unlock(hdev);
56e5cb86 1846 }
ab81cbf9 1847
2e58ef3e
JH
1848 /* mgmt_index_removed should take care of emptying the
1849 * pending list */
1850 BUG_ON(!list_empty(&hdev->mgmt_pending));
1851
1da177e4
LT
1852 hci_notify(hdev, HCI_DEV_UNREG);
1853
611b30f7
MH
1854 if (hdev->rfkill) {
1855 rfkill_unregister(hdev->rfkill);
1856 rfkill_destroy(hdev->rfkill);
1857 }
1858
ce242970 1859 hci_del_sysfs(hdev);
147e2d59 1860
db323f2f 1861 cancel_delayed_work_sync(&hdev->adv_work);
c6f3c5f7 1862
f48fd9c8
MH
1863 destroy_workqueue(hdev->workqueue);
1864
09fd0de5 1865 hci_dev_lock(hdev);
e2e0cacb 1866 hci_blacklist_clear(hdev);
2aeb9a1a 1867 hci_uuids_clear(hdev);
55ed8ca1 1868 hci_link_keys_clear(hdev);
b899efaf 1869 hci_smp_ltks_clear(hdev);
2763eda6 1870 hci_remote_oob_data_clear(hdev);
76c8686f 1871 hci_adv_entries_clear(hdev);
09fd0de5 1872 hci_dev_unlock(hdev);
e2e0cacb 1873
dc946bd8 1874 hci_dev_put(hdev);
1da177e4
LT
1875}
1876EXPORT_SYMBOL(hci_unregister_dev);
1877
1878/* Suspend HCI device */
1879int hci_suspend_dev(struct hci_dev *hdev)
1880{
1881 hci_notify(hdev, HCI_DEV_SUSPEND);
1882 return 0;
1883}
1884EXPORT_SYMBOL(hci_suspend_dev);
1885
1886/* Resume HCI device */
1887int hci_resume_dev(struct hci_dev *hdev)
1888{
1889 hci_notify(hdev, HCI_DEV_RESUME);
1890 return 0;
1891}
1892EXPORT_SYMBOL(hci_resume_dev);
1893
76bca880
MH
1894/* Receive frame from HCI drivers */
1895int hci_recv_frame(struct sk_buff *skb)
1896{
1897 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1898 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1899 && !test_bit(HCI_INIT, &hdev->flags))) {
1900 kfree_skb(skb);
1901 return -ENXIO;
1902 }
1903
1904 /* Incomming skb */
1905 bt_cb(skb)->incoming = 1;
1906
1907 /* Time stamp */
1908 __net_timestamp(skb);
1909
76bca880 1910 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 1911 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 1912
76bca880
MH
1913 return 0;
1914}
1915EXPORT_SYMBOL(hci_recv_frame);
1916
33e882a5 1917static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1e429f38 1918 int count, __u8 index)
33e882a5
SS
1919{
1920 int len = 0;
1921 int hlen = 0;
1922 int remain = count;
1923 struct sk_buff *skb;
1924 struct bt_skb_cb *scb;
1925
1926 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1927 index >= NUM_REASSEMBLY)
1928 return -EILSEQ;
1929
1930 skb = hdev->reassembly[index];
1931
1932 if (!skb) {
1933 switch (type) {
1934 case HCI_ACLDATA_PKT:
1935 len = HCI_MAX_FRAME_SIZE;
1936 hlen = HCI_ACL_HDR_SIZE;
1937 break;
1938 case HCI_EVENT_PKT:
1939 len = HCI_MAX_EVENT_SIZE;
1940 hlen = HCI_EVENT_HDR_SIZE;
1941 break;
1942 case HCI_SCODATA_PKT:
1943 len = HCI_MAX_SCO_SIZE;
1944 hlen = HCI_SCO_HDR_SIZE;
1945 break;
1946 }
1947
1e429f38 1948 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
1949 if (!skb)
1950 return -ENOMEM;
1951
1952 scb = (void *) skb->cb;
1953 scb->expect = hlen;
1954 scb->pkt_type = type;
1955
1956 skb->dev = (void *) hdev;
1957 hdev->reassembly[index] = skb;
1958 }
1959
1960 while (count) {
1961 scb = (void *) skb->cb;
70c1f20b 1962 len = min_t(__u16, scb->expect, count);
33e882a5
SS
1963
1964 memcpy(skb_put(skb, len), data, len);
1965
1966 count -= len;
1967 data += len;
1968 scb->expect -= len;
1969 remain = count;
1970
1971 switch (type) {
1972 case HCI_EVENT_PKT:
1973 if (skb->len == HCI_EVENT_HDR_SIZE) {
1974 struct hci_event_hdr *h = hci_event_hdr(skb);
1975 scb->expect = h->plen;
1976
1977 if (skb_tailroom(skb) < scb->expect) {
1978 kfree_skb(skb);
1979 hdev->reassembly[index] = NULL;
1980 return -ENOMEM;
1981 }
1982 }
1983 break;
1984
1985 case HCI_ACLDATA_PKT:
1986 if (skb->len == HCI_ACL_HDR_SIZE) {
1987 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1988 scb->expect = __le16_to_cpu(h->dlen);
1989
1990 if (skb_tailroom(skb) < scb->expect) {
1991 kfree_skb(skb);
1992 hdev->reassembly[index] = NULL;
1993 return -ENOMEM;
1994 }
1995 }
1996 break;
1997
1998 case HCI_SCODATA_PKT:
1999 if (skb->len == HCI_SCO_HDR_SIZE) {
2000 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2001 scb->expect = h->dlen;
2002
2003 if (skb_tailroom(skb) < scb->expect) {
2004 kfree_skb(skb);
2005 hdev->reassembly[index] = NULL;
2006 return -ENOMEM;
2007 }
2008 }
2009 break;
2010 }
2011
2012 if (scb->expect == 0) {
2013 /* Complete frame */
2014
2015 bt_cb(skb)->pkt_type = type;
2016 hci_recv_frame(skb);
2017
2018 hdev->reassembly[index] = NULL;
2019 return remain;
2020 }
2021 }
2022
2023 return remain;
2024}
2025
ef222013
MH
2026int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2027{
f39a3c06
SS
2028 int rem = 0;
2029
ef222013
MH
2030 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2031 return -EILSEQ;
2032
da5f6c37 2033 while (count) {
1e429f38 2034 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
2035 if (rem < 0)
2036 return rem;
ef222013 2037
f39a3c06
SS
2038 data += (count - rem);
2039 count = rem;
f81c6224 2040 }
ef222013 2041
f39a3c06 2042 return rem;
ef222013
MH
2043}
2044EXPORT_SYMBOL(hci_recv_fragment);
2045
99811510
SS
2046#define STREAM_REASSEMBLY 0
2047
2048int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2049{
2050 int type;
2051 int rem = 0;
2052
da5f6c37 2053 while (count) {
99811510
SS
2054 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2055
2056 if (!skb) {
2057 struct { char type; } *pkt;
2058
2059 /* Start of the frame */
2060 pkt = data;
2061 type = pkt->type;
2062
2063 data++;
2064 count--;
2065 } else
2066 type = bt_cb(skb)->pkt_type;
2067
1e429f38
GP
2068 rem = hci_reassembly(hdev, type, data, count,
2069 STREAM_REASSEMBLY);
99811510
SS
2070 if (rem < 0)
2071 return rem;
2072
2073 data += (count - rem);
2074 count = rem;
f81c6224 2075 }
99811510
SS
2076
2077 return rem;
2078}
2079EXPORT_SYMBOL(hci_recv_stream_fragment);
2080
1da177e4
LT
2081/* ---- Interface to upper protocols ---- */
2082
1da177e4
LT
2083int hci_register_cb(struct hci_cb *cb)
2084{
2085 BT_DBG("%p name %s", cb, cb->name);
2086
f20d09d5 2087 write_lock(&hci_cb_list_lock);
1da177e4 2088 list_add(&cb->list, &hci_cb_list);
f20d09d5 2089 write_unlock(&hci_cb_list_lock);
1da177e4
LT
2090
2091 return 0;
2092}
2093EXPORT_SYMBOL(hci_register_cb);
2094
2095int hci_unregister_cb(struct hci_cb *cb)
2096{
2097 BT_DBG("%p name %s", cb, cb->name);
2098
f20d09d5 2099 write_lock(&hci_cb_list_lock);
1da177e4 2100 list_del(&cb->list);
f20d09d5 2101 write_unlock(&hci_cb_list_lock);
1da177e4
LT
2102
2103 return 0;
2104}
2105EXPORT_SYMBOL(hci_unregister_cb);
2106
2107static int hci_send_frame(struct sk_buff *skb)
2108{
2109 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2110
2111 if (!hdev) {
2112 kfree_skb(skb);
2113 return -ENODEV;
2114 }
2115
0d48d939 2116 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 2117
cd82e61c
MH
2118 /* Time stamp */
2119 __net_timestamp(skb);
1da177e4 2120
cd82e61c
MH
2121 /* Send copy to monitor */
2122 hci_send_to_monitor(hdev, skb);
2123
2124 if (atomic_read(&hdev->promisc)) {
2125 /* Send copy to the sockets */
470fe1b5 2126 hci_send_to_sock(hdev, skb);
1da177e4
LT
2127 }
2128
2129 /* Get rid of skb owner, prior to sending to the driver. */
2130 skb_orphan(skb);
2131
2132 return hdev->send(skb);
2133}
2134
2135/* Send HCI command */
a9de9248 2136int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1da177e4
LT
2137{
2138 int len = HCI_COMMAND_HDR_SIZE + plen;
2139 struct hci_command_hdr *hdr;
2140 struct sk_buff *skb;
2141
a9de9248 2142 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1da177e4
LT
2143
2144 skb = bt_skb_alloc(len, GFP_ATOMIC);
2145 if (!skb) {
ef222013 2146 BT_ERR("%s no memory for command", hdev->name);
1da177e4
LT
2147 return -ENOMEM;
2148 }
2149
2150 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 2151 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
2152 hdr->plen = plen;
2153
2154 if (plen)
2155 memcpy(skb_put(skb, plen), param, plen);
2156
2157 BT_DBG("skb len %d", skb->len);
2158
0d48d939 2159 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1da177e4 2160 skb->dev = (void *) hdev;
c78ae283 2161
a5040efa
JH
2162 if (test_bit(HCI_INIT, &hdev->flags))
2163 hdev->init_last_cmd = opcode;
2164
1da177e4 2165 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 2166 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
2167
2168 return 0;
2169}
1da177e4
LT
2170
2171/* Get data from the previously sent command */
a9de9248 2172void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
2173{
2174 struct hci_command_hdr *hdr;
2175
2176 if (!hdev->sent_cmd)
2177 return NULL;
2178
2179 hdr = (void *) hdev->sent_cmd->data;
2180
a9de9248 2181 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
2182 return NULL;
2183
a9de9248 2184 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1da177e4
LT
2185
2186 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2187}
2188
2189/* Send ACL data */
2190static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2191{
2192 struct hci_acl_hdr *hdr;
2193 int len = skb->len;
2194
badff6d0
ACM
2195 skb_push(skb, HCI_ACL_HDR_SIZE);
2196 skb_reset_transport_header(skb);
9c70220b 2197 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
2198 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2199 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
2200}
2201
73d80deb
LAD
2202static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
2203 struct sk_buff *skb, __u16 flags)
1da177e4
LT
2204{
2205 struct hci_dev *hdev = conn->hdev;
2206 struct sk_buff *list;
2207
70f23020
AE
2208 list = skb_shinfo(skb)->frag_list;
2209 if (!list) {
1da177e4
LT
2210 /* Non fragmented */
2211 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2212
73d80deb 2213 skb_queue_tail(queue, skb);
1da177e4
LT
2214 } else {
2215 /* Fragmented */
2216 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2217
2218 skb_shinfo(skb)->frag_list = NULL;
2219
2220 /* Queue all fragments atomically */
af3e6359 2221 spin_lock(&queue->lock);
1da177e4 2222
73d80deb 2223 __skb_queue_tail(queue, skb);
e702112f
AE
2224
2225 flags &= ~ACL_START;
2226 flags |= ACL_CONT;
1da177e4
LT
2227 do {
2228 skb = list; list = list->next;
8e87d142 2229
1da177e4 2230 skb->dev = (void *) hdev;
0d48d939 2231 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 2232 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
2233
2234 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2235
73d80deb 2236 __skb_queue_tail(queue, skb);
1da177e4
LT
2237 } while (list);
2238
af3e6359 2239 spin_unlock(&queue->lock);
1da177e4 2240 }
73d80deb
LAD
2241}
2242
2243void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2244{
2245 struct hci_conn *conn = chan->conn;
2246 struct hci_dev *hdev = conn->hdev;
2247
2248 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2249
2250 skb->dev = (void *) hdev;
2251 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2252 hci_add_acl_hdr(skb, conn->handle, flags);
2253
2254 hci_queue_acl(conn, &chan->data_q, skb, flags);
1da177e4 2255
3eff45ea 2256 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4
LT
2257}
2258EXPORT_SYMBOL(hci_send_acl);
2259
2260/* Send SCO data */
0d861d8b 2261void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
2262{
2263 struct hci_dev *hdev = conn->hdev;
2264 struct hci_sco_hdr hdr;
2265
2266 BT_DBG("%s len %d", hdev->name, skb->len);
2267
aca3192c 2268 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
2269 hdr.dlen = skb->len;
2270
badff6d0
ACM
2271 skb_push(skb, HCI_SCO_HDR_SIZE);
2272 skb_reset_transport_header(skb);
9c70220b 2273 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4
LT
2274
2275 skb->dev = (void *) hdev;
0d48d939 2276 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 2277
1da177e4 2278 skb_queue_tail(&conn->data_q, skb);
3eff45ea 2279 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4
LT
2280}
2281EXPORT_SYMBOL(hci_send_sco);
2282
2283/* ---- HCI TX task (outgoing data) ---- */
2284
2285/* HCI Connection scheduler */
2286static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2287{
2288 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2289 struct hci_conn *conn = NULL, *c;
1da177e4 2290 int num = 0, min = ~0;
1da177e4 2291
8e87d142 2292 /* We don't have to lock device here. Connections are always
1da177e4 2293 * added and removed with TX task disabled. */
bf4c6325
GP
2294
2295 rcu_read_lock();
2296
2297 list_for_each_entry_rcu(c, &h->list, list) {
769be974 2298 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 2299 continue;
769be974
MH
2300
2301 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2302 continue;
2303
1da177e4
LT
2304 num++;
2305
2306 if (c->sent < min) {
2307 min = c->sent;
2308 conn = c;
2309 }
52087a79
LAD
2310
2311 if (hci_conn_num(hdev, type) == num)
2312 break;
1da177e4
LT
2313 }
2314
bf4c6325
GP
2315 rcu_read_unlock();
2316
1da177e4 2317 if (conn) {
6ed58ec5
VT
2318 int cnt, q;
2319
2320 switch (conn->type) {
2321 case ACL_LINK:
2322 cnt = hdev->acl_cnt;
2323 break;
2324 case SCO_LINK:
2325 case ESCO_LINK:
2326 cnt = hdev->sco_cnt;
2327 break;
2328 case LE_LINK:
2329 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2330 break;
2331 default:
2332 cnt = 0;
2333 BT_ERR("Unknown link type");
2334 }
2335
2336 q = cnt / num;
1da177e4
LT
2337 *quote = q ? q : 1;
2338 } else
2339 *quote = 0;
2340
2341 BT_DBG("conn %p quote %d", conn, *quote);
2342 return conn;
2343}
2344
/* Links of @type have stopped acknowledging transmissions for too long:
 * disconnect every connection of that type that still has unacked
 * packets (c->sent) so its buffer credits can be reclaimed.
 */
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			/* 0x13: "Remote User Terminated Connection"
			 * HCI disconnect reason */
			hci_acl_disconn(c, 0x13);
		}
	}

	rcu_read_unlock();
}
2365
73d80deb
LAD
2366static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2367 int *quote)
1da177e4 2368{
73d80deb
LAD
2369 struct hci_conn_hash *h = &hdev->conn_hash;
2370 struct hci_chan *chan = NULL;
2371 int num = 0, min = ~0, cur_prio = 0;
1da177e4 2372 struct hci_conn *conn;
73d80deb
LAD
2373 int cnt, q, conn_num = 0;
2374
2375 BT_DBG("%s", hdev->name);
2376
bf4c6325
GP
2377 rcu_read_lock();
2378
2379 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
2380 struct hci_chan *tmp;
2381
2382 if (conn->type != type)
2383 continue;
2384
2385 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2386 continue;
2387
2388 conn_num++;
2389
8192edef 2390 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
2391 struct sk_buff *skb;
2392
2393 if (skb_queue_empty(&tmp->data_q))
2394 continue;
2395
2396 skb = skb_peek(&tmp->data_q);
2397 if (skb->priority < cur_prio)
2398 continue;
2399
2400 if (skb->priority > cur_prio) {
2401 num = 0;
2402 min = ~0;
2403 cur_prio = skb->priority;
2404 }
2405
2406 num++;
2407
2408 if (conn->sent < min) {
2409 min = conn->sent;
2410 chan = tmp;
2411 }
2412 }
2413
2414 if (hci_conn_num(hdev, type) == conn_num)
2415 break;
2416 }
2417
bf4c6325
GP
2418 rcu_read_unlock();
2419
73d80deb
LAD
2420 if (!chan)
2421 return NULL;
2422
2423 switch (chan->conn->type) {
2424 case ACL_LINK:
2425 cnt = hdev->acl_cnt;
2426 break;
2427 case SCO_LINK:
2428 case ESCO_LINK:
2429 cnt = hdev->sco_cnt;
2430 break;
2431 case LE_LINK:
2432 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2433 break;
2434 default:
2435 cnt = 0;
2436 BT_ERR("Unknown link type");
2437 }
2438
2439 q = cnt / num;
2440 *quote = q ? q : 1;
2441 BT_DBG("chan %p quote %d", chan, *quote);
2442 return chan;
2443}
2444
/* Anti-starvation pass run after a scheduling round: any channel of
 * @type that sent nothing this round gets the frame at the head of its
 * queue promoted to HCI_PRIO_MAX - 1 so higher-priority channels cannot
 * starve it forever.  Channels that did send just get their per-round
 * counter reset.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel transmitted this round - reset counter */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		/* Every connection of this type seen - stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
2494
b71d385a
AE
2495static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2496{
2497 /* Calculate count of blocks used by this packet */
2498 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2499}
2500
/* Run stalled-link cleanup when all credits (@cnt) are exhausted and no
 * ACL transmission has been possible for longer than HCI_ACL_TX_TIMEOUT.
 * Raw (userspace-driven) devices are exempt.
 */
static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
					msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
1da177e4 2511
/* Packet-based ACL scheduling: while the controller has free packet
 * credits (hdev->acl_cnt), repeatedly pick the best channel and send up
 * to its fair quota of frames, stopping a channel's burst early if a
 * lower-priority frame reaches the head of its queue.
 */
static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	/* Kill stalled links if we have run completely dry of credits */
	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* One credit consumed; count against channel and
			 * connection for future scheduling decisions */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent - boost channels that were starved */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2549
/* Block-based ACL scheduling: like hci_sched_acl_pkt() but credits are
 * buffer blocks (hdev->block_cnt) rather than whole packets; each frame
 * consumes __get_blocks() credits.
 */
static inline void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	/* Kill stalled links if we have run completely dry of credits */
	__check_timeout(hdev, cnt);

	while (hdev->block_cnt > 0 &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			/* Frame no longer fits in the remaining blocks.
			 * NOTE(review): this returns with the dequeued skb
			 * neither sent nor freed, and skips the
			 * hci_prio_recalculate() below - confirm intended */
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* Consume block credits from the pool and quota */
			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	/* Something was sent - boost channels that were starved */
	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2595
2596static inline void hci_sched_acl(struct hci_dev *hdev)
2597{
2598 BT_DBG("%s", hdev->name);
2599
2600 if (!hci_conn_num(hdev, ACL_LINK))
2601 return;
2602
2603 switch (hdev->flow_ctl_mode) {
2604 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2605 hci_sched_acl_pkt(hdev);
2606 break;
2607
2608 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2609 hci_sched_acl_blk(hdev);
2610 break;
2611 }
2612}
2613
1da177e4
LT
2614/* Schedule SCO */
2615static inline void hci_sched_sco(struct hci_dev *hdev)
2616{
2617 struct hci_conn *conn;
2618 struct sk_buff *skb;
2619 int quote;
2620
2621 BT_DBG("%s", hdev->name);
2622
52087a79
LAD
2623 if (!hci_conn_num(hdev, SCO_LINK))
2624 return;
2625
1da177e4
LT
2626 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2627 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2628 BT_DBG("skb %p len %d", skb, skb->len);
2629 hci_send_frame(skb);
2630
2631 conn->sent++;
2632 if (conn->sent == ~0)
2633 conn->sent = 0;
2634 }
2635 }
2636}
2637
b6a0dc82
MH
2638static inline void hci_sched_esco(struct hci_dev *hdev)
2639{
2640 struct hci_conn *conn;
2641 struct sk_buff *skb;
2642 int quote;
2643
2644 BT_DBG("%s", hdev->name);
2645
52087a79
LAD
2646 if (!hci_conn_num(hdev, ESCO_LINK))
2647 return;
2648
b6a0dc82
MH
2649 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2650 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2651 BT_DBG("skb %p len %d", skb, skb->len);
2652 hci_send_frame(skb);
2653
2654 conn->sent++;
2655 if (conn->sent == ~0)
2656 conn->sent = 0;
2657 }
2658 }
2659}
2660
/* LE link scheduling: same channel-based scheme as hci_sched_acl_pkt(),
 * drawing on the dedicated LE buffer pool when the controller has one
 * (le_pkts != 0) and on the shared ACL pool otherwise.
 */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Pick the credit pool: dedicated LE buffers if present,
	 * otherwise the shared ACL buffers */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to whichever pool was used */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was sent - boost channels that were starved */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2711
/* TX work item: run every link-type scheduler in turn, then flush any
 * raw (unknown type) packets straight to the driver.  Runs on
 * hdev->workqueue; queued by the senders above and whenever buffer
 * credits are returned.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}
2734
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
/* Strip the ACL header, resolve the connection handle and pass the
 * payload up to L2CAP.  Frames for unknown handles are logged and
 * dropped.  Ownership of @skb transfers to l2cap_recv_acldata() on
 * success; otherwise it is freed here.
 */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The handle field carries both the connection handle and the
	 * packet boundary/broadcast flags */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2771
/* SCO data packet */
/* Strip the SCO header, resolve the connection handle and pass the
 * payload up to the SCO layer.  Frames for unknown handles are logged
 * and dropped.  Ownership of @skb transfers to sco_recv_scodata() on
 * success; otherwise it is freed here.
 */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2802
/* RX work item: drain hdev->rx_q and dispatch each frame by packet
 * type.  Every frame is first mirrored to the monitor socket and, in
 * promiscuous mode, to the raw sockets.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* Raw devices are driven entirely from userspace - the
		 * kernel stack must not process their traffic */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type - drop it */
			kfree_skb(skb);
			break;
		}
	}
}
2857
/* Command work item: send the next queued HCI command when the
 * controller has a free command credit (cmd_cnt).  A clone of the
 * command is stashed in hdev->sent_cmd for later reference, and the
 * command timer is armed to catch controllers that never respond
 * (except during reset, when it is cancelled).
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the clone of the previously sent command */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone allocation failed - requeue the command
			 * at the head and reschedule ourselves */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
/* Start a general inquiry (device discovery) of the given @length.
 * Flushes the inquiry cache first so results start fresh.
 * Returns -EINPROGRESS if an inquiry is already running, otherwise the
 * result of sending the HCI Inquiry command.
 */
int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return -EINPROGRESS;

	inquiry_cache_flush(hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
023d5049
AG
2908
2909int hci_cancel_inquiry(struct hci_dev *hdev)
2910{
2911 BT_DBG("%s", hdev->name);
2912
2913 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2914 return -EPERM;
2915
2916 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2917}