]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - net/bluetooth/hci_core.c
Bluetooth: Create empty l2cap ops function
[mirror_ubuntu-artful-kernel.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
8c520a59 28#include <linux/export.h>
3df92b31 29#include <linux/idr.h>
1da177e4 30
8c520a59 31#include <linux/rfkill.h>
1da177e4
LT
32
33#include <net/bluetooth/bluetooth.h>
34#include <net/bluetooth/hci_core.h>
35
ab81cbf9
JH
36#define AUTO_OFF_TIMEOUT 2000
37
b78752cc 38static void hci_rx_work(struct work_struct *work);
c347b765 39static void hci_cmd_work(struct work_struct *work);
3eff45ea 40static void hci_tx_work(struct work_struct *work);
1da177e4 41
1da177e4
LT
42/* HCI device list */
43LIST_HEAD(hci_dev_list);
44DEFINE_RWLOCK(hci_dev_list_lock);
45
46/* HCI callback list */
47LIST_HEAD(hci_cb_list);
48DEFINE_RWLOCK(hci_cb_list_lock);
49
3df92b31
SL
50/* HCI ID Numbering */
51static DEFINE_IDA(hci_index_ida);
52
1da177e4
LT
53/* ---- HCI notifications ---- */
54
/* Forward a device state event to the HCI socket layer so that
 * monitoring sockets hear about it.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
59
60/* ---- HCI requests ---- */
61
23bb5763 62void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
1da177e4 63{
23bb5763
JH
64 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
65
a5040efa
JH
66 /* If this is the init phase check if the completed command matches
67 * the last init command, and if not just return.
68 */
75fb0e32
JH
69 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
70 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1036b890 71 u16 opcode = __le16_to_cpu(sent->opcode);
75fb0e32
JH
72 struct sk_buff *skb;
73
74 /* Some CSR based controllers generate a spontaneous
75 * reset complete event during init and any pending
76 * command will never be completed. In such a case we
77 * need to resend whatever was the last sent
78 * command.
79 */
80
1036b890 81 if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
75fb0e32
JH
82 return;
83
84 skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
85 if (skb) {
86 skb_queue_head(&hdev->cmd_q, skb);
87 queue_work(hdev->workqueue, &hdev->cmd_work);
88 }
89
23bb5763 90 return;
75fb0e32 91 }
1da177e4
LT
92
93 if (hdev->req_status == HCI_REQ_PEND) {
94 hdev->req_result = result;
95 hdev->req_status = HCI_REQ_DONE;
96 wake_up_interruptible(&hdev->req_wait_q);
97 }
98}
99
100static void hci_req_cancel(struct hci_dev *hdev, int err)
101{
102 BT_DBG("%s err 0x%2.2x", hdev->name, err);
103
104 if (hdev->req_status == HCI_REQ_PEND) {
105 hdev->req_result = err;
106 hdev->req_status = HCI_REQ_CANCELED;
107 wake_up_interruptible(&hdev->req_wait_q);
108 }
109}
110
111/* Execute request and wait for completion. */
a8c5fb1a
GP
112static int __hci_request(struct hci_dev *hdev,
113 void (*req)(struct hci_dev *hdev, unsigned long opt),
114 unsigned long opt, __u32 timeout)
1da177e4
LT
115{
116 DECLARE_WAITQUEUE(wait, current);
117 int err = 0;
118
119 BT_DBG("%s start", hdev->name);
120
121 hdev->req_status = HCI_REQ_PEND;
122
123 add_wait_queue(&hdev->req_wait_q, &wait);
124 set_current_state(TASK_INTERRUPTIBLE);
125
126 req(hdev, opt);
127 schedule_timeout(timeout);
128
129 remove_wait_queue(&hdev->req_wait_q, &wait);
130
131 if (signal_pending(current))
132 return -EINTR;
133
134 switch (hdev->req_status) {
135 case HCI_REQ_DONE:
e175072f 136 err = -bt_to_errno(hdev->req_result);
1da177e4
LT
137 break;
138
139 case HCI_REQ_CANCELED:
140 err = -hdev->req_result;
141 break;
142
143 default:
144 err = -ETIMEDOUT;
145 break;
3ff50b79 146 }
1da177e4 147
a5040efa 148 hdev->req_status = hdev->req_result = 0;
1da177e4
LT
149
150 BT_DBG("%s end: err %d", hdev->name, err);
151
152 return err;
153}
154
6039aa73
GP
155static int hci_request(struct hci_dev *hdev,
156 void (*req)(struct hci_dev *hdev, unsigned long opt),
157 unsigned long opt, __u32 timeout)
1da177e4
LT
158{
159 int ret;
160
7c6a329e
MH
161 if (!test_bit(HCI_UP, &hdev->flags))
162 return -ENETDOWN;
163
1da177e4
LT
164 /* Serialize all requests */
165 hci_req_lock(hdev);
166 ret = __hci_request(hdev, req, opt, timeout);
167 hci_req_unlock(hdev);
168
169 return ret;
170}
171
172static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
173{
174 BT_DBG("%s %ld", hdev->name, opt);
175
176 /* Reset device */
f630cf0d 177 set_bit(HCI_RESET, &hdev->flags);
a9de9248 178 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
1da177e4
LT
179}
180
e61ef499 181static void bredr_init(struct hci_dev *hdev)
1da177e4 182{
b0916ea0 183 struct hci_cp_delete_stored_link_key cp;
1ebb9252 184 __le16 param;
89f2783d 185 __u8 flt_type;
1da177e4 186
2455a3ea
AE
187 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
188
1da177e4
LT
189 /* Mandatory initialization */
190
191 /* Reset */
a6c511c6 192 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
e61ef499
AE
193 set_bit(HCI_RESET, &hdev->flags);
194 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
f630cf0d 195 }
1da177e4
LT
196
197 /* Read Local Supported Features */
a9de9248 198 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1da177e4 199
1143e5a6 200 /* Read Local Version */
a9de9248 201 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1143e5a6 202
1da177e4 203 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
a9de9248 204 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1da177e4 205
1da177e4 206 /* Read BD Address */
a9de9248
MH
207 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
208
209 /* Read Class of Device */
210 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
211
212 /* Read Local Name */
213 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1da177e4
LT
214
215 /* Read Voice Setting */
a9de9248 216 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1da177e4
LT
217
218 /* Optional initialization */
219
220 /* Clear Event Filters */
89f2783d 221 flt_type = HCI_FLT_CLEAR_ALL;
a9de9248 222 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1da177e4 223
1da177e4 224 /* Connection accept timeout ~20 secs */
82781e63 225 param = __constant_cpu_to_le16(0x7d00);
a9de9248 226 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
b0916ea0
JH
227
228 bacpy(&cp.bdaddr, BDADDR_ANY);
229 cp.delete_all = 1;
230 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
1da177e4
LT
231}
232
e61ef499
AE
233static void amp_init(struct hci_dev *hdev)
234{
2455a3ea
AE
235 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
236
e61ef499
AE
237 /* Reset */
238 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
239
240 /* Read Local Version */
241 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
6bcbc489
AE
242
243 /* Read Local AMP Info */
244 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
e61ef499
AE
245}
246
247static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
248{
249 struct sk_buff *skb;
250
251 BT_DBG("%s %ld", hdev->name, opt);
252
253 /* Driver initialization */
254
255 /* Special commands */
256 while ((skb = skb_dequeue(&hdev->driver_init))) {
257 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
258 skb->dev = (void *) hdev;
259
260 skb_queue_tail(&hdev->cmd_q, skb);
261 queue_work(hdev->workqueue, &hdev->cmd_work);
262 }
263 skb_queue_purge(&hdev->driver_init);
264
265 switch (hdev->dev_type) {
266 case HCI_BREDR:
267 bredr_init(hdev);
268 break;
269
270 case HCI_AMP:
271 amp_init(hdev);
272 break;
273
274 default:
275 BT_ERR("Unknown device type %d", hdev->dev_type);
276 break;
277 }
278
279}
280
6ed58ec5
VT
281static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
282{
283 BT_DBG("%s", hdev->name);
284
285 /* Read LE buffer size */
286 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
287}
288
1da177e4
LT
289static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
290{
291 __u8 scan = opt;
292
293 BT_DBG("%s %x", hdev->name, scan);
294
295 /* Inquiry and Page scans */
a9de9248 296 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
297}
298
299static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
300{
301 __u8 auth = opt;
302
303 BT_DBG("%s %x", hdev->name, auth);
304
305 /* Authentication */
a9de9248 306 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
307}
308
309static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
310{
311 __u8 encrypt = opt;
312
313 BT_DBG("%s %x", hdev->name, encrypt);
314
e4e8e37c 315 /* Encryption */
a9de9248 316 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
317}
318
e4e8e37c
MH
319static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
320{
321 __le16 policy = cpu_to_le16(opt);
322
a418b893 323 BT_DBG("%s %x", hdev->name, policy);
e4e8e37c
MH
324
325 /* Default link policy */
326 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
327}
328
8e87d142 329/* Get HCI device by index.
1da177e4
LT
330 * Device is held on return. */
331struct hci_dev *hci_dev_get(int index)
332{
8035ded4 333 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
334
335 BT_DBG("%d", index);
336
337 if (index < 0)
338 return NULL;
339
340 read_lock(&hci_dev_list_lock);
8035ded4 341 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
342 if (d->id == index) {
343 hdev = hci_dev_hold(d);
344 break;
345 }
346 }
347 read_unlock(&hci_dev_list_lock);
348 return hdev;
349}
1da177e4
LT
350
351/* ---- Inquiry support ---- */
ff9ef578 352
30dc78e1
JH
353bool hci_discovery_active(struct hci_dev *hdev)
354{
355 struct discovery_state *discov = &hdev->discovery;
356
6fbe195d 357 switch (discov->state) {
343f935b 358 case DISCOVERY_FINDING:
6fbe195d 359 case DISCOVERY_RESOLVING:
30dc78e1
JH
360 return true;
361
6fbe195d
AG
362 default:
363 return false;
364 }
30dc78e1
JH
365}
366
ff9ef578
JH
367void hci_discovery_set_state(struct hci_dev *hdev, int state)
368{
369 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
370
371 if (hdev->discovery.state == state)
372 return;
373
374 switch (state) {
375 case DISCOVERY_STOPPED:
7b99b659
AG
376 if (hdev->discovery.state != DISCOVERY_STARTING)
377 mgmt_discovering(hdev, 0);
ff9ef578
JH
378 break;
379 case DISCOVERY_STARTING:
380 break;
343f935b 381 case DISCOVERY_FINDING:
ff9ef578
JH
382 mgmt_discovering(hdev, 1);
383 break;
30dc78e1
JH
384 case DISCOVERY_RESOLVING:
385 break;
ff9ef578
JH
386 case DISCOVERY_STOPPING:
387 break;
388 }
389
390 hdev->discovery.state = state;
391}
392
1da177e4
LT
393static void inquiry_cache_flush(struct hci_dev *hdev)
394{
30883512 395 struct discovery_state *cache = &hdev->discovery;
b57c1a56 396 struct inquiry_entry *p, *n;
1da177e4 397
561aafbc
JH
398 list_for_each_entry_safe(p, n, &cache->all, all) {
399 list_del(&p->all);
b57c1a56 400 kfree(p);
1da177e4 401 }
561aafbc
JH
402
403 INIT_LIST_HEAD(&cache->unknown);
404 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
405}
406
a8c5fb1a
GP
407struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
408 bdaddr_t *bdaddr)
1da177e4 409{
30883512 410 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
411 struct inquiry_entry *e;
412
413 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
414
561aafbc
JH
415 list_for_each_entry(e, &cache->all, all) {
416 if (!bacmp(&e->data.bdaddr, bdaddr))
417 return e;
418 }
419
420 return NULL;
421}
422
423struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 424 bdaddr_t *bdaddr)
561aafbc 425{
30883512 426 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
427 struct inquiry_entry *e;
428
429 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
430
431 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 432 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
433 return e;
434 }
435
436 return NULL;
1da177e4
LT
437}
438
30dc78e1 439struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
440 bdaddr_t *bdaddr,
441 int state)
30dc78e1
JH
442{
443 struct discovery_state *cache = &hdev->discovery;
444 struct inquiry_entry *e;
445
446 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
447
448 list_for_each_entry(e, &cache->resolve, list) {
449 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
450 return e;
451 if (!bacmp(&e->data.bdaddr, bdaddr))
452 return e;
453 }
454
455 return NULL;
456}
457
a3d4e20a 458void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
04124681 459 struct inquiry_entry *ie)
a3d4e20a
JH
460{
461 struct discovery_state *cache = &hdev->discovery;
462 struct list_head *pos = &cache->resolve;
463 struct inquiry_entry *p;
464
465 list_del(&ie->list);
466
467 list_for_each_entry(p, &cache->resolve, list) {
468 if (p->name_state != NAME_PENDING &&
a8c5fb1a 469 abs(p->data.rssi) >= abs(ie->data.rssi))
a3d4e20a
JH
470 break;
471 pos = &p->list;
472 }
473
474 list_add(&ie->list, pos);
475}
476
3175405b 477bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
04124681 478 bool name_known, bool *ssp)
1da177e4 479{
30883512 480 struct discovery_state *cache = &hdev->discovery;
70f23020 481 struct inquiry_entry *ie;
1da177e4
LT
482
483 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
484
388fc8fa
JH
485 if (ssp)
486 *ssp = data->ssp_mode;
487
70f23020 488 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
a3d4e20a 489 if (ie) {
388fc8fa
JH
490 if (ie->data.ssp_mode && ssp)
491 *ssp = true;
492
a3d4e20a 493 if (ie->name_state == NAME_NEEDED &&
a8c5fb1a 494 data->rssi != ie->data.rssi) {
a3d4e20a
JH
495 ie->data.rssi = data->rssi;
496 hci_inquiry_cache_update_resolve(hdev, ie);
497 }
498
561aafbc 499 goto update;
a3d4e20a 500 }
561aafbc
JH
501
502 /* Entry not in the cache. Add new one. */
503 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
504 if (!ie)
3175405b 505 return false;
561aafbc
JH
506
507 list_add(&ie->all, &cache->all);
508
509 if (name_known) {
510 ie->name_state = NAME_KNOWN;
511 } else {
512 ie->name_state = NAME_NOT_KNOWN;
513 list_add(&ie->list, &cache->unknown);
514 }
70f23020 515
561aafbc
JH
516update:
517 if (name_known && ie->name_state != NAME_KNOWN &&
a8c5fb1a 518 ie->name_state != NAME_PENDING) {
561aafbc
JH
519 ie->name_state = NAME_KNOWN;
520 list_del(&ie->list);
1da177e4
LT
521 }
522
70f23020
AE
523 memcpy(&ie->data, data, sizeof(*data));
524 ie->timestamp = jiffies;
1da177e4 525 cache->timestamp = jiffies;
3175405b
JH
526
527 if (ie->name_state == NAME_NOT_KNOWN)
528 return false;
529
530 return true;
1da177e4
LT
531}
532
533static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
534{
30883512 535 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
536 struct inquiry_info *info = (struct inquiry_info *) buf;
537 struct inquiry_entry *e;
538 int copied = 0;
539
561aafbc 540 list_for_each_entry(e, &cache->all, all) {
1da177e4 541 struct inquiry_data *data = &e->data;
b57c1a56
JH
542
543 if (copied >= num)
544 break;
545
1da177e4
LT
546 bacpy(&info->bdaddr, &data->bdaddr);
547 info->pscan_rep_mode = data->pscan_rep_mode;
548 info->pscan_period_mode = data->pscan_period_mode;
549 info->pscan_mode = data->pscan_mode;
550 memcpy(info->dev_class, data->dev_class, 3);
551 info->clock_offset = data->clock_offset;
b57c1a56 552
1da177e4 553 info++;
b57c1a56 554 copied++;
1da177e4
LT
555 }
556
557 BT_DBG("cache %p, copied %d", cache, copied);
558 return copied;
559}
560
561static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
562{
563 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
564 struct hci_cp_inquiry cp;
565
566 BT_DBG("%s", hdev->name);
567
568 if (test_bit(HCI_INQUIRY, &hdev->flags))
569 return;
570
571 /* Start Inquiry */
572 memcpy(&cp.lap, &ir->lap, 3);
573 cp.length = ir->length;
574 cp.num_rsp = ir->num_rsp;
a9de9248 575 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
576}
577
578int hci_inquiry(void __user *arg)
579{
580 __u8 __user *ptr = arg;
581 struct hci_inquiry_req ir;
582 struct hci_dev *hdev;
583 int err = 0, do_inquiry = 0, max_rsp;
584 long timeo;
585 __u8 *buf;
586
587 if (copy_from_user(&ir, ptr, sizeof(ir)))
588 return -EFAULT;
589
5a08ecce
AE
590 hdev = hci_dev_get(ir.dev_id);
591 if (!hdev)
1da177e4
LT
592 return -ENODEV;
593
09fd0de5 594 hci_dev_lock(hdev);
8e87d142 595 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 596 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1da177e4
LT
597 inquiry_cache_flush(hdev);
598 do_inquiry = 1;
599 }
09fd0de5 600 hci_dev_unlock(hdev);
1da177e4 601
04837f64 602 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
603
604 if (do_inquiry) {
605 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
606 if (err < 0)
607 goto done;
608 }
1da177e4 609
8fc9ced3
GP
610 /* for unlimited number of responses we will use buffer with
611 * 255 entries
612 */
1da177e4
LT
613 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
614
615 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
616 * copy it to the user space.
617 */
01df8c31 618 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 619 if (!buf) {
1da177e4
LT
620 err = -ENOMEM;
621 goto done;
622 }
623
09fd0de5 624 hci_dev_lock(hdev);
1da177e4 625 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 626 hci_dev_unlock(hdev);
1da177e4
LT
627
628 BT_DBG("num_rsp %d", ir.num_rsp);
629
630 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
631 ptr += sizeof(ir);
632 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 633 ir.num_rsp))
1da177e4 634 err = -EFAULT;
8e87d142 635 } else
1da177e4
LT
636 err = -EFAULT;
637
638 kfree(buf);
639
640done:
641 hci_dev_put(hdev);
642 return err;
643}
644
645/* ---- HCI ioctl helpers ---- */
646
647int hci_dev_open(__u16 dev)
648{
649 struct hci_dev *hdev;
650 int ret = 0;
651
5a08ecce
AE
652 hdev = hci_dev_get(dev);
653 if (!hdev)
1da177e4
LT
654 return -ENODEV;
655
656 BT_DBG("%s %p", hdev->name, hdev);
657
658 hci_req_lock(hdev);
659
94324962
JH
660 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
661 ret = -ENODEV;
662 goto done;
663 }
664
611b30f7
MH
665 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
666 ret = -ERFKILL;
667 goto done;
668 }
669
1da177e4
LT
670 if (test_bit(HCI_UP, &hdev->flags)) {
671 ret = -EALREADY;
672 goto done;
673 }
674
675 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
676 set_bit(HCI_RAW, &hdev->flags);
677
07e3b94a
AE
678 /* Treat all non BR/EDR controllers as raw devices if
679 enable_hs is not set */
680 if (hdev->dev_type != HCI_BREDR && !enable_hs)
943da25d
MH
681 set_bit(HCI_RAW, &hdev->flags);
682
1da177e4
LT
683 if (hdev->open(hdev)) {
684 ret = -EIO;
685 goto done;
686 }
687
688 if (!test_bit(HCI_RAW, &hdev->flags)) {
689 atomic_set(&hdev->cmd_cnt, 1);
690 set_bit(HCI_INIT, &hdev->flags);
a5040efa 691 hdev->init_last_cmd = 0;
1da177e4 692
04837f64 693 ret = __hci_request(hdev, hci_init_req, 0,
a8c5fb1a 694 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4 695
eead27da 696 if (lmp_host_le_capable(hdev))
6ed58ec5 697 ret = __hci_request(hdev, hci_le_init_req, 0,
a8c5fb1a 698 msecs_to_jiffies(HCI_INIT_TIMEOUT));
6ed58ec5 699
1da177e4
LT
700 clear_bit(HCI_INIT, &hdev->flags);
701 }
702
703 if (!ret) {
704 hci_dev_hold(hdev);
705 set_bit(HCI_UP, &hdev->flags);
706 hci_notify(hdev, HCI_DEV_UP);
a8b2d5c2 707 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
09fd0de5 708 hci_dev_lock(hdev);
744cf19e 709 mgmt_powered(hdev, 1);
09fd0de5 710 hci_dev_unlock(hdev);
56e5cb86 711 }
8e87d142 712 } else {
1da177e4 713 /* Init failed, cleanup */
3eff45ea 714 flush_work(&hdev->tx_work);
c347b765 715 flush_work(&hdev->cmd_work);
b78752cc 716 flush_work(&hdev->rx_work);
1da177e4
LT
717
718 skb_queue_purge(&hdev->cmd_q);
719 skb_queue_purge(&hdev->rx_q);
720
721 if (hdev->flush)
722 hdev->flush(hdev);
723
724 if (hdev->sent_cmd) {
725 kfree_skb(hdev->sent_cmd);
726 hdev->sent_cmd = NULL;
727 }
728
729 hdev->close(hdev);
730 hdev->flags = 0;
731 }
732
733done:
734 hci_req_unlock(hdev);
735 hci_dev_put(hdev);
736 return ret;
737}
738
739static int hci_dev_do_close(struct hci_dev *hdev)
740{
741 BT_DBG("%s %p", hdev->name, hdev);
742
28b75a89
AG
743 cancel_work_sync(&hdev->le_scan);
744
1da177e4
LT
745 hci_req_cancel(hdev, ENODEV);
746 hci_req_lock(hdev);
747
748 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
b79f44c1 749 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
750 hci_req_unlock(hdev);
751 return 0;
752 }
753
3eff45ea
GP
754 /* Flush RX and TX works */
755 flush_work(&hdev->tx_work);
b78752cc 756 flush_work(&hdev->rx_work);
1da177e4 757
16ab91ab 758 if (hdev->discov_timeout > 0) {
e0f9309f 759 cancel_delayed_work(&hdev->discov_off);
16ab91ab 760 hdev->discov_timeout = 0;
5e5282bb 761 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
16ab91ab
JH
762 }
763
a8b2d5c2 764 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
7d78525d
JH
765 cancel_delayed_work(&hdev->service_cache);
766
7ba8b4be
AG
767 cancel_delayed_work_sync(&hdev->le_scan_disable);
768
09fd0de5 769 hci_dev_lock(hdev);
1da177e4
LT
770 inquiry_cache_flush(hdev);
771 hci_conn_hash_flush(hdev);
09fd0de5 772 hci_dev_unlock(hdev);
1da177e4
LT
773
774 hci_notify(hdev, HCI_DEV_DOWN);
775
776 if (hdev->flush)
777 hdev->flush(hdev);
778
779 /* Reset device */
780 skb_queue_purge(&hdev->cmd_q);
781 atomic_set(&hdev->cmd_cnt, 1);
8af59467 782 if (!test_bit(HCI_RAW, &hdev->flags) &&
a6c511c6 783 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1da177e4 784 set_bit(HCI_INIT, &hdev->flags);
04837f64 785 __hci_request(hdev, hci_reset_req, 0,
a8c5fb1a 786 msecs_to_jiffies(250));
1da177e4
LT
787 clear_bit(HCI_INIT, &hdev->flags);
788 }
789
c347b765
GP
790 /* flush cmd work */
791 flush_work(&hdev->cmd_work);
1da177e4
LT
792
793 /* Drop queues */
794 skb_queue_purge(&hdev->rx_q);
795 skb_queue_purge(&hdev->cmd_q);
796 skb_queue_purge(&hdev->raw_q);
797
798 /* Drop last sent command */
799 if (hdev->sent_cmd) {
b79f44c1 800 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
801 kfree_skb(hdev->sent_cmd);
802 hdev->sent_cmd = NULL;
803 }
804
805 /* After this point our queues are empty
806 * and no tasks are scheduled. */
807 hdev->close(hdev);
808
8ee56540
MH
809 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
810 hci_dev_lock(hdev);
811 mgmt_powered(hdev, 0);
812 hci_dev_unlock(hdev);
813 }
5add6af8 814
1da177e4
LT
815 /* Clear flags */
816 hdev->flags = 0;
817
e59fda8d 818 memset(hdev->eir, 0, sizeof(hdev->eir));
09b3c3fb 819 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
e59fda8d 820
1da177e4
LT
821 hci_req_unlock(hdev);
822
823 hci_dev_put(hdev);
824 return 0;
825}
826
827int hci_dev_close(__u16 dev)
828{
829 struct hci_dev *hdev;
830 int err;
831
70f23020
AE
832 hdev = hci_dev_get(dev);
833 if (!hdev)
1da177e4 834 return -ENODEV;
8ee56540
MH
835
836 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
837 cancel_delayed_work(&hdev->power_off);
838
1da177e4 839 err = hci_dev_do_close(hdev);
8ee56540 840
1da177e4
LT
841 hci_dev_put(hdev);
842 return err;
843}
844
845int hci_dev_reset(__u16 dev)
846{
847 struct hci_dev *hdev;
848 int ret = 0;
849
70f23020
AE
850 hdev = hci_dev_get(dev);
851 if (!hdev)
1da177e4
LT
852 return -ENODEV;
853
854 hci_req_lock(hdev);
1da177e4
LT
855
856 if (!test_bit(HCI_UP, &hdev->flags))
857 goto done;
858
859 /* Drop queues */
860 skb_queue_purge(&hdev->rx_q);
861 skb_queue_purge(&hdev->cmd_q);
862
09fd0de5 863 hci_dev_lock(hdev);
1da177e4
LT
864 inquiry_cache_flush(hdev);
865 hci_conn_hash_flush(hdev);
09fd0de5 866 hci_dev_unlock(hdev);
1da177e4
LT
867
868 if (hdev->flush)
869 hdev->flush(hdev);
870
8e87d142 871 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 872 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4
LT
873
874 if (!test_bit(HCI_RAW, &hdev->flags))
04837f64 875 ret = __hci_request(hdev, hci_reset_req, 0,
a8c5fb1a 876 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
877
878done:
1da177e4
LT
879 hci_req_unlock(hdev);
880 hci_dev_put(hdev);
881 return ret;
882}
883
884int hci_dev_reset_stat(__u16 dev)
885{
886 struct hci_dev *hdev;
887 int ret = 0;
888
70f23020
AE
889 hdev = hci_dev_get(dev);
890 if (!hdev)
1da177e4
LT
891 return -ENODEV;
892
893 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
894
895 hci_dev_put(hdev);
896
897 return ret;
898}
899
900int hci_dev_cmd(unsigned int cmd, void __user *arg)
901{
902 struct hci_dev *hdev;
903 struct hci_dev_req dr;
904 int err = 0;
905
906 if (copy_from_user(&dr, arg, sizeof(dr)))
907 return -EFAULT;
908
70f23020
AE
909 hdev = hci_dev_get(dr.dev_id);
910 if (!hdev)
1da177e4
LT
911 return -ENODEV;
912
913 switch (cmd) {
914 case HCISETAUTH:
04837f64 915 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
a8c5fb1a 916 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
917 break;
918
919 case HCISETENCRYPT:
920 if (!lmp_encrypt_capable(hdev)) {
921 err = -EOPNOTSUPP;
922 break;
923 }
924
925 if (!test_bit(HCI_AUTH, &hdev->flags)) {
926 /* Auth must be enabled first */
04837f64 927 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
a8c5fb1a 928 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
929 if (err)
930 break;
931 }
932
04837f64 933 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
a8c5fb1a 934 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
935 break;
936
937 case HCISETSCAN:
04837f64 938 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
a8c5fb1a 939 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
940 break;
941
1da177e4 942 case HCISETLINKPOL:
e4e8e37c 943 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
a8c5fb1a 944 msecs_to_jiffies(HCI_INIT_TIMEOUT));
1da177e4
LT
945 break;
946
947 case HCISETLINKMODE:
e4e8e37c
MH
948 hdev->link_mode = ((__u16) dr.dev_opt) &
949 (HCI_LM_MASTER | HCI_LM_ACCEPT);
950 break;
951
952 case HCISETPTYPE:
953 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
954 break;
955
956 case HCISETACLMTU:
e4e8e37c
MH
957 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
958 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
959 break;
960
961 case HCISETSCOMTU:
e4e8e37c
MH
962 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
963 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
964 break;
965
966 default:
967 err = -EINVAL;
968 break;
969 }
e4e8e37c 970
1da177e4
LT
971 hci_dev_put(hdev);
972 return err;
973}
974
975int hci_get_dev_list(void __user *arg)
976{
8035ded4 977 struct hci_dev *hdev;
1da177e4
LT
978 struct hci_dev_list_req *dl;
979 struct hci_dev_req *dr;
1da177e4
LT
980 int n = 0, size, err;
981 __u16 dev_num;
982
983 if (get_user(dev_num, (__u16 __user *) arg))
984 return -EFAULT;
985
986 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
987 return -EINVAL;
988
989 size = sizeof(*dl) + dev_num * sizeof(*dr);
990
70f23020
AE
991 dl = kzalloc(size, GFP_KERNEL);
992 if (!dl)
1da177e4
LT
993 return -ENOMEM;
994
995 dr = dl->dev_req;
996
f20d09d5 997 read_lock(&hci_dev_list_lock);
8035ded4 998 list_for_each_entry(hdev, &hci_dev_list, list) {
a8b2d5c2 999 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
e0f9309f 1000 cancel_delayed_work(&hdev->power_off);
c542a06c 1001
a8b2d5c2
JH
1002 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1003 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 1004
1da177e4
LT
1005 (dr + n)->dev_id = hdev->id;
1006 (dr + n)->dev_opt = hdev->flags;
c542a06c 1007
1da177e4
LT
1008 if (++n >= dev_num)
1009 break;
1010 }
f20d09d5 1011 read_unlock(&hci_dev_list_lock);
1da177e4
LT
1012
1013 dl->dev_num = n;
1014 size = sizeof(*dl) + n * sizeof(*dr);
1015
1016 err = copy_to_user(arg, dl, size);
1017 kfree(dl);
1018
1019 return err ? -EFAULT : 0;
1020}
1021
1022int hci_get_dev_info(void __user *arg)
1023{
1024 struct hci_dev *hdev;
1025 struct hci_dev_info di;
1026 int err = 0;
1027
1028 if (copy_from_user(&di, arg, sizeof(di)))
1029 return -EFAULT;
1030
70f23020
AE
1031 hdev = hci_dev_get(di.dev_id);
1032 if (!hdev)
1da177e4
LT
1033 return -ENODEV;
1034
a8b2d5c2 1035 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
3243553f 1036 cancel_delayed_work_sync(&hdev->power_off);
ab81cbf9 1037
a8b2d5c2
JH
1038 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1039 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 1040
1da177e4
LT
1041 strcpy(di.name, hdev->name);
1042 di.bdaddr = hdev->bdaddr;
943da25d 1043 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1da177e4
LT
1044 di.flags = hdev->flags;
1045 di.pkt_type = hdev->pkt_type;
1046 di.acl_mtu = hdev->acl_mtu;
1047 di.acl_pkts = hdev->acl_pkts;
1048 di.sco_mtu = hdev->sco_mtu;
1049 di.sco_pkts = hdev->sco_pkts;
1050 di.link_policy = hdev->link_policy;
1051 di.link_mode = hdev->link_mode;
1052
1053 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1054 memcpy(&di.features, &hdev->features, sizeof(di.features));
1055
1056 if (copy_to_user(arg, &di, sizeof(di)))
1057 err = -EFAULT;
1058
1059 hci_dev_put(hdev);
1060
1061 return err;
1062}
1063
1064/* ---- Interface to HCI drivers ---- */
1065
611b30f7
MH
1066static int hci_rfkill_set_block(void *data, bool blocked)
1067{
1068 struct hci_dev *hdev = data;
1069
1070 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1071
1072 if (!blocked)
1073 return 0;
1074
1075 hci_dev_do_close(hdev);
1076
1077 return 0;
1078}
1079
1080static const struct rfkill_ops hci_rfkill_ops = {
1081 .set_block = hci_rfkill_set_block,
1082};
1083
ab81cbf9
JH
1084static void hci_power_on(struct work_struct *work)
1085{
1086 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1087
1088 BT_DBG("%s", hdev->name);
1089
1090 if (hci_dev_open(hdev->id) < 0)
1091 return;
1092
a8b2d5c2 1093 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
80b7ab33 1094 schedule_delayed_work(&hdev->power_off,
a8c5fb1a 1095 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
ab81cbf9 1096
a8b2d5c2 1097 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
744cf19e 1098 mgmt_index_added(hdev);
ab81cbf9
JH
1099}
1100
1101static void hci_power_off(struct work_struct *work)
1102{
3243553f 1103 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 1104 power_off.work);
ab81cbf9
JH
1105
1106 BT_DBG("%s", hdev->name);
1107
8ee56540 1108 hci_dev_do_close(hdev);
ab81cbf9
JH
1109}
1110
16ab91ab
JH
1111static void hci_discov_off(struct work_struct *work)
1112{
1113 struct hci_dev *hdev;
1114 u8 scan = SCAN_PAGE;
1115
1116 hdev = container_of(work, struct hci_dev, discov_off.work);
1117
1118 BT_DBG("%s", hdev->name);
1119
09fd0de5 1120 hci_dev_lock(hdev);
16ab91ab
JH
1121
1122 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1123
1124 hdev->discov_timeout = 0;
1125
09fd0de5 1126 hci_dev_unlock(hdev);
16ab91ab
JH
1127}
1128
2aeb9a1a
JH
1129int hci_uuids_clear(struct hci_dev *hdev)
1130{
1131 struct list_head *p, *n;
1132
1133 list_for_each_safe(p, n, &hdev->uuids) {
1134 struct bt_uuid *uuid;
1135
1136 uuid = list_entry(p, struct bt_uuid, list);
1137
1138 list_del(p);
1139 kfree(uuid);
1140 }
1141
1142 return 0;
1143}
1144
55ed8ca1
JH
1145int hci_link_keys_clear(struct hci_dev *hdev)
1146{
1147 struct list_head *p, *n;
1148
1149 list_for_each_safe(p, n, &hdev->link_keys) {
1150 struct link_key *key;
1151
1152 key = list_entry(p, struct link_key, list);
1153
1154 list_del(p);
1155 kfree(key);
1156 }
1157
1158 return 0;
1159}
1160
b899efaf
VCG
1161int hci_smp_ltks_clear(struct hci_dev *hdev)
1162{
1163 struct smp_ltk *k, *tmp;
1164
1165 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1166 list_del(&k->list);
1167 kfree(k);
1168 }
1169
1170 return 0;
1171}
1172
55ed8ca1
JH
1173struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1174{
8035ded4 1175 struct link_key *k;
55ed8ca1 1176
8035ded4 1177 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1178 if (bacmp(bdaddr, &k->bdaddr) == 0)
1179 return k;
55ed8ca1
JH
1180
1181 return NULL;
1182}
1183
745c0ce3 1184static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 1185 u8 key_type, u8 old_key_type)
d25e28ab
JH
1186{
1187 /* Legacy key */
1188 if (key_type < 0x03)
745c0ce3 1189 return true;
d25e28ab
JH
1190
1191 /* Debug keys are insecure so don't store them persistently */
1192 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 1193 return false;
d25e28ab
JH
1194
1195 /* Changed combination key and there's no previous one */
1196 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 1197 return false;
d25e28ab
JH
1198
1199 /* Security mode 3 case */
1200 if (!conn)
745c0ce3 1201 return true;
d25e28ab
JH
1202
1203 /* Neither local nor remote side had no-bonding as requirement */
1204 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 1205 return true;
d25e28ab
JH
1206
1207 /* Local side had dedicated bonding as requirement */
1208 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 1209 return true;
d25e28ab
JH
1210
1211 /* Remote side had dedicated bonding as requirement */
1212 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 1213 return true;
d25e28ab
JH
1214
1215 /* If none of the above criteria match, then don't store the key
1216 * persistently */
745c0ce3 1217 return false;
d25e28ab
JH
1218}
1219
c9839a11 1220struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
75d262c2 1221{
c9839a11 1222 struct smp_ltk *k;
75d262c2 1223
c9839a11
VCG
1224 list_for_each_entry(k, &hdev->long_term_keys, list) {
1225 if (k->ediv != ediv ||
a8c5fb1a 1226 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
1227 continue;
1228
c9839a11 1229 return k;
75d262c2
VCG
1230 }
1231
1232 return NULL;
1233}
75d262c2 1234
c9839a11 1235struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
04124681 1236 u8 addr_type)
75d262c2 1237{
c9839a11 1238 struct smp_ltk *k;
75d262c2 1239
c9839a11
VCG
1240 list_for_each_entry(k, &hdev->long_term_keys, list)
1241 if (addr_type == k->bdaddr_type &&
a8c5fb1a 1242 bacmp(bdaddr, &k->bdaddr) == 0)
75d262c2
VCG
1243 return k;
1244
1245 return NULL;
1246}
75d262c2 1247
d25e28ab 1248int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
04124681 1249 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
55ed8ca1
JH
1250{
1251 struct link_key *key, *old_key;
745c0ce3
VA
1252 u8 old_key_type;
1253 bool persistent;
55ed8ca1
JH
1254
1255 old_key = hci_find_link_key(hdev, bdaddr);
1256 if (old_key) {
1257 old_key_type = old_key->type;
1258 key = old_key;
1259 } else {
12adcf3a 1260 old_key_type = conn ? conn->key_type : 0xff;
55ed8ca1
JH
1261 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1262 if (!key)
1263 return -ENOMEM;
1264 list_add(&key->list, &hdev->link_keys);
1265 }
1266
1267 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1268
d25e28ab
JH
1269 /* Some buggy controller combinations generate a changed
1270 * combination key for legacy pairing even when there's no
1271 * previous key */
1272 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 1273 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 1274 type = HCI_LK_COMBINATION;
655fe6ec
JH
1275 if (conn)
1276 conn->key_type = type;
1277 }
d25e28ab 1278
55ed8ca1 1279 bacpy(&key->bdaddr, bdaddr);
9b3b4460 1280 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
1281 key->pin_len = pin_len;
1282
b6020ba0 1283 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 1284 key->type = old_key_type;
4748fed2
JH
1285 else
1286 key->type = type;
1287
4df378a1
JH
1288 if (!new_key)
1289 return 0;
1290
1291 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1292
744cf19e 1293 mgmt_new_link_key(hdev, key, persistent);
4df378a1 1294
6ec5bcad
VA
1295 if (conn)
1296 conn->flush_key = !persistent;
55ed8ca1
JH
1297
1298 return 0;
1299}
1300
c9839a11 1301int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
9a006657 1302 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
04124681 1303 ediv, u8 rand[8])
75d262c2 1304{
c9839a11 1305 struct smp_ltk *key, *old_key;
75d262c2 1306
c9839a11
VCG
1307 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1308 return 0;
75d262c2 1309
c9839a11
VCG
1310 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1311 if (old_key)
75d262c2 1312 key = old_key;
c9839a11
VCG
1313 else {
1314 key = kzalloc(sizeof(*key), GFP_ATOMIC);
75d262c2
VCG
1315 if (!key)
1316 return -ENOMEM;
c9839a11 1317 list_add(&key->list, &hdev->long_term_keys);
75d262c2
VCG
1318 }
1319
75d262c2 1320 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
1321 key->bdaddr_type = addr_type;
1322 memcpy(key->val, tk, sizeof(key->val));
1323 key->authenticated = authenticated;
1324 key->ediv = ediv;
1325 key->enc_size = enc_size;
1326 key->type = type;
1327 memcpy(key->rand, rand, sizeof(key->rand));
75d262c2 1328
c9839a11
VCG
1329 if (!new_key)
1330 return 0;
75d262c2 1331
261cc5aa
VCG
1332 if (type & HCI_SMP_LTK)
1333 mgmt_new_ltk(hdev, key, 1);
1334
75d262c2
VCG
1335 return 0;
1336}
1337
55ed8ca1
JH
1338int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1339{
1340 struct link_key *key;
1341
1342 key = hci_find_link_key(hdev, bdaddr);
1343 if (!key)
1344 return -ENOENT;
1345
1346 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1347
1348 list_del(&key->list);
1349 kfree(key);
1350
1351 return 0;
1352}
1353
b899efaf
VCG
1354int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1355{
1356 struct smp_ltk *k, *tmp;
1357
1358 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1359 if (bacmp(bdaddr, &k->bdaddr))
1360 continue;
1361
1362 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1363
1364 list_del(&k->list);
1365 kfree(k);
1366 }
1367
1368 return 0;
1369}
1370
6bd32326
VT
1371/* HCI command timer function */
1372static void hci_cmd_timer(unsigned long arg)
1373{
1374 struct hci_dev *hdev = (void *) arg;
1375
1376 BT_ERR("%s command tx timeout", hdev->name);
1377 atomic_set(&hdev->cmd_cnt, 1);
c347b765 1378 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
1379}
1380
2763eda6 1381struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 1382 bdaddr_t *bdaddr)
2763eda6
SJ
1383{
1384 struct oob_data *data;
1385
1386 list_for_each_entry(data, &hdev->remote_oob_data, list)
1387 if (bacmp(bdaddr, &data->bdaddr) == 0)
1388 return data;
1389
1390 return NULL;
1391}
1392
1393int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1394{
1395 struct oob_data *data;
1396
1397 data = hci_find_remote_oob_data(hdev, bdaddr);
1398 if (!data)
1399 return -ENOENT;
1400
1401 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1402
1403 list_del(&data->list);
1404 kfree(data);
1405
1406 return 0;
1407}
1408
1409int hci_remote_oob_data_clear(struct hci_dev *hdev)
1410{
1411 struct oob_data *data, *n;
1412
1413 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1414 list_del(&data->list);
1415 kfree(data);
1416 }
1417
1418 return 0;
1419}
1420
1421int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
04124681 1422 u8 *randomizer)
2763eda6
SJ
1423{
1424 struct oob_data *data;
1425
1426 data = hci_find_remote_oob_data(hdev, bdaddr);
1427
1428 if (!data) {
1429 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1430 if (!data)
1431 return -ENOMEM;
1432
1433 bacpy(&data->bdaddr, bdaddr);
1434 list_add(&data->list, &hdev->remote_oob_data);
1435 }
1436
1437 memcpy(data->hash, hash, sizeof(data->hash));
1438 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1439
1440 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1441
1442 return 0;
1443}
1444
04124681 1445struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
b2a66aad 1446{
8035ded4 1447 struct bdaddr_list *b;
b2a66aad 1448
8035ded4 1449 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1450 if (bacmp(bdaddr, &b->bdaddr) == 0)
1451 return b;
b2a66aad
AJ
1452
1453 return NULL;
1454}
1455
1456int hci_blacklist_clear(struct hci_dev *hdev)
1457{
1458 struct list_head *p, *n;
1459
1460 list_for_each_safe(p, n, &hdev->blacklist) {
1461 struct bdaddr_list *b;
1462
1463 b = list_entry(p, struct bdaddr_list, list);
1464
1465 list_del(p);
1466 kfree(b);
1467 }
1468
1469 return 0;
1470}
1471
88c1fe4b 1472int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1473{
1474 struct bdaddr_list *entry;
b2a66aad
AJ
1475
1476 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1477 return -EBADF;
1478
5e762444
AJ
1479 if (hci_blacklist_lookup(hdev, bdaddr))
1480 return -EEXIST;
b2a66aad
AJ
1481
1482 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
1483 if (!entry)
1484 return -ENOMEM;
b2a66aad
AJ
1485
1486 bacpy(&entry->bdaddr, bdaddr);
1487
1488 list_add(&entry->list, &hdev->blacklist);
1489
88c1fe4b 1490 return mgmt_device_blocked(hdev, bdaddr, type);
b2a66aad
AJ
1491}
1492
88c1fe4b 1493int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1494{
1495 struct bdaddr_list *entry;
b2a66aad 1496
1ec918ce 1497 if (bacmp(bdaddr, BDADDR_ANY) == 0)
5e762444 1498 return hci_blacklist_clear(hdev);
b2a66aad
AJ
1499
1500 entry = hci_blacklist_lookup(hdev, bdaddr);
1ec918ce 1501 if (!entry)
5e762444 1502 return -ENOENT;
b2a66aad
AJ
1503
1504 list_del(&entry->list);
1505 kfree(entry);
1506
88c1fe4b 1507 return mgmt_device_unblocked(hdev, bdaddr, type);
b2a66aad
AJ
1508}
1509
7ba8b4be
AG
1510static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1511{
1512 struct le_scan_params *param = (struct le_scan_params *) opt;
1513 struct hci_cp_le_set_scan_param cp;
1514
1515 memset(&cp, 0, sizeof(cp));
1516 cp.type = param->type;
1517 cp.interval = cpu_to_le16(param->interval);
1518 cp.window = cpu_to_le16(param->window);
1519
1520 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1521}
1522
1523static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1524{
1525 struct hci_cp_le_set_scan_enable cp;
1526
1527 memset(&cp, 0, sizeof(cp));
1528 cp.enable = 1;
1529
1530 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1531}
1532
1533static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
04124681 1534 u16 window, int timeout)
7ba8b4be
AG
1535{
1536 long timeo = msecs_to_jiffies(3000);
1537 struct le_scan_params param;
1538 int err;
1539
1540 BT_DBG("%s", hdev->name);
1541
1542 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1543 return -EINPROGRESS;
1544
1545 param.type = type;
1546 param.interval = interval;
1547 param.window = window;
1548
1549 hci_req_lock(hdev);
1550
1551 err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
04124681 1552 timeo);
7ba8b4be
AG
1553 if (!err)
1554 err = __hci_request(hdev, le_scan_enable_req, 0, timeo);
1555
1556 hci_req_unlock(hdev);
1557
1558 if (err < 0)
1559 return err;
1560
1561 schedule_delayed_work(&hdev->le_scan_disable,
04124681 1562 msecs_to_jiffies(timeout));
7ba8b4be
AG
1563
1564 return 0;
1565}
1566
7dbfac1d
AG
1567int hci_cancel_le_scan(struct hci_dev *hdev)
1568{
1569 BT_DBG("%s", hdev->name);
1570
1571 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1572 return -EALREADY;
1573
1574 if (cancel_delayed_work(&hdev->le_scan_disable)) {
1575 struct hci_cp_le_set_scan_enable cp;
1576
1577 /* Send HCI command to disable LE Scan */
1578 memset(&cp, 0, sizeof(cp));
1579 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1580 }
1581
1582 return 0;
1583}
1584
7ba8b4be
AG
1585static void le_scan_disable_work(struct work_struct *work)
1586{
1587 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 1588 le_scan_disable.work);
7ba8b4be
AG
1589 struct hci_cp_le_set_scan_enable cp;
1590
1591 BT_DBG("%s", hdev->name);
1592
1593 memset(&cp, 0, sizeof(cp));
1594
1595 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1596}
1597
28b75a89
AG
1598static void le_scan_work(struct work_struct *work)
1599{
1600 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1601 struct le_scan_params *param = &hdev->le_scan_params;
1602
1603 BT_DBG("%s", hdev->name);
1604
04124681
GP
1605 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1606 param->timeout);
28b75a89
AG
1607}
1608
1609int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
04124681 1610 int timeout)
28b75a89
AG
1611{
1612 struct le_scan_params *param = &hdev->le_scan_params;
1613
1614 BT_DBG("%s", hdev->name);
1615
1616 if (work_busy(&hdev->le_scan))
1617 return -EINPROGRESS;
1618
1619 param->type = type;
1620 param->interval = interval;
1621 param->window = window;
1622 param->timeout = timeout;
1623
1624 queue_work(system_long_wq, &hdev->le_scan);
1625
1626 return 0;
1627}
1628
9be0dab7
DH
1629/* Alloc HCI device */
1630struct hci_dev *hci_alloc_dev(void)
1631{
1632 struct hci_dev *hdev;
1633
1634 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1635 if (!hdev)
1636 return NULL;
1637
b1b813d4
DH
1638 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1639 hdev->esco_type = (ESCO_HV1);
1640 hdev->link_mode = (HCI_LM_ACCEPT);
1641 hdev->io_capability = 0x03; /* No Input No Output */
1642
b1b813d4
DH
1643 hdev->sniff_max_interval = 800;
1644 hdev->sniff_min_interval = 80;
1645
1646 mutex_init(&hdev->lock);
1647 mutex_init(&hdev->req_lock);
1648
1649 INIT_LIST_HEAD(&hdev->mgmt_pending);
1650 INIT_LIST_HEAD(&hdev->blacklist);
1651 INIT_LIST_HEAD(&hdev->uuids);
1652 INIT_LIST_HEAD(&hdev->link_keys);
1653 INIT_LIST_HEAD(&hdev->long_term_keys);
1654 INIT_LIST_HEAD(&hdev->remote_oob_data);
b1b813d4
DH
1655
1656 INIT_WORK(&hdev->rx_work, hci_rx_work);
1657 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1658 INIT_WORK(&hdev->tx_work, hci_tx_work);
1659 INIT_WORK(&hdev->power_on, hci_power_on);
1660 INIT_WORK(&hdev->le_scan, le_scan_work);
1661
b1b813d4
DH
1662 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1663 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1664 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1665
9be0dab7 1666 skb_queue_head_init(&hdev->driver_init);
b1b813d4
DH
1667 skb_queue_head_init(&hdev->rx_q);
1668 skb_queue_head_init(&hdev->cmd_q);
1669 skb_queue_head_init(&hdev->raw_q);
1670
1671 init_waitqueue_head(&hdev->req_wait_q);
1672
1673 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1674
b1b813d4
DH
1675 hci_init_sysfs(hdev);
1676 discovery_init(hdev);
1677 hci_conn_hash_init(hdev);
9be0dab7
DH
1678
1679 return hdev;
1680}
1681EXPORT_SYMBOL(hci_alloc_dev);
1682
1683/* Free HCI device */
1684void hci_free_dev(struct hci_dev *hdev)
1685{
1686 skb_queue_purge(&hdev->driver_init);
1687
1688 /* will free via device release */
1689 put_device(&hdev->dev);
1690}
1691EXPORT_SYMBOL(hci_free_dev);
1692
1da177e4
LT
1693/* Register HCI device */
1694int hci_register_dev(struct hci_dev *hdev)
1695{
b1b813d4 1696 int id, error;
1da177e4 1697
010666a1 1698 if (!hdev->open || !hdev->close)
1da177e4
LT
1699 return -EINVAL;
1700
08add513
MM
1701 /* Do not allow HCI_AMP devices to register at index 0,
1702 * so the index can be used as the AMP controller ID.
1703 */
3df92b31
SL
1704 switch (hdev->dev_type) {
1705 case HCI_BREDR:
1706 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
1707 break;
1708 case HCI_AMP:
1709 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
1710 break;
1711 default:
1712 return -EINVAL;
1da177e4 1713 }
8e87d142 1714
3df92b31
SL
1715 if (id < 0)
1716 return id;
1717
1da177e4
LT
1718 sprintf(hdev->name, "hci%d", id);
1719 hdev->id = id;
2d8b3a11
AE
1720
1721 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1722
3df92b31
SL
1723 write_lock(&hci_dev_list_lock);
1724 list_add(&hdev->list, &hci_dev_list);
f20d09d5 1725 write_unlock(&hci_dev_list_lock);
1da177e4 1726
32845eb1 1727 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
a8c5fb1a 1728 WQ_MEM_RECLAIM, 1);
33ca954d
DH
1729 if (!hdev->workqueue) {
1730 error = -ENOMEM;
1731 goto err;
1732 }
f48fd9c8 1733
33ca954d
DH
1734 error = hci_add_sysfs(hdev);
1735 if (error < 0)
1736 goto err_wqueue;
1da177e4 1737
611b30f7 1738 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
1739 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
1740 hdev);
611b30f7
MH
1741 if (hdev->rfkill) {
1742 if (rfkill_register(hdev->rfkill) < 0) {
1743 rfkill_destroy(hdev->rfkill);
1744 hdev->rfkill = NULL;
1745 }
1746 }
1747
a8b2d5c2
JH
1748 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1749 set_bit(HCI_SETUP, &hdev->dev_flags);
7f971041 1750 schedule_work(&hdev->power_on);
ab81cbf9 1751
1da177e4 1752 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 1753 hci_dev_hold(hdev);
1da177e4
LT
1754
1755 return id;
f48fd9c8 1756
33ca954d
DH
1757err_wqueue:
1758 destroy_workqueue(hdev->workqueue);
1759err:
3df92b31 1760 ida_simple_remove(&hci_index_ida, hdev->id);
f20d09d5 1761 write_lock(&hci_dev_list_lock);
f48fd9c8 1762 list_del(&hdev->list);
f20d09d5 1763 write_unlock(&hci_dev_list_lock);
f48fd9c8 1764
33ca954d 1765 return error;
1da177e4
LT
1766}
1767EXPORT_SYMBOL(hci_register_dev);
1768
1769/* Unregister HCI device */
59735631 1770void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 1771{
3df92b31 1772 int i, id;
ef222013 1773
c13854ce 1774 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 1775
94324962
JH
1776 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
1777
3df92b31
SL
1778 id = hdev->id;
1779
f20d09d5 1780 write_lock(&hci_dev_list_lock);
1da177e4 1781 list_del(&hdev->list);
f20d09d5 1782 write_unlock(&hci_dev_list_lock);
1da177e4
LT
1783
1784 hci_dev_do_close(hdev);
1785
cd4c5391 1786 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
1787 kfree_skb(hdev->reassembly[i]);
1788
ab81cbf9 1789 if (!test_bit(HCI_INIT, &hdev->flags) &&
a8c5fb1a 1790 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
09fd0de5 1791 hci_dev_lock(hdev);
744cf19e 1792 mgmt_index_removed(hdev);
09fd0de5 1793 hci_dev_unlock(hdev);
56e5cb86 1794 }
ab81cbf9 1795
2e58ef3e
JH
1796 /* mgmt_index_removed should take care of emptying the
1797 * pending list */
1798 BUG_ON(!list_empty(&hdev->mgmt_pending));
1799
1da177e4
LT
1800 hci_notify(hdev, HCI_DEV_UNREG);
1801
611b30f7
MH
1802 if (hdev->rfkill) {
1803 rfkill_unregister(hdev->rfkill);
1804 rfkill_destroy(hdev->rfkill);
1805 }
1806
ce242970 1807 hci_del_sysfs(hdev);
147e2d59 1808
f48fd9c8
MH
1809 destroy_workqueue(hdev->workqueue);
1810
09fd0de5 1811 hci_dev_lock(hdev);
e2e0cacb 1812 hci_blacklist_clear(hdev);
2aeb9a1a 1813 hci_uuids_clear(hdev);
55ed8ca1 1814 hci_link_keys_clear(hdev);
b899efaf 1815 hci_smp_ltks_clear(hdev);
2763eda6 1816 hci_remote_oob_data_clear(hdev);
09fd0de5 1817 hci_dev_unlock(hdev);
e2e0cacb 1818
dc946bd8 1819 hci_dev_put(hdev);
3df92b31
SL
1820
1821 ida_simple_remove(&hci_index_ida, id);
1da177e4
LT
1822}
1823EXPORT_SYMBOL(hci_unregister_dev);
1824
1825/* Suspend HCI device */
1826int hci_suspend_dev(struct hci_dev *hdev)
1827{
1828 hci_notify(hdev, HCI_DEV_SUSPEND);
1829 return 0;
1830}
1831EXPORT_SYMBOL(hci_suspend_dev);
1832
1833/* Resume HCI device */
1834int hci_resume_dev(struct hci_dev *hdev)
1835{
1836 hci_notify(hdev, HCI_DEV_RESUME);
1837 return 0;
1838}
1839EXPORT_SYMBOL(hci_resume_dev);
1840
76bca880
MH
1841/* Receive frame from HCI drivers */
1842int hci_recv_frame(struct sk_buff *skb)
1843{
1844 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1845 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 1846 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
1847 kfree_skb(skb);
1848 return -ENXIO;
1849 }
1850
1851 /* Incomming skb */
1852 bt_cb(skb)->incoming = 1;
1853
1854 /* Time stamp */
1855 __net_timestamp(skb);
1856
76bca880 1857 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 1858 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 1859
76bca880
MH
1860 return 0;
1861}
1862EXPORT_SYMBOL(hci_recv_frame);
1863
33e882a5 1864static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 1865 int count, __u8 index)
33e882a5
SS
1866{
1867 int len = 0;
1868 int hlen = 0;
1869 int remain = count;
1870 struct sk_buff *skb;
1871 struct bt_skb_cb *scb;
1872
1873 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 1874 index >= NUM_REASSEMBLY)
33e882a5
SS
1875 return -EILSEQ;
1876
1877 skb = hdev->reassembly[index];
1878
1879 if (!skb) {
1880 switch (type) {
1881 case HCI_ACLDATA_PKT:
1882 len = HCI_MAX_FRAME_SIZE;
1883 hlen = HCI_ACL_HDR_SIZE;
1884 break;
1885 case HCI_EVENT_PKT:
1886 len = HCI_MAX_EVENT_SIZE;
1887 hlen = HCI_EVENT_HDR_SIZE;
1888 break;
1889 case HCI_SCODATA_PKT:
1890 len = HCI_MAX_SCO_SIZE;
1891 hlen = HCI_SCO_HDR_SIZE;
1892 break;
1893 }
1894
1e429f38 1895 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
1896 if (!skb)
1897 return -ENOMEM;
1898
1899 scb = (void *) skb->cb;
1900 scb->expect = hlen;
1901 scb->pkt_type = type;
1902
1903 skb->dev = (void *) hdev;
1904 hdev->reassembly[index] = skb;
1905 }
1906
1907 while (count) {
1908 scb = (void *) skb->cb;
89bb46d0 1909 len = min_t(uint, scb->expect, count);
33e882a5
SS
1910
1911 memcpy(skb_put(skb, len), data, len);
1912
1913 count -= len;
1914 data += len;
1915 scb->expect -= len;
1916 remain = count;
1917
1918 switch (type) {
1919 case HCI_EVENT_PKT:
1920 if (skb->len == HCI_EVENT_HDR_SIZE) {
1921 struct hci_event_hdr *h = hci_event_hdr(skb);
1922 scb->expect = h->plen;
1923
1924 if (skb_tailroom(skb) < scb->expect) {
1925 kfree_skb(skb);
1926 hdev->reassembly[index] = NULL;
1927 return -ENOMEM;
1928 }
1929 }
1930 break;
1931
1932 case HCI_ACLDATA_PKT:
1933 if (skb->len == HCI_ACL_HDR_SIZE) {
1934 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1935 scb->expect = __le16_to_cpu(h->dlen);
1936
1937 if (skb_tailroom(skb) < scb->expect) {
1938 kfree_skb(skb);
1939 hdev->reassembly[index] = NULL;
1940 return -ENOMEM;
1941 }
1942 }
1943 break;
1944
1945 case HCI_SCODATA_PKT:
1946 if (skb->len == HCI_SCO_HDR_SIZE) {
1947 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1948 scb->expect = h->dlen;
1949
1950 if (skb_tailroom(skb) < scb->expect) {
1951 kfree_skb(skb);
1952 hdev->reassembly[index] = NULL;
1953 return -ENOMEM;
1954 }
1955 }
1956 break;
1957 }
1958
1959 if (scb->expect == 0) {
1960 /* Complete frame */
1961
1962 bt_cb(skb)->pkt_type = type;
1963 hci_recv_frame(skb);
1964
1965 hdev->reassembly[index] = NULL;
1966 return remain;
1967 }
1968 }
1969
1970 return remain;
1971}
1972
ef222013
MH
1973int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1974{
f39a3c06
SS
1975 int rem = 0;
1976
ef222013
MH
1977 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1978 return -EILSEQ;
1979
da5f6c37 1980 while (count) {
1e429f38 1981 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
1982 if (rem < 0)
1983 return rem;
ef222013 1984
f39a3c06
SS
1985 data += (count - rem);
1986 count = rem;
f81c6224 1987 }
ef222013 1988
f39a3c06 1989 return rem;
ef222013
MH
1990}
1991EXPORT_SYMBOL(hci_recv_fragment);
1992
99811510
SS
1993#define STREAM_REASSEMBLY 0
1994
1995int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1996{
1997 int type;
1998 int rem = 0;
1999
da5f6c37 2000 while (count) {
99811510
SS
2001 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2002
2003 if (!skb) {
2004 struct { char type; } *pkt;
2005
2006 /* Start of the frame */
2007 pkt = data;
2008 type = pkt->type;
2009
2010 data++;
2011 count--;
2012 } else
2013 type = bt_cb(skb)->pkt_type;
2014
1e429f38 2015 rem = hci_reassembly(hdev, type, data, count,
a8c5fb1a 2016 STREAM_REASSEMBLY);
99811510
SS
2017 if (rem < 0)
2018 return rem;
2019
2020 data += (count - rem);
2021 count = rem;
f81c6224 2022 }
99811510
SS
2023
2024 return rem;
2025}
2026EXPORT_SYMBOL(hci_recv_stream_fragment);
2027
1da177e4
LT
2028/* ---- Interface to upper protocols ---- */
2029
1da177e4
LT
2030int hci_register_cb(struct hci_cb *cb)
2031{
2032 BT_DBG("%p name %s", cb, cb->name);
2033
f20d09d5 2034 write_lock(&hci_cb_list_lock);
1da177e4 2035 list_add(&cb->list, &hci_cb_list);
f20d09d5 2036 write_unlock(&hci_cb_list_lock);
1da177e4
LT
2037
2038 return 0;
2039}
2040EXPORT_SYMBOL(hci_register_cb);
2041
2042int hci_unregister_cb(struct hci_cb *cb)
2043{
2044 BT_DBG("%p name %s", cb, cb->name);
2045
f20d09d5 2046 write_lock(&hci_cb_list_lock);
1da177e4 2047 list_del(&cb->list);
f20d09d5 2048 write_unlock(&hci_cb_list_lock);
1da177e4
LT
2049
2050 return 0;
2051}
2052EXPORT_SYMBOL(hci_unregister_cb);
2053
2054static int hci_send_frame(struct sk_buff *skb)
2055{
2056 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2057
2058 if (!hdev) {
2059 kfree_skb(skb);
2060 return -ENODEV;
2061 }
2062
0d48d939 2063 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 2064
cd82e61c
MH
2065 /* Time stamp */
2066 __net_timestamp(skb);
1da177e4 2067
cd82e61c
MH
2068 /* Send copy to monitor */
2069 hci_send_to_monitor(hdev, skb);
2070
2071 if (atomic_read(&hdev->promisc)) {
2072 /* Send copy to the sockets */
470fe1b5 2073 hci_send_to_sock(hdev, skb);
1da177e4
LT
2074 }
2075
2076 /* Get rid of skb owner, prior to sending to the driver. */
2077 skb_orphan(skb);
2078
2079 return hdev->send(skb);
2080}
2081
2082/* Send HCI command */
a9de9248 2083int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1da177e4
LT
2084{
2085 int len = HCI_COMMAND_HDR_SIZE + plen;
2086 struct hci_command_hdr *hdr;
2087 struct sk_buff *skb;
2088
a9de9248 2089 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1da177e4
LT
2090
2091 skb = bt_skb_alloc(len, GFP_ATOMIC);
2092 if (!skb) {
ef222013 2093 BT_ERR("%s no memory for command", hdev->name);
1da177e4
LT
2094 return -ENOMEM;
2095 }
2096
2097 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 2098 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
2099 hdr->plen = plen;
2100
2101 if (plen)
2102 memcpy(skb_put(skb, plen), param, plen);
2103
2104 BT_DBG("skb len %d", skb->len);
2105
0d48d939 2106 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1da177e4 2107 skb->dev = (void *) hdev;
c78ae283 2108
a5040efa
JH
2109 if (test_bit(HCI_INIT, &hdev->flags))
2110 hdev->init_last_cmd = opcode;
2111
1da177e4 2112 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 2113 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
2114
2115 return 0;
2116}
1da177e4
LT
2117
2118/* Get data from the previously sent command */
a9de9248 2119void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
2120{
2121 struct hci_command_hdr *hdr;
2122
2123 if (!hdev->sent_cmd)
2124 return NULL;
2125
2126 hdr = (void *) hdev->sent_cmd->data;
2127
a9de9248 2128 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
2129 return NULL;
2130
a9de9248 2131 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1da177e4
LT
2132
2133 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2134}
2135
2136/* Send ACL data */
2137static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2138{
2139 struct hci_acl_hdr *hdr;
2140 int len = skb->len;
2141
badff6d0
ACM
2142 skb_push(skb, HCI_ACL_HDR_SIZE);
2143 skb_reset_transport_header(skb);
9c70220b 2144 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
2145 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2146 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
2147}
2148
73d80deb 2149static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
a8c5fb1a 2150 struct sk_buff *skb, __u16 flags)
1da177e4
LT
2151{
2152 struct hci_dev *hdev = conn->hdev;
2153 struct sk_buff *list;
2154
087bfd99
GP
2155 skb->len = skb_headlen(skb);
2156 skb->data_len = 0;
2157
2158 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2159 hci_add_acl_hdr(skb, conn->handle, flags);
2160
70f23020
AE
2161 list = skb_shinfo(skb)->frag_list;
2162 if (!list) {
1da177e4
LT
2163 /* Non fragmented */
2164 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2165
73d80deb 2166 skb_queue_tail(queue, skb);
1da177e4
LT
2167 } else {
2168 /* Fragmented */
2169 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2170
2171 skb_shinfo(skb)->frag_list = NULL;
2172
2173 /* Queue all fragments atomically */
af3e6359 2174 spin_lock(&queue->lock);
1da177e4 2175
73d80deb 2176 __skb_queue_tail(queue, skb);
e702112f
AE
2177
2178 flags &= ~ACL_START;
2179 flags |= ACL_CONT;
1da177e4
LT
2180 do {
2181 skb = list; list = list->next;
8e87d142 2182
1da177e4 2183 skb->dev = (void *) hdev;
0d48d939 2184 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 2185 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
2186
2187 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2188
73d80deb 2189 __skb_queue_tail(queue, skb);
1da177e4
LT
2190 } while (list);
2191
af3e6359 2192 spin_unlock(&queue->lock);
1da177e4 2193 }
73d80deb
LAD
2194}
2195
2196void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2197{
2198 struct hci_conn *conn = chan->conn;
2199 struct hci_dev *hdev = conn->hdev;
2200
2201 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2202
2203 skb->dev = (void *) hdev;
73d80deb
LAD
2204
2205 hci_queue_acl(conn, &chan->data_q, skb, flags);
1da177e4 2206
3eff45ea 2207 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 2208}
1da177e4
LT
2209
2210/* Send SCO data */
0d861d8b 2211void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
2212{
2213 struct hci_dev *hdev = conn->hdev;
2214 struct hci_sco_hdr hdr;
2215
2216 BT_DBG("%s len %d", hdev->name, skb->len);
2217
aca3192c 2218 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
2219 hdr.dlen = skb->len;
2220
badff6d0
ACM
2221 skb_push(skb, HCI_SCO_HDR_SIZE);
2222 skb_reset_transport_header(skb);
9c70220b 2223 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4
LT
2224
2225 skb->dev = (void *) hdev;
0d48d939 2226 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 2227
1da177e4 2228 skb_queue_tail(&conn->data_q, skb);
3eff45ea 2229 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 2230}
1da177e4
LT
2231
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */

/* Pick the connection of the given link type that has queued data and
 * the fewest packets in flight, and compute a fair per-round quota of
 * controller buffers for it. Returns the chosen connection (or NULL)
 * and writes the quota into *quote (0 when nothing is schedulable).
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		/* Only connections of the requested type with data waiting. */
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Least-sent-first: remember the connection with the
		 * fewest unacknowledged packets. */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop once every connection of this type was examined. */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Pick the buffer pool that flow-controls this link type. */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * (le_mtu == 0) share the ACL buffers. */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Even share of the free buffers, but always allow at
		 * least one packet so a link cannot starve completely. */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2294
6039aa73 2295static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
2296{
2297 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2298 struct hci_conn *c;
1da177e4 2299
bae1f5d9 2300 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 2301
bf4c6325
GP
2302 rcu_read_lock();
2303
1da177e4 2304 /* Kill stalled connections */
bf4c6325 2305 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9
VT
2306 if (c->type == type && c->sent) {
2307 BT_ERR("%s killing stalled connection %s",
a8c5fb1a 2308 hdev->name, batostr(&c->dst));
1da177e4
LT
2309 hci_acl_disconn(c, 0x13);
2310 }
2311 }
bf4c6325
GP
2312
2313 rcu_read_unlock();
1da177e4
LT
2314}
2315
/* Channel scheduler: among channels of the given link type, pick the
 * one whose head skb has the highest priority; ties are broken in
 * favor of the owning connection with the fewest packets in flight.
 * Also computes the per-round buffer quota in *quote.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			/* Lower-priority traffic never displaces the best
			 * priority seen so far in this scan. */
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				/* New best priority: restart the fairness
				 * bookkeeping at this level. */
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Fairness is judged on the owning connection's
			 * in-flight count, not the channel's own. */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* Stop once every connection of this type was examined. */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Pick the buffer pool that flow-controls this link type. */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		/* No dedicated LE pool (le_mtu == 0): share ACL buffers. */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Fair share of the free buffers, but at least one packet. */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2394
/* Promote the head skb of channels that sent nothing during the last
 * scheduling round so they can compete again with higher-priority
 * traffic on the next pass (anti-starvation).
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* A channel that sent something this round is not
			 * starved; just reset its per-round counter. */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			/* Already at (or above) the promotion ceiling. */
			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* Stop once every connection of this type was examined. */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2444
b71d385a
AE
2445static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2446{
2447 /* Calculate count of blocks used by this packet */
2448 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2449}
2450
6039aa73 2451static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 2452{
1da177e4
LT
2453 if (!test_bit(HCI_RAW, &hdev->flags)) {
2454 /* ACL tx timeout must be longer than maximum
2455 * link supervision timeout (40.9 seconds) */
63d2bc1b 2456 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
a8c5fb1a 2457 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
bae1f5d9 2458 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 2459 }
63d2bc1b 2460}
1da177e4 2461
/* Packet-based ACL scheduler: drain channel queues while the
 * controller still has free ACL buffers (hdev->acl_cnt).
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		/* Priority of the channel's head packet when selected. */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* One packet == one unit of controller credit. */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* If anything was sent, re-promote starved channels. */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2499
/* Block-based ACL scheduler: controller credit is measured in buffer
 * blocks (hdev->block_cnt) rather than in whole packets.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		/* Priority of the channel's head packet when selected. */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			/* NOTE(review): the skb was already dequeued here;
			 * bailing out neither frees nor requeues it, which
			 * looks like a packet leak for frames larger than
			 * the remaining credit — confirm against upstream. */
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* One packet may consume several blocks of credit. */
			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	/* If anything was sent, re-promote starved channels. */
	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2545
6039aa73 2546static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
2547{
2548 BT_DBG("%s", hdev->name);
2549
2550 if (!hci_conn_num(hdev, ACL_LINK))
2551 return;
2552
2553 switch (hdev->flow_ctl_mode) {
2554 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2555 hci_sched_acl_pkt(hdev);
2556 break;
2557
2558 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2559 hci_sched_acl_blk(hdev);
2560 break;
2561 }
2562}
2563
1da177e4 2564/* Schedule SCO */
6039aa73 2565static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
2566{
2567 struct hci_conn *conn;
2568 struct sk_buff *skb;
2569 int quote;
2570
2571 BT_DBG("%s", hdev->name);
2572
52087a79
LAD
2573 if (!hci_conn_num(hdev, SCO_LINK))
2574 return;
2575
1da177e4
LT
2576 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2577 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2578 BT_DBG("skb %p len %d", skb, skb->len);
2579 hci_send_frame(skb);
2580
2581 conn->sent++;
2582 if (conn->sent == ~0)
2583 conn->sent = 0;
2584 }
2585 }
2586}
2587
6039aa73 2588static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
2589{
2590 struct hci_conn *conn;
2591 struct sk_buff *skb;
2592 int quote;
2593
2594 BT_DBG("%s", hdev->name);
2595
52087a79
LAD
2596 if (!hci_conn_num(hdev, ESCO_LINK))
2597 return;
2598
8fc9ced3
GP
2599 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2600 &quote))) {
b6a0dc82
MH
2601 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2602 BT_DBG("skb %p len %d", skb, skb->len);
2603 hci_send_frame(skb);
2604
2605 conn->sent++;
2606 if (conn->sent == ~0)
2607 conn->sent = 0;
2608 }
2609 }
2610}
2611
/* LE scheduler: like the packet-based ACL scheduler but driven by the
 * LE buffer pool, falling back to the ACL pool on controllers that do
 * not advertise separate LE buffers (le_pkts == 0).
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Choose the credit pool; remember the start value so we can
	 * tell later whether anything was sent. */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		/* Priority of the channel's head packet when selected. */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credit back to whichever pool was used. */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* If anything was sent, re-promote starved channels. */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2662
3eff45ea 2663static void hci_tx_work(struct work_struct *work)
1da177e4 2664{
3eff45ea 2665 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
2666 struct sk_buff *skb;
2667
6ed58ec5 2668 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 2669 hdev->sco_cnt, hdev->le_cnt);
1da177e4
LT
2670
2671 /* Schedule queues and send stuff to HCI driver */
2672
2673 hci_sched_acl(hdev);
2674
2675 hci_sched_sco(hdev);
2676
b6a0dc82
MH
2677 hci_sched_esco(hdev);
2678
6ed58ec5
VT
2679 hci_sched_le(hdev);
2680
1da177e4
LT
2681 /* Send next queued raw (unknown type) packet */
2682 while ((skb = skb_dequeue(&hdev->raw_q)))
2683 hci_send_frame(skb);
1da177e4
LT
2684}
2685
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */

/* Strip the ACL header, look up the connection by handle and hand the
 * payload to L2CAP; unknown handles are logged and dropped.
 */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit header field carries both the connection handle
	 * and the packet boundary/broadcast flags. */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Notify the management interface exactly once per
		 * connection (guarded by the MGMT_CONNECTED bit). */
		hci_dev_lock(hdev);
		if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
		    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
			mgmt_device_connected(hdev, &conn->dst, conn->type,
					      conn->dst_type, 0, NULL, 0,
					      conn->dev_class);
		hci_dev_unlock(hdev);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
2731
2732/* SCO data packet */
6039aa73 2733static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
2734{
2735 struct hci_sco_hdr *hdr = (void *) skb->data;
2736 struct hci_conn *conn;
2737 __u16 handle;
2738
2739 skb_pull(skb, HCI_SCO_HDR_SIZE);
2740
2741 handle = __le16_to_cpu(hdr->handle);
2742
2743 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2744
2745 hdev->stat.sco_rx++;
2746
2747 hci_dev_lock(hdev);
2748 conn = hci_conn_hash_lookup_handle(hdev, handle);
2749 hci_dev_unlock(hdev);
2750
2751 if (conn) {
1da177e4 2752 /* Send to upper protocol */
686ebf28
UF
2753 sco_recv_scodata(conn, skb);
2754 return;
1da177e4 2755 } else {
8e87d142 2756 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 2757 hdev->name, handle);
1da177e4
LT
2758 }
2759
2760 kfree_skb(skb);
2761}
2762
/* RX worker: drain hdev->rx_q, mirror each packet to the monitor and
 * (in promiscuous mode) to raw sockets, then dispatch it to the
 * event/ACL/SCO handler according to its packet type.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* In raw mode the stack does not process packets itself. */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets while initializing;
			 * only events are meaningful in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: drop. */
			kfree_skb(skb);
			break;
		}
	}
}
2817
/* Command worker: submit the next queued HCI command when the
 * controller has credit (cmd_cnt) for it, keeping a clone in
 * hdev->sent_cmd for the completion path.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously kept command clone. */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* No command timeout is enforced while resetting. */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed: put the command back and retry. */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
2519a1fc
AG
2848
2849int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2850{
2851 /* General inquiry access code (GIAC) */
2852 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2853 struct hci_cp_inquiry cp;
2854
2855 BT_DBG("%s", hdev->name);
2856
2857 if (test_bit(HCI_INQUIRY, &hdev->flags))
2858 return -EINPROGRESS;
2859
4663262c
JH
2860 inquiry_cache_flush(hdev);
2861
2519a1fc
AG
2862 memset(&cp, 0, sizeof(cp));
2863 memcpy(&cp.lap, lap, sizeof(cp.lap));
2864 cp.length = length;
2865
2866 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2867}
023d5049
AG
2868
2869int hci_cancel_inquiry(struct hci_dev *hdev)
2870{
2871 BT_DBG("%s", hdev->name);
2872
2873 if (!test_bit(HCI_INQUIRY, &hdev->flags))
7537e5c3 2874 return -EALREADY;
023d5049
AG
2875
2876 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2877}
31f7956c
AG
2878
2879u8 bdaddr_to_le(u8 bdaddr_type)
2880{
2881 switch (bdaddr_type) {
2882 case BDADDR_LE_PUBLIC:
2883 return ADDR_LE_DEV_PUBLIC;
2884
2885 default:
2886 /* Fallback to LE Random address type */
2887 return ADDR_LE_DEV_RANDOM;
2888 }
2889}