git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - net/bluetooth/hci_core.c
Bluetooth: Expose debugfs settings for LE connection interval
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <asm/unaligned.h>
33
34 #include <net/bluetooth/bluetooth.h>
35 #include <net/bluetooth/hci_core.h>
36
37 static void hci_rx_work(struct work_struct *work);
38 static void hci_cmd_work(struct work_struct *work);
39 static void hci_tx_work(struct work_struct *work);
40
41 /* HCI device list */
42 LIST_HEAD(hci_dev_list);
43 DEFINE_RWLOCK(hci_dev_list_lock);
44
45 /* HCI callback list */
46 LIST_HEAD(hci_cb_list);
47 DEFINE_RWLOCK(hci_cb_list_lock);
48
49 /* HCI ID Numbering */
50 static DEFINE_IDA(hci_index_ida);
51
52 /* ---- HCI notifications ---- */
53
54 static void hci_notify(struct hci_dev *hdev, int event)
55 {
56 hci_sock_dev_event(hdev, event);
57 }
58
59 /* ---- HCI debugfs entries ---- */
60
61 static int features_show(struct seq_file *f, void *ptr)
62 {
63 struct hci_dev *hdev = f->private;
64 u8 p;
65
66 hci_dev_lock(hdev);
67 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
68 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
69 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
70 hdev->features[p][0], hdev->features[p][1],
71 hdev->features[p][2], hdev->features[p][3],
72 hdev->features[p][4], hdev->features[p][5],
73 hdev->features[p][6], hdev->features[p][7]);
74 }
75 if (lmp_le_capable(hdev))
76 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
77 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
78 hdev->le_features[0], hdev->le_features[1],
79 hdev->le_features[2], hdev->le_features[3],
80 hdev->le_features[4], hdev->le_features[5],
81 hdev->le_features[6], hdev->le_features[7]);
82 hci_dev_unlock(hdev);
83
84 return 0;
85 }
86
87 static int features_open(struct inode *inode, struct file *file)
88 {
89 return single_open(file, features_show, inode->i_private);
90 }
91
92 static const struct file_operations features_fops = {
93 .open = features_open,
94 .read = seq_read,
95 .llseek = seq_lseek,
96 .release = single_release,
97 };
98
99 static int blacklist_show(struct seq_file *f, void *p)
100 {
101 struct hci_dev *hdev = f->private;
102 struct bdaddr_list *b;
103
104 hci_dev_lock(hdev);
105 list_for_each_entry(b, &hdev->blacklist, list)
106 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
107 hci_dev_unlock(hdev);
108
109 return 0;
110 }
111
112 static int blacklist_open(struct inode *inode, struct file *file)
113 {
114 return single_open(file, blacklist_show, inode->i_private);
115 }
116
117 static const struct file_operations blacklist_fops = {
118 .open = blacklist_open,
119 .read = seq_read,
120 .llseek = seq_lseek,
121 .release = single_release,
122 };
123
124 static int uuids_show(struct seq_file *f, void *p)
125 {
126 struct hci_dev *hdev = f->private;
127 struct bt_uuid *uuid;
128
129 hci_dev_lock(hdev);
130 list_for_each_entry(uuid, &hdev->uuids, list) {
131 u32 data0, data5;
132 u16 data1, data2, data3, data4;
133
134 data5 = get_unaligned_le32(uuid);
135 data4 = get_unaligned_le16(uuid + 4);
136 data3 = get_unaligned_le16(uuid + 6);
137 data2 = get_unaligned_le16(uuid + 8);
138 data1 = get_unaligned_le16(uuid + 10);
139 data0 = get_unaligned_le32(uuid + 12);
140
141 seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.4x%.8x\n",
142 data0, data1, data2, data3, data4, data5);
143 }
144 hci_dev_unlock(hdev);
145
146 return 0;
147 }
148
149 static int uuids_open(struct inode *inode, struct file *file)
150 {
151 return single_open(file, uuids_show, inode->i_private);
152 }
153
154 static const struct file_operations uuids_fops = {
155 .open = uuids_open,
156 .read = seq_read,
157 .llseek = seq_lseek,
158 .release = single_release,
159 };
160
161 static int inquiry_cache_show(struct seq_file *f, void *p)
162 {
163 struct hci_dev *hdev = f->private;
164 struct discovery_state *cache = &hdev->discovery;
165 struct inquiry_entry *e;
166
167 hci_dev_lock(hdev);
168
169 list_for_each_entry(e, &cache->all, all) {
170 struct inquiry_data *data = &e->data;
171 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
172 &data->bdaddr,
173 data->pscan_rep_mode, data->pscan_period_mode,
174 data->pscan_mode, data->dev_class[2],
175 data->dev_class[1], data->dev_class[0],
176 __le16_to_cpu(data->clock_offset),
177 data->rssi, data->ssp_mode, e->timestamp);
178 }
179
180 hci_dev_unlock(hdev);
181
182 return 0;
183 }
184
185 static int inquiry_cache_open(struct inode *inode, struct file *file)
186 {
187 return single_open(file, inquiry_cache_show, inode->i_private);
188 }
189
190 static const struct file_operations inquiry_cache_fops = {
191 .open = inquiry_cache_open,
192 .read = seq_read,
193 .llseek = seq_lseek,
194 .release = single_release,
195 };
196
197 static int link_keys_show(struct seq_file *f, void *ptr)
198 {
199 struct hci_dev *hdev = f->private;
200 struct list_head *p, *n;
201
202 hci_dev_lock(hdev);
203 list_for_each_safe(p, n, &hdev->link_keys) {
204 struct link_key *key = list_entry(p, struct link_key, list);
205 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
206 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
207 }
208 hci_dev_unlock(hdev);
209
210 return 0;
211 }
212
213 static int link_keys_open(struct inode *inode, struct file *file)
214 {
215 return single_open(file, link_keys_show, inode->i_private);
216 }
217
218 static const struct file_operations link_keys_fops = {
219 .open = link_keys_open,
220 .read = seq_read,
221 .llseek = seq_lseek,
222 .release = single_release,
223 };
224
225 static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf,
226 size_t count, loff_t *ppos)
227 {
228 struct hci_dev *hdev = file->private_data;
229 char buf[3];
230
231 buf[0] = test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) ? 'Y': 'N';
232 buf[1] = '\n';
233 buf[2] = '\0';
234 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
235 }
236
237 static const struct file_operations use_debug_keys_fops = {
238 .open = simple_open,
239 .read = use_debug_keys_read,
240 .llseek = default_llseek,
241 };
242
243 static int dev_class_show(struct seq_file *f, void *ptr)
244 {
245 struct hci_dev *hdev = f->private;
246
247 hci_dev_lock(hdev);
248 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
249 hdev->dev_class[1], hdev->dev_class[0]);
250 hci_dev_unlock(hdev);
251
252 return 0;
253 }
254
255 static int dev_class_open(struct inode *inode, struct file *file)
256 {
257 return single_open(file, dev_class_show, inode->i_private);
258 }
259
260 static const struct file_operations dev_class_fops = {
261 .open = dev_class_open,
262 .read = seq_read,
263 .llseek = seq_lseek,
264 .release = single_release,
265 };
266
267 static int voice_setting_get(void *data, u64 *val)
268 {
269 struct hci_dev *hdev = data;
270
271 hci_dev_lock(hdev);
272 *val = hdev->voice_setting;
273 hci_dev_unlock(hdev);
274
275 return 0;
276 }
277
278 DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
279 NULL, "0x%4.4llx\n");
280
281 static int auto_accept_delay_set(void *data, u64 val)
282 {
283 struct hci_dev *hdev = data;
284
285 hci_dev_lock(hdev);
286 hdev->auto_accept_delay = val;
287 hci_dev_unlock(hdev);
288
289 return 0;
290 }
291
292 static int auto_accept_delay_get(void *data, u64 *val)
293 {
294 struct hci_dev *hdev = data;
295
296 hci_dev_lock(hdev);
297 *val = hdev->auto_accept_delay;
298 hci_dev_unlock(hdev);
299
300 return 0;
301 }
302
303 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
304 auto_accept_delay_set, "%llu\n");
305
306 static int ssp_debug_mode_set(void *data, u64 val)
307 {
308 struct hci_dev *hdev = data;
309 struct sk_buff *skb;
310 __u8 mode;
311 int err;
312
313 if (val != 0 && val != 1)
314 return -EINVAL;
315
316 if (!test_bit(HCI_UP, &hdev->flags))
317 return -ENETDOWN;
318
319 hci_req_lock(hdev);
320 mode = val;
321 skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
322 &mode, HCI_CMD_TIMEOUT);
323 hci_req_unlock(hdev);
324
325 if (IS_ERR(skb))
326 return PTR_ERR(skb);
327
328 err = -bt_to_errno(skb->data[0]);
329 kfree_skb(skb);
330
331 if (err < 0)
332 return err;
333
334 hci_dev_lock(hdev);
335 hdev->ssp_debug_mode = val;
336 hci_dev_unlock(hdev);
337
338 return 0;
339 }
340
341 static int ssp_debug_mode_get(void *data, u64 *val)
342 {
343 struct hci_dev *hdev = data;
344
345 hci_dev_lock(hdev);
346 *val = hdev->ssp_debug_mode;
347 hci_dev_unlock(hdev);
348
349 return 0;
350 }
351
352 DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
353 ssp_debug_mode_set, "%llu\n");
354
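/* Idle timeout is expressed in milliseconds; 0 disables it, and the
 * setter below otherwise only accepts values between 500 ms and
 * 3600000 ms (one hour).
 */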
355 static int idle_timeout_set(void *data, u64 val)
356 {
357 struct hci_dev *hdev = data;
358
359 if (val != 0 && (val < 500 || val > 3600000))
360 return -EINVAL;
361
362 hci_dev_lock(hdev);
363 hdev->idle_timeout = val;
364 hci_dev_unlock(hdev);
365
366 return 0;
367 }
368
369 static int idle_timeout_get(void *data, u64 *val)
370 {
371 struct hci_dev *hdev = data;
372
373 hci_dev_lock(hdev);
374 *val = hdev->idle_timeout;
375 hci_dev_unlock(hdev);
376
377 return 0;
378 }
379
380 DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
381 idle_timeout_set, "%llu\n");
382
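/* Sniff min/max intervals are expressed in baseband slots of 0.625 ms.
 * The setters below require a non-zero even value and keep the
 * invariant sniff_min_interval <= sniff_max_interval.
 */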
383 static int sniff_min_interval_set(void *data, u64 val)
384 {
385 struct hci_dev *hdev = data;
386
387 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
388 return -EINVAL;
389
390 hci_dev_lock(hdev);
391 hdev->sniff_min_interval = val;
392 hci_dev_unlock(hdev);
393
394 return 0;
395 }
396
397 static int sniff_min_interval_get(void *data, u64 *val)
398 {
399 struct hci_dev *hdev = data;
400
401 hci_dev_lock(hdev);
402 *val = hdev->sniff_min_interval;
403 hci_dev_unlock(hdev);
404
405 return 0;
406 }
407
408 DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
409 sniff_min_interval_set, "%llu\n");
410
411 static int sniff_max_interval_set(void *data, u64 val)
412 {
413 struct hci_dev *hdev = data;
414
415 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
416 return -EINVAL;
417
418 hci_dev_lock(hdev);
419 hdev->sniff_max_interval = val;
420 hci_dev_unlock(hdev);
421
422 return 0;
423 }
424
425 static int sniff_max_interval_get(void *data, u64 *val)
426 {
427 struct hci_dev *hdev = data;
428
429 hci_dev_lock(hdev);
430 *val = hdev->sniff_max_interval;
431 hci_dev_unlock(hdev);
432
433 return 0;
434 }
435
436 DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
437 sniff_max_interval_set, "%llu\n");
438
439 static int static_address_show(struct seq_file *f, void *p)
440 {
441 struct hci_dev *hdev = f->private;
442
443 hci_dev_lock(hdev);
444 seq_printf(f, "%pMR\n", &hdev->static_addr);
445 hci_dev_unlock(hdev);
446
447 return 0;
448 }
449
450 static int static_address_open(struct inode *inode, struct file *file)
451 {
452 return single_open(file, static_address_show, inode->i_private);
453 }
454
455 static const struct file_operations static_address_fops = {
456 .open = static_address_open,
457 .read = seq_read,
458 .llseek = seq_lseek,
459 .release = single_release,
460 };
461
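/* Own LE address type: the setter below accepts only 0 or 1.
 * hci_init3_req() defaults this to ADDR_LE_DEV_PUBLIC when the
 * controller has a public BD_ADDR and to ADDR_LE_DEV_RANDOM otherwise.
 */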
462 static int own_address_type_set(void *data, u64 val)
463 {
464 struct hci_dev *hdev = data;
465
466 if (val != 0 && val != 1)
467 return -EINVAL;
468
469 hci_dev_lock(hdev);
470 hdev->own_addr_type = val;
471 hci_dev_unlock(hdev);
472
473 return 0;
474 }
475
476 static int own_address_type_get(void *data, u64 *val)
477 {
478 struct hci_dev *hdev = data;
479
480 hci_dev_lock(hdev);
481 *val = hdev->own_addr_type;
482 hci_dev_unlock(hdev);
483
484 return 0;
485 }
486
487 DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
488 own_address_type_set, "%llu\n");
489
490 static int long_term_keys_show(struct seq_file *f, void *ptr)
491 {
492 struct hci_dev *hdev = f->private;
493 struct list_head *p, *n;
494
495 hci_dev_lock(hdev);
496 list_for_each_safe(p, n, &hdev->long_term_keys) {
497 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
498 seq_printf(f, "%pMR (type %u) %u %u %u %.4x %*phN %*phN\n",
499 &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
500 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
501 8, ltk->rand, 16, ltk->val);
502 }
503 hci_dev_unlock(hdev);
504
505 return 0;
506 }
507
508 static int long_term_keys_open(struct inode *inode, struct file *file)
509 {
510 return single_open(file, long_term_keys_show, inode->i_private);
511 }
512
513 static const struct file_operations long_term_keys_fops = {
514 .open = long_term_keys_open,
515 .read = seq_read,
516 .llseek = seq_lseek,
517 .release = single_release,
518 };
519
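/* LE connection interval min/max are expressed in units of 1.25 ms.
 * The setters below enforce the valid range of 0x0006 (7.5 ms) to
 * 0x0c80 (4 s) and keep le_conn_min_interval <= le_conn_max_interval.
 */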
520 static int conn_min_interval_set(void *data, u64 val)
521 {
522 struct hci_dev *hdev = data;
523
524 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
525 return -EINVAL;
526
527 hci_dev_lock(hdev);
528 hdev->le_conn_min_interval = val;
529 hci_dev_unlock(hdev);
530
531 return 0;
532 }
533
534 static int conn_min_interval_get(void *data, u64 *val)
535 {
536 struct hci_dev *hdev = data;
537
538 hci_dev_lock(hdev);
539 *val = hdev->le_conn_min_interval;
540 hci_dev_unlock(hdev);
541
542 return 0;
543 }
544
545 DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
546 conn_min_interval_set, "%llu\n");
547
548 static int conn_max_interval_set(void *data, u64 val)
549 {
550 struct hci_dev *hdev = data;
551
552 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
553 return -EINVAL;
554
555 hci_dev_lock(hdev);
556 hdev->le_conn_max_interval = val;
557 hci_dev_unlock(hdev);
558
559 return 0;
560 }
561
562 static int conn_max_interval_get(void *data, u64 *val)
563 {
564 struct hci_dev *hdev = data;
565
566 hci_dev_lock(hdev);
567 *val = hdev->le_conn_max_interval;
568 hci_dev_unlock(hdev);
569
570 return 0;
571 }
572
573 DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
574 conn_max_interval_set, "%llu\n");
575
576 /* ---- HCI requests ---- */
577
578 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
579 {
580 BT_DBG("%s result 0x%2.2x", hdev->name, result);
581
582 if (hdev->req_status == HCI_REQ_PEND) {
583 hdev->req_result = result;
584 hdev->req_status = HCI_REQ_DONE;
585 wake_up_interruptible(&hdev->req_wait_q);
586 }
587 }
588
589 static void hci_req_cancel(struct hci_dev *hdev, int err)
590 {
591 BT_DBG("%s err 0x%2.2x", hdev->name, err);
592
593 if (hdev->req_status == HCI_REQ_PEND) {
594 hdev->req_result = err;
595 hdev->req_status = HCI_REQ_CANCELED;
596 wake_up_interruptible(&hdev->req_wait_q);
597 }
598 }
599
600 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
601 u8 event)
602 {
603 struct hci_ev_cmd_complete *ev;
604 struct hci_event_hdr *hdr;
605 struct sk_buff *skb;
606
607 hci_dev_lock(hdev);
608
609 skb = hdev->recv_evt;
610 hdev->recv_evt = NULL;
611
612 hci_dev_unlock(hdev);
613
614 if (!skb)
615 return ERR_PTR(-ENODATA);
616
617 if (skb->len < sizeof(*hdr)) {
618 BT_ERR("Too short HCI event");
619 goto failed;
620 }
621
622 hdr = (void *) skb->data;
623 skb_pull(skb, HCI_EVENT_HDR_SIZE);
624
625 if (event) {
626 if (hdr->evt != event)
627 goto failed;
628 return skb;
629 }
630
631 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
632 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
633 goto failed;
634 }
635
636 if (skb->len < sizeof(*ev)) {
637 BT_ERR("Too short cmd_complete event");
638 goto failed;
639 }
640
641 ev = (void *) skb->data;
642 skb_pull(skb, sizeof(*ev));
643
644 if (opcode == __le16_to_cpu(ev->opcode))
645 return skb;
646
647 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
648 __le16_to_cpu(ev->opcode));
649
650 failed:
651 kfree_skb(skb);
652 return ERR_PTR(-ENODATA);
653 }
654
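/* Send a single HCI command and wait synchronously for the result.
 * On success the skb of the matching Command Complete event (or of
 * the specific event requested via the event argument) is returned
 * with the event headers already pulled; otherwise an ERR_PTR is
 * returned.
 */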
655 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
656 const void *param, u8 event, u32 timeout)
657 {
658 DECLARE_WAITQUEUE(wait, current);
659 struct hci_request req;
660 int err = 0;
661
662 BT_DBG("%s", hdev->name);
663
664 hci_req_init(&req, hdev);
665
666 hci_req_add_ev(&req, opcode, plen, param, event);
667
668 hdev->req_status = HCI_REQ_PEND;
669
670 err = hci_req_run(&req, hci_req_sync_complete);
671 if (err < 0)
672 return ERR_PTR(err);
673
674 add_wait_queue(&hdev->req_wait_q, &wait);
675 set_current_state(TASK_INTERRUPTIBLE);
676
677 schedule_timeout(timeout);
678
679 remove_wait_queue(&hdev->req_wait_q, &wait);
680
681 if (signal_pending(current))
682 return ERR_PTR(-EINTR);
683
684 switch (hdev->req_status) {
685 case HCI_REQ_DONE:
686 err = -bt_to_errno(hdev->req_result);
687 break;
688
689 case HCI_REQ_CANCELED:
690 err = -hdev->req_result;
691 break;
692
693 default:
694 err = -ETIMEDOUT;
695 break;
696 }
697
698 hdev->req_status = hdev->req_result = 0;
699
700 BT_DBG("%s end: err %d", hdev->name, err);
701
702 if (err < 0)
703 return ERR_PTR(err);
704
705 return hci_get_cmd_complete(hdev, opcode, event);
706 }
707 EXPORT_SYMBOL(__hci_cmd_sync_ev);
708
709 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
710 const void *param, u32 timeout)
711 {
712 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
713 }
714 EXPORT_SYMBOL(__hci_cmd_sync);
715
716 /* Execute request and wait for completion. */
717 static int __hci_req_sync(struct hci_dev *hdev,
718 void (*func)(struct hci_request *req,
719 unsigned long opt),
720 unsigned long opt, __u32 timeout)
721 {
722 struct hci_request req;
723 DECLARE_WAITQUEUE(wait, current);
724 int err = 0;
725
726 BT_DBG("%s start", hdev->name);
727
728 hci_req_init(&req, hdev);
729
730 hdev->req_status = HCI_REQ_PEND;
731
732 func(&req, opt);
733
734 err = hci_req_run(&req, hci_req_sync_complete);
735 if (err < 0) {
736 hdev->req_status = 0;
737
738 /* ENODATA means the HCI request command queue is empty.
739 * This can happen when a request with conditionals doesn't
740 * trigger any commands to be sent. This is normal behavior
741 * and should not trigger an error return.
742 */
743 if (err == -ENODATA)
744 return 0;
745
746 return err;
747 }
748
749 add_wait_queue(&hdev->req_wait_q, &wait);
750 set_current_state(TASK_INTERRUPTIBLE);
751
752 schedule_timeout(timeout);
753
754 remove_wait_queue(&hdev->req_wait_q, &wait);
755
756 if (signal_pending(current))
757 return -EINTR;
758
759 switch (hdev->req_status) {
760 case HCI_REQ_DONE:
761 err = -bt_to_errno(hdev->req_result);
762 break;
763
764 case HCI_REQ_CANCELED:
765 err = -hdev->req_result;
766 break;
767
768 default:
769 err = -ETIMEDOUT;
770 break;
771 }
772
773 hdev->req_status = hdev->req_result = 0;
774
775 BT_DBG("%s end: err %d", hdev->name, err);
776
777 return err;
778 }
779
780 static int hci_req_sync(struct hci_dev *hdev,
781 void (*req)(struct hci_request *req,
782 unsigned long opt),
783 unsigned long opt, __u32 timeout)
784 {
785 int ret;
786
787 if (!test_bit(HCI_UP, &hdev->flags))
788 return -ENETDOWN;
789
790 /* Serialize all requests */
791 hci_req_lock(hdev);
792 ret = __hci_req_sync(hdev, req, opt, timeout);
793 hci_req_unlock(hdev);
794
795 return ret;
796 }
797
798 static void hci_reset_req(struct hci_request *req, unsigned long opt)
799 {
800 BT_DBG("%s %ld", req->hdev->name, opt);
801
802 /* Reset device */
803 set_bit(HCI_RESET, &req->hdev->flags);
804 hci_req_add(req, HCI_OP_RESET, 0, NULL);
805 }
806
807 static void bredr_init(struct hci_request *req)
808 {
809 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
810
811 /* Read Local Supported Features */
812 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
813
814 /* Read Local Version */
815 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
816
817 /* Read BD Address */
818 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
819 }
820
821 static void amp_init(struct hci_request *req)
822 {
823 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
824
825 /* Read Local Version */
826 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
827
828 /* Read Local Supported Commands */
829 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
830
831 /* Read Local Supported Features */
832 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
833
834 /* Read Local AMP Info */
835 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
836
837 /* Read Data Blk size */
838 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
839
840 /* Read Flow Control Mode */
841 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
842
843 /* Read Location Data */
844 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
845 }
846
847 static void hci_init1_req(struct hci_request *req, unsigned long opt)
848 {
849 struct hci_dev *hdev = req->hdev;
850
851 BT_DBG("%s %ld", hdev->name, opt);
852
853 /* Reset */
854 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
855 hci_reset_req(req, 0);
856
857 switch (hdev->dev_type) {
858 case HCI_BREDR:
859 bredr_init(req);
860 break;
861
862 case HCI_AMP:
863 amp_init(req);
864 break;
865
866 default:
867 BT_ERR("Unknown device type %d", hdev->dev_type);
868 break;
869 }
870 }
871
872 static void bredr_setup(struct hci_request *req)
873 {
874 struct hci_dev *hdev = req->hdev;
875
876 __le16 param;
877 __u8 flt_type;
878
879 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
880 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
881
882 /* Read Class of Device */
883 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
884
885 /* Read Local Name */
886 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
887
888 /* Read Voice Setting */
889 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
890
891 /* Read Number of Supported IAC */
892 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
893
894 /* Read Current IAC LAP */
895 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
896
897 /* Clear Event Filters */
898 flt_type = HCI_FLT_CLEAR_ALL;
899 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
900
901 /* Connection accept timeout ~20 secs */
902 param = __constant_cpu_to_le16(0x7d00);
903 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
904
905 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
906 * but it does not support page scan related HCI commands.
907 */
908 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
909 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
910 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
911 }
912 }
913
914 static void le_setup(struct hci_request *req)
915 {
916 struct hci_dev *hdev = req->hdev;
917
918 /* Read LE Buffer Size */
919 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
920
921 /* Read LE Local Supported Features */
922 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
923
924 /* Read LE Advertising Channel TX Power */
925 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
926
927 /* Read LE White List Size */
928 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
929
930 /* Read LE Supported States */
931 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
932
933 /* LE-only controllers have LE implicitly enabled */
934 if (!lmp_bredr_capable(hdev))
935 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
936 }
937
938 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
939 {
940 if (lmp_ext_inq_capable(hdev))
941 return 0x02;
942
943 if (lmp_inq_rssi_capable(hdev))
944 return 0x01;
945
946 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
947 hdev->lmp_subver == 0x0757)
948 return 0x01;
949
950 if (hdev->manufacturer == 15) {
951 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
952 return 0x01;
953 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
954 return 0x01;
955 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
956 return 0x01;
957 }
958
959 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
960 hdev->lmp_subver == 0x1805)
961 return 0x01;
962
963 return 0x00;
964 }
965
966 static void hci_setup_inquiry_mode(struct hci_request *req)
967 {
968 u8 mode;
969
970 mode = hci_get_inquiry_mode(req->hdev);
971
972 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
973 }
974
975 static void hci_setup_event_mask(struct hci_request *req)
976 {
977 struct hci_dev *hdev = req->hdev;
978
979 /* The second byte is 0xff instead of 0x9f (two reserved bits
980 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
981 * command otherwise.
982 */
983 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
984
985 /* CSR 1.1 dongles do not accept any bitfield so don't try to set
986 * any event mask for pre 1.2 devices.
987 */
988 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
989 return;
990
991 if (lmp_bredr_capable(hdev)) {
992 events[4] |= 0x01; /* Flow Specification Complete */
993 events[4] |= 0x02; /* Inquiry Result with RSSI */
994 events[4] |= 0x04; /* Read Remote Extended Features Complete */
995 events[5] |= 0x08; /* Synchronous Connection Complete */
996 events[5] |= 0x10; /* Synchronous Connection Changed */
997 } else {
998 /* Use a different default for LE-only devices */
999 memset(events, 0, sizeof(events));
1000 events[0] |= 0x10; /* Disconnection Complete */
1001 events[0] |= 0x80; /* Encryption Change */
1002 events[1] |= 0x08; /* Read Remote Version Information Complete */
1003 events[1] |= 0x20; /* Command Complete */
1004 events[1] |= 0x40; /* Command Status */
1005 events[1] |= 0x80; /* Hardware Error */
1006 events[2] |= 0x04; /* Number of Completed Packets */
1007 events[3] |= 0x02; /* Data Buffer Overflow */
1008 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1009 }
1010
1011 if (lmp_inq_rssi_capable(hdev))
1012 events[4] |= 0x02; /* Inquiry Result with RSSI */
1013
1014 if (lmp_sniffsubr_capable(hdev))
1015 events[5] |= 0x20; /* Sniff Subrating */
1016
1017 if (lmp_pause_enc_capable(hdev))
1018 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1019
1020 if (lmp_ext_inq_capable(hdev))
1021 events[5] |= 0x40; /* Extended Inquiry Result */
1022
1023 if (lmp_no_flush_capable(hdev))
1024 events[7] |= 0x01; /* Enhanced Flush Complete */
1025
1026 if (lmp_lsto_capable(hdev))
1027 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1028
1029 if (lmp_ssp_capable(hdev)) {
1030 events[6] |= 0x01; /* IO Capability Request */
1031 events[6] |= 0x02; /* IO Capability Response */
1032 events[6] |= 0x04; /* User Confirmation Request */
1033 events[6] |= 0x08; /* User Passkey Request */
1034 events[6] |= 0x10; /* Remote OOB Data Request */
1035 events[6] |= 0x20; /* Simple Pairing Complete */
1036 events[7] |= 0x04; /* User Passkey Notification */
1037 events[7] |= 0x08; /* Keypress Notification */
1038 events[7] |= 0x10; /* Remote Host Supported
1039 * Features Notification
1040 */
1041 }
1042
1043 if (lmp_le_capable(hdev))
1044 events[7] |= 0x20; /* LE Meta-Event */
1045
1046 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
1047
1048 if (lmp_le_capable(hdev)) {
1049 memset(events, 0, sizeof(events));
1050 events[0] = 0x1f;
1051 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
1052 sizeof(events), events);
1053 }
1054 }
1055
1056 static void hci_init2_req(struct hci_request *req, unsigned long opt)
1057 {
1058 struct hci_dev *hdev = req->hdev;
1059
1060 if (lmp_bredr_capable(hdev))
1061 bredr_setup(req);
1062 else
1063 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1064
1065 if (lmp_le_capable(hdev))
1066 le_setup(req);
1067
1068 hci_setup_event_mask(req);
1069
1070 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1071 * local supported commands HCI command.
1072 */
1073 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1074 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1075
1076 if (lmp_ssp_capable(hdev)) {
1077 /* When SSP is available, the host features page
1078 * should be available as well. However, some
1079 * controllers report max_page as 0 as long as SSP
1080 * has not been enabled. To get proper debugging
1081 * output, force max_page to at least 1.
1082 */
1083 hdev->max_page = 0x01;
1084
1085 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1086 u8 mode = 0x01;
1087 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1088 sizeof(mode), &mode);
1089 } else {
1090 struct hci_cp_write_eir cp;
1091
1092 memset(hdev->eir, 0, sizeof(hdev->eir));
1093 memset(&cp, 0, sizeof(cp));
1094
1095 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1096 }
1097 }
1098
1099 if (lmp_inq_rssi_capable(hdev))
1100 hci_setup_inquiry_mode(req);
1101
1102 if (lmp_inq_tx_pwr_capable(hdev))
1103 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1104
1105 if (lmp_ext_feat_capable(hdev)) {
1106 struct hci_cp_read_local_ext_features cp;
1107
1108 cp.page = 0x01;
1109 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1110 sizeof(cp), &cp);
1111 }
1112
1113 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1114 u8 enable = 1;
1115 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1116 &enable);
1117 }
1118 }
1119
1120 static void hci_setup_link_policy(struct hci_request *req)
1121 {
1122 struct hci_dev *hdev = req->hdev;
1123 struct hci_cp_write_def_link_policy cp;
1124 u16 link_policy = 0;
1125
1126 if (lmp_rswitch_capable(hdev))
1127 link_policy |= HCI_LP_RSWITCH;
1128 if (lmp_hold_capable(hdev))
1129 link_policy |= HCI_LP_HOLD;
1130 if (lmp_sniff_capable(hdev))
1131 link_policy |= HCI_LP_SNIFF;
1132 if (lmp_park_capable(hdev))
1133 link_policy |= HCI_LP_PARK;
1134
1135 cp.policy = cpu_to_le16(link_policy);
1136 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
1137 }
1138
1139 static void hci_set_le_support(struct hci_request *req)
1140 {
1141 struct hci_dev *hdev = req->hdev;
1142 struct hci_cp_write_le_host_supported cp;
1143
1144 /* LE-only devices do not support explicit enablement */
1145 if (!lmp_bredr_capable(hdev))
1146 return;
1147
1148 memset(&cp, 0, sizeof(cp));
1149
1150 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1151 cp.le = 0x01;
1152 cp.simul = lmp_le_br_capable(hdev);
1153 }
1154
1155 if (cp.le != lmp_host_le_capable(hdev))
1156 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1157 &cp);
1158 }
1159
1160 static void hci_set_event_mask_page_2(struct hci_request *req)
1161 {
1162 struct hci_dev *hdev = req->hdev;
1163 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1164
1165 /* If Connectionless Slave Broadcast master role is supported
1166 * enable all necessary events for it.
1167 */
1168 if (hdev->features[2][0] & 0x01) {
1169 events[1] |= 0x40; /* Triggered Clock Capture */
1170 events[1] |= 0x80; /* Synchronization Train Complete */
1171 events[2] |= 0x10; /* Slave Page Response Timeout */
1172 events[2] |= 0x20; /* CSB Channel Map Change */
1173 }
1174
1175 /* If Connectionless Slave Broadcast slave role is supported
1176 * enable all necessary events for it.
1177 */
1178 if (hdev->features[2][0] & 0x02) {
1179 events[2] |= 0x01; /* Synchronization Train Received */
1180 events[2] |= 0x02; /* CSB Receive */
1181 events[2] |= 0x04; /* CSB Timeout */
1182 events[2] |= 0x08; /* Truncated Page Complete */
1183 }
1184
1185 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1186 }
1187
1188 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1189 {
1190 struct hci_dev *hdev = req->hdev;
1191 u8 p;
1192
1193 /* Some Broadcom based Bluetooth controllers do not support the
1194 * Delete Stored Link Key command. They are clearly indicating its
1195 * absence in the bit mask of supported commands.
1196 *
1197 * Check the supported commands and only if the command is marked
1198 * as supported send it. If not supported assume that the controller
1199 * does not have actual support for stored link keys which makes this
1200 * command redundant anyway.
1201 */
1202 if (hdev->commands[6] & 0x80) {
1203 struct hci_cp_delete_stored_link_key cp;
1204
1205 bacpy(&cp.bdaddr, BDADDR_ANY);
1206 cp.delete_all = 0x01;
1207 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1208 sizeof(cp), &cp);
1209 }
1210
1211 if (hdev->commands[5] & 0x10)
1212 hci_setup_link_policy(req);
1213
1214 if (lmp_le_capable(hdev)) {
1215 /* If the controller has a public BD_ADDR, then by
1216 * default use that one. If this is a LE only
1217 * controller without one, default to the random
1218 * address.
1219 */
1220 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1221 hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
1222 else
1223 hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
1224
1225 hci_set_le_support(req);
1226 }
1227
1228 /* Read features beyond page 1 if available */
1229 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1230 struct hci_cp_read_local_ext_features cp;
1231
1232 cp.page = p;
1233 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1234 sizeof(cp), &cp);
1235 }
1236 }
1237
1238 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1239 {
1240 struct hci_dev *hdev = req->hdev;
1241
1242 /* Set event mask page 2 if the HCI command for it is supported */
1243 if (hdev->commands[22] & 0x04)
1244 hci_set_event_mask_page_2(req);
1245
1246 /* Check for Synchronization Train support */
1247 if (hdev->features[2][0] & 0x04)
1248 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1249 }
1250
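/* Controller initialization runs as up to four synchronous request
 * stages. AMP controllers stop after the first stage; BR/EDR/LE
 * controllers run all stages and, during the initial HCI_SETUP phase
 * only, get their debugfs entries created afterwards.
 */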
1251 static int __hci_init(struct hci_dev *hdev)
1252 {
1253 int err;
1254
1255 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1256 if (err < 0)
1257 return err;
1258
1259 /* HCI_BREDR covers both single-mode (LE or BR/EDR) and
1260 * dual-mode BR/EDR/LE type controllers. AMP controllers only
1261 * need the first stage init.
1262 */
1263 if (hdev->dev_type != HCI_BREDR)
1264 return 0;
1265
1266 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1267 if (err < 0)
1268 return err;
1269
1270 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1271 if (err < 0)
1272 return err;
1273
1274 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1275 if (err < 0)
1276 return err;
1277
1278 /* Only create debugfs entries during the initial setup
1279 * phase and not every time the controller gets powered on.
1280 */
1281 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1282 return 0;
1283
1284 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1285 &features_fops);
1286 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1287 &hdev->manufacturer);
1288 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1289 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1290 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1291 &blacklist_fops);
1292 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1293
1294 if (lmp_bredr_capable(hdev)) {
1295 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1296 hdev, &inquiry_cache_fops);
1297 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1298 hdev, &link_keys_fops);
1299 debugfs_create_file("use_debug_keys", 0444, hdev->debugfs,
1300 hdev, &use_debug_keys_fops);
1301 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1302 hdev, &dev_class_fops);
1303 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1304 hdev, &voice_setting_fops);
1305 }
1306
1307 if (lmp_ssp_capable(hdev)) {
1308 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1309 hdev, &auto_accept_delay_fops);
1310 debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
1311 hdev, &ssp_debug_mode_fops);
1312 }
1313
1314 if (lmp_sniff_capable(hdev)) {
1315 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1316 hdev, &idle_timeout_fops);
1317 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1318 hdev, &sniff_min_interval_fops);
1319 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1320 hdev, &sniff_max_interval_fops);
1321 }
1322
1323 if (lmp_le_capable(hdev)) {
1324 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1325 &hdev->le_white_list_size);
1326 debugfs_create_file("static_address", 0444, hdev->debugfs,
1327 hdev, &static_address_fops);
1328 debugfs_create_file("own_address_type", 0644, hdev->debugfs,
1329 hdev, &own_address_type_fops);
1330 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1331 hdev, &long_term_keys_fops);
1332 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1333 hdev, &conn_min_interval_fops);
1334 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1335 hdev, &conn_max_interval_fops);
1336 }
1337
1338 return 0;
1339 }
1340
1341 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1342 {
1343 __u8 scan = opt;
1344
1345 BT_DBG("%s %x", req->hdev->name, scan);
1346
1347 /* Inquiry and Page scans */
1348 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1349 }
1350
1351 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1352 {
1353 __u8 auth = opt;
1354
1355 BT_DBG("%s %x", req->hdev->name, auth);
1356
1357 /* Authentication */
1358 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1359 }
1360
1361 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1362 {
1363 __u8 encrypt = opt;
1364
1365 BT_DBG("%s %x", req->hdev->name, encrypt);
1366
1367 /* Encryption */
1368 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1369 }
1370
1371 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1372 {
1373 __le16 policy = cpu_to_le16(opt);
1374
1375 BT_DBG("%s %x", req->hdev->name, policy);
1376
1377 /* Default link policy */
1378 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1379 }
1380
1381 /* Get HCI device by index.
1382 * Device is held on return. */
1383 struct hci_dev *hci_dev_get(int index)
1384 {
1385 struct hci_dev *hdev = NULL, *d;
1386
1387 BT_DBG("%d", index);
1388
1389 if (index < 0)
1390 return NULL;
1391
1392 read_lock(&hci_dev_list_lock);
1393 list_for_each_entry(d, &hci_dev_list, list) {
1394 if (d->id == index) {
1395 hdev = hci_dev_hold(d);
1396 break;
1397 }
1398 }
1399 read_unlock(&hci_dev_list_lock);
1400 return hdev;
1401 }
1402
1403 /* ---- Inquiry support ---- */
1404
1405 bool hci_discovery_active(struct hci_dev *hdev)
1406 {
1407 struct discovery_state *discov = &hdev->discovery;
1408
1409 switch (discov->state) {
1410 case DISCOVERY_FINDING:
1411 case DISCOVERY_RESOLVING:
1412 return true;
1413
1414 default:
1415 return false;
1416 }
1417 }
1418
1419 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1420 {
1421 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1422
1423 if (hdev->discovery.state == state)
1424 return;
1425
1426 switch (state) {
1427 case DISCOVERY_STOPPED:
1428 if (hdev->discovery.state != DISCOVERY_STARTING)
1429 mgmt_discovering(hdev, 0);
1430 break;
1431 case DISCOVERY_STARTING:
1432 break;
1433 case DISCOVERY_FINDING:
1434 mgmt_discovering(hdev, 1);
1435 break;
1436 case DISCOVERY_RESOLVING:
1437 break;
1438 case DISCOVERY_STOPPING:
1439 break;
1440 }
1441
1442 hdev->discovery.state = state;
1443 }
1444
1445 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1446 {
1447 struct discovery_state *cache = &hdev->discovery;
1448 struct inquiry_entry *p, *n;
1449
1450 list_for_each_entry_safe(p, n, &cache->all, all) {
1451 list_del(&p->all);
1452 kfree(p);
1453 }
1454
1455 INIT_LIST_HEAD(&cache->unknown);
1456 INIT_LIST_HEAD(&cache->resolve);
1457 }
1458
1459 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1460 bdaddr_t *bdaddr)
1461 {
1462 struct discovery_state *cache = &hdev->discovery;
1463 struct inquiry_entry *e;
1464
1465 BT_DBG("cache %p, %pMR", cache, bdaddr);
1466
1467 list_for_each_entry(e, &cache->all, all) {
1468 if (!bacmp(&e->data.bdaddr, bdaddr))
1469 return e;
1470 }
1471
1472 return NULL;
1473 }
1474
1475 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1476 bdaddr_t *bdaddr)
1477 {
1478 struct discovery_state *cache = &hdev->discovery;
1479 struct inquiry_entry *e;
1480
1481 BT_DBG("cache %p, %pMR", cache, bdaddr);
1482
1483 list_for_each_entry(e, &cache->unknown, list) {
1484 if (!bacmp(&e->data.bdaddr, bdaddr))
1485 return e;
1486 }
1487
1488 return NULL;
1489 }
1490
1491 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1492 bdaddr_t *bdaddr,
1493 int state)
1494 {
1495 struct discovery_state *cache = &hdev->discovery;
1496 struct inquiry_entry *e;
1497
1498 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1499
1500 list_for_each_entry(e, &cache->resolve, list) {
1501 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1502 return e;
1503 if (!bacmp(&e->data.bdaddr, bdaddr))
1504 return e;
1505 }
1506
1507 return NULL;
1508 }
1509
1510 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1511 struct inquiry_entry *ie)
1512 {
1513 struct discovery_state *cache = &hdev->discovery;
1514 struct list_head *pos = &cache->resolve;
1515 struct inquiry_entry *p;
1516
1517 list_del(&ie->list);
1518
1519 list_for_each_entry(p, &cache->resolve, list) {
1520 if (p->name_state != NAME_PENDING &&
1521 abs(p->data.rssi) >= abs(ie->data.rssi))
1522 break;
1523 pos = &p->list;
1524 }
1525
1526 list_add(&ie->list, pos);
1527 }
1528
1529 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1530 bool name_known, bool *ssp)
1531 {
1532 struct discovery_state *cache = &hdev->discovery;
1533 struct inquiry_entry *ie;
1534
1535 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1536
1537 hci_remove_remote_oob_data(hdev, &data->bdaddr);
1538
1539 if (ssp)
1540 *ssp = data->ssp_mode;
1541
1542 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1543 if (ie) {
1544 if (ie->data.ssp_mode && ssp)
1545 *ssp = true;
1546
1547 if (ie->name_state == NAME_NEEDED &&
1548 data->rssi != ie->data.rssi) {
1549 ie->data.rssi = data->rssi;
1550 hci_inquiry_cache_update_resolve(hdev, ie);
1551 }
1552
1553 goto update;
1554 }
1555
1556 /* Entry not in the cache. Add new one. */
1557 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
1558 if (!ie)
1559 return false;
1560
1561 list_add(&ie->all, &cache->all);
1562
1563 if (name_known) {
1564 ie->name_state = NAME_KNOWN;
1565 } else {
1566 ie->name_state = NAME_NOT_KNOWN;
1567 list_add(&ie->list, &cache->unknown);
1568 }
1569
1570 update:
1571 if (name_known && ie->name_state != NAME_KNOWN &&
1572 ie->name_state != NAME_PENDING) {
1573 ie->name_state = NAME_KNOWN;
1574 list_del(&ie->list);
1575 }
1576
1577 memcpy(&ie->data, data, sizeof(*data));
1578 ie->timestamp = jiffies;
1579 cache->timestamp = jiffies;
1580
1581 if (ie->name_state == NAME_NOT_KNOWN)
1582 return false;
1583
1584 return true;
1585 }
1586
1587 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1588 {
1589 struct discovery_state *cache = &hdev->discovery;
1590 struct inquiry_info *info = (struct inquiry_info *) buf;
1591 struct inquiry_entry *e;
1592 int copied = 0;
1593
1594 list_for_each_entry(e, &cache->all, all) {
1595 struct inquiry_data *data = &e->data;
1596
1597 if (copied >= num)
1598 break;
1599
1600 bacpy(&info->bdaddr, &data->bdaddr);
1601 info->pscan_rep_mode = data->pscan_rep_mode;
1602 info->pscan_period_mode = data->pscan_period_mode;
1603 info->pscan_mode = data->pscan_mode;
1604 memcpy(info->dev_class, data->dev_class, 3);
1605 info->clock_offset = data->clock_offset;
1606
1607 info++;
1608 copied++;
1609 }
1610
1611 BT_DBG("cache %p, copied %d", cache, copied);
1612 return copied;
1613 }
1614
1615 static void hci_inq_req(struct hci_request *req, unsigned long opt)
1616 {
1617 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1618 struct hci_dev *hdev = req->hdev;
1619 struct hci_cp_inquiry cp;
1620
1621 BT_DBG("%s", hdev->name);
1622
1623 if (test_bit(HCI_INQUIRY, &hdev->flags))
1624 return;
1625
1626 /* Start Inquiry */
1627 memcpy(&cp.lap, &ir->lap, 3);
1628 cp.length = ir->length;
1629 cp.num_rsp = ir->num_rsp;
1630 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1631 }
1632
1633 static int wait_inquiry(void *word)
1634 {
1635 schedule();
1636 return signal_pending(current);
1637 }
1638
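/* Handler for the HCIINQUIRY ioctl: flush and re-run the inquiry if
 * the cache is stale or a flush was requested, then copy the cached
 * results to user space, capping "unlimited" requests at 255 entries.
 */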
1639 int hci_inquiry(void __user *arg)
1640 {
1641 __u8 __user *ptr = arg;
1642 struct hci_inquiry_req ir;
1643 struct hci_dev *hdev;
1644 int err = 0, do_inquiry = 0, max_rsp;
1645 long timeo;
1646 __u8 *buf;
1647
1648 if (copy_from_user(&ir, ptr, sizeof(ir)))
1649 return -EFAULT;
1650
1651 hdev = hci_dev_get(ir.dev_id);
1652 if (!hdev)
1653 return -ENODEV;
1654
1655 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1656 err = -EBUSY;
1657 goto done;
1658 }
1659
1660 if (hdev->dev_type != HCI_BREDR) {
1661 err = -EOPNOTSUPP;
1662 goto done;
1663 }
1664
1665 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1666 err = -EOPNOTSUPP;
1667 goto done;
1668 }
1669
1670 hci_dev_lock(hdev);
1671 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1672 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1673 hci_inquiry_cache_flush(hdev);
1674 do_inquiry = 1;
1675 }
1676 hci_dev_unlock(hdev);
1677
1678 timeo = ir.length * msecs_to_jiffies(2000);
1679
1680 if (do_inquiry) {
1681 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1682 timeo);
1683 if (err < 0)
1684 goto done;
1685
1686 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1687 * cleared). If it is interrupted by a signal, return -EINTR.
1688 */
1689 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1690 TASK_INTERRUPTIBLE))
1691 return -EINTR;
1692 }
1693
1694 /* For an unlimited number of responses, use a buffer with
1695 * 255 entries.
1696 */
1697 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1698
1699 /* cache_dump can't sleep. Therefore we allocate a temporary buffer
1700 * and then copy it to user space.
1701 */
1702 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
1703 if (!buf) {
1704 err = -ENOMEM;
1705 goto done;
1706 }
1707
1708 hci_dev_lock(hdev);
1709 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1710 hci_dev_unlock(hdev);
1711
1712 BT_DBG("num_rsp %d", ir.num_rsp);
1713
1714 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1715 ptr += sizeof(ir);
1716 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1717 ir.num_rsp))
1718 err = -EFAULT;
1719 } else
1720 err = -EFAULT;
1721
1722 kfree(buf);
1723
1724 done:
1725 hci_dev_put(hdev);
1726 return err;
1727 }
1728
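/* Bring the device up: run the driver's open callback, the optional
 * setup callback during HCI_SETUP, and the staged __hci_init()
 * sequence unless the device is marked raw or bound to a user
 * channel. On failure all pending work is flushed and the device is
 * closed again.
 */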
1729 static int hci_dev_do_open(struct hci_dev *hdev)
1730 {
1731 int ret = 0;
1732
1733 BT_DBG("%s %p", hdev->name, hdev);
1734
1735 hci_req_lock(hdev);
1736
1737 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1738 ret = -ENODEV;
1739 goto done;
1740 }
1741
1742 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
1743 /* Check for rfkill but allow the HCI setup stage to
1744 * proceed (which in itself doesn't cause any RF activity).
1745 */
1746 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1747 ret = -ERFKILL;
1748 goto done;
1749 }
1750
1751 /* Check for a valid public address or a configured static
1752 * random address, but let the HCI setup proceed to
1753 * be able to determine if there is a public address
1754 * or not.
1755 *
1756 * This check is only valid for BR/EDR controllers
1757 * since AMP controllers do not have an address.
1758 */
1759 if (hdev->dev_type == HCI_BREDR &&
1760 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1761 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1762 ret = -EADDRNOTAVAIL;
1763 goto done;
1764 }
1765 }
1766
1767 if (test_bit(HCI_UP, &hdev->flags)) {
1768 ret = -EALREADY;
1769 goto done;
1770 }
1771
1772 if (hdev->open(hdev)) {
1773 ret = -EIO;
1774 goto done;
1775 }
1776
1777 atomic_set(&hdev->cmd_cnt, 1);
1778 set_bit(HCI_INIT, &hdev->flags);
1779
1780 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1781 ret = hdev->setup(hdev);
1782
1783 if (!ret) {
1784 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1785 set_bit(HCI_RAW, &hdev->flags);
1786
1787 if (!test_bit(HCI_RAW, &hdev->flags) &&
1788 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1789 ret = __hci_init(hdev);
1790 }
1791
1792 clear_bit(HCI_INIT, &hdev->flags);
1793
1794 if (!ret) {
1795 hci_dev_hold(hdev);
1796 set_bit(HCI_UP, &hdev->flags);
1797 hci_notify(hdev, HCI_DEV_UP);
1798 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1799 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1800 hdev->dev_type == HCI_BREDR) {
1801 hci_dev_lock(hdev);
1802 mgmt_powered(hdev, 1);
1803 hci_dev_unlock(hdev);
1804 }
1805 } else {
1806 /* Init failed, cleanup */
1807 flush_work(&hdev->tx_work);
1808 flush_work(&hdev->cmd_work);
1809 flush_work(&hdev->rx_work);
1810
1811 skb_queue_purge(&hdev->cmd_q);
1812 skb_queue_purge(&hdev->rx_q);
1813
1814 if (hdev->flush)
1815 hdev->flush(hdev);
1816
1817 if (hdev->sent_cmd) {
1818 kfree_skb(hdev->sent_cmd);
1819 hdev->sent_cmd = NULL;
1820 }
1821
1822 hdev->close(hdev);
1823 hdev->flags = 0;
1824 }
1825
1826 done:
1827 hci_req_unlock(hdev);
1828 return ret;
1829 }
1830
1831 /* ---- HCI ioctl helpers ---- */
1832
1833 int hci_dev_open(__u16 dev)
1834 {
1835 struct hci_dev *hdev;
1836 int err;
1837
1838 hdev = hci_dev_get(dev);
1839 if (!hdev)
1840 return -ENODEV;
1841
1842 /* We need to ensure that no other power on/off work is pending
1843 * before proceeding to call hci_dev_do_open. This is
1844 * particularly important if the setup procedure has not yet
1845 * completed.
1846 */
1847 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1848 cancel_delayed_work(&hdev->power_off);
1849
1850 /* After this call it is guaranteed that the setup procedure
1851 * has finished. This means that error conditions like RFKILL
1852 * or no valid public or static random address apply.
1853 */
1854 flush_workqueue(hdev->req_workqueue);
1855
1856 err = hci_dev_do_open(hdev);
1857
1858 hci_dev_put(hdev);
1859
1860 return err;
1861 }
1862
1863 static int hci_dev_do_close(struct hci_dev *hdev)
1864 {
1865 BT_DBG("%s %p", hdev->name, hdev);
1866
1867 cancel_delayed_work(&hdev->power_off);
1868
1869 hci_req_cancel(hdev, ENODEV);
1870 hci_req_lock(hdev);
1871
1872 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1873 del_timer_sync(&hdev->cmd_timer);
1874 hci_req_unlock(hdev);
1875 return 0;
1876 }
1877
1878 /* Flush RX and TX works */
1879 flush_work(&hdev->tx_work);
1880 flush_work(&hdev->rx_work);
1881
1882 if (hdev->discov_timeout > 0) {
1883 cancel_delayed_work(&hdev->discov_off);
1884 hdev->discov_timeout = 0;
1885 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1886 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1887 }
1888
1889 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1890 cancel_delayed_work(&hdev->service_cache);
1891
1892 cancel_delayed_work_sync(&hdev->le_scan_disable);
1893
1894 hci_dev_lock(hdev);
1895 hci_inquiry_cache_flush(hdev);
1896 hci_conn_hash_flush(hdev);
1897 hci_dev_unlock(hdev);
1898
1899 hci_notify(hdev, HCI_DEV_DOWN);
1900
1901 if (hdev->flush)
1902 hdev->flush(hdev);
1903
1904 /* Reset device */
1905 skb_queue_purge(&hdev->cmd_q);
1906 atomic_set(&hdev->cmd_cnt, 1);
1907 if (!test_bit(HCI_RAW, &hdev->flags) &&
1908 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1909 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1910 set_bit(HCI_INIT, &hdev->flags);
1911 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1912 clear_bit(HCI_INIT, &hdev->flags);
1913 }
1914
1915 /* flush cmd work */
1916 flush_work(&hdev->cmd_work);
1917
1918 /* Drop queues */
1919 skb_queue_purge(&hdev->rx_q);
1920 skb_queue_purge(&hdev->cmd_q);
1921 skb_queue_purge(&hdev->raw_q);
1922
1923 /* Drop last sent command */
1924 if (hdev->sent_cmd) {
1925 del_timer_sync(&hdev->cmd_timer);
1926 kfree_skb(hdev->sent_cmd);
1927 hdev->sent_cmd = NULL;
1928 }
1929
1930 kfree_skb(hdev->recv_evt);
1931 hdev->recv_evt = NULL;
1932
1933 /* After this point our queues are empty
1934 * and no tasks are scheduled. */
1935 hdev->close(hdev);
1936
1937 /* Clear flags */
1938 hdev->flags = 0;
1939 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1940
1941 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1942 if (hdev->dev_type == HCI_BREDR) {
1943 hci_dev_lock(hdev);
1944 mgmt_powered(hdev, 0);
1945 hci_dev_unlock(hdev);
1946 }
1947 }
1948
1949 /* Controller radio is available but is currently powered down */
1950 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1951
1952 memset(hdev->eir, 0, sizeof(hdev->eir));
1953 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1954
1955 hci_req_unlock(hdev);
1956
1957 hci_dev_put(hdev);
1958 return 0;
1959 }
1960
1961 int hci_dev_close(__u16 dev)
1962 {
1963 struct hci_dev *hdev;
1964 int err;
1965
1966 hdev = hci_dev_get(dev);
1967 if (!hdev)
1968 return -ENODEV;
1969
1970 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1971 err = -EBUSY;
1972 goto done;
1973 }
1974
1975 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1976 cancel_delayed_work(&hdev->power_off);
1977
1978 err = hci_dev_do_close(hdev);
1979
1980 done:
1981 hci_dev_put(hdev);
1982 return err;
1983 }
1984
1985 int hci_dev_reset(__u16 dev)
1986 {
1987 struct hci_dev *hdev;
1988 int ret = 0;
1989
1990 hdev = hci_dev_get(dev);
1991 if (!hdev)
1992 return -ENODEV;
1993
1994 hci_req_lock(hdev);
1995
1996 if (!test_bit(HCI_UP, &hdev->flags)) {
1997 ret = -ENETDOWN;
1998 goto done;
1999 }
2000
2001 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2002 ret = -EBUSY;
2003 goto done;
2004 }
2005
2006 /* Drop queues */
2007 skb_queue_purge(&hdev->rx_q);
2008 skb_queue_purge(&hdev->cmd_q);
2009
2010 hci_dev_lock(hdev);
2011 hci_inquiry_cache_flush(hdev);
2012 hci_conn_hash_flush(hdev);
2013 hci_dev_unlock(hdev);
2014
2015 if (hdev->flush)
2016 hdev->flush(hdev);
2017
2018 atomic_set(&hdev->cmd_cnt, 1);
2019 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2020
2021 if (!test_bit(HCI_RAW, &hdev->flags))
2022 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2023
2024 done:
2025 hci_req_unlock(hdev);
2026 hci_dev_put(hdev);
2027 return ret;
2028 }
2029
2030 int hci_dev_reset_stat(__u16 dev)
2031 {
2032 struct hci_dev *hdev;
2033 int ret = 0;
2034
2035 hdev = hci_dev_get(dev);
2036 if (!hdev)
2037 return -ENODEV;
2038
2039 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2040 ret = -EBUSY;
2041 goto done;
2042 }
2043
2044 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2045
2046 done:
2047 hci_dev_put(hdev);
2048 return ret;
2049 }
2050
2051 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2052 {
2053 struct hci_dev *hdev;
2054 struct hci_dev_req dr;
2055 int err = 0;
2056
2057 if (copy_from_user(&dr, arg, sizeof(dr)))
2058 return -EFAULT;
2059
2060 hdev = hci_dev_get(dr.dev_id);
2061 if (!hdev)
2062 return -ENODEV;
2063
2064 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2065 err = -EBUSY;
2066 goto done;
2067 }
2068
2069 if (hdev->dev_type != HCI_BREDR) {
2070 err = -EOPNOTSUPP;
2071 goto done;
2072 }
2073
2074 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2075 err = -EOPNOTSUPP;
2076 goto done;
2077 }
2078
2079 switch (cmd) {
2080 case HCISETAUTH:
2081 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2082 HCI_INIT_TIMEOUT);
2083 break;
2084
2085 case HCISETENCRYPT:
2086 if (!lmp_encrypt_capable(hdev)) {
2087 err = -EOPNOTSUPP;
2088 break;
2089 }
2090
2091 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2092 /* Auth must be enabled first */
2093 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2094 HCI_INIT_TIMEOUT);
2095 if (err)
2096 break;
2097 }
2098
2099 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2100 HCI_INIT_TIMEOUT);
2101 break;
2102
2103 case HCISETSCAN:
2104 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2105 HCI_INIT_TIMEOUT);
2106 break;
2107
2108 case HCISETLINKPOL:
2109 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2110 HCI_INIT_TIMEOUT);
2111 break;
2112
2113 case HCISETLINKMODE:
2114 hdev->link_mode = ((__u16) dr.dev_opt) &
2115 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2116 break;
2117
2118 case HCISETPTYPE:
2119 hdev->pkt_type = (__u16) dr.dev_opt;
2120 break;
2121
2122 case HCISETACLMTU:
2123 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2124 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2125 break;
2126
2127 case HCISETSCOMTU:
2128 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2129 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2130 break;
2131
2132 default:
2133 err = -EINVAL;
2134 break;
2135 }
2136
2137 done:
2138 hci_dev_put(hdev);
2139 return err;
2140 }
2141
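/* HCIGETDEVLIST: read the requested entry count from user space,
 * snapshot the id and flags of up to that many registered controllers
 * and copy the (possibly shorter) list back.
 */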
2142 int hci_get_dev_list(void __user *arg)
2143 {
2144 struct hci_dev *hdev;
2145 struct hci_dev_list_req *dl;
2146 struct hci_dev_req *dr;
2147 int n = 0, size, err;
2148 __u16 dev_num;
2149
2150 if (get_user(dev_num, (__u16 __user *) arg))
2151 return -EFAULT;
2152
2153 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2154 return -EINVAL;
2155
2156 size = sizeof(*dl) + dev_num * sizeof(*dr);
2157
2158 dl = kzalloc(size, GFP_KERNEL);
2159 if (!dl)
2160 return -ENOMEM;
2161
2162 dr = dl->dev_req;
2163
2164 read_lock(&hci_dev_list_lock);
2165 list_for_each_entry(hdev, &hci_dev_list, list) {
2166 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2167 cancel_delayed_work(&hdev->power_off);
2168
2169 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2170 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2171
2172 (dr + n)->dev_id = hdev->id;
2173 (dr + n)->dev_opt = hdev->flags;
2174
2175 if (++n >= dev_num)
2176 break;
2177 }
2178 read_unlock(&hci_dev_list_lock);
2179
2180 dl->dev_num = n;
2181 size = sizeof(*dl) + n * sizeof(*dr);
2182
2183 err = copy_to_user(arg, dl, size);
2184 kfree(dl);
2185
2186 return err ? -EFAULT : 0;
2187 }
2188
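/* HCIGETDEVINFO: fill a struct hci_dev_info snapshot (address, type,
 * flags, buffer settings, stats and features) for one controller and
 * copy it to user space.  Controllers without BR/EDR report their LE
 * buffer settings in the ACL fields.
 */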
2189 int hci_get_dev_info(void __user *arg)
2190 {
2191 struct hci_dev *hdev;
2192 struct hci_dev_info di;
2193 int err = 0;
2194
2195 if (copy_from_user(&di, arg, sizeof(di)))
2196 return -EFAULT;
2197
2198 hdev = hci_dev_get(di.dev_id);
2199 if (!hdev)
2200 return -ENODEV;
2201
2202 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2203 cancel_delayed_work_sync(&hdev->power_off);
2204
2205 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2206 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2207
2208 strcpy(di.name, hdev->name);
2209 di.bdaddr = hdev->bdaddr;
2210 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2211 di.flags = hdev->flags;
2212 di.pkt_type = hdev->pkt_type;
2213 if (lmp_bredr_capable(hdev)) {
2214 di.acl_mtu = hdev->acl_mtu;
2215 di.acl_pkts = hdev->acl_pkts;
2216 di.sco_mtu = hdev->sco_mtu;
2217 di.sco_pkts = hdev->sco_pkts;
2218 } else {
2219 di.acl_mtu = hdev->le_mtu;
2220 di.acl_pkts = hdev->le_pkts;
2221 di.sco_mtu = 0;
2222 di.sco_pkts = 0;
2223 }
2224 di.link_policy = hdev->link_policy;
2225 di.link_mode = hdev->link_mode;
2226
2227 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2228 memcpy(&di.features, &hdev->features, sizeof(di.features));
2229
2230 if (copy_to_user(arg, &di, sizeof(di)))
2231 err = -EFAULT;
2232
2233 hci_dev_put(hdev);
2234
2235 return err;
2236 }
2237
2238 /* ---- Interface to HCI drivers ---- */
2239
2240 static int hci_rfkill_set_block(void *data, bool blocked)
2241 {
2242 struct hci_dev *hdev = data;
2243
2244 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2245
2246 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2247 return -EBUSY;
2248
2249 if (blocked) {
2250 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2251 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2252 hci_dev_do_close(hdev);
2253 } else {
2254 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2255 }
2256
2257 return 0;
2258 }
2259
2260 static const struct rfkill_ops hci_rfkill_ops = {
2261 .set_block = hci_rfkill_set_block,
2262 };
2263
2264 static void hci_power_on(struct work_struct *work)
2265 {
2266 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2267 int err;
2268
2269 BT_DBG("%s", hdev->name);
2270
2271 err = hci_dev_do_open(hdev);
2272 if (err < 0) {
2273 mgmt_set_powered_failed(hdev, err);
2274 return;
2275 }
2276
2277 /* During the HCI setup phase, a few error conditions are
2278 * ignored and they need to be checked now. If they are still
2279 * valid, it is important to turn the device back off.
2280 */
2281 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2282 (hdev->dev_type == HCI_BREDR &&
2283 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2284 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2285 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2286 hci_dev_do_close(hdev);
2287 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2288 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2289 HCI_AUTO_OFF_TIMEOUT);
2290 }
2291
2292 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
2293 mgmt_index_added(hdev);
2294 }
2295
2296 static void hci_power_off(struct work_struct *work)
2297 {
2298 struct hci_dev *hdev = container_of(work, struct hci_dev,
2299 power_off.work);
2300
2301 BT_DBG("%s", hdev->name);
2302
2303 hci_dev_do_close(hdev);
2304 }
2305
2306 static void hci_discov_off(struct work_struct *work)
2307 {
2308 struct hci_dev *hdev;
2309
2310 hdev = container_of(work, struct hci_dev, discov_off.work);
2311
2312 BT_DBG("%s", hdev->name);
2313
2314 mgmt_discoverable_timeout(hdev);
2315 }
2316
2317 int hci_uuids_clear(struct hci_dev *hdev)
2318 {
2319 struct bt_uuid *uuid, *tmp;
2320
2321 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2322 list_del(&uuid->list);
2323 kfree(uuid);
2324 }
2325
2326 return 0;
2327 }
2328
2329 int hci_link_keys_clear(struct hci_dev *hdev)
2330 {
2331 struct list_head *p, *n;
2332
2333 list_for_each_safe(p, n, &hdev->link_keys) {
2334 struct link_key *key;
2335
2336 key = list_entry(p, struct link_key, list);
2337
2338 list_del(p);
2339 kfree(key);
2340 }
2341
2342 return 0;
2343 }
2344
2345 int hci_smp_ltks_clear(struct hci_dev *hdev)
2346 {
2347 struct smp_ltk *k, *tmp;
2348
2349 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2350 list_del(&k->list);
2351 kfree(k);
2352 }
2353
2354 return 0;
2355 }
2356
2357 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2358 {
2359 struct link_key *k;
2360
2361 list_for_each_entry(k, &hdev->link_keys, list)
2362 if (bacmp(bdaddr, &k->bdaddr) == 0)
2363 return k;
2364
2365 return NULL;
2366 }
2367
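/* Decide whether a newly created BR/EDR link key should be stored
 * persistently, based on the key type and on the bonding requirements
 * both sides used during pairing.
 */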
2368 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2369 u8 key_type, u8 old_key_type)
2370 {
2371 /* Legacy key */
2372 if (key_type < 0x03)
2373 return true;
2374
2375 /* Debug keys are insecure so don't store them persistently */
2376 if (key_type == HCI_LK_DEBUG_COMBINATION)
2377 return false;
2378
2379 /* Changed combination key and there's no previous one */
2380 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2381 return false;
2382
2383 /* Security mode 3 case */
2384 if (!conn)
2385 return true;
2386
2387 /* Neither local nor remote side had no-bonding as requirement */
2388 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2389 return true;
2390
2391 /* Local side had dedicated bonding as requirement */
2392 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2393 return true;
2394
2395 /* Remote side had dedicated bonding as requirement */
2396 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2397 return true;
2398
2399 /* If none of the above criteria match, then don't store the key
2400 * persistently */
2401 return false;
2402 }
2403
2404 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
2405 {
2406 struct smp_ltk *k;
2407
2408 list_for_each_entry(k, &hdev->long_term_keys, list) {
2409 if (k->ediv != ediv ||
2410 memcmp(rand, k->rand, sizeof(k->rand)))
2411 continue;
2412
2413 return k;
2414 }
2415
2416 return NULL;
2417 }
2418
2419 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2420 u8 addr_type)
2421 {
2422 struct smp_ltk *k;
2423
2424 list_for_each_entry(k, &hdev->long_term_keys, list)
2425 if (addr_type == k->bdaddr_type &&
2426 bacmp(bdaddr, &k->bdaddr) == 0)
2427 return k;
2428
2429 return NULL;
2430 }
2431
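/* Store or update the link key for bdaddr.  For new keys the
 * management core is notified, and non-persistent keys are flagged so
 * the connection discards them when it goes down.
 */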
2432 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
2433 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
2434 {
2435 struct link_key *key, *old_key;
2436 u8 old_key_type;
2437 bool persistent;
2438
2439 old_key = hci_find_link_key(hdev, bdaddr);
2440 if (old_key) {
2441 old_key_type = old_key->type;
2442 key = old_key;
2443 } else {
2444 old_key_type = conn ? conn->key_type : 0xff;
2445 key = kzalloc(sizeof(*key), GFP_ATOMIC);
2446 if (!key)
2447 return -ENOMEM;
2448 list_add(&key->list, &hdev->link_keys);
2449 }
2450
2451 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2452
2453 /* Some buggy controller combinations generate a changed
2454 * combination key for legacy pairing even when there's no
2455 * previous key */
2456 if (type == HCI_LK_CHANGED_COMBINATION &&
2457 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2458 type = HCI_LK_COMBINATION;
2459 if (conn)
2460 conn->key_type = type;
2461 }
2462
2463 bacpy(&key->bdaddr, bdaddr);
2464 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2465 key->pin_len = pin_len;
2466
2467 if (type == HCI_LK_CHANGED_COMBINATION)
2468 key->type = old_key_type;
2469 else
2470 key->type = type;
2471
2472 if (!new_key)
2473 return 0;
2474
2475 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
2476
2477 mgmt_new_link_key(hdev, key, persistent);
2478
2479 if (conn)
2480 conn->flush_key = !persistent;
2481
2482 return 0;
2483 }
2484
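/* Store or update an SMP short term or long term key for an LE
 * address.  Newly distributed long term keys are reported to the
 * management core.
 */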
2485 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
2486 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
2487 ediv, u8 rand[8])
2488 {
2489 struct smp_ltk *key, *old_key;
2490
2491 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
2492 return 0;
2493
2494 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
2495 if (old_key)
2496 key = old_key;
2497 else {
2498 key = kzalloc(sizeof(*key), GFP_ATOMIC);
2499 if (!key)
2500 return -ENOMEM;
2501 list_add(&key->list, &hdev->long_term_keys);
2502 }
2503
2504 bacpy(&key->bdaddr, bdaddr);
2505 key->bdaddr_type = addr_type;
2506 memcpy(key->val, tk, sizeof(key->val));
2507 key->authenticated = authenticated;
2508 key->ediv = ediv;
2509 key->enc_size = enc_size;
2510 key->type = type;
2511 memcpy(key->rand, rand, sizeof(key->rand));
2512
2513 if (!new_key)
2514 return 0;
2515
2516 if (type & HCI_SMP_LTK)
2517 mgmt_new_ltk(hdev, key, 1);
2518
2519 return 0;
2520 }
2521
2522 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2523 {
2524 struct link_key *key;
2525
2526 key = hci_find_link_key(hdev, bdaddr);
2527 if (!key)
2528 return -ENOENT;
2529
2530 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2531
2532 list_del(&key->list);
2533 kfree(key);
2534
2535 return 0;
2536 }
2537
2538 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
2539 {
2540 struct smp_ltk *k, *tmp;
2541
2542 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2543 if (bacmp(bdaddr, &k->bdaddr))
2544 continue;
2545
2546 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2547
2548 list_del(&k->list);
2549 kfree(k);
2550 }
2551
2552 return 0;
2553 }
2554
2555 /* HCI command timer function */
2556 static void hci_cmd_timeout(unsigned long arg)
2557 {
2558 struct hci_dev *hdev = (void *) arg;
2559
2560 if (hdev->sent_cmd) {
2561 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2562 u16 opcode = __le16_to_cpu(sent->opcode);
2563
2564 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2565 } else {
2566 BT_ERR("%s command tx timeout", hdev->name);
2567 }
2568
2569 atomic_set(&hdev->cmd_cnt, 1);
2570 queue_work(hdev->workqueue, &hdev->cmd_work);
2571 }
2572
2573 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2574 bdaddr_t *bdaddr)
2575 {
2576 struct oob_data *data;
2577
2578 list_for_each_entry(data, &hdev->remote_oob_data, list)
2579 if (bacmp(bdaddr, &data->bdaddr) == 0)
2580 return data;
2581
2582 return NULL;
2583 }
2584
2585 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2586 {
2587 struct oob_data *data;
2588
2589 data = hci_find_remote_oob_data(hdev, bdaddr);
2590 if (!data)
2591 return -ENOENT;
2592
2593 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2594
2595 list_del(&data->list);
2596 kfree(data);
2597
2598 return 0;
2599 }
2600
2601 int hci_remote_oob_data_clear(struct hci_dev *hdev)
2602 {
2603 struct oob_data *data, *n;
2604
2605 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2606 list_del(&data->list);
2607 kfree(data);
2608 }
2609
2610 return 0;
2611 }
2612
2613 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
2614 u8 *randomizer)
2615 {
2616 struct oob_data *data;
2617
2618 data = hci_find_remote_oob_data(hdev, bdaddr);
2619
2620 if (!data) {
2621 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2622 if (!data)
2623 return -ENOMEM;
2624
2625 bacpy(&data->bdaddr, bdaddr);
2626 list_add(&data->list, &hdev->remote_oob_data);
2627 }
2628
2629 memcpy(data->hash, hash, sizeof(data->hash));
2630 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
2631
2632 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2633
2634 return 0;
2635 }
2636
2637 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
2638 bdaddr_t *bdaddr, u8 type)
2639 {
2640 struct bdaddr_list *b;
2641
2642 list_for_each_entry(b, &hdev->blacklist, list) {
2643 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2644 return b;
2645 }
2646
2647 return NULL;
2648 }
2649
2650 int hci_blacklist_clear(struct hci_dev *hdev)
2651 {
2652 struct list_head *p, *n;
2653
2654 list_for_each_safe(p, n, &hdev->blacklist) {
2655 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2656
2657 list_del(p);
2658 kfree(b);
2659 }
2660
2661 return 0;
2662 }
2663
2664 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2665 {
2666 struct bdaddr_list *entry;
2667
2668 if (!bacmp(bdaddr, BDADDR_ANY))
2669 return -EBADF;
2670
2671 if (hci_blacklist_lookup(hdev, bdaddr, type))
2672 return -EEXIST;
2673
2674 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
2675 if (!entry)
2676 return -ENOMEM;
2677
2678 bacpy(&entry->bdaddr, bdaddr);
2679 entry->bdaddr_type = type;
2680
2681 list_add(&entry->list, &hdev->blacklist);
2682
2683 return mgmt_device_blocked(hdev, bdaddr, type);
2684 }
2685
2686 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2687 {
2688 struct bdaddr_list *entry;
2689
2690 if (!bacmp(bdaddr, BDADDR_ANY))
2691 return hci_blacklist_clear(hdev);
2692
2693 entry = hci_blacklist_lookup(hdev, bdaddr, type);
2694 if (!entry)
2695 return -ENOENT;
2696
2697 list_del(&entry->list);
2698 kfree(entry);
2699
2700 return mgmt_device_unblocked(hdev, bdaddr, type);
2701 }
2702
2703 static void inquiry_complete(struct hci_dev *hdev, u8 status)
2704 {
2705 if (status) {
2706 BT_ERR("Failed to start inquiry: status %d", status);
2707
2708 hci_dev_lock(hdev);
2709 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2710 hci_dev_unlock(hdev);
2711 return;
2712 }
2713 }
2714
2715 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
2716 {
2717 /* General inquiry access code (GIAC) */
2718 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2719 struct hci_request req;
2720 struct hci_cp_inquiry cp;
2721 int err;
2722
2723 if (status) {
2724 BT_ERR("Failed to disable LE scanning: status %d", status);
2725 return;
2726 }
2727
2728 switch (hdev->discovery.type) {
2729 case DISCOV_TYPE_LE:
2730 hci_dev_lock(hdev);
2731 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2732 hci_dev_unlock(hdev);
2733 break;
2734
2735 case DISCOV_TYPE_INTERLEAVED:
2736 hci_req_init(&req, hdev);
2737
2738 memset(&cp, 0, sizeof(cp));
2739 memcpy(&cp.lap, lap, sizeof(cp.lap));
2740 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2741 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2742
2743 hci_dev_lock(hdev);
2744
2745 hci_inquiry_cache_flush(hdev);
2746
2747 err = hci_req_run(&req, inquiry_complete);
2748 if (err) {
2749 BT_ERR("Inquiry request failed: err %d", err);
2750 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2751 }
2752
2753 hci_dev_unlock(hdev);
2754 break;
2755 }
2756 }
2757
2758 static void le_scan_disable_work(struct work_struct *work)
2759 {
2760 struct hci_dev *hdev = container_of(work, struct hci_dev,
2761 le_scan_disable.work);
2762 struct hci_cp_le_set_scan_enable cp;
2763 struct hci_request req;
2764 int err;
2765
2766 BT_DBG("%s", hdev->name);
2767
2768 hci_req_init(&req, hdev);
2769
2770 memset(&cp, 0, sizeof(cp));
2771 cp.enable = LE_SCAN_DISABLE;
2772 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2773
2774 err = hci_req_run(&req, le_scan_disable_work_complete);
2775 if (err)
2776 BT_ERR("Disable LE scanning request failed: err %d", err);
2777 }
2778
2779 /* Alloc HCI device */
2780 struct hci_dev *hci_alloc_dev(void)
2781 {
2782 struct hci_dev *hdev;
2783
2784 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2785 if (!hdev)
2786 return NULL;
2787
2788 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2789 hdev->esco_type = (ESCO_HV1);
2790 hdev->link_mode = (HCI_LM_ACCEPT);
2791 hdev->num_iac = 0x01; /* Support for one IAC is mandatory */
2792 hdev->io_capability = 0x03; /* No Input No Output */
2793 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2794 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2795
2796 hdev->sniff_max_interval = 800;
2797 hdev->sniff_min_interval = 80;
2798
2799 hdev->le_scan_interval = 0x0060;
2800 hdev->le_scan_window = 0x0030;
2801 hdev->le_conn_min_interval = 0x0028;
2802 hdev->le_conn_max_interval = 0x0038;
2803
2804 mutex_init(&hdev->lock);
2805 mutex_init(&hdev->req_lock);
2806
2807 INIT_LIST_HEAD(&hdev->mgmt_pending);
2808 INIT_LIST_HEAD(&hdev->blacklist);
2809 INIT_LIST_HEAD(&hdev->uuids);
2810 INIT_LIST_HEAD(&hdev->link_keys);
2811 INIT_LIST_HEAD(&hdev->long_term_keys);
2812 INIT_LIST_HEAD(&hdev->remote_oob_data);
2813 INIT_LIST_HEAD(&hdev->conn_hash.list);
2814
2815 INIT_WORK(&hdev->rx_work, hci_rx_work);
2816 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2817 INIT_WORK(&hdev->tx_work, hci_tx_work);
2818 INIT_WORK(&hdev->power_on, hci_power_on);
2819
2820 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2821 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2822 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2823
2824 skb_queue_head_init(&hdev->rx_q);
2825 skb_queue_head_init(&hdev->cmd_q);
2826 skb_queue_head_init(&hdev->raw_q);
2827
2828 init_waitqueue_head(&hdev->req_wait_q);
2829
2830 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2831
2832 hci_init_sysfs(hdev);
2833 discovery_init(hdev);
2834
2835 return hdev;
2836 }
2837 EXPORT_SYMBOL(hci_alloc_dev);
2838
2839 /* Free HCI device */
2840 void hci_free_dev(struct hci_dev *hdev)
2841 {
2842 /* will be freed via the device release callback */
2843 put_device(&hdev->dev);
2844 }
2845 EXPORT_SYMBOL(hci_free_dev);
2846
2847 /* Register HCI device */
2848 int hci_register_dev(struct hci_dev *hdev)
2849 {
2850 int id, error;
2851
2852 if (!hdev->open || !hdev->close)
2853 return -EINVAL;
2854
2855 /* Do not allow HCI_AMP devices to register at index 0,
2856 * so the index can be used as the AMP controller ID.
2857 */
2858 switch (hdev->dev_type) {
2859 case HCI_BREDR:
2860 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2861 break;
2862 case HCI_AMP:
2863 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2864 break;
2865 default:
2866 return -EINVAL;
2867 }
2868
2869 if (id < 0)
2870 return id;
2871
2872 sprintf(hdev->name, "hci%d", id);
2873 hdev->id = id;
2874
2875 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2876
2877 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2878 WQ_MEM_RECLAIM, 1, hdev->name);
2879 if (!hdev->workqueue) {
2880 error = -ENOMEM;
2881 goto err;
2882 }
2883
2884 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2885 WQ_MEM_RECLAIM, 1, hdev->name);
2886 if (!hdev->req_workqueue) {
2887 destroy_workqueue(hdev->workqueue);
2888 error = -ENOMEM;
2889 goto err;
2890 }
2891
2892 if (!IS_ERR_OR_NULL(bt_debugfs))
2893 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2894
2895 dev_set_name(&hdev->dev, "%s", hdev->name);
2896
2897 error = device_add(&hdev->dev);
2898 if (error < 0)
2899 goto err_wqueue;
2900
2901 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2902 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2903 hdev);
2904 if (hdev->rfkill) {
2905 if (rfkill_register(hdev->rfkill) < 0) {
2906 rfkill_destroy(hdev->rfkill);
2907 hdev->rfkill = NULL;
2908 }
2909 }
2910
2911 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2912 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2913
2914 set_bit(HCI_SETUP, &hdev->dev_flags);
2915 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2916
2917 if (hdev->dev_type == HCI_BREDR) {
2918 /* Assume BR/EDR support until proven otherwise (such as
2919 * through reading supported features during init).
2920 */
2921 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2922 }
2923
2924 write_lock(&hci_dev_list_lock);
2925 list_add(&hdev->list, &hci_dev_list);
2926 write_unlock(&hci_dev_list_lock);
2927
2928 hci_notify(hdev, HCI_DEV_REG);
2929 hci_dev_hold(hdev);
2930
2931 queue_work(hdev->req_workqueue, &hdev->power_on);
2932
2933 return id;
2934
2935 err_wqueue:
2936 destroy_workqueue(hdev->workqueue);
2937 destroy_workqueue(hdev->req_workqueue);
2938 err:
2939 ida_simple_remove(&hci_index_ida, hdev->id);
2940
2941 return error;
2942 }
2943 EXPORT_SYMBOL(hci_register_dev);
2944
2945 /* Unregister HCI device */
2946 void hci_unregister_dev(struct hci_dev *hdev)
2947 {
2948 int i, id;
2949
2950 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2951
2952 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2953
2954 id = hdev->id;
2955
2956 write_lock(&hci_dev_list_lock);
2957 list_del(&hdev->list);
2958 write_unlock(&hci_dev_list_lock);
2959
2960 hci_dev_do_close(hdev);
2961
2962 for (i = 0; i < NUM_REASSEMBLY; i++)
2963 kfree_skb(hdev->reassembly[i]);
2964
2965 cancel_work_sync(&hdev->power_on);
2966
2967 if (!test_bit(HCI_INIT, &hdev->flags) &&
2968 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
2969 hci_dev_lock(hdev);
2970 mgmt_index_removed(hdev);
2971 hci_dev_unlock(hdev);
2972 }
2973
2974 /* mgmt_index_removed should take care of emptying the
2975 * pending list */
2976 BUG_ON(!list_empty(&hdev->mgmt_pending));
2977
2978 hci_notify(hdev, HCI_DEV_UNREG);
2979
2980 if (hdev->rfkill) {
2981 rfkill_unregister(hdev->rfkill);
2982 rfkill_destroy(hdev->rfkill);
2983 }
2984
2985 device_del(&hdev->dev);
2986
2987 debugfs_remove_recursive(hdev->debugfs);
2988
2989 destroy_workqueue(hdev->workqueue);
2990 destroy_workqueue(hdev->req_workqueue);
2991
2992 hci_dev_lock(hdev);
2993 hci_blacklist_clear(hdev);
2994 hci_uuids_clear(hdev);
2995 hci_link_keys_clear(hdev);
2996 hci_smp_ltks_clear(hdev);
2997 hci_remote_oob_data_clear(hdev);
2998 hci_dev_unlock(hdev);
2999
3000 hci_dev_put(hdev);
3001
3002 ida_simple_remove(&hci_index_ida, id);
3003 }
3004 EXPORT_SYMBOL(hci_unregister_dev);
3005
3006 /* Suspend HCI device */
3007 int hci_suspend_dev(struct hci_dev *hdev)
3008 {
3009 hci_notify(hdev, HCI_DEV_SUSPEND);
3010 return 0;
3011 }
3012 EXPORT_SYMBOL(hci_suspend_dev);
3013
3014 /* Resume HCI device */
3015 int hci_resume_dev(struct hci_dev *hdev)
3016 {
3017 hci_notify(hdev, HCI_DEV_RESUME);
3018 return 0;
3019 }
3020 EXPORT_SYMBOL(hci_resume_dev);
3021
3022 /* Receive frame from HCI drivers */
3023 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3024 {
3025 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3026 && !test_bit(HCI_INIT, &hdev->flags))) {
3027 kfree_skb(skb);
3028 return -ENXIO;
3029 }
3030
3031 /* Incoming skb */
3032 bt_cb(skb)->incoming = 1;
3033
3034 /* Time stamp */
3035 __net_timestamp(skb);
3036
3037 skb_queue_tail(&hdev->rx_q, skb);
3038 queue_work(hdev->workqueue, &hdev->rx_work);
3039
3040 return 0;
3041 }
3042 EXPORT_SYMBOL(hci_recv_frame);
3043
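/* Append count bytes of driver data to the reassembly buffer selected
 * by index, allocating it on first use based on the packet type.
 * Complete packets are handed to hci_recv_frame().  Returns the number
 * of input bytes not yet consumed, or a negative error.
 */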
3044 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
3045 int count, __u8 index)
3046 {
3047 int len = 0;
3048 int hlen = 0;
3049 int remain = count;
3050 struct sk_buff *skb;
3051 struct bt_skb_cb *scb;
3052
3053 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
3054 index >= NUM_REASSEMBLY)
3055 return -EILSEQ;
3056
3057 skb = hdev->reassembly[index];
3058
3059 if (!skb) {
3060 switch (type) {
3061 case HCI_ACLDATA_PKT:
3062 len = HCI_MAX_FRAME_SIZE;
3063 hlen = HCI_ACL_HDR_SIZE;
3064 break;
3065 case HCI_EVENT_PKT:
3066 len = HCI_MAX_EVENT_SIZE;
3067 hlen = HCI_EVENT_HDR_SIZE;
3068 break;
3069 case HCI_SCODATA_PKT:
3070 len = HCI_MAX_SCO_SIZE;
3071 hlen = HCI_SCO_HDR_SIZE;
3072 break;
3073 }
3074
3075 skb = bt_skb_alloc(len, GFP_ATOMIC);
3076 if (!skb)
3077 return -ENOMEM;
3078
3079 scb = (void *) skb->cb;
3080 scb->expect = hlen;
3081 scb->pkt_type = type;
3082
3083 hdev->reassembly[index] = skb;
3084 }
3085
3086 while (count) {
3087 scb = (void *) skb->cb;
3088 len = min_t(uint, scb->expect, count);
3089
3090 memcpy(skb_put(skb, len), data, len);
3091
3092 count -= len;
3093 data += len;
3094 scb->expect -= len;
3095 remain = count;
3096
3097 switch (type) {
3098 case HCI_EVENT_PKT:
3099 if (skb->len == HCI_EVENT_HDR_SIZE) {
3100 struct hci_event_hdr *h = hci_event_hdr(skb);
3101 scb->expect = h->plen;
3102
3103 if (skb_tailroom(skb) < scb->expect) {
3104 kfree_skb(skb);
3105 hdev->reassembly[index] = NULL;
3106 return -ENOMEM;
3107 }
3108 }
3109 break;
3110
3111 case HCI_ACLDATA_PKT:
3112 if (skb->len == HCI_ACL_HDR_SIZE) {
3113 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3114 scb->expect = __le16_to_cpu(h->dlen);
3115
3116 if (skb_tailroom(skb) < scb->expect) {
3117 kfree_skb(skb);
3118 hdev->reassembly[index] = NULL;
3119 return -ENOMEM;
3120 }
3121 }
3122 break;
3123
3124 case HCI_SCODATA_PKT:
3125 if (skb->len == HCI_SCO_HDR_SIZE) {
3126 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3127 scb->expect = h->dlen;
3128
3129 if (skb_tailroom(skb) < scb->expect) {
3130 kfree_skb(skb);
3131 hdev->reassembly[index] = NULL;
3132 return -ENOMEM;
3133 }
3134 }
3135 break;
3136 }
3137
3138 if (scb->expect == 0) {
3139 /* Complete frame */
3140
3141 bt_cb(skb)->pkt_type = type;
3142 hci_recv_frame(hdev, skb);
3143
3144 hdev->reassembly[index] = NULL;
3145 return remain;
3146 }
3147 }
3148
3149 return remain;
3150 }
3151
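/* Feed a raw byte stream of a single packet type from a driver into
 * the reassembly machinery until it has been consumed.  A minimal
 * sketch of a driver receive path (illustrative only):
 *
 *	hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len);
 *
 * Complete packets reach the core via hci_recv_frame() internally.
 */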
3152 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3153 {
3154 int rem = 0;
3155
3156 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3157 return -EILSEQ;
3158
3159 while (count) {
3160 rem = hci_reassembly(hdev, type, data, count, type - 1);
3161 if (rem < 0)
3162 return rem;
3163
3164 data += (count - rem);
3165 count = rem;
3166 }
3167
3168 return rem;
3169 }
3170 EXPORT_SYMBOL(hci_recv_fragment);
3171
3172 #define STREAM_REASSEMBLY 0
3173
3174 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3175 {
3176 int type;
3177 int rem = 0;
3178
3179 while (count) {
3180 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3181
3182 if (!skb) {
3183 struct { char type; } *pkt;
3184
3185 /* Start of the frame */
3186 pkt = data;
3187 type = pkt->type;
3188
3189 data++;
3190 count--;
3191 } else
3192 type = bt_cb(skb)->pkt_type;
3193
3194 rem = hci_reassembly(hdev, type, data, count,
3195 STREAM_REASSEMBLY);
3196 if (rem < 0)
3197 return rem;
3198
3199 data += (count - rem);
3200 count = rem;
3201 }
3202
3203 return rem;
3204 }
3205 EXPORT_SYMBOL(hci_recv_stream_fragment);
3206
3207 /* ---- Interface to upper protocols ---- */
3208
3209 int hci_register_cb(struct hci_cb *cb)
3210 {
3211 BT_DBG("%p name %s", cb, cb->name);
3212
3213 write_lock(&hci_cb_list_lock);
3214 list_add(&cb->list, &hci_cb_list);
3215 write_unlock(&hci_cb_list_lock);
3216
3217 return 0;
3218 }
3219 EXPORT_SYMBOL(hci_register_cb);
3220
3221 int hci_unregister_cb(struct hci_cb *cb)
3222 {
3223 BT_DBG("%p name %s", cb, cb->name);
3224
3225 write_lock(&hci_cb_list_lock);
3226 list_del(&cb->list);
3227 write_unlock(&hci_cb_list_lock);
3228
3229 return 0;
3230 }
3231 EXPORT_SYMBOL(hci_unregister_cb);
3232
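/* Hand one complete packet to the driver: timestamp it, mirror it to
 * the monitor socket (and to raw sockets when in promiscuous mode),
 * drop the socket owner and call the driver's send callback.
 */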
3233 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3234 {
3235 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3236
3237 /* Time stamp */
3238 __net_timestamp(skb);
3239
3240 /* Send copy to monitor */
3241 hci_send_to_monitor(hdev, skb);
3242
3243 if (atomic_read(&hdev->promisc)) {
3244 /* Send copy to the sockets */
3245 hci_send_to_sock(hdev, skb);
3246 }
3247
3248 /* Get rid of skb owner, prior to sending to the driver. */
3249 skb_orphan(skb);
3250
3251 if (hdev->send(hdev, skb) < 0)
3252 BT_ERR("%s sending frame failed", hdev->name);
3253 }
3254
3255 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3256 {
3257 skb_queue_head_init(&req->cmd_q);
3258 req->hdev = hdev;
3259 req->err = 0;
3260 }
3261
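/* Submit all commands collected in the request to the device command
 * queue as one unit and kick the command worker.  The completion
 * callback is attached to the last command of the request.
 */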
3262 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
3263 {
3264 struct hci_dev *hdev = req->hdev;
3265 struct sk_buff *skb;
3266 unsigned long flags;
3267
3268 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
3269
3270 /* If an error occurred during request building, remove all HCI
3271 * commands queued on the HCI request queue.
3272 */
3273 if (req->err) {
3274 skb_queue_purge(&req->cmd_q);
3275 return req->err;
3276 }
3277
3278 /* Do not allow empty requests */
3279 if (skb_queue_empty(&req->cmd_q))
3280 return -ENODATA;
3281
3282 skb = skb_peek_tail(&req->cmd_q);
3283 bt_cb(skb)->req.complete = complete;
3284
3285 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3286 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
3287 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3288
3289 queue_work(hdev->workqueue, &hdev->cmd_work);
3290
3291 return 0;
3292 }
3293
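/* Allocate an skb carrying an HCI command header plus plen parameter
 * bytes, marked as an HCI_COMMAND_PKT.
 */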
3294 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
3295 u32 plen, const void *param)
3296 {
3297 int len = HCI_COMMAND_HDR_SIZE + plen;
3298 struct hci_command_hdr *hdr;
3299 struct sk_buff *skb;
3300
3301 skb = bt_skb_alloc(len, GFP_ATOMIC);
3302 if (!skb)
3303 return NULL;
3304
3305 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
3306 hdr->opcode = cpu_to_le16(opcode);
3307 hdr->plen = plen;
3308
3309 if (plen)
3310 memcpy(skb_put(skb, plen), param, plen);
3311
3312 BT_DBG("skb len %d", skb->len);
3313
3314 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
3315
3316 return skb;
3317 }
3318
3319 /* Send HCI command */
3320 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3321 const void *param)
3322 {
3323 struct sk_buff *skb;
3324
3325 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3326
3327 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3328 if (!skb) {
3329 BT_ERR("%s no memory for command", hdev->name);
3330 return -ENOMEM;
3331 }
3332
3333 /* Stand-alone HCI commands must be flagged as
3334 * single-command requests.
3335 */
3336 bt_cb(skb)->req.start = true;
3337
3338 skb_queue_tail(&hdev->cmd_q, skb);
3339 queue_work(hdev->workqueue, &hdev->cmd_work);
3340
3341 return 0;
3342 }
3343
3344 /* Queue a command to an asynchronous HCI request */
3345 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
3346 const void *param, u8 event)
3347 {
3348 struct hci_dev *hdev = req->hdev;
3349 struct sk_buff *skb;
3350
3351 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3352
3353 /* If an error occurred during request building, there is no point in
3354 * queueing the HCI command. We can simply return.
3355 */
3356 if (req->err)
3357 return;
3358
3359 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3360 if (!skb) {
3361 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
3362 hdev->name, opcode);
3363 req->err = -ENOMEM;
3364 return;
3365 }
3366
3367 if (skb_queue_empty(&req->cmd_q))
3368 bt_cb(skb)->req.start = true;
3369
3370 bt_cb(skb)->req.event = event;
3371
3372 skb_queue_tail(&req->cmd_q, skb);
3373 }
3374
3375 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
3376 const void *param)
3377 {
3378 hci_req_add_ev(req, opcode, plen, param, 0);
3379 }
3380
3381 /* Get data from the previously sent command */
3382 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3383 {
3384 struct hci_command_hdr *hdr;
3385
3386 if (!hdev->sent_cmd)
3387 return NULL;
3388
3389 hdr = (void *) hdev->sent_cmd->data;
3390
3391 if (hdr->opcode != cpu_to_le16(opcode))
3392 return NULL;
3393
3394 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3395
3396 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3397 }
3398
3399 /* Send ACL data */
3400 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3401 {
3402 struct hci_acl_hdr *hdr;
3403 int len = skb->len;
3404
3405 skb_push(skb, HCI_ACL_HDR_SIZE);
3406 skb_reset_transport_header(skb);
3407 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3408 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3409 hdr->dlen = cpu_to_le16(len);
3410 }
3411
3412 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3413 struct sk_buff *skb, __u16 flags)
3414 {
3415 struct hci_conn *conn = chan->conn;
3416 struct hci_dev *hdev = conn->hdev;
3417 struct sk_buff *list;
3418
3419 skb->len = skb_headlen(skb);
3420 skb->data_len = 0;
3421
3422 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3423
3424 switch (hdev->dev_type) {
3425 case HCI_BREDR:
3426 hci_add_acl_hdr(skb, conn->handle, flags);
3427 break;
3428 case HCI_AMP:
3429 hci_add_acl_hdr(skb, chan->handle, flags);
3430 break;
3431 default:
3432 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3433 return;
3434 }
3435
3436 list = skb_shinfo(skb)->frag_list;
3437 if (!list) {
3438 /* Non-fragmented */
3439 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3440
3441 skb_queue_tail(queue, skb);
3442 } else {
3443 /* Fragmented */
3444 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3445
3446 skb_shinfo(skb)->frag_list = NULL;
3447
3448 /* Queue all fragments atomically */
3449 spin_lock(&queue->lock);
3450
3451 __skb_queue_tail(queue, skb);
3452
3453 flags &= ~ACL_START;
3454 flags |= ACL_CONT;
3455 do {
3456 skb = list; list = list->next;
3457
3458 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3459 hci_add_acl_hdr(skb, conn->handle, flags);
3460
3461 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3462
3463 __skb_queue_tail(queue, skb);
3464 } while (list);
3465
3466 spin_unlock(&queue->lock);
3467 }
3468 }
3469
3470 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3471 {
3472 struct hci_dev *hdev = chan->conn->hdev;
3473
3474 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3475
3476 hci_queue_acl(chan, &chan->data_q, skb, flags);
3477
3478 queue_work(hdev->workqueue, &hdev->tx_work);
3479 }
3480
3481 /* Send SCO data */
3482 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3483 {
3484 struct hci_dev *hdev = conn->hdev;
3485 struct hci_sco_hdr hdr;
3486
3487 BT_DBG("%s len %d", hdev->name, skb->len);
3488
3489 hdr.handle = cpu_to_le16(conn->handle);
3490 hdr.dlen = skb->len;
3491
3492 skb_push(skb, HCI_SCO_HDR_SIZE);
3493 skb_reset_transport_header(skb);
3494 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3495
3496 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3497
3498 skb_queue_tail(&conn->data_q, skb);
3499 queue_work(hdev->workqueue, &hdev->tx_work);
3500 }
3501
3502 /* ---- HCI TX task (outgoing data) ---- */
3503
3504 /* HCI Connection scheduler */
3505 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3506 int *quote)
3507 {
3508 struct hci_conn_hash *h = &hdev->conn_hash;
3509 struct hci_conn *conn = NULL, *c;
3510 unsigned int num = 0, min = ~0;
3511
3512 /* We don't have to lock the device here. Connections are always
3513 * added and removed with TX task disabled. */
3514
3515 rcu_read_lock();
3516
3517 list_for_each_entry_rcu(c, &h->list, list) {
3518 if (c->type != type || skb_queue_empty(&c->data_q))
3519 continue;
3520
3521 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3522 continue;
3523
3524 num++;
3525
3526 if (c->sent < min) {
3527 min = c->sent;
3528 conn = c;
3529 }
3530
3531 if (hci_conn_num(hdev, type) == num)
3532 break;
3533 }
3534
3535 rcu_read_unlock();
3536
3537 if (conn) {
3538 int cnt, q;
3539
3540 switch (conn->type) {
3541 case ACL_LINK:
3542 cnt = hdev->acl_cnt;
3543 break;
3544 case SCO_LINK:
3545 case ESCO_LINK:
3546 cnt = hdev->sco_cnt;
3547 break;
3548 case LE_LINK:
3549 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3550 break;
3551 default:
3552 cnt = 0;
3553 BT_ERR("Unknown link type");
3554 }
3555
3556 q = cnt / num;
3557 *quote = q ? q : 1;
3558 } else
3559 *quote = 0;
3560
3561 BT_DBG("conn %p quote %d", conn, *quote);
3562 return conn;
3563 }
3564
3565 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3566 {
3567 struct hci_conn_hash *h = &hdev->conn_hash;
3568 struct hci_conn *c;
3569
3570 BT_ERR("%s link tx timeout", hdev->name);
3571
3572 rcu_read_lock();
3573
3574 /* Kill stalled connections */
3575 list_for_each_entry_rcu(c, &h->list, list) {
3576 if (c->type == type && c->sent) {
3577 BT_ERR("%s killing stalled connection %pMR",
3578 hdev->name, &c->dst);
3579 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3580 }
3581 }
3582
3583 rcu_read_unlock();
3584 }
3585
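/* Pick the channel that should transmit next for the given link type:
 * among the highest-priority queued traffic, prefer the connection
 * with the fewest packets in flight and derive a fair per-round quota
 * from the available controller buffers.
 */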
3586 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3587 int *quote)
3588 {
3589 struct hci_conn_hash *h = &hdev->conn_hash;
3590 struct hci_chan *chan = NULL;
3591 unsigned int num = 0, min = ~0, cur_prio = 0;
3592 struct hci_conn *conn;
3593 int cnt, q, conn_num = 0;
3594
3595 BT_DBG("%s", hdev->name);
3596
3597 rcu_read_lock();
3598
3599 list_for_each_entry_rcu(conn, &h->list, list) {
3600 struct hci_chan *tmp;
3601
3602 if (conn->type != type)
3603 continue;
3604
3605 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3606 continue;
3607
3608 conn_num++;
3609
3610 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3611 struct sk_buff *skb;
3612
3613 if (skb_queue_empty(&tmp->data_q))
3614 continue;
3615
3616 skb = skb_peek(&tmp->data_q);
3617 if (skb->priority < cur_prio)
3618 continue;
3619
3620 if (skb->priority > cur_prio) {
3621 num = 0;
3622 min = ~0;
3623 cur_prio = skb->priority;
3624 }
3625
3626 num++;
3627
3628 if (conn->sent < min) {
3629 min = conn->sent;
3630 chan = tmp;
3631 }
3632 }
3633
3634 if (hci_conn_num(hdev, type) == conn_num)
3635 break;
3636 }
3637
3638 rcu_read_unlock();
3639
3640 if (!chan)
3641 return NULL;
3642
3643 switch (chan->conn->type) {
3644 case ACL_LINK:
3645 cnt = hdev->acl_cnt;
3646 break;
3647 case AMP_LINK:
3648 cnt = hdev->block_cnt;
3649 break;
3650 case SCO_LINK:
3651 case ESCO_LINK:
3652 cnt = hdev->sco_cnt;
3653 break;
3654 case LE_LINK:
3655 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3656 break;
3657 default:
3658 cnt = 0;
3659 BT_ERR("Unknown link type");
3660 }
3661
3662 q = cnt / num;
3663 *quote = q ? q : 1;
3664 BT_DBG("chan %p quote %d", chan, *quote);
3665 return chan;
3666 }
3667
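/* After a TX round, promote the head packet of channels that did not
 * get to send anything so they are not starved by higher-priority
 * traffic; the sent counters of channels that did transmit are reset.
 */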
3668 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3669 {
3670 struct hci_conn_hash *h = &hdev->conn_hash;
3671 struct hci_conn *conn;
3672 int num = 0;
3673
3674 BT_DBG("%s", hdev->name);
3675
3676 rcu_read_lock();
3677
3678 list_for_each_entry_rcu(conn, &h->list, list) {
3679 struct hci_chan *chan;
3680
3681 if (conn->type != type)
3682 continue;
3683
3684 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3685 continue;
3686
3687 num++;
3688
3689 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3690 struct sk_buff *skb;
3691
3692 if (chan->sent) {
3693 chan->sent = 0;
3694 continue;
3695 }
3696
3697 if (skb_queue_empty(&chan->data_q))
3698 continue;
3699
3700 skb = skb_peek(&chan->data_q);
3701 if (skb->priority >= HCI_PRIO_MAX - 1)
3702 continue;
3703
3704 skb->priority = HCI_PRIO_MAX - 1;
3705
3706 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3707 skb->priority);
3708 }
3709
3710 if (hci_conn_num(hdev, type) == num)
3711 break;
3712 }
3713
3714 rcu_read_unlock();
3715
3716 }
3717
3718 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3719 {
3720 /* Calculate count of blocks used by this packet */
3721 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3722 }
3723
3724 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3725 {
3726 if (!test_bit(HCI_RAW, &hdev->flags)) {
3727 /* ACL tx timeout must be longer than maximum
3728 * link supervision timeout (40.9 seconds) */
3729 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3730 HCI_ACL_TX_TIMEOUT))
3731 hci_link_tx_to(hdev, ACL_LINK);
3732 }
3733 }
3734
3735 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3736 {
3737 unsigned int cnt = hdev->acl_cnt;
3738 struct hci_chan *chan;
3739 struct sk_buff *skb;
3740 int quote;
3741
3742 __check_timeout(hdev, cnt);
3743
3744 while (hdev->acl_cnt &&
3745 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3746 u32 priority = (skb_peek(&chan->data_q))->priority;
3747 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3748 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3749 skb->len, skb->priority);
3750
3751 /* Stop if priority has changed */
3752 if (skb->priority < priority)
3753 break;
3754
3755 skb = skb_dequeue(&chan->data_q);
3756
3757 hci_conn_enter_active_mode(chan->conn,
3758 bt_cb(skb)->force_active);
3759
3760 hci_send_frame(hdev, skb);
3761 hdev->acl_last_tx = jiffies;
3762
3763 hdev->acl_cnt--;
3764 chan->sent++;
3765 chan->conn->sent++;
3766 }
3767 }
3768
3769 if (cnt != hdev->acl_cnt)
3770 hci_prio_recalculate(hdev, ACL_LINK);
3771 }
3772
3773 static void hci_sched_acl_blk(struct hci_dev *hdev)
3774 {
3775 unsigned int cnt = hdev->block_cnt;
3776 struct hci_chan *chan;
3777 struct sk_buff *skb;
3778 int quote;
3779 u8 type;
3780
3781 __check_timeout(hdev, cnt);
3782
3783 BT_DBG("%s", hdev->name);
3784
3785 if (hdev->dev_type == HCI_AMP)
3786 type = AMP_LINK;
3787 else
3788 type = ACL_LINK;
3789
3790 while (hdev->block_cnt > 0 &&
3791 (chan = hci_chan_sent(hdev, type, &quote))) {
3792 u32 priority = (skb_peek(&chan->data_q))->priority;
3793 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3794 int blocks;
3795
3796 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3797 skb->len, skb->priority);
3798
3799 /* Stop if priority has changed */
3800 if (skb->priority < priority)
3801 break;
3802
3803 skb = skb_dequeue(&chan->data_q);
3804
3805 blocks = __get_blocks(hdev, skb);
3806 if (blocks > hdev->block_cnt)
3807 return;
3808
3809 hci_conn_enter_active_mode(chan->conn,
3810 bt_cb(skb)->force_active);
3811
3812 hci_send_frame(hdev, skb);
3813 hdev->acl_last_tx = jiffies;
3814
3815 hdev->block_cnt -= blocks;
3816 quote -= blocks;
3817
3818 chan->sent += blocks;
3819 chan->conn->sent += blocks;
3820 }
3821 }
3822
3823 if (cnt != hdev->block_cnt)
3824 hci_prio_recalculate(hdev, type);
3825 }
3826
3827 static void hci_sched_acl(struct hci_dev *hdev)
3828 {
3829 BT_DBG("%s", hdev->name);
3830
3831 /* No ACL link over BR/EDR controller */
3832 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3833 return;
3834
3835 /* No AMP link over AMP controller */
3836 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3837 return;
3838
3839 switch (hdev->flow_ctl_mode) {
3840 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3841 hci_sched_acl_pkt(hdev);
3842 break;
3843
3844 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3845 hci_sched_acl_blk(hdev);
3846 break;
3847 }
3848 }
3849
3850 /* Schedule SCO */
3851 static void hci_sched_sco(struct hci_dev *hdev)
3852 {
3853 struct hci_conn *conn;
3854 struct sk_buff *skb;
3855 int quote;
3856
3857 BT_DBG("%s", hdev->name);
3858
3859 if (!hci_conn_num(hdev, SCO_LINK))
3860 return;
3861
3862 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3863 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3864 BT_DBG("skb %p len %d", skb, skb->len);
3865 hci_send_frame(hdev, skb);
3866
3867 conn->sent++;
3868 if (conn->sent == ~0)
3869 conn->sent = 0;
3870 }
3871 }
3872 }
3873
3874 static void hci_sched_esco(struct hci_dev *hdev)
3875 {
3876 struct hci_conn *conn;
3877 struct sk_buff *skb;
3878 int quote;
3879
3880 BT_DBG("%s", hdev->name);
3881
3882 if (!hci_conn_num(hdev, ESCO_LINK))
3883 return;
3884
3885 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3886 &quote))) {
3887 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3888 BT_DBG("skb %p len %d", skb, skb->len);
3889 hci_send_frame(hdev, skb);
3890
3891 conn->sent++;
3892 if (conn->sent == ~0)
3893 conn->sent = 0;
3894 }
3895 }
3896 }
3897
3898 static void hci_sched_le(struct hci_dev *hdev)
3899 {
3900 struct hci_chan *chan;
3901 struct sk_buff *skb;
3902 int quote, cnt, tmp;
3903
3904 BT_DBG("%s", hdev->name);
3905
3906 if (!hci_conn_num(hdev, LE_LINK))
3907 return;
3908
3909 if (!test_bit(HCI_RAW, &hdev->flags)) {
3910 /* LE tx timeout must be longer than maximum
3911 * link supervision timeout (40.9 seconds) */
3912 if (!hdev->le_cnt && hdev->le_pkts &&
3913 time_after(jiffies, hdev->le_last_tx + HZ * 45))
3914 hci_link_tx_to(hdev, LE_LINK);
3915 }
3916
3917 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3918 tmp = cnt;
3919 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3920 u32 priority = (skb_peek(&chan->data_q))->priority;
3921 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3922 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3923 skb->len, skb->priority);
3924
3925 /* Stop if priority has changed */
3926 if (skb->priority < priority)
3927 break;
3928
3929 skb = skb_dequeue(&chan->data_q);
3930
3931 hci_send_frame(hdev, skb);
3932 hdev->le_last_tx = jiffies;
3933
3934 cnt--;
3935 chan->sent++;
3936 chan->conn->sent++;
3937 }
3938 }
3939
3940 if (hdev->le_pkts)
3941 hdev->le_cnt = cnt;
3942 else
3943 hdev->acl_cnt = cnt;
3944
3945 if (cnt != tmp)
3946 hci_prio_recalculate(hdev, LE_LINK);
3947 }
3948
3949 static void hci_tx_work(struct work_struct *work)
3950 {
3951 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3952 struct sk_buff *skb;
3953
3954 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3955 hdev->sco_cnt, hdev->le_cnt);
3956
3957 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3958 /* Schedule queues and send stuff to HCI driver */
3959 hci_sched_acl(hdev);
3960 hci_sched_sco(hdev);
3961 hci_sched_esco(hdev);
3962 hci_sched_le(hdev);
3963 }
3964
3965 /* Send next queued raw (unknown type) packet */
3966 while ((skb = skb_dequeue(&hdev->raw_q)))
3967 hci_send_frame(hdev, skb);
3968 }
3969
3970 /* ----- HCI RX task (incoming data processing) ----- */
3971
3972 /* ACL data packet */
3973 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3974 {
3975 struct hci_acl_hdr *hdr = (void *) skb->data;
3976 struct hci_conn *conn;
3977 __u16 handle, flags;
3978
3979 skb_pull(skb, HCI_ACL_HDR_SIZE);
3980
3981 handle = __le16_to_cpu(hdr->handle);
3982 flags = hci_flags(handle);
3983 handle = hci_handle(handle);
3984
3985 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3986 handle, flags);
3987
3988 hdev->stat.acl_rx++;
3989
3990 hci_dev_lock(hdev);
3991 conn = hci_conn_hash_lookup_handle(hdev, handle);
3992 hci_dev_unlock(hdev);
3993
3994 if (conn) {
3995 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3996
3997 /* Send to upper protocol */
3998 l2cap_recv_acldata(conn, skb, flags);
3999 return;
4000 } else {
4001 BT_ERR("%s ACL packet for unknown connection handle %d",
4002 hdev->name, handle);
4003 }
4004
4005 kfree_skb(skb);
4006 }
4007
4008 /* SCO data packet */
4009 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4010 {
4011 struct hci_sco_hdr *hdr = (void *) skb->data;
4012 struct hci_conn *conn;
4013 __u16 handle;
4014
4015 skb_pull(skb, HCI_SCO_HDR_SIZE);
4016
4017 handle = __le16_to_cpu(hdr->handle);
4018
4019 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4020
4021 hdev->stat.sco_rx++;
4022
4023 hci_dev_lock(hdev);
4024 conn = hci_conn_hash_lookup_handle(hdev, handle);
4025 hci_dev_unlock(hdev);
4026
4027 if (conn) {
4028 /* Send to upper protocol */
4029 sco_recv_scodata(conn, skb);
4030 return;
4031 } else {
4032 BT_ERR("%s SCO packet for unknown connection handle %d",
4033 hdev->name, handle);
4034 }
4035
4036 kfree_skb(skb);
4037 }
4038
4039 static bool hci_req_is_complete(struct hci_dev *hdev)
4040 {
4041 struct sk_buff *skb;
4042
4043 skb = skb_peek(&hdev->cmd_q);
4044 if (!skb)
4045 return true;
4046
4047 return bt_cb(skb)->req.start;
4048 }
4049
4050 static void hci_resend_last(struct hci_dev *hdev)
4051 {
4052 struct hci_command_hdr *sent;
4053 struct sk_buff *skb;
4054 u16 opcode;
4055
4056 if (!hdev->sent_cmd)
4057 return;
4058
4059 sent = (void *) hdev->sent_cmd->data;
4060 opcode = __le16_to_cpu(sent->opcode);
4061 if (opcode == HCI_OP_RESET)
4062 return;
4063
4064 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4065 if (!skb)
4066 return;
4067
4068 skb_queue_head(&hdev->cmd_q, skb);
4069 queue_work(hdev->workqueue, &hdev->cmd_work);
4070 }
4071
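/* Called when a command finishes: if it ends an asynchronous request,
 * locate the request's completion callback, either on the last sent
 * command or further down the command queue, and invoke it with the
 * given status.
 */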
4072 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4073 {
4074 hci_req_complete_t req_complete = NULL;
4075 struct sk_buff *skb;
4076 unsigned long flags;
4077
4078 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4079
4080 /* If the completed command doesn't match the last one that was
4081 * sent we need to do special handling of it.
4082 */
4083 if (!hci_sent_cmd_data(hdev, opcode)) {
4084 /* Some CSR based controllers generate a spontaneous
4085 * reset complete event during init and any pending
4086 * command will never be completed. In such a case we
4087 * need to resend whatever was the last sent
4088 * command.
4089 */
4090 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4091 hci_resend_last(hdev);
4092
4093 return;
4094 }
4095
4096 /* If the command succeeded and there's still more commands in
4097 * this request the request is not yet complete.
4098 */
4099 if (!status && !hci_req_is_complete(hdev))
4100 return;
4101
4102 /* If this was the last command in a request the complete
4103 * callback would be found in hdev->sent_cmd instead of the
4104 * command queue (hdev->cmd_q).
4105 */
4106 if (hdev->sent_cmd) {
4107 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4108
4109 if (req_complete) {
4110 /* We must set the complete callback to NULL to
4111 * avoid calling the callback more than once if
4112 * this function gets called again.
4113 */
4114 bt_cb(hdev->sent_cmd)->req.complete = NULL;
4115
4116 goto call_complete;
4117 }
4118 }
4119
4120 /* Remove all pending commands belonging to this request */
4121 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4122 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4123 if (bt_cb(skb)->req.start) {
4124 __skb_queue_head(&hdev->cmd_q, skb);
4125 break;
4126 }
4127
4128 req_complete = bt_cb(skb)->req.complete;
4129 kfree_skb(skb);
4130 }
4131 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4132
4133 call_complete:
4134 if (req_complete)
4135 req_complete(hdev, status);
4136 }
4137
4138 static void hci_rx_work(struct work_struct *work)
4139 {
4140 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4141 struct sk_buff *skb;
4142
4143 BT_DBG("%s", hdev->name);
4144
4145 while ((skb = skb_dequeue(&hdev->rx_q))) {
4146 /* Send copy to monitor */
4147 hci_send_to_monitor(hdev, skb);
4148
4149 if (atomic_read(&hdev->promisc)) {
4150 /* Send copy to the sockets */
4151 hci_send_to_sock(hdev, skb);
4152 }
4153
4154 if (test_bit(HCI_RAW, &hdev->flags) ||
4155 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4156 kfree_skb(skb);
4157 continue;
4158 }
4159
4160 if (test_bit(HCI_INIT, &hdev->flags)) {
4161 /* Don't process data packets in this state. */
4162 switch (bt_cb(skb)->pkt_type) {
4163 case HCI_ACLDATA_PKT:
4164 case HCI_SCODATA_PKT:
4165 kfree_skb(skb);
4166 continue;
4167 }
4168 }
4169
4170 /* Process frame */
4171 switch (bt_cb(skb)->pkt_type) {
4172 case HCI_EVENT_PKT:
4173 BT_DBG("%s Event packet", hdev->name);
4174 hci_event_packet(hdev, skb);
4175 break;
4176
4177 case HCI_ACLDATA_PKT:
4178 BT_DBG("%s ACL data packet", hdev->name);
4179 hci_acldata_packet(hdev, skb);
4180 break;
4181
4182 case HCI_SCODATA_PKT:
4183 BT_DBG("%s SCO data packet", hdev->name);
4184 hci_scodata_packet(hdev, skb);
4185 break;
4186
4187 default:
4188 kfree_skb(skb);
4189 break;
4190 }
4191 }
4192 }
4193
4194 static void hci_cmd_work(struct work_struct *work)
4195 {
4196 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4197 struct sk_buff *skb;
4198
4199 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4200 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4201
4202 /* Send queued commands */
4203 if (atomic_read(&hdev->cmd_cnt)) {
4204 skb = skb_dequeue(&hdev->cmd_q);
4205 if (!skb)
4206 return;
4207
4208 kfree_skb(hdev->sent_cmd);
4209
4210 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4211 if (hdev->sent_cmd) {
4212 atomic_dec(&hdev->cmd_cnt);
4213 hci_send_frame(hdev, skb);
4214 if (test_bit(HCI_RESET, &hdev->flags))
4215 del_timer(&hdev->cmd_timer);
4216 else
4217 mod_timer(&hdev->cmd_timer,
4218 jiffies + HCI_CMD_TIMEOUT);
4219 } else {
4220 skb_queue_head(&hdev->cmd_q, skb);
4221 queue_work(hdev->workqueue, &hdev->cmd_work);
4222 }
4223 }
4224 }