net/bluetooth/hci_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
34
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
39
40 #include "hci_request.h"
41 #include "hci_debugfs.h"
42 #include "smp.h"
43
44 static void hci_rx_work(struct work_struct *work);
45 static void hci_cmd_work(struct work_struct *work);
46 static void hci_tx_work(struct work_struct *work);
47
48 /* HCI device list */
49 LIST_HEAD(hci_dev_list);
50 DEFINE_RWLOCK(hci_dev_list_lock);
51
52 /* HCI callback list */
53 LIST_HEAD(hci_cb_list);
54 DEFINE_MUTEX(hci_cb_list_lock);
55
56 /* HCI ID Numbering */
57 static DEFINE_IDA(hci_index_ida);
58
59 /* ----- HCI requests ----- */
60
61 #define HCI_REQ_DONE 0
62 #define HCI_REQ_PEND 1
63 #define HCI_REQ_CANCELED 2
64
65 #define hci_req_lock(d) mutex_lock(&d->req_lock)
66 #define hci_req_unlock(d) mutex_unlock(&d->req_lock)
67
68 /* ---- HCI notifications ---- */
69
70 static void hci_notify(struct hci_dev *hdev, int event)
71 {
72 hci_sock_dev_event(hdev, event);
73 }
74
75 /* ---- HCI debugfs entries ---- */
76
77 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
78 size_t count, loff_t *ppos)
79 {
80 struct hci_dev *hdev = file->private_data;
81 char buf[3];
82
83 	buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
84 buf[1] = '\n';
85 buf[2] = '\0';
86 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
87 }
88
89 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
90 size_t count, loff_t *ppos)
91 {
92 struct hci_dev *hdev = file->private_data;
93 struct sk_buff *skb;
94 char buf[32];
95 size_t buf_size = min(count, (sizeof(buf)-1));
96 bool enable;
97
98 if (!test_bit(HCI_UP, &hdev->flags))
99 return -ENETDOWN;
100
101 if (copy_from_user(buf, user_buf, buf_size))
102 return -EFAULT;
103
104 buf[buf_size] = '\0';
105 if (strtobool(buf, &enable))
106 return -EINVAL;
107
108 if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
109 return -EALREADY;
110
111 hci_req_lock(hdev);
112 if (enable)
113 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
114 HCI_CMD_TIMEOUT);
115 else
116 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
117 HCI_CMD_TIMEOUT);
118 hci_req_unlock(hdev);
119
120 if (IS_ERR(skb))
121 return PTR_ERR(skb);
122
123 kfree_skb(skb);
124
125 hci_dev_change_flag(hdev, HCI_DUT_MODE);
126
127 return count;
128 }
129
130 static const struct file_operations dut_mode_fops = {
131 .open = simple_open,
132 .read = dut_mode_read,
133 .write = dut_mode_write,
134 .llseek = default_llseek,
135 };
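
The dut_mode attribute above is wired into debugfs, so it can be toggled from user space once debugfs is mounted. A minimal sketch, assuming the usual /sys/kernel/debug mount point and an hci0 controller (the exact path comes from the per-device debugfs directory created during registration):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical helper: enable or disable Device Under Test mode on hci0
 * by writing "Y"/"N" to the debugfs file served by dut_mode_write(). */
static int set_dut_mode(int enable)
{
	const char *path = "/sys/kernel/debug/bluetooth/hci0/dut_mode";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return -1;
	}

	if (write(fd, enable ? "Y" : "N", 1) != 1)
		perror("write");

	close(fd);
	return 0;
}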
136
137 /* ---- HCI requests ---- */
138
139 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
140 struct sk_buff *skb)
141 {
142 BT_DBG("%s result 0x%2.2x", hdev->name, result);
143
144 if (hdev->req_status == HCI_REQ_PEND) {
145 hdev->req_result = result;
146 hdev->req_status = HCI_REQ_DONE;
147 if (skb)
148 hdev->req_skb = skb_get(skb);
149 wake_up_interruptible(&hdev->req_wait_q);
150 }
151 }
152
153 static void hci_req_cancel(struct hci_dev *hdev, int err)
154 {
155 BT_DBG("%s err 0x%2.2x", hdev->name, err);
156
157 if (hdev->req_status == HCI_REQ_PEND) {
158 hdev->req_result = err;
159 hdev->req_status = HCI_REQ_CANCELED;
160 wake_up_interruptible(&hdev->req_wait_q);
161 }
162 }
163
164 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
165 const void *param, u8 event, u32 timeout)
166 {
167 DECLARE_WAITQUEUE(wait, current);
168 struct hci_request req;
169 struct sk_buff *skb;
170 int err = 0;
171
172 BT_DBG("%s", hdev->name);
173
174 hci_req_init(&req, hdev);
175
176 hci_req_add_ev(&req, opcode, plen, param, event);
177
178 hdev->req_status = HCI_REQ_PEND;
179
180 add_wait_queue(&hdev->req_wait_q, &wait);
181 set_current_state(TASK_INTERRUPTIBLE);
182
183 err = hci_req_run_skb(&req, hci_req_sync_complete);
184 if (err < 0) {
185 remove_wait_queue(&hdev->req_wait_q, &wait);
186 set_current_state(TASK_RUNNING);
187 return ERR_PTR(err);
188 }
189
190 schedule_timeout(timeout);
191
192 remove_wait_queue(&hdev->req_wait_q, &wait);
193
194 if (signal_pending(current))
195 return ERR_PTR(-EINTR);
196
197 switch (hdev->req_status) {
198 case HCI_REQ_DONE:
199 err = -bt_to_errno(hdev->req_result);
200 break;
201
202 case HCI_REQ_CANCELED:
203 err = -hdev->req_result;
204 break;
205
206 default:
207 err = -ETIMEDOUT;
208 break;
209 }
210
211 hdev->req_status = hdev->req_result = 0;
212 skb = hdev->req_skb;
213 hdev->req_skb = NULL;
214
215 BT_DBG("%s end: err %d", hdev->name, err);
216
217 if (err < 0) {
218 kfree_skb(skb);
219 return ERR_PTR(err);
220 }
221
222 if (!skb)
223 return ERR_PTR(-ENODATA);
224
225 return skb;
226 }
227 EXPORT_SYMBOL(__hci_cmd_sync_ev);
228
229 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
230 const void *param, u32 timeout)
231 {
232 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
233 }
234 EXPORT_SYMBOL(__hci_cmd_sync);
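
__hci_cmd_sync() is the helper drivers and core code use to send a single HCI command and wait synchronously for its Command Complete data, as dut_mode_write() above demonstrates. A hedged sketch of typical usage follows; the opcode 0xfc0f and its one-byte parameter are purely hypothetical vendor-specific values:

/* Sketch only: send a hypothetical vendor command and discard the reply. */
static int example_send_vendor_cmd(struct hci_dev *hdev)
{
	const u8 param[] = { 0x01 };	/* hypothetical parameter */
	struct sk_buff *skb;

	skb = __hci_cmd_sync(hdev, 0xfc0f, sizeof(param), param,
			     HCI_CMD_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* skb->data holds the Command Complete return parameters. */
	kfree_skb(skb);
	return 0;
}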
235
236 /* Execute request and wait for completion. */
237 static int __hci_req_sync(struct hci_dev *hdev,
238 void (*func)(struct hci_request *req,
239 unsigned long opt),
240 unsigned long opt, __u32 timeout)
241 {
242 struct hci_request req;
243 DECLARE_WAITQUEUE(wait, current);
244 int err = 0;
245
246 BT_DBG("%s start", hdev->name);
247
248 hci_req_init(&req, hdev);
249
250 hdev->req_status = HCI_REQ_PEND;
251
252 func(&req, opt);
253
254 add_wait_queue(&hdev->req_wait_q, &wait);
255 set_current_state(TASK_INTERRUPTIBLE);
256
257 err = hci_req_run_skb(&req, hci_req_sync_complete);
258 if (err < 0) {
259 hdev->req_status = 0;
260
261 remove_wait_queue(&hdev->req_wait_q, &wait);
262 set_current_state(TASK_RUNNING);
263
264 /* ENODATA means the HCI request command queue is empty.
265 * This can happen when a request with conditionals doesn't
266 * trigger any commands to be sent. This is normal behavior
267 * and should not trigger an error return.
268 */
269 if (err == -ENODATA)
270 return 0;
271
272 return err;
273 }
274
275 schedule_timeout(timeout);
276
277 remove_wait_queue(&hdev->req_wait_q, &wait);
278
279 if (signal_pending(current))
280 return -EINTR;
281
282 switch (hdev->req_status) {
283 case HCI_REQ_DONE:
284 err = -bt_to_errno(hdev->req_result);
285 break;
286
287 case HCI_REQ_CANCELED:
288 err = -hdev->req_result;
289 break;
290
291 default:
292 err = -ETIMEDOUT;
293 break;
294 }
295
296 hdev->req_status = hdev->req_result = 0;
297
298 BT_DBG("%s end: err %d", hdev->name, err);
299
300 return err;
301 }
302
303 static int hci_req_sync(struct hci_dev *hdev,
304 void (*req)(struct hci_request *req,
305 unsigned long opt),
306 unsigned long opt, __u32 timeout)
307 {
308 int ret;
309
310 if (!test_bit(HCI_UP, &hdev->flags))
311 return -ENETDOWN;
312
313 /* Serialize all requests */
314 hci_req_lock(hdev);
315 ret = __hci_req_sync(hdev, req, opt, timeout);
316 hci_req_unlock(hdev);
317
318 return ret;
319 }
320
321 static void hci_reset_req(struct hci_request *req, unsigned long opt)
322 {
323 BT_DBG("%s %ld", req->hdev->name, opt);
324
325 /* Reset device */
326 set_bit(HCI_RESET, &req->hdev->flags);
327 hci_req_add(req, HCI_OP_RESET, 0, NULL);
328 }
329
330 static void bredr_init(struct hci_request *req)
331 {
332 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
333
334 /* Read Local Supported Features */
335 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
336
337 /* Read Local Version */
338 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
339
340 /* Read BD Address */
341 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
342 }
343
344 static void amp_init1(struct hci_request *req)
345 {
346 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
347
348 /* Read Local Version */
349 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
350
351 /* Read Local Supported Commands */
352 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
353
354 /* Read Local AMP Info */
355 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
356
357 /* Read Data Blk size */
358 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
359
360 /* Read Flow Control Mode */
361 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
362
363 /* Read Location Data */
364 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
365 }
366
367 static void amp_init2(struct hci_request *req)
368 {
369 /* Read Local Supported Features. Not all AMP controllers
370 * support this so it's placed conditionally in the second
371 * stage init.
372 */
373 if (req->hdev->commands[14] & 0x20)
374 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
375 }
376
377 static void hci_init1_req(struct hci_request *req, unsigned long opt)
378 {
379 struct hci_dev *hdev = req->hdev;
380
381 BT_DBG("%s %ld", hdev->name, opt);
382
383 /* Reset */
384 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
385 hci_reset_req(req, 0);
386
387 switch (hdev->dev_type) {
388 case HCI_BREDR:
389 bredr_init(req);
390 break;
391
392 case HCI_AMP:
393 amp_init1(req);
394 break;
395
396 default:
397 BT_ERR("Unknown device type %d", hdev->dev_type);
398 break;
399 }
400 }
401
402 static void bredr_setup(struct hci_request *req)
403 {
404 __le16 param;
405 __u8 flt_type;
406
407 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
408 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
409
410 /* Read Class of Device */
411 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
412
413 /* Read Local Name */
414 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
415
416 /* Read Voice Setting */
417 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
418
419 /* Read Number of Supported IAC */
420 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
421
422 /* Read Current IAC LAP */
423 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
424
425 /* Clear Event Filters */
426 flt_type = HCI_FLT_CLEAR_ALL;
427 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
428
429 /* Connection accept timeout ~20 secs */
430 param = cpu_to_le16(0x7d00);
431 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
432 }
433
434 static void le_setup(struct hci_request *req)
435 {
436 struct hci_dev *hdev = req->hdev;
437
438 /* Read LE Buffer Size */
439 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
440
441 /* Read LE Local Supported Features */
442 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
443
444 /* Read LE Supported States */
445 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
446
447 /* Read LE White List Size */
448 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
449
450 /* Clear LE White List */
451 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
452
453 /* LE-only controllers have LE implicitly enabled */
454 if (!lmp_bredr_capable(hdev))
455 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
456 }
457
458 static void hci_setup_event_mask(struct hci_request *req)
459 {
460 struct hci_dev *hdev = req->hdev;
461
462 /* The second byte is 0xff instead of 0x9f (two reserved bits
463 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
464 * command otherwise.
465 */
466 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
467
468 	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
469 * any event mask for pre 1.2 devices.
470 */
471 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
472 return;
473
474 if (lmp_bredr_capable(hdev)) {
475 events[4] |= 0x01; /* Flow Specification Complete */
476 events[4] |= 0x02; /* Inquiry Result with RSSI */
477 events[4] |= 0x04; /* Read Remote Extended Features Complete */
478 events[5] |= 0x08; /* Synchronous Connection Complete */
479 events[5] |= 0x10; /* Synchronous Connection Changed */
480 } else {
481 /* Use a different default for LE-only devices */
482 memset(events, 0, sizeof(events));
483 events[0] |= 0x10; /* Disconnection Complete */
484 events[1] |= 0x08; /* Read Remote Version Information Complete */
485 events[1] |= 0x20; /* Command Complete */
486 events[1] |= 0x40; /* Command Status */
487 events[1] |= 0x80; /* Hardware Error */
488 events[2] |= 0x04; /* Number of Completed Packets */
489 events[3] |= 0x02; /* Data Buffer Overflow */
490
491 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
492 events[0] |= 0x80; /* Encryption Change */
493 events[5] |= 0x80; /* Encryption Key Refresh Complete */
494 }
495 }
496
497 if (lmp_inq_rssi_capable(hdev))
498 events[4] |= 0x02; /* Inquiry Result with RSSI */
499
500 if (lmp_sniffsubr_capable(hdev))
501 events[5] |= 0x20; /* Sniff Subrating */
502
503 if (lmp_pause_enc_capable(hdev))
504 events[5] |= 0x80; /* Encryption Key Refresh Complete */
505
506 if (lmp_ext_inq_capable(hdev))
507 events[5] |= 0x40; /* Extended Inquiry Result */
508
509 if (lmp_no_flush_capable(hdev))
510 events[7] |= 0x01; /* Enhanced Flush Complete */
511
512 if (lmp_lsto_capable(hdev))
513 events[6] |= 0x80; /* Link Supervision Timeout Changed */
514
515 if (lmp_ssp_capable(hdev)) {
516 events[6] |= 0x01; /* IO Capability Request */
517 events[6] |= 0x02; /* IO Capability Response */
518 events[6] |= 0x04; /* User Confirmation Request */
519 events[6] |= 0x08; /* User Passkey Request */
520 events[6] |= 0x10; /* Remote OOB Data Request */
521 events[6] |= 0x20; /* Simple Pairing Complete */
522 events[7] |= 0x04; /* User Passkey Notification */
523 events[7] |= 0x08; /* Keypress Notification */
524 events[7] |= 0x10; /* Remote Host Supported
525 * Features Notification
526 */
527 }
528
529 if (lmp_le_capable(hdev))
530 events[7] |= 0x20; /* LE Meta-Event */
531
532 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
533 }
534
535 static void hci_init2_req(struct hci_request *req, unsigned long opt)
536 {
537 struct hci_dev *hdev = req->hdev;
538
539 if (hdev->dev_type == HCI_AMP)
540 return amp_init2(req);
541
542 if (lmp_bredr_capable(hdev))
543 bredr_setup(req);
544 else
545 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
546
547 if (lmp_le_capable(hdev))
548 le_setup(req);
549
550 /* All Bluetooth 1.2 and later controllers should support the
551 * HCI command for reading the local supported commands.
552 *
553 * Unfortunately some controllers indicate Bluetooth 1.2 support,
554 * but do not have support for this command. If that is the case,
555 * the driver can quirk the behavior and skip reading the local
556 * supported commands.
557 */
558 if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
559 !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
560 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
561
562 if (lmp_ssp_capable(hdev)) {
563 		/* When SSP is available, the host features page
564 		 * should also be available. However some
565 * controllers list the max_page as 0 as long as SSP
566 * has not been enabled. To achieve proper debugging
567 * output, force the minimum max_page to 1 at least.
568 */
569 hdev->max_page = 0x01;
570
571 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
572 u8 mode = 0x01;
573
574 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
575 sizeof(mode), &mode);
576 } else {
577 struct hci_cp_write_eir cp;
578
579 memset(hdev->eir, 0, sizeof(hdev->eir));
580 memset(&cp, 0, sizeof(cp));
581
582 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
583 }
584 }
585
586 if (lmp_inq_rssi_capable(hdev) ||
587 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
588 u8 mode;
589
590 /* If Extended Inquiry Result events are supported, then
591 * they are clearly preferred over Inquiry Result with RSSI
592 * events.
593 */
594 mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
595
596 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
597 }
598
599 if (lmp_inq_tx_pwr_capable(hdev))
600 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
601
602 if (lmp_ext_feat_capable(hdev)) {
603 struct hci_cp_read_local_ext_features cp;
604
605 cp.page = 0x01;
606 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
607 sizeof(cp), &cp);
608 }
609
610 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
611 u8 enable = 1;
612 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
613 &enable);
614 }
615 }
616
617 static void hci_setup_link_policy(struct hci_request *req)
618 {
619 struct hci_dev *hdev = req->hdev;
620 struct hci_cp_write_def_link_policy cp;
621 u16 link_policy = 0;
622
623 if (lmp_rswitch_capable(hdev))
624 link_policy |= HCI_LP_RSWITCH;
625 if (lmp_hold_capable(hdev))
626 link_policy |= HCI_LP_HOLD;
627 if (lmp_sniff_capable(hdev))
628 link_policy |= HCI_LP_SNIFF;
629 if (lmp_park_capable(hdev))
630 link_policy |= HCI_LP_PARK;
631
632 cp.policy = cpu_to_le16(link_policy);
633 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
634 }
635
636 static void hci_set_le_support(struct hci_request *req)
637 {
638 struct hci_dev *hdev = req->hdev;
639 struct hci_cp_write_le_host_supported cp;
640
641 /* LE-only devices do not support explicit enablement */
642 if (!lmp_bredr_capable(hdev))
643 return;
644
645 memset(&cp, 0, sizeof(cp));
646
647 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
648 cp.le = 0x01;
649 cp.simul = 0x00;
650 }
651
652 if (cp.le != lmp_host_le_capable(hdev))
653 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
654 &cp);
655 }
656
657 static void hci_set_event_mask_page_2(struct hci_request *req)
658 {
659 struct hci_dev *hdev = req->hdev;
660 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
661
662 /* If Connectionless Slave Broadcast master role is supported
663 * enable all necessary events for it.
664 */
665 if (lmp_csb_master_capable(hdev)) {
666 events[1] |= 0x40; /* Triggered Clock Capture */
667 events[1] |= 0x80; /* Synchronization Train Complete */
668 events[2] |= 0x10; /* Slave Page Response Timeout */
669 events[2] |= 0x20; /* CSB Channel Map Change */
670 }
671
672 /* If Connectionless Slave Broadcast slave role is supported
673 * enable all necessary events for it.
674 */
675 if (lmp_csb_slave_capable(hdev)) {
676 events[2] |= 0x01; /* Synchronization Train Received */
677 events[2] |= 0x02; /* CSB Receive */
678 events[2] |= 0x04; /* CSB Timeout */
679 events[2] |= 0x08; /* Truncated Page Complete */
680 }
681
682 /* Enable Authenticated Payload Timeout Expired event if supported */
683 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
684 events[2] |= 0x80;
685
686 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
687 }
688
689 static void hci_init3_req(struct hci_request *req, unsigned long opt)
690 {
691 struct hci_dev *hdev = req->hdev;
692 u8 p;
693
694 hci_setup_event_mask(req);
695
696 if (hdev->commands[6] & 0x20 &&
697 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
698 struct hci_cp_read_stored_link_key cp;
699
700 bacpy(&cp.bdaddr, BDADDR_ANY);
701 cp.read_all = 0x01;
702 hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
703 }
704
705 if (hdev->commands[5] & 0x10)
706 hci_setup_link_policy(req);
707
708 if (hdev->commands[8] & 0x01)
709 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
710
711 /* Some older Broadcom based Bluetooth 1.2 controllers do not
712 * support the Read Page Scan Type command. Check support for
713 * this command in the bit mask of supported commands.
714 */
715 if (hdev->commands[13] & 0x01)
716 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
717
718 if (lmp_le_capable(hdev)) {
719 u8 events[8];
720
721 memset(events, 0, sizeof(events));
722 events[0] = 0x0f;
723
724 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
725 events[0] |= 0x10; /* LE Long Term Key Request */
726
727 		/* If the controller supports the Connection Parameters Request
728 * Link Layer Procedure, enable the corresponding event.
729 */
730 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
731 events[0] |= 0x20; /* LE Remote Connection
732 * Parameter Request
733 */
734
735 /* If the controller supports the Data Length Extension
736 * feature, enable the corresponding event.
737 */
738 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
739 events[0] |= 0x40; /* LE Data Length Change */
740
741 /* If the controller supports Extended Scanner Filter
742 		 * Policies, enable the corresponding event.
743 */
744 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
745 events[1] |= 0x04; /* LE Direct Advertising
746 * Report
747 */
748
749 /* If the controller supports the LE Read Local P-256
750 * Public Key command, enable the corresponding event.
751 */
752 if (hdev->commands[34] & 0x02)
753 events[0] |= 0x80; /* LE Read Local P-256
754 * Public Key Complete
755 */
756
757 /* If the controller supports the LE Generate DHKey
758 * command, enable the corresponding event.
759 */
760 if (hdev->commands[34] & 0x04)
761 events[1] |= 0x01; /* LE Generate DHKey Complete */
762
763 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
764 events);
765
766 if (hdev->commands[25] & 0x40) {
767 /* Read LE Advertising Channel TX Power */
768 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
769 }
770
771 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
772 /* Read LE Maximum Data Length */
773 hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
774
775 /* Read LE Suggested Default Data Length */
776 hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
777 }
778
779 hci_set_le_support(req);
780 }
781
782 /* Read features beyond page 1 if available */
783 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
784 struct hci_cp_read_local_ext_features cp;
785
786 cp.page = p;
787 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
788 sizeof(cp), &cp);
789 }
790 }
791
792 static void hci_init4_req(struct hci_request *req, unsigned long opt)
793 {
794 struct hci_dev *hdev = req->hdev;
795
796 /* Some Broadcom based Bluetooth controllers do not support the
797 * Delete Stored Link Key command. They are clearly indicating its
798 * absence in the bit mask of supported commands.
799 *
800 	 * Check the supported commands and only if the command is marked
801 * as supported send it. If not supported assume that the controller
802 * does not have actual support for stored link keys which makes this
803 * command redundant anyway.
804 *
805 	 * Some controllers indicate that they support deleting
806 * stored link keys, but they don't. The quirk lets a driver
807 * just disable this command.
808 */
809 if (hdev->commands[6] & 0x80 &&
810 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
811 struct hci_cp_delete_stored_link_key cp;
812
813 bacpy(&cp.bdaddr, BDADDR_ANY);
814 cp.delete_all = 0x01;
815 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
816 sizeof(cp), &cp);
817 }
818
819 /* Set event mask page 2 if the HCI command for it is supported */
820 if (hdev->commands[22] & 0x04)
821 hci_set_event_mask_page_2(req);
822
823 /* Read local codec list if the HCI command is supported */
824 if (hdev->commands[29] & 0x20)
825 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
826
827 /* Get MWS transport configuration if the HCI command is supported */
828 if (hdev->commands[30] & 0x08)
829 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
830
831 /* Check for Synchronization Train support */
832 if (lmp_sync_train_capable(hdev))
833 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
834
835 /* Enable Secure Connections if supported and configured */
836 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
837 bredr_sc_enabled(hdev)) {
838 u8 support = 0x01;
839
840 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
841 sizeof(support), &support);
842 }
843 }
844
845 static int __hci_init(struct hci_dev *hdev)
846 {
847 int err;
848
849 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
850 if (err < 0)
851 return err;
852
853 /* The Device Under Test (DUT) mode is special and available for
854 * all controller types. So just create it early on.
855 */
856 if (hci_dev_test_flag(hdev, HCI_SETUP)) {
857 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
858 &dut_mode_fops);
859 }
860
861 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
862 if (err < 0)
863 return err;
864
865 	/* HCI_BREDR covers single-mode LE, single-mode BR/EDR and dual-mode
866 	 * BR/EDR/LE controllers. AMP controllers only need the
867 * first two stages of init.
868 */
869 if (hdev->dev_type != HCI_BREDR)
870 return 0;
871
872 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
873 if (err < 0)
874 return err;
875
876 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
877 if (err < 0)
878 return err;
879
880 /* This function is only called when the controller is actually in
881 * configured state. When the controller is marked as unconfigured,
882 * this initialization procedure is not run.
883 *
884 * It means that it is possible that a controller runs through its
885 * setup phase and then discovers missing settings. If that is the
886 * case, then this function will not be called. It then will only
887 * be called during the config phase.
888 *
889 * So only when in setup phase or config phase, create the debugfs
890 * entries and register the SMP channels.
891 */
892 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
893 !hci_dev_test_flag(hdev, HCI_CONFIG))
894 return 0;
895
896 hci_debugfs_create_common(hdev);
897
898 if (lmp_bredr_capable(hdev))
899 hci_debugfs_create_bredr(hdev);
900
901 if (lmp_le_capable(hdev))
902 hci_debugfs_create_le(hdev);
903
904 return 0;
905 }
906
907 static void hci_init0_req(struct hci_request *req, unsigned long opt)
908 {
909 struct hci_dev *hdev = req->hdev;
910
911 BT_DBG("%s %ld", hdev->name, opt);
912
913 /* Reset */
914 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
915 hci_reset_req(req, 0);
916
917 /* Read Local Version */
918 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
919
920 /* Read BD Address */
921 if (hdev->set_bdaddr)
922 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
923 }
924
925 static int __hci_unconf_init(struct hci_dev *hdev)
926 {
927 int err;
928
929 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
930 return 0;
931
932 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
933 if (err < 0)
934 return err;
935
936 return 0;
937 }
938
939 static void hci_scan_req(struct hci_request *req, unsigned long opt)
940 {
941 __u8 scan = opt;
942
943 BT_DBG("%s %x", req->hdev->name, scan);
944
945 /* Inquiry and Page scans */
946 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
947 }
948
949 static void hci_auth_req(struct hci_request *req, unsigned long opt)
950 {
951 __u8 auth = opt;
952
953 BT_DBG("%s %x", req->hdev->name, auth);
954
955 /* Authentication */
956 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
957 }
958
959 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
960 {
961 __u8 encrypt = opt;
962
963 BT_DBG("%s %x", req->hdev->name, encrypt);
964
965 /* Encryption */
966 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
967 }
968
969 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
970 {
971 __le16 policy = cpu_to_le16(opt);
972
973 BT_DBG("%s %x", req->hdev->name, policy);
974
975 /* Default link policy */
976 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
977 }
978
979 /* Get HCI device by index.
980 * Device is held on return. */
981 struct hci_dev *hci_dev_get(int index)
982 {
983 struct hci_dev *hdev = NULL, *d;
984
985 BT_DBG("%d", index);
986
987 if (index < 0)
988 return NULL;
989
990 read_lock(&hci_dev_list_lock);
991 list_for_each_entry(d, &hci_dev_list, list) {
992 if (d->id == index) {
993 hdev = hci_dev_hold(d);
994 break;
995 }
996 }
997 read_unlock(&hci_dev_list_lock);
998 return hdev;
999 }
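
hci_dev_get() returns the device with a reference held, so every successful lookup must be balanced by hci_dev_put(). A minimal in-kernel usage sketch (the function name is hypothetical):

static void example_use_hdev(int index)
{
	struct hci_dev *hdev = hci_dev_get(index);

	if (!hdev)
		return;

	BT_DBG("found %s", hdev->name);

	/* ... use hdev ... */

	hci_dev_put(hdev);	/* drop the reference taken above */
}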
1000
1001 /* ---- Inquiry support ---- */
1002
1003 bool hci_discovery_active(struct hci_dev *hdev)
1004 {
1005 struct discovery_state *discov = &hdev->discovery;
1006
1007 switch (discov->state) {
1008 case DISCOVERY_FINDING:
1009 case DISCOVERY_RESOLVING:
1010 return true;
1011
1012 default:
1013 return false;
1014 }
1015 }
1016
1017 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1018 {
1019 int old_state = hdev->discovery.state;
1020
1021 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1022
1023 if (old_state == state)
1024 return;
1025
1026 hdev->discovery.state = state;
1027
1028 switch (state) {
1029 case DISCOVERY_STOPPED:
1030 hci_update_background_scan(hdev);
1031
1032 if (old_state != DISCOVERY_STARTING)
1033 mgmt_discovering(hdev, 0);
1034 break;
1035 case DISCOVERY_STARTING:
1036 break;
1037 case DISCOVERY_FINDING:
1038 mgmt_discovering(hdev, 1);
1039 break;
1040 case DISCOVERY_RESOLVING:
1041 break;
1042 case DISCOVERY_STOPPING:
1043 break;
1044 }
1045 }
1046
1047 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1048 {
1049 struct discovery_state *cache = &hdev->discovery;
1050 struct inquiry_entry *p, *n;
1051
1052 list_for_each_entry_safe(p, n, &cache->all, all) {
1053 list_del(&p->all);
1054 kfree(p);
1055 }
1056
1057 INIT_LIST_HEAD(&cache->unknown);
1058 INIT_LIST_HEAD(&cache->resolve);
1059 }
1060
1061 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1062 bdaddr_t *bdaddr)
1063 {
1064 struct discovery_state *cache = &hdev->discovery;
1065 struct inquiry_entry *e;
1066
1067 BT_DBG("cache %p, %pMR", cache, bdaddr);
1068
1069 list_for_each_entry(e, &cache->all, all) {
1070 if (!bacmp(&e->data.bdaddr, bdaddr))
1071 return e;
1072 }
1073
1074 return NULL;
1075 }
1076
1077 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1078 bdaddr_t *bdaddr)
1079 {
1080 struct discovery_state *cache = &hdev->discovery;
1081 struct inquiry_entry *e;
1082
1083 BT_DBG("cache %p, %pMR", cache, bdaddr);
1084
1085 list_for_each_entry(e, &cache->unknown, list) {
1086 if (!bacmp(&e->data.bdaddr, bdaddr))
1087 return e;
1088 }
1089
1090 return NULL;
1091 }
1092
1093 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1094 bdaddr_t *bdaddr,
1095 int state)
1096 {
1097 struct discovery_state *cache = &hdev->discovery;
1098 struct inquiry_entry *e;
1099
1100 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1101
1102 list_for_each_entry(e, &cache->resolve, list) {
1103 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1104 return e;
1105 if (!bacmp(&e->data.bdaddr, bdaddr))
1106 return e;
1107 }
1108
1109 return NULL;
1110 }
1111
1112 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1113 struct inquiry_entry *ie)
1114 {
1115 struct discovery_state *cache = &hdev->discovery;
1116 struct list_head *pos = &cache->resolve;
1117 struct inquiry_entry *p;
1118
1119 list_del(&ie->list);
1120
1121 list_for_each_entry(p, &cache->resolve, list) {
1122 if (p->name_state != NAME_PENDING &&
1123 abs(p->data.rssi) >= abs(ie->data.rssi))
1124 break;
1125 pos = &p->list;
1126 }
1127
1128 list_add(&ie->list, pos);
1129 }
1130
1131 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1132 bool name_known)
1133 {
1134 struct discovery_state *cache = &hdev->discovery;
1135 struct inquiry_entry *ie;
1136 u32 flags = 0;
1137
1138 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1139
1140 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
1141
1142 if (!data->ssp_mode)
1143 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1144
1145 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1146 if (ie) {
1147 if (!ie->data.ssp_mode)
1148 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1149
1150 if (ie->name_state == NAME_NEEDED &&
1151 data->rssi != ie->data.rssi) {
1152 ie->data.rssi = data->rssi;
1153 hci_inquiry_cache_update_resolve(hdev, ie);
1154 }
1155
1156 goto update;
1157 }
1158
1159 /* Entry not in the cache. Add new one. */
1160 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
1161 if (!ie) {
1162 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1163 goto done;
1164 }
1165
1166 list_add(&ie->all, &cache->all);
1167
1168 if (name_known) {
1169 ie->name_state = NAME_KNOWN;
1170 } else {
1171 ie->name_state = NAME_NOT_KNOWN;
1172 list_add(&ie->list, &cache->unknown);
1173 }
1174
1175 update:
1176 if (name_known && ie->name_state != NAME_KNOWN &&
1177 ie->name_state != NAME_PENDING) {
1178 ie->name_state = NAME_KNOWN;
1179 list_del(&ie->list);
1180 }
1181
1182 memcpy(&ie->data, data, sizeof(*data));
1183 ie->timestamp = jiffies;
1184 cache->timestamp = jiffies;
1185
1186 if (ie->name_state == NAME_NOT_KNOWN)
1187 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1188
1189 done:
1190 return flags;
1191 }
1192
1193 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1194 {
1195 struct discovery_state *cache = &hdev->discovery;
1196 struct inquiry_info *info = (struct inquiry_info *) buf;
1197 struct inquiry_entry *e;
1198 int copied = 0;
1199
1200 list_for_each_entry(e, &cache->all, all) {
1201 struct inquiry_data *data = &e->data;
1202
1203 if (copied >= num)
1204 break;
1205
1206 bacpy(&info->bdaddr, &data->bdaddr);
1207 info->pscan_rep_mode = data->pscan_rep_mode;
1208 info->pscan_period_mode = data->pscan_period_mode;
1209 info->pscan_mode = data->pscan_mode;
1210 memcpy(info->dev_class, data->dev_class, 3);
1211 info->clock_offset = data->clock_offset;
1212
1213 info++;
1214 copied++;
1215 }
1216
1217 BT_DBG("cache %p, copied %d", cache, copied);
1218 return copied;
1219 }
1220
1221 static void hci_inq_req(struct hci_request *req, unsigned long opt)
1222 {
1223 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1224 struct hci_dev *hdev = req->hdev;
1225 struct hci_cp_inquiry cp;
1226
1227 BT_DBG("%s", hdev->name);
1228
1229 if (test_bit(HCI_INQUIRY, &hdev->flags))
1230 return;
1231
1232 /* Start Inquiry */
1233 memcpy(&cp.lap, &ir->lap, 3);
1234 cp.length = ir->length;
1235 cp.num_rsp = ir->num_rsp;
1236 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1237 }
1238
1239 int hci_inquiry(void __user *arg)
1240 {
1241 __u8 __user *ptr = arg;
1242 struct hci_inquiry_req ir;
1243 struct hci_dev *hdev;
1244 int err = 0, do_inquiry = 0, max_rsp;
1245 long timeo;
1246 __u8 *buf;
1247
1248 if (copy_from_user(&ir, ptr, sizeof(ir)))
1249 return -EFAULT;
1250
1251 hdev = hci_dev_get(ir.dev_id);
1252 if (!hdev)
1253 return -ENODEV;
1254
1255 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1256 err = -EBUSY;
1257 goto done;
1258 }
1259
1260 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1261 err = -EOPNOTSUPP;
1262 goto done;
1263 }
1264
1265 if (hdev->dev_type != HCI_BREDR) {
1266 err = -EOPNOTSUPP;
1267 goto done;
1268 }
1269
1270 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1271 err = -EOPNOTSUPP;
1272 goto done;
1273 }
1274
1275 hci_dev_lock(hdev);
1276 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1277 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1278 hci_inquiry_cache_flush(hdev);
1279 do_inquiry = 1;
1280 }
1281 hci_dev_unlock(hdev);
1282
1283 timeo = ir.length * msecs_to_jiffies(2000);
1284
1285 if (do_inquiry) {
1286 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1287 timeo);
1288 if (err < 0)
1289 goto done;
1290
1291 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1292 * cleared). If it is interrupted by a signal, return -EINTR.
1293 */
1294 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
1295 TASK_INTERRUPTIBLE))
1296 return -EINTR;
1297 }
1298
1299 	/* For an unlimited number of responses we will use a buffer with
1300 * 255 entries
1301 */
1302 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1303
1304 	/* cache_dump can't sleep. Therefore we allocate a temp buffer and then
1305 	 * copy it to user space.
1306 */
1307 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
1308 if (!buf) {
1309 err = -ENOMEM;
1310 goto done;
1311 }
1312
1313 hci_dev_lock(hdev);
1314 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1315 hci_dev_unlock(hdev);
1316
1317 BT_DBG("num_rsp %d", ir.num_rsp);
1318
1319 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1320 ptr += sizeof(ir);
1321 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1322 ir.num_rsp))
1323 err = -EFAULT;
1324 } else
1325 err = -EFAULT;
1326
1327 kfree(buf);
1328
1329 done:
1330 hci_dev_put(hdev);
1331 return err;
1332 }
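
hci_inquiry() above backs the HCIINQUIRY ioctl on raw HCI sockets. From user space the same code path is usually reached through libbluetooth's hci_inquiry() wrapper; a hedged sketch, assuming the BlueZ userspace headers are installed:

#include <stdio.h>
#include <stdlib.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>
#include <bluetooth/hci_lib.h>

int main(void)
{
	int dev_id = hci_get_route(NULL);	/* first available adapter */
	int max_rsp = 255, len = 8;		/* inquiry length in 1.28s units */
	inquiry_info *ii;
	int i, num_rsp;

	if (dev_id < 0)
		return 1;

	ii = malloc(max_rsp * sizeof(inquiry_info));
	if (!ii)
		return 1;

	/* IREQ_CACHE_FLUSH makes hci_inquiry() above flush the cache first. */
	num_rsp = hci_inquiry(dev_id, len, max_rsp, NULL, &ii, IREQ_CACHE_FLUSH);
	if (num_rsp < 0) {
		perror("hci_inquiry");
		free(ii);
		return 1;
	}

	for (i = 0; i < num_rsp; i++) {
		char addr[19];

		ba2str(&ii[i].bdaddr, addr);
		printf("%s\n", addr);
	}

	free(ii);
	return 0;
}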
1333
1334 static int hci_dev_do_open(struct hci_dev *hdev)
1335 {
1336 int ret = 0;
1337
1338 BT_DBG("%s %p", hdev->name, hdev);
1339
1340 hci_req_lock(hdev);
1341
1342 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1343 ret = -ENODEV;
1344 goto done;
1345 }
1346
1347 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1348 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
1349 /* Check for rfkill but allow the HCI setup stage to
1350 * proceed (which in itself doesn't cause any RF activity).
1351 */
1352 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
1353 ret = -ERFKILL;
1354 goto done;
1355 }
1356
1357 /* Check for valid public address or a configured static
1358 		 * random address, but let the HCI setup proceed to
1359 * be able to determine if there is a public address
1360 * or not.
1361 *
1362 * In case of user channel usage, it is not important
1363 * if a public address or static random address is
1364 * available.
1365 *
1366 * This check is only valid for BR/EDR controllers
1367 * since AMP controllers do not have an address.
1368 */
1369 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1370 hdev->dev_type == HCI_BREDR &&
1371 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1372 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1373 ret = -EADDRNOTAVAIL;
1374 goto done;
1375 }
1376 }
1377
1378 if (test_bit(HCI_UP, &hdev->flags)) {
1379 ret = -EALREADY;
1380 goto done;
1381 }
1382
1383 if (hdev->open(hdev)) {
1384 ret = -EIO;
1385 goto done;
1386 }
1387
1388 atomic_set(&hdev->cmd_cnt, 1);
1389 set_bit(HCI_INIT, &hdev->flags);
1390
1391 if (hci_dev_test_flag(hdev, HCI_SETUP)) {
1392 if (hdev->setup)
1393 ret = hdev->setup(hdev);
1394
1395 /* The transport driver can set these quirks before
1396 * creating the HCI device or in its setup callback.
1397 *
1398 * In case any of them is set, the controller has to
1399 * start up as unconfigured.
1400 */
1401 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1402 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
1403 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1404
1405 /* For an unconfigured controller it is required to
1406 * read at least the version information provided by
1407 * the Read Local Version Information command.
1408 *
1409 * If the set_bdaddr driver callback is provided, then
1410 * also the original Bluetooth public device address
1411 * will be read using the Read BD Address command.
1412 */
1413 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1414 ret = __hci_unconf_init(hdev);
1415 }
1416
1417 if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1418 /* If public address change is configured, ensure that
1419 * the address gets programmed. If the driver does not
1420 * support changing the public address, fail the power
1421 * on procedure.
1422 */
1423 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1424 hdev->set_bdaddr)
1425 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1426 else
1427 ret = -EADDRNOTAVAIL;
1428 }
1429
1430 if (!ret) {
1431 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1432 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
1433 ret = __hci_init(hdev);
1434 }
1435
1436 clear_bit(HCI_INIT, &hdev->flags);
1437
1438 if (!ret) {
1439 hci_dev_hold(hdev);
1440 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1441 set_bit(HCI_UP, &hdev->flags);
1442 hci_notify(hdev, HCI_DEV_UP);
1443 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1444 !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1445 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1446 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1447 hdev->dev_type == HCI_BREDR) {
1448 hci_dev_lock(hdev);
1449 mgmt_powered(hdev, 1);
1450 hci_dev_unlock(hdev);
1451 }
1452 } else {
1453 /* Init failed, cleanup */
1454 flush_work(&hdev->tx_work);
1455 flush_work(&hdev->cmd_work);
1456 flush_work(&hdev->rx_work);
1457
1458 skb_queue_purge(&hdev->cmd_q);
1459 skb_queue_purge(&hdev->rx_q);
1460
1461 if (hdev->flush)
1462 hdev->flush(hdev);
1463
1464 if (hdev->sent_cmd) {
1465 kfree_skb(hdev->sent_cmd);
1466 hdev->sent_cmd = NULL;
1467 }
1468
1469 hdev->close(hdev);
1470 hdev->flags &= BIT(HCI_RAW);
1471 }
1472
1473 done:
1474 hci_req_unlock(hdev);
1475 return ret;
1476 }
1477
1478 /* ---- HCI ioctl helpers ---- */
1479
1480 int hci_dev_open(__u16 dev)
1481 {
1482 struct hci_dev *hdev;
1483 int err;
1484
1485 hdev = hci_dev_get(dev);
1486 if (!hdev)
1487 return -ENODEV;
1488
1489 /* Devices that are marked as unconfigured can only be powered
1490 * up as user channel. Trying to bring them up as normal devices
1491 	 * will result in a failure. Only user channel operation is
1492 * possible.
1493 *
1494 * When this function is called for a user channel, the flag
1495 * HCI_USER_CHANNEL will be set first before attempting to
1496 * open the device.
1497 */
1498 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1499 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1500 err = -EOPNOTSUPP;
1501 goto done;
1502 }
1503
1504 /* We need to ensure that no other power on/off work is pending
1505 * before proceeding to call hci_dev_do_open. This is
1506 * particularly important if the setup procedure has not yet
1507 * completed.
1508 */
1509 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1510 cancel_delayed_work(&hdev->power_off);
1511
1512 /* After this call it is guaranteed that the setup procedure
1513 * has finished. This means that error conditions like RFKILL
1514 * or no valid public or static random address apply.
1515 */
1516 flush_workqueue(hdev->req_workqueue);
1517
1518 /* For controllers not using the management interface and that
1519 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1520 * so that pairing works for them. Once the management interface
1521 * is in use this bit will be cleared again and userspace has
1522 * to explicitly enable it.
1523 */
1524 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1525 !hci_dev_test_flag(hdev, HCI_MGMT))
1526 hci_dev_set_flag(hdev, HCI_BONDABLE);
1527
1528 err = hci_dev_do_open(hdev);
1529
1530 done:
1531 hci_dev_put(hdev);
1532 return err;
1533 }
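
hci_dev_open() above (and hci_dev_close() further down) implement the HCIDEVUP and HCIDEVDOWN ioctls used by legacy tools such as hciconfig. A hedged user-space sketch of powering an adapter up and down over a raw HCI control socket:

#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

int main(void)
{
	int dev_id = 0;					/* hci0 */
	int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	if (ctl < 0) {
		perror("socket");
		return 1;
	}

	/* Ends up in hci_dev_open() -> hci_dev_do_open() above. */
	if (ioctl(ctl, HCIDEVUP, dev_id) < 0)
		perror("HCIDEVUP");

	/* ... */

	/* Ends up in hci_dev_close() -> hci_dev_do_close(). */
	if (ioctl(ctl, HCIDEVDOWN, dev_id) < 0)
		perror("HCIDEVDOWN");

	close(ctl);
	return 0;
}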
1534
1535 /* This function requires the caller holds hdev->lock */
1536 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1537 {
1538 struct hci_conn_params *p;
1539
1540 list_for_each_entry(p, &hdev->le_conn_params, list) {
1541 if (p->conn) {
1542 hci_conn_drop(p->conn);
1543 hci_conn_put(p->conn);
1544 p->conn = NULL;
1545 }
1546 list_del_init(&p->action);
1547 }
1548
1549 BT_DBG("All LE pending actions cleared");
1550 }
1551
1552 int hci_dev_do_close(struct hci_dev *hdev)
1553 {
1554 BT_DBG("%s %p", hdev->name, hdev);
1555
1556 if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1557 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1558 test_bit(HCI_UP, &hdev->flags)) {
1559 /* Execute vendor specific shutdown routine */
1560 if (hdev->shutdown)
1561 hdev->shutdown(hdev);
1562 }
1563
1564 cancel_delayed_work(&hdev->power_off);
1565
1566 hci_req_cancel(hdev, ENODEV);
1567 hci_req_lock(hdev);
1568
1569 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1570 cancel_delayed_work_sync(&hdev->cmd_timer);
1571 hci_req_unlock(hdev);
1572 return 0;
1573 }
1574
1575 /* Flush RX and TX works */
1576 flush_work(&hdev->tx_work);
1577 flush_work(&hdev->rx_work);
1578
1579 if (hdev->discov_timeout > 0) {
1580 cancel_delayed_work(&hdev->discov_off);
1581 hdev->discov_timeout = 0;
1582 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1583 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1584 }
1585
1586 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1587 cancel_delayed_work(&hdev->service_cache);
1588
1589 cancel_delayed_work_sync(&hdev->le_scan_disable);
1590 cancel_delayed_work_sync(&hdev->le_scan_restart);
1591
1592 if (hci_dev_test_flag(hdev, HCI_MGMT))
1593 cancel_delayed_work_sync(&hdev->rpa_expired);
1594
1595 if (hdev->adv_instance_timeout) {
1596 cancel_delayed_work_sync(&hdev->adv_instance_expire);
1597 hdev->adv_instance_timeout = 0;
1598 }
1599
1600 /* Avoid potential lockdep warnings from the *_flush() calls by
1601 * ensuring the workqueue is empty up front.
1602 */
1603 drain_workqueue(hdev->workqueue);
1604
1605 hci_dev_lock(hdev);
1606
1607 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1608
1609 if (!hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
1610 if (hdev->dev_type == HCI_BREDR)
1611 mgmt_powered(hdev, 0);
1612 }
1613
1614 hci_inquiry_cache_flush(hdev);
1615 hci_pend_le_actions_clear(hdev);
1616 hci_conn_hash_flush(hdev);
1617 hci_dev_unlock(hdev);
1618
1619 smp_unregister(hdev);
1620
1621 hci_notify(hdev, HCI_DEV_DOWN);
1622
1623 if (hdev->flush)
1624 hdev->flush(hdev);
1625
1626 /* Reset device */
1627 skb_queue_purge(&hdev->cmd_q);
1628 atomic_set(&hdev->cmd_cnt, 1);
1629 if (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
1630 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1631 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1632 set_bit(HCI_INIT, &hdev->flags);
1633 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1634 clear_bit(HCI_INIT, &hdev->flags);
1635 }
1636
1637 /* flush cmd work */
1638 flush_work(&hdev->cmd_work);
1639
1640 /* Drop queues */
1641 skb_queue_purge(&hdev->rx_q);
1642 skb_queue_purge(&hdev->cmd_q);
1643 skb_queue_purge(&hdev->raw_q);
1644
1645 /* Drop last sent command */
1646 if (hdev->sent_cmd) {
1647 cancel_delayed_work_sync(&hdev->cmd_timer);
1648 kfree_skb(hdev->sent_cmd);
1649 hdev->sent_cmd = NULL;
1650 }
1651
1652 /* After this point our queues are empty
1653 * and no tasks are scheduled. */
1654 hdev->close(hdev);
1655
1656 /* Clear flags */
1657 hdev->flags &= BIT(HCI_RAW);
1658 hci_dev_clear_volatile_flags(hdev);
1659
1660 /* Controller radio is available but is currently powered down */
1661 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1662
1663 memset(hdev->eir, 0, sizeof(hdev->eir));
1664 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1665 bacpy(&hdev->random_addr, BDADDR_ANY);
1666
1667 hci_req_unlock(hdev);
1668
1669 hci_dev_put(hdev);
1670 return 0;
1671 }
1672
1673 int hci_dev_close(__u16 dev)
1674 {
1675 struct hci_dev *hdev;
1676 int err;
1677
1678 hdev = hci_dev_get(dev);
1679 if (!hdev)
1680 return -ENODEV;
1681
1682 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1683 err = -EBUSY;
1684 goto done;
1685 }
1686
1687 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1688 cancel_delayed_work(&hdev->power_off);
1689
1690 err = hci_dev_do_close(hdev);
1691
1692 done:
1693 hci_dev_put(hdev);
1694 return err;
1695 }
1696
1697 static int hci_dev_do_reset(struct hci_dev *hdev)
1698 {
1699 int ret;
1700
1701 BT_DBG("%s %p", hdev->name, hdev);
1702
1703 hci_req_lock(hdev);
1704
1705 /* Drop queues */
1706 skb_queue_purge(&hdev->rx_q);
1707 skb_queue_purge(&hdev->cmd_q);
1708
1709 /* Avoid potential lockdep warnings from the *_flush() calls by
1710 * ensuring the workqueue is empty up front.
1711 */
1712 drain_workqueue(hdev->workqueue);
1713
1714 hci_dev_lock(hdev);
1715 hci_inquiry_cache_flush(hdev);
1716 hci_conn_hash_flush(hdev);
1717 hci_dev_unlock(hdev);
1718
1719 if (hdev->flush)
1720 hdev->flush(hdev);
1721
1722 atomic_set(&hdev->cmd_cnt, 1);
1723 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1724
1725 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1726
1727 hci_req_unlock(hdev);
1728 return ret;
1729 }
1730
1731 int hci_dev_reset(__u16 dev)
1732 {
1733 struct hci_dev *hdev;
1734 int err;
1735
1736 hdev = hci_dev_get(dev);
1737 if (!hdev)
1738 return -ENODEV;
1739
1740 if (!test_bit(HCI_UP, &hdev->flags)) {
1741 err = -ENETDOWN;
1742 goto done;
1743 }
1744
1745 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1746 err = -EBUSY;
1747 goto done;
1748 }
1749
1750 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1751 err = -EOPNOTSUPP;
1752 goto done;
1753 }
1754
1755 err = hci_dev_do_reset(hdev);
1756
1757 done:
1758 hci_dev_put(hdev);
1759 return err;
1760 }
1761
1762 int hci_dev_reset_stat(__u16 dev)
1763 {
1764 struct hci_dev *hdev;
1765 int ret = 0;
1766
1767 hdev = hci_dev_get(dev);
1768 if (!hdev)
1769 return -ENODEV;
1770
1771 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1772 ret = -EBUSY;
1773 goto done;
1774 }
1775
1776 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1777 ret = -EOPNOTSUPP;
1778 goto done;
1779 }
1780
1781 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1782
1783 done:
1784 hci_dev_put(hdev);
1785 return ret;
1786 }
1787
1788 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1789 {
1790 bool conn_changed, discov_changed;
1791
1792 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1793
1794 if ((scan & SCAN_PAGE))
1795 conn_changed = !hci_dev_test_and_set_flag(hdev,
1796 HCI_CONNECTABLE);
1797 else
1798 conn_changed = hci_dev_test_and_clear_flag(hdev,
1799 HCI_CONNECTABLE);
1800
1801 if ((scan & SCAN_INQUIRY)) {
1802 discov_changed = !hci_dev_test_and_set_flag(hdev,
1803 HCI_DISCOVERABLE);
1804 } else {
1805 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1806 discov_changed = hci_dev_test_and_clear_flag(hdev,
1807 HCI_DISCOVERABLE);
1808 }
1809
1810 if (!hci_dev_test_flag(hdev, HCI_MGMT))
1811 return;
1812
1813 if (conn_changed || discov_changed) {
1814 /* In case this was disabled through mgmt */
1815 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1816
1817 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1818 mgmt_update_adv_data(hdev);
1819
1820 mgmt_new_settings(hdev);
1821 }
1822 }
1823
1824 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1825 {
1826 struct hci_dev *hdev;
1827 struct hci_dev_req dr;
1828 int err = 0;
1829
1830 if (copy_from_user(&dr, arg, sizeof(dr)))
1831 return -EFAULT;
1832
1833 hdev = hci_dev_get(dr.dev_id);
1834 if (!hdev)
1835 return -ENODEV;
1836
1837 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1838 err = -EBUSY;
1839 goto done;
1840 }
1841
1842 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1843 err = -EOPNOTSUPP;
1844 goto done;
1845 }
1846
1847 if (hdev->dev_type != HCI_BREDR) {
1848 err = -EOPNOTSUPP;
1849 goto done;
1850 }
1851
1852 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1853 err = -EOPNOTSUPP;
1854 goto done;
1855 }
1856
1857 switch (cmd) {
1858 case HCISETAUTH:
1859 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1860 HCI_INIT_TIMEOUT);
1861 break;
1862
1863 case HCISETENCRYPT:
1864 if (!lmp_encrypt_capable(hdev)) {
1865 err = -EOPNOTSUPP;
1866 break;
1867 }
1868
1869 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1870 /* Auth must be enabled first */
1871 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1872 HCI_INIT_TIMEOUT);
1873 if (err)
1874 break;
1875 }
1876
1877 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1878 HCI_INIT_TIMEOUT);
1879 break;
1880
1881 case HCISETSCAN:
1882 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1883 HCI_INIT_TIMEOUT);
1884
1885 /* Ensure that the connectable and discoverable states
1886 * get correctly modified as this was a non-mgmt change.
1887 */
1888 if (!err)
1889 hci_update_scan_state(hdev, dr.dev_opt);
1890 break;
1891
1892 case HCISETLINKPOL:
1893 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1894 HCI_INIT_TIMEOUT);
1895 break;
1896
1897 case HCISETLINKMODE:
1898 hdev->link_mode = ((__u16) dr.dev_opt) &
1899 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1900 break;
1901
1902 case HCISETPTYPE:
1903 hdev->pkt_type = (__u16) dr.dev_opt;
1904 break;
1905
1906 case HCISETACLMTU:
1907 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1908 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1909 break;
1910
1911 case HCISETSCOMTU:
1912 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1913 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1914 break;
1915
1916 default:
1917 err = -EINVAL;
1918 break;
1919 }
1920
1921 done:
1922 hci_dev_put(hdev);
1923 return err;
1924 }
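
hci_dev_cmd() above is the legacy ioctl multiplexer; for instance its HCISETSCAN branch is what "hciconfig hci0 piscan" historically exercised. A hedged user-space sketch of that path:

#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

int main(void)
{
	struct hci_dev_req dr;
	int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	if (ctl < 0) {
		perror("socket");
		return 1;
	}

	dr.dev_id = 0;					/* hci0 */
	dr.dev_opt = SCAN_PAGE | SCAN_INQUIRY;		/* connectable + discoverable */

	/* Dispatched to the HCISETSCAN case in hci_dev_cmd() above, which
	 * runs hci_scan_req() and then hci_update_scan_state(). */
	if (ioctl(ctl, HCISETSCAN, (unsigned long) &dr) < 0)
		perror("HCISETSCAN");

	close(ctl);
	return 0;
}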
1925
1926 int hci_get_dev_list(void __user *arg)
1927 {
1928 struct hci_dev *hdev;
1929 struct hci_dev_list_req *dl;
1930 struct hci_dev_req *dr;
1931 int n = 0, size, err;
1932 __u16 dev_num;
1933
1934 if (get_user(dev_num, (__u16 __user *) arg))
1935 return -EFAULT;
1936
1937 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1938 return -EINVAL;
1939
1940 size = sizeof(*dl) + dev_num * sizeof(*dr);
1941
1942 dl = kzalloc(size, GFP_KERNEL);
1943 if (!dl)
1944 return -ENOMEM;
1945
1946 dr = dl->dev_req;
1947
1948 read_lock(&hci_dev_list_lock);
1949 list_for_each_entry(hdev, &hci_dev_list, list) {
1950 unsigned long flags = hdev->flags;
1951
1952 /* When the auto-off is configured it means the transport
1953 * is running, but in that case still indicate that the
1954 * device is actually down.
1955 */
1956 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1957 flags &= ~BIT(HCI_UP);
1958
1959 (dr + n)->dev_id = hdev->id;
1960 (dr + n)->dev_opt = flags;
1961
1962 if (++n >= dev_num)
1963 break;
1964 }
1965 read_unlock(&hci_dev_list_lock);
1966
1967 dl->dev_num = n;
1968 size = sizeof(*dl) + n * sizeof(*dr);
1969
1970 err = copy_to_user(arg, dl, size);
1971 kfree(dl);
1972
1973 return err ? -EFAULT : 0;
1974 }
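
hci_get_dev_list() above and hci_get_dev_info() below serve the HCIGETDEVLIST and HCIGETDEVINFO ioctls. A hedged user-space sketch that enumerates adapters and prints their name and address:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

int main(void)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_info di;
	int i, ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	if (ctl < 0)
		return 1;

	dl = calloc(1, sizeof(*dl) + HCI_MAX_DEV * sizeof(struct hci_dev_req));
	if (!dl) {
		close(ctl);
		return 1;
	}
	dl->dev_num = HCI_MAX_DEV;

	if (ioctl(ctl, HCIGETDEVLIST, dl) < 0) {
		perror("HCIGETDEVLIST");
		goto out;
	}

	for (i = 0; i < dl->dev_num; i++) {
		char addr[19];

		memset(&di, 0, sizeof(di));
		di.dev_id = dl->dev_req[i].dev_id;
		if (ioctl(ctl, HCIGETDEVINFO, &di) < 0)
			continue;

		ba2str(&di.bdaddr, addr);
		printf("%s %s\n", di.name, addr);
	}
out:
	free(dl);
	close(ctl);
	return 0;
}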
1975
1976 int hci_get_dev_info(void __user *arg)
1977 {
1978 struct hci_dev *hdev;
1979 struct hci_dev_info di;
1980 unsigned long flags;
1981 int err = 0;
1982
1983 if (copy_from_user(&di, arg, sizeof(di)))
1984 return -EFAULT;
1985
1986 hdev = hci_dev_get(di.dev_id);
1987 if (!hdev)
1988 return -ENODEV;
1989
1990 /* When the auto-off is configured it means the transport
1991 * is running, but in that case still indicate that the
1992 * device is actually down.
1993 */
1994 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1995 flags = hdev->flags & ~BIT(HCI_UP);
1996 else
1997 flags = hdev->flags;
1998
1999 strcpy(di.name, hdev->name);
2000 di.bdaddr = hdev->bdaddr;
2001 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2002 di.flags = flags;
2003 di.pkt_type = hdev->pkt_type;
2004 if (lmp_bredr_capable(hdev)) {
2005 di.acl_mtu = hdev->acl_mtu;
2006 di.acl_pkts = hdev->acl_pkts;
2007 di.sco_mtu = hdev->sco_mtu;
2008 di.sco_pkts = hdev->sco_pkts;
2009 } else {
2010 di.acl_mtu = hdev->le_mtu;
2011 di.acl_pkts = hdev->le_pkts;
2012 di.sco_mtu = 0;
2013 di.sco_pkts = 0;
2014 }
2015 di.link_policy = hdev->link_policy;
2016 di.link_mode = hdev->link_mode;
2017
2018 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2019 memcpy(&di.features, &hdev->features, sizeof(di.features));
2020
2021 if (copy_to_user(arg, &di, sizeof(di)))
2022 err = -EFAULT;
2023
2024 hci_dev_put(hdev);
2025
2026 return err;
2027 }
2028
2029 /* ---- Interface to HCI drivers ---- */
2030
2031 static int hci_rfkill_set_block(void *data, bool blocked)
2032 {
2033 struct hci_dev *hdev = data;
2034
2035 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2036
2037 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2038 return -EBUSY;
2039
2040 if (blocked) {
2041 hci_dev_set_flag(hdev, HCI_RFKILLED);
2042 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2043 !hci_dev_test_flag(hdev, HCI_CONFIG))
2044 hci_dev_do_close(hdev);
2045 } else {
2046 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2047 }
2048
2049 return 0;
2050 }
2051
2052 static const struct rfkill_ops hci_rfkill_ops = {
2053 .set_block = hci_rfkill_set_block,
2054 };
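
These rfkill ops are bound to a controller when it is registered (in hci_register_dev(), later in this file). A hedged sketch of roughly how that binding looks, using the standard rfkill API; error handling is abbreviated and the function name is hypothetical:

static int example_setup_rfkill(struct hci_dev *hdev)
{
	/* Allocate an rfkill switch backed by hci_rfkill_ops above. */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH,
				    &hci_rfkill_ops, hdev);
	if (!hdev->rfkill)
		return -ENOMEM;

	if (rfkill_register(hdev->rfkill) < 0) {
		rfkill_destroy(hdev->rfkill);
		hdev->rfkill = NULL;
		return -EINVAL;
	}

	/* Reflect the initial hardware/software block state. */
	if (rfkill_blocked(hdev->rfkill))
		hci_dev_set_flag(hdev, HCI_RFKILLED);

	return 0;
}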
2055
2056 static void hci_power_on(struct work_struct *work)
2057 {
2058 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2059 int err;
2060
2061 BT_DBG("%s", hdev->name);
2062
2063 err = hci_dev_do_open(hdev);
2064 if (err < 0) {
2065 hci_dev_lock(hdev);
2066 mgmt_set_powered_failed(hdev, err);
2067 hci_dev_unlock(hdev);
2068 return;
2069 }
2070
2071 /* During the HCI setup phase, a few error conditions are
2072 * ignored and they need to be checked now. If they are still
2073 * valid, it is important to turn the device back off.
2074 */
2075 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2076 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2077 (hdev->dev_type == HCI_BREDR &&
2078 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2079 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2080 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2081 hci_dev_do_close(hdev);
2082 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2083 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2084 HCI_AUTO_OFF_TIMEOUT);
2085 }
2086
2087 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2088 /* For unconfigured devices, set the HCI_RAW flag
2089 * so that userspace can easily identify them.
2090 */
2091 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2092 set_bit(HCI_RAW, &hdev->flags);
2093
2094 /* For fully configured devices, this will send
2095 * the Index Added event. For unconfigured devices,
2096 * it will send the Unconfigured Index Added event.
2097 *
2098 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2099 * and no event will be sent.
2100 */
2101 mgmt_index_added(hdev);
2102 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2103 /* When the controller is now configured, then it
2104 * is important to clear the HCI_RAW flag.
2105 */
2106 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2107 clear_bit(HCI_RAW, &hdev->flags);
2108
2109 /* Powering on the controller with HCI_CONFIG set only
2110 * happens with the transition from unconfigured to
2111 * configured. This will send the Index Added event.
2112 */
2113 mgmt_index_added(hdev);
2114 }
2115 }
2116
2117 static void hci_power_off(struct work_struct *work)
2118 {
2119 struct hci_dev *hdev = container_of(work, struct hci_dev,
2120 power_off.work);
2121
2122 BT_DBG("%s", hdev->name);
2123
2124 hci_dev_do_close(hdev);
2125 }
2126
2127 static void hci_error_reset(struct work_struct *work)
2128 {
2129 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2130
2131 BT_DBG("%s", hdev->name);
2132
2133 if (hdev->hw_error)
2134 hdev->hw_error(hdev, hdev->hw_error_code);
2135 else
2136 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2137 hdev->hw_error_code);
2138
2139 if (hci_dev_do_close(hdev))
2140 return;
2141
2142 hci_dev_do_open(hdev);
2143 }
2144
2145 static void hci_discov_off(struct work_struct *work)
2146 {
2147 struct hci_dev *hdev;
2148
2149 hdev = container_of(work, struct hci_dev, discov_off.work);
2150
2151 BT_DBG("%s", hdev->name);
2152
2153 mgmt_discoverable_timeout(hdev);
2154 }
2155
2156 static void hci_adv_timeout_expire(struct work_struct *work)
2157 {
2158 struct hci_dev *hdev;
2159
2160 hdev = container_of(work, struct hci_dev, adv_instance_expire.work);
2161
2162 BT_DBG("%s", hdev->name);
2163
2164 mgmt_adv_timeout_expired(hdev);
2165 }
2166
2167 void hci_uuids_clear(struct hci_dev *hdev)
2168 {
2169 struct bt_uuid *uuid, *tmp;
2170
2171 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2172 list_del(&uuid->list);
2173 kfree(uuid);
2174 }
2175 }
2176
2177 void hci_link_keys_clear(struct hci_dev *hdev)
2178 {
2179 struct link_key *key;
2180
2181 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2182 list_del_rcu(&key->list);
2183 kfree_rcu(key, rcu);
2184 }
2185 }
2186
2187 void hci_smp_ltks_clear(struct hci_dev *hdev)
2188 {
2189 struct smp_ltk *k;
2190
2191 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2192 list_del_rcu(&k->list);
2193 kfree_rcu(k, rcu);
2194 }
2195 }
2196
2197 void hci_smp_irks_clear(struct hci_dev *hdev)
2198 {
2199 struct smp_irk *k;
2200
2201 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2202 list_del_rcu(&k->list);
2203 kfree_rcu(k, rcu);
2204 }
2205 }
2206
2207 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2208 {
2209 struct link_key *k;
2210
2211 rcu_read_lock();
2212 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2213 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2214 rcu_read_unlock();
2215 return k;
2216 }
2217 }
2218 rcu_read_unlock();
2219
2220 return NULL;
2221 }
2222
2223 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2224 u8 key_type, u8 old_key_type)
2225 {
2226 /* Legacy key */
2227 if (key_type < 0x03)
2228 return true;
2229
2230 /* Debug keys are insecure so don't store them persistently */
2231 if (key_type == HCI_LK_DEBUG_COMBINATION)
2232 return false;
2233
2234 /* Changed combination key and there's no previous one */
2235 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2236 return false;
2237
2238 /* Security mode 3 case */
2239 if (!conn)
2240 return true;
2241
2242 /* BR/EDR key derived using SC from an LE link */
2243 if (conn->type == LE_LINK)
2244 return true;
2245
2246 /* Neither local nor remote side had no-bonding as a requirement */
2247 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2248 return true;
2249
2250 /* Local side had dedicated bonding as requirement */
2251 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2252 return true;
2253
2254 /* Remote side had dedicated bonding as requirement */
2255 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2256 return true;
2257
2258 /* If none of the above criteria match, then don't store the key
2259 * persistently */
2260 return false;
2261 }
2262
2263 static u8 ltk_role(u8 type)
2264 {
2265 if (type == SMP_LTK)
2266 return HCI_ROLE_MASTER;
2267
2268 return HCI_ROLE_SLAVE;
2269 }
2270
2271 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2272 u8 addr_type, u8 role)
2273 {
2274 struct smp_ltk *k;
2275
2276 rcu_read_lock();
2277 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2278 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2279 continue;
2280
2281 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2282 rcu_read_unlock();
2283 return k;
2284 }
2285 }
2286 rcu_read_unlock();
2287
2288 return NULL;
2289 }
2290
2291 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2292 {
2293 struct smp_irk *irk;
2294
2295 rcu_read_lock();
2296 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2297 if (!bacmp(&irk->rpa, rpa)) {
2298 rcu_read_unlock();
2299 return irk;
2300 }
2301 }
2302
2303 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2304 if (smp_irk_matches(hdev, irk->val, rpa)) {
2305 bacpy(&irk->rpa, rpa);
2306 rcu_read_unlock();
2307 return irk;
2308 }
2309 }
2310 rcu_read_unlock();
2311
2312 return NULL;
2313 }
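
/* Illustrative sketch, not part of the original file: resolving a
 * resolvable private address from an advertising report back to the
 * peer's identity address via the IRK list, roughly the way the event
 * path uses these lookups. The example_ name is hypothetical.
 */
static void example_resolve_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
        struct smp_irk *irk;

        hci_dev_lock(hdev);

        irk = hci_find_irk_by_rpa(hdev, rpa);
        if (irk)
                BT_DBG("%s %pMR resolves to %pMR (type %u)", hdev->name,
                       rpa, &irk->bdaddr, irk->addr_type);

        hci_dev_unlock(hdev);
}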
2314
2315 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2316 u8 addr_type)
2317 {
2318 struct smp_irk *irk;
2319
2320 /* Identity Address must be public or static random */
2321 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2322 return NULL;
2323
2324 rcu_read_lock();
2325 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2326 if (addr_type == irk->addr_type &&
2327 bacmp(bdaddr, &irk->bdaddr) == 0) {
2328 rcu_read_unlock();
2329 return irk;
2330 }
2331 }
2332 rcu_read_unlock();
2333
2334 return NULL;
2335 }
2336
2337 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2338 bdaddr_t *bdaddr, u8 *val, u8 type,
2339 u8 pin_len, bool *persistent)
2340 {
2341 struct link_key *key, *old_key;
2342 u8 old_key_type;
2343
2344 old_key = hci_find_link_key(hdev, bdaddr);
2345 if (old_key) {
2346 old_key_type = old_key->type;
2347 key = old_key;
2348 } else {
2349 old_key_type = conn ? conn->key_type : 0xff;
2350 key = kzalloc(sizeof(*key), GFP_KERNEL);
2351 if (!key)
2352 return NULL;
2353 list_add_rcu(&key->list, &hdev->link_keys);
2354 }
2355
2356 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2357
2358 /* Some buggy controller combinations generate a changed
2359 * combination key for legacy pairing even when there's no
2360 * previous key */
2361 if (type == HCI_LK_CHANGED_COMBINATION &&
2362 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2363 type = HCI_LK_COMBINATION;
2364 if (conn)
2365 conn->key_type = type;
2366 }
2367
2368 bacpy(&key->bdaddr, bdaddr);
2369 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2370 key->pin_len = pin_len;
2371
2372 if (type == HCI_LK_CHANGED_COMBINATION)
2373 key->type = old_key_type;
2374 else
2375 key->type = type;
2376
2377 if (persistent)
2378 *persistent = hci_persistent_key(hdev, conn, type,
2379 old_key_type);
2380
2381 return key;
2382 }
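
/* Illustrative sketch, not part of the original file: how a caller such
 * as the Link Key Notification handler might store a new key and use
 * the persistence hint. The example_ name and parameters are
 * hypothetical.
 */
static void example_store_link_key(struct hci_dev *hdev,
                                   struct hci_conn *conn, bdaddr_t *bdaddr,
                                   u8 *key_val, u8 key_type)
{
        struct link_key *key;
        bool persistent;

        hci_dev_lock(hdev);

        key = hci_add_link_key(hdev, conn, bdaddr, key_val, key_type, 0,
                               &persistent);
        if (key && !persistent) {
                /* A non-persistent key must not be written to storage;
                 * it is only valid for the current connection.
                 */
                BT_DBG("%s key for %pMR is not persistent", hdev->name,
                       bdaddr);
        }

        hci_dev_unlock(hdev);
}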
2383
2384 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2385 u8 addr_type, u8 type, u8 authenticated,
2386 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2387 {
2388 struct smp_ltk *key, *old_key;
2389 u8 role = ltk_role(type);
2390
2391 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2392 if (old_key)
2393 key = old_key;
2394 else {
2395 key = kzalloc(sizeof(*key), GFP_KERNEL);
2396 if (!key)
2397 return NULL;
2398 list_add_rcu(&key->list, &hdev->long_term_keys);
2399 }
2400
2401 bacpy(&key->bdaddr, bdaddr);
2402 key->bdaddr_type = addr_type;
2403 memcpy(key->val, tk, sizeof(key->val));
2404 key->authenticated = authenticated;
2405 key->ediv = ediv;
2406 key->rand = rand;
2407 key->enc_size = enc_size;
2408 key->type = type;
2409
2410 return key;
2411 }
2412
2413 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2414 u8 addr_type, u8 val[16], bdaddr_t *rpa)
2415 {
2416 struct smp_irk *irk;
2417
2418 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2419 if (!irk) {
2420 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2421 if (!irk)
2422 return NULL;
2423
2424 bacpy(&irk->bdaddr, bdaddr);
2425 irk->addr_type = addr_type;
2426
2427 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2428 }
2429
2430 memcpy(irk->val, val, 16);
2431 bacpy(&irk->rpa, rpa);
2432
2433 return irk;
2434 }
2435
2436 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2437 {
2438 struct link_key *key;
2439
2440 key = hci_find_link_key(hdev, bdaddr);
2441 if (!key)
2442 return -ENOENT;
2443
2444 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2445
2446 list_del_rcu(&key->list);
2447 kfree_rcu(key, rcu);
2448
2449 return 0;
2450 }
2451
2452 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2453 {
2454 struct smp_ltk *k;
2455 int removed = 0;
2456
2457 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2458 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2459 continue;
2460
2461 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2462
2463 list_del_rcu(&k->list);
2464 kfree_rcu(k, rcu);
2465 removed++;
2466 }
2467
2468 return removed ? 0 : -ENOENT;
2469 }
2470
2471 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2472 {
2473 struct smp_irk *k;
2474
2475 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2476 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2477 continue;
2478
2479 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2480
2481 list_del_rcu(&k->list);
2482 kfree_rcu(k, rcu);
2483 }
2484 }
2485
2486 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2487 {
2488 struct smp_ltk *k;
2489 struct smp_irk *irk;
2490 u8 addr_type;
2491
2492 if (type == BDADDR_BREDR) {
2493 if (hci_find_link_key(hdev, bdaddr))
2494 return true;
2495 return false;
2496 }
2497
2498 /* Convert to HCI addr type which struct smp_ltk uses */
2499 if (type == BDADDR_LE_PUBLIC)
2500 addr_type = ADDR_LE_DEV_PUBLIC;
2501 else
2502 addr_type = ADDR_LE_DEV_RANDOM;
2503
2504 irk = hci_get_irk(hdev, bdaddr, addr_type);
2505 if (irk) {
2506 bdaddr = &irk->bdaddr;
2507 addr_type = irk->addr_type;
2508 }
2509
2510 rcu_read_lock();
2511 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2512 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2513 rcu_read_unlock();
2514 return true;
2515 }
2516 }
2517 rcu_read_unlock();
2518
2519 return false;
2520 }
2521
2522 /* HCI command timer function */
2523 static void hci_cmd_timeout(struct work_struct *work)
2524 {
2525 struct hci_dev *hdev = container_of(work, struct hci_dev,
2526 cmd_timer.work);
2527
2528 if (hdev->sent_cmd) {
2529 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2530 u16 opcode = __le16_to_cpu(sent->opcode);
2531
2532 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2533 } else {
2534 BT_ERR("%s command tx timeout", hdev->name);
2535 }
2536
2537 atomic_set(&hdev->cmd_cnt, 1);
2538 queue_work(hdev->workqueue, &hdev->cmd_work);
2539 }
2540
2541 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2542 bdaddr_t *bdaddr, u8 bdaddr_type)
2543 {
2544 struct oob_data *data;
2545
2546 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2547 if (bacmp(bdaddr, &data->bdaddr) != 0)
2548 continue;
2549 if (data->bdaddr_type != bdaddr_type)
2550 continue;
2551 return data;
2552 }
2553
2554 return NULL;
2555 }
2556
2557 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2558 u8 bdaddr_type)
2559 {
2560 struct oob_data *data;
2561
2562 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2563 if (!data)
2564 return -ENOENT;
2565
2566 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2567
2568 list_del(&data->list);
2569 kfree(data);
2570
2571 return 0;
2572 }
2573
2574 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2575 {
2576 struct oob_data *data, *n;
2577
2578 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2579 list_del(&data->list);
2580 kfree(data);
2581 }
2582 }
2583
2584 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2585 u8 bdaddr_type, u8 *hash192, u8 *rand192,
2586 u8 *hash256, u8 *rand256)
2587 {
2588 struct oob_data *data;
2589
2590 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2591 if (!data) {
2592 data = kmalloc(sizeof(*data), GFP_KERNEL);
2593 if (!data)
2594 return -ENOMEM;
2595
2596 bacpy(&data->bdaddr, bdaddr);
2597 data->bdaddr_type = bdaddr_type;
2598 list_add(&data->list, &hdev->remote_oob_data);
2599 }
2600
2601 if (hash192 && rand192) {
2602 memcpy(data->hash192, hash192, sizeof(data->hash192));
2603 memcpy(data->rand192, rand192, sizeof(data->rand192));
2604 if (hash256 && rand256)
2605 data->present = 0x03;
2606 } else {
2607 memset(data->hash192, 0, sizeof(data->hash192));
2608 memset(data->rand192, 0, sizeof(data->rand192));
2609 if (hash256 && rand256)
2610 data->present = 0x02;
2611 else
2612 data->present = 0x00;
2613 }
2614
2615 if (hash256 && rand256) {
2616 memcpy(data->hash256, hash256, sizeof(data->hash256));
2617 memcpy(data->rand256, rand256, sizeof(data->rand256));
2618 } else {
2619 memset(data->hash256, 0, sizeof(data->hash256));
2620 memset(data->rand256, 0, sizeof(data->rand256));
2621 if (hash192 && rand192)
2622 data->present = 0x01;
2623 }
2624
2625 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2626
2627 return 0;
2628 }
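
/* Illustrative sketch, not part of the original file: storing P-256
 * only OOB material leaves data->present at 0x02; supplying both key
 * sets yields 0x03 and P-192 only yields 0x01. The example_ name and
 * buffers are hypothetical.
 */
static int example_store_p256_oob(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                  u8 *hash256, u8 *rand256)
{
        int err;

        hci_dev_lock(hdev);
        err = hci_add_remote_oob_data(hdev, bdaddr, BDADDR_LE_PUBLIC,
                                      NULL, NULL, hash256, rand256);
        hci_dev_unlock(hdev);

        return err;
}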
2629
2630 /* This function requires the caller holds hdev->lock */
2631 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2632 {
2633 struct adv_info *adv_instance;
2634
2635 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2636 if (adv_instance->instance == instance)
2637 return adv_instance;
2638 }
2639
2640 return NULL;
2641 }
2642
2643 /* This function requires the caller holds hdev->lock */
2644 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance) {
2645 struct adv_info *cur_instance;
2646
2647 cur_instance = hci_find_adv_instance(hdev, instance);
2648 if (!cur_instance)
2649 return NULL;
2650
2651 if (cur_instance == list_last_entry(&hdev->adv_instances,
2652 struct adv_info, list))
2653 return list_first_entry(&hdev->adv_instances,
2654 struct adv_info, list);
2655 else
2656 return list_next_entry(cur_instance, list);
2657 }
2658
2659 /* This function requires the caller holds hdev->lock */
2660 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2661 {
2662 struct adv_info *adv_instance;
2663
2664 adv_instance = hci_find_adv_instance(hdev, instance);
2665 if (!adv_instance)
2666 return -ENOENT;
2667
2668 BT_DBG("%s removing %dMR", hdev->name, instance);
2669
2670 if (hdev->cur_adv_instance == instance && hdev->adv_instance_timeout) {
2671 cancel_delayed_work(&hdev->adv_instance_expire);
2672 hdev->adv_instance_timeout = 0;
2673 }
2674
2675 list_del(&adv_instance->list);
2676 kfree(adv_instance);
2677
2678 hdev->adv_instance_cnt--;
2679
2680 return 0;
2681 }
2682
2683 /* This function requires the caller holds hdev->lock */
2684 void hci_adv_instances_clear(struct hci_dev *hdev)
2685 {
2686 struct adv_info *adv_instance, *n;
2687
2688 if (hdev->adv_instance_timeout) {
2689 cancel_delayed_work(&hdev->adv_instance_expire);
2690 hdev->adv_instance_timeout = 0;
2691 }
2692
2693 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2694 list_del(&adv_instance->list);
2695 kfree(adv_instance);
2696 }
2697
2698 hdev->adv_instance_cnt = 0;
2699 }
2700
2701 /* This function requires the caller holds hdev->lock */
2702 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2703 u16 adv_data_len, u8 *adv_data,
2704 u16 scan_rsp_len, u8 *scan_rsp_data,
2705 u16 timeout, u16 duration)
2706 {
2707 struct adv_info *adv_instance;
2708
2709 adv_instance = hci_find_adv_instance(hdev, instance);
2710 if (adv_instance) {
2711 memset(adv_instance->adv_data, 0,
2712 sizeof(adv_instance->adv_data));
2713 memset(adv_instance->scan_rsp_data, 0,
2714 sizeof(adv_instance->scan_rsp_data));
2715 } else {
2716 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2717 instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2718 return -EOVERFLOW;
2719
2720 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2721 if (!adv_instance)
2722 return -ENOMEM;
2723
2724 adv_instance->pending = true;
2725 adv_instance->instance = instance;
2726 list_add(&adv_instance->list, &hdev->adv_instances);
2727 hdev->adv_instance_cnt++;
2728 }
2729
2730 adv_instance->flags = flags;
2731 adv_instance->adv_data_len = adv_data_len;
2732 adv_instance->scan_rsp_len = scan_rsp_len;
2733
2734 if (adv_data_len)
2735 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2736
2737 if (scan_rsp_len)
2738 memcpy(adv_instance->scan_rsp_data,
2739 scan_rsp_data, scan_rsp_len);
2740
2741 adv_instance->timeout = timeout;
2742 adv_instance->remaining_time = timeout;
2743
2744 if (duration == 0)
2745 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2746 else
2747 adv_instance->duration = duration;
2748
2749 BT_DBG("%s for %dMR", hdev->name, instance);
2750
2751 return 0;
2752 }
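
/* Illustrative sketch, not part of the original file: registering one
 * advertising instance carrying a shortened local name AD element. The
 * example_ name, the instance number and the AD payload are
 * hypothetical; timeout/duration of 0 select the defaults above.
 */
static int example_add_adv_instance(struct hci_dev *hdev)
{
        /* AD structure: length 0x05, type 0x08 (shortened local name), "demo" */
        u8 adv_data[] = { 0x05, 0x08, 'd', 'e', 'm', 'o' };
        int err;

        hci_dev_lock(hdev);
        err = hci_add_adv_instance(hdev, 0x01, 0, sizeof(adv_data), adv_data,
                                   0, NULL, 0, 0);
        hci_dev_unlock(hdev);

        return err;
}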
2753
2754 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2755 bdaddr_t *bdaddr, u8 type)
2756 {
2757 struct bdaddr_list *b;
2758
2759 list_for_each_entry(b, bdaddr_list, list) {
2760 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2761 return b;
2762 }
2763
2764 return NULL;
2765 }
2766
2767 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2768 {
2769 struct list_head *p, *n;
2770
2771 list_for_each_safe(p, n, bdaddr_list) {
2772 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2773
2774 list_del(p);
2775 kfree(b);
2776 }
2777 }
2778
2779 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2780 {
2781 struct bdaddr_list *entry;
2782
2783 if (!bacmp(bdaddr, BDADDR_ANY))
2784 return -EBADF;
2785
2786 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2787 return -EEXIST;
2788
2789 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2790 if (!entry)
2791 return -ENOMEM;
2792
2793 bacpy(&entry->bdaddr, bdaddr);
2794 entry->bdaddr_type = type;
2795
2796 list_add(&entry->list, list);
2797
2798 return 0;
2799 }
2800
2801 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2802 {
2803 struct bdaddr_list *entry;
2804
2805 if (!bacmp(bdaddr, BDADDR_ANY)) {
2806 hci_bdaddr_list_clear(list);
2807 return 0;
2808 }
2809
2810 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2811 if (!entry)
2812 return -ENOENT;
2813
2814 list_del(&entry->list);
2815 kfree(entry);
2816
2817 return 0;
2818 }
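
/* Illustrative sketch, not part of the original file: typical use of
 * the bdaddr list helpers against hdev->whitelist. The example_ name
 * is hypothetical; BDADDR_BREDR comes from bluetooth.h.
 */
static int example_whitelist_ops(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        int err;

        hci_dev_lock(hdev);

        err = hci_bdaddr_list_add(&hdev->whitelist, bdaddr, BDADDR_BREDR);
        if (err && err != -EEXIST)
                goto unlock;

        /* The entry can now be looked up and removed again. */
        if (!hci_bdaddr_list_lookup(&hdev->whitelist, bdaddr, BDADDR_BREDR)) {
                err = -ENOENT;
                goto unlock;
        }

        err = hci_bdaddr_list_del(&hdev->whitelist, bdaddr, BDADDR_BREDR);

unlock:
        hci_dev_unlock(hdev);

        return err;
}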
2819
2820 /* This function requires the caller holds hdev->lock */
2821 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2822 bdaddr_t *addr, u8 addr_type)
2823 {
2824 struct hci_conn_params *params;
2825
2826 list_for_each_entry(params, &hdev->le_conn_params, list) {
2827 if (bacmp(&params->addr, addr) == 0 &&
2828 params->addr_type == addr_type) {
2829 return params;
2830 }
2831 }
2832
2833 return NULL;
2834 }
2835
2836 /* This function requires the caller holds hdev->lock */
2837 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2838 bdaddr_t *addr, u8 addr_type)
2839 {
2840 struct hci_conn_params *param;
2841
2842 list_for_each_entry(param, list, action) {
2843 if (bacmp(&param->addr, addr) == 0 &&
2844 param->addr_type == addr_type)
2845 return param;
2846 }
2847
2848 return NULL;
2849 }
2850
2851 /* This function requires the caller holds hdev->lock */
2852 struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev,
2853 bdaddr_t *addr,
2854 u8 addr_type)
2855 {
2856 struct hci_conn_params *param;
2857
2858 list_for_each_entry(param, &hdev->pend_le_conns, action) {
2859 if (bacmp(&param->addr, addr) == 0 &&
2860 param->addr_type == addr_type &&
2861 param->explicit_connect)
2862 return param;
2863 }
2864
2865 list_for_each_entry(param, &hdev->pend_le_reports, action) {
2866 if (bacmp(&param->addr, addr) == 0 &&
2867 param->addr_type == addr_type &&
2868 param->explicit_connect)
2869 return param;
2870 }
2871
2872 return NULL;
2873 }
2874
2875 /* This function requires the caller holds hdev->lock */
2876 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2877 bdaddr_t *addr, u8 addr_type)
2878 {
2879 struct hci_conn_params *params;
2880
2881 params = hci_conn_params_lookup(hdev, addr, addr_type);
2882 if (params)
2883 return params;
2884
2885 params = kzalloc(sizeof(*params), GFP_KERNEL);
2886 if (!params) {
2887 BT_ERR("Out of memory");
2888 return NULL;
2889 }
2890
2891 bacpy(&params->addr, addr);
2892 params->addr_type = addr_type;
2893
2894 list_add(&params->list, &hdev->le_conn_params);
2895 INIT_LIST_HEAD(&params->action);
2896
2897 params->conn_min_interval = hdev->le_conn_min_interval;
2898 params->conn_max_interval = hdev->le_conn_max_interval;
2899 params->conn_latency = hdev->le_conn_latency;
2900 params->supervision_timeout = hdev->le_supv_timeout;
2901 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2902
2903 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2904
2905 return params;
2906 }
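
/* Illustrative sketch, not part of the original file: creating (or
 * reusing) LE connection parameters for a peer and overriding the
 * connection interval, similar to what the mgmt Load Connection
 * Parameters path does. The example_ name is hypothetical, and a real
 * caller changing auto_connect would also have to maintain the
 * pend_le_conns/pend_le_reports action lists.
 */
static int example_set_conn_interval(struct hci_dev *hdev, bdaddr_t *addr,
                                     u16 min_interval, u16 max_interval)
{
        struct hci_conn_params *params;

        hci_dev_lock(hdev);

        params = hci_conn_params_add(hdev, addr, ADDR_LE_DEV_PUBLIC);
        if (!params) {
                hci_dev_unlock(hdev);
                return -ENOMEM;
        }

        params->conn_min_interval = min_interval;
        params->conn_max_interval = max_interval;

        hci_dev_unlock(hdev);

        return 0;
}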
2907
2908 static void hci_conn_params_free(struct hci_conn_params *params)
2909 {
2910 if (params->conn) {
2911 hci_conn_drop(params->conn);
2912 hci_conn_put(params->conn);
2913 }
2914
2915 list_del(&params->action);
2916 list_del(&params->list);
2917 kfree(params);
2918 }
2919
2920 /* This function requires the caller holds hdev->lock */
2921 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2922 {
2923 struct hci_conn_params *params;
2924
2925 params = hci_conn_params_lookup(hdev, addr, addr_type);
2926 if (!params)
2927 return;
2928
2929 hci_conn_params_free(params);
2930
2931 hci_update_background_scan(hdev);
2932
2933 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2934 }
2935
2936 /* This function requires the caller holds hdev->lock */
2937 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2938 {
2939 struct hci_conn_params *params, *tmp;
2940
2941 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2942 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2943 continue;
2944
2945 /* If trying to establish a one-time connection to a disabled
2946 * device, leave the params, but mark them as one-time only.
2947 */
2948 if (params->explicit_connect) {
2949 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2950 continue;
2951 }
2952
2953 list_del(&params->list);
2954 kfree(params);
2955 }
2956
2957 BT_DBG("All LE disabled connection parameters were removed");
2958 }
2959
2960 /* This function requires the caller holds hdev->lock */
2961 void hci_conn_params_clear_all(struct hci_dev *hdev)
2962 {
2963 struct hci_conn_params *params, *tmp;
2964
2965 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2966 hci_conn_params_free(params);
2967
2968 hci_update_background_scan(hdev);
2969
2970 BT_DBG("All LE connection parameters were removed");
2971 }
2972
2973 static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2974 {
2975 if (status) {
2976 BT_ERR("Failed to start inquiry: status %d", status);
2977
2978 hci_dev_lock(hdev);
2979 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2980 hci_dev_unlock(hdev);
2981 return;
2982 }
2983 }
2984
2985 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
2986 u16 opcode)
2987 {
2988 /* General inquiry access code (GIAC) */
2989 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2990 struct hci_cp_inquiry cp;
2991 int err;
2992
2993 if (status) {
2994 BT_ERR("Failed to disable LE scanning: status %d", status);
2995 return;
2996 }
2997
2998 hdev->discovery.scan_start = 0;
2999
3000 switch (hdev->discovery.type) {
3001 case DISCOV_TYPE_LE:
3002 hci_dev_lock(hdev);
3003 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3004 hci_dev_unlock(hdev);
3005 break;
3006
3007 case DISCOV_TYPE_INTERLEAVED:
3008 hci_dev_lock(hdev);
3009
3010 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3011 &hdev->quirks)) {
3012 /* If we were running LE only scan, change discovery
3013 * state. If we were running both LE and BR/EDR inquiry
3014 * simultaneously, and BR/EDR inquiry is already
3015 * finished, stop discovery, otherwise BR/EDR inquiry
3016 * will stop discovery when finished. If we are resolving a
3017 * remote device name, do not change the discovery state.
3018 */
3019 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
3020 hdev->discovery.state != DISCOVERY_RESOLVING)
3021 hci_discovery_set_state(hdev,
3022 DISCOVERY_STOPPED);
3023 } else {
3024 struct hci_request req;
3025
3026 hci_inquiry_cache_flush(hdev);
3027
3028 hci_req_init(&req, hdev);
3029
3030 memset(&cp, 0, sizeof(cp));
3031 memcpy(&cp.lap, lap, sizeof(cp.lap));
3032 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3033 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3034
3035 err = hci_req_run(&req, inquiry_complete);
3036 if (err) {
3037 BT_ERR("Inquiry request failed: err %d", err);
3038 hci_discovery_set_state(hdev,
3039 DISCOVERY_STOPPED);
3040 }
3041 }
3042
3043 hci_dev_unlock(hdev);
3044 break;
3045 }
3046 }
3047
3048 static void le_scan_disable_work(struct work_struct *work)
3049 {
3050 struct hci_dev *hdev = container_of(work, struct hci_dev,
3051 le_scan_disable.work);
3052 struct hci_request req;
3053 int err;
3054
3055 BT_DBG("%s", hdev->name);
3056
3057 cancel_delayed_work_sync(&hdev->le_scan_restart);
3058
3059 hci_req_init(&req, hdev);
3060
3061 hci_req_add_le_scan_disable(&req);
3062
3063 err = hci_req_run(&req, le_scan_disable_work_complete);
3064 if (err)
3065 BT_ERR("Disable LE scanning request failed: err %d", err);
3066 }
3067
3068 static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
3069 u16 opcode)
3070 {
3071 unsigned long timeout, duration, scan_start, now;
3072
3073 BT_DBG("%s", hdev->name);
3074
3075 if (status) {
3076 BT_ERR("Failed to restart LE scan: status %d", status);
3077 return;
3078 }
3079
3080 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3081 !hdev->discovery.scan_start)
3082 return;
3083
3084 /* When the scan was started, hdev->le_scan_disable was queued to
3085 * run 'duration' after scan_start. During scan restart this work
3086 * was canceled, so queue it again with the remaining timeout to
3087 * make sure the scan does not run indefinitely.
3088 */
3089 duration = hdev->discovery.scan_duration;
3090 scan_start = hdev->discovery.scan_start;
3091 now = jiffies;
3092 if (now - scan_start <= duration) {
3093 int elapsed;
3094
3095 if (now >= scan_start)
3096 elapsed = now - scan_start;
3097 else
3098 elapsed = ULONG_MAX - scan_start + now;
3099
3100 timeout = duration - elapsed;
3101 } else {
3102 timeout = 0;
3103 }
3104 queue_delayed_work(hdev->workqueue,
3105 &hdev->le_scan_disable, timeout);
3106 }
3107
3108 static void le_scan_restart_work(struct work_struct *work)
3109 {
3110 struct hci_dev *hdev = container_of(work, struct hci_dev,
3111 le_scan_restart.work);
3112 struct hci_request req;
3113 struct hci_cp_le_set_scan_enable cp;
3114 int err;
3115
3116 BT_DBG("%s", hdev->name);
3117
3118 /* If controller is not scanning we are done. */
3119 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
3120 return;
3121
3122 hci_req_init(&req, hdev);
3123
3124 hci_req_add_le_scan_disable(&req);
3125
3126 memset(&cp, 0, sizeof(cp));
3127 cp.enable = LE_SCAN_ENABLE;
3128 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3129 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3130
3131 err = hci_req_run(&req, le_scan_restart_work_complete);
3132 if (err)
3133 BT_ERR("Restart LE scan request failed: err %d", err);
3134 }
3135
3136 /* Copy the Identity Address of the controller.
3137 *
3138 * If the controller has a public BD_ADDR, then by default use that one.
3139 * If this is an LE-only controller without a public address, default to
3140 * the static random address.
3141 *
3142 * For debugging purposes it is possible to force controllers with a
3143 * public address to use the static random address instead.
3144 *
3145 * In case BR/EDR has been disabled on a dual-mode controller and
3146 * userspace has configured a static address, then that address
3147 * becomes the identity address instead of the public BR/EDR address.
3148 */
3149 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3150 u8 *bdaddr_type)
3151 {
3152 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3153 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
3154 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
3155 bacmp(&hdev->static_addr, BDADDR_ANY))) {
3156 bacpy(bdaddr, &hdev->static_addr);
3157 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3158 } else {
3159 bacpy(bdaddr, &hdev->bdaddr);
3160 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3161 }
3162 }
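
/* Illustrative sketch, not part of the original file: a caller building
 * own-address information (for example for advertising parameters)
 * would consume the result like this. The example_ name is
 * hypothetical.
 */
static void example_log_identity(struct hci_dev *hdev)
{
        bdaddr_t id_addr;
        u8 id_addr_type;

        hci_copy_identity_address(hdev, &id_addr, &id_addr_type);

        /* id_addr_type is ADDR_LE_DEV_PUBLIC or ADDR_LE_DEV_RANDOM */
        BT_DBG("%s identity %pMR (type %u)", hdev->name, &id_addr,
               id_addr_type);
}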
3163
3164 /* Alloc HCI device */
3165 struct hci_dev *hci_alloc_dev(void)
3166 {
3167 struct hci_dev *hdev;
3168
3169 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3170 if (!hdev)
3171 return NULL;
3172
3173 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3174 hdev->esco_type = (ESCO_HV1);
3175 hdev->link_mode = (HCI_LM_ACCEPT);
3176 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3177 hdev->io_capability = 0x03; /* No Input No Output */
3178 hdev->manufacturer = 0xffff; /* Default to internal use */
3179 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3180 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3181 hdev->adv_instance_cnt = 0;
3182 hdev->cur_adv_instance = 0x00;
3183 hdev->adv_instance_timeout = 0;
3184
3185 hdev->sniff_max_interval = 800;
3186 hdev->sniff_min_interval = 80;
3187
3188 hdev->le_adv_channel_map = 0x07;
3189 hdev->le_adv_min_interval = 0x0800;
3190 hdev->le_adv_max_interval = 0x0800;
3191 hdev->le_scan_interval = 0x0060;
3192 hdev->le_scan_window = 0x0030;
3193 hdev->le_conn_min_interval = 0x0028;
3194 hdev->le_conn_max_interval = 0x0038;
3195 hdev->le_conn_latency = 0x0000;
3196 hdev->le_supv_timeout = 0x002a;
3197 hdev->le_def_tx_len = 0x001b;
3198 hdev->le_def_tx_time = 0x0148;
3199 hdev->le_max_tx_len = 0x001b;
3200 hdev->le_max_tx_time = 0x0148;
3201 hdev->le_max_rx_len = 0x001b;
3202 hdev->le_max_rx_time = 0x0148;
3203
3204 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3205 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3206 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3207 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3208
3209 mutex_init(&hdev->lock);
3210 mutex_init(&hdev->req_lock);
3211
3212 INIT_LIST_HEAD(&hdev->mgmt_pending);
3213 INIT_LIST_HEAD(&hdev->blacklist);
3214 INIT_LIST_HEAD(&hdev->whitelist);
3215 INIT_LIST_HEAD(&hdev->uuids);
3216 INIT_LIST_HEAD(&hdev->link_keys);
3217 INIT_LIST_HEAD(&hdev->long_term_keys);
3218 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3219 INIT_LIST_HEAD(&hdev->remote_oob_data);
3220 INIT_LIST_HEAD(&hdev->le_white_list);
3221 INIT_LIST_HEAD(&hdev->le_conn_params);
3222 INIT_LIST_HEAD(&hdev->pend_le_conns);
3223 INIT_LIST_HEAD(&hdev->pend_le_reports);
3224 INIT_LIST_HEAD(&hdev->conn_hash.list);
3225 INIT_LIST_HEAD(&hdev->adv_instances);
3226
3227 INIT_WORK(&hdev->rx_work, hci_rx_work);
3228 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3229 INIT_WORK(&hdev->tx_work, hci_tx_work);
3230 INIT_WORK(&hdev->power_on, hci_power_on);
3231 INIT_WORK(&hdev->error_reset, hci_error_reset);
3232
3233 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3234 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3235 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3236 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
3237 INIT_DELAYED_WORK(&hdev->adv_instance_expire, hci_adv_timeout_expire);
3238
3239 skb_queue_head_init(&hdev->rx_q);
3240 skb_queue_head_init(&hdev->cmd_q);
3241 skb_queue_head_init(&hdev->raw_q);
3242
3243 init_waitqueue_head(&hdev->req_wait_q);
3244
3245 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3246
3247 hci_init_sysfs(hdev);
3248 discovery_init(hdev);
3249
3250 return hdev;
3251 }
3252 EXPORT_SYMBOL(hci_alloc_dev);
3253
3254 /* Free HCI device */
3255 void hci_free_dev(struct hci_dev *hdev)
3256 {
3257 /* will free via device release */
3258 put_device(&hdev->dev);
3259 }
3260 EXPORT_SYMBOL(hci_free_dev);
3261
3262 /* Register HCI device */
3263 int hci_register_dev(struct hci_dev *hdev)
3264 {
3265 int id, error;
3266
3267 if (!hdev->open || !hdev->close || !hdev->send)
3268 return -EINVAL;
3269
3270 /* Do not allow HCI_AMP devices to register at index 0,
3271 * so the index can be used as the AMP controller ID.
3272 */
3273 switch (hdev->dev_type) {
3274 case HCI_BREDR:
3275 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3276 break;
3277 case HCI_AMP:
3278 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3279 break;
3280 default:
3281 return -EINVAL;
3282 }
3283
3284 if (id < 0)
3285 return id;
3286
3287 sprintf(hdev->name, "hci%d", id);
3288 hdev->id = id;
3289
3290 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3291
3292 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3293 WQ_MEM_RECLAIM, 1, hdev->name);
3294 if (!hdev->workqueue) {
3295 error = -ENOMEM;
3296 goto err;
3297 }
3298
3299 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3300 WQ_MEM_RECLAIM, 1, hdev->name);
3301 if (!hdev->req_workqueue) {
3302 destroy_workqueue(hdev->workqueue);
3303 error = -ENOMEM;
3304 goto err;
3305 }
3306
3307 if (!IS_ERR_OR_NULL(bt_debugfs))
3308 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3309
3310 dev_set_name(&hdev->dev, "%s", hdev->name);
3311
3312 error = device_add(&hdev->dev);
3313 if (error < 0)
3314 goto err_wqueue;
3315
3316 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3317 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3318 hdev);
3319 if (hdev->rfkill) {
3320 if (rfkill_register(hdev->rfkill) < 0) {
3321 rfkill_destroy(hdev->rfkill);
3322 hdev->rfkill = NULL;
3323 }
3324 }
3325
3326 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3327 hci_dev_set_flag(hdev, HCI_RFKILLED);
3328
3329 hci_dev_set_flag(hdev, HCI_SETUP);
3330 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3331
3332 if (hdev->dev_type == HCI_BREDR) {
3333 /* Assume BR/EDR support until proven otherwise (such as
3334 * through reading supported features during init).
3335 */
3336 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3337 }
3338
3339 write_lock(&hci_dev_list_lock);
3340 list_add(&hdev->list, &hci_dev_list);
3341 write_unlock(&hci_dev_list_lock);
3342
3343 /* Devices that are marked for raw-only usage are unconfigured
3344 * and should not be included in normal operation.
3345 */
3346 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3347 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3348
3349 hci_notify(hdev, HCI_DEV_REG);
3350 hci_dev_hold(hdev);
3351
3352 queue_work(hdev->req_workqueue, &hdev->power_on);
3353
3354 return id;
3355
3356 err_wqueue:
3357 destroy_workqueue(hdev->workqueue);
3358 destroy_workqueue(hdev->req_workqueue);
3359 err:
3360 ida_simple_remove(&hci_index_ida, hdev->id);
3361
3362 return error;
3363 }
3364 EXPORT_SYMBOL(hci_register_dev);
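
/* Illustrative sketch, not part of the original file: the minimal
 * registration flow a transport driver follows. The example_ callbacks
 * and probe function are hypothetical stubs; a real driver would talk
 * to its hardware in open/close/send.
 */
static int example_open(struct hci_dev *hdev)
{
        return 0;
}

static int example_close(struct hci_dev *hdev)
{
        return 0;
}

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
        /* A real driver would hand the frame to its transport here. */
        kfree_skb(skb);
        return 0;
}

static int example_driver_probe(void)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_alloc_dev();
        if (!hdev)
                return -ENOMEM;

        hdev->bus = HCI_VIRTUAL;
        hdev->open = example_open;
        hdev->close = example_close;
        hdev->send = example_send;

        err = hci_register_dev(hdev);
        if (err < 0) {
                hci_free_dev(hdev);
                return err;
        }

        return 0;
}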
3365
3366 /* Unregister HCI device */
3367 void hci_unregister_dev(struct hci_dev *hdev)
3368 {
3369 int id;
3370
3371 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3372
3373 hci_dev_set_flag(hdev, HCI_UNREGISTER);
3374
3375 id = hdev->id;
3376
3377 write_lock(&hci_dev_list_lock);
3378 list_del(&hdev->list);
3379 write_unlock(&hci_dev_list_lock);
3380
3381 hci_dev_do_close(hdev);
3382
3383 cancel_work_sync(&hdev->power_on);
3384
3385 if (!test_bit(HCI_INIT, &hdev->flags) &&
3386 !hci_dev_test_flag(hdev, HCI_SETUP) &&
3387 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3388 hci_dev_lock(hdev);
3389 mgmt_index_removed(hdev);
3390 hci_dev_unlock(hdev);
3391 }
3392
3393 /* mgmt_index_removed should take care of emptying the
3394 * pending list */
3395 BUG_ON(!list_empty(&hdev->mgmt_pending));
3396
3397 hci_notify(hdev, HCI_DEV_UNREG);
3398
3399 if (hdev->rfkill) {
3400 rfkill_unregister(hdev->rfkill);
3401 rfkill_destroy(hdev->rfkill);
3402 }
3403
3404 device_del(&hdev->dev);
3405
3406 debugfs_remove_recursive(hdev->debugfs);
3407
3408 destroy_workqueue(hdev->workqueue);
3409 destroy_workqueue(hdev->req_workqueue);
3410
3411 hci_dev_lock(hdev);
3412 hci_bdaddr_list_clear(&hdev->blacklist);
3413 hci_bdaddr_list_clear(&hdev->whitelist);
3414 hci_uuids_clear(hdev);
3415 hci_link_keys_clear(hdev);
3416 hci_smp_ltks_clear(hdev);
3417 hci_smp_irks_clear(hdev);
3418 hci_remote_oob_data_clear(hdev);
3419 hci_adv_instances_clear(hdev);
3420 hci_bdaddr_list_clear(&hdev->le_white_list);
3421 hci_conn_params_clear_all(hdev);
3422 hci_discovery_filter_clear(hdev);
3423 hci_dev_unlock(hdev);
3424
3425 hci_dev_put(hdev);
3426
3427 ida_simple_remove(&hci_index_ida, id);
3428 }
3429 EXPORT_SYMBOL(hci_unregister_dev);
3430
3431 /* Suspend HCI device */
3432 int hci_suspend_dev(struct hci_dev *hdev)
3433 {
3434 hci_notify(hdev, HCI_DEV_SUSPEND);
3435 return 0;
3436 }
3437 EXPORT_SYMBOL(hci_suspend_dev);
3438
3439 /* Resume HCI device */
3440 int hci_resume_dev(struct hci_dev *hdev)
3441 {
3442 hci_notify(hdev, HCI_DEV_RESUME);
3443 return 0;
3444 }
3445 EXPORT_SYMBOL(hci_resume_dev);
3446
3447 /* Reset HCI device */
3448 int hci_reset_dev(struct hci_dev *hdev)
3449 {
3450 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3451 struct sk_buff *skb;
3452
3453 skb = bt_skb_alloc(3, GFP_ATOMIC);
3454 if (!skb)
3455 return -ENOMEM;
3456
3457 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3458 memcpy(skb_put(skb, 3), hw_err, 3);
3459
3460 /* Send Hardware Error to upper stack */
3461 return hci_recv_frame(hdev, skb);
3462 }
3463 EXPORT_SYMBOL(hci_reset_dev);
3464
3465 /* Receive frame from HCI drivers */
3466 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3467 {
3468 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3469 && !test_bit(HCI_INIT, &hdev->flags))) {
3470 kfree_skb(skb);
3471 return -ENXIO;
3472 }
3473
3474 /* Incoming skb */
3475 bt_cb(skb)->incoming = 1;
3476
3477 /* Time stamp */
3478 __net_timestamp(skb);
3479
3480 skb_queue_tail(&hdev->rx_q, skb);
3481 queue_work(hdev->workqueue, &hdev->rx_work);
3482
3483 return 0;
3484 }
3485 EXPORT_SYMBOL(hci_recv_frame);
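
/* Illustrative sketch, not part of the original file: how a transport
 * driver hands a received HCI event to the core, mirroring the pattern
 * hci_reset_dev() uses above. The example_ name and the event bytes are
 * hypothetical.
 */
static int example_deliver_event(struct hci_dev *hdev, const u8 *data,
                                 unsigned int len)
{
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return -ENOMEM;

        bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
        memcpy(skb_put(skb, len), data, len);

        return hci_recv_frame(hdev, skb);
}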
3486
3487 /* ---- Interface to upper protocols ---- */
3488
3489 int hci_register_cb(struct hci_cb *cb)
3490 {
3491 BT_DBG("%p name %s", cb, cb->name);
3492
3493 mutex_lock(&hci_cb_list_lock);
3494 list_add_tail(&cb->list, &hci_cb_list);
3495 mutex_unlock(&hci_cb_list_lock);
3496
3497 return 0;
3498 }
3499 EXPORT_SYMBOL(hci_register_cb);
3500
3501 int hci_unregister_cb(struct hci_cb *cb)
3502 {
3503 BT_DBG("%p name %s", cb, cb->name);
3504
3505 mutex_lock(&hci_cb_list_lock);
3506 list_del(&cb->list);
3507 mutex_unlock(&hci_cb_list_lock);
3508
3509 return 0;
3510 }
3511 EXPORT_SYMBOL(hci_unregister_cb);
3512
3513 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3514 {
3515 int err;
3516
3517 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3518
3519 /* Time stamp */
3520 __net_timestamp(skb);
3521
3522 /* Send copy to monitor */
3523 hci_send_to_monitor(hdev, skb);
3524
3525 if (atomic_read(&hdev->promisc)) {
3526 /* Send copy to the sockets */
3527 hci_send_to_sock(hdev, skb);
3528 }
3529
3530 /* Get rid of skb owner, prior to sending to the driver. */
3531 skb_orphan(skb);
3532
3533 err = hdev->send(hdev, skb);
3534 if (err < 0) {
3535 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3536 kfree_skb(skb);
3537 }
3538 }
3539
3540 /* Send HCI command */
3541 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3542 const void *param)
3543 {
3544 struct sk_buff *skb;
3545
3546 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3547
3548 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3549 if (!skb) {
3550 BT_ERR("%s no memory for command", hdev->name);
3551 return -ENOMEM;
3552 }
3553
3554 /* Stand-alone HCI commands must be flagged as
3555 * single-command requests.
3556 */
3557 bt_cb(skb)->req.start = true;
3558
3559 skb_queue_tail(&hdev->cmd_q, skb);
3560 queue_work(hdev->workqueue, &hdev->cmd_work);
3561
3562 return 0;
3563 }
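
/* Illustrative sketch, not part of the original file: issuing a
 * stand-alone Write Scan Enable command through the queue above.
 * HCI_OP_WRITE_SCAN_ENABLE and SCAN_PAGE come from hci.h; the
 * example_ name is hypothetical.
 */
static int example_enable_page_scan(struct hci_dev *hdev)
{
        u8 scan = SCAN_PAGE;

        return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan),
                            &scan);
}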
3564
3565 /* Get data from the previously sent command */
3566 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3567 {
3568 struct hci_command_hdr *hdr;
3569
3570 if (!hdev->sent_cmd)
3571 return NULL;
3572
3573 hdr = (void *) hdev->sent_cmd->data;
3574
3575 if (hdr->opcode != cpu_to_le16(opcode))
3576 return NULL;
3577
3578 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3579
3580 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3581 }
3582
3583 /* Send ACL data */
3584 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3585 {
3586 struct hci_acl_hdr *hdr;
3587 int len = skb->len;
3588
3589 skb_push(skb, HCI_ACL_HDR_SIZE);
3590 skb_reset_transport_header(skb);
3591 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3592 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3593 hdr->dlen = cpu_to_le16(len);
3594 }
3595
3596 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3597 struct sk_buff *skb, __u16 flags)
3598 {
3599 struct hci_conn *conn = chan->conn;
3600 struct hci_dev *hdev = conn->hdev;
3601 struct sk_buff *list;
3602
3603 skb->len = skb_headlen(skb);
3604 skb->data_len = 0;
3605
3606 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3607
3608 switch (hdev->dev_type) {
3609 case HCI_BREDR:
3610 hci_add_acl_hdr(skb, conn->handle, flags);
3611 break;
3612 case HCI_AMP:
3613 hci_add_acl_hdr(skb, chan->handle, flags);
3614 break;
3615 default:
3616 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3617 return;
3618 }
3619
3620 list = skb_shinfo(skb)->frag_list;
3621 if (!list) {
3622 /* Non-fragmented */
3623 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3624
3625 skb_queue_tail(queue, skb);
3626 } else {
3627 /* Fragmented */
3628 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3629
3630 skb_shinfo(skb)->frag_list = NULL;
3631
3632 /* Queue all fragments atomically. We need to use spin_lock_bh
3633 * here because of 6LoWPAN links, as there this function is
3634 * called from softirq and using normal spin lock could cause
3635 * deadlocks.
3636 */
3637 spin_lock_bh(&queue->lock);
3638
3639 __skb_queue_tail(queue, skb);
3640
3641 flags &= ~ACL_START;
3642 flags |= ACL_CONT;
3643 do {
3644 skb = list; list = list->next;
3645
3646 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3647 hci_add_acl_hdr(skb, conn->handle, flags);
3648
3649 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3650
3651 __skb_queue_tail(queue, skb);
3652 } while (list);
3653
3654 spin_unlock_bh(&queue->lock);
3655 }
3656 }
3657
3658 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3659 {
3660 struct hci_dev *hdev = chan->conn->hdev;
3661
3662 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3663
3664 hci_queue_acl(chan, &chan->data_q, skb, flags);
3665
3666 queue_work(hdev->workqueue, &hdev->tx_work);
3667 }
3668
3669 /* Send SCO data */
3670 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3671 {
3672 struct hci_dev *hdev = conn->hdev;
3673 struct hci_sco_hdr hdr;
3674
3675 BT_DBG("%s len %d", hdev->name, skb->len);
3676
3677 hdr.handle = cpu_to_le16(conn->handle);
3678 hdr.dlen = skb->len;
3679
3680 skb_push(skb, HCI_SCO_HDR_SIZE);
3681 skb_reset_transport_header(skb);
3682 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3683
3684 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3685
3686 skb_queue_tail(&conn->data_q, skb);
3687 queue_work(hdev->workqueue, &hdev->tx_work);
3688 }
3689
3690 /* ---- HCI TX task (outgoing data) ---- */
3691
3692 /* HCI Connection scheduler */
3693 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3694 int *quote)
3695 {
3696 struct hci_conn_hash *h = &hdev->conn_hash;
3697 struct hci_conn *conn = NULL, *c;
3698 unsigned int num = 0, min = ~0;
3699
3700 /* We don't have to lock device here. Connections are always
3701 * added and removed with TX task disabled. */
3702
3703 rcu_read_lock();
3704
3705 list_for_each_entry_rcu(c, &h->list, list) {
3706 if (c->type != type || skb_queue_empty(&c->data_q))
3707 continue;
3708
3709 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3710 continue;
3711
3712 num++;
3713
3714 if (c->sent < min) {
3715 min = c->sent;
3716 conn = c;
3717 }
3718
3719 if (hci_conn_num(hdev, type) == num)
3720 break;
3721 }
3722
3723 rcu_read_unlock();
3724
3725 if (conn) {
3726 int cnt, q;
3727
3728 switch (conn->type) {
3729 case ACL_LINK:
3730 cnt = hdev->acl_cnt;
3731 break;
3732 case SCO_LINK:
3733 case ESCO_LINK:
3734 cnt = hdev->sco_cnt;
3735 break;
3736 case LE_LINK:
3737 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3738 break;
3739 default:
3740 cnt = 0;
3741 BT_ERR("Unknown link type");
3742 }
3743
3744 q = cnt / num;
3745 *quote = q ? q : 1;
3746 } else
3747 *quote = 0;
3748
3749 BT_DBG("conn %p quote %d", conn, *quote);
3750 return conn;
3751 }
3752
3753 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3754 {
3755 struct hci_conn_hash *h = &hdev->conn_hash;
3756 struct hci_conn *c;
3757
3758 BT_ERR("%s link tx timeout", hdev->name);
3759
3760 rcu_read_lock();
3761
3762 /* Kill stalled connections */
3763 list_for_each_entry_rcu(c, &h->list, list) {
3764 if (c->type == type && c->sent) {
3765 BT_ERR("%s killing stalled connection %pMR",
3766 hdev->name, &c->dst);
3767 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3768 }
3769 }
3770
3771 rcu_read_unlock();
3772 }
3773
3774 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3775 int *quote)
3776 {
3777 struct hci_conn_hash *h = &hdev->conn_hash;
3778 struct hci_chan *chan = NULL;
3779 unsigned int num = 0, min = ~0, cur_prio = 0;
3780 struct hci_conn *conn;
3781 int cnt, q, conn_num = 0;
3782
3783 BT_DBG("%s", hdev->name);
3784
3785 rcu_read_lock();
3786
3787 list_for_each_entry_rcu(conn, &h->list, list) {
3788 struct hci_chan *tmp;
3789
3790 if (conn->type != type)
3791 continue;
3792
3793 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3794 continue;
3795
3796 conn_num++;
3797
3798 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3799 struct sk_buff *skb;
3800
3801 if (skb_queue_empty(&tmp->data_q))
3802 continue;
3803
3804 skb = skb_peek(&tmp->data_q);
3805 if (skb->priority < cur_prio)
3806 continue;
3807
3808 if (skb->priority > cur_prio) {
3809 num = 0;
3810 min = ~0;
3811 cur_prio = skb->priority;
3812 }
3813
3814 num++;
3815
3816 if (conn->sent < min) {
3817 min = conn->sent;
3818 chan = tmp;
3819 }
3820 }
3821
3822 if (hci_conn_num(hdev, type) == conn_num)
3823 break;
3824 }
3825
3826 rcu_read_unlock();
3827
3828 if (!chan)
3829 return NULL;
3830
3831 switch (chan->conn->type) {
3832 case ACL_LINK:
3833 cnt = hdev->acl_cnt;
3834 break;
3835 case AMP_LINK:
3836 cnt = hdev->block_cnt;
3837 break;
3838 case SCO_LINK:
3839 case ESCO_LINK:
3840 cnt = hdev->sco_cnt;
3841 break;
3842 case LE_LINK:
3843 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3844 break;
3845 default:
3846 cnt = 0;
3847 BT_ERR("Unknown link type");
3848 }
3849
3850 q = cnt / num;
3851 *quote = q ? q : 1;
3852 BT_DBG("chan %p quote %d", chan, *quote);
3853 return chan;
3854 }
3855
3856 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3857 {
3858 struct hci_conn_hash *h = &hdev->conn_hash;
3859 struct hci_conn *conn;
3860 int num = 0;
3861
3862 BT_DBG("%s", hdev->name);
3863
3864 rcu_read_lock();
3865
3866 list_for_each_entry_rcu(conn, &h->list, list) {
3867 struct hci_chan *chan;
3868
3869 if (conn->type != type)
3870 continue;
3871
3872 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3873 continue;
3874
3875 num++;
3876
3877 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3878 struct sk_buff *skb;
3879
3880 if (chan->sent) {
3881 chan->sent = 0;
3882 continue;
3883 }
3884
3885 if (skb_queue_empty(&chan->data_q))
3886 continue;
3887
3888 skb = skb_peek(&chan->data_q);
3889 if (skb->priority >= HCI_PRIO_MAX - 1)
3890 continue;
3891
3892 skb->priority = HCI_PRIO_MAX - 1;
3893
3894 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3895 skb->priority);
3896 }
3897
3898 if (hci_conn_num(hdev, type) == num)
3899 break;
3900 }
3901
3902 rcu_read_unlock();
3903
3904 }
3905
3906 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3907 {
3908 /* Calculate count of blocks used by this packet */
3909 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3910 }
3911
3912 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3913 {
3914 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3915 /* ACL tx timeout must be longer than maximum
3916 * link supervision timeout (40.9 seconds) */
3917 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3918 HCI_ACL_TX_TIMEOUT))
3919 hci_link_tx_to(hdev, ACL_LINK);
3920 }
3921 }
3922
3923 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3924 {
3925 unsigned int cnt = hdev->acl_cnt;
3926 struct hci_chan *chan;
3927 struct sk_buff *skb;
3928 int quote;
3929
3930 __check_timeout(hdev, cnt);
3931
3932 while (hdev->acl_cnt &&
3933 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3934 u32 priority = (skb_peek(&chan->data_q))->priority;
3935 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3936 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3937 skb->len, skb->priority);
3938
3939 /* Stop if priority has changed */
3940 if (skb->priority < priority)
3941 break;
3942
3943 skb = skb_dequeue(&chan->data_q);
3944
3945 hci_conn_enter_active_mode(chan->conn,
3946 bt_cb(skb)->force_active);
3947
3948 hci_send_frame(hdev, skb);
3949 hdev->acl_last_tx = jiffies;
3950
3951 hdev->acl_cnt--;
3952 chan->sent++;
3953 chan->conn->sent++;
3954 }
3955 }
3956
3957 if (cnt != hdev->acl_cnt)
3958 hci_prio_recalculate(hdev, ACL_LINK);
3959 }
3960
3961 static void hci_sched_acl_blk(struct hci_dev *hdev)
3962 {
3963 unsigned int cnt = hdev->block_cnt;
3964 struct hci_chan *chan;
3965 struct sk_buff *skb;
3966 int quote;
3967 u8 type;
3968
3969 __check_timeout(hdev, cnt);
3970
3971 BT_DBG("%s", hdev->name);
3972
3973 if (hdev->dev_type == HCI_AMP)
3974 type = AMP_LINK;
3975 else
3976 type = ACL_LINK;
3977
3978 while (hdev->block_cnt > 0 &&
3979 (chan = hci_chan_sent(hdev, type, &quote))) {
3980 u32 priority = (skb_peek(&chan->data_q))->priority;
3981 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3982 int blocks;
3983
3984 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3985 skb->len, skb->priority);
3986
3987 /* Stop if priority has changed */
3988 if (skb->priority < priority)
3989 break;
3990
3991 skb = skb_dequeue(&chan->data_q);
3992
3993 blocks = __get_blocks(hdev, skb);
3994 if (blocks > hdev->block_cnt)
3995 return;
3996
3997 hci_conn_enter_active_mode(chan->conn,
3998 bt_cb(skb)->force_active);
3999
4000 hci_send_frame(hdev, skb);
4001 hdev->acl_last_tx = jiffies;
4002
4003 hdev->block_cnt -= blocks;
4004 quote -= blocks;
4005
4006 chan->sent += blocks;
4007 chan->conn->sent += blocks;
4008 }
4009 }
4010
4011 if (cnt != hdev->block_cnt)
4012 hci_prio_recalculate(hdev, type);
4013 }
4014
4015 static void hci_sched_acl(struct hci_dev *hdev)
4016 {
4017 BT_DBG("%s", hdev->name);
4018
4019 /* No ACL link over BR/EDR controller */
4020 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4021 return;
4022
4023 /* No AMP link over AMP controller */
4024 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4025 return;
4026
4027 switch (hdev->flow_ctl_mode) {
4028 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4029 hci_sched_acl_pkt(hdev);
4030 break;
4031
4032 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4033 hci_sched_acl_blk(hdev);
4034 break;
4035 }
4036 }
4037
4038 /* Schedule SCO */
4039 static void hci_sched_sco(struct hci_dev *hdev)
4040 {
4041 struct hci_conn *conn;
4042 struct sk_buff *skb;
4043 int quote;
4044
4045 BT_DBG("%s", hdev->name);
4046
4047 if (!hci_conn_num(hdev, SCO_LINK))
4048 return;
4049
4050 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4051 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4052 BT_DBG("skb %p len %d", skb, skb->len);
4053 hci_send_frame(hdev, skb);
4054
4055 conn->sent++;
4056 if (conn->sent == ~0)
4057 conn->sent = 0;
4058 }
4059 }
4060 }
4061
4062 static void hci_sched_esco(struct hci_dev *hdev)
4063 {
4064 struct hci_conn *conn;
4065 struct sk_buff *skb;
4066 int quote;
4067
4068 BT_DBG("%s", hdev->name);
4069
4070 if (!hci_conn_num(hdev, ESCO_LINK))
4071 return;
4072
4073 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4074 &quote))) {
4075 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4076 BT_DBG("skb %p len %d", skb, skb->len);
4077 hci_send_frame(hdev, skb);
4078
4079 conn->sent++;
4080 if (conn->sent == ~0)
4081 conn->sent = 0;
4082 }
4083 }
4084 }
4085
4086 static void hci_sched_le(struct hci_dev *hdev)
4087 {
4088 struct hci_chan *chan;
4089 struct sk_buff *skb;
4090 int quote, cnt, tmp;
4091
4092 BT_DBG("%s", hdev->name);
4093
4094 if (!hci_conn_num(hdev, LE_LINK))
4095 return;
4096
4097 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
4098 /* LE tx timeout must be longer than maximum
4099 * link supervision timeout (40.9 seconds) */
4100 if (!hdev->le_cnt && hdev->le_pkts &&
4101 time_after(jiffies, hdev->le_last_tx + HZ * 45))
4102 hci_link_tx_to(hdev, LE_LINK);
4103 }
4104
4105 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4106 tmp = cnt;
4107 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4108 u32 priority = (skb_peek(&chan->data_q))->priority;
4109 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4110 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4111 skb->len, skb->priority);
4112
4113 /* Stop if priority has changed */
4114 if (skb->priority < priority)
4115 break;
4116
4117 skb = skb_dequeue(&chan->data_q);
4118
4119 hci_send_frame(hdev, skb);
4120 hdev->le_last_tx = jiffies;
4121
4122 cnt--;
4123 chan->sent++;
4124 chan->conn->sent++;
4125 }
4126 }
4127
4128 if (hdev->le_pkts)
4129 hdev->le_cnt = cnt;
4130 else
4131 hdev->acl_cnt = cnt;
4132
4133 if (cnt != tmp)
4134 hci_prio_recalculate(hdev, LE_LINK);
4135 }
4136
4137 static void hci_tx_work(struct work_struct *work)
4138 {
4139 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4140 struct sk_buff *skb;
4141
4142 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4143 hdev->sco_cnt, hdev->le_cnt);
4144
4145 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4146 /* Schedule queues and send stuff to HCI driver */
4147 hci_sched_acl(hdev);
4148 hci_sched_sco(hdev);
4149 hci_sched_esco(hdev);
4150 hci_sched_le(hdev);
4151 }
4152
4153 /* Send next queued raw (unknown type) packet */
4154 while ((skb = skb_dequeue(&hdev->raw_q)))
4155 hci_send_frame(hdev, skb);
4156 }
4157
4158 /* ----- HCI RX task (incoming data processing) ----- */
4159
4160 /* ACL data packet */
4161 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4162 {
4163 struct hci_acl_hdr *hdr = (void *) skb->data;
4164 struct hci_conn *conn;
4165 __u16 handle, flags;
4166
4167 skb_pull(skb, HCI_ACL_HDR_SIZE);
4168
4169 handle = __le16_to_cpu(hdr->handle);
4170 flags = hci_flags(handle);
4171 handle = hci_handle(handle);
4172
4173 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4174 handle, flags);
4175
4176 hdev->stat.acl_rx++;
4177
4178 hci_dev_lock(hdev);
4179 conn = hci_conn_hash_lookup_handle(hdev, handle);
4180 hci_dev_unlock(hdev);
4181
4182 if (conn) {
4183 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4184
4185 /* Send to upper protocol */
4186 l2cap_recv_acldata(conn, skb, flags);
4187 return;
4188 } else {
4189 BT_ERR("%s ACL packet for unknown connection handle %d",
4190 hdev->name, handle);
4191 }
4192
4193 kfree_skb(skb);
4194 }
4195
4196 /* SCO data packet */
4197 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4198 {
4199 struct hci_sco_hdr *hdr = (void *) skb->data;
4200 struct hci_conn *conn;
4201 __u16 handle;
4202
4203 skb_pull(skb, HCI_SCO_HDR_SIZE);
4204
4205 handle = __le16_to_cpu(hdr->handle);
4206
4207 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4208
4209 hdev->stat.sco_rx++;
4210
4211 hci_dev_lock(hdev);
4212 conn = hci_conn_hash_lookup_handle(hdev, handle);
4213 hci_dev_unlock(hdev);
4214
4215 if (conn) {
4216 /* Send to upper protocol */
4217 sco_recv_scodata(conn, skb);
4218 return;
4219 } else {
4220 BT_ERR("%s SCO packet for unknown connection handle %d",
4221 hdev->name, handle);
4222 }
4223
4224 kfree_skb(skb);
4225 }
4226
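/* A request is complete when the command queue is empty or the next
 * queued command starts a new request.
 */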
4227 static bool hci_req_is_complete(struct hci_dev *hdev)
4228 {
4229 struct sk_buff *skb;
4230
4231 skb = skb_peek(&hdev->cmd_q);
4232 if (!skb)
4233 return true;
4234
4235 return bt_cb(skb)->req.start;
4236 }
4237
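/* Re-queue a clone of the last sent command (unless it was a reset) so
 * that the command work can send it again.
 */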
4238 static void hci_resend_last(struct hci_dev *hdev)
4239 {
4240 struct hci_command_hdr *sent;
4241 struct sk_buff *skb;
4242 u16 opcode;
4243
4244 if (!hdev->sent_cmd)
4245 return;
4246
4247 sent = (void *) hdev->sent_cmd->data;
4248 opcode = __le16_to_cpu(sent->opcode);
4249 if (opcode == HCI_OP_RESET)
4250 return;
4251
4252 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4253 if (!skb)
4254 return;
4255
4256 skb_queue_head(&hdev->cmd_q, skb);
4257 queue_work(hdev->workqueue, &hdev->cmd_work);
4258 }
4259
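/* Match a command complete/status event against the currently pending
 * request and, once the request has finished, hand back its completion
 * callback through req_complete or req_complete_skb.
 */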
4260 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4261 hci_req_complete_t *req_complete,
4262 hci_req_complete_skb_t *req_complete_skb)
4263 {
4264 struct sk_buff *skb;
4265 unsigned long flags;
4266
4267 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4268
4269 /* If the completed command doesn't match the last one that was
4270 * sent, we need to do special handling of it.
4271 */
4272 if (!hci_sent_cmd_data(hdev, opcode)) {
4273 /* Some CSR-based controllers generate a spontaneous
4274 * reset complete event during init, and any pending
4275 * command will never be completed. In such a case we
4276 * need to resend whatever was the last sent
4277 * command.
4278 */
4279 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4280 hci_resend_last(hdev);
4281
4282 return;
4283 }
4284
4285 /* If the command succeeded and there are still more commands in
4286 * this request, the request is not yet complete.
4287 */
4288 if (!status && !hci_req_is_complete(hdev))
4289 return;
4290
4291 /* If this was the last command in a request, the complete
4292 * callback would be found in hdev->sent_cmd instead of the
4293 * command queue (hdev->cmd_q).
4294 */
4295 if (bt_cb(hdev->sent_cmd)->req.complete) {
4296 *req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4297 return;
4298 }
4299
4300 if (bt_cb(hdev->sent_cmd)->req.complete_skb) {
4301 *req_complete_skb = bt_cb(hdev->sent_cmd)->req.complete_skb;
4302 return;
4303 }
4304
4305 /* Remove all pending commands belonging to this request */
4306 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4307 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4308 if (bt_cb(skb)->req.start) {
4309 __skb_queue_head(&hdev->cmd_q, skb);
4310 break;
4311 }
4312
4313 *req_complete = bt_cb(skb)->req.complete;
4314 *req_complete_skb = bt_cb(skb)->req.complete_skb;
4315 kfree_skb(skb);
4316 }
4317 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4318 }
4319
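/* RX work item: copy each received frame to the monitor (and to raw
 * sockets in promiscuous mode) before dispatching it to the event,
 * ACL or SCO handlers.
 */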
4320 static void hci_rx_work(struct work_struct *work)
4321 {
4322 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4323 struct sk_buff *skb;
4324
4325 BT_DBG("%s", hdev->name);
4326
4327 while ((skb = skb_dequeue(&hdev->rx_q))) {
4328 /* Send copy to monitor */
4329 hci_send_to_monitor(hdev, skb);
4330
4331 if (atomic_read(&hdev->promisc)) {
4332 /* Send copy to the sockets */
4333 hci_send_to_sock(hdev, skb);
4334 }
4335
4336 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4337 kfree_skb(skb);
4338 continue;
4339 }
4340
4341 if (test_bit(HCI_INIT, &hdev->flags)) {
4342 /* Don't process data packets in this state. */
4343 switch (bt_cb(skb)->pkt_type) {
4344 case HCI_ACLDATA_PKT:
4345 case HCI_SCODATA_PKT:
4346 kfree_skb(skb);
4347 continue;
4348 }
4349 }
4350
4351 /* Process frame */
4352 switch (bt_cb(skb)->pkt_type) {
4353 case HCI_EVENT_PKT:
4354 BT_DBG("%s Event packet", hdev->name);
4355 hci_event_packet(hdev, skb);
4356 break;
4357
4358 case HCI_ACLDATA_PKT:
4359 BT_DBG("%s ACL data packet", hdev->name);
4360 hci_acldata_packet(hdev, skb);
4361 break;
4362
4363 case HCI_SCODATA_PKT:
4364 BT_DBG("%s SCO data packet", hdev->name);
4365 hci_scodata_packet(hdev, skb);
4366 break;
4367
4368 default:
4369 kfree_skb(skb);
4370 break;
4371 }
4372 }
4373 }
4374
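/* Command work item: send one queued command whenever the controller
 * has credit (cmd_cnt), keeping a clone in sent_cmd for completion
 * matching and possible retransmission.
 */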
4375 static void hci_cmd_work(struct work_struct *work)
4376 {
4377 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4378 struct sk_buff *skb;
4379
4380 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4381 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4382
4383 /* Send queued commands */
4384 if (atomic_read(&hdev->cmd_cnt)) {
4385 skb = skb_dequeue(&hdev->cmd_q);
4386 if (!skb)
4387 return;
4388
4389 kfree_skb(hdev->sent_cmd);
4390
4391 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4392 if (hdev->sent_cmd) {
4393 atomic_dec(&hdev->cmd_cnt);
4394 hci_send_frame(hdev, skb);
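/* While a reset is pending (HCI_RESET set) the command timer is
 * cancelled rather than re-armed, presumably to avoid a spurious
 * command timeout during controller reset.
 */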
4395 if (test_bit(HCI_RESET, &hdev->flags))
4396 cancel_delayed_work(&hdev->cmd_timer);
4397 else
4398 schedule_delayed_work(&hdev->cmd_timer,
4399 HCI_CMD_TIMEOUT);
4400 } else {
4401 skb_queue_head(&hdev->cmd_q, skb);
4402 queue_work(hdev->workqueue, &hdev->cmd_work);
4403 }
4404 }
4405 }