1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
34
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
39
40 #include "hci_request.h"
41 #include "hci_debugfs.h"
42 #include "smp.h"
43 #include "leds.h"
44
45 static void hci_rx_work(struct work_struct *work);
46 static void hci_cmd_work(struct work_struct *work);
47 static void hci_tx_work(struct work_struct *work);
48
49 /* HCI device list */
50 LIST_HEAD(hci_dev_list);
51 DEFINE_RWLOCK(hci_dev_list_lock);
52
53 /* HCI callback list */
54 LIST_HEAD(hci_cb_list);
55 DEFINE_MUTEX(hci_cb_list_lock);
56
57 /* HCI ID Numbering */
58 static DEFINE_IDA(hci_index_ida);
59
60 /* ---- HCI debugfs entries ---- */
61
62 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
63 size_t count, loff_t *ppos)
64 {
65 struct hci_dev *hdev = file->private_data;
66 char buf[3];
67
68 buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
69 buf[1] = '\n';
70 buf[2] = '\0';
71 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
72 }
73
74 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
75 size_t count, loff_t *ppos)
76 {
77 struct hci_dev *hdev = file->private_data;
78 struct sk_buff *skb;
79 char buf[32];
80 size_t buf_size = min(count, (sizeof(buf)-1));
81 bool enable;
82
83 if (!test_bit(HCI_UP, &hdev->flags))
84 return -ENETDOWN;
85
86 if (copy_from_user(buf, user_buf, buf_size))
87 return -EFAULT;
88
89 buf[buf_size] = '\0';
90 if (strtobool(buf, &enable))
91 return -EINVAL;
92
93 if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
94 return -EALREADY;
95
96 hci_req_sync_lock(hdev);
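/* There is no HCI command to leave Device Under Test mode; disabling it is
 * therefore done by resetting the controller.
 */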
97 if (enable)
98 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
99 HCI_CMD_TIMEOUT);
100 else
101 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
102 HCI_CMD_TIMEOUT);
103 hci_req_sync_unlock(hdev);
104
105 if (IS_ERR(skb))
106 return PTR_ERR(skb);
107
108 kfree_skb(skb);
109
110 hci_dev_change_flag(hdev, HCI_DUT_MODE);
111
112 return count;
113 }
114
115 static const struct file_operations dut_mode_fops = {
116 .open = simple_open,
117 .read = dut_mode_read,
118 .write = dut_mode_write,
119 .llseek = default_llseek,
120 };
121
122 static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
123 size_t count, loff_t *ppos)
124 {
125 struct hci_dev *hdev = file->private_data;
126 char buf[3];
127
128 buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
129 buf[1] = '\n';
130 buf[2] = '\0';
131 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
132 }
133
134 static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
135 size_t count, loff_t *ppos)
136 {
137 struct hci_dev *hdev = file->private_data;
138 char buf[32];
139 size_t buf_size = min(count, (sizeof(buf)-1));
140 bool enable;
141 int err;
142
143 if (copy_from_user(buf, user_buf, buf_size))
144 return -EFAULT;
145
146 buf[buf_size] = '\0';
147 if (strtobool(buf, &enable))
148 return -EINVAL;
149
150 /* When the diagnostic flags are not persistent and the transport
151 * is not active or in user channel operation, then there is no need
152 * for the vendor callback. Instead just store the desired value and
153 * the setting will be programmed when the controller gets powered on.
154 */
155 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
156 (!test_bit(HCI_RUNNING, &hdev->flags) ||
157 hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
158 goto done;
159
160 hci_req_sync_lock(hdev);
161 err = hdev->set_diag(hdev, enable);
162 hci_req_sync_unlock(hdev);
163
164 if (err < 0)
165 return err;
166
167 done:
168 if (enable)
169 hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
170 else
171 hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);
172
173 return count;
174 }
175
176 static const struct file_operations vendor_diag_fops = {
177 .open = simple_open,
178 .read = vendor_diag_read,
179 .write = vendor_diag_write,
180 .llseek = default_llseek,
181 };
182
183 static void hci_debugfs_create_basic(struct hci_dev *hdev)
184 {
185 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
186 &dut_mode_fops);
187
188 if (hdev->set_diag)
189 debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
190 &vendor_diag_fops);
191 }
192
193 static int hci_reset_req(struct hci_request *req, unsigned long opt)
194 {
195 BT_DBG("%s %ld", req->hdev->name, opt);
196
197 /* Reset device */
198 set_bit(HCI_RESET, &req->hdev->flags);
199 hci_req_add(req, HCI_OP_RESET, 0, NULL);
200 return 0;
201 }
202
203 static void bredr_init(struct hci_request *req)
204 {
205 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
206
207 /* Read Local Supported Features */
208 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
209
210 /* Read Local Version */
211 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
212
213 /* Read BD Address */
214 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
215 }
216
217 static void amp_init1(struct hci_request *req)
218 {
219 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
220
221 /* Read Local Version */
222 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
223
224 /* Read Local Supported Commands */
225 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
226
227 /* Read Local AMP Info */
228 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
229
230 /* Read Data Blk size */
231 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
232
233 /* Read Flow Control Mode */
234 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
235
236 /* Read Location Data */
237 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
238 }
239
240 static int amp_init2(struct hci_request *req)
241 {
242 /* Read Local Supported Features. Not all AMP controllers
243 * support this so it's placed conditionally in the second
244 * stage init.
245 */
246 if (req->hdev->commands[14] & 0x20)
247 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
248
249 return 0;
250 }
251
252 static int hci_init1_req(struct hci_request *req, unsigned long opt)
253 {
254 struct hci_dev *hdev = req->hdev;
255
256 BT_DBG("%s %ld", hdev->name, opt);
257
258 /* Reset */
259 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
260 hci_reset_req(req, 0);
261
262 switch (hdev->dev_type) {
263 case HCI_PRIMARY:
264 bredr_init(req);
265 break;
266 case HCI_AMP:
267 amp_init1(req);
268 break;
269 default:
270 bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
271 break;
272 }
273
274 return 0;
275 }
276
277 static void bredr_setup(struct hci_request *req)
278 {
279 __le16 param;
280 __u8 flt_type;
281
282 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
283 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
284
285 /* Read Class of Device */
286 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
287
288 /* Read Local Name */
289 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
290
291 /* Read Voice Setting */
292 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
293
294 /* Read Number of Supported IAC */
295 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
296
297 /* Read Current IAC LAP */
298 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
299
300 /* Clear Event Filters */
301 flt_type = HCI_FLT_CLEAR_ALL;
302 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
303
304 /* Connection accept timeout ~20 secs */
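/* 0x7d00 = 32000 baseband slots of 0.625 ms each, i.e. 20 seconds. */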
305 param = cpu_to_le16(0x7d00);
306 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
307 }
308
309 static void le_setup(struct hci_request *req)
310 {
311 struct hci_dev *hdev = req->hdev;
312
313 /* Read LE Buffer Size */
314 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
315
316 /* Read LE Local Supported Features */
317 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
318
319 /* Read LE Supported States */
320 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
321
322 /* LE-only controllers have LE implicitly enabled */
323 if (!lmp_bredr_capable(hdev))
324 hci_dev_set_flag(hdev, HCI_LE_ENABLED);
325 }
326
327 static void hci_setup_event_mask(struct hci_request *req)
328 {
329 struct hci_dev *hdev = req->hdev;
330
331 /* The second byte is 0xff instead of 0x9f (two reserved bits
332 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
333 * command otherwise.
334 */
335 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
336
337 /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
338 * any event mask for pre-1.2 devices.
339 */
340 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
341 return;
342
343 if (lmp_bredr_capable(hdev)) {
344 events[4] |= 0x01; /* Flow Specification Complete */
345 } else {
346 /* Use a different default for LE-only devices */
347 memset(events, 0, sizeof(events));
348 events[1] |= 0x20; /* Command Complete */
349 events[1] |= 0x40; /* Command Status */
350 events[1] |= 0x80; /* Hardware Error */
351
352 /* If the controller supports the Disconnect command, enable
353 * the corresponding event. In addition enable packet flow
354 * control related events.
355 */
356 if (hdev->commands[0] & 0x20) {
357 events[0] |= 0x10; /* Disconnection Complete */
358 events[2] |= 0x04; /* Number of Completed Packets */
359 events[3] |= 0x02; /* Data Buffer Overflow */
360 }
361
362 /* If the controller supports the Read Remote Version
363 * Information command, enable the corresponding event.
364 */
365 if (hdev->commands[2] & 0x80)
366 events[1] |= 0x08; /* Read Remote Version Information
367 * Complete
368 */
369
370 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
371 events[0] |= 0x80; /* Encryption Change */
372 events[5] |= 0x80; /* Encryption Key Refresh Complete */
373 }
374 }
375
376 if (lmp_inq_rssi_capable(hdev) ||
377 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
378 events[4] |= 0x02; /* Inquiry Result with RSSI */
379
380 if (lmp_ext_feat_capable(hdev))
381 events[4] |= 0x04; /* Read Remote Extended Features Complete */
382
383 if (lmp_esco_capable(hdev)) {
384 events[5] |= 0x08; /* Synchronous Connection Complete */
385 events[5] |= 0x10; /* Synchronous Connection Changed */
386 }
387
388 if (lmp_sniffsubr_capable(hdev))
389 events[5] |= 0x20; /* Sniff Subrating */
390
391 if (lmp_pause_enc_capable(hdev))
392 events[5] |= 0x80; /* Encryption Key Refresh Complete */
393
394 if (lmp_ext_inq_capable(hdev))
395 events[5] |= 0x40; /* Extended Inquiry Result */
396
397 if (lmp_no_flush_capable(hdev))
398 events[7] |= 0x01; /* Enhanced Flush Complete */
399
400 if (lmp_lsto_capable(hdev))
401 events[6] |= 0x80; /* Link Supervision Timeout Changed */
402
403 if (lmp_ssp_capable(hdev)) {
404 events[6] |= 0x01; /* IO Capability Request */
405 events[6] |= 0x02; /* IO Capability Response */
406 events[6] |= 0x04; /* User Confirmation Request */
407 events[6] |= 0x08; /* User Passkey Request */
408 events[6] |= 0x10; /* Remote OOB Data Request */
409 events[6] |= 0x20; /* Simple Pairing Complete */
410 events[7] |= 0x04; /* User Passkey Notification */
411 events[7] |= 0x08; /* Keypress Notification */
412 events[7] |= 0x10; /* Remote Host Supported
413 * Features Notification
414 */
415 }
416
417 if (lmp_le_capable(hdev))
418 events[7] |= 0x20; /* LE Meta-Event */
419
420 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
421 }
422
423 static int hci_init2_req(struct hci_request *req, unsigned long opt)
424 {
425 struct hci_dev *hdev = req->hdev;
426
427 if (hdev->dev_type == HCI_AMP)
428 return amp_init2(req);
429
430 if (lmp_bredr_capable(hdev))
431 bredr_setup(req);
432 else
433 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
434
435 if (lmp_le_capable(hdev))
436 le_setup(req);
437
438 /* All Bluetooth 1.2 and later controllers should support the
439 * HCI command for reading the local supported commands.
440 *
441 * Unfortunately some controllers indicate Bluetooth 1.2 support,
442 * but do not have support for this command. If that is the case,
443 * the driver can quirk the behavior and skip reading the local
444 * supported commands.
445 */
446 if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
447 !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
448 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
449
450 if (lmp_ssp_capable(hdev)) {
451 /* When SSP is available, then the host features page
452 * should be available as well. However some
453 * controllers list the max_page as 0 as long as SSP
454 * has not been enabled. To achieve proper debugging
455 * output, force the minimum max_page to 1 at least.
456 */
457 hdev->max_page = 0x01;
458
459 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
460 u8 mode = 0x01;
461
462 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
463 sizeof(mode), &mode);
464 } else {
465 struct hci_cp_write_eir cp;
466
467 memset(hdev->eir, 0, sizeof(hdev->eir));
468 memset(&cp, 0, sizeof(cp));
469
470 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
471 }
472 }
473
474 if (lmp_inq_rssi_capable(hdev) ||
475 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
476 u8 mode;
477
478 /* If Extended Inquiry Result events are supported, then
479 * they are clearly preferred over Inquiry Result with RSSI
480 * events.
481 */
482 mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
483
484 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
485 }
486
487 if (lmp_inq_tx_pwr_capable(hdev))
488 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
489
490 if (lmp_ext_feat_capable(hdev)) {
491 struct hci_cp_read_local_ext_features cp;
492
493 cp.page = 0x01;
494 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
495 sizeof(cp), &cp);
496 }
497
498 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
499 u8 enable = 1;
500 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
501 &enable);
502 }
503
504 return 0;
505 }
506
507 static void hci_setup_link_policy(struct hci_request *req)
508 {
509 struct hci_dev *hdev = req->hdev;
510 struct hci_cp_write_def_link_policy cp;
511 u16 link_policy = 0;
512
513 if (lmp_rswitch_capable(hdev))
514 link_policy |= HCI_LP_RSWITCH;
515 if (lmp_hold_capable(hdev))
516 link_policy |= HCI_LP_HOLD;
517 if (lmp_sniff_capable(hdev))
518 link_policy |= HCI_LP_SNIFF;
519 if (lmp_park_capable(hdev))
520 link_policy |= HCI_LP_PARK;
521
522 cp.policy = cpu_to_le16(link_policy);
523 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
524 }
525
526 static void hci_set_le_support(struct hci_request *req)
527 {
528 struct hci_dev *hdev = req->hdev;
529 struct hci_cp_write_le_host_supported cp;
530
531 /* LE-only devices do not support explicit enablement */
532 if (!lmp_bredr_capable(hdev))
533 return;
534
535 memset(&cp, 0, sizeof(cp));
536
537 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
538 cp.le = 0x01;
539 cp.simul = 0x00;
540 }
541
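/* Only send the command if the desired host LE setting differs from what
 * the controller currently reports, avoiding a redundant command.
 */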
542 if (cp.le != lmp_host_le_capable(hdev))
543 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
544 &cp);
545 }
546
547 static void hci_set_event_mask_page_2(struct hci_request *req)
548 {
549 struct hci_dev *hdev = req->hdev;
550 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
551 bool changed = false;
552
553 /* If Connectionless Slave Broadcast master role is supported,
554 * enable all necessary events for it.
555 */
556 if (lmp_csb_master_capable(hdev)) {
557 events[1] |= 0x40; /* Triggered Clock Capture */
558 events[1] |= 0x80; /* Synchronization Train Complete */
559 events[2] |= 0x10; /* Slave Page Response Timeout */
560 events[2] |= 0x20; /* CSB Channel Map Change */
561 changed = true;
562 }
563
564 /* If Connectionless Slave Broadcast slave role is supported,
565 * enable all necessary events for it.
566 */
567 if (lmp_csb_slave_capable(hdev)) {
568 events[2] |= 0x01; /* Synchronization Train Received */
569 events[2] |= 0x02; /* CSB Receive */
570 events[2] |= 0x04; /* CSB Timeout */
571 events[2] |= 0x08; /* Truncated Page Complete */
572 changed = true;
573 }
574
575 /* Enable Authenticated Payload Timeout Expired event if supported */
576 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
577 events[2] |= 0x80;
578 changed = true;
579 }
580
581 /* Some Broadcom based controllers indicate support for the Set
582 * Event Mask Page 2 command, but then actually do not support it. Since
583 * the default value is all bits set to zero, the command is only
584 * required if the event mask has to be changed. In case no change
585 * to the event mask is needed, skip this command.
586 */
587 if (changed)
588 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
589 sizeof(events), events);
590 }
591
592 static int hci_init3_req(struct hci_request *req, unsigned long opt)
593 {
594 struct hci_dev *hdev = req->hdev;
595 u8 p;
596
597 hci_setup_event_mask(req);
598
599 if (hdev->commands[6] & 0x20 &&
600 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
601 struct hci_cp_read_stored_link_key cp;
602
603 bacpy(&cp.bdaddr, BDADDR_ANY);
604 cp.read_all = 0x01;
605 hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
606 }
607
608 if (hdev->commands[5] & 0x10)
609 hci_setup_link_policy(req);
610
611 if (hdev->commands[8] & 0x01)
612 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
613
614 /* Some older Broadcom based Bluetooth 1.2 controllers do not
615 * support the Read Page Scan Type command. Check support for
616 * this command in the bit mask of supported commands.
617 */
618 if (hdev->commands[13] & 0x01)
619 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
620
621 if (lmp_le_capable(hdev)) {
622 u8 events[8];
623
624 memset(events, 0, sizeof(events));
625
626 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
627 events[0] |= 0x10; /* LE Long Term Key Request */
628
629 /* If the controller supports the Connection Parameters Request
630 * Link Layer Procedure, enable the corresponding event.
631 */
632 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
633 events[0] |= 0x20; /* LE Remote Connection
634 * Parameter Request
635 */
636
637 /* If the controller supports the Data Length Extension
638 * feature, enable the corresponding event.
639 */
640 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
641 events[0] |= 0x40; /* LE Data Length Change */
642
643 /* If the controller supports Extended Scanner Filter
644 * Policies, enable the corresponding event.
645 */
646 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
647 events[1] |= 0x04; /* LE Direct Advertising
648 * Report
649 */
650
651 /* If the controller supports the Channel Selection Algorithm #2
652 * feature, enable the corresponding event.
653 */
654 if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
655 events[2] |= 0x08; /* LE Channel Selection
656 * Algorithm
657 */
658
659 /* If the controller supports the LE Set Scan Enable command,
660 * enable the corresponding advertising report event.
661 */
662 if (hdev->commands[26] & 0x08)
663 events[0] |= 0x02; /* LE Advertising Report */
664
665 /* If the controller supports the LE Create Connection
666 * command, enable the corresponding event.
667 */
668 if (hdev->commands[26] & 0x10)
669 events[0] |= 0x01; /* LE Connection Complete */
670
671 /* If the controller supports the LE Connection Update
672 * command, enable the corresponding event.
673 */
674 if (hdev->commands[27] & 0x04)
675 events[0] |= 0x04; /* LE Connection Update
676 * Complete
677 */
678
679 /* If the controller supports the LE Read Remote Used Features
680 * command, enable the corresponding event.
681 */
682 if (hdev->commands[27] & 0x20)
683 events[0] |= 0x08; /* LE Read Remote Used
684 * Features Complete
685 */
686
687 /* If the controller supports the LE Read Local P-256
688 * Public Key command, enable the corresponding event.
689 */
690 if (hdev->commands[34] & 0x02)
691 events[0] |= 0x80; /* LE Read Local P-256
692 * Public Key Complete
693 */
694
695 /* If the controller supports the LE Generate DHKey
696 * command, enable the corresponding event.
697 */
698 if (hdev->commands[34] & 0x04)
699 events[1] |= 0x01; /* LE Generate DHKey Complete */
700
701 /* If the controller supports the LE Set Default PHY or
702 * LE Set PHY commands, enable the corresponding event.
703 */
704 if (hdev->commands[35] & (0x20 | 0x40))
705 events[1] |= 0x08; /* LE PHY Update Complete */
706
707 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
708 events);
709
710 if (hdev->commands[25] & 0x40) {
711 /* Read LE Advertising Channel TX Power */
712 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
713 }
714
715 if (hdev->commands[26] & 0x40) {
716 /* Read LE White List Size */
717 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
718 0, NULL);
719 }
720
721 if (hdev->commands[26] & 0x80) {
722 /* Clear LE White List */
723 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
724 }
725
726 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
727 /* Read LE Maximum Data Length */
728 hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);
729
730 /* Read LE Suggested Default Data Length */
731 hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
732 }
733
734 hci_set_le_support(req);
735 }
736
737 /* Read features beyond page 1 if available */
738 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
739 struct hci_cp_read_local_ext_features cp;
740
741 cp.page = p;
742 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
743 sizeof(cp), &cp);
744 }
745
746 return 0;
747 }
748
749 static int hci_init4_req(struct hci_request *req, unsigned long opt)
750 {
751 struct hci_dev *hdev = req->hdev;
752
753 /* Some Broadcom based Bluetooth controllers do not support the
754 * Delete Stored Link Key command. They are clearly indicating its
755 * absence in the bit mask of supported commands.
756 *
757 * Check the supported commands and only if the command is marked
758 * as supported send it. If not supported assume that the controller
759 * does not have actual support for stored link keys which makes this
760 * command redundant anyway.
761 *
762 * Some controllers indicate that they support handling deleting
763 * stored link keys, but they don't. The quirk lets a driver
764 * just disable this command.
765 */
766 if (hdev->commands[6] & 0x80 &&
767 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
768 struct hci_cp_delete_stored_link_key cp;
769
770 bacpy(&cp.bdaddr, BDADDR_ANY);
771 cp.delete_all = 0x01;
772 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
773 sizeof(cp), &cp);
774 }
775
776 /* Set event mask page 2 if the HCI command for it is supported */
777 if (hdev->commands[22] & 0x04)
778 hci_set_event_mask_page_2(req);
779
780 /* Read local codec list if the HCI command is supported */
781 if (hdev->commands[29] & 0x20)
782 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
783
784 /* Get MWS transport configuration if the HCI command is supported */
785 if (hdev->commands[30] & 0x08)
786 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
787
788 /* Check for Synchronization Train support */
789 if (lmp_sync_train_capable(hdev))
790 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
791
792 /* Enable Secure Connections if supported and configured */
793 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
794 bredr_sc_enabled(hdev)) {
795 u8 support = 0x01;
796
797 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
798 sizeof(support), &support);
799 }
800
801 /* Set Suggested Default Data Length to maximum if supported */
802 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
803 struct hci_cp_le_write_def_data_len cp;
804
805 cp.tx_len = hdev->le_max_tx_len;
806 cp.tx_time = hdev->le_max_tx_time;
807 hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
808 }
809
810 /* Set Default PHY parameters if command is supported */
811 if (hdev->commands[35] & 0x20) {
812 struct hci_cp_le_set_default_phy cp;
813
814 /* No transmitter PHY or receiver PHY preferences */
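/* In all_phys, bit 0 means no transmitter PHY preference and bit 1 means
 * no receiver PHY preference, hence 0x03 for neither.
 */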
815 cp.all_phys = 0x03;
816 cp.tx_phys = 0;
817 cp.rx_phys = 0;
818
819 hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
820 }
821
822 return 0;
823 }
824
825 static int __hci_init(struct hci_dev *hdev)
826 {
827 int err;
828
829 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
830 if (err < 0)
831 return err;
832
833 if (hci_dev_test_flag(hdev, HCI_SETUP))
834 hci_debugfs_create_basic(hdev);
835
836 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
837 if (err < 0)
838 return err;
839
840 /* HCI_PRIMARY covers single-mode LE, single-mode BR/EDR and dual-mode
841 * BR/EDR/LE controllers. AMP controllers only need the
842 * first two stages of init.
843 */
844 if (hdev->dev_type != HCI_PRIMARY)
845 return 0;
846
847 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
848 if (err < 0)
849 return err;
850
851 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
852 if (err < 0)
853 return err;
854
855 /* This function is only called when the controller is actually in
856 * configured state. When the controller is marked as unconfigured,
857 * this initialization procedure is not run.
858 *
859 * It means that it is possible that a controller runs through its
860 * setup phase and then discovers missing settings. If that is the
861 * case, then this function will not be called. It then will only
862 * be called during the config phase.
863 *
864 * So only when in setup phase or config phase, create the debugfs
865 * entries and register the SMP channels.
866 */
867 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
868 !hci_dev_test_flag(hdev, HCI_CONFIG))
869 return 0;
870
871 hci_debugfs_create_common(hdev);
872
873 if (lmp_bredr_capable(hdev))
874 hci_debugfs_create_bredr(hdev);
875
876 if (lmp_le_capable(hdev))
877 hci_debugfs_create_le(hdev);
878
879 return 0;
880 }
881
882 static int hci_init0_req(struct hci_request *req, unsigned long opt)
883 {
884 struct hci_dev *hdev = req->hdev;
885
886 BT_DBG("%s %ld", hdev->name, opt);
887
888 /* Reset */
889 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
890 hci_reset_req(req, 0);
891
892 /* Read Local Version */
893 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
894
895 /* Read BD Address */
896 if (hdev->set_bdaddr)
897 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
898
899 return 0;
900 }
901
902 static int __hci_unconf_init(struct hci_dev *hdev)
903 {
904 int err;
905
906 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
907 return 0;
908
909 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
910 if (err < 0)
911 return err;
912
913 if (hci_dev_test_flag(hdev, HCI_SETUP))
914 hci_debugfs_create_basic(hdev);
915
916 return 0;
917 }
918
919 static int hci_scan_req(struct hci_request *req, unsigned long opt)
920 {
921 __u8 scan = opt;
922
923 BT_DBG("%s %x", req->hdev->name, scan);
924
925 /* Inquiry and Page scans */
926 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
927 return 0;
928 }
929
930 static int hci_auth_req(struct hci_request *req, unsigned long opt)
931 {
932 __u8 auth = opt;
933
934 BT_DBG("%s %x", req->hdev->name, auth);
935
936 /* Authentication */
937 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
938 return 0;
939 }
940
941 static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
942 {
943 __u8 encrypt = opt;
944
945 BT_DBG("%s %x", req->hdev->name, encrypt);
946
947 /* Encryption */
948 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
949 return 0;
950 }
951
952 static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
953 {
954 __le16 policy = cpu_to_le16(opt);
955
956 BT_DBG("%s %x", req->hdev->name, policy);
957
958 /* Default link policy */
959 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
960 return 0;
961 }
962
963 /* Get HCI device by index.
964 * Device is held on return. */
965 struct hci_dev *hci_dev_get(int index)
966 {
967 struct hci_dev *hdev = NULL, *d;
968
969 BT_DBG("%d", index);
970
971 if (index < 0)
972 return NULL;
973
974 read_lock(&hci_dev_list_lock);
975 list_for_each_entry(d, &hci_dev_list, list) {
976 if (d->id == index) {
977 hdev = hci_dev_hold(d);
978 break;
979 }
980 }
981 read_unlock(&hci_dev_list_lock);
982 return hdev;
983 }
984
985 /* ---- Inquiry support ---- */
986
987 bool hci_discovery_active(struct hci_dev *hdev)
988 {
989 struct discovery_state *discov = &hdev->discovery;
990
991 switch (discov->state) {
992 case DISCOVERY_FINDING:
993 case DISCOVERY_RESOLVING:
994 return true;
995
996 default:
997 return false;
998 }
999 }
1000
1001 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1002 {
1003 int old_state = hdev->discovery.state;
1004
1005 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1006
1007 if (old_state == state)
1008 return;
1009
1010 hdev->discovery.state = state;
1011
1012 switch (state) {
1013 case DISCOVERY_STOPPED:
1014 hci_update_background_scan(hdev);
1015
1016 if (old_state != DISCOVERY_STARTING)
1017 mgmt_discovering(hdev, 0);
1018 break;
1019 case DISCOVERY_STARTING:
1020 break;
1021 case DISCOVERY_FINDING:
1022 mgmt_discovering(hdev, 1);
1023 break;
1024 case DISCOVERY_RESOLVING:
1025 break;
1026 case DISCOVERY_STOPPING:
1027 break;
1028 }
1029 }
1030
1031 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1032 {
1033 struct discovery_state *cache = &hdev->discovery;
1034 struct inquiry_entry *p, *n;
1035
1036 list_for_each_entry_safe(p, n, &cache->all, all) {
1037 list_del(&p->all);
1038 kfree(p);
1039 }
1040
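/* All entries are linked on the 'all' list, so freeing that list above is
 * enough; the unknown and resolve lists only referenced the same entries
 * and just need their heads reinitialized.
 */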
1041 INIT_LIST_HEAD(&cache->unknown);
1042 INIT_LIST_HEAD(&cache->resolve);
1043 }
1044
1045 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1046 bdaddr_t *bdaddr)
1047 {
1048 struct discovery_state *cache = &hdev->discovery;
1049 struct inquiry_entry *e;
1050
1051 BT_DBG("cache %p, %pMR", cache, bdaddr);
1052
1053 list_for_each_entry(e, &cache->all, all) {
1054 if (!bacmp(&e->data.bdaddr, bdaddr))
1055 return e;
1056 }
1057
1058 return NULL;
1059 }
1060
1061 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1062 bdaddr_t *bdaddr)
1063 {
1064 struct discovery_state *cache = &hdev->discovery;
1065 struct inquiry_entry *e;
1066
1067 BT_DBG("cache %p, %pMR", cache, bdaddr);
1068
1069 list_for_each_entry(e, &cache->unknown, list) {
1070 if (!bacmp(&e->data.bdaddr, bdaddr))
1071 return e;
1072 }
1073
1074 return NULL;
1075 }
1076
1077 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1078 bdaddr_t *bdaddr,
1079 int state)
1080 {
1081 struct discovery_state *cache = &hdev->discovery;
1082 struct inquiry_entry *e;
1083
1084 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1085
1086 list_for_each_entry(e, &cache->resolve, list) {
1087 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1088 return e;
1089 if (!bacmp(&e->data.bdaddr, bdaddr))
1090 return e;
1091 }
1092
1093 return NULL;
1094 }
1095
1096 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1097 struct inquiry_entry *ie)
1098 {
1099 struct discovery_state *cache = &hdev->discovery;
1100 struct list_head *pos = &cache->resolve;
1101 struct inquiry_entry *p;
1102
1103 list_del(&ie->list);
1104
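/* Re-insert the entry so the resolve list stays sorted by ascending |RSSI|,
 * letting the strongest (closest) devices get their names resolved first;
 * entries already pending name resolution keep their position.
 */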
1105 list_for_each_entry(p, &cache->resolve, list) {
1106 if (p->name_state != NAME_PENDING &&
1107 abs(p->data.rssi) >= abs(ie->data.rssi))
1108 break;
1109 pos = &p->list;
1110 }
1111
1112 list_add(&ie->list, pos);
1113 }
1114
1115 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1116 bool name_known)
1117 {
1118 struct discovery_state *cache = &hdev->discovery;
1119 struct inquiry_entry *ie;
1120 u32 flags = 0;
1121
1122 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1123
1124 hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);
1125
1126 if (!data->ssp_mode)
1127 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1128
1129 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1130 if (ie) {
1131 if (!ie->data.ssp_mode)
1132 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1133
1134 if (ie->name_state == NAME_NEEDED &&
1135 data->rssi != ie->data.rssi) {
1136 ie->data.rssi = data->rssi;
1137 hci_inquiry_cache_update_resolve(hdev, ie);
1138 }
1139
1140 goto update;
1141 }
1142
1143 /* Entry not in the cache. Add new one. */
1144 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
1145 if (!ie) {
1146 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1147 goto done;
1148 }
1149
1150 list_add(&ie->all, &cache->all);
1151
1152 if (name_known) {
1153 ie->name_state = NAME_KNOWN;
1154 } else {
1155 ie->name_state = NAME_NOT_KNOWN;
1156 list_add(&ie->list, &cache->unknown);
1157 }
1158
1159 update:
1160 if (name_known && ie->name_state != NAME_KNOWN &&
1161 ie->name_state != NAME_PENDING) {
1162 ie->name_state = NAME_KNOWN;
1163 list_del(&ie->list);
1164 }
1165
1166 memcpy(&ie->data, data, sizeof(*data));
1167 ie->timestamp = jiffies;
1168 cache->timestamp = jiffies;
1169
1170 if (ie->name_state == NAME_NOT_KNOWN)
1171 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
1172
1173 done:
1174 return flags;
1175 }
1176
1177 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1178 {
1179 struct discovery_state *cache = &hdev->discovery;
1180 struct inquiry_info *info = (struct inquiry_info *) buf;
1181 struct inquiry_entry *e;
1182 int copied = 0;
1183
1184 list_for_each_entry(e, &cache->all, all) {
1185 struct inquiry_data *data = &e->data;
1186
1187 if (copied >= num)
1188 break;
1189
1190 bacpy(&info->bdaddr, &data->bdaddr);
1191 info->pscan_rep_mode = data->pscan_rep_mode;
1192 info->pscan_period_mode = data->pscan_period_mode;
1193 info->pscan_mode = data->pscan_mode;
1194 memcpy(info->dev_class, data->dev_class, 3);
1195 info->clock_offset = data->clock_offset;
1196
1197 info++;
1198 copied++;
1199 }
1200
1201 BT_DBG("cache %p, copied %d", cache, copied);
1202 return copied;
1203 }
1204
1205 static int hci_inq_req(struct hci_request *req, unsigned long opt)
1206 {
1207 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1208 struct hci_dev *hdev = req->hdev;
1209 struct hci_cp_inquiry cp;
1210
1211 BT_DBG("%s", hdev->name);
1212
1213 if (test_bit(HCI_INQUIRY, &hdev->flags))
1214 return 0;
1215
1216 /* Start Inquiry */
1217 memcpy(&cp.lap, &ir->lap, 3);
1218 cp.length = ir->length;
1219 cp.num_rsp = ir->num_rsp;
1220 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1221
1222 return 0;
1223 }
1224
1225 int hci_inquiry(void __user *arg)
1226 {
1227 __u8 __user *ptr = arg;
1228 struct hci_inquiry_req ir;
1229 struct hci_dev *hdev;
1230 int err = 0, do_inquiry = 0, max_rsp;
1231 long timeo;
1232 __u8 *buf;
1233
1234 if (copy_from_user(&ir, ptr, sizeof(ir)))
1235 return -EFAULT;
1236
1237 hdev = hci_dev_get(ir.dev_id);
1238 if (!hdev)
1239 return -ENODEV;
1240
1241 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1242 err = -EBUSY;
1243 goto done;
1244 }
1245
1246 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1247 err = -EOPNOTSUPP;
1248 goto done;
1249 }
1250
1251 if (hdev->dev_type != HCI_PRIMARY) {
1252 err = -EOPNOTSUPP;
1253 goto done;
1254 }
1255
1256 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1257 err = -EOPNOTSUPP;
1258 goto done;
1259 }
1260
1261 hci_dev_lock(hdev);
1262 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1263 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1264 hci_inquiry_cache_flush(hdev);
1265 do_inquiry = 1;
1266 }
1267 hci_dev_unlock(hdev);
1268
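/* ir.length is expressed in units of 1.28 seconds (the HCI Inquiry_Length
 * unit); allowing 2 seconds per unit here leaves some extra headroom.
 */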
1269 timeo = ir.length * msecs_to_jiffies(2000);
1270
1271 if (do_inquiry) {
1272 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1273 timeo, NULL);
1274 if (err < 0)
1275 goto done;
1276
1277 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1278 * cleared). If it is interrupted by a signal, return -EINTR.
1279 */
1280 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
1281 TASK_INTERRUPTIBLE))
1282 return -EINTR;
1283 }
1284
1285 /* For an unlimited number of responses we use a buffer with
1286 * 255 entries.
1287 */
1288 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1289
1290 /* cache_dump can't sleep. Therefore we allocate a temporary buffer and
1291 * then copy it to user space.
1292 */
1293 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
1294 if (!buf) {
1295 err = -ENOMEM;
1296 goto done;
1297 }
1298
1299 hci_dev_lock(hdev);
1300 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1301 hci_dev_unlock(hdev);
1302
1303 BT_DBG("num_rsp %d", ir.num_rsp);
1304
1305 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1306 ptr += sizeof(ir);
1307 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1308 ir.num_rsp))
1309 err = -EFAULT;
1310 } else
1311 err = -EFAULT;
1312
1313 kfree(buf);
1314
1315 done:
1316 hci_dev_put(hdev);
1317 return err;
1318 }
1319
1320 static int hci_dev_do_open(struct hci_dev *hdev)
1321 {
1322 int ret = 0;
1323
1324 BT_DBG("%s %p", hdev->name, hdev);
1325
1326 hci_req_sync_lock(hdev);
1327
1328 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
1329 ret = -ENODEV;
1330 goto done;
1331 }
1332
1333 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1334 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
1335 /* Check for rfkill but allow the HCI setup stage to
1336 * proceed (which in itself doesn't cause any RF activity).
1337 */
1338 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
1339 ret = -ERFKILL;
1340 goto done;
1341 }
1342
1343 /* Check for valid public address or a configured static
1344 * random address, but let the HCI setup proceed to
1345 * be able to determine if there is a public address
1346 * or not.
1347 *
1348 * In case of user channel usage, it is not important
1349 * if a public address or static random address is
1350 * available.
1351 *
1352 * This check is only valid for BR/EDR controllers
1353 * since AMP controllers do not have an address.
1354 */
1355 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1356 hdev->dev_type == HCI_PRIMARY &&
1357 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1358 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1359 ret = -EADDRNOTAVAIL;
1360 goto done;
1361 }
1362 }
1363
1364 if (test_bit(HCI_UP, &hdev->flags)) {
1365 ret = -EALREADY;
1366 goto done;
1367 }
1368
1369 if (hdev->open(hdev)) {
1370 ret = -EIO;
1371 goto done;
1372 }
1373
1374 set_bit(HCI_RUNNING, &hdev->flags);
1375 hci_sock_dev_event(hdev, HCI_DEV_OPEN);
1376
1377 atomic_set(&hdev->cmd_cnt, 1);
1378 set_bit(HCI_INIT, &hdev->flags);
1379
1380 if (hci_dev_test_flag(hdev, HCI_SETUP)) {
1381 hci_sock_dev_event(hdev, HCI_DEV_SETUP);
1382
1383 if (hdev->setup)
1384 ret = hdev->setup(hdev);
1385
1386 /* The transport driver can set these quirks before
1387 * creating the HCI device or in its setup callback.
1388 *
1389 * In case any of them is set, the controller has to
1390 * start up as unconfigured.
1391 */
1392 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
1393 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
1394 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
1395
1396 /* For an unconfigured controller it is required to
1397 * read at least the version information provided by
1398 * the Read Local Version Information command.
1399 *
1400 * If the set_bdaddr driver callback is provided, then
1401 * also the original Bluetooth public device address
1402 * will be read using the Read BD Address command.
1403 */
1404 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
1405 ret = __hci_unconf_init(hdev);
1406 }
1407
1408 if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
1409 /* If public address change is configured, ensure that
1410 * the address gets programmed. If the driver does not
1411 * support changing the public address, fail the power
1412 * on procedure.
1413 */
1414 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
1415 hdev->set_bdaddr)
1416 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
1417 else
1418 ret = -EADDRNOTAVAIL;
1419 }
1420
1421 if (!ret) {
1422 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1423 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1424 ret = __hci_init(hdev);
1425 if (!ret && hdev->post_init)
1426 ret = hdev->post_init(hdev);
1427 }
1428 }
1429
1430 /* If the HCI Reset command is clearing all diagnostic settings,
1431 * then they need to be reprogrammed after the init procedure
1432 * has completed.
1433 */
1434 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
1435 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1436 hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
1437 ret = hdev->set_diag(hdev, true);
1438
1439 clear_bit(HCI_INIT, &hdev->flags);
1440
1441 if (!ret) {
1442 hci_dev_hold(hdev);
1443 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1444 set_bit(HCI_UP, &hdev->flags);
1445 hci_sock_dev_event(hdev, HCI_DEV_UP);
1446 hci_leds_update_powered(hdev, true);
1447 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1448 !hci_dev_test_flag(hdev, HCI_CONFIG) &&
1449 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1450 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1451 hci_dev_test_flag(hdev, HCI_MGMT) &&
1452 hdev->dev_type == HCI_PRIMARY) {
1453 ret = __hci_req_hci_power_on(hdev);
1454 mgmt_power_on(hdev, ret);
1455 }
1456 } else {
1457 /* Init failed, cleanup */
1458 flush_work(&hdev->tx_work);
1459 flush_work(&hdev->cmd_work);
1460 flush_work(&hdev->rx_work);
1461
1462 skb_queue_purge(&hdev->cmd_q);
1463 skb_queue_purge(&hdev->rx_q);
1464
1465 if (hdev->flush)
1466 hdev->flush(hdev);
1467
1468 if (hdev->sent_cmd) {
1469 kfree_skb(hdev->sent_cmd);
1470 hdev->sent_cmd = NULL;
1471 }
1472
1473 clear_bit(HCI_RUNNING, &hdev->flags);
1474 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1475
1476 hdev->close(hdev);
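/* Keep only the HCI_RAW bit; all other runtime flags are dropped after the
 * failed initialization.
 */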
1477 hdev->flags &= BIT(HCI_RAW);
1478 }
1479
1480 done:
1481 hci_req_sync_unlock(hdev);
1482 return ret;
1483 }
1484
1485 /* ---- HCI ioctl helpers ---- */
1486
1487 int hci_dev_open(__u16 dev)
1488 {
1489 struct hci_dev *hdev;
1490 int err;
1491
1492 hdev = hci_dev_get(dev);
1493 if (!hdev)
1494 return -ENODEV;
1495
1496 /* Devices that are marked as unconfigured can only be powered
1497 * up as user channel. Trying to bring them up as normal devices
1498 * will result in a failure. Only user channel operation is
1499 * possible.
1500 *
1501 * When this function is called for a user channel, the flag
1502 * HCI_USER_CHANNEL will be set first before attempting to
1503 * open the device.
1504 */
1505 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
1506 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1507 err = -EOPNOTSUPP;
1508 goto done;
1509 }
1510
1511 /* We need to ensure that no other power on/off work is pending
1512 * before proceeding to call hci_dev_do_open. This is
1513 * particularly important if the setup procedure has not yet
1514 * completed.
1515 */
1516 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1517 cancel_delayed_work(&hdev->power_off);
1518
1519 /* After this call it is guaranteed that the setup procedure
1520 * has finished. This means that error conditions like RFKILL
1521 * or no valid public or static random address apply.
1522 */
1523 flush_workqueue(hdev->req_workqueue);
1524
1525 /* For controllers not using the management interface and that
1526 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
1527 * so that pairing works for them. Once the management interface
1528 * is in use this bit will be cleared again and userspace has
1529 * to explicitly enable it.
1530 */
1531 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1532 !hci_dev_test_flag(hdev, HCI_MGMT))
1533 hci_dev_set_flag(hdev, HCI_BONDABLE);
1534
1535 err = hci_dev_do_open(hdev);
1536
1537 done:
1538 hci_dev_put(hdev);
1539 return err;
1540 }
1541
1542 /* This function requires the caller holds hdev->lock */
1543 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1544 {
1545 struct hci_conn_params *p;
1546
1547 list_for_each_entry(p, &hdev->le_conn_params, list) {
1548 if (p->conn) {
1549 hci_conn_drop(p->conn);
1550 hci_conn_put(p->conn);
1551 p->conn = NULL;
1552 }
1553 list_del_init(&p->action);
1554 }
1555
1556 BT_DBG("All LE pending actions cleared");
1557 }
1558
1559 int hci_dev_do_close(struct hci_dev *hdev)
1560 {
1561 bool auto_off;
1562
1563 BT_DBG("%s %p", hdev->name, hdev);
1564
1565 if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
1566 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1567 test_bit(HCI_UP, &hdev->flags)) {
1568 /* Execute vendor specific shutdown routine */
1569 if (hdev->shutdown)
1570 hdev->shutdown(hdev);
1571 }
1572
1573 cancel_delayed_work(&hdev->power_off);
1574
1575 hci_request_cancel_all(hdev);
1576 hci_req_sync_lock(hdev);
1577
1578 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1579 cancel_delayed_work_sync(&hdev->cmd_timer);
1580 hci_req_sync_unlock(hdev);
1581 return 0;
1582 }
1583
1584 hci_leds_update_powered(hdev, false);
1585
1586 /* Flush RX and TX works */
1587 flush_work(&hdev->tx_work);
1588 flush_work(&hdev->rx_work);
1589
1590 if (hdev->discov_timeout > 0) {
1591 hdev->discov_timeout = 0;
1592 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1593 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1594 }
1595
1596 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
1597 cancel_delayed_work(&hdev->service_cache);
1598
1599 if (hci_dev_test_flag(hdev, HCI_MGMT))
1600 cancel_delayed_work_sync(&hdev->rpa_expired);
1601
1602 /* Avoid potential lockdep warnings from the *_flush() calls by
1603 * ensuring the workqueue is empty up front.
1604 */
1605 drain_workqueue(hdev->workqueue);
1606
1607 hci_dev_lock(hdev);
1608
1609 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1610
1611 auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
1612
1613 if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
1614 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
1615 hci_dev_test_flag(hdev, HCI_MGMT))
1616 __mgmt_power_off(hdev);
1617
1618 hci_inquiry_cache_flush(hdev);
1619 hci_pend_le_actions_clear(hdev);
1620 hci_conn_hash_flush(hdev);
1621 hci_dev_unlock(hdev);
1622
1623 smp_unregister(hdev);
1624
1625 hci_sock_dev_event(hdev, HCI_DEV_DOWN);
1626
1627 if (hdev->flush)
1628 hdev->flush(hdev);
1629
1630 /* Reset device */
1631 skb_queue_purge(&hdev->cmd_q);
1632 atomic_set(&hdev->cmd_cnt, 1);
1633 if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
1634 !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1635 set_bit(HCI_INIT, &hdev->flags);
1636 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
1637 clear_bit(HCI_INIT, &hdev->flags);
1638 }
1639
1640 /* flush cmd work */
1641 flush_work(&hdev->cmd_work);
1642
1643 /* Drop queues */
1644 skb_queue_purge(&hdev->rx_q);
1645 skb_queue_purge(&hdev->cmd_q);
1646 skb_queue_purge(&hdev->raw_q);
1647
1648 /* Drop last sent command */
1649 if (hdev->sent_cmd) {
1650 cancel_delayed_work_sync(&hdev->cmd_timer);
1651 kfree_skb(hdev->sent_cmd);
1652 hdev->sent_cmd = NULL;
1653 }
1654
1655 clear_bit(HCI_RUNNING, &hdev->flags);
1656 hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
1657
1658 /* After this point our queues are empty
1659 * and no tasks are scheduled. */
1660 hdev->close(hdev);
1661
1662 /* Clear flags */
1663 hdev->flags &= BIT(HCI_RAW);
1664 hci_dev_clear_volatile_flags(hdev);
1665
1666 /* Controller radio is available but is currently powered down */
1667 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
1668
1669 memset(hdev->eir, 0, sizeof(hdev->eir));
1670 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1671 bacpy(&hdev->random_addr, BDADDR_ANY);
1672
1673 hci_req_sync_unlock(hdev);
1674
1675 hci_dev_put(hdev);
1676 return 0;
1677 }
1678
1679 int hci_dev_close(__u16 dev)
1680 {
1681 struct hci_dev *hdev;
1682 int err;
1683
1684 hdev = hci_dev_get(dev);
1685 if (!hdev)
1686 return -ENODEV;
1687
1688 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1689 err = -EBUSY;
1690 goto done;
1691 }
1692
1693 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
1694 cancel_delayed_work(&hdev->power_off);
1695
1696 err = hci_dev_do_close(hdev);
1697
1698 done:
1699 hci_dev_put(hdev);
1700 return err;
1701 }
1702
1703 static int hci_dev_do_reset(struct hci_dev *hdev)
1704 {
1705 int ret;
1706
1707 BT_DBG("%s %p", hdev->name, hdev);
1708
1709 hci_req_sync_lock(hdev);
1710
1711 /* Drop queues */
1712 skb_queue_purge(&hdev->rx_q);
1713 skb_queue_purge(&hdev->cmd_q);
1714
1715 /* Avoid potential lockdep warnings from the *_flush() calls by
1716 * ensuring the workqueue is empty up front.
1717 */
1718 drain_workqueue(hdev->workqueue);
1719
1720 hci_dev_lock(hdev);
1721 hci_inquiry_cache_flush(hdev);
1722 hci_conn_hash_flush(hdev);
1723 hci_dev_unlock(hdev);
1724
1725 if (hdev->flush)
1726 hdev->flush(hdev);
1727
1728 atomic_set(&hdev->cmd_cnt, 1);
1729 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1730
1731 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
1732
1733 hci_req_sync_unlock(hdev);
1734 return ret;
1735 }
1736
1737 int hci_dev_reset(__u16 dev)
1738 {
1739 struct hci_dev *hdev;
1740 int err;
1741
1742 hdev = hci_dev_get(dev);
1743 if (!hdev)
1744 return -ENODEV;
1745
1746 if (!test_bit(HCI_UP, &hdev->flags)) {
1747 err = -ENETDOWN;
1748 goto done;
1749 }
1750
1751 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1752 err = -EBUSY;
1753 goto done;
1754 }
1755
1756 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1757 err = -EOPNOTSUPP;
1758 goto done;
1759 }
1760
1761 err = hci_dev_do_reset(hdev);
1762
1763 done:
1764 hci_dev_put(hdev);
1765 return err;
1766 }
1767
1768 int hci_dev_reset_stat(__u16 dev)
1769 {
1770 struct hci_dev *hdev;
1771 int ret = 0;
1772
1773 hdev = hci_dev_get(dev);
1774 if (!hdev)
1775 return -ENODEV;
1776
1777 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1778 ret = -EBUSY;
1779 goto done;
1780 }
1781
1782 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1783 ret = -EOPNOTSUPP;
1784 goto done;
1785 }
1786
1787 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1788
1789 done:
1790 hci_dev_put(hdev);
1791 return ret;
1792 }
1793
1794 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1795 {
1796 bool conn_changed, discov_changed;
1797
1798 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1799
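/* Page scan corresponds to the mgmt 'connectable' setting and inquiry scan
 * to the 'discoverable' setting; sync the flags with the new scan mode.
 */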
1800 if ((scan & SCAN_PAGE))
1801 conn_changed = !hci_dev_test_and_set_flag(hdev,
1802 HCI_CONNECTABLE);
1803 else
1804 conn_changed = hci_dev_test_and_clear_flag(hdev,
1805 HCI_CONNECTABLE);
1806
1807 if ((scan & SCAN_INQUIRY)) {
1808 discov_changed = !hci_dev_test_and_set_flag(hdev,
1809 HCI_DISCOVERABLE);
1810 } else {
1811 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1812 discov_changed = hci_dev_test_and_clear_flag(hdev,
1813 HCI_DISCOVERABLE);
1814 }
1815
1816 if (!hci_dev_test_flag(hdev, HCI_MGMT))
1817 return;
1818
1819 if (conn_changed || discov_changed) {
1820 /* In case this was disabled through mgmt */
1821 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
1822
1823 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1824 hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
1825
1826 mgmt_new_settings(hdev);
1827 }
1828 }
1829
1830 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1831 {
1832 struct hci_dev *hdev;
1833 struct hci_dev_req dr;
1834 int err = 0;
1835
1836 if (copy_from_user(&dr, arg, sizeof(dr)))
1837 return -EFAULT;
1838
1839 hdev = hci_dev_get(dr.dev_id);
1840 if (!hdev)
1841 return -ENODEV;
1842
1843 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1844 err = -EBUSY;
1845 goto done;
1846 }
1847
1848 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1849 err = -EOPNOTSUPP;
1850 goto done;
1851 }
1852
1853 if (hdev->dev_type != HCI_PRIMARY) {
1854 err = -EOPNOTSUPP;
1855 goto done;
1856 }
1857
1858 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1859 err = -EOPNOTSUPP;
1860 goto done;
1861 }
1862
1863 switch (cmd) {
1864 case HCISETAUTH:
1865 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1866 HCI_INIT_TIMEOUT, NULL);
1867 break;
1868
1869 case HCISETENCRYPT:
1870 if (!lmp_encrypt_capable(hdev)) {
1871 err = -EOPNOTSUPP;
1872 break;
1873 }
1874
1875 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1876 /* Auth must be enabled first */
1877 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1878 HCI_INIT_TIMEOUT, NULL);
1879 if (err)
1880 break;
1881 }
1882
1883 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1884 HCI_INIT_TIMEOUT, NULL);
1885 break;
1886
1887 case HCISETSCAN:
1888 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1889 HCI_INIT_TIMEOUT, NULL);
1890
1891 /* Ensure that the connectable and discoverable states
1892 * get correctly modified as this was a non-mgmt change.
1893 */
1894 if (!err)
1895 hci_update_scan_state(hdev, dr.dev_opt);
1896 break;
1897
1898 case HCISETLINKPOL:
1899 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1900 HCI_INIT_TIMEOUT, NULL);
1901 break;
1902
1903 case HCISETLINKMODE:
1904 hdev->link_mode = ((__u16) dr.dev_opt) &
1905 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1906 break;
1907
1908 case HCISETPTYPE:
1909 hdev->pkt_type = (__u16) dr.dev_opt;
1910 break;
1911
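/* For the MTU ioctls, dev_opt carries two 16-bit values: the first __u16
 * half (in memory order) is the packet count and the second is the MTU.
 */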
1912 case HCISETACLMTU:
1913 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1914 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1915 break;
1916
1917 case HCISETSCOMTU:
1918 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1919 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1920 break;
1921
1922 default:
1923 err = -EINVAL;
1924 break;
1925 }
1926
1927 done:
1928 hci_dev_put(hdev);
1929 return err;
1930 }
1931
1932 int hci_get_dev_list(void __user *arg)
1933 {
1934 struct hci_dev *hdev;
1935 struct hci_dev_list_req *dl;
1936 struct hci_dev_req *dr;
1937 int n = 0, size, err;
1938 __u16 dev_num;
1939
1940 if (get_user(dev_num, (__u16 __user *) arg))
1941 return -EFAULT;
1942
1943 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1944 return -EINVAL;
1945
1946 size = sizeof(*dl) + dev_num * sizeof(*dr);
1947
1948 dl = kzalloc(size, GFP_KERNEL);
1949 if (!dl)
1950 return -ENOMEM;
1951
1952 dr = dl->dev_req;
1953
1954 read_lock(&hci_dev_list_lock);
1955 list_for_each_entry(hdev, &hci_dev_list, list) {
1956 unsigned long flags = hdev->flags;
1957
1958 /* When the auto-off is configured it means the transport
1959 * is running, but in that case still indicate that the
1960 * device is actually down.
1961 */
1962 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
1963 flags &= ~BIT(HCI_UP);
1964
1965 (dr + n)->dev_id = hdev->id;
1966 (dr + n)->dev_opt = flags;
1967
1968 if (++n >= dev_num)
1969 break;
1970 }
1971 read_unlock(&hci_dev_list_lock);
1972
1973 dl->dev_num = n;
1974 size = sizeof(*dl) + n * sizeof(*dr);
1975
1976 err = copy_to_user(arg, dl, size);
1977 kfree(dl);
1978
1979 return err ? -EFAULT : 0;
1980 }
1981
1982 int hci_get_dev_info(void __user *arg)
1983 {
1984 struct hci_dev *hdev;
1985 struct hci_dev_info di;
1986 unsigned long flags;
1987 int err = 0;
1988
1989 if (copy_from_user(&di, arg, sizeof(di)))
1990 return -EFAULT;
1991
1992 hdev = hci_dev_get(di.dev_id);
1993 if (!hdev)
1994 return -ENODEV;
1995
1996 /* When the auto-off is configured it means the transport
1997 * is running, but in that case still indicate that the
1998 * device is actually down.
1999 */
2000 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2001 flags = hdev->flags & ~BIT(HCI_UP);
2002 else
2003 flags = hdev->flags;
2004
2005 strcpy(di.name, hdev->name);
2006 di.bdaddr = hdev->bdaddr;
2007 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2008 di.flags = flags;
2009 di.pkt_type = hdev->pkt_type;
2010 if (lmp_bredr_capable(hdev)) {
2011 di.acl_mtu = hdev->acl_mtu;
2012 di.acl_pkts = hdev->acl_pkts;
2013 di.sco_mtu = hdev->sco_mtu;
2014 di.sco_pkts = hdev->sco_pkts;
2015 } else {
2016 di.acl_mtu = hdev->le_mtu;
2017 di.acl_pkts = hdev->le_pkts;
2018 di.sco_mtu = 0;
2019 di.sco_pkts = 0;
2020 }
2021 di.link_policy = hdev->link_policy;
2022 di.link_mode = hdev->link_mode;
2023
2024 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2025 memcpy(&di.features, &hdev->features, sizeof(di.features));
2026
2027 if (copy_to_user(arg, &di, sizeof(di)))
2028 err = -EFAULT;
2029
2030 hci_dev_put(hdev);
2031
2032 return err;
2033 }
2034
2035 /* ---- Interface to HCI drivers ---- */
2036
2037 static int hci_rfkill_set_block(void *data, bool blocked)
2038 {
2039 struct hci_dev *hdev = data;
2040
2041 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2042
2043 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2044 return -EBUSY;
2045
2046 if (blocked) {
2047 hci_dev_set_flag(hdev, HCI_RFKILLED);
2048 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2049 !hci_dev_test_flag(hdev, HCI_CONFIG))
2050 hci_dev_do_close(hdev);
2051 } else {
2052 hci_dev_clear_flag(hdev, HCI_RFKILLED);
2053 }
2054
2055 return 0;
2056 }
2057
2058 static const struct rfkill_ops hci_rfkill_ops = {
2059 .set_block = hci_rfkill_set_block,
2060 };
2061
2062 static void hci_power_on(struct work_struct *work)
2063 {
2064 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2065 int err;
2066
2067 BT_DBG("%s", hdev->name);
2068
2069 if (test_bit(HCI_UP, &hdev->flags) &&
2070 hci_dev_test_flag(hdev, HCI_MGMT) &&
2071 hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
2072 cancel_delayed_work(&hdev->power_off);
2073 hci_req_sync_lock(hdev);
2074 err = __hci_req_hci_power_on(hdev);
2075 hci_req_sync_unlock(hdev);
2076 mgmt_power_on(hdev, err);
2077 return;
2078 }
2079
2080 err = hci_dev_do_open(hdev);
2081 if (err < 0) {
2082 hci_dev_lock(hdev);
2083 mgmt_set_powered_failed(hdev, err);
2084 hci_dev_unlock(hdev);
2085 return;
2086 }
2087
2088 /* During the HCI setup phase, a few error conditions are
2089 * ignored and they need to be checked now. If they are still
2090 * valid, it is important to turn the device back off.
2091 */
2092 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2093 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
2094 (hdev->dev_type == HCI_PRIMARY &&
2095 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2096 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2097 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
2098 hci_dev_do_close(hdev);
2099 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
2100 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2101 HCI_AUTO_OFF_TIMEOUT);
2102 }
2103
2104 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
2105 /* For unconfigured devices, set the HCI_RAW flag
2106 * so that userspace can easily identify them.
2107 */
2108 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2109 set_bit(HCI_RAW, &hdev->flags);
2110
2111 /* For fully configured devices, this will send
2112 * the Index Added event. For unconfigured devices,
2113 * it will send Unconfigured Index Added event.
2114 *
2115 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2116 * and no event will be sent.
2117 */
2118 mgmt_index_added(hdev);
2119 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
2120 /* Now that the controller is configured, it is
2121 * important to clear the HCI_RAW flag.
2122 */
2123 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
2124 clear_bit(HCI_RAW, &hdev->flags);
2125
2126 /* Powering on the controller with HCI_CONFIG set only
2127 * happens with the transition from unconfigured to
2128 * configured. This will send the Index Added event.
2129 */
2130 mgmt_index_added(hdev);
2131 }
2132 }
2133
2134 static void hci_power_off(struct work_struct *work)
2135 {
2136 struct hci_dev *hdev = container_of(work, struct hci_dev,
2137 power_off.work);
2138
2139 BT_DBG("%s", hdev->name);
2140
2141 hci_dev_do_close(hdev);
2142 }
2143
2144 static void hci_error_reset(struct work_struct *work)
2145 {
2146 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2147
2148 BT_DBG("%s", hdev->name);
2149
2150 if (hdev->hw_error)
2151 hdev->hw_error(hdev, hdev->hw_error_code);
2152 else
2153 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
2154
2155 if (hci_dev_do_close(hdev))
2156 return;
2157
2158 hci_dev_do_open(hdev);
2159 }
2160
2161 void hci_uuids_clear(struct hci_dev *hdev)
2162 {
2163 struct bt_uuid *uuid, *tmp;
2164
2165 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2166 list_del(&uuid->list);
2167 kfree(uuid);
2168 }
2169 }
2170
2171 void hci_link_keys_clear(struct hci_dev *hdev)
2172 {
2173 struct link_key *key;
2174
2175 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2176 list_del_rcu(&key->list);
2177 kfree_rcu(key, rcu);
2178 }
2179 }
2180
2181 void hci_smp_ltks_clear(struct hci_dev *hdev)
2182 {
2183 struct smp_ltk *k;
2184
2185 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2186 list_del_rcu(&k->list);
2187 kfree_rcu(k, rcu);
2188 }
2189 }
2190
2191 void hci_smp_irks_clear(struct hci_dev *hdev)
2192 {
2193 struct smp_irk *k;
2194
2195 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2196 list_del_rcu(&k->list);
2197 kfree_rcu(k, rcu);
2198 }
2199 }
2200
2201 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2202 {
2203 struct link_key *k;
2204
2205 rcu_read_lock();
2206 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2207 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2208 rcu_read_unlock();
2209 return k;
2210 }
2211 }
2212 rcu_read_unlock();
2213
2214 return NULL;
2215 }
2216
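/* Decide whether a newly created BR/EDR link key should be stored
 * persistently. Legacy keys and keys derived from an LE link via Secure
 * Connections are always kept, debug keys never are, and the remaining
 * cases depend on the authentication requirements that were negotiated
 * on the connection.
 */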
2217 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2218 u8 key_type, u8 old_key_type)
2219 {
2220 /* Legacy key */
2221 if (key_type < 0x03)
2222 return true;
2223
2224 /* Debug keys are insecure so don't store them persistently */
2225 if (key_type == HCI_LK_DEBUG_COMBINATION)
2226 return false;
2227
2228 /* Changed combination key and there's no previous one */
2229 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2230 return false;
2231
2232 /* Security mode 3 case */
2233 if (!conn)
2234 return true;
2235
2236 /* BR/EDR key derived using SC from an LE link */
2237 if (conn->type == LE_LINK)
2238 return true;
2239
2240 /* Neither local nor remote side had no-bonding as requirement */
2241 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2242 return true;
2243
2244 /* Local side had dedicated bonding as requirement */
2245 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2246 return true;
2247
2248 /* Remote side had dedicated bonding as requirement */
2249 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2250 return true;
2251
2252 /* If none of the above criteria match, then don't store the key
2253 * persistently */
2254 return false;
2255 }
2256
2257 static u8 ltk_role(u8 type)
2258 {
2259 if (type == SMP_LTK)
2260 return HCI_ROLE_MASTER;
2261
2262 return HCI_ROLE_SLAVE;
2263 }
2264
2265 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2266 u8 addr_type, u8 role)
2267 {
2268 struct smp_ltk *k;
2269
2270 rcu_read_lock();
2271 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2272 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2273 continue;
2274
2275 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
2276 rcu_read_unlock();
2277 return k;
2278 }
2279 }
2280 rcu_read_unlock();
2281
2282 return NULL;
2283 }
2284
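/* Resolve a Resolvable Private Address to a stored IRK. A direct match
 * against the RPA cached for each key is tried first; only if that fails
 * is the (more expensive) cryptographic check done, and the RPA is then
 * cached on the matching entry.
 */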
2285 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2286 {
2287 struct smp_irk *irk;
2288
2289 rcu_read_lock();
2290 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2291 if (!bacmp(&irk->rpa, rpa)) {
2292 rcu_read_unlock();
2293 return irk;
2294 }
2295 }
2296
2297 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2298 if (smp_irk_matches(hdev, irk->val, rpa)) {
2299 bacpy(&irk->rpa, rpa);
2300 rcu_read_unlock();
2301 return irk;
2302 }
2303 }
2304 rcu_read_unlock();
2305
2306 return NULL;
2307 }
2308
2309 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2310 u8 addr_type)
2311 {
2312 struct smp_irk *irk;
2313
2314 /* Identity Address must be public or static random */
2315 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2316 return NULL;
2317
2318 rcu_read_lock();
2319 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2320 if (addr_type == irk->addr_type &&
2321 bacmp(bdaddr, &irk->bdaddr) == 0) {
2322 rcu_read_unlock();
2323 return irk;
2324 }
2325 }
2326 rcu_read_unlock();
2327
2328 return NULL;
2329 }
2330
2331 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
2332 bdaddr_t *bdaddr, u8 *val, u8 type,
2333 u8 pin_len, bool *persistent)
2334 {
2335 struct link_key *key, *old_key;
2336 u8 old_key_type;
2337
2338 old_key = hci_find_link_key(hdev, bdaddr);
2339 if (old_key) {
2340 old_key_type = old_key->type;
2341 key = old_key;
2342 } else {
2343 old_key_type = conn ? conn->key_type : 0xff;
2344 key = kzalloc(sizeof(*key), GFP_KERNEL);
2345 if (!key)
2346 return NULL;
2347 list_add_rcu(&key->list, &hdev->link_keys);
2348 }
2349
2350 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2351
2352 /* Some buggy controller combinations generate a changed
2353 * combination key for legacy pairing even when there's no
2354 * previous key */
2355 if (type == HCI_LK_CHANGED_COMBINATION &&
2356 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2357 type = HCI_LK_COMBINATION;
2358 if (conn)
2359 conn->key_type = type;
2360 }
2361
2362 bacpy(&key->bdaddr, bdaddr);
2363 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2364 key->pin_len = pin_len;
2365
2366 if (type == HCI_LK_CHANGED_COMBINATION)
2367 key->type = old_key_type;
2368 else
2369 key->type = type;
2370
2371 if (persistent)
2372 *persistent = hci_persistent_key(hdev, conn, type,
2373 old_key_type);
2374
2375 return key;
2376 }
2377
2378 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2379 u8 addr_type, u8 type, u8 authenticated,
2380 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
2381 {
2382 struct smp_ltk *key, *old_key;
2383 u8 role = ltk_role(type);
2384
2385 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
2386 if (old_key)
2387 key = old_key;
2388 else {
2389 key = kzalloc(sizeof(*key), GFP_KERNEL);
2390 if (!key)
2391 return NULL;
2392 list_add_rcu(&key->list, &hdev->long_term_keys);
2393 }
2394
2395 bacpy(&key->bdaddr, bdaddr);
2396 key->bdaddr_type = addr_type;
2397 memcpy(key->val, tk, sizeof(key->val));
2398 key->authenticated = authenticated;
2399 key->ediv = ediv;
2400 key->rand = rand;
2401 key->enc_size = enc_size;
2402 key->type = type;
2403
2404 return key;
2405 }
2406
2407 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2408 u8 addr_type, u8 val[16], bdaddr_t *rpa)
2409 {
2410 struct smp_irk *irk;
2411
2412 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2413 if (!irk) {
2414 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2415 if (!irk)
2416 return NULL;
2417
2418 bacpy(&irk->bdaddr, bdaddr);
2419 irk->addr_type = addr_type;
2420
2421 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
2422 }
2423
2424 memcpy(irk->val, val, 16);
2425 bacpy(&irk->rpa, rpa);
2426
2427 return irk;
2428 }
2429
2430 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2431 {
2432 struct link_key *key;
2433
2434 key = hci_find_link_key(hdev, bdaddr);
2435 if (!key)
2436 return -ENOENT;
2437
2438 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2439
2440 list_del_rcu(&key->list);
2441 kfree_rcu(key, rcu);
2442
2443 return 0;
2444 }
2445
2446 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
2447 {
2448 struct smp_ltk *k;
2449 int removed = 0;
2450
2451 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2452 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
2453 continue;
2454
2455 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2456
2457 list_del_rcu(&k->list);
2458 kfree_rcu(k, rcu);
2459 removed++;
2460 }
2461
2462 return removed ? 0 : -ENOENT;
2463 }
2464
2465 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2466 {
2467 struct smp_irk *k;
2468
2469 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2470 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2471 continue;
2472
2473 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2474
2475 list_del_rcu(&k->list);
2476 kfree_rcu(k, rcu);
2477 }
2478 }
2479
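/* Check whether keys are stored for the given address: a link key for
 * BR/EDR, or a long term key for LE (after first resolving the address
 * through a matching IRK, if one exists).
 */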
2480 bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2481 {
2482 struct smp_ltk *k;
2483 struct smp_irk *irk;
2484 u8 addr_type;
2485
2486 if (type == BDADDR_BREDR) {
2487 if (hci_find_link_key(hdev, bdaddr))
2488 return true;
2489 return false;
2490 }
2491
2492 /* Convert to HCI addr type which struct smp_ltk uses */
2493 if (type == BDADDR_LE_PUBLIC)
2494 addr_type = ADDR_LE_DEV_PUBLIC;
2495 else
2496 addr_type = ADDR_LE_DEV_RANDOM;
2497
2498 irk = hci_get_irk(hdev, bdaddr, addr_type);
2499 if (irk) {
2500 bdaddr = &irk->bdaddr;
2501 addr_type = irk->addr_type;
2502 }
2503
2504 rcu_read_lock();
2505 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2506 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2507 rcu_read_unlock();
2508 return true;
2509 }
2510 }
2511 rcu_read_unlock();
2512
2513 return false;
2514 }
2515
2516 /* HCI command timer function */
2517 static void hci_cmd_timeout(struct work_struct *work)
2518 {
2519 struct hci_dev *hdev = container_of(work, struct hci_dev,
2520 cmd_timer.work);
2521
2522 if (hdev->sent_cmd) {
2523 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2524 u16 opcode = __le16_to_cpu(sent->opcode);
2525
2526 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
2527 } else {
2528 bt_dev_err(hdev, "command tx timeout");
2529 }
2530
2531 atomic_set(&hdev->cmd_cnt, 1);
2532 queue_work(hdev->workqueue, &hdev->cmd_work);
2533 }
2534
2535 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2536 bdaddr_t *bdaddr, u8 bdaddr_type)
2537 {
2538 struct oob_data *data;
2539
2540 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2541 if (bacmp(bdaddr, &data->bdaddr) != 0)
2542 continue;
2543 if (data->bdaddr_type != bdaddr_type)
2544 continue;
2545 return data;
2546 }
2547
2548 return NULL;
2549 }
2550
2551 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2552 u8 bdaddr_type)
2553 {
2554 struct oob_data *data;
2555
2556 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2557 if (!data)
2558 return -ENOENT;
2559
2560 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2561
2562 list_del(&data->list);
2563 kfree(data);
2564
2565 return 0;
2566 }
2567
2568 void hci_remote_oob_data_clear(struct hci_dev *hdev)
2569 {
2570 struct oob_data *data, *n;
2571
2572 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2573 list_del(&data->list);
2574 kfree(data);
2575 }
2576 }
2577
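/* Store or update remote OOB data for the given address. The present
 * field tracks which hash/randomizer pairs are valid: 0x01 means P-192
 * only, 0x02 means P-256 only and 0x03 means both are present.
 */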
2578 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2579 u8 bdaddr_type, u8 *hash192, u8 *rand192,
2580 u8 *hash256, u8 *rand256)
2581 {
2582 struct oob_data *data;
2583
2584 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2585 if (!data) {
2586 data = kmalloc(sizeof(*data), GFP_KERNEL);
2587 if (!data)
2588 return -ENOMEM;
2589
2590 bacpy(&data->bdaddr, bdaddr);
2591 data->bdaddr_type = bdaddr_type;
2592 list_add(&data->list, &hdev->remote_oob_data);
2593 }
2594
2595 if (hash192 && rand192) {
2596 memcpy(data->hash192, hash192, sizeof(data->hash192));
2597 memcpy(data->rand192, rand192, sizeof(data->rand192));
2598 if (hash256 && rand256)
2599 data->present = 0x03;
2600 } else {
2601 memset(data->hash192, 0, sizeof(data->hash192));
2602 memset(data->rand192, 0, sizeof(data->rand192));
2603 if (hash256 && rand256)
2604 data->present = 0x02;
2605 else
2606 data->present = 0x00;
2607 }
2608
2609 if (hash256 && rand256) {
2610 memcpy(data->hash256, hash256, sizeof(data->hash256));
2611 memcpy(data->rand256, rand256, sizeof(data->rand256));
2612 } else {
2613 memset(data->hash256, 0, sizeof(data->hash256));
2614 memset(data->rand256, 0, sizeof(data->rand256));
2615 if (hash192 && rand192)
2616 data->present = 0x01;
2617 }
2618
2619 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2620
2621 return 0;
2622 }
2623
2624 /* This function requires the caller holds hdev->lock */
2625 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2626 {
2627 struct adv_info *adv_instance;
2628
2629 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2630 if (adv_instance->instance == instance)
2631 return adv_instance;
2632 }
2633
2634 return NULL;
2635 }
2636
2637 /* This function requires the caller holds hdev->lock */
2638 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2639 {
2640 struct adv_info *cur_instance;
2641
2642 cur_instance = hci_find_adv_instance(hdev, instance);
2643 if (!cur_instance)
2644 return NULL;
2645
2646 if (cur_instance == list_last_entry(&hdev->adv_instances,
2647 struct adv_info, list))
2648 return list_first_entry(&hdev->adv_instances,
2649 struct adv_info, list);
2650 else
2651 return list_next_entry(cur_instance, list);
2652 }
2653
2654 /* This function requires the caller holds hdev->lock */
2655 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2656 {
2657 struct adv_info *adv_instance;
2658
2659 adv_instance = hci_find_adv_instance(hdev, instance);
2660 if (!adv_instance)
2661 return -ENOENT;
2662
2663 BT_DBG("%s removing %dMR", hdev->name, instance);
2664
2665 if (hdev->cur_adv_instance == instance) {
2666 if (hdev->adv_instance_timeout) {
2667 cancel_delayed_work(&hdev->adv_instance_expire);
2668 hdev->adv_instance_timeout = 0;
2669 }
2670 hdev->cur_adv_instance = 0x00;
2671 }
2672
2673 list_del(&adv_instance->list);
2674 kfree(adv_instance);
2675
2676 hdev->adv_instance_cnt--;
2677
2678 return 0;
2679 }
2680
2681 /* This function requires the caller holds hdev->lock */
2682 void hci_adv_instances_clear(struct hci_dev *hdev)
2683 {
2684 struct adv_info *adv_instance, *n;
2685
2686 if (hdev->adv_instance_timeout) {
2687 cancel_delayed_work(&hdev->adv_instance_expire);
2688 hdev->adv_instance_timeout = 0;
2689 }
2690
2691 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2692 list_del(&adv_instance->list);
2693 kfree(adv_instance);
2694 }
2695
2696 hdev->adv_instance_cnt = 0;
2697 hdev->cur_adv_instance = 0x00;
2698 }
2699
2700 /* This function requires the caller holds hdev->lock */
2701 int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2702 u16 adv_data_len, u8 *adv_data,
2703 u16 scan_rsp_len, u8 *scan_rsp_data,
2704 u16 timeout, u16 duration)
2705 {
2706 struct adv_info *adv_instance;
2707
2708 adv_instance = hci_find_adv_instance(hdev, instance);
2709 if (adv_instance) {
2710 memset(adv_instance->adv_data, 0,
2711 sizeof(adv_instance->adv_data));
2712 memset(adv_instance->scan_rsp_data, 0,
2713 sizeof(adv_instance->scan_rsp_data));
2714 } else {
2715 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2716 instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2717 return -EOVERFLOW;
2718
2719 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
2720 if (!adv_instance)
2721 return -ENOMEM;
2722
2723 adv_instance->pending = true;
2724 adv_instance->instance = instance;
2725 list_add(&adv_instance->list, &hdev->adv_instances);
2726 hdev->adv_instance_cnt++;
2727 }
2728
2729 adv_instance->flags = flags;
2730 adv_instance->adv_data_len = adv_data_len;
2731 adv_instance->scan_rsp_len = scan_rsp_len;
2732
2733 if (adv_data_len)
2734 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2735
2736 if (scan_rsp_len)
2737 memcpy(adv_instance->scan_rsp_data,
2738 scan_rsp_data, scan_rsp_len);
2739
2740 adv_instance->timeout = timeout;
2741 adv_instance->remaining_time = timeout;
2742
2743 if (duration == 0)
2744 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2745 else
2746 adv_instance->duration = duration;
2747
2748 BT_DBG("%s for %dMR", hdev->name, instance);
2749
2750 return 0;
2751 }
2752
2753 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2754 bdaddr_t *bdaddr, u8 type)
2755 {
2756 struct bdaddr_list *b;
2757
2758 list_for_each_entry(b, bdaddr_list, list) {
2759 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2760 return b;
2761 }
2762
2763 return NULL;
2764 }
2765
2766 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2767 {
2768 struct bdaddr_list *b, *n;
2769
2770 list_for_each_entry_safe(b, n, bdaddr_list, list) {
2771 list_del(&b->list);
2772 kfree(b);
2773 }
2774 }
2775
2776 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2777 {
2778 struct bdaddr_list *entry;
2779
2780 if (!bacmp(bdaddr, BDADDR_ANY))
2781 return -EBADF;
2782
2783 if (hci_bdaddr_list_lookup(list, bdaddr, type))
2784 return -EEXIST;
2785
2786 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2787 if (!entry)
2788 return -ENOMEM;
2789
2790 bacpy(&entry->bdaddr, bdaddr);
2791 entry->bdaddr_type = type;
2792
2793 list_add(&entry->list, list);
2794
2795 return 0;
2796 }
2797
2798 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2799 {
2800 struct bdaddr_list *entry;
2801
2802 if (!bacmp(bdaddr, BDADDR_ANY)) {
2803 hci_bdaddr_list_clear(list);
2804 return 0;
2805 }
2806
2807 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2808 if (!entry)
2809 return -ENOENT;
2810
2811 list_del(&entry->list);
2812 kfree(entry);
2813
2814 return 0;
2815 }
2816
2817 /* This function requires the caller holds hdev->lock */
2818 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2819 bdaddr_t *addr, u8 addr_type)
2820 {
2821 struct hci_conn_params *params;
2822
2823 list_for_each_entry(params, &hdev->le_conn_params, list) {
2824 if (bacmp(&params->addr, addr) == 0 &&
2825 params->addr_type == addr_type) {
2826 return params;
2827 }
2828 }
2829
2830 return NULL;
2831 }
2832
2833 /* This function requires the caller holds hdev->lock */
2834 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2835 bdaddr_t *addr, u8 addr_type)
2836 {
2837 struct hci_conn_params *param;
2838
2839 list_for_each_entry(param, list, action) {
2840 if (bacmp(&param->addr, addr) == 0 &&
2841 param->addr_type == addr_type)
2842 return param;
2843 }
2844
2845 return NULL;
2846 }
2847
2848 /* This function requires the caller holds hdev->lock */
2849 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2850 bdaddr_t *addr, u8 addr_type)
2851 {
2852 struct hci_conn_params *params;
2853
2854 params = hci_conn_params_lookup(hdev, addr, addr_type);
2855 if (params)
2856 return params;
2857
2858 params = kzalloc(sizeof(*params), GFP_KERNEL);
2859 if (!params) {
2860 bt_dev_err(hdev, "out of memory");
2861 return NULL;
2862 }
2863
2864 bacpy(&params->addr, addr);
2865 params->addr_type = addr_type;
2866
2867 list_add(&params->list, &hdev->le_conn_params);
2868 INIT_LIST_HEAD(&params->action);
2869
2870 params->conn_min_interval = hdev->le_conn_min_interval;
2871 params->conn_max_interval = hdev->le_conn_max_interval;
2872 params->conn_latency = hdev->le_conn_latency;
2873 params->supervision_timeout = hdev->le_supv_timeout;
2874 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2875
2876 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2877
2878 return params;
2879 }
2880
2881 static void hci_conn_params_free(struct hci_conn_params *params)
2882 {
2883 if (params->conn) {
2884 hci_conn_drop(params->conn);
2885 hci_conn_put(params->conn);
2886 }
2887
2888 list_del(&params->action);
2889 list_del(&params->list);
2890 kfree(params);
2891 }
2892
2893 /* This function requires the caller holds hdev->lock */
2894 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2895 {
2896 struct hci_conn_params *params;
2897
2898 params = hci_conn_params_lookup(hdev, addr, addr_type);
2899 if (!params)
2900 return;
2901
2902 hci_conn_params_free(params);
2903
2904 hci_update_background_scan(hdev);
2905
2906 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2907 }
2908
2909 /* This function requires the caller holds hdev->lock */
2910 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2911 {
2912 struct hci_conn_params *params, *tmp;
2913
2914 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2915 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2916 continue;
2917
2918 /* If trying to establish a one-time connection to a disabled
2919 * device, leave the params, but mark them for explicit connect only.
2920 */
2921 if (params->explicit_connect) {
2922 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2923 continue;
2924 }
2925
2926 list_del(&params->list);
2927 kfree(params);
2928 }
2929
2930 BT_DBG("All LE disabled connection parameters were removed");
2931 }
2932
2933 /* This function requires the caller holds hdev->lock */
2934 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2935 {
2936 struct hci_conn_params *params, *tmp;
2937
2938 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2939 hci_conn_params_free(params);
2940
2941 BT_DBG("All LE connection parameters were removed");
2942 }
2943
2944 /* Copy the Identity Address of the controller.
2945 *
2946 * If the controller has a public BD_ADDR, then by default use that one.
2947 * If this is an LE-only controller without a public address, default to
2948 * the static random address.
2949 *
2950 * For debugging purposes it is possible to force controllers with a
2951 * public address to use the static random address instead.
2952 *
2953 * In case BR/EDR has been disabled on a dual-mode controller and
2954 * userspace has configured a static address, then that address
2955 * becomes the identity address instead of the public BR/EDR address.
2956 */
2957 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2958 u8 *bdaddr_type)
2959 {
2960 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2961 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2962 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2963 bacmp(&hdev->static_addr, BDADDR_ANY))) {
2964 bacpy(bdaddr, &hdev->static_addr);
2965 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2966 } else {
2967 bacpy(bdaddr, &hdev->bdaddr);
2968 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2969 }
2970 }
2971
2972 /* Alloc HCI device */
2973 struct hci_dev *hci_alloc_dev(void)
2974 {
2975 struct hci_dev *hdev;
2976
2977 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
2978 if (!hdev)
2979 return NULL;
2980
2981 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2982 hdev->esco_type = (ESCO_HV1);
2983 hdev->link_mode = (HCI_LM_ACCEPT);
2984 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2985 hdev->io_capability = 0x03; /* No Input No Output */
2986 hdev->manufacturer = 0xffff; /* Default to internal use */
2987 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2988 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2989 hdev->adv_instance_cnt = 0;
2990 hdev->cur_adv_instance = 0x00;
2991 hdev->adv_instance_timeout = 0;
2992
2993 hdev->sniff_max_interval = 800;
2994 hdev->sniff_min_interval = 80;
2995
2996 hdev->le_adv_channel_map = 0x07;
2997 hdev->le_adv_min_interval = 0x0800;
2998 hdev->le_adv_max_interval = 0x0800;
2999 hdev->le_scan_interval = 0x0060;
3000 hdev->le_scan_window = 0x0030;
3001 hdev->le_conn_min_interval = 0x0018;
3002 hdev->le_conn_max_interval = 0x0028;
3003 hdev->le_conn_latency = 0x0000;
3004 hdev->le_supv_timeout = 0x002a;
3005 hdev->le_def_tx_len = 0x001b;
3006 hdev->le_def_tx_time = 0x0148;
3007 hdev->le_max_tx_len = 0x001b;
3008 hdev->le_max_tx_time = 0x0148;
3009 hdev->le_max_rx_len = 0x001b;
3010 hdev->le_max_rx_time = 0x0148;
3011 hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
3012 hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
3013
3014 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3015 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3016 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3017 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3018
3019 mutex_init(&hdev->lock);
3020 mutex_init(&hdev->req_lock);
3021
3022 INIT_LIST_HEAD(&hdev->mgmt_pending);
3023 INIT_LIST_HEAD(&hdev->blacklist);
3024 INIT_LIST_HEAD(&hdev->whitelist);
3025 INIT_LIST_HEAD(&hdev->uuids);
3026 INIT_LIST_HEAD(&hdev->link_keys);
3027 INIT_LIST_HEAD(&hdev->long_term_keys);
3028 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3029 INIT_LIST_HEAD(&hdev->remote_oob_data);
3030 INIT_LIST_HEAD(&hdev->le_white_list);
3031 INIT_LIST_HEAD(&hdev->le_conn_params);
3032 INIT_LIST_HEAD(&hdev->pend_le_conns);
3033 INIT_LIST_HEAD(&hdev->pend_le_reports);
3034 INIT_LIST_HEAD(&hdev->conn_hash.list);
3035 INIT_LIST_HEAD(&hdev->adv_instances);
3036
3037 INIT_WORK(&hdev->rx_work, hci_rx_work);
3038 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3039 INIT_WORK(&hdev->tx_work, hci_tx_work);
3040 INIT_WORK(&hdev->power_on, hci_power_on);
3041 INIT_WORK(&hdev->error_reset, hci_error_reset);
3042
3043 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3044
3045 skb_queue_head_init(&hdev->rx_q);
3046 skb_queue_head_init(&hdev->cmd_q);
3047 skb_queue_head_init(&hdev->raw_q);
3048
3049 init_waitqueue_head(&hdev->req_wait_q);
3050
3051 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3052
3053 hci_request_setup(hdev);
3054
3055 hci_init_sysfs(hdev);
3056 discovery_init(hdev);
3057
3058 return hdev;
3059 }
3060 EXPORT_SYMBOL(hci_alloc_dev);
3061
3062 /* Free HCI device */
3063 void hci_free_dev(struct hci_dev *hdev)
3064 {
3065 /* will free via device release */
3066 put_device(&hdev->dev);
3067 }
3068 EXPORT_SYMBOL(hci_free_dev);
3069
3070 /* Register HCI device */
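/* Typical driver usage (illustrative sketch only; the bus type and the
 * open/close/send callbacks shown are assumptions, not part of this file):
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 *
 * Devices without open, close and send callbacks are rejected below.
 */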
3071 int hci_register_dev(struct hci_dev *hdev)
3072 {
3073 int id, error;
3074
3075 if (!hdev->open || !hdev->close || !hdev->send)
3076 return -EINVAL;
3077
3078 /* Do not allow HCI_AMP devices to register at index 0,
3079 * so the index can be used as the AMP controller ID.
3080 */
3081 switch (hdev->dev_type) {
3082 case HCI_PRIMARY:
3083 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3084 break;
3085 case HCI_AMP:
3086 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3087 break;
3088 default:
3089 return -EINVAL;
3090 }
3091
3092 if (id < 0)
3093 return id;
3094
3095 sprintf(hdev->name, "hci%d", id);
3096 hdev->id = id;
3097
3098 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3099
3100 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
3101 if (!hdev->workqueue) {
3102 error = -ENOMEM;
3103 goto err;
3104 }
3105
3106 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3107 hdev->name);
3108 if (!hdev->req_workqueue) {
3109 destroy_workqueue(hdev->workqueue);
3110 error = -ENOMEM;
3111 goto err;
3112 }
3113
3114 if (!IS_ERR_OR_NULL(bt_debugfs))
3115 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3116
3117 dev_set_name(&hdev->dev, "%s", hdev->name);
3118
3119 error = device_add(&hdev->dev);
3120 if (error < 0)
3121 goto err_wqueue;
3122
3123 hci_leds_init(hdev);
3124
3125 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3126 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3127 hdev);
3128 if (hdev->rfkill) {
3129 if (rfkill_register(hdev->rfkill) < 0) {
3130 rfkill_destroy(hdev->rfkill);
3131 hdev->rfkill = NULL;
3132 }
3133 }
3134
3135 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3136 hci_dev_set_flag(hdev, HCI_RFKILLED);
3137
3138 hci_dev_set_flag(hdev, HCI_SETUP);
3139 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
3140
3141 if (hdev->dev_type == HCI_PRIMARY) {
3142 /* Assume BR/EDR support until proven otherwise (such as
3143 * through reading supported features during init).
3144 */
3145 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
3146 }
3147
3148 write_lock(&hci_dev_list_lock);
3149 list_add(&hdev->list, &hci_dev_list);
3150 write_unlock(&hci_dev_list_lock);
3151
3152 /* Devices that are marked for raw-only usage are unconfigured
3153 * and should not be included in normal operation.
3154 */
3155 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3156 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
3157
3158 hci_sock_dev_event(hdev, HCI_DEV_REG);
3159 hci_dev_hold(hdev);
3160
3161 queue_work(hdev->req_workqueue, &hdev->power_on);
3162
3163 return id;
3164
3165 err_wqueue:
3166 destroy_workqueue(hdev->workqueue);
3167 destroy_workqueue(hdev->req_workqueue);
3168 err:
3169 ida_simple_remove(&hci_index_ida, hdev->id);
3170
3171 return error;
3172 }
3173 EXPORT_SYMBOL(hci_register_dev);
3174
3175 /* Unregister HCI device */
3176 void hci_unregister_dev(struct hci_dev *hdev)
3177 {
3178 int id;
3179
3180 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3181
3182 hci_dev_set_flag(hdev, HCI_UNREGISTER);
3183
3184 id = hdev->id;
3185
3186 write_lock(&hci_dev_list_lock);
3187 list_del(&hdev->list);
3188 write_unlock(&hci_dev_list_lock);
3189
3190 cancel_work_sync(&hdev->power_on);
3191
3192 hci_dev_do_close(hdev);
3193
3194 if (!test_bit(HCI_INIT, &hdev->flags) &&
3195 !hci_dev_test_flag(hdev, HCI_SETUP) &&
3196 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
3197 hci_dev_lock(hdev);
3198 mgmt_index_removed(hdev);
3199 hci_dev_unlock(hdev);
3200 }
3201
3202 /* mgmt_index_removed should take care of emptying the
3203 * pending list */
3204 BUG_ON(!list_empty(&hdev->mgmt_pending));
3205
3206 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
3207
3208 if (hdev->rfkill) {
3209 rfkill_unregister(hdev->rfkill);
3210 rfkill_destroy(hdev->rfkill);
3211 }
3212
3213 device_del(&hdev->dev);
3214
3215 debugfs_remove_recursive(hdev->debugfs);
3216 kfree_const(hdev->hw_info);
3217 kfree_const(hdev->fw_info);
3218
3219 destroy_workqueue(hdev->workqueue);
3220 destroy_workqueue(hdev->req_workqueue);
3221
3222 hci_dev_lock(hdev);
3223 hci_bdaddr_list_clear(&hdev->blacklist);
3224 hci_bdaddr_list_clear(&hdev->whitelist);
3225 hci_uuids_clear(hdev);
3226 hci_link_keys_clear(hdev);
3227 hci_smp_ltks_clear(hdev);
3228 hci_smp_irks_clear(hdev);
3229 hci_remote_oob_data_clear(hdev);
3230 hci_adv_instances_clear(hdev);
3231 hci_bdaddr_list_clear(&hdev->le_white_list);
3232 hci_conn_params_clear_all(hdev);
3233 hci_discovery_filter_clear(hdev);
3234 hci_dev_unlock(hdev);
3235
3236 hci_dev_put(hdev);
3237
3238 ida_simple_remove(&hci_index_ida, id);
3239 }
3240 EXPORT_SYMBOL(hci_unregister_dev);
3241
3242 /* Suspend HCI device */
3243 int hci_suspend_dev(struct hci_dev *hdev)
3244 {
3245 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
3246 return 0;
3247 }
3248 EXPORT_SYMBOL(hci_suspend_dev);
3249
3250 /* Resume HCI device */
3251 int hci_resume_dev(struct hci_dev *hdev)
3252 {
3253 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
3254 return 0;
3255 }
3256 EXPORT_SYMBOL(hci_resume_dev);
3257
3258 /* Reset HCI device */
3259 int hci_reset_dev(struct hci_dev *hdev)
3260 {
3261 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3262 struct sk_buff *skb;
3263
3264 skb = bt_skb_alloc(3, GFP_ATOMIC);
3265 if (!skb)
3266 return -ENOMEM;
3267
3268 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
3269 skb_put_data(skb, hw_err, 3);
3270
3271 /* Send Hardware Error to upper stack */
3272 return hci_recv_frame(hdev, skb);
3273 }
3274 EXPORT_SYMBOL(hci_reset_dev);
3275
3276 /* Receive frame from HCI drivers */
3277 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3278 {
3279 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3280 && !test_bit(HCI_INIT, &hdev->flags))) {
3281 kfree_skb(skb);
3282 return -ENXIO;
3283 }
3284
3285 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3286 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
3287 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
3288 kfree_skb(skb);
3289 return -EINVAL;
3290 }
3291
3292 /* Incoming skb */
3293 bt_cb(skb)->incoming = 1;
3294
3295 /* Time stamp */
3296 __net_timestamp(skb);
3297
3298 skb_queue_tail(&hdev->rx_q, skb);
3299 queue_work(hdev->workqueue, &hdev->rx_work);
3300
3301 return 0;
3302 }
3303 EXPORT_SYMBOL(hci_recv_frame);
3304
3305 /* Receive diagnostic message from HCI drivers */
3306 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3307 {
3308 /* Mark as diagnostic packet */
3309 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
3310
3311 /* Time stamp */
3312 __net_timestamp(skb);
3313
3314 skb_queue_tail(&hdev->rx_q, skb);
3315 queue_work(hdev->workqueue, &hdev->rx_work);
3316
3317 return 0;
3318 }
3319 EXPORT_SYMBOL(hci_recv_diag);
3320
3321 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
3322 {
3323 va_list vargs;
3324
3325 va_start(vargs, fmt);
3326 kfree_const(hdev->hw_info);
3327 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3328 va_end(vargs);
3329 }
3330 EXPORT_SYMBOL(hci_set_hw_info);
3331
3332 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
3333 {
3334 va_list vargs;
3335
3336 va_start(vargs, fmt);
3337 kfree_const(hdev->fw_info);
3338 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3339 va_end(vargs);
3340 }
3341 EXPORT_SYMBOL(hci_set_fw_info);
3342
3343 /* ---- Interface to upper protocols ---- */
3344
3345 int hci_register_cb(struct hci_cb *cb)
3346 {
3347 BT_DBG("%p name %s", cb, cb->name);
3348
3349 mutex_lock(&hci_cb_list_lock);
3350 list_add_tail(&cb->list, &hci_cb_list);
3351 mutex_unlock(&hci_cb_list_lock);
3352
3353 return 0;
3354 }
3355 EXPORT_SYMBOL(hci_register_cb);
3356
3357 int hci_unregister_cb(struct hci_cb *cb)
3358 {
3359 BT_DBG("%p name %s", cb, cb->name);
3360
3361 mutex_lock(&hci_cb_list_lock);
3362 list_del(&cb->list);
3363 mutex_unlock(&hci_cb_list_lock);
3364
3365 return 0;
3366 }
3367 EXPORT_SYMBOL(hci_unregister_cb);
3368
3369 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3370 {
3371 int err;
3372
3373 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3374 skb->len);
3375
3376 /* Time stamp */
3377 __net_timestamp(skb);
3378
3379 /* Send copy to monitor */
3380 hci_send_to_monitor(hdev, skb);
3381
3382 if (atomic_read(&hdev->promisc)) {
3383 /* Send copy to the sockets */
3384 hci_send_to_sock(hdev, skb);
3385 }
3386
3387 /* Get rid of skb owner, prior to sending to the driver. */
3388 skb_orphan(skb);
3389
3390 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3391 kfree_skb(skb);
3392 return;
3393 }
3394
3395 err = hdev->send(hdev, skb);
3396 if (err < 0) {
3397 bt_dev_err(hdev, "sending frame failed (%d)", err);
3398 kfree_skb(skb);
3399 }
3400 }
3401
3402 /* Send HCI command */
3403 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3404 const void *param)
3405 {
3406 struct sk_buff *skb;
3407
3408 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3409
3410 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3411 if (!skb) {
3412 bt_dev_err(hdev, "no memory for command");
3413 return -ENOMEM;
3414 }
3415
3416 /* Stand-alone HCI commands must be flagged as
3417 * single-command requests.
3418 */
3419 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3420
3421 skb_queue_tail(&hdev->cmd_q, skb);
3422 queue_work(hdev->workqueue, &hdev->cmd_work);
3423
3424 return 0;
3425 }
3426
3427 /* Get data from the previously sent command */
3428 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3429 {
3430 struct hci_command_hdr *hdr;
3431
3432 if (!hdev->sent_cmd)
3433 return NULL;
3434
3435 hdr = (void *) hdev->sent_cmd->data;
3436
3437 if (hdr->opcode != cpu_to_le16(opcode))
3438 return NULL;
3439
3440 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3441
3442 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3443 }
3444
3445 /* Send HCI command and wait for command complete event */
3446 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3447 const void *param, u32 timeout)
3448 {
3449 struct sk_buff *skb;
3450
3451 if (!test_bit(HCI_UP, &hdev->flags))
3452 return ERR_PTR(-ENETDOWN);
3453
3454 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3455
3456 hci_req_sync_lock(hdev);
3457 skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3458 hci_req_sync_unlock(hdev);
3459
3460 return skb;
3461 }
3462 EXPORT_SYMBOL(hci_cmd_sync);
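/* Example (illustrative sketch, assuming HCI_OP_READ_LOCAL_VERSION is
 * available to the caller): synchronously read the local version
 * information and release the returned event skb:
 *
 *	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			   HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	...
 *	kfree_skb(skb);
 */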
3463
3464 /* Send ACL data */
3465 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3466 {
3467 struct hci_acl_hdr *hdr;
3468 int len = skb->len;
3469
3470 skb_push(skb, HCI_ACL_HDR_SIZE);
3471 skb_reset_transport_header(skb);
3472 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3473 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3474 hdr->dlen = cpu_to_le16(len);
3475 }
3476
3477 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3478 struct sk_buff *skb, __u16 flags)
3479 {
3480 struct hci_conn *conn = chan->conn;
3481 struct hci_dev *hdev = conn->hdev;
3482 struct sk_buff *list;
3483
3484 skb->len = skb_headlen(skb);
3485 skb->data_len = 0;
3486
3487 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3488
3489 switch (hdev->dev_type) {
3490 case HCI_PRIMARY:
3491 hci_add_acl_hdr(skb, conn->handle, flags);
3492 break;
3493 case HCI_AMP:
3494 hci_add_acl_hdr(skb, chan->handle, flags);
3495 break;
3496 default:
3497 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3498 return;
3499 }
3500
3501 list = skb_shinfo(skb)->frag_list;
3502 if (!list) {
3503 /* Non fragmented */
3504 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3505
3506 skb_queue_tail(queue, skb);
3507 } else {
3508 /* Fragmented */
3509 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3510
3511 skb_shinfo(skb)->frag_list = NULL;
3512
3513 /* Queue all fragments atomically. We need to use spin_lock_bh
3514 * here because with 6LoWPAN links this function can be
3515 * called from softirq context, and using a normal spin lock could cause
3516 * deadlocks.
3517 */
3518 spin_lock_bh(&queue->lock);
3519
3520 __skb_queue_tail(queue, skb);
3521
3522 flags &= ~ACL_START;
3523 flags |= ACL_CONT;
3524 do {
3525 skb = list; list = list->next;
3526
3527 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3528 hci_add_acl_hdr(skb, conn->handle, flags);
3529
3530 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3531
3532 __skb_queue_tail(queue, skb);
3533 } while (list);
3534
3535 spin_unlock_bh(&queue->lock);
3536 }
3537 }
3538
3539 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3540 {
3541 struct hci_dev *hdev = chan->conn->hdev;
3542
3543 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3544
3545 hci_queue_acl(chan, &chan->data_q, skb, flags);
3546
3547 queue_work(hdev->workqueue, &hdev->tx_work);
3548 }
3549
3550 /* Send SCO data */
3551 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3552 {
3553 struct hci_dev *hdev = conn->hdev;
3554 struct hci_sco_hdr hdr;
3555
3556 BT_DBG("%s len %d", hdev->name, skb->len);
3557
3558 hdr.handle = cpu_to_le16(conn->handle);
3559 hdr.dlen = skb->len;
3560
3561 skb_push(skb, HCI_SCO_HDR_SIZE);
3562 skb_reset_transport_header(skb);
3563 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3564
3565 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3566
3567 skb_queue_tail(&conn->data_q, skb);
3568 queue_work(hdev->workqueue, &hdev->tx_work);
3569 }
3570
3571 /* ---- HCI TX task (outgoing data) ---- */
3572
3573 /* HCI Connection scheduler */
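/* Pick the connection of the given link type with the fewest packets
 * currently outstanding and compute a fair per-round quota: the available
 * controller buffer count divided by the number of ready connections,
 * with a minimum of one.
 */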
3574 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3575 int *quote)
3576 {
3577 struct hci_conn_hash *h = &hdev->conn_hash;
3578 struct hci_conn *conn = NULL, *c;
3579 unsigned int num = 0, min = ~0;
3580
3581 /* We don't have to lock device here. Connections are always
3582 * added and removed with TX task disabled. */
3583
3584 rcu_read_lock();
3585
3586 list_for_each_entry_rcu(c, &h->list, list) {
3587 if (c->type != type || skb_queue_empty(&c->data_q))
3588 continue;
3589
3590 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3591 continue;
3592
3593 num++;
3594
3595 if (c->sent < min) {
3596 min = c->sent;
3597 conn = c;
3598 }
3599
3600 if (hci_conn_num(hdev, type) == num)
3601 break;
3602 }
3603
3604 rcu_read_unlock();
3605
3606 if (conn) {
3607 int cnt, q;
3608
3609 switch (conn->type) {
3610 case ACL_LINK:
3611 cnt = hdev->acl_cnt;
3612 break;
3613 case SCO_LINK:
3614 case ESCO_LINK:
3615 cnt = hdev->sco_cnt;
3616 break;
3617 case LE_LINK:
3618 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3619 break;
3620 default:
3621 cnt = 0;
3622 bt_dev_err(hdev, "unknown link type %d", conn->type);
3623 }
3624
3625 q = cnt / num;
3626 *quote = q ? q : 1;
3627 } else
3628 *quote = 0;
3629
3630 BT_DBG("conn %p quote %d", conn, *quote);
3631 return conn;
3632 }
3633
3634 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3635 {
3636 struct hci_conn_hash *h = &hdev->conn_hash;
3637 struct hci_conn *c;
3638
3639 bt_dev_err(hdev, "link tx timeout");
3640
3641 rcu_read_lock();
3642
3643 /* Kill stalled connections */
3644 list_for_each_entry_rcu(c, &h->list, list) {
3645 if (c->type == type && c->sent) {
3646 bt_dev_err(hdev, "killing stalled connection %pMR",
3647 &c->dst);
3648 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3649 }
3650 }
3651
3652 rcu_read_unlock();
3653 }
3654
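/* Like hci_low_sent(), but selects an individual channel (struct hci_chan)
 * and takes the priority of the skb at the head of each channel queue into
 * account: only channels queued at the highest ready priority are
 * considered.
 */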
3655 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3656 int *quote)
3657 {
3658 struct hci_conn_hash *h = &hdev->conn_hash;
3659 struct hci_chan *chan = NULL;
3660 unsigned int num = 0, min = ~0, cur_prio = 0;
3661 struct hci_conn *conn;
3662 int cnt, q, conn_num = 0;
3663
3664 BT_DBG("%s", hdev->name);
3665
3666 rcu_read_lock();
3667
3668 list_for_each_entry_rcu(conn, &h->list, list) {
3669 struct hci_chan *tmp;
3670
3671 if (conn->type != type)
3672 continue;
3673
3674 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3675 continue;
3676
3677 conn_num++;
3678
3679 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3680 struct sk_buff *skb;
3681
3682 if (skb_queue_empty(&tmp->data_q))
3683 continue;
3684
3685 skb = skb_peek(&tmp->data_q);
3686 if (skb->priority < cur_prio)
3687 continue;
3688
3689 if (skb->priority > cur_prio) {
3690 num = 0;
3691 min = ~0;
3692 cur_prio = skb->priority;
3693 }
3694
3695 num++;
3696
3697 if (conn->sent < min) {
3698 min = conn->sent;
3699 chan = tmp;
3700 }
3701 }
3702
3703 if (hci_conn_num(hdev, type) == conn_num)
3704 break;
3705 }
3706
3707 rcu_read_unlock();
3708
3709 if (!chan)
3710 return NULL;
3711
3712 switch (chan->conn->type) {
3713 case ACL_LINK:
3714 cnt = hdev->acl_cnt;
3715 break;
3716 case AMP_LINK:
3717 cnt = hdev->block_cnt;
3718 break;
3719 case SCO_LINK:
3720 case ESCO_LINK:
3721 cnt = hdev->sco_cnt;
3722 break;
3723 case LE_LINK:
3724 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3725 break;
3726 default:
3727 cnt = 0;
3728 bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
3729 }
3730
3731 q = cnt / num;
3732 *quote = q ? q : 1;
3733 BT_DBG("chan %p quote %d", chan, *quote);
3734 return chan;
3735 }
3736
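/* Rebalance after a scheduling round: channels that got to send have their
 * sent counter reset, while data still queued on channels that did not get
 * a turn is promoted to HCI_PRIO_MAX - 1 so it is picked up next time.
 */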
3737 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3738 {
3739 struct hci_conn_hash *h = &hdev->conn_hash;
3740 struct hci_conn *conn;
3741 int num = 0;
3742
3743 BT_DBG("%s", hdev->name);
3744
3745 rcu_read_lock();
3746
3747 list_for_each_entry_rcu(conn, &h->list, list) {
3748 struct hci_chan *chan;
3749
3750 if (conn->type != type)
3751 continue;
3752
3753 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3754 continue;
3755
3756 num++;
3757
3758 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3759 struct sk_buff *skb;
3760
3761 if (chan->sent) {
3762 chan->sent = 0;
3763 continue;
3764 }
3765
3766 if (skb_queue_empty(&chan->data_q))
3767 continue;
3768
3769 skb = skb_peek(&chan->data_q);
3770 if (skb->priority >= HCI_PRIO_MAX - 1)
3771 continue;
3772
3773 skb->priority = HCI_PRIO_MAX - 1;
3774
3775 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3776 skb->priority);
3777 }
3778
3779 if (hci_conn_num(hdev, type) == num)
3780 break;
3781 }
3782
3783 rcu_read_unlock();
3784
3785 }
3786
3787 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3788 {
3789 /* Calculate count of blocks used by this packet */
3790 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3791 }
3792
3793 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3794 {
3795 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3796 /* ACL tx timeout must be longer than maximum
3797 * link supervision timeout (40.9 seconds) */
3798 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3799 HCI_ACL_TX_TIMEOUT))
3800 hci_link_tx_to(hdev, ACL_LINK);
3801 }
3802 }
3803
3804 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3805 {
3806 unsigned int cnt = hdev->acl_cnt;
3807 struct hci_chan *chan;
3808 struct sk_buff *skb;
3809 int quote;
3810
3811 __check_timeout(hdev, cnt);
3812
3813 while (hdev->acl_cnt &&
3814 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3815 u32 priority = (skb_peek(&chan->data_q))->priority;
3816 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3817 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3818 skb->len, skb->priority);
3819
3820 /* Stop if priority has changed */
3821 if (skb->priority < priority)
3822 break;
3823
3824 skb = skb_dequeue(&chan->data_q);
3825
3826 hci_conn_enter_active_mode(chan->conn,
3827 bt_cb(skb)->force_active);
3828
3829 hci_send_frame(hdev, skb);
3830 hdev->acl_last_tx = jiffies;
3831
3832 hdev->acl_cnt--;
3833 chan->sent++;
3834 chan->conn->sent++;
3835 }
3836 }
3837
3838 if (cnt != hdev->acl_cnt)
3839 hci_prio_recalculate(hdev, ACL_LINK);
3840 }
3841
3842 static void hci_sched_acl_blk(struct hci_dev *hdev)
3843 {
3844 unsigned int cnt = hdev->block_cnt;
3845 struct hci_chan *chan;
3846 struct sk_buff *skb;
3847 int quote;
3848 u8 type;
3849
3850 __check_timeout(hdev, cnt);
3851
3852 BT_DBG("%s", hdev->name);
3853
3854 if (hdev->dev_type == HCI_AMP)
3855 type = AMP_LINK;
3856 else
3857 type = ACL_LINK;
3858
3859 while (hdev->block_cnt > 0 &&
3860 (chan = hci_chan_sent(hdev, type, &quote))) {
3861 u32 priority = (skb_peek(&chan->data_q))->priority;
3862 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3863 int blocks;
3864
3865 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3866 skb->len, skb->priority);
3867
3868 /* Stop if priority has changed */
3869 if (skb->priority < priority)
3870 break;
3871
3872 skb = skb_dequeue(&chan->data_q);
3873
3874 blocks = __get_blocks(hdev, skb);
3875 if (blocks > hdev->block_cnt)
3876 return;
3877
3878 hci_conn_enter_active_mode(chan->conn,
3879 bt_cb(skb)->force_active);
3880
3881 hci_send_frame(hdev, skb);
3882 hdev->acl_last_tx = jiffies;
3883
3884 hdev->block_cnt -= blocks;
3885 quote -= blocks;
3886
3887 chan->sent += blocks;
3888 chan->conn->sent += blocks;
3889 }
3890 }
3891
3892 if (cnt != hdev->block_cnt)
3893 hci_prio_recalculate(hdev, type);
3894 }
3895
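/* Dispatch ACL scheduling according to the controller's flow control mode:
 * packet-based controllers account for individual ACL packets, while
 * block-based (AMP) controllers account for data blocks.
 */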
3896 static void hci_sched_acl(struct hci_dev *hdev)
3897 {
3898 BT_DBG("%s", hdev->name);
3899
3900 /* No ACL link over BR/EDR controller */
3901 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
3902 return;
3903
3904 /* No AMP link over AMP controller */
3905 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3906 return;
3907
3908 switch (hdev->flow_ctl_mode) {
3909 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3910 hci_sched_acl_pkt(hdev);
3911 break;
3912
3913 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3914 hci_sched_acl_blk(hdev);
3915 break;
3916 }
3917 }
3918
3919 /* Schedule SCO */
3920 static void hci_sched_sco(struct hci_dev *hdev)
3921 {
3922 struct hci_conn *conn;
3923 struct sk_buff *skb;
3924 int quote;
3925
3926 BT_DBG("%s", hdev->name);
3927
3928 if (!hci_conn_num(hdev, SCO_LINK))
3929 return;
3930
3931 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3932 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3933 BT_DBG("skb %p len %d", skb, skb->len);
3934 hci_send_frame(hdev, skb);
3935
3936 conn->sent++;
3937 if (conn->sent == ~0)
3938 conn->sent = 0;
3939 }
3940 }
3941 }
3942
3943 static void hci_sched_esco(struct hci_dev *hdev)
3944 {
3945 struct hci_conn *conn;
3946 struct sk_buff *skb;
3947 int quote;
3948
3949 BT_DBG("%s", hdev->name);
3950
3951 if (!hci_conn_num(hdev, ESCO_LINK))
3952 return;
3953
3954 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3955 &quote))) {
3956 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3957 BT_DBG("skb %p len %d", skb, skb->len);
3958 hci_send_frame(hdev, skb);
3959
3960 conn->sent++;
3961 if (conn->sent == ~0)
3962 conn->sent = 0;
3963 }
3964 }
3965 }
3966
3967 static void hci_sched_le(struct hci_dev *hdev)
3968 {
3969 struct hci_chan *chan;
3970 struct sk_buff *skb;
3971 int quote, cnt, tmp;
3972
3973 BT_DBG("%s", hdev->name);
3974
3975 if (!hci_conn_num(hdev, LE_LINK))
3976 return;
3977
3978 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
3979 /* LE tx timeout must be longer than maximum
3980 * link supervision timeout (40.9 seconds) */
3981 if (!hdev->le_cnt && hdev->le_pkts &&
3982 time_after(jiffies, hdev->le_last_tx + HZ * 45))
3983 hci_link_tx_to(hdev, LE_LINK);
3984 }
3985
3986 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3987 tmp = cnt;
3988 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3989 u32 priority = (skb_peek(&chan->data_q))->priority;
3990 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3991 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3992 skb->len, skb->priority);
3993
3994 /* Stop if priority has changed */
3995 if (skb->priority < priority)
3996 break;
3997
3998 skb = skb_dequeue(&chan->data_q);
3999
4000 hci_send_frame(hdev, skb);
4001 hdev->le_last_tx = jiffies;
4002
4003 cnt--;
4004 chan->sent++;
4005 chan->conn->sent++;
4006 }
4007 }
4008
4009 if (hdev->le_pkts)
4010 hdev->le_cnt = cnt;
4011 else
4012 hdev->acl_cnt = cnt;
4013
4014 if (cnt != tmp)
4015 hci_prio_recalculate(hdev, LE_LINK);
4016 }
4017
4018 static void hci_tx_work(struct work_struct *work)
4019 {
4020 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4021 struct sk_buff *skb;
4022
4023 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4024 hdev->sco_cnt, hdev->le_cnt);
4025
4026 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4027 /* Schedule queues and send stuff to HCI driver */
4028 hci_sched_acl(hdev);
4029 hci_sched_sco(hdev);
4030 hci_sched_esco(hdev);
4031 hci_sched_le(hdev);
4032 }
4033
4034 /* Send next queued raw (unknown type) packet */
4035 while ((skb = skb_dequeue(&hdev->raw_q)))
4036 hci_send_frame(hdev, skb);
4037 }
4038
4039 /* ----- HCI RX task (incoming data processing) ----- */
4040
4041 /* ACL data packet */
4042 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4043 {
4044 struct hci_acl_hdr *hdr = (void *) skb->data;
4045 struct hci_conn *conn;
4046 __u16 handle, flags;
4047
4048 skb_pull(skb, HCI_ACL_HDR_SIZE);
4049
4050 handle = __le16_to_cpu(hdr->handle);
4051 flags = hci_flags(handle);
4052 handle = hci_handle(handle);
4053
4054 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4055 handle, flags);
4056
4057 hdev->stat.acl_rx++;
4058
4059 hci_dev_lock(hdev);
4060 conn = hci_conn_hash_lookup_handle(hdev, handle);
4061 hci_dev_unlock(hdev);
4062
4063 if (conn) {
4064 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4065
4066 /* Send to upper protocol */
4067 l2cap_recv_acldata(conn, skb, flags);
4068 return;
4069 } else {
4070 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
4071 handle);
4072 }
4073
4074 kfree_skb(skb);
4075 }
4076
4077 /* SCO data packet */
4078 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4079 {
4080 struct hci_sco_hdr *hdr = (void *) skb->data;
4081 struct hci_conn *conn;
4082 __u16 handle;
4083
4084 skb_pull(skb, HCI_SCO_HDR_SIZE);
4085
4086 handle = __le16_to_cpu(hdr->handle);
4087
4088 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4089
4090 hdev->stat.sco_rx++;
4091
4092 hci_dev_lock(hdev);
4093 conn = hci_conn_hash_lookup_handle(hdev, handle);
4094 hci_dev_unlock(hdev);
4095
4096 if (conn) {
4097 /* Send to upper protocol */
4098 sco_recv_scodata(conn, skb);
4099 return;
4100 } else {
4101 bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
4102 handle);
4103 }
4104
4105 kfree_skb(skb);
4106 }
4107
4108 static bool hci_req_is_complete(struct hci_dev *hdev)
4109 {
4110 struct sk_buff *skb;
4111
4112 skb = skb_peek(&hdev->cmd_q);
4113 if (!skb)
4114 return true;
4115
4116 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4117 }
4118
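/* Re-queue the last command that was sent. Used to recover from
 * controllers that emit a spontaneous reset complete event during init
 * while a different command is still pending (see hci_req_cmd_complete()
 * below).
 */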
4119 static void hci_resend_last(struct hci_dev *hdev)
4120 {
4121 struct hci_command_hdr *sent;
4122 struct sk_buff *skb;
4123 u16 opcode;
4124
4125 if (!hdev->sent_cmd)
4126 return;
4127
4128 sent = (void *) hdev->sent_cmd->data;
4129 opcode = __le16_to_cpu(sent->opcode);
4130 if (opcode == HCI_OP_RESET)
4131 return;
4132
4133 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4134 if (!skb)
4135 return;
4136
4137 skb_queue_head(&hdev->cmd_q, skb);
4138 queue_work(hdev->workqueue, &hdev->cmd_work);
4139 }
4140
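/* Called from event processing when a command completes. Determines
 * whether the command was part of a request and, if the request is now
 * finished (or has failed), returns the matching completion callback to
 * the caller.
 */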
4141 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4142 hci_req_complete_t *req_complete,
4143 hci_req_complete_skb_t *req_complete_skb)
4144 {
4145 struct sk_buff *skb;
4146 unsigned long flags;
4147
4148 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4149
4150 /* If the completed command doesn't match the last one that was
4151 * sent we need to do special handling of it.
4152 */
4153 if (!hci_sent_cmd_data(hdev, opcode)) {
4154 /* Some CSR based controllers generate a spontaneous
4155 * reset complete event during init and any pending
4156 * command will never be completed. In such a case we
4157 * need to resend whatever was the last sent
4158 * command.
4159 */
4160 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4161 hci_resend_last(hdev);
4162
4163 return;
4164 }
4165
4166 /* If the command succeeded and there are still more commands in
4167 * this request, the request is not yet complete.
4168 */
4169 if (!status && !hci_req_is_complete(hdev))
4170 return;
4171
4172 /* If this was the last command in a request, the complete
4173 * callback would be found in hdev->sent_cmd instead of the
4174 * command queue (hdev->cmd_q).
4175 */
4176 if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
4177 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
4178 return;
4179 }
4180
4181 if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4182 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
4183 return;
4184 }
4185
4186 /* Remove all pending commands belonging to this request */
4187 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4188 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4189 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
4190 __skb_queue_head(&hdev->cmd_q, skb);
4191 break;
4192 }
4193
4194 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4195 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4196 else
4197 *req_complete = bt_cb(skb)->hci.req_complete;
4198 kfree_skb(skb);
4199 }
4200 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4201 }
4202
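/* RX work: drain hdev->rx_q. Each frame is mirrored to the monitor
* socket and, in promiscuous mode, to raw HCI sockets. Frames are
* then dropped if a user channel owns the device (or if data arrives
* while HCI_INIT is set), otherwise dispatched by packet type to the
* event, ACL and SCO handlers above.
*/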
4203 static void hci_rx_work(struct work_struct *work)
4204 {
4205 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4206 struct sk_buff *skb;
4207
4208 BT_DBG("%s", hdev->name);
4209
4210 while ((skb = skb_dequeue(&hdev->rx_q))) {
4211 /* Send copy to monitor */
4212 hci_send_to_monitor(hdev, skb);
4213
4214 if (atomic_read(&hdev->promisc)) {
4215 /* Send copy to the sockets */
4216 hci_send_to_sock(hdev, skb);
4217 }
4218
4219 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4220 kfree_skb(skb);
4221 continue;
4222 }
4223
4224 if (test_bit(HCI_INIT, &hdev->flags)) {
4225 /* Don't process data packets in this state. */
4226 switch (hci_skb_pkt_type(skb)) {
4227 case HCI_ACLDATA_PKT:
4228 case HCI_SCODATA_PKT:
4229 kfree_skb(skb);
4230 continue;
4231 }
4232 }
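/* While HCI_INIT is set only the command/event exchange of the init
* sequence is expected, so stray ACL/SCO data is discarded above
* instead of being handed to the upper layers.
*/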
4233
4234 /* Process frame */
4235 switch (hci_skb_pkt_type(skb)) {
4236 case HCI_EVENT_PKT:
4237 BT_DBG("%s Event packet", hdev->name);
4238 hci_event_packet(hdev, skb);
4239 break;
4240
4241 case HCI_ACLDATA_PKT:
4242 BT_DBG("%s ACL data packet", hdev->name);
4243 hci_acldata_packet(hdev, skb);
4244 break;
4245
4246 case HCI_SCODATA_PKT:
4247 BT_DBG("%s SCO data packet", hdev->name);
4248 hci_scodata_packet(hdev, skb);
4249 break;
4250
4251 default:
4252 kfree_skb(skb);
4253 break;
4254 }
4255 }
4256 }
4257
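/* Command work: transmit queued commands as long as the controller
* still has command credits (hdev->cmd_cnt). A clone of each frame is
* kept in hdev->sent_cmd so its completion event can be matched, and
* the command timer is armed (or cancelled while a reset is in flight)
* to catch a controller that stops responding; the credit count itself
* is presumably replenished by the event handling code when Command
* Complete/Status arrives.
*/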
4258 static void hci_cmd_work(struct work_struct *work)
4259 {
4260 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4261 struct sk_buff *skb;
4262
4263 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4264 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4265
4266 /* Send queued commands */
4267 if (atomic_read(&hdev->cmd_cnt)) {
4268 skb = skb_dequeue(&hdev->cmd_q);
4269 if (!skb)
4270 return;
4271
4272 kfree_skb(hdev->sent_cmd);
4273
4274 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4275 if (hdev->sent_cmd) {
4276 atomic_dec(&hdev->cmd_cnt);
4277 hci_send_frame(hdev, skb);
4278 if (test_bit(HCI_RESET, &hdev->flags))
4279 cancel_delayed_work(&hdev->cmd_timer);
4280 else
4281 schedule_delayed_work(&hdev->cmd_timer,
4282 HCI_CMD_TIMEOUT);
4283 } else {
4284 skb_queue_head(&hdev->cmd_q, skb);
4285 queue_work(hdev->workqueue, &hdev->cmd_work);
4286 }
4287 }
4288 }