/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
		return -EALREADY;

	hci_req_sync_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_sync_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	kfree_skb(skb);

	hci_dev_change_flag(hdev, HCI_DUT_MODE);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	/* When the diagnostic flags are not persistent and the transport
	 * is not active, then there is no need for the vendor callback.
	 *
	 * Instead just store the desired value. If needed the setting
	 * will be programmed when the controller gets powered on.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    !test_bit(HCI_RUNNING, &hdev->flags))
		goto done;

	hci_req_sync_lock(hdev);
	err = hdev->set_diag(hdev, enable);
	hci_req_sync_unlock(hdev);

	if (err < 0)
		return err;

done:
	if (enable)
		hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
	else
		hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

	return count;
}

static const struct file_operations vendor_diag_fops = {
	.open		= simple_open,
	.read		= vendor_diag_read,
	.write		= vendor_diag_write,
	.llseek		= default_llseek,
};

static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
	debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
			    &dut_mode_fops);

	if (hdev->set_diag)
		debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
				    &vendor_diag_fops);
}

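/* Both attributes take a boolean as parsed by strtobool(), so a usage
 * sketch (assuming the standard bluetooth debugfs root and an adapter
 * named hci0; names may differ on a given system) looks like:
 *
 *	echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *	cat /sys/kernel/debug/bluetooth/hci0/vendor_diag
 *
 * Writes go through the synchronous HCI request path above; reads
 * report the current flag as a single 'Y' or 'N' character.
 */
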
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void amp_init2(struct hci_request *req)
{
	/* Read Local Supported Features. Not all AMP controllers
	 * support this so it's placed conditionally in the second
	 * stage init.
	 */
	if (req->hdev->commands[14] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init1(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 slots * 0.625 ms) */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */

		/* If the controller supports the Disconnect command, enable
		 * the corresponding event. In addition enable packet flow
		 * control related events.
		 */
		if (hdev->commands[0] & 0x20) {
			events[0] |= 0x10; /* Disconnection Complete */
			events[2] |= 0x04; /* Number of Completed Packets */
			events[3] |= 0x02; /* Data Buffer Overflow */
		}

		/* If the controller supports the Read Remote Version
		 * Information command, enable the corresponding event.
		 */
		if (hdev->commands[2] & 0x80)
			events[1] |= 0x08; /* Read Remote Version
					    * Information Complete
					    */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_ext_feat_capable(hdev))
		events[4] |= 0x04; /* Read Remote Extended Features Complete */

	if (lmp_esco_capable(hdev)) {
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

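/* A worked example of the mask layout programmed above: in the Set
 * Event Mask command the bit position of an event is its event code
 * minus one, transmitted LSB first, so event code E lives in
 * events[(E - 1) / 8], bit (E - 1) % 8. Disconnection Complete (0x05)
 * is therefore bit 4 of events[0], which matches the 0x10 used for the
 * LE-only default above.
 */
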
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->dev_type == HCI_AMP)
		return amp_init2(req);

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should be available as well. However some controllers
		 * list the max_page as 0 as long as SSP has not been
		 * enabled. To achieve proper debugging output, force the
		 * minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			u8 mode = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI
		 * events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
		u8 enable = 1;

		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported,
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported,
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

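/* hci_init3_req() and hci_init4_req() below key many optional commands
 * off hdev->commands[], the 64-octet supported-commands bitmap returned
 * by Read Local Supported Commands: commands[n] & (1 << b) tests octet
 * n, bit b of that table. For example, commands[26] & 0x08 (octet 26,
 * bit 3) is LE Set Scan Enable, as the inline comments spell out.
 */
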
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	if (hdev->commands[6] & 0x20 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40;	/* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04;	/* LE Direct Advertising
						 * Report
						 */

		/* If the controller supports the LE Set Scan Enable command,
		 * enable the corresponding advertising report event.
		 */
		if (hdev->commands[26] & 0x08)
			events[0] |= 0x02;	/* LE Advertising Report */

		/* If the controller supports the LE Create Connection
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[26] & 0x10)
			events[0] |= 0x01;	/* LE Connection Complete */

		/* If the controller supports the LE Connection Update
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x04)
			events[0] |= 0x04;	/* LE Connection Update
						 * Complete
						 */

		/* If the controller supports the LE Read Remote Used Features
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x20)
			events[0] |= 0x08;	/* LE Read Remote Used
						 * Features Complete
						 */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80;	/* LE Read Local P-256
						 * Public Key Complete
						 */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01;	/* LE Generate DHKey Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->commands[26] & 0x40) {
			/* Read LE White List Size */
			hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
				    0, NULL);
		}

		if (hdev->commands[26] & 0x80) {
			/* Clear LE White List */
			hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}

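/* Controller bring-up runs as up to four synchronous request stages:
 * stage 1 resets the controller and reads its basic identity (features,
 * version, BD_ADDR), stage 2 does transport-specific setup (BR/EDR, LE
 * or AMP), and stages 3 and 4 program event masks and optional commands
 * based on what the earlier stages reported. Each __hci_req_sync() call
 * below blocks until its request completes or HCI_INIT_TIMEOUT expires.
 */
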
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
	 * dual-mode BR/EDR/LE type controllers. AMP controllers only
	 * need the first two stages of init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}

static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

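/* A successful lookup returns a held device, so every caller must
 * balance it with hci_dev_put(), e.g.:
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *
 *	if (hdev) {
 *		// ... use hdev ...
 *		hci_dev_put(hdev);
 *	}
 */
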
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}

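/* The inquiry cache below keeps every discovered device on the "all"
 * list, and threads entries onto two auxiliary lists: "unknown" for
 * devices whose remote name has not been obtained yet, and "resolve"
 * for devices queued for name resolution, kept ordered by signal
 * strength (smallest abs(rssi) first) so that the strongest devices
 * get their names resolved first.
 */
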
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}

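/* The flags returned by hci_inquiry_cache_update() feed the mgmt Device
 * Found event: MGMT_DEV_FOUND_CONFIRM_NAME asks userspace to confirm
 * whether the remote name is needed (set when the name is unknown or
 * the entry could not be cached), while MGMT_DEV_FOUND_LEGACY_PAIRING
 * marks devices that lack Secure Simple Pairing support.
 */
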
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses we will use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

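/* hci_inquiry() above backs the HCIINQUIRY ioctl. A rough user space
 * sketch, assuming a raw HCI socket from socket(AF_BLUETOOTH, SOCK_RAW,
 * BTPROTO_HCI) in sk and a buffer sized for the request header plus
 * num_rsp inquiry_info records:
 *
 *	struct hci_inquiry_req *ir = (void *) buf;
 *
 *	ir->dev_id  = 0;
 *	ir->flags   = IREQ_CACHE_FLUSH;
 *	ir->lap[0]  = 0x33;		// GIAC 0x9e8b33, LSB first
 *	ir->lap[1]  = 0x8b;
 *	ir->lap[2]  = 0x9e;
 *	ir->length  = 8;		// inquiry length, units of 1.28 s
 *	ir->num_rsp = 255;		// 0 means unlimited (capped at 255)
 *	err = ioctl(sk, HCIINQUIRY, (unsigned long) buf);
 */
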
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		ret = -ENODEV;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for a valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	set_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_OPEN);

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
		hci_sock_dev_event(hdev, HCI_DEV_SETUP);

		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			ret = __hci_unconf_init(hdev);
	}

	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			ret = __hci_init(hdev);
			if (!ret && hdev->post_init)
				ret = hdev->post_init(hdev);
		}
	}

	/* If the HCI Reset command is clearing all diagnostic settings,
	 * then they need to be reprogrammed after the init procedure
	 * completed.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
		ret = hdev->set_diag(hdev, true);

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		set_bit(HCI_UP, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_UP);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		clear_bit(HCI_RUNNING, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_sync_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

/* This function requires that the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		if (p->conn) {
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
		list_del_init(&p->action);
	}

	BT_DBG("All LE pending actions cleared");
}

int hci_dev_do_close(struct hci_dev *hdev)
{
	bool auto_off;

	BT_DBG("%s %p", hdev->name, hdev);

	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    test_bit(HCI_UP, &hdev->flags)) {
		/* Execute vendor specific shutdown routine */
		if (hdev->shutdown)
			hdev->shutdown(hdev);
	}

	cancel_delayed_work(&hdev->power_off);

	hci_req_sync_cancel(hdev, ENODEV);
	hci_req_sync_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_sync_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);

	if (!auto_off && hdev->dev_type == HCI_BREDR)
		mgmt_powered(hdev, 0);

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	smp_unregister(hdev);

	hci_sock_dev_event(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	clear_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_sync_unlock(hdev);

	hci_request_cancel_all(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

	hci_req_sync_unlock(hdev);
	return ret;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	err = hci_dev_do_reset(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		ret = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

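/* Scan enable values as used by Write Scan Enable: SCAN_DISABLED
 * (0x00), SCAN_INQUIRY (0x01) and SCAN_PAGE (0x02), which may be OR'ed
 * together. The helper below mirrors whatever a legacy HCISETSCAN ioctl
 * programmed into the mgmt connectable and discoverable flags.
 */
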
static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			mgmt_update_adv_data(hdev);

		mgmt_new_settings(hdev);
	}
}

1da177e4
LT
1769int hci_dev_cmd(unsigned int cmd, void __user *arg)
1770{
1771 struct hci_dev *hdev;
1772 struct hci_dev_req dr;
1773 int err = 0;
1774
1775 if (copy_from_user(&dr, arg, sizeof(dr)))
1776 return -EFAULT;
1777
70f23020
AE
1778 hdev = hci_dev_get(dr.dev_id);
1779 if (!hdev)
1da177e4
LT
1780 return -ENODEV;
1781
d7a5a11d 1782 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
0736cfa8
MH
1783 err = -EBUSY;
1784 goto done;
1785 }
1786
d7a5a11d 1787 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
fee746b0
MH
1788 err = -EOPNOTSUPP;
1789 goto done;
1790 }
1791
5b69bef5
MH
1792 if (hdev->dev_type != HCI_BREDR) {
1793 err = -EOPNOTSUPP;
1794 goto done;
1795 }
1796
d7a5a11d 1797 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
56f87901
JH
1798 err = -EOPNOTSUPP;
1799 goto done;
1800 }
1801
1da177e4
LT
1802 switch (cmd) {
1803 case HCISETAUTH:
01178cd4
JH
1804 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1805 HCI_INIT_TIMEOUT);
1da177e4
LT
1806 break;
1807
1808 case HCISETENCRYPT:
1809 if (!lmp_encrypt_capable(hdev)) {
1810 err = -EOPNOTSUPP;
1811 break;
1812 }
1813
1814 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1815 /* Auth must be enabled first */
01178cd4
JH
1816 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1817 HCI_INIT_TIMEOUT);
1da177e4
LT
1818 if (err)
1819 break;
1820 }
1821
01178cd4
JH
1822 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1823 HCI_INIT_TIMEOUT);
1da177e4
LT
1824 break;
1825
1826 case HCISETSCAN:
01178cd4
JH
1827 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1828 HCI_INIT_TIMEOUT);
91a668b0 1829
bc6d2d04
JH
1830 /* Ensure that the connectable and discoverable states
1831 * get correctly modified as this was a non-mgmt change.
91a668b0 1832 */
123abc08
JH
1833 if (!err)
1834 hci_update_scan_state(hdev, dr.dev_opt);
1da177e4
LT
1835 break;
1836
1da177e4 1837 case HCISETLINKPOL:
01178cd4
JH
1838 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1839 HCI_INIT_TIMEOUT);
1da177e4
LT
1840 break;
1841
1842 case HCISETLINKMODE:
e4e8e37c
MH
1843 hdev->link_mode = ((__u16) dr.dev_opt) &
1844 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1845 break;
1846
1847 case HCISETPTYPE:
1848 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
1849 break;
1850
1851 case HCISETACLMTU:
e4e8e37c
MH
1852 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1853 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
1854 break;
1855
1856 case HCISETSCOMTU:
e4e8e37c
MH
1857 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1858 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
1859 break;
1860
1861 default:
1862 err = -EINVAL;
1863 break;
1864 }
e4e8e37c 1865
0736cfa8 1866done:
1da177e4
LT
1867 hci_dev_put(hdev);
1868 return err;
1869}
1870
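/*
 * Illustrative sketch (editor's addition): the HCISETSCAN case above is
 * what hciconfig's "piscan" command exercises. Assumes the BlueZ
 * userspace headers and an already open raw HCI socket sk:
 */
static int example_set_piscan(int sk, int dev_id)
{
	struct hci_dev_req dr;

	dr.dev_id  = dev_id;
	dr.dev_opt = SCAN_PAGE | SCAN_INQUIRY; /* connectable + discoverable */

	/* On success, hci_update_scan_state() above sets HCI_CONNECTABLE
	 * and HCI_DISCOVERABLE to match the new scan mode.
	 */
	return ioctl(sk, HCISETSCAN, (unsigned long) &dr);
}
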
1871int hci_get_dev_list(void __user *arg)
1872{
8035ded4 1873 struct hci_dev *hdev;
1da177e4
LT
1874 struct hci_dev_list_req *dl;
1875 struct hci_dev_req *dr;
1da177e4
LT
1876 int n = 0, size, err;
1877 __u16 dev_num;
1878
1879 if (get_user(dev_num, (__u16 __user *) arg))
1880 return -EFAULT;
1881
1882 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1883 return -EINVAL;
1884
1885 size = sizeof(*dl) + dev_num * sizeof(*dr);
1886
70f23020
AE
1887 dl = kzalloc(size, GFP_KERNEL);
1888 if (!dl)
1da177e4
LT
1889 return -ENOMEM;
1890
1891 dr = dl->dev_req;
1892
f20d09d5 1893 read_lock(&hci_dev_list_lock);
8035ded4 1894 list_for_each_entry(hdev, &hci_dev_list, list) {
2e84d8db 1895 unsigned long flags = hdev->flags;
c542a06c 1896
2e84d8db
MH
1897 /* When auto-off is configured, the transport is actually
1898 * running, but the device should still be reported as
1899 * down in that case.
1900 */
d7a5a11d 1901 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2e84d8db 1902 flags &= ~BIT(HCI_UP);
c542a06c 1903
1da177e4 1904 (dr + n)->dev_id = hdev->id;
2e84d8db 1905 (dr + n)->dev_opt = flags;
c542a06c 1906
1da177e4
LT
1907 if (++n >= dev_num)
1908 break;
1909 }
f20d09d5 1910 read_unlock(&hci_dev_list_lock);
1da177e4
LT
1911
1912 dl->dev_num = n;
1913 size = sizeof(*dl) + n * sizeof(*dr);
1914
1915 err = copy_to_user(arg, dl, size);
1916 kfree(dl);
1917
1918 return err ? -EFAULT : 0;
1919}
1920
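/*
 * Illustrative sketch (editor's addition): enumerating controllers via
 * HCIGETDEVLIST, the ioctl served by the code above. Assumes the BlueZ
 * userspace headers (where HCI_MAX_DEV is 16) plus stdio/stdlib:
 */
static void example_list_devs(int sk)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int i;

	dl = malloc(sizeof(*dl) + HCI_MAX_DEV * sizeof(*dr));
	if (!dl)
		return;

	dl->dev_num = HCI_MAX_DEV;
	dr = dl->dev_req;

	if (ioctl(sk, HCIGETDEVLIST, (void *) dl) == 0)
		for (i = 0; i < dl->dev_num; i++)
			printf("hci%u flags 0x%08x\n",
			       dr[i].dev_id, dr[i].dev_opt);

	free(dl);
}
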
1921int hci_get_dev_info(void __user *arg)
1922{
1923 struct hci_dev *hdev;
1924 struct hci_dev_info di;
2e84d8db 1925 unsigned long flags;
1da177e4
LT
1926 int err = 0;
1927
1928 if (copy_from_user(&di, arg, sizeof(di)))
1929 return -EFAULT;
1930
70f23020
AE
1931 hdev = hci_dev_get(di.dev_id);
1932 if (!hdev)
1da177e4
LT
1933 return -ENODEV;
1934
2e84d8db
MH
1935 /* When auto-off is configured, the transport is actually
1936 * running, but the device should still be reported as
1937 * down in that case.
1938 */
d7a5a11d 1939 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2e84d8db
MH
1940 flags = hdev->flags & ~BIT(HCI_UP);
1941 else
1942 flags = hdev->flags;
c542a06c 1943
1da177e4
LT
1944 strcpy(di.name, hdev->name);
1945 di.bdaddr = hdev->bdaddr;
60f2a3ed 1946 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2e84d8db 1947 di.flags = flags;
1da177e4 1948 di.pkt_type = hdev->pkt_type;
572c7f84
JH
1949 if (lmp_bredr_capable(hdev)) {
1950 di.acl_mtu = hdev->acl_mtu;
1951 di.acl_pkts = hdev->acl_pkts;
1952 di.sco_mtu = hdev->sco_mtu;
1953 di.sco_pkts = hdev->sco_pkts;
1954 } else {
1955 di.acl_mtu = hdev->le_mtu;
1956 di.acl_pkts = hdev->le_pkts;
1957 di.sco_mtu = 0;
1958 di.sco_pkts = 0;
1959 }
1da177e4
LT
1960 di.link_policy = hdev->link_policy;
1961 di.link_mode = hdev->link_mode;
1962
1963 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1964 memcpy(&di.features, &hdev->features, sizeof(di.features));
1965
1966 if (copy_to_user(arg, &di, sizeof(di)))
1967 err = -EFAULT;
1968
1969 hci_dev_put(hdev);
1970
1971 return err;
1972}
1973
1974/* ---- Interface to HCI drivers ---- */
1975
611b30f7
MH
1976static int hci_rfkill_set_block(void *data, bool blocked)
1977{
1978 struct hci_dev *hdev = data;
1979
1980 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1981
d7a5a11d 1982 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
0736cfa8
MH
1983 return -EBUSY;
1984
5e130367 1985 if (blocked) {
a1536da2 1986 hci_dev_set_flag(hdev, HCI_RFKILLED);
d7a5a11d
MH
1987 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
1988 !hci_dev_test_flag(hdev, HCI_CONFIG))
bf543036 1989 hci_dev_do_close(hdev);
5e130367 1990 } else {
a358dc11 1991 hci_dev_clear_flag(hdev, HCI_RFKILLED);
1025c04c 1992 }
611b30f7
MH
1993
1994 return 0;
1995}
1996
1997static const struct rfkill_ops hci_rfkill_ops = {
1998 .set_block = hci_rfkill_set_block,
1999};
2000
ab81cbf9
JH
2001static void hci_power_on(struct work_struct *work)
2002{
2003 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
96570ffc 2004 int err;
ab81cbf9
JH
2005
2006 BT_DBG("%s", hdev->name);
2007
cbed0ca1 2008 err = hci_dev_do_open(hdev);
96570ffc 2009 if (err < 0) {
3ad67582 2010 hci_dev_lock(hdev);
96570ffc 2011 mgmt_set_powered_failed(hdev, err);
3ad67582 2012 hci_dev_unlock(hdev);
ab81cbf9 2013 return;
96570ffc 2014 }
ab81cbf9 2015
a5c8f270
MH
2016 /* During the HCI setup phase, a few error conditions are
2017 * ignored and they need to be checked now. If they are still
2018 * valid, it is important to turn the device back off.
2019 */
d7a5a11d
MH
2020 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2021 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
a5c8f270
MH
2022 (hdev->dev_type == HCI_BREDR &&
2023 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2024 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
a358dc11 2025 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
bf543036 2026 hci_dev_do_close(hdev);
d7a5a11d 2027 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
19202573
JH
2028 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2029 HCI_AUTO_OFF_TIMEOUT);
bf543036 2030 }
ab81cbf9 2031
a69d8927 2032 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
4a964404
MH
2033 /* For unconfigured devices, set the HCI_RAW flag
2034 * so that userspace can easily identify them.
4a964404 2035 */
d7a5a11d 2036 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
4a964404 2037 set_bit(HCI_RAW, &hdev->flags);
0602a8ad
MH
2038
2039 /* For fully configured devices, this will send
2040 * the Index Added event. For unconfigured devices,
2041 * it will send the Unconfigured Index Added event.
2042 *
2043 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2044 * and no event will be sent.
2045 */
2046 mgmt_index_added(hdev);
a69d8927 2047 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
5ea234d3
MH
2048 /* When the controller is now configured, then it
2049 * is important to clear the HCI_RAW flag.
2050 */
d7a5a11d 2051 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
5ea234d3
MH
2052 clear_bit(HCI_RAW, &hdev->flags);
2053
d603b76b
MH
2054 /* Powering on the controller with HCI_CONFIG set only
2055 * happens with the transition from unconfigured to
2056 * configured. This will send the Index Added event.
2057 */
744cf19e 2058 mgmt_index_added(hdev);
fee746b0 2059 }
ab81cbf9
JH
2060}
2061
2062static void hci_power_off(struct work_struct *work)
2063{
3243553f 2064 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 2065 power_off.work);
ab81cbf9
JH
2066
2067 BT_DBG("%s", hdev->name);
2068
8ee56540 2069 hci_dev_do_close(hdev);
ab81cbf9
JH
2070}
2071
c7741d16
MH
2072static void hci_error_reset(struct work_struct *work)
2073{
2074 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2075
2076 BT_DBG("%s", hdev->name);
2077
2078 if (hdev->hw_error)
2079 hdev->hw_error(hdev, hdev->hw_error_code);
2080 else
2081 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2082 hdev->hw_error_code);
2083
2084 if (hci_dev_do_close(hdev))
2085 return;
2086
c7741d16
MH
2087 hci_dev_do_open(hdev);
2088}
2089
16ab91ab
JH
2090static void hci_discov_off(struct work_struct *work)
2091{
2092 struct hci_dev *hdev;
16ab91ab
JH
2093
2094 hdev = container_of(work, struct hci_dev, discov_off.work);
2095
2096 BT_DBG("%s", hdev->name);
2097
d1967ff8 2098 mgmt_discoverable_timeout(hdev);
16ab91ab
JH
2099}
2100
5d900e46
FG
2101static void hci_adv_timeout_expire(struct work_struct *work)
2102{
2103 struct hci_dev *hdev;
2104
2105 hdev = container_of(work, struct hci_dev, adv_instance_expire.work);
2106
2107 BT_DBG("%s", hdev->name);
2108
2109 mgmt_adv_timeout_expired(hdev);
2110}
2111
35f7498a 2112void hci_uuids_clear(struct hci_dev *hdev)
2aeb9a1a 2113{
4821002c 2114 struct bt_uuid *uuid, *tmp;
2aeb9a1a 2115
4821002c
JH
2116 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2117 list_del(&uuid->list);
2aeb9a1a
JH
2118 kfree(uuid);
2119 }
2aeb9a1a
JH
2120}
2121
35f7498a 2122void hci_link_keys_clear(struct hci_dev *hdev)
55ed8ca1 2123{
0378b597 2124 struct link_key *key;
55ed8ca1 2125
0378b597
JH
2126 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2127 list_del_rcu(&key->list);
2128 kfree_rcu(key, rcu);
55ed8ca1 2129 }
55ed8ca1
JH
2130}
2131
35f7498a 2132void hci_smp_ltks_clear(struct hci_dev *hdev)
b899efaf 2133{
970d0f1b 2134 struct smp_ltk *k;
b899efaf 2135
970d0f1b
JH
2136 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2137 list_del_rcu(&k->list);
2138 kfree_rcu(k, rcu);
b899efaf 2139 }
b899efaf
VCG
2140}
2141
970c4e46
JH
2142void hci_smp_irks_clear(struct hci_dev *hdev)
2143{
adae20cb 2144 struct smp_irk *k;
970c4e46 2145
adae20cb
JH
2146 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2147 list_del_rcu(&k->list);
2148 kfree_rcu(k, rcu);
970c4e46
JH
2149 }
2150}
2151
55ed8ca1
JH
2152struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2153{
8035ded4 2154 struct link_key *k;
55ed8ca1 2155
0378b597
JH
2156 rcu_read_lock();
2157 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2158 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2159 rcu_read_unlock();
55ed8ca1 2160 return k;
0378b597
JH
2161 }
2162 }
2163 rcu_read_unlock();
55ed8ca1
JH
2164
2165 return NULL;
2166}
2167
745c0ce3 2168static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 2169 u8 key_type, u8 old_key_type)
d25e28ab
JH
2170{
2171 /* Legacy key */
2172 if (key_type < 0x03)
745c0ce3 2173 return true;
d25e28ab
JH
2174
2175 /* Debug keys are insecure so don't store them persistently */
2176 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 2177 return false;
d25e28ab
JH
2178
2179 /* Changed combination key and there's no previous one */
2180 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 2181 return false;
d25e28ab
JH
2182
2183 /* Security mode 3 case */
2184 if (!conn)
745c0ce3 2185 return true;
d25e28ab 2186
e3befab9
JH
2187 /* BR/EDR key derived using SC from an LE link */
2188 if (conn->type == LE_LINK)
2189 return true;
2190
d25e28ab
JH
2191 /* Neither local nor remote side had no-bonding as requirement */
2192 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 2193 return true;
d25e28ab
JH
2194
2195 /* Local side had dedicated bonding as requirement */
2196 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 2197 return true;
d25e28ab
JH
2198
2199 /* Remote side had dedicated bonding as requirement */
2200 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 2201 return true;
d25e28ab
JH
2202
2203 /* If none of the above criteria match, then don't store the key
2204 * persistently */
745c0ce3 2205 return false;
d25e28ab
JH
2206}
2207
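/*
 * Editor's summary of hci_persistent_key() above, derived directly from
 * the checks in the code:
 *
 *	key / pairing situation				stored persistently?
 *	------------------------------------------------------------------
 *	legacy key (type < 0x03)			yes
 *	debug combination key				no
 *	changed combination key, no previous key	no
 *	no connection (security mode 3)			yes
 *	BR/EDR key derived via SC from an LE link	yes
 *	both sides required some form of bonding	yes
 *	either side required dedicated bonding		yes
 *	anything else					no
 */
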
e804d25d 2208static u8 ltk_role(u8 type)
98a0b845 2209{
e804d25d
JH
2210 if (type == SMP_LTK)
2211 return HCI_ROLE_MASTER;
98a0b845 2212
e804d25d 2213 return HCI_ROLE_SLAVE;
98a0b845
JH
2214}
2215
f3a73d97
JH
2216struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2217 u8 addr_type, u8 role)
75d262c2 2218{
c9839a11 2219 struct smp_ltk *k;
75d262c2 2220
970d0f1b
JH
2221 rcu_read_lock();
2222 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
5378bc56
JH
2223 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2224 continue;
2225
923e2414 2226 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
970d0f1b 2227 rcu_read_unlock();
75d262c2 2228 return k;
970d0f1b
JH
2229 }
2230 }
2231 rcu_read_unlock();
75d262c2
VCG
2232
2233 return NULL;
2234}
75d262c2 2235
970c4e46
JH
2236struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2237{
2238 struct smp_irk *irk;
2239
adae20cb
JH
2240 rcu_read_lock();
2241 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2242 if (!bacmp(&irk->rpa, rpa)) {
2243 rcu_read_unlock();
970c4e46 2244 return irk;
adae20cb 2245 }
970c4e46
JH
2246 }
2247
adae20cb 2248 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
defce9e8 2249 if (smp_irk_matches(hdev, irk->val, rpa)) {
970c4e46 2250 bacpy(&irk->rpa, rpa);
adae20cb 2251 rcu_read_unlock();
970c4e46
JH
2252 return irk;
2253 }
2254 }
adae20cb 2255 rcu_read_unlock();
970c4e46
JH
2256
2257 return NULL;
2258}
2259
2260struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2261 u8 addr_type)
2262{
2263 struct smp_irk *irk;
2264
6cfc9988
JH
2265 /* Identity Address must be public or static random */
2266 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2267 return NULL;
2268
adae20cb
JH
2269 rcu_read_lock();
2270 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
970c4e46 2271 if (addr_type == irk->addr_type &&
adae20cb
JH
2272 bacmp(bdaddr, &irk->bdaddr) == 0) {
2273 rcu_read_unlock();
970c4e46 2274 return irk;
adae20cb 2275 }
970c4e46 2276 }
adae20cb 2277 rcu_read_unlock();
970c4e46
JH
2278
2279 return NULL;
2280}
2281
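/*
 * Editor's note on the check in hci_find_irk_by_addr(): bdaddr_t is
 * stored little-endian, so b[5] is the most significant address byte.
 * Per the Bluetooth Core spec, a static random address has its two top
 * bits set to 1. A hypothetical helper spelling that out:
 */
static inline bool example_is_static_random(const bdaddr_t *bdaddr)
{
	/* 0b11xxxxxx in the most significant byte */
	return (bdaddr->b[5] & 0xc0) == 0xc0;
}
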
567fa2aa 2282struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
7652ff6a
JH
2283 bdaddr_t *bdaddr, u8 *val, u8 type,
2284 u8 pin_len, bool *persistent)
55ed8ca1
JH
2285{
2286 struct link_key *key, *old_key;
745c0ce3 2287 u8 old_key_type;
55ed8ca1
JH
2288
2289 old_key = hci_find_link_key(hdev, bdaddr);
2290 if (old_key) {
2291 old_key_type = old_key->type;
2292 key = old_key;
2293 } else {
12adcf3a 2294 old_key_type = conn ? conn->key_type : 0xff;
0a14ab41 2295 key = kzalloc(sizeof(*key), GFP_KERNEL);
55ed8ca1 2296 if (!key)
567fa2aa 2297 return NULL;
0378b597 2298 list_add_rcu(&key->list, &hdev->link_keys);
55ed8ca1
JH
2299 }
2300
6ed93dc6 2301 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 2302
d25e28ab
JH
2303 /* Some buggy controller combinations generate a changed
2304 * combination key for legacy pairing even when there's no
2305 * previous key */
2306 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 2307 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 2308 type = HCI_LK_COMBINATION;
655fe6ec
JH
2309 if (conn)
2310 conn->key_type = type;
2311 }
d25e28ab 2312
55ed8ca1 2313 bacpy(&key->bdaddr, bdaddr);
9b3b4460 2314 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
2315 key->pin_len = pin_len;
2316
b6020ba0 2317 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 2318 key->type = old_key_type;
4748fed2
JH
2319 else
2320 key->type = type;
2321
7652ff6a
JH
2322 if (persistent)
2323 *persistent = hci_persistent_key(hdev, conn, type,
2324 old_key_type);
4df378a1 2325
567fa2aa 2326 return key;
55ed8ca1
JH
2327}
2328
ca9142b8 2329struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
35d70271 2330 u8 addr_type, u8 type, u8 authenticated,
fe39c7b2 2331 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
75d262c2 2332{
c9839a11 2333 struct smp_ltk *key, *old_key;
e804d25d 2334 u8 role = ltk_role(type);
75d262c2 2335
f3a73d97 2336 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
c9839a11 2337 if (old_key)
75d262c2 2338 key = old_key;
c9839a11 2339 else {
0a14ab41 2340 key = kzalloc(sizeof(*key), GFP_KERNEL);
75d262c2 2341 if (!key)
ca9142b8 2342 return NULL;
970d0f1b 2343 list_add_rcu(&key->list, &hdev->long_term_keys);
75d262c2
VCG
2344 }
2345
75d262c2 2346 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
2347 key->bdaddr_type = addr_type;
2348 memcpy(key->val, tk, sizeof(key->val));
2349 key->authenticated = authenticated;
2350 key->ediv = ediv;
fe39c7b2 2351 key->rand = rand;
c9839a11
VCG
2352 key->enc_size = enc_size;
2353 key->type = type;
75d262c2 2354
ca9142b8 2355 return key;
75d262c2
VCG
2356}
2357
ca9142b8
JH
2358struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2359 u8 addr_type, u8 val[16], bdaddr_t *rpa)
970c4e46
JH
2360{
2361 struct smp_irk *irk;
2362
2363 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2364 if (!irk) {
2365 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2366 if (!irk)
ca9142b8 2367 return NULL;
970c4e46
JH
2368
2369 bacpy(&irk->bdaddr, bdaddr);
2370 irk->addr_type = addr_type;
2371
adae20cb 2372 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
970c4e46
JH
2373 }
2374
2375 memcpy(irk->val, val, 16);
2376 bacpy(&irk->rpa, rpa);
2377
ca9142b8 2378 return irk;
970c4e46
JH
2379}
2380
55ed8ca1
JH
2381int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2382{
2383 struct link_key *key;
2384
2385 key = hci_find_link_key(hdev, bdaddr);
2386 if (!key)
2387 return -ENOENT;
2388
6ed93dc6 2389 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1 2390
0378b597
JH
2391 list_del_rcu(&key->list);
2392 kfree_rcu(key, rcu);
55ed8ca1
JH
2393
2394 return 0;
2395}
2396
e0b2b27e 2397int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
b899efaf 2398{
970d0f1b 2399 struct smp_ltk *k;
c51ffa0b 2400 int removed = 0;
b899efaf 2401
970d0f1b 2402 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
e0b2b27e 2403 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
b899efaf
VCG
2404 continue;
2405
6ed93dc6 2406 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf 2407
970d0f1b
JH
2408 list_del_rcu(&k->list);
2409 kfree_rcu(k, rcu);
c51ffa0b 2410 removed++;
b899efaf
VCG
2411 }
2412
c51ffa0b 2413 return removed ? 0 : -ENOENT;
b899efaf
VCG
2414}
2415
a7ec7338
JH
2416void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2417{
adae20cb 2418 struct smp_irk *k;
a7ec7338 2419
adae20cb 2420 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
a7ec7338
JH
2421 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2422 continue;
2423
2424 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2425
adae20cb
JH
2426 list_del_rcu(&k->list);
2427 kfree_rcu(k, rcu);
a7ec7338
JH
2428 }
2429}
2430
55e76b38
JH
2431bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2432{
2433 struct smp_ltk *k;
4ba9faf3 2434 struct smp_irk *irk;
55e76b38
JH
2435 u8 addr_type;
2436
2437 if (type == BDADDR_BREDR) {
2438 if (hci_find_link_key(hdev, bdaddr))
2439 return true;
2440 return false;
2441 }
2442
2443 /* Convert to HCI addr type which struct smp_ltk uses */
2444 if (type == BDADDR_LE_PUBLIC)
2445 addr_type = ADDR_LE_DEV_PUBLIC;
2446 else
2447 addr_type = ADDR_LE_DEV_RANDOM;
2448
4ba9faf3
JH
2449 irk = hci_get_irk(hdev, bdaddr, addr_type);
2450 if (irk) {
2451 bdaddr = &irk->bdaddr;
2452 addr_type = irk->addr_type;
2453 }
2454
55e76b38
JH
2455 rcu_read_lock();
2456 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
87c8b28d
JH
2457 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2458 rcu_read_unlock();
55e76b38 2459 return true;
87c8b28d 2460 }
55e76b38
JH
2461 }
2462 rcu_read_unlock();
2463
2464 return false;
2465}
2466
6bd32326 2467/* HCI command timer function */
65cc2b49 2468static void hci_cmd_timeout(struct work_struct *work)
6bd32326 2469{
65cc2b49
MH
2470 struct hci_dev *hdev = container_of(work, struct hci_dev,
2471 cmd_timer.work);
6bd32326 2472
bda4f23a
AE
2473 if (hdev->sent_cmd) {
2474 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2475 u16 opcode = __le16_to_cpu(sent->opcode);
2476
2477 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2478 } else {
2479 BT_ERR("%s command tx timeout", hdev->name);
2480 }
2481
6bd32326 2482 atomic_set(&hdev->cmd_cnt, 1);
c347b765 2483 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
2484}
2485
2763eda6 2486struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
6928a924 2487 bdaddr_t *bdaddr, u8 bdaddr_type)
2763eda6
SJ
2488{
2489 struct oob_data *data;
2490
6928a924
JH
2491 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2492 if (bacmp(bdaddr, &data->bdaddr) != 0)
2493 continue;
2494 if (data->bdaddr_type != bdaddr_type)
2495 continue;
2496 return data;
2497 }
2763eda6
SJ
2498
2499 return NULL;
2500}
2501
6928a924
JH
2502int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2503 u8 bdaddr_type)
2763eda6
SJ
2504{
2505 struct oob_data *data;
2506
6928a924 2507 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2763eda6
SJ
2508 if (!data)
2509 return -ENOENT;
2510
6928a924 2511 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2763eda6
SJ
2512
2513 list_del(&data->list);
2514 kfree(data);
2515
2516 return 0;
2517}
2518
35f7498a 2519void hci_remote_oob_data_clear(struct hci_dev *hdev)
2763eda6
SJ
2520{
2521 struct oob_data *data, *n;
2522
2523 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2524 list_del(&data->list);
2525 kfree(data);
2526 }
2763eda6
SJ
2527}
2528
0798872e 2529int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
6928a924 2530 u8 bdaddr_type, u8 *hash192, u8 *rand192,
81328d5c 2531 u8 *hash256, u8 *rand256)
2763eda6
SJ
2532{
2533 struct oob_data *data;
2534
6928a924 2535 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2763eda6 2536 if (!data) {
0a14ab41 2537 data = kmalloc(sizeof(*data), GFP_KERNEL);
2763eda6
SJ
2538 if (!data)
2539 return -ENOMEM;
2540
2541 bacpy(&data->bdaddr, bdaddr);
6928a924 2542 data->bdaddr_type = bdaddr_type;
2763eda6
SJ
2543 list_add(&data->list, &hdev->remote_oob_data);
2544 }
2545
81328d5c
JH
2546 if (hash192 && rand192) {
2547 memcpy(data->hash192, hash192, sizeof(data->hash192));
2548 memcpy(data->rand192, rand192, sizeof(data->rand192));
f7697b16
MH
2549 if (hash256 && rand256)
2550 data->present = 0x03;
81328d5c
JH
2551 } else {
2552 memset(data->hash192, 0, sizeof(data->hash192));
2553 memset(data->rand192, 0, sizeof(data->rand192));
f7697b16
MH
2554 if (hash256 && rand256)
2555 data->present = 0x02;
2556 else
2557 data->present = 0x00;
0798872e
MH
2558 }
2559
81328d5c
JH
2560 if (hash256 && rand256) {
2561 memcpy(data->hash256, hash256, sizeof(data->hash256));
2562 memcpy(data->rand256, rand256, sizeof(data->rand256));
2563 } else {
2564 memset(data->hash256, 0, sizeof(data->hash256));
2565 memset(data->rand256, 0, sizeof(data->rand256));
f7697b16
MH
2566 if (hash192 && rand192)
2567 data->present = 0x01;
81328d5c 2568 }
0798872e 2569
6ed93dc6 2570 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
2571
2572 return 0;
2573}
2574
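/*
 * Editor's note: data->present above ends up as a two-bit mask; bit 0
 * means valid P-192 hash/rand, bit 1 means valid P-256 hash/rand. A
 * hypothetical helper computing the same mask in one place:
 */
static u8 example_oob_present(bool have192, bool have256)
{
	u8 present = 0x00;

	if (have192)
		present |= 0x01;	/* P-192 values stored */
	if (have256)
		present |= 0x02;	/* P-256 values stored */

	return present;
}
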
d2609b34
FG
2575/* This function requires the caller holds hdev->lock */
2576struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2577{
2578 struct adv_info *adv_instance;
2579
2580 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2581 if (adv_instance->instance == instance)
2582 return adv_instance;
2583 }
2584
2585 return NULL;
2586}
2587
2588/* This function requires the caller holds hdev->lock */
2589struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
{
2590 struct adv_info *cur_instance;
2591
2592 cur_instance = hci_find_adv_instance(hdev, instance);
2593 if (!cur_instance)
2594 return NULL;
2595
2596 if (cur_instance == list_last_entry(&hdev->adv_instances,
2597 struct adv_info, list))
2598 return list_first_entry(&hdev->adv_instances,
2599 struct adv_info, list);
2600 else
2601 return list_next_entry(cur_instance, list);
2602}
2603
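/*
 * Editor's sketch: hci_get_next_instance() treats hdev->adv_instances
 * as a ring, wrapping from the last entry back to the first. A
 * hypothetical round-robin walk (caller must hold hdev->lock):
 */
static void example_walk_instances(struct hci_dev *hdev, u8 start)
{
	struct adv_info *adv = hci_find_adv_instance(hdev, start);

	while (adv) {
		BT_DBG("visiting advertising instance %u", adv->instance);

		adv = hci_get_next_instance(hdev, adv->instance);
		if (!adv || adv->instance == start)
			break;	/* wrapped back around to the start */
	}
}
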
2604/* This function requires the caller holds hdev->lock */
2605int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2606{
2607 struct adv_info *adv_instance;
2608
2609 adv_instance = hci_find_adv_instance(hdev, instance);
2610 if (!adv_instance)
2611 return -ENOENT;
2612
2613 BT_DBG("%s removing %d", hdev->name, instance);
2614
5d900e46
FG
2615 if (hdev->cur_adv_instance == instance && hdev->adv_instance_timeout) {
2616 cancel_delayed_work(&hdev->adv_instance_expire);
2617 hdev->adv_instance_timeout = 0;
2618 }
2619
d2609b34
FG
2620 list_del(&adv_instance->list);
2621 kfree(adv_instance);
2622
2623 hdev->adv_instance_cnt--;
2624
2625 return 0;
2626}
2627
2628/* This function requires the caller holds hdev->lock */
2629void hci_adv_instances_clear(struct hci_dev *hdev)
2630{
2631 struct adv_info *adv_instance, *n;
2632
5d900e46
FG
2633 if (hdev->adv_instance_timeout) {
2634 cancel_delayed_work(&hdev->adv_instance_expire);
2635 hdev->adv_instance_timeout = 0;
2636 }
2637
d2609b34
FG
2638 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2639 list_del(&adv_instance->list);
2640 kfree(adv_instance);
2641 }
2642
2643 hdev->adv_instance_cnt = 0;
2644}
2645
2646/* This function requires the caller holds hdev->lock */
2647int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2648 u16 adv_data_len, u8 *adv_data,
2649 u16 scan_rsp_len, u8 *scan_rsp_data,
2650 u16 timeout, u16 duration)
2651{
2652 struct adv_info *adv_instance;
2653
2654 adv_instance = hci_find_adv_instance(hdev, instance);
2655 if (adv_instance) {
2656 memset(adv_instance->adv_data, 0,
2657 sizeof(adv_instance->adv_data));
2658 memset(adv_instance->scan_rsp_data, 0,
2659 sizeof(adv_instance->scan_rsp_data));
2660 } else {
2661 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2662 instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2663 return -EOVERFLOW;
2664
39ecfad6 2665 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
d2609b34
FG
2666 if (!adv_instance)
2667 return -ENOMEM;
2668
fffd38bc 2669 adv_instance->pending = true;
d2609b34
FG
2670 adv_instance->instance = instance;
2671 list_add(&adv_instance->list, &hdev->adv_instances);
2672 hdev->adv_instance_cnt++;
2673 }
2674
2675 adv_instance->flags = flags;
2676 adv_instance->adv_data_len = adv_data_len;
2677 adv_instance->scan_rsp_len = scan_rsp_len;
2678
2679 if (adv_data_len)
2680 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2681
2682 if (scan_rsp_len)
2683 memcpy(adv_instance->scan_rsp_data,
2684 scan_rsp_data, scan_rsp_len);
2685
2686 adv_instance->timeout = timeout;
5d900e46 2687 adv_instance->remaining_time = timeout;
d2609b34
FG
2688
2689 if (duration == 0)
2690 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2691 else
2692 adv_instance->duration = duration;
2693
2694 BT_DBG("%s for %d", hdev->name, instance);
2695
2696 return 0;
2697}
2698
dcc36c16 2699struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
b9ee0a78 2700 bdaddr_t *bdaddr, u8 type)
b2a66aad 2701{
8035ded4 2702 struct bdaddr_list *b;
b2a66aad 2703
dcc36c16 2704 list_for_each_entry(b, bdaddr_list, list) {
b9ee0a78 2705 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 2706 return b;
b9ee0a78 2707 }
b2a66aad
AJ
2708
2709 return NULL;
2710}
2711
dcc36c16 2712void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
b2a66aad
AJ
2713{
2714 struct list_head *p, *n;
2715
dcc36c16 2716 list_for_each_safe(p, n, bdaddr_list) {
b9ee0a78 2717 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
b2a66aad
AJ
2718
2719 list_del(p);
2720 kfree(b);
2721 }
b2a66aad
AJ
2722}
2723
dcc36c16 2724int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2725{
2726 struct bdaddr_list *entry;
b2a66aad 2727
b9ee0a78 2728 if (!bacmp(bdaddr, BDADDR_ANY))
b2a66aad
AJ
2729 return -EBADF;
2730
dcc36c16 2731 if (hci_bdaddr_list_lookup(list, bdaddr, type))
5e762444 2732 return -EEXIST;
b2a66aad 2733
27f70f3e 2734 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
5e762444
AJ
2735 if (!entry)
2736 return -ENOMEM;
b2a66aad
AJ
2737
2738 bacpy(&entry->bdaddr, bdaddr);
b9ee0a78 2739 entry->bdaddr_type = type;
b2a66aad 2740
dcc36c16 2741 list_add(&entry->list, list);
b2a66aad 2742
2a8357f2 2743 return 0;
b2a66aad
AJ
2744}
2745
dcc36c16 2746int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2747{
2748 struct bdaddr_list *entry;
b2a66aad 2749
35f7498a 2750 if (!bacmp(bdaddr, BDADDR_ANY)) {
dcc36c16 2751 hci_bdaddr_list_clear(list);
35f7498a
JH
2752 return 0;
2753 }
b2a66aad 2754
dcc36c16 2755 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
d2ab0ac1
MH
2756 if (!entry)
2757 return -ENOENT;
2758
2759 list_del(&entry->list);
2760 kfree(entry);
2761
2762 return 0;
2763}
2764
15819a70
AG
2765/* This function requires the caller holds hdev->lock */
2766struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2767 bdaddr_t *addr, u8 addr_type)
2768{
2769 struct hci_conn_params *params;
2770
2771 list_for_each_entry(params, &hdev->le_conn_params, list) {
2772 if (bacmp(&params->addr, addr) == 0 &&
2773 params->addr_type == addr_type) {
2774 return params;
2775 }
2776 }
2777
2778 return NULL;
2779}
2780
4b10966f 2781/* This function requires the caller holds hdev->lock */
501f8827
JH
2782struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2783 bdaddr_t *addr, u8 addr_type)
a9b0a04c 2784{
912b42ef 2785 struct hci_conn_params *param;
a9b0a04c 2786
501f8827 2787 list_for_each_entry(param, list, action) {
912b42ef
JH
2788 if (bacmp(&param->addr, addr) == 0 &&
2789 param->addr_type == addr_type)
2790 return param;
4b10966f
MH
2791 }
2792
2793 return NULL;
a9b0a04c
AG
2794}
2795
15819a70 2796/* This function requires the caller holds hdev->lock */
51d167c0
MH
2797struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2798 bdaddr_t *addr, u8 addr_type)
15819a70
AG
2799{
2800 struct hci_conn_params *params;
2801
2802 params = hci_conn_params_lookup(hdev, addr, addr_type);
cef952ce 2803 if (params)
51d167c0 2804 return params;
15819a70
AG
2805
2806 params = kzalloc(sizeof(*params), GFP_KERNEL);
2807 if (!params) {
2808 BT_ERR("Out of memory");
51d167c0 2809 return NULL;
15819a70
AG
2810 }
2811
2812 bacpy(&params->addr, addr);
2813 params->addr_type = addr_type;
cef952ce
AG
2814
2815 list_add(&params->list, &hdev->le_conn_params);
93450c75 2816 INIT_LIST_HEAD(&params->action);
cef952ce 2817
bf5b3c8b
MH
2818 params->conn_min_interval = hdev->le_conn_min_interval;
2819 params->conn_max_interval = hdev->le_conn_max_interval;
2820 params->conn_latency = hdev->le_conn_latency;
2821 params->supervision_timeout = hdev->le_supv_timeout;
2822 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2823
2824 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2825
51d167c0 2826 return params;
bf5b3c8b
MH
2827}
2828
f6c63249 2829static void hci_conn_params_free(struct hci_conn_params *params)
15819a70 2830{
f8aaf9b6 2831 if (params->conn) {
f161dd41 2832 hci_conn_drop(params->conn);
f8aaf9b6
JH
2833 hci_conn_put(params->conn);
2834 }
f161dd41 2835
95305baa 2836 list_del(&params->action);
15819a70
AG
2837 list_del(&params->list);
2838 kfree(params);
f6c63249
JH
2839}
2840
2841/* This function requires the caller holds hdev->lock */
2842void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2843{
2844 struct hci_conn_params *params;
2845
2846 params = hci_conn_params_lookup(hdev, addr, addr_type);
2847 if (!params)
2848 return;
2849
2850 hci_conn_params_free(params);
15819a70 2851
95305baa
JH
2852 hci_update_background_scan(hdev);
2853
15819a70
AG
2854 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2855}
2856
2857/* This function requires the caller holds hdev->lock */
55af49a8 2858void hci_conn_params_clear_disabled(struct hci_dev *hdev)
15819a70
AG
2859{
2860 struct hci_conn_params *params, *tmp;
2861
2862 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
55af49a8
JH
2863 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2864 continue;
f75113a2
JP
2865
2866 /* If trying to establish a one-time connection to a disabled
2867 * device, leave the params but mark them as explicit-connect only.
2868 */
2869 if (params->explicit_connect) {
2870 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2871 continue;
2872 }
2873
15819a70
AG
2874 list_del(&params->list);
2875 kfree(params);
2876 }
2877
55af49a8 2878 BT_DBG("All LE disabled connection parameters were removed");
77a77a30
AG
2879}
2880
2881/* This function requires the caller holds hdev->lock */
030e7f81 2882static void hci_conn_params_clear_all(struct hci_dev *hdev)
77a77a30 2883{
15819a70 2884 struct hci_conn_params *params, *tmp;
77a77a30 2885
f6c63249
JH
2886 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2887 hci_conn_params_free(params);
77a77a30 2888
15819a70 2889 BT_DBG("All LE connection parameters were removed");
77a77a30
AG
2890}
2891
1904a853 2892static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7ba8b4be 2893{
4c87eaab
AG
2894 if (status) {
2895 BT_ERR("Failed to start inquiry: status %d", status);
7ba8b4be 2896
4c87eaab
AG
2897 hci_dev_lock(hdev);
2898 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2899 hci_dev_unlock(hdev);
2900 return;
2901 }
7ba8b4be
AG
2902}
2903
1904a853
MH
2904static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
2905 u16 opcode)
7ba8b4be 2906{
4c87eaab
AG
2907 /* General inquiry access code (GIAC) */
2908 u8 lap[3] = { 0x33, 0x8b, 0x9e };
4c87eaab 2909 struct hci_cp_inquiry cp;
7ba8b4be
AG
2910 int err;
2911
4c87eaab
AG
2912 if (status) {
2913 BT_ERR("Failed to disable LE scanning: status %d", status);
2914 return;
2915 }
7ba8b4be 2916
2d28cfe7
JP
2917 hdev->discovery.scan_start = 0;
2918
4c87eaab
AG
2919 switch (hdev->discovery.type) {
2920 case DISCOV_TYPE_LE:
2921 hci_dev_lock(hdev);
2922 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2923 hci_dev_unlock(hdev);
2924 break;
7ba8b4be 2925
4c87eaab 2926 case DISCOV_TYPE_INTERLEAVED:
4c87eaab 2927 hci_dev_lock(hdev);
7dbfac1d 2928
07d2334a
JP
2929 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2930 &hdev->quirks)) {
2931 /* If we were running LE only scan, change discovery
2932 * state. If we were running both LE and BR/EDR inquiry
2933 * simultaneously, and BR/EDR inquiry is already
2934 * finished, stop discovery, otherwise BR/EDR inquiry
177d0506
WK
2935 * will stop discovery when finished. If we are resolving
2936 * a remote device name, do not change the discovery state.
07d2334a 2937 */
177d0506
WK
2938 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2939 hdev->discovery.state != DISCOVERY_RESOLVING)
07d2334a
JP
2940 hci_discovery_set_state(hdev,
2941 DISCOVERY_STOPPED);
2942 } else {
baf880a9
JH
2943 struct hci_request req;
2944
07d2334a
JP
2945 hci_inquiry_cache_flush(hdev);
2946
baf880a9
JH
2947 hci_req_init(&req, hdev);
2948
2949 memset(&cp, 0, sizeof(cp));
2950 memcpy(&cp.lap, lap, sizeof(cp.lap));
2951 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2952 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2953
07d2334a
JP
2954 err = hci_req_run(&req, inquiry_complete);
2955 if (err) {
2956 BT_ERR("Inquiry request failed: err %d", err);
2957 hci_discovery_set_state(hdev,
2958 DISCOVERY_STOPPED);
2959 }
4c87eaab 2960 }
7dbfac1d 2961
4c87eaab
AG
2962 hci_dev_unlock(hdev);
2963 break;
7dbfac1d 2964 }
7dbfac1d
AG
2965}
2966
7ba8b4be
AG
2967static void le_scan_disable_work(struct work_struct *work)
2968{
2969 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 2970 le_scan_disable.work);
4c87eaab
AG
2971 struct hci_request req;
2972 int err;
7ba8b4be
AG
2973
2974 BT_DBG("%s", hdev->name);
2975
2d28cfe7
JP
2976 cancel_delayed_work_sync(&hdev->le_scan_restart);
2977
4c87eaab 2978 hci_req_init(&req, hdev);
28b75a89 2979
b1efcc28 2980 hci_req_add_le_scan_disable(&req);
28b75a89 2981
4c87eaab
AG
2982 err = hci_req_run(&req, le_scan_disable_work_complete);
2983 if (err)
2984 BT_ERR("Disable LE scanning request failed: err %d", err);
28b75a89
AG
2985}
2986
2d28cfe7
JP
2987static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
2988 u16 opcode)
2989{
2990 unsigned long timeout, duration, scan_start, now;
2991
2992 BT_DBG("%s", hdev->name);
2993
2994 if (status) {
2995 BT_ERR("Failed to restart LE scan: status %d", status);
2996 return;
2997 }
2998
2999 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3000 !hdev->discovery.scan_start)
3001 return;
3002
3003 /* When the scan was started, hdev->le_scan_disable has been queued
3004 * after duration from scan_start. During scan restart this job
3005 * has been canceled, and we need to queue it again after proper
3006 * timeout, to make sure that scan does not run indefinitely.
3007 */
3008 duration = hdev->discovery.scan_duration;
3009 scan_start = hdev->discovery.scan_start;
3010 now = jiffies;
3011 if (now - scan_start <= duration) {
3012 int elapsed;
3013
3014 if (now >= scan_start)
3015 elapsed = now - scan_start;
3016 else
3017 elapsed = ULONG_MAX - scan_start + now;
3018
3019 timeout = duration - elapsed;
3020 } else {
3021 timeout = 0;
3022 }
3023 queue_delayed_work(hdev->workqueue,
3024 &hdev->le_scan_disable, timeout);
3025}
3026
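/*
 * Editor's sketch of the wrap-safe arithmetic used above, pulled out as
 * a hypothetical helper with the same unsigned-long jiffies semantics:
 */
static unsigned long example_scan_timeout_left(unsigned long now,
					       unsigned long scan_start,
					       unsigned long duration)
{
	unsigned long elapsed;

	if (now - scan_start > duration)
		return 0;		/* scan window already expired */

	if (now >= scan_start)
		elapsed = now - scan_start;
	else				/* jiffies wrapped since start */
		elapsed = ULONG_MAX - scan_start + now;

	return duration - elapsed;
}
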
3027static void le_scan_restart_work(struct work_struct *work)
3028{
3029 struct hci_dev *hdev = container_of(work, struct hci_dev,
3030 le_scan_restart.work);
3031 struct hci_request req;
3032 struct hci_cp_le_set_scan_enable cp;
3033 int err;
3034
3035 BT_DBG("%s", hdev->name);
3036
3037 /* If controller is not scanning we are done. */
d7a5a11d 3038 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2d28cfe7
JP
3039 return;
3040
3041 hci_req_init(&req, hdev);
3042
3043 hci_req_add_le_scan_disable(&req);
3044
3045 memset(&cp, 0, sizeof(cp));
3046 cp.enable = LE_SCAN_ENABLE;
3047 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3048 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3049
3050 err = hci_req_run(&req, le_scan_restart_work_complete);
3051 if (err)
3052 BT_ERR("Restart LE scan request failed: err %d", err);
3053}
3054
a1f4c318
JH
3055/* Copy the Identity Address of the controller.
3056 *
3057 * If the controller has a public BD_ADDR, then by default use that one.
3058 * If this is a LE only controller without a public address, default to
3059 * the static random address.
3060 *
3061 * For debugging purposes it is possible to force controllers with a
3062 * public address to use the static random address instead.
50b5b952
MH
3063 *
3064 * In case BR/EDR has been disabled on a dual-mode controller and
3065 * userspace has configured a static address, then that address
3066 * becomes the identity address instead of the public BR/EDR address.
a1f4c318
JH
3067 */
3068void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3069 u8 *bdaddr_type)
3070{
b7cb93e5 3071 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
50b5b952 3072 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
d7a5a11d 3073 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
50b5b952 3074 bacmp(&hdev->static_addr, BDADDR_ANY))) {
a1f4c318
JH
3075 bacpy(bdaddr, &hdev->static_addr);
3076 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3077 } else {
3078 bacpy(bdaddr, &hdev->bdaddr);
3079 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3080 }
3081}
3082
9be0dab7
DH
3083/* Alloc HCI device */
3084struct hci_dev *hci_alloc_dev(void)
3085{
3086 struct hci_dev *hdev;
3087
27f70f3e 3088 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
9be0dab7
DH
3089 if (!hdev)
3090 return NULL;
3091
b1b813d4
DH
3092 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3093 hdev->esco_type = (ESCO_HV1);
3094 hdev->link_mode = (HCI_LM_ACCEPT);
b4cb9fb2
MH
3095 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3096 hdev->io_capability = 0x03; /* No Input No Output */
96c2103a 3097 hdev->manufacturer = 0xffff; /* Default to internal use */
bbaf444a
JH
3098 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3099 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
d2609b34
FG
3100 hdev->adv_instance_cnt = 0;
3101 hdev->cur_adv_instance = 0x00;
5d900e46 3102 hdev->adv_instance_timeout = 0;
b1b813d4 3103
b1b813d4
DH
3104 hdev->sniff_max_interval = 800;
3105 hdev->sniff_min_interval = 80;
3106
3f959d46 3107 hdev->le_adv_channel_map = 0x07;
628531c9
GL
3108 hdev->le_adv_min_interval = 0x0800;
3109 hdev->le_adv_max_interval = 0x0800;
bef64738
MH
3110 hdev->le_scan_interval = 0x0060;
3111 hdev->le_scan_window = 0x0030;
4e70c7e7
MH
3112 hdev->le_conn_min_interval = 0x0028;
3113 hdev->le_conn_max_interval = 0x0038;
04fb7d90
MH
3114 hdev->le_conn_latency = 0x0000;
3115 hdev->le_supv_timeout = 0x002a;
a8e1bfaa
MH
3116 hdev->le_def_tx_len = 0x001b;
3117 hdev->le_def_tx_time = 0x0148;
3118 hdev->le_max_tx_len = 0x001b;
3119 hdev->le_max_tx_time = 0x0148;
3120 hdev->le_max_rx_len = 0x001b;
3121 hdev->le_max_rx_time = 0x0148;
bef64738 3122
d6bfd59c 3123 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
b9a7a61e 3124 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
31ad1691
AK
3125 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3126 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
d6bfd59c 3127
b1b813d4
DH
3128 mutex_init(&hdev->lock);
3129 mutex_init(&hdev->req_lock);
3130
3131 INIT_LIST_HEAD(&hdev->mgmt_pending);
3132 INIT_LIST_HEAD(&hdev->blacklist);
6659358e 3133 INIT_LIST_HEAD(&hdev->whitelist);
b1b813d4
DH
3134 INIT_LIST_HEAD(&hdev->uuids);
3135 INIT_LIST_HEAD(&hdev->link_keys);
3136 INIT_LIST_HEAD(&hdev->long_term_keys);
970c4e46 3137 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
b1b813d4 3138 INIT_LIST_HEAD(&hdev->remote_oob_data);
d2ab0ac1 3139 INIT_LIST_HEAD(&hdev->le_white_list);
15819a70 3140 INIT_LIST_HEAD(&hdev->le_conn_params);
77a77a30 3141 INIT_LIST_HEAD(&hdev->pend_le_conns);
66f8455a 3142 INIT_LIST_HEAD(&hdev->pend_le_reports);
6b536b5e 3143 INIT_LIST_HEAD(&hdev->conn_hash.list);
d2609b34 3144 INIT_LIST_HEAD(&hdev->adv_instances);
b1b813d4
DH
3145
3146 INIT_WORK(&hdev->rx_work, hci_rx_work);
3147 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3148 INIT_WORK(&hdev->tx_work, hci_tx_work);
3149 INIT_WORK(&hdev->power_on, hci_power_on);
c7741d16 3150 INIT_WORK(&hdev->error_reset, hci_error_reset);
b1b813d4 3151
b1b813d4
DH
3152 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3153 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3154 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2d28cfe7 3155 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
5d900e46 3156 INIT_DELAYED_WORK(&hdev->adv_instance_expire, hci_adv_timeout_expire);
b1b813d4 3157
b1b813d4
DH
3158 skb_queue_head_init(&hdev->rx_q);
3159 skb_queue_head_init(&hdev->cmd_q);
3160 skb_queue_head_init(&hdev->raw_q);
3161
3162 init_waitqueue_head(&hdev->req_wait_q);
3163
65cc2b49 3164 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
b1b813d4 3165
5fc16cc4
JH
3166 hci_request_setup(hdev);
3167
b1b813d4
DH
3168 hci_init_sysfs(hdev);
3169 discovery_init(hdev);
9be0dab7
DH
3170
3171 return hdev;
3172}
3173EXPORT_SYMBOL(hci_alloc_dev);
3174
3175/* Free HCI device */
3176void hci_free_dev(struct hci_dev *hdev)
3177{
9be0dab7
DH
3178 /* will free via device release */
3179 put_device(&hdev->dev);
3180}
3181EXPORT_SYMBOL(hci_free_dev);
3182
1da177e4
LT
3183/* Register HCI device */
3184int hci_register_dev(struct hci_dev *hdev)
3185{
b1b813d4 3186 int id, error;
1da177e4 3187
74292d5a 3188 if (!hdev->open || !hdev->close || !hdev->send)
1da177e4
LT
3189 return -EINVAL;
3190
08add513
MM
3191 /* Do not allow HCI_AMP devices to register at index 0,
3192 * so the index can be used as the AMP controller ID.
3193 */
3df92b31
SL
3194 switch (hdev->dev_type) {
3195 case HCI_BREDR:
3196 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3197 break;
3198 case HCI_AMP:
3199 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3200 break;
3201 default:
3202 return -EINVAL;
1da177e4 3203 }
8e87d142 3204
3df92b31
SL
3205 if (id < 0)
3206 return id;
3207
1da177e4
LT
3208 sprintf(hdev->name, "hci%d", id);
3209 hdev->id = id;
2d8b3a11
AE
3210
3211 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3212
d8537548
KC
3213 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3214 WQ_MEM_RECLAIM, 1, hdev->name);
33ca954d
DH
3215 if (!hdev->workqueue) {
3216 error = -ENOMEM;
3217 goto err;
3218 }
f48fd9c8 3219
d8537548
KC
3220 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3221 WQ_MEM_RECLAIM, 1, hdev->name);
6ead1bbc
JH
3222 if (!hdev->req_workqueue) {
3223 destroy_workqueue(hdev->workqueue);
3224 error = -ENOMEM;
3225 goto err;
3226 }
3227
0153e2ec
MH
3228 if (!IS_ERR_OR_NULL(bt_debugfs))
3229 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3230
bdc3e0f1
MH
3231 dev_set_name(&hdev->dev, "%s", hdev->name);
3232
3233 error = device_add(&hdev->dev);
33ca954d 3234 if (error < 0)
54506918 3235 goto err_wqueue;
1da177e4 3236
611b30f7 3237 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
3238 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3239 hdev);
611b30f7
MH
3240 if (hdev->rfkill) {
3241 if (rfkill_register(hdev->rfkill) < 0) {
3242 rfkill_destroy(hdev->rfkill);
3243 hdev->rfkill = NULL;
3244 }
3245 }
3246
5e130367 3247 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
a1536da2 3248 hci_dev_set_flag(hdev, HCI_RFKILLED);
5e130367 3249
a1536da2
MH
3250 hci_dev_set_flag(hdev, HCI_SETUP);
3251 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
ce2be9ac 3252
01cd3404 3253 if (hdev->dev_type == HCI_BREDR) {
56f87901
JH
3254 /* Assume BR/EDR support until proven otherwise (such as
3255 * through reading supported features during init.
3256 */
a1536da2 3257 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
56f87901 3258 }
ce2be9ac 3259
fcee3377
GP
3260 write_lock(&hci_dev_list_lock);
3261 list_add(&hdev->list, &hci_dev_list);
3262 write_unlock(&hci_dev_list_lock);
3263
4a964404
MH
3264 /* Devices that are marked for raw-only usage are unconfigured
3265 * and should not be included in normal operation.
fee746b0
MH
3266 */
3267 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
a1536da2 3268 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
fee746b0 3269
05fcd4c4 3270 hci_sock_dev_event(hdev, HCI_DEV_REG);
dc946bd8 3271 hci_dev_hold(hdev);
1da177e4 3272
19202573 3273 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 3274
1da177e4 3275 return id;
f48fd9c8 3276
33ca954d
DH
3277err_wqueue:
3278 destroy_workqueue(hdev->workqueue);
6ead1bbc 3279 destroy_workqueue(hdev->req_workqueue);
33ca954d 3280err:
3df92b31 3281 ida_simple_remove(&hci_index_ida, hdev->id);
f48fd9c8 3282
33ca954d 3283 return error;
1da177e4
LT
3284}
3285EXPORT_SYMBOL(hci_register_dev);
3286
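/*
 * Editor's sketch: the usual driver-side pairing of hci_alloc_dev() and
 * hci_register_dev(), modelled loosely on small drivers like hci_vhci.
 * The example_* callbacks are placeholders, not real kernel symbols:
 */
static int example_open(struct hci_dev *hdev)  { return 0; }
static int example_close(struct hci_dev *hdev) { return 0; }

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	kfree_skb(skb);		/* a real driver queues skb to hardware */
	return 0;
}

static int example_probe(void)
{
	struct hci_dev *hdev;
	int id;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus   = HCI_VIRTUAL;
	hdev->open  = example_open;	/* all three callbacks are */
	hdev->close = example_close;	/* mandatory, as checked in */
	hdev->send  = example_send;	/* hci_register_dev() above  */

	id = hci_register_dev(hdev);
	if (id < 0) {
		hci_free_dev(hdev);
		return id;
	}

	return 0;
}
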
3287/* Unregister HCI device */
59735631 3288void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 3289{
2d7cc19e 3290 int id;
ef222013 3291
c13854ce 3292 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 3293
a1536da2 3294 hci_dev_set_flag(hdev, HCI_UNREGISTER);
94324962 3295
3df92b31
SL
3296 id = hdev->id;
3297
f20d09d5 3298 write_lock(&hci_dev_list_lock);
1da177e4 3299 list_del(&hdev->list);
f20d09d5 3300 write_unlock(&hci_dev_list_lock);
1da177e4
LT
3301
3302 hci_dev_do_close(hdev);
3303
b9b5ef18
GP
3304 cancel_work_sync(&hdev->power_on);
3305
ab81cbf9 3306 if (!test_bit(HCI_INIT, &hdev->flags) &&
d7a5a11d
MH
3307 !hci_dev_test_flag(hdev, HCI_SETUP) &&
3308 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
09fd0de5 3309 hci_dev_lock(hdev);
744cf19e 3310 mgmt_index_removed(hdev);
09fd0de5 3311 hci_dev_unlock(hdev);
56e5cb86 3312 }
ab81cbf9 3313
2e58ef3e
JH
3314 /* mgmt_index_removed should take care of emptying the
3315 * pending list */
3316 BUG_ON(!list_empty(&hdev->mgmt_pending));
3317
05fcd4c4 3318 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
1da177e4 3319
611b30f7
MH
3320 if (hdev->rfkill) {
3321 rfkill_unregister(hdev->rfkill);
3322 rfkill_destroy(hdev->rfkill);
3323 }
3324
bdc3e0f1 3325 device_del(&hdev->dev);
147e2d59 3326
0153e2ec
MH
3327 debugfs_remove_recursive(hdev->debugfs);
3328
f48fd9c8 3329 destroy_workqueue(hdev->workqueue);
6ead1bbc 3330 destroy_workqueue(hdev->req_workqueue);
f48fd9c8 3331
09fd0de5 3332 hci_dev_lock(hdev);
dcc36c16 3333 hci_bdaddr_list_clear(&hdev->blacklist);
6659358e 3334 hci_bdaddr_list_clear(&hdev->whitelist);
2aeb9a1a 3335 hci_uuids_clear(hdev);
55ed8ca1 3336 hci_link_keys_clear(hdev);
b899efaf 3337 hci_smp_ltks_clear(hdev);
970c4e46 3338 hci_smp_irks_clear(hdev);
2763eda6 3339 hci_remote_oob_data_clear(hdev);
d2609b34 3340 hci_adv_instances_clear(hdev);
dcc36c16 3341 hci_bdaddr_list_clear(&hdev->le_white_list);
373110c5 3342 hci_conn_params_clear_all(hdev);
22078800 3343 hci_discovery_filter_clear(hdev);
09fd0de5 3344 hci_dev_unlock(hdev);
e2e0cacb 3345
dc946bd8 3346 hci_dev_put(hdev);
3df92b31
SL
3347
3348 ida_simple_remove(&hci_index_ida, id);
1da177e4
LT
3349}
3350EXPORT_SYMBOL(hci_unregister_dev);
3351
3352/* Suspend HCI device */
3353int hci_suspend_dev(struct hci_dev *hdev)
3354{
05fcd4c4 3355 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
1da177e4
LT
3356 return 0;
3357}
3358EXPORT_SYMBOL(hci_suspend_dev);
3359
3360/* Resume HCI device */
3361int hci_resume_dev(struct hci_dev *hdev)
3362{
05fcd4c4 3363 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
1da177e4
LT
3364 return 0;
3365}
3366EXPORT_SYMBOL(hci_resume_dev);
3367
75e0569f
MH
3368/* Reset HCI device */
3369int hci_reset_dev(struct hci_dev *hdev)
3370{
3371 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3372 struct sk_buff *skb;
3373
3374 skb = bt_skb_alloc(3, GFP_ATOMIC);
3375 if (!skb)
3376 return -ENOMEM;
3377
d79f34e3 3378 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
75e0569f
MH
3379 memcpy(skb_put(skb, 3), hw_err, 3);
3380
3381 /* Send Hardware Error to upper stack */
3382 return hci_recv_frame(hdev, skb);
3383}
3384EXPORT_SYMBOL(hci_reset_dev);
3385
76bca880 3386/* Receive frame from HCI drivers */
e1a26170 3387int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 3388{
76bca880 3389 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 3390 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
3391 kfree_skb(skb);
3392 return -ENXIO;
3393 }
3394
d79f34e3
MH
3395 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3396 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
3397 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
fe806dce
MH
3398 kfree_skb(skb);
3399 return -EINVAL;
3400 }
3401
d82603c6 3402 /* Incoming skb */
76bca880
MH
3403 bt_cb(skb)->incoming = 1;
3404
3405 /* Time stamp */
3406 __net_timestamp(skb);
3407
76bca880 3408 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 3409 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 3410
76bca880
MH
3411 return 0;
3412}
3413EXPORT_SYMBOL(hci_recv_frame);
3414
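/*
 * Editor's sketch: how a driver typically hands a received HCI event to
 * hci_recv_frame(). The buffer and length are illustrative only:
 */
static int example_deliver_event(struct hci_dev *hdev,
				 const void *buf, int len)
{
	struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);

	if (!skb)
		return -ENOMEM;

	/* The packet type must be set before handing off the skb */
	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
	memcpy(skb_put(skb, len), buf, len);

	return hci_recv_frame(hdev, skb);
}
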
e875ff84
MH
3415/* Receive diagnostic message from HCI drivers */
3416int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3417{
581d6fd6 3418 /* Mark as diagnostic packet */
d79f34e3 3419 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
581d6fd6 3420
e875ff84
MH
3421 /* Time stamp */
3422 __net_timestamp(skb);
3423
581d6fd6
MH
3424 skb_queue_tail(&hdev->rx_q, skb);
3425 queue_work(hdev->workqueue, &hdev->rx_work);
e875ff84 3426
e875ff84
MH
3427 return 0;
3428}
3429EXPORT_SYMBOL(hci_recv_diag);
3430
1da177e4
LT
3431/* ---- Interface to upper protocols ---- */
3432
1da177e4
LT
3433int hci_register_cb(struct hci_cb *cb)
3434{
3435 BT_DBG("%p name %s", cb, cb->name);
3436
fba7ecf0 3437 mutex_lock(&hci_cb_list_lock);
00629e0f 3438 list_add_tail(&cb->list, &hci_cb_list);
fba7ecf0 3439 mutex_unlock(&hci_cb_list_lock);
1da177e4
LT
3440
3441 return 0;
3442}
3443EXPORT_SYMBOL(hci_register_cb);
3444
3445int hci_unregister_cb(struct hci_cb *cb)
3446{
3447 BT_DBG("%p name %s", cb, cb->name);
3448
fba7ecf0 3449 mutex_lock(&hci_cb_list_lock);
1da177e4 3450 list_del(&cb->list);
fba7ecf0 3451 mutex_unlock(&hci_cb_list_lock);
1da177e4
LT
3452
3453 return 0;
3454}
3455EXPORT_SYMBOL(hci_unregister_cb);
3456
51086991 3457static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4 3458{
cdc52faa
MH
3459 int err;
3460
d79f34e3
MH
3461 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3462 skb->len);
1da177e4 3463
cd82e61c
MH
3464 /* Time stamp */
3465 __net_timestamp(skb);
1da177e4 3466
cd82e61c
MH
3467 /* Send copy to monitor */
3468 hci_send_to_monitor(hdev, skb);
3469
3470 if (atomic_read(&hdev->promisc)) {
3471 /* Send copy to the sockets */
470fe1b5 3472 hci_send_to_sock(hdev, skb);
1da177e4
LT
3473 }
3474
3475 /* Get rid of skb owner, prior to sending to the driver. */
3476 skb_orphan(skb);
3477
73d0d3c8
MH
3478 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3479 kfree_skb(skb);
3480 return;
3481 }
3482
cdc52faa
MH
3483 err = hdev->send(hdev, skb);
3484 if (err < 0) {
3485 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3486 kfree_skb(skb);
3487 }
1da177e4
LT
3488}
3489
1ca3a9d0 3490/* Send HCI command */
07dc93dd
JH
3491int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3492 const void *param)
1ca3a9d0
JH
3493{
3494 struct sk_buff *skb;
3495
3496 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3497
3498 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3499 if (!skb) {
3500 BT_ERR("%s no memory for command", hdev->name);
3501 return -ENOMEM;
3502 }
3503
49c922bb 3504 /* Stand-alone HCI commands must be flagged as
11714b3d
JH
3505 * single-command requests.
3506 */
44d27137 3507 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
11714b3d 3508
1da177e4 3509 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 3510 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
3511
3512 return 0;
3513}
1da177e4
LT
3514
3515/* Get data from the previously sent command */
a9de9248 3516void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
3517{
3518 struct hci_command_hdr *hdr;
3519
3520 if (!hdev->sent_cmd)
3521 return NULL;
3522
3523 hdr = (void *) hdev->sent_cmd->data;
3524
a9de9248 3525 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
3526 return NULL;
3527
f0e09510 3528 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
3529
3530 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3531}
3532
fbef168f
LP
3533/* Send HCI command and wait for Command Complete event */
3534struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3535 const void *param, u32 timeout)
3536{
3537 struct sk_buff *skb;
3538
3539 if (!test_bit(HCI_UP, &hdev->flags))
3540 return ERR_PTR(-ENETDOWN);
3541
3542 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3543
b504430c 3544 hci_req_sync_lock(hdev);
fbef168f 3545 skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
b504430c 3546 hci_req_sync_unlock(hdev);
fbef168f
LP
3547
3548 return skb;
3549}
3550EXPORT_SYMBOL(hci_cmd_sync);
3551
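/*
 * Editor's sketch: a typical hci_cmd_sync() call, reading the local
 * version information and releasing the returned event skb:
 */
static void example_read_local_version(struct hci_dev *hdev)
{
	struct hci_rp_read_local_version *ver;
	struct sk_buff *skb;

	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
			   HCI_INIT_TIMEOUT);
	if (IS_ERR(skb))
		return;

	if (skb->len == sizeof(*ver)) {
		ver = (void *) skb->data;
		BT_DBG("%s HCI version %u", hdev->name, ver->hci_ver);
	}

	kfree_skb(skb);
}
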
1da177e4
LT
3552/* Send ACL data */
3553static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3554{
3555 struct hci_acl_hdr *hdr;
3556 int len = skb->len;
3557
badff6d0
ACM
3558 skb_push(skb, HCI_ACL_HDR_SIZE);
3559 skb_reset_transport_header(skb);
9c70220b 3560 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
3561 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3562 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
3563}
3564
ee22be7e 3565static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 3566 struct sk_buff *skb, __u16 flags)
1da177e4 3567{
ee22be7e 3568 struct hci_conn *conn = chan->conn;
1da177e4
LT
3569 struct hci_dev *hdev = conn->hdev;
3570 struct sk_buff *list;
3571
087bfd99
GP
3572 skb->len = skb_headlen(skb);
3573 skb->data_len = 0;
3574
d79f34e3 3575 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
204a6e54
AE
3576
3577 switch (hdev->dev_type) {
3578 case HCI_BREDR:
3579 hci_add_acl_hdr(skb, conn->handle, flags);
3580 break;
3581 case HCI_AMP:
3582 hci_add_acl_hdr(skb, chan->handle, flags);
3583 break;
3584 default:
3585 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3586 return;
3587 }
087bfd99 3588
70f23020
AE
3589 list = skb_shinfo(skb)->frag_list;
3590 if (!list) {
1da177e4
LT
 3591 /* Non-fragmented */
3592 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3593
73d80deb 3594 skb_queue_tail(queue, skb);
1da177e4
LT
3595 } else {
3596 /* Fragmented */
3597 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3598
3599 skb_shinfo(skb)->frag_list = NULL;
3600
9cfd5a23
JR
3601 /* Queue all fragments atomically. We need to use spin_lock_bh
3602 * here because of 6LoWPAN links, as there this function is
3603 * called from softirq and using normal spin lock could cause
3604 * deadlocks.
3605 */
3606 spin_lock_bh(&queue->lock);
1da177e4 3607
73d80deb 3608 __skb_queue_tail(queue, skb);
e702112f
AE
3609
3610 flags &= ~ACL_START;
3611 flags |= ACL_CONT;
1da177e4
LT
3612 do {
3613 skb = list; list = list->next;
8e87d142 3614
d79f34e3 3615 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
e702112f 3616 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
3617
3618 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3619
73d80deb 3620 __skb_queue_tail(queue, skb);
1da177e4
LT
3621 } while (list);
3622
9cfd5a23 3623 spin_unlock_bh(&queue->lock);
1da177e4 3624 }
73d80deb
LAD
3625}
3626
3627void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3628{
ee22be7e 3629 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 3630
f0e09510 3631 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb 3632
ee22be7e 3633 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 3634
3eff45ea 3635 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 3636}
1da177e4
LT
3637
3638/* Send SCO data */
0d861d8b 3639void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
3640{
3641 struct hci_dev *hdev = conn->hdev;
3642 struct hci_sco_hdr hdr;
3643
3644 BT_DBG("%s len %d", hdev->name, skb->len);
3645
aca3192c 3646 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
3647 hdr.dlen = skb->len;
3648
badff6d0
ACM
3649 skb_push(skb, HCI_SCO_HDR_SIZE);
3650 skb_reset_transport_header(skb);
9c70220b 3651 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4 3652
d79f34e3 3653 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
c78ae283 3654
1da177e4 3655 skb_queue_tail(&conn->data_q, skb);
3eff45ea 3656 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 3657}
1da177e4
LT
3658
3659/* ---- HCI TX task (outgoing data) ---- */
3660
3661/* HCI Connection scheduler */
6039aa73
GP
3662static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3663 int *quote)
1da177e4
LT
3664{
3665 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 3666 struct hci_conn *conn = NULL, *c;
abc5de8f 3667 unsigned int num = 0, min = ~0;
1da177e4 3668
8e87d142 3669 /* We don't have to lock the device here. Connections are always
1da177e4 3670 * added and removed with TX task disabled. */
bf4c6325
GP
3671
3672 rcu_read_lock();
3673
3674 list_for_each_entry_rcu(c, &h->list, list) {
769be974 3675 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 3676 continue;
769be974
MH
3677
3678 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3679 continue;
3680
1da177e4
LT
3681 num++;
3682
3683 if (c->sent < min) {
3684 min = c->sent;
3685 conn = c;
3686 }
52087a79
LAD
3687
3688 if (hci_conn_num(hdev, type) == num)
3689 break;
1da177e4
LT
3690 }
3691
bf4c6325
GP
3692 rcu_read_unlock();
3693
1da177e4 3694 if (conn) {
6ed58ec5
VT
3695 int cnt, q;
3696
3697 switch (conn->type) {
3698 case ACL_LINK:
3699 cnt = hdev->acl_cnt;
3700 break;
3701 case SCO_LINK:
3702 case ESCO_LINK:
3703 cnt = hdev->sco_cnt;
3704 break;
3705 case LE_LINK:
3706 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3707 break;
3708 default:
3709 cnt = 0;
3710 BT_ERR("Unknown link type");
3711 }
3712
3713 q = cnt / num;
1da177e4
LT
3714 *quote = q ? q : 1;
3715 } else
3716 *quote = 0;
3717
3718 BT_DBG("conn %p quote %d", conn, *quote);
3719 return conn;
3720}
3721
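
The quote is a plain fair share of the controller's free buffer credits, floored at one so the least-served connection always makes progress. A worked example with assumed counts:

/* Assume hdev->acl_cnt == 7 and three ACL connections with queued
 * data: num == 3, so q == 7 / 3 == 2 and *quote == 2 for the
 * connection with the smallest c->sent.  With only 2 credits and
 * 3 connections, q == 0 and the "q ? q : 1" floor still grants
 * *quote == 1.
 */
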
6039aa73 3722static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
3723{
3724 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 3725 struct hci_conn *c;
1da177e4 3726
bae1f5d9 3727 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 3728
bf4c6325
GP
3729 rcu_read_lock();
3730
1da177e4 3731 /* Kill stalled connections */
bf4c6325 3732 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 3733 if (c->type == type && c->sent) {
6ed93dc6
AE
3734 BT_ERR("%s killing stalled connection %pMR",
3735 hdev->name, &c->dst);
bed71748 3736 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
3737 }
3738 }
bf4c6325
GP
3739
3740 rcu_read_unlock();
1da177e4
LT
3741}
3742
6039aa73
GP
3743static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3744 int *quote)
1da177e4 3745{
73d80deb
LAD
3746 struct hci_conn_hash *h = &hdev->conn_hash;
3747 struct hci_chan *chan = NULL;
abc5de8f 3748 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 3749 struct hci_conn *conn;
73d80deb
LAD
3750 int cnt, q, conn_num = 0;
3751
3752 BT_DBG("%s", hdev->name);
3753
bf4c6325
GP
3754 rcu_read_lock();
3755
3756 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
3757 struct hci_chan *tmp;
3758
3759 if (conn->type != type)
3760 continue;
3761
3762 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3763 continue;
3764
3765 conn_num++;
3766
8192edef 3767 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
3768 struct sk_buff *skb;
3769
3770 if (skb_queue_empty(&tmp->data_q))
3771 continue;
3772
3773 skb = skb_peek(&tmp->data_q);
3774 if (skb->priority < cur_prio)
3775 continue;
3776
3777 if (skb->priority > cur_prio) {
3778 num = 0;
3779 min = ~0;
3780 cur_prio = skb->priority;
3781 }
3782
3783 num++;
3784
3785 if (conn->sent < min) {
3786 min = conn->sent;
3787 chan = tmp;
3788 }
3789 }
3790
3791 if (hci_conn_num(hdev, type) == conn_num)
3792 break;
3793 }
3794
bf4c6325
GP
3795 rcu_read_unlock();
3796
73d80deb
LAD
3797 if (!chan)
3798 return NULL;
3799
3800 switch (chan->conn->type) {
3801 case ACL_LINK:
3802 cnt = hdev->acl_cnt;
3803 break;
bd1eb66b
AE
3804 case AMP_LINK:
3805 cnt = hdev->block_cnt;
3806 break;
73d80deb
LAD
3807 case SCO_LINK:
3808 case ESCO_LINK:
3809 cnt = hdev->sco_cnt;
3810 break;
3811 case LE_LINK:
3812 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3813 break;
3814 default:
3815 cnt = 0;
3816 BT_ERR("Unknown link type");
3817 }
3818
3819 q = cnt / num;
3820 *quote = q ? q : 1;
3821 BT_DBG("chan %p quote %d", chan, *quote);
3822 return chan;
3823}
3824
02b20f0b
LAD
3825static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3826{
3827 struct hci_conn_hash *h = &hdev->conn_hash;
3828 struct hci_conn *conn;
3829 int num = 0;
3830
3831 BT_DBG("%s", hdev->name);
3832
bf4c6325
GP
3833 rcu_read_lock();
3834
3835 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
3836 struct hci_chan *chan;
3837
3838 if (conn->type != type)
3839 continue;
3840
3841 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3842 continue;
3843
3844 num++;
3845
8192edef 3846 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
3847 struct sk_buff *skb;
3848
3849 if (chan->sent) {
3850 chan->sent = 0;
3851 continue;
3852 }
3853
3854 if (skb_queue_empty(&chan->data_q))
3855 continue;
3856
3857 skb = skb_peek(&chan->data_q);
3858 if (skb->priority >= HCI_PRIO_MAX - 1)
3859 continue;
3860
3861 skb->priority = HCI_PRIO_MAX - 1;
3862
3863 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 3864 skb->priority);
02b20f0b
LAD
3865 }
3866
3867 if (hci_conn_num(hdev, type) == num)
3868 break;
3869 }
bf4c6325
GP
3870
3871 rcu_read_unlock();
3872
02b20f0b
LAD
3873}
3874
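
After a round that consumed credits, channels that were served get their per-round counter cleared, while channels that had data queued but won no quota are promoted to just below the maximum priority so they cannot starve. A worked example with assumed values:

/* HCI_PRIO_MAX is 7.  A channel with chan->sent == 3 was served:
 * its counter resets to 0 and its priority is untouched.  A channel
 * with chan->sent == 0 and a head skb at priority 3 is promoted to
 * HCI_PRIO_MAX - 1 == 6, so the next hci_chan_sent() pass prefers
 * it over freshly queued lower-priority traffic.
 */
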
b71d385a
AE
3875static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3876{
3877 /* Calculate count of blocks used by this packet */
3878 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3879}
3880
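
For block-based controllers the cost of a packet is its ACL payload rounded up to whole buffer blocks. A worked example with assumed controller values:

/* Assume hdev->block_len == 64 and a 339-byte ACL frame
 * (HCI_ACL_HDR_SIZE == 4 bytes of header plus 335 bytes of payload):
 *
 *   DIV_ROUND_UP(339 - 4, 64) == DIV_ROUND_UP(335, 64) == 6 blocks
 */
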
6039aa73 3881static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 3882{
d7a5a11d 3883 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1da177e4
LT
3884 /* ACL tx timeout must be longer than maximum
3885 * link supervision timeout (40.9 seconds) */
63d2bc1b 3886 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 3887 HCI_ACL_TX_TIMEOUT))
bae1f5d9 3888 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 3889 }
63d2bc1b 3890}
1da177e4 3891
6039aa73 3892static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
3893{
3894 unsigned int cnt = hdev->acl_cnt;
3895 struct hci_chan *chan;
3896 struct sk_buff *skb;
3897 int quote;
3898
3899 __check_timeout(hdev, cnt);
04837f64 3900
73d80deb 3901 while (hdev->acl_cnt &&
a8c5fb1a 3902 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
3903 u32 priority = (skb_peek(&chan->data_q))->priority;
3904 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 3905 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3906 skb->len, skb->priority);
73d80deb 3907
ec1cce24
LAD
3908 /* Stop if priority has changed */
3909 if (skb->priority < priority)
3910 break;
3911
3912 skb = skb_dequeue(&chan->data_q);
3913
73d80deb 3914 hci_conn_enter_active_mode(chan->conn,
04124681 3915 bt_cb(skb)->force_active);
04837f64 3916
57d17d70 3917 hci_send_frame(hdev, skb);
1da177e4
LT
3918 hdev->acl_last_tx = jiffies;
3919
3920 hdev->acl_cnt--;
73d80deb
LAD
3921 chan->sent++;
3922 chan->conn->sent++;
1da177e4
LT
3923 }
3924 }
02b20f0b
LAD
3925
3926 if (cnt != hdev->acl_cnt)
3927 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
3928}
3929
6039aa73 3930static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 3931{
63d2bc1b 3932 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
3933 struct hci_chan *chan;
3934 struct sk_buff *skb;
3935 int quote;
bd1eb66b 3936 u8 type;
b71d385a 3937
63d2bc1b 3938 __check_timeout(hdev, cnt);
b71d385a 3939
bd1eb66b
AE
3940 BT_DBG("%s", hdev->name);
3941
3942 if (hdev->dev_type == HCI_AMP)
3943 type = AMP_LINK;
3944 else
3945 type = ACL_LINK;
3946
b71d385a 3947 while (hdev->block_cnt > 0 &&
bd1eb66b 3948 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
3949 u32 priority = (skb_peek(&chan->data_q))->priority;
3950 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3951 int blocks;
3952
3953 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3954 skb->len, skb->priority);
b71d385a
AE
3955
3956 /* Stop if priority has changed */
3957 if (skb->priority < priority)
3958 break;
3959
3960 skb = skb_dequeue(&chan->data_q);
3961
3962 blocks = __get_blocks(hdev, skb);
3963 if (blocks > hdev->block_cnt)
3964 return;
3965
3966 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 3967 bt_cb(skb)->force_active);
b71d385a 3968
57d17d70 3969 hci_send_frame(hdev, skb);
b71d385a
AE
3970 hdev->acl_last_tx = jiffies;
3971
3972 hdev->block_cnt -= blocks;
3973 quote -= blocks;
3974
3975 chan->sent += blocks;
3976 chan->conn->sent += blocks;
3977 }
3978 }
3979
3980 if (cnt != hdev->block_cnt)
bd1eb66b 3981 hci_prio_recalculate(hdev, type);
b71d385a
AE
3982}
3983
6039aa73 3984static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
3985{
3986 BT_DBG("%s", hdev->name);
3987
bd1eb66b
AE
3988 /* No ACL link over BR/EDR controller */
3989 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3990 return;
3991
3992 /* No AMP link over AMP controller */
3993 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
3994 return;
3995
3996 switch (hdev->flow_ctl_mode) {
3997 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3998 hci_sched_acl_pkt(hdev);
3999 break;
4000
4001 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4002 hci_sched_acl_blk(hdev);
4003 break;
4004 }
4005}
4006
1da177e4 4007/* Schedule SCO */
6039aa73 4008static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
4009{
4010 struct hci_conn *conn;
4011 struct sk_buff *skb;
4012 int quote;
4013
4014 BT_DBG("%s", hdev->name);
4015
52087a79
LAD
4016 if (!hci_conn_num(hdev, SCO_LINK))
4017 return;
4018
1da177e4
LT
4019 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4020 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4021 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4022 hci_send_frame(hdev, skb);
1da177e4
LT
4023
4024 conn->sent++;
4025 if (conn->sent == ~0)
4026 conn->sent = 0;
4027 }
4028 }
4029}
4030
6039aa73 4031static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
4032{
4033 struct hci_conn *conn;
4034 struct sk_buff *skb;
4035 int quote;
4036
4037 BT_DBG("%s", hdev->name);
4038
52087a79
LAD
4039 if (!hci_conn_num(hdev, ESCO_LINK))
4040 return;
4041
8fc9ced3
GP
4042 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4043 &quote))) {
b6a0dc82
MH
4044 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4045 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4046 hci_send_frame(hdev, skb);
b6a0dc82
MH
4047
4048 conn->sent++;
4049 if (conn->sent == ~0)
4050 conn->sent = 0;
4051 }
4052 }
4053}
4054
6039aa73 4055static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 4056{
73d80deb 4057 struct hci_chan *chan;
6ed58ec5 4058 struct sk_buff *skb;
02b20f0b 4059 int quote, cnt, tmp;
6ed58ec5
VT
4060
4061 BT_DBG("%s", hdev->name);
4062
52087a79
LAD
4063 if (!hci_conn_num(hdev, LE_LINK))
4064 return;
4065
d7a5a11d 4066 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
6ed58ec5
VT
4067 /* LE tx timeout must be longer than maximum
4068 * link supervision timeout (40.9 seconds) */
bae1f5d9 4069 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 4070 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 4071 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
4072 }
4073
4074 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 4075 tmp = cnt;
73d80deb 4076 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
4077 u32 priority = (skb_peek(&chan->data_q))->priority;
4078 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4079 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4080 skb->len, skb->priority);
6ed58ec5 4081
ec1cce24
LAD
4082 /* Stop if priority has changed */
4083 if (skb->priority < priority)
4084 break;
4085
4086 skb = skb_dequeue(&chan->data_q);
4087
57d17d70 4088 hci_send_frame(hdev, skb);
6ed58ec5
VT
4089 hdev->le_last_tx = jiffies;
4090
4091 cnt--;
73d80deb
LAD
4092 chan->sent++;
4093 chan->conn->sent++;
6ed58ec5
VT
4094 }
4095 }
73d80deb 4096
6ed58ec5
VT
4097 if (hdev->le_pkts)
4098 hdev->le_cnt = cnt;
4099 else
4100 hdev->acl_cnt = cnt;
02b20f0b
LAD
4101
4102 if (cnt != tmp)
4103 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
4104}
4105
3eff45ea 4106static void hci_tx_work(struct work_struct *work)
1da177e4 4107{
3eff45ea 4108 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
4109 struct sk_buff *skb;
4110
6ed58ec5 4111 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 4112 hdev->sco_cnt, hdev->le_cnt);
1da177e4 4113
d7a5a11d 4114 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
52de599e
MH
4115 /* Schedule queues and send stuff to HCI driver */
4116 hci_sched_acl(hdev);
4117 hci_sched_sco(hdev);
4118 hci_sched_esco(hdev);
4119 hci_sched_le(hdev);
4120 }
6ed58ec5 4121
1da177e4
LT
4122 /* Send next queued raw (unknown type) packet */
4123 while ((skb = skb_dequeue(&hdev->raw_q)))
57d17d70 4124 hci_send_frame(hdev, skb);
1da177e4
LT
4125}
4126
25985edc 4127/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
4128
4129/* ACL data packet */
6039aa73 4130static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
4131{
4132 struct hci_acl_hdr *hdr = (void *) skb->data;
4133 struct hci_conn *conn;
4134 __u16 handle, flags;
4135
4136 skb_pull(skb, HCI_ACL_HDR_SIZE);
4137
4138 handle = __le16_to_cpu(hdr->handle);
4139 flags = hci_flags(handle);
4140 handle = hci_handle(handle);
4141
f0e09510 4142 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 4143 handle, flags);
1da177e4
LT
4144
4145 hdev->stat.acl_rx++;
4146
4147 hci_dev_lock(hdev);
4148 conn = hci_conn_hash_lookup_handle(hdev, handle);
4149 hci_dev_unlock(hdev);
8e87d142 4150
1da177e4 4151 if (conn) {
65983fc7 4152 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 4153
1da177e4 4154 /* Send to upper protocol */
686ebf28
UF
4155 l2cap_recv_acldata(conn, skb, flags);
4156 return;
1da177e4 4157 } else {
8e87d142 4158 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 4159 hdev->name, handle);
1da177e4
LT
4160 }
4161
4162 kfree_skb(skb);
4163}
4164
4165/* SCO data packet */
6039aa73 4166static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
4167{
4168 struct hci_sco_hdr *hdr = (void *) skb->data;
4169 struct hci_conn *conn;
4170 __u16 handle;
4171
4172 skb_pull(skb, HCI_SCO_HDR_SIZE);
4173
4174 handle = __le16_to_cpu(hdr->handle);
4175
f0e09510 4176 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
4177
4178 hdev->stat.sco_rx++;
4179
4180 hci_dev_lock(hdev);
4181 conn = hci_conn_hash_lookup_handle(hdev, handle);
4182 hci_dev_unlock(hdev);
4183
4184 if (conn) {
1da177e4 4185 /* Send to upper protocol */
686ebf28
UF
4186 sco_recv_scodata(conn, skb);
4187 return;
1da177e4 4188 } else {
8e87d142 4189 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 4190 hdev->name, handle);
1da177e4
LT
4191 }
4192
4193 kfree_skb(skb);
4194}
4195
9238f36a
JH
4196static bool hci_req_is_complete(struct hci_dev *hdev)
4197{
4198 struct sk_buff *skb;
4199
4200 skb = skb_peek(&hdev->cmd_q);
4201 if (!skb)
4202 return true;
4203
44d27137 4204 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
9238f36a
JH
4205}
4206
42c6b129
JH
4207static void hci_resend_last(struct hci_dev *hdev)
4208{
4209 struct hci_command_hdr *sent;
4210 struct sk_buff *skb;
4211 u16 opcode;
4212
4213 if (!hdev->sent_cmd)
4214 return;
4215
4216 sent = (void *) hdev->sent_cmd->data;
4217 opcode = __le16_to_cpu(sent->opcode);
4218 if (opcode == HCI_OP_RESET)
4219 return;
4220
4221 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4222 if (!skb)
4223 return;
4224
4225 skb_queue_head(&hdev->cmd_q, skb);
4226 queue_work(hdev->workqueue, &hdev->cmd_work);
4227}
4228
e6214487
JH
4229void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4230 hci_req_complete_t *req_complete,
4231 hci_req_complete_skb_t *req_complete_skb)
9238f36a 4232{
9238f36a
JH
4233 struct sk_buff *skb;
4234 unsigned long flags;
4235
4236 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4237
42c6b129
JH
4238 /* If the completed command doesn't match the last one that was
4239 * sent we need to do special handling of it.
9238f36a 4240 */
42c6b129
JH
4241 if (!hci_sent_cmd_data(hdev, opcode)) {
4242 /* Some CSR based controllers generate a spontaneous
4243 * reset complete event during init and any pending
4244 * command will never be completed. In such a case we
4245 * need to resend whatever was the last sent
4246 * command.
4247 */
4248 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4249 hci_resend_last(hdev);
4250
9238f36a 4251 return;
42c6b129 4252 }
9238f36a
JH
4253
4254 /* If the command succeeded and there's still more commands in
4255 * this request the request is not yet complete.
4256 */
4257 if (!status && !hci_req_is_complete(hdev))
4258 return;
4259
4260 /* If this was the last command in a request the complete
4261 * callback would be found in hdev->sent_cmd instead of the
4262 * command queue (hdev->cmd_q).
4263 */
44d27137
JH
4264 if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
4265 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
e6214487
JH
4266 return;
4267 }
53e21fbc 4268
44d27137
JH
4269 if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4270 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
e6214487 4271 return;
9238f36a
JH
4272 }
4273
4274 /* Remove all pending commands belonging to this request */
4275 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4276 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
44d27137 4277 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
9238f36a
JH
4278 __skb_queue_head(&hdev->cmd_q, skb);
4279 break;
4280 }
4281
242c0ebd
MH
4282 *req_complete = bt_cb(skb)->hci.req_complete;
4283 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
9238f36a
JH
4284 kfree_skb(skb);
4285 }
4286 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
9238f36a
JH
4287}
4288
b78752cc 4289static void hci_rx_work(struct work_struct *work)
1da177e4 4290{
b78752cc 4291 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
4292 struct sk_buff *skb;
4293
4294 BT_DBG("%s", hdev->name);
4295
1da177e4 4296 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
4297 /* Send copy to monitor */
4298 hci_send_to_monitor(hdev, skb);
4299
1da177e4
LT
4300 if (atomic_read(&hdev->promisc)) {
4301 /* Send copy to the sockets */
470fe1b5 4302 hci_send_to_sock(hdev, skb);
1da177e4
LT
4303 }
4304
d7a5a11d 4305 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1da177e4
LT
4306 kfree_skb(skb);
4307 continue;
4308 }
4309
4310 if (test_bit(HCI_INIT, &hdev->flags)) {
 4311 /* Don't process data packets in this state. */
d79f34e3 4312 switch (hci_skb_pkt_type(skb)) {
1da177e4
LT
4313 case HCI_ACLDATA_PKT:
4314 case HCI_SCODATA_PKT:
4315 kfree_skb(skb);
4316 continue;
3ff50b79 4317 }
1da177e4
LT
4318 }
4319
4320 /* Process frame */
d79f34e3 4321 switch (hci_skb_pkt_type(skb)) {
1da177e4 4322 case HCI_EVENT_PKT:
b78752cc 4323 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
4324 hci_event_packet(hdev, skb);
4325 break;
4326
4327 case HCI_ACLDATA_PKT:
4328 BT_DBG("%s ACL data packet", hdev->name);
4329 hci_acldata_packet(hdev, skb);
4330 break;
4331
4332 case HCI_SCODATA_PKT:
4333 BT_DBG("%s SCO data packet", hdev->name);
4334 hci_scodata_packet(hdev, skb);
4335 break;
4336
4337 default:
4338 kfree_skb(skb);
4339 break;
4340 }
4341 }
1da177e4
LT
4342}
4343
c347b765 4344static void hci_cmd_work(struct work_struct *work)
1da177e4 4345{
c347b765 4346 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
4347 struct sk_buff *skb;
4348
2104786b
AE
4349 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4350 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 4351
1da177e4 4352 /* Send queued commands */
5a08ecce
AE
4353 if (atomic_read(&hdev->cmd_cnt)) {
4354 skb = skb_dequeue(&hdev->cmd_q);
4355 if (!skb)
4356 return;
4357
7585b97a 4358 kfree_skb(hdev->sent_cmd);
1da177e4 4359
a675d7f1 4360 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 4361 if (hdev->sent_cmd) {
1da177e4 4362 atomic_dec(&hdev->cmd_cnt);
57d17d70 4363 hci_send_frame(hdev, skb);
7bdb8a5c 4364 if (test_bit(HCI_RESET, &hdev->flags))
65cc2b49 4365 cancel_delayed_work(&hdev->cmd_timer);
7bdb8a5c 4366 else
65cc2b49
MH
4367 schedule_delayed_work(&hdev->cmd_timer,
4368 HCI_CMD_TIMEOUT);
1da177e4
LT
4369 } else {
4370 skb_queue_head(&hdev->cmd_q, skb);
c347b765 4371 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
4372 }
4373 }
4374}