/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	err = kstrtobool_from_user(user_buf, count, &enable);
	if (err)
		return err;

	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
		return -EALREADY;

	hci_req_sync_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_sync_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	kfree_skb(skb);

	hci_dev_change_flag(hdev, HCI_DUT_MODE);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

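/* Hedged userspace sketch (not part of this file or of the kernel build):
 * with debugfs mounted at its conventional /sys/kernel/debug location and
 * a controller registered as hci0 (both assumptions), the dut_mode entry
 * backed by dut_mode_fops above can be toggled like this; the written
 * string is parsed by kstrtobool_from_user() in dut_mode_write():
 *
 *	int fd = open("/sys/kernel/debug/bluetooth/hci0/dut_mode", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "1", 1);
 *		close(fd);
 *	}
 */
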
static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	bool enable;
	int err;

	err = kstrtobool_from_user(user_buf, count, &enable);
	if (err)
		return err;

	/* When the diagnostic flags are not persistent and the transport
	 * is not active or in user channel operation, then there is no need
	 * for the vendor callback. Instead just store the desired value and
	 * the setting will be programmed when the controller gets powered on.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    (!test_bit(HCI_RUNNING, &hdev->flags) ||
	     hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
		goto done;

	hci_req_sync_lock(hdev);
	err = hdev->set_diag(hdev, enable);
	hci_req_sync_unlock(hdev);

	if (err < 0)
		return err;

done:
	if (enable)
		hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
	else
		hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

	return count;
}

static const struct file_operations vendor_diag_fops = {
	.open		= simple_open,
	.read		= vendor_diag_read,
	.write		= vendor_diag_write,
	.llseek		= default_llseek,
};

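/* Hedged driver-side sketch: vendor_diag_write() relies on the transport
 * driver having supplied the set_diag() callback on the hci_dev. The
 * callback name and body below are purely illustrative, not a real driver:
 *
 *	static int my_set_diag(struct hci_dev *hdev, bool enable)
 *	{
 *		// issue the vendor specific HCI command that switches the
 *		// controller's diagnostic reporting on or off
 *		return 0;
 *	}
 *
 *	hdev->set_diag = my_set_diag;	// before hci_register_dev()
 */
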
static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
	debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
			    &dut_mode_fops);

	if (hdev->set_diag)
		debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
				    &vendor_diag_fops);
}

static int hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
	return 0;
}

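/* A note on the pattern used by hci_reset_req() and the other builders in
 * this file: they only queue HCI commands on a struct hci_request.
 * Execution happens when the builder is handed to the synchronous request
 * machinery, as done later in this file, e.g.:
 *
 *	err = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);
 */
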
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static int amp_init2(struct hci_request *req)
{
	/* Read Local Supported Features. Not all AMP controllers
	 * support this so it's placed conditionally in the second
	 * stage init.
	 */
	if (req->hdev->commands[14] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	return 0;
}

static int hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		bredr_init(req);
		break;
	case HCI_AMP:
		amp_init1(req);
		break;
	default:
		bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type);
		break;
	}

	return 0;
}

static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */

		/* If the controller supports the Disconnect command, enable
		 * the corresponding event. In addition enable packet flow
		 * control related events.
		 */
		if (hdev->commands[0] & 0x20) {
			events[0] |= 0x10; /* Disconnection Complete */
			events[2] |= 0x04; /* Number of Completed Packets */
			events[3] |= 0x02; /* Data Buffer Overflow */
		}

		/* If the controller supports the Read Remote Version
		 * Information command, enable the corresponding event.
		 */
		if (hdev->commands[2] & 0x80)
			events[1] |= 0x08; /* Read Remote Version Information
					    * Complete
					    */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_ext_feat_capable(hdev))
		events[4] |= 0x04; /* Read Remote Extended Features Complete */

	if (lmp_esco_capable(hdev)) {
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

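/* Worked example (an illustration, not normative): for an LE-only
 * controller that supports Disconnect (commands[0] & 0x20), Read Remote
 * Version Information (commands[2] & 0x80) and LE encryption, the logic
 * above assembles:
 *
 *	events[] = { 0x90, 0xe8, 0x04, 0x02, 0x00, 0x80, 0x00, 0x20 }
 *
 * i.e. Disconnection Complete plus Encryption Change in byte 0, the
 * command status/complete/error bits plus Read Remote Version in byte 1,
 * and the LE Meta-Event bit in byte 7.
 */
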
static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->dev_type == HCI_AMP)
		return amp_init2(req);

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should be available as well. However some controllers
		 * list the max_page as 0 as long as SSP has not been
		 * enabled. To achieve proper debugging output, force
		 * the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			u8 mode = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI
		 * events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}

	return 0;
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

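/* Worked example (illustrative): a controller whose LMP features advertise
 * role switch and sniff mode, but neither hold nor park, ends up with
 * link_policy = HCI_LP_RSWITCH | HCI_LP_SNIFF = 0x0001 | 0x0004 = 0x0005,
 * sent to the controller in little-endian byte order via cpu_to_le16().
 */
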
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
	bool changed = false;

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
		changed = true;
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
		changed = true;
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
		events[2] |= 0x80;
		changed = true;
	}

	/* Some Broadcom based controllers indicate support for Set Event
	 * Mask Page 2 command, but then actually do not support it. Since
	 * the default value is all bits set to zero, the command is only
	 * required if the event mask has to be changed. In case no change
	 * to the event mask is needed, skip this command.
	 */
	if (changed)
		hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
			    sizeof(events), events);
}

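/* Worked example (illustrative): a controller that is only CSB master
 * capable yields events[1] = 0xc0 and events[2] = 0x30 with all other
 * bytes zero, so the command is sent because changed is true; a controller
 * with none of these features skips HCI_OP_SET_EVENT_MASK_PAGE_2 entirely.
 */
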
static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	if (hdev->commands[6] & 0x20 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40;	/* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04;	/* LE Direct Advertising
						 * Report
						 */

		/* If the controller supports Channel Selection Algorithm #2
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
			events[2] |= 0x08;	/* LE Channel Selection
						 * Algorithm
						 */

		/* If the controller supports the LE Set Scan Enable command,
		 * enable the corresponding advertising report event.
		 */
		if (hdev->commands[26] & 0x08)
			events[0] |= 0x02;	/* LE Advertising Report */

		/* If the controller supports the LE Create Connection
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[26] & 0x10)
			events[0] |= 0x01;	/* LE Connection Complete */

		/* If the controller supports the LE Connection Update
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x04)
			events[0] |= 0x04;	/* LE Connection Update
						 * Complete
						 */

		/* If the controller supports the LE Read Remote Used Features
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x20)
			events[0] |= 0x08;	/* LE Read Remote Used
						 * Features Complete
						 */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80;	/* LE Read Local P-256
						 * Public Key Complete
						 */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01;	/* LE Generate DHKey Complete */

		/* If the controller supports the LE Set Default PHY or
		 * LE Set PHY commands, enable the corresponding event.
		 */
		if (hdev->commands[35] & (0x20 | 0x40))
			events[1] |= 0x08;	/* LE PHY Update Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->commands[26] & 0x40) {
			/* Read LE White List Size */
			hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
				    0, NULL);
		}

		if (hdev->commands[26] & 0x80) {
			/* Clear LE White List */
			hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	return 0;
}

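/* A note on the hdev->commands[] tests above and below: the array holds the
 * 64 octet bitfield returned by HCI Read Local Supported Commands, so a
 * test such as (hdev->commands[26] & 0x08) checks octet 26, bit 3 of that
 * table, which the Core Specification assigns to LE Set Scan Enable.
 */
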
static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}

	/* Set Suggested Default Data Length to maximum if supported */
	if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
		struct hci_cp_le_write_def_data_len cp;

		cp.tx_len = hdev->le_max_tx_len;
		cp.tx_time = hdev->le_max_tx_time;
		hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
	}

	/* Set Default PHY parameters if command is supported */
	if (hdev->commands[35] & 0x20) {
		struct hci_cp_le_set_default_phy cp;

		/* No transmitter PHY or receiver PHY preferences */
		cp.all_phys = 0x03;
		cp.tx_phys = 0;
		cp.rx_phys = 0;

		hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
	}

	return 0;
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first two stages of init.
	 */
	if (hdev->dev_type != HCI_PRIMARY)
		return 0;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}

static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

	return 0;
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	return 0;
}

static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
	return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
	return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
	return 0;
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

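/* Usage sketch (grounded in the ioctl helpers later in this file): every
 * successful hci_dev_get() takes a reference that must be dropped with
 * hci_dev_put() once the caller is done, e.g.:
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *	if (hdev) {
 *		// ... operate on hdev ...
 *		hci_dev_put(hdev);
 *	}
 */
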
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}

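/* For orientation: the discovery cache keeps each inquiry_entry on up to
 * two lists at once. Every entry sits on cache->all (via the ->all node),
 * while entries whose remote name is still unknown also sit on
 * cache->unknown, and entries queued for name resolution sit on
 * cache->resolve ordered by signal strength (both via the ->list node).
 */
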
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return 0;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo, NULL);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses we will use a buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

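/* Hedged userspace sketch of driving this ioctl: the HCIINQUIRY request is
 * issued on a raw HCI socket with a struct hci_inquiry_req header followed
 * by room for the inquiry_info results, roughly as BlueZ does; buffer
 * sizing and error handling are elided, and the general inquiry access
 * code 0x9e8b33 is given in little-endian byte order:
 *
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[255];
 *	} buf = { .ir = { .dev_id = 0, .length = 8, .num_rsp = 255,
 *			  .lap = { 0x33, 0x8b, 0x9e } } };
 *	ioctl(dd, HCIINQUIRY, &buf);
 */
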
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		ret = -ENODEV;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_PRIMARY &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	set_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_OPEN);

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
		hci_sock_dev_event(hdev, HCI_DEV_SETUP);

		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			ret = __hci_unconf_init(hdev);
	}

	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			ret = __hci_init(hdev);
			if (!ret && hdev->post_init)
				ret = hdev->post_init(hdev);
		}
	}

	/* If the HCI Reset command is clearing all diagnostic settings,
	 * then they need to be reprogrammed after the init procedure
	 * completed.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
		ret = hdev->set_diag(hdev, true);

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		set_bit(HCI_UP, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_UP);
		hci_leds_update_powered(hdev, true);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hci_dev_test_flag(hdev, HCI_MGMT) &&
		    hdev->dev_type == HCI_PRIMARY) {
			ret = __hci_req_hci_power_on(hdev);
			mgmt_power_on(hdev, ret);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		clear_bit(HCI_RUNNING, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_sync_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		if (p->conn) {
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
		list_del_init(&p->action);
	}

	BT_DBG("All LE pending actions cleared");
}

int hci_dev_do_close(struct hci_dev *hdev)
{
	bool auto_off;

	BT_DBG("%s %p", hdev->name, hdev);

	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    test_bit(HCI_UP, &hdev->flags)) {
		/* Execute vendor specific shutdown routine */
		if (hdev->shutdown)
			hdev->shutdown(hdev);
	}

	cancel_delayed_work(&hdev->power_off);

	hci_request_cancel_all(hdev);
	hci_req_sync_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_sync_unlock(hdev);
		return 0;
	}

	hci_leds_update_powered(hdev, false);

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);

	if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    hci_dev_test_flag(hdev, HCI_MGMT))
		__mgmt_power_off(hdev);

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	smp_unregister(hdev);

	hci_sock_dev_event(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	clear_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_sync_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL);

	hci_req_sync_unlock(hdev);
	return ret;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	err = hci_dev_do_reset(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

1da177e4
LT
1759int hci_dev_reset_stat(__u16 dev)
1760{
1761 struct hci_dev *hdev;
1762 int ret = 0;
1763
70f23020
AE
1764 hdev = hci_dev_get(dev);
1765 if (!hdev)
1da177e4
LT
1766 return -ENODEV;
1767
d7a5a11d 1768 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
0736cfa8
MH
1769 ret = -EBUSY;
1770 goto done;
1771 }
1772
d7a5a11d 1773 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
fee746b0
MH
1774 ret = -EOPNOTSUPP;
1775 goto done;
1776 }
1777
1da177e4
LT
1778 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1779
0736cfa8 1780done:
1da177e4 1781 hci_dev_put(hdev);
1da177e4
LT
1782 return ret;
1783}
1784
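/* Map the HCI Write Scan Enable value onto the mgmt flags:
 * SCAN_PAGE (0x02) drives HCI_CONNECTABLE and SCAN_INQUIRY (0x01)
 * drives HCI_DISCOVERABLE.
 */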
123abc08
JH
1785static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1786{
bc6d2d04 1787 bool conn_changed, discov_changed;
123abc08
JH
1788
1789 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1790
1791 if ((scan & SCAN_PAGE))
238be788
MH
1792 conn_changed = !hci_dev_test_and_set_flag(hdev,
1793 HCI_CONNECTABLE);
123abc08 1794 else
a69d8927
MH
1795 conn_changed = hci_dev_test_and_clear_flag(hdev,
1796 HCI_CONNECTABLE);
123abc08 1797
bc6d2d04 1798 if ((scan & SCAN_INQUIRY)) {
238be788
MH
1799 discov_changed = !hci_dev_test_and_set_flag(hdev,
1800 HCI_DISCOVERABLE);
bc6d2d04 1801 } else {
a358dc11 1802 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
a69d8927
MH
1803 discov_changed = hci_dev_test_and_clear_flag(hdev,
1804 HCI_DISCOVERABLE);
bc6d2d04
JH
1805 }
1806
d7a5a11d 1807 if (!hci_dev_test_flag(hdev, HCI_MGMT))
123abc08
JH
1808 return;
1809
bc6d2d04
JH
1810 if (conn_changed || discov_changed) {
1811 /* In case this was disabled through mgmt */
a1536da2 1812 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
bc6d2d04 1813
d7a5a11d 1814 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
cab054ab 1815 hci_req_update_adv_data(hdev, hdev->cur_adv_instance);
bc6d2d04 1816
123abc08 1817 mgmt_new_settings(hdev);
bc6d2d04 1818 }
123abc08
JH
1819}
1820
1da177e4
LT
1821int hci_dev_cmd(unsigned int cmd, void __user *arg)
1822{
1823 struct hci_dev *hdev;
1824 struct hci_dev_req dr;
1825 int err = 0;
1826
1827 if (copy_from_user(&dr, arg, sizeof(dr)))
1828 return -EFAULT;
1829
70f23020
AE
1830 hdev = hci_dev_get(dr.dev_id);
1831 if (!hdev)
1da177e4
LT
1832 return -ENODEV;
1833
d7a5a11d 1834 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
0736cfa8
MH
1835 err = -EBUSY;
1836 goto done;
1837 }
1838
d7a5a11d 1839 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
fee746b0
MH
1840 err = -EOPNOTSUPP;
1841 goto done;
1842 }
1843
ca8bee5d 1844 if (hdev->dev_type != HCI_PRIMARY) {
5b69bef5
MH
1845 err = -EOPNOTSUPP;
1846 goto done;
1847 }
1848
d7a5a11d 1849 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
56f87901
JH
1850 err = -EOPNOTSUPP;
1851 goto done;
1852 }
1853
1da177e4
LT
1854 switch (cmd) {
1855 case HCISETAUTH:
01178cd4 1856 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
4ebeee2d 1857 HCI_INIT_TIMEOUT, NULL);
1da177e4
LT
1858 break;
1859
1860 case HCISETENCRYPT:
1861 if (!lmp_encrypt_capable(hdev)) {
1862 err = -EOPNOTSUPP;
1863 break;
1864 }
1865
1866 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1867 /* Auth must be enabled first */
01178cd4 1868 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
4ebeee2d 1869 HCI_INIT_TIMEOUT, NULL);
1da177e4
LT
1870 if (err)
1871 break;
1872 }
1873
01178cd4 1874 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
4ebeee2d 1875 HCI_INIT_TIMEOUT, NULL);
1da177e4
LT
1876 break;
1877
1878 case HCISETSCAN:
01178cd4 1879 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
4ebeee2d 1880 HCI_INIT_TIMEOUT, NULL);
91a668b0 1881
bc6d2d04
JH
1882 /* Ensure that the connectable and discoverable states
1883 * get correctly modified as this was a non-mgmt change.
91a668b0 1884 */
123abc08
JH
1885 if (!err)
1886 hci_update_scan_state(hdev, dr.dev_opt);
1da177e4
LT
1887 break;
1888
1da177e4 1889 case HCISETLINKPOL:
01178cd4 1890 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
4ebeee2d 1891 HCI_INIT_TIMEOUT, NULL);
1da177e4
LT
1892 break;
1893
1894 case HCISETLINKMODE:
e4e8e37c
MH
1895 hdev->link_mode = ((__u16) dr.dev_opt) &
1896 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1897 break;
1898
1899 case HCISETPTYPE:
1900 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
1901 break;
1902
1903 case HCISETACLMTU:
e4e8e37c
MH
1904 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1905 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
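 /* dr.dev_opt packs two 16-bit values: the low __u16 is the
 * packet count and the high __u16 is the MTU. The pointer
 * arithmetic assumes a little-endian host; HCISETSCOMTU below
 * uses the same layout.
 */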
1da177e4
LT
1906 break;
1907
1908 case HCISETSCOMTU:
e4e8e37c
MH
1909 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1910 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
1911 break;
1912
1913 default:
1914 err = -EINVAL;
1915 break;
1916 }
e4e8e37c 1917
0736cfa8 1918done:
1da177e4
LT
1919 hci_dev_put(hdev);
1920 return err;
1921}
1922
1923int hci_get_dev_list(void __user *arg)
1924{
8035ded4 1925 struct hci_dev *hdev;
1da177e4
LT
1926 struct hci_dev_list_req *dl;
1927 struct hci_dev_req *dr;
1da177e4
LT
1928 int n = 0, size, err;
1929 __u16 dev_num;
1930
1931 if (get_user(dev_num, (__u16 __user *) arg))
1932 return -EFAULT;
1933
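 /* Bound the request: accept at most two pages' worth of
 * hci_dev_req entries so the allocation below stays small.
 */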
1934 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1935 return -EINVAL;
1936
1937 size = sizeof(*dl) + dev_num * sizeof(*dr);
1938
70f23020
AE
1939 dl = kzalloc(size, GFP_KERNEL);
1940 if (!dl)
1da177e4
LT
1941 return -ENOMEM;
1942
1943 dr = dl->dev_req;
1944
f20d09d5 1945 read_lock(&hci_dev_list_lock);
8035ded4 1946 list_for_each_entry(hdev, &hci_dev_list, list) {
2e84d8db 1947 unsigned long flags = hdev->flags;
c542a06c 1948
2e84d8db
MH
 1949 /* When auto-off is configured the transport is actually
 1950 * running, but still report the device as down so that
 1951 * userspace treats it as powered off.
 1952 */
d7a5a11d 1953 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2e84d8db 1954 flags &= ~BIT(HCI_UP);
c542a06c 1955
1da177e4 1956 (dr + n)->dev_id = hdev->id;
2e84d8db 1957 (dr + n)->dev_opt = flags;
c542a06c 1958
1da177e4
LT
1959 if (++n >= dev_num)
1960 break;
1961 }
f20d09d5 1962 read_unlock(&hci_dev_list_lock);
1da177e4
LT
1963
1964 dl->dev_num = n;
1965 size = sizeof(*dl) + n * sizeof(*dr);
1966
1967 err = copy_to_user(arg, dl, size);
1968 kfree(dl);
1969
1970 return err ? -EFAULT : 0;
1971}
1972
1973int hci_get_dev_info(void __user *arg)
1974{
1975 struct hci_dev *hdev;
1976 struct hci_dev_info di;
2e84d8db 1977 unsigned long flags;
1da177e4
LT
1978 int err = 0;
1979
1980 if (copy_from_user(&di, arg, sizeof(di)))
1981 return -EFAULT;
1982
70f23020
AE
1983 hdev = hci_dev_get(di.dev_id);
1984 if (!hdev)
1da177e4
LT
1985 return -ENODEV;
1986
2e84d8db
MH
 1987 /* When auto-off is configured the transport is actually
 1988 * running, but still report the device as down so that
 1989 * userspace treats it as powered off.
 1990 */
d7a5a11d 1991 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2e84d8db
MH
1992 flags = hdev->flags & ~BIT(HCI_UP);
1993 else
1994 flags = hdev->flags;
c542a06c 1995
1da177e4
LT
1996 strcpy(di.name, hdev->name);
1997 di.bdaddr = hdev->bdaddr;
60f2a3ed 1998 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
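 /* di.type encodes the transport bus in bits 0-3 and the device
 * type (primary vs. AMP) in bits 4-5.
 */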
2e84d8db 1999 di.flags = flags;
1da177e4 2000 di.pkt_type = hdev->pkt_type;
572c7f84
JH
2001 if (lmp_bredr_capable(hdev)) {
2002 di.acl_mtu = hdev->acl_mtu;
2003 di.acl_pkts = hdev->acl_pkts;
2004 di.sco_mtu = hdev->sco_mtu;
2005 di.sco_pkts = hdev->sco_pkts;
2006 } else {
2007 di.acl_mtu = hdev->le_mtu;
2008 di.acl_pkts = hdev->le_pkts;
2009 di.sco_mtu = 0;
2010 di.sco_pkts = 0;
2011 }
1da177e4
LT
2012 di.link_policy = hdev->link_policy;
2013 di.link_mode = hdev->link_mode;
2014
2015 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2016 memcpy(&di.features, &hdev->features, sizeof(di.features));
2017
2018 if (copy_to_user(arg, &di, sizeof(di)))
2019 err = -EFAULT;
2020
2021 hci_dev_put(hdev);
2022
2023 return err;
2024}
2025
2026/* ---- Interface to HCI drivers ---- */
2027
611b30f7
MH
2028static int hci_rfkill_set_block(void *data, bool blocked)
2029{
2030 struct hci_dev *hdev = data;
2031
2032 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2033
d7a5a11d 2034 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
0736cfa8
MH
2035 return -EBUSY;
2036
5e130367 2037 if (blocked) {
a1536da2 2038 hci_dev_set_flag(hdev, HCI_RFKILLED);
d7a5a11d
MH
2039 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2040 !hci_dev_test_flag(hdev, HCI_CONFIG))
bf543036 2041 hci_dev_do_close(hdev);
5e130367 2042 } else {
a358dc11 2043 hci_dev_clear_flag(hdev, HCI_RFKILLED);
1025c04c 2044 }
611b30f7
MH
2045
2046 return 0;
2047}
2048
2049static const struct rfkill_ops hci_rfkill_ops = {
2050 .set_block = hci_rfkill_set_block,
2051};
2052
ab81cbf9
JH
2053static void hci_power_on(struct work_struct *work)
2054{
2055 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
96570ffc 2056 int err;
ab81cbf9
JH
2057
2058 BT_DBG("%s", hdev->name);
2059
2ff13894
JH
2060 if (test_bit(HCI_UP, &hdev->flags) &&
2061 hci_dev_test_flag(hdev, HCI_MGMT) &&
2062 hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
d82142a8 2063 cancel_delayed_work(&hdev->power_off);
2ff13894
JH
2064 hci_req_sync_lock(hdev);
2065 err = __hci_req_hci_power_on(hdev);
2066 hci_req_sync_unlock(hdev);
2067 mgmt_power_on(hdev, err);
2068 return;
2069 }
2070
cbed0ca1 2071 err = hci_dev_do_open(hdev);
96570ffc 2072 if (err < 0) {
3ad67582 2073 hci_dev_lock(hdev);
96570ffc 2074 mgmt_set_powered_failed(hdev, err);
3ad67582 2075 hci_dev_unlock(hdev);
ab81cbf9 2076 return;
96570ffc 2077 }
ab81cbf9 2078
a5c8f270
MH
2079 /* During the HCI setup phase, a few error conditions are
2080 * ignored and they need to be checked now. If they are still
2081 * valid, it is important to turn the device back off.
2082 */
d7a5a11d
MH
2083 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2084 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
ca8bee5d 2085 (hdev->dev_type == HCI_PRIMARY &&
a5c8f270
MH
2086 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2087 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
a358dc11 2088 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
bf543036 2089 hci_dev_do_close(hdev);
d7a5a11d 2090 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
19202573
JH
2091 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2092 HCI_AUTO_OFF_TIMEOUT);
bf543036 2093 }
ab81cbf9 2094
a69d8927 2095 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
4a964404
MH
2096 /* For unconfigured devices, set the HCI_RAW flag
2097 * so that userspace can easily identify them.
4a964404 2098 */
d7a5a11d 2099 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
4a964404 2100 set_bit(HCI_RAW, &hdev->flags);
0602a8ad
MH
2101
2102 /* For fully configured devices, this will send
2103 * the Index Added event. For unconfigured devices,
 2104 * it will send the Unconfigured Index Added event.
 2105 *
 2106 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
 2107 * and no event will be sent.
2108 */
2109 mgmt_index_added(hdev);
a69d8927 2110 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
5ea234d3
MH
 2111 /* Once the controller is configured, it is important
 2112 * to clear the HCI_RAW flag.
2113 */
d7a5a11d 2114 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
5ea234d3
MH
2115 clear_bit(HCI_RAW, &hdev->flags);
2116
d603b76b
MH
2117 /* Powering on the controller with HCI_CONFIG set only
2118 * happens with the transition from unconfigured to
2119 * configured. This will send the Index Added event.
2120 */
744cf19e 2121 mgmt_index_added(hdev);
fee746b0 2122 }
ab81cbf9
JH
2123}
2124
2125static void hci_power_off(struct work_struct *work)
2126{
3243553f 2127 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 2128 power_off.work);
ab81cbf9
JH
2129
2130 BT_DBG("%s", hdev->name);
2131
8ee56540 2132 hci_dev_do_close(hdev);
ab81cbf9
JH
2133}
2134
c7741d16
MH
2135static void hci_error_reset(struct work_struct *work)
2136{
2137 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2138
2139 BT_DBG("%s", hdev->name);
2140
2141 if (hdev->hw_error)
2142 hdev->hw_error(hdev, hdev->hw_error_code);
2143 else
2064ee33 2144 bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
c7741d16
MH
2145
2146 if (hci_dev_do_close(hdev))
2147 return;
2148
c7741d16
MH
2149 hci_dev_do_open(hdev);
2150}
2151
35f7498a 2152void hci_uuids_clear(struct hci_dev *hdev)
2aeb9a1a 2153{
4821002c 2154 struct bt_uuid *uuid, *tmp;
2aeb9a1a 2155
4821002c
JH
2156 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2157 list_del(&uuid->list);
2aeb9a1a
JH
2158 kfree(uuid);
2159 }
2aeb9a1a
JH
2160}
2161
35f7498a 2162void hci_link_keys_clear(struct hci_dev *hdev)
55ed8ca1 2163{
0378b597 2164 struct link_key *key;
55ed8ca1 2165
0378b597
JH
2166 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2167 list_del_rcu(&key->list);
2168 kfree_rcu(key, rcu);
55ed8ca1 2169 }
55ed8ca1
JH
2170}
2171
35f7498a 2172void hci_smp_ltks_clear(struct hci_dev *hdev)
b899efaf 2173{
970d0f1b 2174 struct smp_ltk *k;
b899efaf 2175
970d0f1b
JH
2176 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2177 list_del_rcu(&k->list);
2178 kfree_rcu(k, rcu);
b899efaf 2179 }
b899efaf
VCG
2180}
2181
970c4e46
JH
2182void hci_smp_irks_clear(struct hci_dev *hdev)
2183{
adae20cb 2184 struct smp_irk *k;
970c4e46 2185
adae20cb
JH
2186 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2187 list_del_rcu(&k->list);
2188 kfree_rcu(k, rcu);
970c4e46
JH
2189 }
2190}
2191
55ed8ca1
JH
2192struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2193{
8035ded4 2194 struct link_key *k;
55ed8ca1 2195
0378b597
JH
2196 rcu_read_lock();
2197 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2198 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2199 rcu_read_unlock();
55ed8ca1 2200 return k;
0378b597
JH
2201 }
2202 }
2203 rcu_read_unlock();
55ed8ca1
JH
2204
2205 return NULL;
2206}
2207
745c0ce3 2208static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 2209 u8 key_type, u8 old_key_type)
d25e28ab
JH
2210{
2211 /* Legacy key */
2212 if (key_type < 0x03)
745c0ce3 2213 return true;
d25e28ab
JH
2214
2215 /* Debug keys are insecure so don't store them persistently */
2216 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 2217 return false;
d25e28ab
JH
2218
2219 /* Changed combination key and there's no previous one */
2220 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 2221 return false;
d25e28ab
JH
2222
2223 /* Security mode 3 case */
2224 if (!conn)
745c0ce3 2225 return true;
d25e28ab 2226
e3befab9
JH
2227 /* BR/EDR key derived using SC from an LE link */
2228 if (conn->type == LE_LINK)
2229 return true;
2230
d25e28ab
JH
 2231 /* Neither the local nor the remote side requested no-bonding */
2232 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 2233 return true;
d25e28ab
JH
2234
2235 /* Local side had dedicated bonding as requirement */
2236 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 2237 return true;
d25e28ab
JH
2238
2239 /* Remote side had dedicated bonding as requirement */
2240 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 2241 return true;
d25e28ab
JH
2242
2243 /* If none of the above criteria match, then don't store the key
2244 * persistently */
745c0ce3 2245 return false;
d25e28ab
JH
2246}
2247
e804d25d 2248static u8 ltk_role(u8 type)
98a0b845 2249{
e804d25d
JH
2250 if (type == SMP_LTK)
2251 return HCI_ROLE_MASTER;
98a0b845 2252
e804d25d 2253 return HCI_ROLE_SLAVE;
98a0b845
JH
2254}
2255
f3a73d97
JH
2256struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2257 u8 addr_type, u8 role)
75d262c2 2258{
c9839a11 2259 struct smp_ltk *k;
75d262c2 2260
970d0f1b
JH
2261 rcu_read_lock();
2262 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
5378bc56
JH
2263 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2264 continue;
2265
923e2414 2266 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
970d0f1b 2267 rcu_read_unlock();
75d262c2 2268 return k;
970d0f1b
JH
2269 }
2270 }
2271 rcu_read_unlock();
75d262c2
VCG
2272
2273 return NULL;
2274}
75d262c2 2275
970c4e46
JH
2276struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2277{
2278 struct smp_irk *irk;
2279
adae20cb
JH
2280 rcu_read_lock();
2281 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2282 if (!bacmp(&irk->rpa, rpa)) {
2283 rcu_read_unlock();
970c4e46 2284 return irk;
adae20cb 2285 }
970c4e46
JH
2286 }
2287
adae20cb 2288 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
defce9e8 2289 if (smp_irk_matches(hdev, irk->val, rpa)) {
970c4e46 2290 bacpy(&irk->rpa, rpa);
adae20cb 2291 rcu_read_unlock();
970c4e46
JH
2292 return irk;
2293 }
2294 }
adae20cb 2295 rcu_read_unlock();
970c4e46
JH
2296
2297 return NULL;
2298}
2299
2300struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2301 u8 addr_type)
2302{
2303 struct smp_irk *irk;
2304
6cfc9988
JH
2305 /* Identity Address must be public or static random */
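 /* A static random address must have its two most significant
 * bits set to 1; bdaddr_t is stored little-endian, so b[5] is
 * the most significant byte, hence the 0xc0 mask.
 */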
2306 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2307 return NULL;
2308
adae20cb
JH
2309 rcu_read_lock();
2310 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
970c4e46 2311 if (addr_type == irk->addr_type &&
adae20cb
JH
2312 bacmp(bdaddr, &irk->bdaddr) == 0) {
2313 rcu_read_unlock();
970c4e46 2314 return irk;
adae20cb 2315 }
970c4e46 2316 }
adae20cb 2317 rcu_read_unlock();
970c4e46
JH
2318
2319 return NULL;
2320}
2321
567fa2aa 2322struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
7652ff6a
JH
2323 bdaddr_t *bdaddr, u8 *val, u8 type,
2324 u8 pin_len, bool *persistent)
55ed8ca1
JH
2325{
2326 struct link_key *key, *old_key;
745c0ce3 2327 u8 old_key_type;
55ed8ca1
JH
2328
2329 old_key = hci_find_link_key(hdev, bdaddr);
2330 if (old_key) {
2331 old_key_type = old_key->type;
2332 key = old_key;
2333 } else {
12adcf3a 2334 old_key_type = conn ? conn->key_type : 0xff;
0a14ab41 2335 key = kzalloc(sizeof(*key), GFP_KERNEL);
55ed8ca1 2336 if (!key)
567fa2aa 2337 return NULL;
0378b597 2338 list_add_rcu(&key->list, &hdev->link_keys);
55ed8ca1
JH
2339 }
2340
6ed93dc6 2341 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 2342
d25e28ab
JH
2343 /* Some buggy controller combinations generate a changed
2344 * combination key for legacy pairing even when there's no
2345 * previous key */
2346 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 2347 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 2348 type = HCI_LK_COMBINATION;
655fe6ec
JH
2349 if (conn)
2350 conn->key_type = type;
2351 }
d25e28ab 2352
55ed8ca1 2353 bacpy(&key->bdaddr, bdaddr);
9b3b4460 2354 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
2355 key->pin_len = pin_len;
2356
b6020ba0 2357 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 2358 key->type = old_key_type;
4748fed2
JH
2359 else
2360 key->type = type;
2361
7652ff6a
JH
2362 if (persistent)
2363 *persistent = hci_persistent_key(hdev, conn, type,
2364 old_key_type);
4df378a1 2365
567fa2aa 2366 return key;
55ed8ca1
JH
2367}
2368
ca9142b8 2369struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
35d70271 2370 u8 addr_type, u8 type, u8 authenticated,
fe39c7b2 2371 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
75d262c2 2372{
c9839a11 2373 struct smp_ltk *key, *old_key;
e804d25d 2374 u8 role = ltk_role(type);
75d262c2 2375
f3a73d97 2376 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
c9839a11 2377 if (old_key)
75d262c2 2378 key = old_key;
c9839a11 2379 else {
0a14ab41 2380 key = kzalloc(sizeof(*key), GFP_KERNEL);
75d262c2 2381 if (!key)
ca9142b8 2382 return NULL;
970d0f1b 2383 list_add_rcu(&key->list, &hdev->long_term_keys);
75d262c2
VCG
2384 }
2385
75d262c2 2386 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
2387 key->bdaddr_type = addr_type;
2388 memcpy(key->val, tk, sizeof(key->val));
2389 key->authenticated = authenticated;
2390 key->ediv = ediv;
fe39c7b2 2391 key->rand = rand;
c9839a11
VCG
2392 key->enc_size = enc_size;
2393 key->type = type;
75d262c2 2394
ca9142b8 2395 return key;
75d262c2
VCG
2396}
2397
ca9142b8
JH
2398struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2399 u8 addr_type, u8 val[16], bdaddr_t *rpa)
970c4e46
JH
2400{
2401 struct smp_irk *irk;
2402
2403 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2404 if (!irk) {
2405 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2406 if (!irk)
ca9142b8 2407 return NULL;
970c4e46
JH
2408
2409 bacpy(&irk->bdaddr, bdaddr);
2410 irk->addr_type = addr_type;
2411
adae20cb 2412 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
970c4e46
JH
2413 }
2414
2415 memcpy(irk->val, val, 16);
2416 bacpy(&irk->rpa, rpa);
2417
ca9142b8 2418 return irk;
970c4e46
JH
2419}
2420
55ed8ca1
JH
2421int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2422{
2423 struct link_key *key;
2424
2425 key = hci_find_link_key(hdev, bdaddr);
2426 if (!key)
2427 return -ENOENT;
2428
6ed93dc6 2429 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1 2430
0378b597
JH
2431 list_del_rcu(&key->list);
2432 kfree_rcu(key, rcu);
55ed8ca1
JH
2433
2434 return 0;
2435}
2436
e0b2b27e 2437int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
b899efaf 2438{
970d0f1b 2439 struct smp_ltk *k;
c51ffa0b 2440 int removed = 0;
b899efaf 2441
970d0f1b 2442 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
e0b2b27e 2443 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
b899efaf
VCG
2444 continue;
2445
6ed93dc6 2446 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf 2447
970d0f1b
JH
2448 list_del_rcu(&k->list);
2449 kfree_rcu(k, rcu);
c51ffa0b 2450 removed++;
b899efaf
VCG
2451 }
2452
c51ffa0b 2453 return removed ? 0 : -ENOENT;
b899efaf
VCG
2454}
2455
a7ec7338
JH
2456void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2457{
adae20cb 2458 struct smp_irk *k;
a7ec7338 2459
adae20cb 2460 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
a7ec7338
JH
2461 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2462 continue;
2463
2464 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2465
adae20cb
JH
2466 list_del_rcu(&k->list);
2467 kfree_rcu(k, rcu);
a7ec7338
JH
2468 }
2469}
2470
55e76b38
JH
2471bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2472{
2473 struct smp_ltk *k;
4ba9faf3 2474 struct smp_irk *irk;
55e76b38
JH
2475 u8 addr_type;
2476
2477 if (type == BDADDR_BREDR) {
2478 if (hci_find_link_key(hdev, bdaddr))
2479 return true;
2480 return false;
2481 }
2482
2483 /* Convert to HCI addr type which struct smp_ltk uses */
2484 if (type == BDADDR_LE_PUBLIC)
2485 addr_type = ADDR_LE_DEV_PUBLIC;
2486 else
2487 addr_type = ADDR_LE_DEV_RANDOM;
2488
4ba9faf3
JH
2489 irk = hci_get_irk(hdev, bdaddr, addr_type);
2490 if (irk) {
2491 bdaddr = &irk->bdaddr;
2492 addr_type = irk->addr_type;
2493 }
2494
55e76b38
JH
2495 rcu_read_lock();
2496 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
87c8b28d
JH
2497 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2498 rcu_read_unlock();
55e76b38 2499 return true;
87c8b28d 2500 }
55e76b38
JH
2501 }
2502 rcu_read_unlock();
2503
2504 return false;
2505}
2506
6bd32326 2507/* HCI command timer function */
65cc2b49 2508static void hci_cmd_timeout(struct work_struct *work)
6bd32326 2509{
65cc2b49
MH
2510 struct hci_dev *hdev = container_of(work, struct hci_dev,
2511 cmd_timer.work);
6bd32326 2512
bda4f23a
AE
2513 if (hdev->sent_cmd) {
2514 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2515 u16 opcode = __le16_to_cpu(sent->opcode);
2516
2064ee33 2517 bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
bda4f23a 2518 } else {
2064ee33 2519 bt_dev_err(hdev, "command tx timeout");
bda4f23a
AE
2520 }
2521
6bd32326 2522 atomic_set(&hdev->cmd_cnt, 1);
c347b765 2523 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
2524}
2525
2763eda6 2526struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
6928a924 2527 bdaddr_t *bdaddr, u8 bdaddr_type)
2763eda6
SJ
2528{
2529 struct oob_data *data;
2530
6928a924
JH
2531 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2532 if (bacmp(bdaddr, &data->bdaddr) != 0)
2533 continue;
2534 if (data->bdaddr_type != bdaddr_type)
2535 continue;
2536 return data;
2537 }
2763eda6
SJ
2538
2539 return NULL;
2540}
2541
6928a924
JH
2542int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2543 u8 bdaddr_type)
2763eda6
SJ
2544{
2545 struct oob_data *data;
2546
6928a924 2547 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2763eda6
SJ
2548 if (!data)
2549 return -ENOENT;
2550
6928a924 2551 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2763eda6
SJ
2552
2553 list_del(&data->list);
2554 kfree(data);
2555
2556 return 0;
2557}
2558
35f7498a 2559void hci_remote_oob_data_clear(struct hci_dev *hdev)
2763eda6
SJ
2560{
2561 struct oob_data *data, *n;
2562
2563 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2564 list_del(&data->list);
2565 kfree(data);
2566 }
2763eda6
SJ
2567}
2568
0798872e 2569int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
6928a924 2570 u8 bdaddr_type, u8 *hash192, u8 *rand192,
81328d5c 2571 u8 *hash256, u8 *rand256)
2763eda6
SJ
2572{
2573 struct oob_data *data;
2574
6928a924 2575 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2763eda6 2576 if (!data) {
0a14ab41 2577 data = kmalloc(sizeof(*data), GFP_KERNEL);
2763eda6
SJ
2578 if (!data)
2579 return -ENOMEM;
2580
2581 bacpy(&data->bdaddr, bdaddr);
6928a924 2582 data->bdaddr_type = bdaddr_type;
2763eda6
SJ
2583 list_add(&data->list, &hdev->remote_oob_data);
2584 }
2585
81328d5c
JH
2586 if (hash192 && rand192) {
2587 memcpy(data->hash192, hash192, sizeof(data->hash192));
2588 memcpy(data->rand192, rand192, sizeof(data->rand192));
f7697b16
MH
2589 if (hash256 && rand256)
2590 data->present = 0x03;
81328d5c
JH
2591 } else {
2592 memset(data->hash192, 0, sizeof(data->hash192));
2593 memset(data->rand192, 0, sizeof(data->rand192));
f7697b16
MH
2594 if (hash256 && rand256)
2595 data->present = 0x02;
2596 else
2597 data->present = 0x00;
0798872e
MH
2598 }
2599
81328d5c
JH
2600 if (hash256 && rand256) {
2601 memcpy(data->hash256, hash256, sizeof(data->hash256));
2602 memcpy(data->rand256, rand256, sizeof(data->rand256));
2603 } else {
2604 memset(data->hash256, 0, sizeof(data->hash256));
2605 memset(data->rand256, 0, sizeof(data->rand256));
f7697b16
MH
2606 if (hash192 && rand192)
2607 data->present = 0x01;
81328d5c 2608 }
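 /* data->present is a bitmask: bit 0 is set when the P-192
 * values are valid, bit 1 when the P-256 values are valid
 * (0x03 means both are present).
 */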
0798872e 2609
6ed93dc6 2610 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
2611
2612 return 0;
2613}
2614
d2609b34
FG
2615/* This function requires the caller holds hdev->lock */
2616struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2617{
2618 struct adv_info *adv_instance;
2619
2620 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2621 if (adv_instance->instance == instance)
2622 return adv_instance;
2623 }
2624
2625 return NULL;
2626}
2627
2628/* This function requires the caller holds hdev->lock */
74b93e9f
PK
2629struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
2630{
d2609b34
FG
2631 struct adv_info *cur_instance;
2632
2633 cur_instance = hci_find_adv_instance(hdev, instance);
2634 if (!cur_instance)
2635 return NULL;
2636
2637 if (cur_instance == list_last_entry(&hdev->adv_instances,
2638 struct adv_info, list))
2639 return list_first_entry(&hdev->adv_instances,
2640 struct adv_info, list);
2641 else
2642 return list_next_entry(cur_instance, list);
2643}
2644
2645/* This function requires the caller holds hdev->lock */
2646int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2647{
2648 struct adv_info *adv_instance;
2649
2650 adv_instance = hci_find_adv_instance(hdev, instance);
2651 if (!adv_instance)
2652 return -ENOENT;
2653
2654 BT_DBG("%s removing %dMR", hdev->name, instance);
2655
cab054ab
JH
2656 if (hdev->cur_adv_instance == instance) {
2657 if (hdev->adv_instance_timeout) {
2658 cancel_delayed_work(&hdev->adv_instance_expire);
2659 hdev->adv_instance_timeout = 0;
2660 }
2661 hdev->cur_adv_instance = 0x00;
5d900e46
FG
2662 }
2663
d2609b34
FG
2664 list_del(&adv_instance->list);
2665 kfree(adv_instance);
2666
2667 hdev->adv_instance_cnt--;
2668
2669 return 0;
2670}
2671
2672/* This function requires the caller holds hdev->lock */
2673void hci_adv_instances_clear(struct hci_dev *hdev)
2674{
2675 struct adv_info *adv_instance, *n;
2676
5d900e46
FG
2677 if (hdev->adv_instance_timeout) {
2678 cancel_delayed_work(&hdev->adv_instance_expire);
2679 hdev->adv_instance_timeout = 0;
2680 }
2681
d2609b34
FG
2682 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2683 list_del(&adv_instance->list);
2684 kfree(adv_instance);
2685 }
2686
2687 hdev->adv_instance_cnt = 0;
cab054ab 2688 hdev->cur_adv_instance = 0x00;
d2609b34
FG
2689}
2690
2691/* This function requires the caller holds hdev->lock */
2692int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2693 u16 adv_data_len, u8 *adv_data,
2694 u16 scan_rsp_len, u8 *scan_rsp_data,
2695 u16 timeout, u16 duration)
2696{
2697 struct adv_info *adv_instance;
2698
2699 adv_instance = hci_find_adv_instance(hdev, instance);
2700 if (adv_instance) {
2701 memset(adv_instance->adv_data, 0,
2702 sizeof(adv_instance->adv_data));
2703 memset(adv_instance->scan_rsp_data, 0,
2704 sizeof(adv_instance->scan_rsp_data));
2705 } else {
2706 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2707 instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2708 return -EOVERFLOW;
2709
39ecfad6 2710 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
d2609b34
FG
2711 if (!adv_instance)
2712 return -ENOMEM;
2713
fffd38bc 2714 adv_instance->pending = true;
d2609b34
FG
2715 adv_instance->instance = instance;
2716 list_add(&adv_instance->list, &hdev->adv_instances);
2717 hdev->adv_instance_cnt++;
2718 }
2719
2720 adv_instance->flags = flags;
2721 adv_instance->adv_data_len = adv_data_len;
2722 adv_instance->scan_rsp_len = scan_rsp_len;
2723
2724 if (adv_data_len)
2725 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2726
2727 if (scan_rsp_len)
2728 memcpy(adv_instance->scan_rsp_data,
2729 scan_rsp_data, scan_rsp_len);
2730
2731 adv_instance->timeout = timeout;
5d900e46 2732 adv_instance->remaining_time = timeout;
d2609b34
FG
2733
2734 if (duration == 0)
2735 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2736 else
2737 adv_instance->duration = duration;
2738
2739 BT_DBG("%s for %dMR", hdev->name, instance);
2740
2741 return 0;
2742}
2743
dcc36c16 2744struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
b9ee0a78 2745 bdaddr_t *bdaddr, u8 type)
b2a66aad 2746{
8035ded4 2747 struct bdaddr_list *b;
b2a66aad 2748
dcc36c16 2749 list_for_each_entry(b, bdaddr_list, list) {
b9ee0a78 2750 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 2751 return b;
b9ee0a78 2752 }
b2a66aad
AJ
2753
2754 return NULL;
2755}
2756
dcc36c16 2757void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
b2a66aad 2758{
7eb7404f 2759 struct bdaddr_list *b, *n;
b2a66aad 2760
7eb7404f
GT
2761 list_for_each_entry_safe(b, n, bdaddr_list, list) {
2762 list_del(&b->list);
b2a66aad
AJ
2763 kfree(b);
2764 }
b2a66aad
AJ
2765}
2766
dcc36c16 2767int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2768{
2769 struct bdaddr_list *entry;
b2a66aad 2770
b9ee0a78 2771 if (!bacmp(bdaddr, BDADDR_ANY))
b2a66aad
AJ
2772 return -EBADF;
2773
dcc36c16 2774 if (hci_bdaddr_list_lookup(list, bdaddr, type))
5e762444 2775 return -EEXIST;
b2a66aad 2776
27f70f3e 2777 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
5e762444
AJ
2778 if (!entry)
2779 return -ENOMEM;
b2a66aad
AJ
2780
2781 bacpy(&entry->bdaddr, bdaddr);
b9ee0a78 2782 entry->bdaddr_type = type;
b2a66aad 2783
dcc36c16 2784 list_add(&entry->list, list);
b2a66aad 2785
2a8357f2 2786 return 0;
b2a66aad
AJ
2787}
2788
dcc36c16 2789int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2790{
2791 struct bdaddr_list *entry;
b2a66aad 2792
35f7498a 2793 if (!bacmp(bdaddr, BDADDR_ANY)) {
dcc36c16 2794 hci_bdaddr_list_clear(list);
35f7498a
JH
2795 return 0;
2796 }
b2a66aad 2797
dcc36c16 2798 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
d2ab0ac1
MH
2799 if (!entry)
2800 return -ENOENT;
2801
2802 list_del(&entry->list);
2803 kfree(entry);
2804
2805 return 0;
2806}
2807
15819a70
AG
2808/* This function requires the caller holds hdev->lock */
2809struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2810 bdaddr_t *addr, u8 addr_type)
2811{
2812 struct hci_conn_params *params;
2813
2814 list_for_each_entry(params, &hdev->le_conn_params, list) {
2815 if (bacmp(&params->addr, addr) == 0 &&
2816 params->addr_type == addr_type) {
2817 return params;
2818 }
2819 }
2820
2821 return NULL;
2822}
2823
4b10966f 2824/* This function requires the caller holds hdev->lock */
501f8827
JH
2825struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2826 bdaddr_t *addr, u8 addr_type)
a9b0a04c 2827{
912b42ef 2828 struct hci_conn_params *param;
a9b0a04c 2829
501f8827 2830 list_for_each_entry(param, list, action) {
912b42ef
JH
2831 if (bacmp(&param->addr, addr) == 0 &&
2832 param->addr_type == addr_type)
2833 return param;
4b10966f
MH
2834 }
2835
2836 return NULL;
a9b0a04c
AG
2837}
2838
15819a70 2839/* This function requires the caller holds hdev->lock */
51d167c0
MH
2840struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2841 bdaddr_t *addr, u8 addr_type)
15819a70
AG
2842{
2843 struct hci_conn_params *params;
2844
2845 params = hci_conn_params_lookup(hdev, addr, addr_type);
cef952ce 2846 if (params)
51d167c0 2847 return params;
15819a70
AG
2848
2849 params = kzalloc(sizeof(*params), GFP_KERNEL);
2850 if (!params) {
2064ee33 2851 bt_dev_err(hdev, "out of memory");
51d167c0 2852 return NULL;
15819a70
AG
2853 }
2854
2855 bacpy(&params->addr, addr);
2856 params->addr_type = addr_type;
cef952ce
AG
2857
2858 list_add(&params->list, &hdev->le_conn_params);
93450c75 2859 INIT_LIST_HEAD(&params->action);
cef952ce 2860
bf5b3c8b
MH
2861 params->conn_min_interval = hdev->le_conn_min_interval;
2862 params->conn_max_interval = hdev->le_conn_max_interval;
2863 params->conn_latency = hdev->le_conn_latency;
2864 params->supervision_timeout = hdev->le_supv_timeout;
2865 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2866
2867 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2868
51d167c0 2869 return params;
bf5b3c8b
MH
2870}
2871
f6c63249 2872static void hci_conn_params_free(struct hci_conn_params *params)
15819a70 2873{
f8aaf9b6 2874 if (params->conn) {
f161dd41 2875 hci_conn_drop(params->conn);
f8aaf9b6
JH
2876 hci_conn_put(params->conn);
2877 }
f161dd41 2878
95305baa 2879 list_del(&params->action);
15819a70
AG
2880 list_del(&params->list);
2881 kfree(params);
f6c63249
JH
2882}
2883
2884/* This function requires the caller holds hdev->lock */
2885void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2886{
2887 struct hci_conn_params *params;
2888
2889 params = hci_conn_params_lookup(hdev, addr, addr_type);
2890 if (!params)
2891 return;
2892
2893 hci_conn_params_free(params);
15819a70 2894
95305baa
JH
2895 hci_update_background_scan(hdev);
2896
15819a70
AG
2897 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2898}
2899
2900/* This function requires the caller holds hdev->lock */
55af49a8 2901void hci_conn_params_clear_disabled(struct hci_dev *hdev)
15819a70
AG
2902{
2903 struct hci_conn_params *params, *tmp;
2904
2905 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
55af49a8
JH
2906 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2907 continue;
f75113a2
JP
2908
 2909 /* If trying to establish a one-time connection to a disabled
 2910 * device, leave the params, but mark them as just once.
2911 */
2912 if (params->explicit_connect) {
2913 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2914 continue;
2915 }
2916
15819a70
AG
2917 list_del(&params->list);
2918 kfree(params);
2919 }
2920
55af49a8 2921 BT_DBG("All LE disabled connection parameters were removed");
77a77a30
AG
2922}
2923
2924/* This function requires the caller holds hdev->lock */
030e7f81 2925static void hci_conn_params_clear_all(struct hci_dev *hdev)
77a77a30 2926{
15819a70 2927 struct hci_conn_params *params, *tmp;
77a77a30 2928
f6c63249
JH
2929 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2930 hci_conn_params_free(params);
77a77a30 2931
15819a70 2932 BT_DBG("All LE connection parameters were removed");
77a77a30
AG
2933}
2934
a1f4c318
JH
2935/* Copy the Identity Address of the controller.
2936 *
2937 * If the controller has a public BD_ADDR, then by default use that one.
2938 * If this is a LE only controller without a public address, default to
2939 * the static random address.
2940 *
2941 * For debugging purposes it is possible to force controllers with a
2942 * public address to use the static random address instead.
50b5b952
MH
2943 *
2944 * In case BR/EDR has been disabled on a dual-mode controller and
2945 * userspace has configured a static address, then that address
2946 * becomes the identity address instead of the public BR/EDR address.
a1f4c318
JH
2947 */
2948void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2949 u8 *bdaddr_type)
2950{
b7cb93e5 2951 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
50b5b952 2952 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
d7a5a11d 2953 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
50b5b952 2954 bacmp(&hdev->static_addr, BDADDR_ANY))) {
a1f4c318
JH
2955 bacpy(bdaddr, &hdev->static_addr);
2956 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2957 } else {
2958 bacpy(bdaddr, &hdev->bdaddr);
2959 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2960 }
2961}
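/* Illustrative sketch, not part of the original file: fetching the
 * identity address before logging it. The function and variable
 * names are examples only.
 */
static void example_log_identity(struct hci_dev *hdev)
{
 bdaddr_t id;
 u8 id_type;

 hci_copy_identity_address(hdev, &id, &id_type);
 BT_DBG("%s identity %pMR (type %u)", hdev->name, &id, id_type);
}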
2962
9be0dab7
DH
2963/* Alloc HCI device */
2964struct hci_dev *hci_alloc_dev(void)
2965{
2966 struct hci_dev *hdev;
2967
27f70f3e 2968 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
9be0dab7
DH
2969 if (!hdev)
2970 return NULL;
2971
b1b813d4
DH
2972 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2973 hdev->esco_type = (ESCO_HV1);
2974 hdev->link_mode = (HCI_LM_ACCEPT);
b4cb9fb2
MH
2975 hdev->num_iac = 0x01; /* One IAC support is mandatory */
2976 hdev->io_capability = 0x03; /* No Input No Output */
96c2103a 2977 hdev->manufacturer = 0xffff; /* Default to internal use */
bbaf444a
JH
2978 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2979 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
d2609b34
FG
2980 hdev->adv_instance_cnt = 0;
2981 hdev->cur_adv_instance = 0x00;
5d900e46 2982 hdev->adv_instance_timeout = 0;
b1b813d4 2983
b1b813d4
DH
2984 hdev->sniff_max_interval = 800;
2985 hdev->sniff_min_interval = 80;
2986
3f959d46 2987 hdev->le_adv_channel_map = 0x07;
628531c9
GL
2988 hdev->le_adv_min_interval = 0x0800;
2989 hdev->le_adv_max_interval = 0x0800;
bef64738
MH
2990 hdev->le_scan_interval = 0x0060;
2991 hdev->le_scan_window = 0x0030;
b48c3b59
JH
2992 hdev->le_conn_min_interval = 0x0018;
2993 hdev->le_conn_max_interval = 0x0028;
04fb7d90
MH
2994 hdev->le_conn_latency = 0x0000;
2995 hdev->le_supv_timeout = 0x002a;
a8e1bfaa
MH
2996 hdev->le_def_tx_len = 0x001b;
2997 hdev->le_def_tx_time = 0x0148;
2998 hdev->le_max_tx_len = 0x001b;
2999 hdev->le_max_tx_time = 0x0148;
3000 hdev->le_max_rx_len = 0x001b;
3001 hdev->le_max_rx_time = 0x0148;
bef64738 3002
d6bfd59c 3003 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
b9a7a61e 3004 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
31ad1691
AK
3005 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3006 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
d6bfd59c 3007
b1b813d4
DH
3008 mutex_init(&hdev->lock);
3009 mutex_init(&hdev->req_lock);
3010
3011 INIT_LIST_HEAD(&hdev->mgmt_pending);
3012 INIT_LIST_HEAD(&hdev->blacklist);
6659358e 3013 INIT_LIST_HEAD(&hdev->whitelist);
b1b813d4
DH
3014 INIT_LIST_HEAD(&hdev->uuids);
3015 INIT_LIST_HEAD(&hdev->link_keys);
3016 INIT_LIST_HEAD(&hdev->long_term_keys);
970c4e46 3017 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
b1b813d4 3018 INIT_LIST_HEAD(&hdev->remote_oob_data);
d2ab0ac1 3019 INIT_LIST_HEAD(&hdev->le_white_list);
15819a70 3020 INIT_LIST_HEAD(&hdev->le_conn_params);
77a77a30 3021 INIT_LIST_HEAD(&hdev->pend_le_conns);
66f8455a 3022 INIT_LIST_HEAD(&hdev->pend_le_reports);
6b536b5e 3023 INIT_LIST_HEAD(&hdev->conn_hash.list);
d2609b34 3024 INIT_LIST_HEAD(&hdev->adv_instances);
b1b813d4
DH
3025
3026 INIT_WORK(&hdev->rx_work, hci_rx_work);
3027 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3028 INIT_WORK(&hdev->tx_work, hci_tx_work);
3029 INIT_WORK(&hdev->power_on, hci_power_on);
c7741d16 3030 INIT_WORK(&hdev->error_reset, hci_error_reset);
b1b813d4 3031
b1b813d4 3032 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
b1b813d4 3033
b1b813d4
DH
3034 skb_queue_head_init(&hdev->rx_q);
3035 skb_queue_head_init(&hdev->cmd_q);
3036 skb_queue_head_init(&hdev->raw_q);
3037
3038 init_waitqueue_head(&hdev->req_wait_q);
3039
65cc2b49 3040 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
b1b813d4 3041
5fc16cc4
JH
3042 hci_request_setup(hdev);
3043
b1b813d4
DH
3044 hci_init_sysfs(hdev);
3045 discovery_init(hdev);
9be0dab7
DH
3046
3047 return hdev;
3048}
3049EXPORT_SYMBOL(hci_alloc_dev);
3050
3051/* Free HCI device */
3052void hci_free_dev(struct hci_dev *hdev)
3053{
9be0dab7
DH
3054 /* will free via device release */
3055 put_device(&hdev->dev);
3056}
3057EXPORT_SYMBOL(hci_free_dev);
3058
1da177e4
LT
3059/* Register HCI device */
3060int hci_register_dev(struct hci_dev *hdev)
3061{
b1b813d4 3062 int id, error;
1da177e4 3063
74292d5a 3064 if (!hdev->open || !hdev->close || !hdev->send)
1da177e4
LT
3065 return -EINVAL;
3066
08add513
MM
3067 /* Do not allow HCI_AMP devices to register at index 0,
3068 * so the index can be used as the AMP controller ID.
3069 */
3df92b31 3070 switch (hdev->dev_type) {
ca8bee5d 3071 case HCI_PRIMARY:
3df92b31
SL
3072 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3073 break;
3074 case HCI_AMP:
3075 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3076 break;
3077 default:
3078 return -EINVAL;
1da177e4 3079 }
8e87d142 3080
3df92b31
SL
3081 if (id < 0)
3082 return id;
3083
1da177e4
LT
3084 sprintf(hdev->name, "hci%d", id);
3085 hdev->id = id;
2d8b3a11
AE
3086
3087 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3088
29e2dd0d 3089 hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
33ca954d
DH
3090 if (!hdev->workqueue) {
3091 error = -ENOMEM;
3092 goto err;
3093 }
f48fd9c8 3094
29e2dd0d
TH
3095 hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
3096 hdev->name);
6ead1bbc
JH
3097 if (!hdev->req_workqueue) {
3098 destroy_workqueue(hdev->workqueue);
3099 error = -ENOMEM;
3100 goto err;
3101 }
3102
0153e2ec
MH
3103 if (!IS_ERR_OR_NULL(bt_debugfs))
3104 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3105
bdc3e0f1
MH
3106 dev_set_name(&hdev->dev, "%s", hdev->name);
3107
3108 error = device_add(&hdev->dev);
33ca954d 3109 if (error < 0)
54506918 3110 goto err_wqueue;
1da177e4 3111
6d5d2ee6
HK
3112 hci_leds_init(hdev);
3113
611b30f7 3114 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
3115 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3116 hdev);
611b30f7
MH
3117 if (hdev->rfkill) {
3118 if (rfkill_register(hdev->rfkill) < 0) {
3119 rfkill_destroy(hdev->rfkill);
3120 hdev->rfkill = NULL;
3121 }
3122 }
3123
5e130367 3124 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
a1536da2 3125 hci_dev_set_flag(hdev, HCI_RFKILLED);
5e130367 3126
a1536da2
MH
3127 hci_dev_set_flag(hdev, HCI_SETUP);
3128 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
ce2be9ac 3129
ca8bee5d 3130 if (hdev->dev_type == HCI_PRIMARY) {
56f87901
JH
3131 /* Assume BR/EDR support until proven otherwise (such as
3132 * through reading supported features during init.
3133 */
a1536da2 3134 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
56f87901 3135 }
ce2be9ac 3136
fcee3377
GP
3137 write_lock(&hci_dev_list_lock);
3138 list_add(&hdev->list, &hci_dev_list);
3139 write_unlock(&hci_dev_list_lock);
3140
4a964404
MH
3141 /* Devices that are marked for raw-only usage are unconfigured
3142 * and should not be included in normal operation.
fee746b0
MH
3143 */
3144 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
a1536da2 3145 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
fee746b0 3146
05fcd4c4 3147 hci_sock_dev_event(hdev, HCI_DEV_REG);
dc946bd8 3148 hci_dev_hold(hdev);
1da177e4 3149
19202573 3150 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 3151
1da177e4 3152 return id;
f48fd9c8 3153
33ca954d
DH
3154err_wqueue:
3155 destroy_workqueue(hdev->workqueue);
6ead1bbc 3156 destroy_workqueue(hdev->req_workqueue);
33ca954d 3157err:
3df92b31 3158 ida_simple_remove(&hci_index_ida, hdev->id);
f48fd9c8 3159
33ca954d 3160 return error;
1da177e4
LT
3161}
3162EXPORT_SYMBOL(hci_register_dev);
3163
3164/* Unregister HCI device */
59735631 3165void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 3166{
2d7cc19e 3167 int id;
ef222013 3168
c13854ce 3169 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 3170
a1536da2 3171 hci_dev_set_flag(hdev, HCI_UNREGISTER);
94324962 3172
3df92b31
SL
3173 id = hdev->id;
3174
f20d09d5 3175 write_lock(&hci_dev_list_lock);
1da177e4 3176 list_del(&hdev->list);
f20d09d5 3177 write_unlock(&hci_dev_list_lock);
1da177e4 3178
b9b5ef18
GP
3179 cancel_work_sync(&hdev->power_on);
3180
bf389cab
JS
3181 hci_dev_do_close(hdev);
3182
ab81cbf9 3183 if (!test_bit(HCI_INIT, &hdev->flags) &&
d7a5a11d
MH
3184 !hci_dev_test_flag(hdev, HCI_SETUP) &&
3185 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
09fd0de5 3186 hci_dev_lock(hdev);
744cf19e 3187 mgmt_index_removed(hdev);
09fd0de5 3188 hci_dev_unlock(hdev);
56e5cb86 3189 }
ab81cbf9 3190
2e58ef3e
JH
3191 /* mgmt_index_removed should take care of emptying the
3192 * pending list */
3193 BUG_ON(!list_empty(&hdev->mgmt_pending));
3194
05fcd4c4 3195 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
1da177e4 3196
611b30f7
MH
3197 if (hdev->rfkill) {
3198 rfkill_unregister(hdev->rfkill);
3199 rfkill_destroy(hdev->rfkill);
3200 }
3201
bdc3e0f1 3202 device_del(&hdev->dev);
147e2d59 3203
0153e2ec 3204 debugfs_remove_recursive(hdev->debugfs);
5177a838
MH
3205 kfree_const(hdev->hw_info);
3206 kfree_const(hdev->fw_info);
0153e2ec 3207
f48fd9c8 3208 destroy_workqueue(hdev->workqueue);
6ead1bbc 3209 destroy_workqueue(hdev->req_workqueue);
f48fd9c8 3210
09fd0de5 3211 hci_dev_lock(hdev);
dcc36c16 3212 hci_bdaddr_list_clear(&hdev->blacklist);
6659358e 3213 hci_bdaddr_list_clear(&hdev->whitelist);
2aeb9a1a 3214 hci_uuids_clear(hdev);
55ed8ca1 3215 hci_link_keys_clear(hdev);
b899efaf 3216 hci_smp_ltks_clear(hdev);
970c4e46 3217 hci_smp_irks_clear(hdev);
2763eda6 3218 hci_remote_oob_data_clear(hdev);
d2609b34 3219 hci_adv_instances_clear(hdev);
dcc36c16 3220 hci_bdaddr_list_clear(&hdev->le_white_list);
373110c5 3221 hci_conn_params_clear_all(hdev);
22078800 3222 hci_discovery_filter_clear(hdev);
09fd0de5 3223 hci_dev_unlock(hdev);
e2e0cacb 3224
dc946bd8 3225 hci_dev_put(hdev);
3df92b31
SL
3226
3227 ida_simple_remove(&hci_index_ida, id);
1da177e4
LT
3228}
3229EXPORT_SYMBOL(hci_unregister_dev);
3230
3231/* Suspend HCI device */
3232int hci_suspend_dev(struct hci_dev *hdev)
3233{
05fcd4c4 3234 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
1da177e4
LT
3235 return 0;
3236}
3237EXPORT_SYMBOL(hci_suspend_dev);
3238
3239/* Resume HCI device */
3240int hci_resume_dev(struct hci_dev *hdev)
3241{
05fcd4c4 3242 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
1da177e4
LT
3243 return 0;
3244}
3245EXPORT_SYMBOL(hci_resume_dev);
3246
75e0569f
MH
3247/* Reset HCI device */
3248int hci_reset_dev(struct hci_dev *hdev)
3249{
3250 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
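 /* Three bytes of a synthetic Hardware Error event: the event
 * code, the parameter length (1) and the error code (0x00).
 */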
3251 struct sk_buff *skb;
3252
3253 skb = bt_skb_alloc(3, GFP_ATOMIC);
3254 if (!skb)
3255 return -ENOMEM;
3256
d79f34e3 3257 hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
59ae1d12 3258 skb_put_data(skb, hw_err, 3);
75e0569f
MH
3259
3260 /* Send Hardware Error to upper stack */
3261 return hci_recv_frame(hdev, skb);
3262}
3263EXPORT_SYMBOL(hci_reset_dev);
3264
76bca880 3265/* Receive frame from HCI drivers */
e1a26170 3266int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 3267{
76bca880 3268 if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
a8c5fb1a 3269 !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
3270 kfree_skb(skb);
3271 return -ENXIO;
3272 }
3273
d79f34e3
MH
3274 if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT &&
3275 hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT &&
3276 hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) {
fe806dce
MH
3277 kfree_skb(skb);
3278 return -EINVAL;
3279 }
3280
d82603c6 3281 /* Incoming skb */
76bca880
MH
3282 bt_cb(skb)->incoming = 1;
3283
3284 /* Time stamp */
3285 __net_timestamp(skb);
3286
76bca880 3287 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 3288 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 3289
76bca880
MH
3290 return 0;
3291}
3292EXPORT_SYMBOL(hci_recv_frame);
3293
e875ff84
MH
3294/* Receive diagnostic message from HCI drivers */
3295int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3296{
581d6fd6 3297 /* Mark as diagnostic packet */
d79f34e3 3298 hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
581d6fd6 3299
e875ff84
MH
3300 /* Time stamp */
3301 __net_timestamp(skb);
3302
581d6fd6
MH
3303 skb_queue_tail(&hdev->rx_q, skb);
3304 queue_work(hdev->workqueue, &hdev->rx_work);
e875ff84 3305
e875ff84
MH
3306 return 0;
3307}
3308EXPORT_SYMBOL(hci_recv_diag);
3309
5177a838
MH
3310void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
3311{
3312 va_list vargs;
3313
3314 va_start(vargs, fmt);
3315 kfree_const(hdev->hw_info);
3316 hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3317 va_end(vargs);
3318}
3319EXPORT_SYMBOL(hci_set_hw_info);
3320
3321void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
3322{
3323 va_list vargs;
3324
3325 va_start(vargs, fmt);
3326 kfree_const(hdev->fw_info);
3327 hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3328 va_end(vargs);
3329}
3330EXPORT_SYMBOL(hci_set_fw_info);
3331
1da177e4
LT
3332/* ---- Interface to upper protocols ---- */
3333
1da177e4
LT
3334int hci_register_cb(struct hci_cb *cb)
3335{
3336 BT_DBG("%p name %s", cb, cb->name);
3337
fba7ecf0 3338 mutex_lock(&hci_cb_list_lock);
00629e0f 3339 list_add_tail(&cb->list, &hci_cb_list);
fba7ecf0 3340 mutex_unlock(&hci_cb_list_lock);
1da177e4
LT
3341
3342 return 0;
3343}
3344EXPORT_SYMBOL(hci_register_cb);
3345
3346int hci_unregister_cb(struct hci_cb *cb)
3347{
3348 BT_DBG("%p name %s", cb, cb->name);
3349
fba7ecf0 3350 mutex_lock(&hci_cb_list_lock);
1da177e4 3351 list_del(&cb->list);
fba7ecf0 3352 mutex_unlock(&hci_cb_list_lock);
1da177e4
LT
3353
3354 return 0;
3355}
3356EXPORT_SYMBOL(hci_unregister_cb);
3357
51086991 3358static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4 3359{
cdc52faa
MH
3360 int err;
3361
d79f34e3
MH
3362 BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3363 skb->len);
1da177e4 3364
cd82e61c
MH
3365 /* Time stamp */
3366 __net_timestamp(skb);
1da177e4 3367
cd82e61c
MH
3368 /* Send copy to monitor */
3369 hci_send_to_monitor(hdev, skb);
3370
3371 if (atomic_read(&hdev->promisc)) {
3372 /* Send copy to the sockets */
470fe1b5 3373 hci_send_to_sock(hdev, skb);
1da177e4
LT
3374 }
3375
3376 /* Get rid of skb owner, prior to sending to the driver. */
3377 skb_orphan(skb);
3378
73d0d3c8
MH
3379 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3380 kfree_skb(skb);
3381 return;
3382 }
3383
cdc52faa
MH
3384 err = hdev->send(hdev, skb);
3385 if (err < 0) {
2064ee33 3386 bt_dev_err(hdev, "sending frame failed (%d)", err);
cdc52faa
MH
3387 kfree_skb(skb);
3388 }
1da177e4
LT
3389}
3390
1ca3a9d0 3391/* Send HCI command */
07dc93dd
JH
3392int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3393 const void *param)
1ca3a9d0
JH
3394{
3395 struct sk_buff *skb;
3396
3397 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3398
3399 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3400 if (!skb) {
2064ee33 3401 bt_dev_err(hdev, "no memory for command");
1ca3a9d0
JH
3402 return -ENOMEM;
3403 }
3404
49c922bb 3405 /* Stand-alone HCI commands must be flagged as
11714b3d
JH
3406 * single-command requests.
3407 */
44d27137 3408 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
11714b3d 3409
1da177e4 3410 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 3411 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
3412
3413 return 0;
3414}
1da177e4 3415
d6ee6ad7
LP
3416int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3417 const void *param)
3418{
3419 struct sk_buff *skb;
3420
3421 if (hci_opcode_ogf(opcode) != 0x3f) {
3422 /* A controller receiving a command shall respond with either
3423 * a Command Status Event or a Command Complete Event.
3424 * Therefore, all standard HCI commands must be sent via the
3425 * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3426 * Some vendors do not comply with this rule for vendor-specific
3427 * commands and do not return any event. We want to support
3428 * unresponded commands for such cases only.
3429 */
3430 bt_dev_err(hdev, "unresponded command not supported");
3431 return -EINVAL;
3432 }
3433
3434 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3435 if (!skb) {
3436 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3437 opcode);
3438 return -ENOMEM;
3439 }
3440
3441 hci_send_frame(hdev, skb);
3442
3443 return 0;
3444}
3445EXPORT_SYMBOL(__hci_cmd_send);
3446
1da177e4 3447/* Get data from the previously sent command */
a9de9248 3448void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
3449{
3450 struct hci_command_hdr *hdr;
3451
3452 if (!hdev->sent_cmd)
3453 return NULL;
3454
3455 hdr = (void *) hdev->sent_cmd->data;
3456
a9de9248 3457 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
3458 return NULL;
3459
f0e09510 3460 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
3461
3462 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3463}
3464
fbef168f
LP
 3465 /* Send an HCI command and wait for the Command Complete event */
3466struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3467 const void *param, u32 timeout)
3468{
3469 struct sk_buff *skb;
3470
3471 if (!test_bit(HCI_UP, &hdev->flags))
3472 return ERR_PTR(-ENETDOWN);
3473
3474 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3475
b504430c 3476 hci_req_sync_lock(hdev);
fbef168f 3477 skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
b504430c 3478 hci_req_sync_unlock(hdev);
fbef168f
LP
3479
3480 return skb;
3481}
3482EXPORT_SYMBOL(hci_cmd_sync);
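/* Illustrative sketch, not part of the original file: one way a
 * caller might use hci_cmd_sync(). The opcode and timeout are
 * example choices, and the function name is hypothetical.
 */
static int example_read_local_version(struct hci_dev *hdev)
{
 struct sk_buff *skb;

 skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 HCI_INIT_TIMEOUT);
 if (IS_ERR(skb))
 return PTR_ERR(skb);

 /* skb->data now holds the Command Complete parameters */
 kfree_skb(skb);
 return 0;
}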
3483
1da177e4
LT
3484/* Send ACL data */
3485static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3486{
3487 struct hci_acl_hdr *hdr;
3488 int len = skb->len;
3489
badff6d0
ACM
3490 skb_push(skb, HCI_ACL_HDR_SIZE);
3491 skb_reset_transport_header(skb);
9c70220b 3492 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
3493 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3494 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
3495}
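/* hci_handle_pack() folds the 12-bit connection handle and the 2-bit
 * packet boundary/broadcast flags into the 16-bit handle field:
 * (handle & 0x0fff) | (flags << 12).
 */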
3496
ee22be7e 3497static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 3498 struct sk_buff *skb, __u16 flags)
1da177e4 3499{
ee22be7e 3500 struct hci_conn *conn = chan->conn;
1da177e4
LT
3501 struct hci_dev *hdev = conn->hdev;
3502 struct sk_buff *list;
3503
087bfd99
GP
3504 skb->len = skb_headlen(skb);
3505 skb->data_len = 0;
3506
d79f34e3 3507 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
204a6e54
AE
3508
3509 switch (hdev->dev_type) {
ca8bee5d 3510 case HCI_PRIMARY:
204a6e54
AE
3511 hci_add_acl_hdr(skb, conn->handle, flags);
3512 break;
3513 case HCI_AMP:
3514 hci_add_acl_hdr(skb, chan->handle, flags);
3515 break;
3516 default:
2064ee33 3517 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
204a6e54
AE
3518 return;
3519 }
087bfd99 3520
70f23020
AE
3521 list = skb_shinfo(skb)->frag_list;
3522 if (!list) {
1da177e4
LT
3523 /* Non fragmented */
3524 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3525
73d80deb 3526 skb_queue_tail(queue, skb);
1da177e4
LT
3527 } else {
3528 /* Fragmented */
3529 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3530
3531 skb_shinfo(skb)->frag_list = NULL;
3532
9cfd5a23
JR
3533 /* Queue all fragments atomically. We need to use spin_lock_bh
3534 * here because of 6LoWPAN links, as there this function is
3535 * called from softirq and using normal spin lock could cause
3536 * deadlocks.
3537 */
3538 spin_lock_bh(&queue->lock);
1da177e4 3539
73d80deb 3540 __skb_queue_tail(queue, skb);
e702112f
AE
3541
3542 flags &= ~ACL_START;
3543 flags |= ACL_CONT;
1da177e4
LT
3544 do {
3545 skb = list; list = list->next;
8e87d142 3546
d79f34e3 3547 hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
e702112f 3548 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
3549
3550 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3551
73d80deb 3552 __skb_queue_tail(queue, skb);
1da177e4
LT
3553 } while (list);
3554
9cfd5a23 3555 spin_unlock_bh(&queue->lock);
1da177e4 3556 }
73d80deb
LAD
3557}
3558
3559void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3560{
ee22be7e 3561 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 3562
f0e09510 3563 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb 3564
ee22be7e 3565 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 3566
3eff45ea 3567 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 3568}
1da177e4
LT
3569
3570/* Send SCO data */
0d861d8b 3571void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
3572{
3573 struct hci_dev *hdev = conn->hdev;
3574 struct hci_sco_hdr hdr;
3575
3576 BT_DBG("%s len %d", hdev->name, skb->len);
3577
aca3192c 3578 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
3579 hdr.dlen = skb->len;
3580
badff6d0
ACM
3581 skb_push(skb, HCI_SCO_HDR_SIZE);
3582 skb_reset_transport_header(skb);
9c70220b 3583 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4 3584
d79f34e3 3585 hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
c78ae283 3586
1da177e4 3587 skb_queue_tail(&conn->data_q, skb);
3eff45ea 3588 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 3589}
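/* Usage sketch (illustrative): callers such as the SCO socket layer
 * are expected to bound the frame to the controller's SCO MTU first,
 * since hdr.dlen above is a single byte:
 *
 *	if (skb->len > hdev->sco_mtu)
 *		return -EINVAL;
 *	hci_send_sco(conn, skb);
 */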
1da177e4
LT
3590
3591/* ---- HCI TX task (outgoing data) ---- */
3592
3593/* HCI Connection scheduler */
6039aa73
GP
3594static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3595 int *quote)
1da177e4
LT
3596{
3597 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 3598 struct hci_conn *conn = NULL, *c;
abc5de8f 3599 unsigned int num = 0, min = ~0;
1da177e4 3600
8e87d142 3601 /* We don't have to lock device here. Connections are always
1da177e4 3602 * added and removed with TX task disabled. */
bf4c6325
GP
3603
3604 rcu_read_lock();
3605
3606 list_for_each_entry_rcu(c, &h->list, list) {
769be974 3607 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 3608 continue;
769be974
MH
3609
3610 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3611 continue;
3612
1da177e4
LT
3613 num++;
3614
3615 if (c->sent < min) {
3616 min = c->sent;
3617 conn = c;
3618 }
52087a79
LAD
3619
3620 if (hci_conn_num(hdev, type) == num)
3621 break;
1da177e4
LT
3622 }
3623
bf4c6325
GP
3624 rcu_read_unlock();
3625
1da177e4 3626 if (conn) {
6ed58ec5
VT
3627 int cnt, q;
3628
3629 switch (conn->type) {
3630 case ACL_LINK:
3631 cnt = hdev->acl_cnt;
3632 break;
3633 case SCO_LINK:
3634 case ESCO_LINK:
3635 cnt = hdev->sco_cnt;
3636 break;
3637 case LE_LINK:
3638 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3639 break;
3640 default:
3641 cnt = 0;
2064ee33 3642 bt_dev_err(hdev, "unknown link type %d", conn->type);
6ed58ec5
VT
3643 }
3644
3645 q = cnt / num;
1da177e4
LT
3646 *quote = q ? q : 1;
3647 } else
3648 *quote = 0;
3649
3650 BT_DBG("conn %p quote %d", conn, *quote);
3651 return conn;
3652}
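/* Worked example (illustrative): with hdev->sco_cnt == 8 and three
 * SCO connections holding queued data, the least-used connection wins
 * and its quote is 8 / 3 == 2, so transmission round-robins in chunks
 * of two packets. A zero quotient is bumped to 1 so a connection with
 * pending data is never starved outright.
 */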
3653
6039aa73 3654static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
3655{
3656 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 3657 struct hci_conn *c;
1da177e4 3658
2064ee33 3659 bt_dev_err(hdev, "link tx timeout");
1da177e4 3660
bf4c6325
GP
3661 rcu_read_lock();
3662
1da177e4 3663 /* Kill stalled connections */
bf4c6325 3664 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 3665 if (c->type == type && c->sent) {
2064ee33
MH
3666 bt_dev_err(hdev, "killing stalled connection %pMR",
3667 &c->dst);
bed71748 3668 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
3669 }
3670 }
bf4c6325
GP
3671
3672 rcu_read_unlock();
1da177e4
LT
3673}
3674
6039aa73
GP
3675static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3676 int *quote)
1da177e4 3677{
73d80deb
LAD
3678 struct hci_conn_hash *h = &hdev->conn_hash;
3679 struct hci_chan *chan = NULL;
abc5de8f 3680 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 3681 struct hci_conn *conn;
73d80deb
LAD
3682 int cnt, q, conn_num = 0;
3683
3684 BT_DBG("%s", hdev->name);
3685
bf4c6325
GP
3686 rcu_read_lock();
3687
3688 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
3689 struct hci_chan *tmp;
3690
3691 if (conn->type != type)
3692 continue;
3693
3694 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3695 continue;
3696
3697 conn_num++;
3698
8192edef 3699 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
3700 struct sk_buff *skb;
3701
3702 if (skb_queue_empty(&tmp->data_q))
3703 continue;
3704
3705 skb = skb_peek(&tmp->data_q);
3706 if (skb->priority < cur_prio)
3707 continue;
3708
3709 if (skb->priority > cur_prio) {
3710 num = 0;
3711 min = ~0;
3712 cur_prio = skb->priority;
3713 }
3714
3715 num++;
3716
3717 if (conn->sent < min) {
3718 min = conn->sent;
3719 chan = tmp;
3720 }
3721 }
3722
3723 if (hci_conn_num(hdev, type) == conn_num)
3724 break;
3725 }
3726
bf4c6325
GP
3727 rcu_read_unlock();
3728
73d80deb
LAD
3729 if (!chan)
3730 return NULL;
3731
3732 switch (chan->conn->type) {
3733 case ACL_LINK:
3734 cnt = hdev->acl_cnt;
3735 break;
bd1eb66b
AE
3736 case AMP_LINK:
3737 cnt = hdev->block_cnt;
3738 break;
73d80deb
LAD
3739 case SCO_LINK:
3740 case ESCO_LINK:
3741 cnt = hdev->sco_cnt;
3742 break;
3743 case LE_LINK:
3744 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3745 break;
3746 default:
3747 cnt = 0;
2064ee33 3748 bt_dev_err(hdev, "unknown link type %d", chan->conn->type);
73d80deb
LAD
3749 }
3750
3751 q = cnt / num;
3752 *quote = q ? q : 1;
3753 BT_DBG("chan %p quote %d", chan, *quote);
3754 return chan;
3755}
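/* Worked example (illustrative): if one channel's head skb carries
 * priority 6 while two other channels queue priority 5 traffic, the
 * priority 6 channel alone competes (num == 1) and takes the whole
 * quota; the lower-priority channels wait for a later round.
 */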
3756
02b20f0b
LAD
3757static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3758{
3759 struct hci_conn_hash *h = &hdev->conn_hash;
3760 struct hci_conn *conn;
3761 int num = 0;
3762
3763 BT_DBG("%s", hdev->name);
3764
bf4c6325
GP
3765 rcu_read_lock();
3766
3767 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
3768 struct hci_chan *chan;
3769
3770 if (conn->type != type)
3771 continue;
3772
3773 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3774 continue;
3775
3776 num++;
3777
8192edef 3778 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
3779 struct sk_buff *skb;
3780
3781 if (chan->sent) {
3782 chan->sent = 0;
3783 continue;
3784 }
3785
3786 if (skb_queue_empty(&chan->data_q))
3787 continue;
3788
3789 skb = skb_peek(&chan->data_q);
3790 if (skb->priority >= HCI_PRIO_MAX - 1)
3791 continue;
3792
3793 skb->priority = HCI_PRIO_MAX - 1;
3794
3795 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 3796 skb->priority);
02b20f0b
LAD
3797 }
3798
3799 if (hci_conn_num(hdev, type) == num)
3800 break;
3801 }
bf4c6325
GP
3802
3803 rcu_read_unlock();
3804
02b20f0b
LAD
3805}
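/* Worked example (illustrative): a channel that received no quota in
 * the previous round (chan->sent == 0) but still has data queued gets
 * its head skb promoted to HCI_PRIO_MAX - 1, so it outranks ordinary
 * traffic in the next hci_chan_sent() pass and cannot starve.
 */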
3806
b71d385a
AE
3807static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3808{
3809 /* Calculate count of blocks used by this packet */
3810 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3811}
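/* Worked example (illustrative): with hdev->block_len == 100, an ACL
 * frame of 504 bytes (4 byte header plus 500 byte payload) costs
 * DIV_ROUND_UP(500, 100) == 5 controller buffer blocks.
 */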
3812
6039aa73 3813static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 3814{
d7a5a11d 3815 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1da177e4
LT
3816 /* ACL tx timeout must be longer than maximum
3817 * link supervision timeout (40.9 seconds) */
63d2bc1b 3818 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 3819 HCI_ACL_TX_TIMEOUT))
bae1f5d9 3820 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 3821 }
63d2bc1b 3822}
1da177e4 3823
6039aa73 3824static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
3825{
3826 unsigned int cnt = hdev->acl_cnt;
3827 struct hci_chan *chan;
3828 struct sk_buff *skb;
3829 int quote;
3830
3831 __check_timeout(hdev, cnt);
04837f64 3832
73d80deb 3833 while (hdev->acl_cnt &&
a8c5fb1a 3834 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
3835 u32 priority = (skb_peek(&chan->data_q))->priority;
3836 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 3837 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3838 skb->len, skb->priority);
73d80deb 3839
ec1cce24
LAD
3840 /* Stop if priority has changed */
3841 if (skb->priority < priority)
3842 break;
3843
3844 skb = skb_dequeue(&chan->data_q);
3845
73d80deb 3846 hci_conn_enter_active_mode(chan->conn,
04124681 3847 bt_cb(skb)->force_active);
04837f64 3848
57d17d70 3849 hci_send_frame(hdev, skb);
1da177e4
LT
3850 hdev->acl_last_tx = jiffies;
3851
3852 hdev->acl_cnt--;
73d80deb
LAD
3853 chan->sent++;
3854 chan->conn->sent++;
1da177e4
LT
3855 }
3856 }
02b20f0b
LAD
3857
3858 if (cnt != hdev->acl_cnt)
3859 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
3860}
3861
6039aa73 3862static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 3863{
63d2bc1b 3864 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
3865 struct hci_chan *chan;
3866 struct sk_buff *skb;
3867 int quote;
bd1eb66b 3868 u8 type;
b71d385a 3869
63d2bc1b 3870 __check_timeout(hdev, cnt);
b71d385a 3871
bd1eb66b
AE
3872 BT_DBG("%s", hdev->name);
3873
3874 if (hdev->dev_type == HCI_AMP)
3875 type = AMP_LINK;
3876 else
3877 type = ACL_LINK;
3878
b71d385a 3879 while (hdev->block_cnt > 0 &&
bd1eb66b 3880 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
3881 u32 priority = (skb_peek(&chan->data_q))->priority;
3882 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3883 int blocks;
3884
3885 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3886 skb->len, skb->priority);
b71d385a
AE
3887
3888 /* Stop if priority has changed */
3889 if (skb->priority < priority)
3890 break;
3891
3892 skb = skb_dequeue(&chan->data_q);
3893
3894 blocks = __get_blocks(hdev, skb);
3895 if (blocks > hdev->block_cnt)
3896 return;
3897
3898 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 3899 bt_cb(skb)->force_active);
b71d385a 3900
57d17d70 3901 hci_send_frame(hdev, skb);
b71d385a
AE
3902 hdev->acl_last_tx = jiffies;
3903
3904 hdev->block_cnt -= blocks;
3905 quote -= blocks;
3906
3907 chan->sent += blocks;
3908 chan->conn->sent += blocks;
3909 }
3910 }
3911
3912 if (cnt != hdev->block_cnt)
bd1eb66b 3913 hci_prio_recalculate(hdev, type);
b71d385a
AE
3914}
3915
6039aa73 3916static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
3917{
3918 BT_DBG("%s", hdev->name);
3919
bd1eb66b 3920 /* No ACL link over BR/EDR controller */
ca8bee5d 3921 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
bd1eb66b
AE
3922 return;
3923
3924 /* No AMP link over AMP controller */
3925 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
3926 return;
3927
3928 switch (hdev->flow_ctl_mode) {
3929 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3930 hci_sched_acl_pkt(hdev);
3931 break;
3932
3933 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3934 hci_sched_acl_blk(hdev);
3935 break;
3936 }
3937}
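/* Note (illustrative): packet-based flow control is the classic
 * scheme where credits count whole packets (hdev->acl_cnt), while
 * block-based mode, used with AMP controllers, charges each packet
 * a number of fixed-size buffer blocks computed by __get_blocks().
 */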
3938
1da177e4 3939/* Schedule SCO */
6039aa73 3940static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
3941{
3942 struct hci_conn *conn;
3943 struct sk_buff *skb;
3944 int quote;
3945
3946 BT_DBG("%s", hdev->name);
3947
52087a79
LAD
3948 if (!hci_conn_num(hdev, SCO_LINK))
3949 return;
3950
1da177e4
LT
3951 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3952 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3953 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 3954 hci_send_frame(hdev, skb);
1da177e4
LT
3955
3956 conn->sent++;
3957 if (conn->sent == ~0)
3958 conn->sent = 0;
3959 }
3960 }
3961}
3962
6039aa73 3963static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
3964{
3965 struct hci_conn *conn;
3966 struct sk_buff *skb;
3967 int quote;
3968
3969 BT_DBG("%s", hdev->name);
3970
52087a79
LAD
3971 if (!hci_conn_num(hdev, ESCO_LINK))
3972 return;
3973
8fc9ced3
GP
3974 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3975 &quote))) {
b6a0dc82
MH
3976 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3977 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 3978 hci_send_frame(hdev, skb);
b6a0dc82
MH
3979
3980 conn->sent++;
3981 if (conn->sent == ~0)
3982 conn->sent = 0;
3983 }
3984 }
3985}
3986
6039aa73 3987static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 3988{
73d80deb 3989 struct hci_chan *chan;
6ed58ec5 3990 struct sk_buff *skb;
02b20f0b 3991 int quote, cnt, tmp;
6ed58ec5
VT
3992
3993 BT_DBG("%s", hdev->name);
3994
52087a79
LAD
3995 if (!hci_conn_num(hdev, LE_LINK))
3996 return;
3997
d7a5a11d 3998 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
6ed58ec5
VT
3999 /* LE tx timeout must be longer than maximum
4000 * link supervision timeout (40.9 seconds) */
bae1f5d9 4001 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 4002 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 4003 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
4004 }
4005
4006 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 4007 tmp = cnt;
73d80deb 4008 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
4009 u32 priority = (skb_peek(&chan->data_q))->priority;
4010 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4011 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4012 skb->len, skb->priority);
6ed58ec5 4013
ec1cce24
LAD
4014 /* Stop if priority has changed */
4015 if (skb->priority < priority)
4016 break;
4017
4018 skb = skb_dequeue(&chan->data_q);
4019
57d17d70 4020 hci_send_frame(hdev, skb);
6ed58ec5
VT
4021 hdev->le_last_tx = jiffies;
4022
4023 cnt--;
73d80deb
LAD
4024 chan->sent++;
4025 chan->conn->sent++;
6ed58ec5
VT
4026 }
4027 }
73d80deb 4028
6ed58ec5
VT
4029 if (hdev->le_pkts)
4030 hdev->le_cnt = cnt;
4031 else
4032 hdev->acl_cnt = cnt;
02b20f0b
LAD
4033
4034 if (cnt != tmp)
4035 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
4036}
4037
3eff45ea 4038static void hci_tx_work(struct work_struct *work)
1da177e4 4039{
3eff45ea 4040 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
4041 struct sk_buff *skb;
4042
6ed58ec5 4043 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 4044 hdev->sco_cnt, hdev->le_cnt);
1da177e4 4045
d7a5a11d 4046 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
52de599e
MH
4047 /* Schedule queues and send stuff to HCI driver */
4048 hci_sched_acl(hdev);
4049 hci_sched_sco(hdev);
4050 hci_sched_esco(hdev);
4051 hci_sched_le(hdev);
4052 }
6ed58ec5 4053
1da177e4
LT
4054 /* Send next queued raw (unknown type) packet */
4055 while ((skb = skb_dequeue(&hdev->raw_q)))
57d17d70 4056 hci_send_frame(hdev, skb);
1da177e4
LT
4057}
4058
25985edc 4059/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
4060
4061/* ACL data packet */
6039aa73 4062static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
4063{
4064 struct hci_acl_hdr *hdr = (void *) skb->data;
4065 struct hci_conn *conn;
4066 __u16 handle, flags;
4067
4068 skb_pull(skb, HCI_ACL_HDR_SIZE);
4069
4070 handle = __le16_to_cpu(hdr->handle);
4071 flags = hci_flags(handle);
4072 handle = hci_handle(handle);
4073
f0e09510 4074 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 4075 handle, flags);
1da177e4
LT
4076
4077 hdev->stat.acl_rx++;
4078
4079 hci_dev_lock(hdev);
4080 conn = hci_conn_hash_lookup_handle(hdev, handle);
4081 hci_dev_unlock(hdev);
8e87d142 4082
1da177e4 4083 if (conn) {
65983fc7 4084 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 4085
1da177e4 4086 /* Send to upper protocol */
686ebf28
UF
4087 l2cap_recv_acldata(conn, skb, flags);
4088 return;
1da177e4 4089 } else {
2064ee33
MH
4090 bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
4091 handle);
1da177e4
LT
4092 }
4093
4094 kfree_skb(skb);
4095}
4096
4097/* SCO data packet */
6039aa73 4098static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
4099{
4100 struct hci_sco_hdr *hdr = (void *) skb->data;
4101 struct hci_conn *conn;
4102 __u16 handle;
4103
4104 skb_pull(skb, HCI_SCO_HDR_SIZE);
4105
4106 handle = __le16_to_cpu(hdr->handle);
4107
f0e09510 4108 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
4109
4110 hdev->stat.sco_rx++;
4111
4112 hci_dev_lock(hdev);
4113 conn = hci_conn_hash_lookup_handle(hdev, handle);
4114 hci_dev_unlock(hdev);
4115
4116 if (conn) {
1da177e4 4117 /* Send to upper protocol */
686ebf28
UF
4118 sco_recv_scodata(conn, skb);
4119 return;
1da177e4 4120 } else {
2064ee33
MH
4121 bt_dev_err(hdev, "SCO packet for unknown connection handle %d",
4122 handle);
1da177e4
LT
4123 }
4124
4125 kfree_skb(skb);
4126}
4127
9238f36a
JH
4128static bool hci_req_is_complete(struct hci_dev *hdev)
4129{
4130 struct sk_buff *skb;
4131
4132 skb = skb_peek(&hdev->cmd_q);
4133 if (!skb)
4134 return true;
4135
44d27137 4136 return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
9238f36a
JH
4137}
4138
42c6b129
JH
4139static void hci_resend_last(struct hci_dev *hdev)
4140{
4141 struct hci_command_hdr *sent;
4142 struct sk_buff *skb;
4143 u16 opcode;
4144
4145 if (!hdev->sent_cmd)
4146 return;
4147
4148 sent = (void *) hdev->sent_cmd->data;
4149 opcode = __le16_to_cpu(sent->opcode);
4150 if (opcode == HCI_OP_RESET)
4151 return;
4152
4153 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4154 if (!skb)
4155 return;
4156
4157 skb_queue_head(&hdev->cmd_q, skb);
4158 queue_work(hdev->workqueue, &hdev->cmd_work);
4159}
4160
e6214487
JH
4161void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4162 hci_req_complete_t *req_complete,
4163 hci_req_complete_skb_t *req_complete_skb)
9238f36a 4164{
9238f36a
JH
4165 struct sk_buff *skb;
4166 unsigned long flags;
4167
4168 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4169
42c6b129
JH
4170 /* If the completed command doesn't match the last one that was
4171 * sent we need to do special handling of it.
9238f36a 4172 */
42c6b129
JH
4173 if (!hci_sent_cmd_data(hdev, opcode)) {
4174 /* Some CSR based controllers generate a spontaneous
4175 * reset complete event during init and any pending
4176 * command will never be completed. In such a case we
4177 * need to resend whatever was the last sent
4178 * command.
4179 */
4180 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4181 hci_resend_last(hdev);
4182
9238f36a 4183 return;
42c6b129 4184 }
9238f36a
JH
4185
4186 /* If the command succeeded and there's still more commands in
4187 * this request the request is not yet complete.
4188 */
4189 if (!status && !hci_req_is_complete(hdev))
4190 return;
4191
4192 /* If this was the last command in a request the complete
4193 * callback would be found in hdev->sent_cmd instead of the
4194 * command queue (hdev->cmd_q).
4195 */
44d27137
JH
4196 if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
4197 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
e6214487
JH
4198 return;
4199 }
53e21fbc 4200
44d27137
JH
4201 if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4202 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
e6214487 4203 return;
9238f36a
JH
4204 }
4205
4206 /* Remove all pending commands belonging to this request */
4207 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4208 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
44d27137 4209 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
9238f36a
JH
4210 __skb_queue_head(&hdev->cmd_q, skb);
4211 break;
4212 }
4213
3bd7594e
DA
4214 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4215 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4216 else
4217 *req_complete = bt_cb(skb)->hci.req_complete;
9238f36a
JH
4218 kfree_skb(skb);
4219 }
4220 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
9238f36a
JH
4221}
4222
b78752cc 4223static void hci_rx_work(struct work_struct *work)
1da177e4 4224{
b78752cc 4225 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
4226 struct sk_buff *skb;
4227
4228 BT_DBG("%s", hdev->name);
4229
1da177e4 4230 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
4231 /* Send copy to monitor */
4232 hci_send_to_monitor(hdev, skb);
4233
1da177e4
LT
4234 if (atomic_read(&hdev->promisc)) {
4235 /* Send copy to the sockets */
470fe1b5 4236 hci_send_to_sock(hdev, skb);
1da177e4
LT
4237 }
4238
d7a5a11d 4239 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1da177e4
LT
4240 kfree_skb(skb);
4241 continue;
4242 }
4243
4244 if (test_bit(HCI_INIT, &hdev->flags)) {
4245 /* Don't process data packets in these states. */
d79f34e3 4246 switch (hci_skb_pkt_type(skb)) {
1da177e4
LT
4247 case HCI_ACLDATA_PKT:
4248 case HCI_SCODATA_PKT:
4249 kfree_skb(skb);
4250 continue;
3ff50b79 4251 }
1da177e4
LT
4252 }
4253
4254 /* Process frame */
d79f34e3 4255 switch (hci_skb_pkt_type(skb)) {
1da177e4 4256 case HCI_EVENT_PKT:
b78752cc 4257 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
4258 hci_event_packet(hdev, skb);
4259 break;
4260
4261 case HCI_ACLDATA_PKT:
4262 BT_DBG("%s ACL data packet", hdev->name);
4263 hci_acldata_packet(hdev, skb);
4264 break;
4265
4266 case HCI_SCODATA_PKT:
4267 BT_DBG("%s SCO data packet", hdev->name);
4268 hci_scodata_packet(hdev, skb);
4269 break;
4270
4271 default:
4272 kfree_skb(skb);
4273 break;
4274 }
4275 }
1da177e4
LT
4276}
4277
c347b765 4278static void hci_cmd_work(struct work_struct *work)
1da177e4 4279{
c347b765 4280 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
4281 struct sk_buff *skb;
4282
2104786b
AE
4283 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4284 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 4285
1da177e4 4286 /* Send queued commands */
5a08ecce
AE
4287 if (atomic_read(&hdev->cmd_cnt)) {
4288 skb = skb_dequeue(&hdev->cmd_q);
4289 if (!skb)
4290 return;
4291
7585b97a 4292 kfree_skb(hdev->sent_cmd);
1da177e4 4293
a675d7f1 4294 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 4295 if (hdev->sent_cmd) {
1da177e4 4296 atomic_dec(&hdev->cmd_cnt);
57d17d70 4297 hci_send_frame(hdev, skb);
7bdb8a5c 4298 if (test_bit(HCI_RESET, &hdev->flags))
65cc2b49 4299 cancel_delayed_work(&hdev->cmd_timer);
7bdb8a5c 4300 else
65cc2b49
MH
4301 schedule_delayed_work(&hdev->cmd_timer,
4302 HCI_CMD_TIMEOUT);
1da177e4
LT
4303 } else {
4304 skb_queue_head(&hdev->cmd_q, skb);
c347b765 4305 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
4306 }
4307 }
4308}