/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

/* ---- HCI debugfs entries ---- */

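/* The "dut_mode" debugfs attribute exposes Device Under Test mode:
 * reading reports whether HCI_DUT_MODE is set, and writing a boolean
 * either enables DUT mode or resets the controller to leave it again.
 */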
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	kfree_skb(skb);

	hci_dev_change_flag(hdev, HCI_DUT_MODE);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

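/* The "vendor_diag" debugfs attribute drives the optional set_diag
 * driver callback that enables or disables vendor specific diagnostic
 * messages.
 */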
static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	/* When the diagnostic flags are not persistent and the transport
	 * is not active, then there is no need for the vendor callback.
	 *
	 * Instead just store the desired value. If needed the setting
	 * will be programmed when the controller gets powered on.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    !test_bit(HCI_RUNNING, &hdev->flags))
		goto done;

	hci_req_lock(hdev);
	err = hdev->set_diag(hdev, enable);
	hci_req_unlock(hdev);

	if (err < 0)
		return err;

done:
	if (enable)
		hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
	else
		hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

	return count;
}

static const struct file_operations vendor_diag_fops = {
	.open		= simple_open,
	.read		= vendor_diag_read,
	.write		= vendor_diag_write,
	.llseek		= default_llseek,
};

190
f640ee98
MH
191static void hci_debugfs_create_basic(struct hci_dev *hdev)
192{
193 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
194 &dut_mode_fops);
195
196 if (hdev->set_diag)
197 debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
198 &vendor_diag_fops);
199}
200
/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

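/* Send a single HCI command as a synchronous request and sleep until
 * hci_req_sync_complete() delivers the skb for the matching event (or
 * the timeout expires, or a signal interrupts the wait).
 */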
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void amp_init2(struct hci_request *req)
{
	/* Read Local Supported Features. Not all AMP controllers
	 * support this so it's placed conditionally in the second
	 * stage init.
	 */
	if (req->hdev->commands[14] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
}

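/* Stage 1 of the controller init: reset (unless quirked away) followed
 * by the basic reads for the BR/EDR or AMP transport type.
 */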
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init1(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre-1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

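/* Stage 2 of the controller init: BR/EDR and LE base setup plus the
 * commands that depend on features discovered during stage 1.
 */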
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->dev_type == HCI_AMP)
		return amp_init2(req);

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available. However some controllers
		 * list the max_page as 0 as long as SSP has not been
		 * enabled. To achieve proper debugging output, force
		 * the minimum max_page to at least 1.
		 */
		hdev->max_page = 0x01;

		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			u8 mode = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI
		 * events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
		u8 enable = 1;

		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

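/* Stage 3 of the controller init: event masks, link policy and the LE
 * commands that are gated on the supported-commands bit mask.
 */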
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	if (hdev->commands[6] & 0x20 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x0f;

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40;	/* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04;	/* LE Direct Advertising
						 * Report
						 */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80;	/* LE Read Local P-256
						 * Public Key Complete
						 */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01;	/* LE Generate DHKey Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->commands[26] & 0x40) {
			/* Read LE White List Size */
			hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
				    0, NULL);
		}

		if (hdev->commands[26] & 0x80) {
			/* Clear LE White List */
			hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

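/* Stage 4 of the controller init: remaining optional commands that
 * depend on the supported commands and features read earlier.
 */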
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}

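/* Run the full four-stage init sequence for a configured controller
 * and, while still in the setup or config phase, create its debugfs
 * entries.
 */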
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first two stages of init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}

static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return.
 */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

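/* Add or refresh the inquiry cache entry for a discovered device and
 * return MGMT_DEV_FOUND_* flags indicating whether legacy pairing
 * applies and whether the remote name still needs confirmation.
 */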
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

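/* Handle the HCIINQUIRY ioctl: flush a stale inquiry cache if needed,
 * run the inquiry synchronously and copy the cached results back to
 * user space.
 */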
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses we will use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

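/* Power on the controller: open the transport, run the vendor setup
 * and the staged HCI init, and tear everything down again on failure.
 */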
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		ret = -ENODEV;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	set_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_OPEN);

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
		hci_sock_dev_event(hdev, HCI_DEV_SETUP);

		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			ret = __hci_unconf_init(hdev);
	}

	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			ret = __hci_init(hdev);
			if (!ret && hdev->post_init)
				ret = hdev->post_init(hdev);
		}
	}

	/* If the HCI Reset command is clearing all diagnostic settings,
	 * then they need to be reprogrammed after the init procedure
	 * completed.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
		ret = hdev->set_diag(hdev, true);

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		set_bit(HCI_UP, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_UP);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		clear_bit(HCI_RUNNING, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		if (p->conn) {
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
		list_del_init(&p->action);
	}

	BT_DBG("All LE pending actions cleared");
}

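/* Power off the controller: cancel pending work and timers, flush
 * discovery state and connections, optionally send HCI Reset and close
 * the transport.
 */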
int hci_dev_do_close(struct hci_dev *hdev)
{
	bool auto_off;

	BT_DBG("%s %p", hdev->name, hdev);

	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    test_bit(HCI_UP, &hdev->flags)) {
		/* Execute vendor specific shutdown routine */
		if (hdev->shutdown)
			hdev->shutdown(hdev);
	}

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);

	if (!auto_off && hdev->dev_type == HCI_BREDR)
		mgmt_powered(hdev, 0);

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	smp_unregister(hdev);

	hci_sock_dev_event(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	clear_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

	/* After this point our queues are empty
	 * and no tasks are scheduled.
	 */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

1763int hci_dev_close(__u16 dev)
1764{
1765 struct hci_dev *hdev;
1766 int err;
1767
70f23020
AE
1768 hdev = hci_dev_get(dev);
1769 if (!hdev)
1da177e4 1770 return -ENODEV;
8ee56540 1771
d7a5a11d 1772 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
0736cfa8
MH
1773 err = -EBUSY;
1774 goto done;
1775 }
1776
a69d8927 1777 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
8ee56540
MH
1778 cancel_delayed_work(&hdev->power_off);
1779
1da177e4 1780 err = hci_dev_do_close(hdev);
8ee56540 1781
0736cfa8 1782done:
1da177e4
LT
1783 hci_dev_put(hdev);
1784 return err;
1785}
1786
5c912495 1787static int hci_dev_do_reset(struct hci_dev *hdev)
1da177e4 1788{
5c912495 1789 int ret;
1da177e4 1790
5c912495 1791 BT_DBG("%s %p", hdev->name, hdev);
1da177e4
LT
1792
1793 hci_req_lock(hdev);
1da177e4 1794
1da177e4
LT
1795 /* Drop queues */
1796 skb_queue_purge(&hdev->rx_q);
1797 skb_queue_purge(&hdev->cmd_q);
1798
76727c02
JH
1799 /* Avoid potential lockdep warnings from the *_flush() calls by
1800 * ensuring the workqueue is empty up front.
1801 */
1802 drain_workqueue(hdev->workqueue);
1803
09fd0de5 1804 hci_dev_lock(hdev);
1f9b9a5d 1805 hci_inquiry_cache_flush(hdev);
1da177e4 1806 hci_conn_hash_flush(hdev);
09fd0de5 1807 hci_dev_unlock(hdev);
1da177e4
LT
1808
1809 if (hdev->flush)
1810 hdev->flush(hdev);
1811
8e87d142 1812 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 1813 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4 1814
fee746b0 1815 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1da177e4 1816
1da177e4 1817 hci_req_unlock(hdev);
1da177e4
LT
1818 return ret;
1819}
1820
5c912495
MH
1821int hci_dev_reset(__u16 dev)
1822{
1823 struct hci_dev *hdev;
1824 int err;
1825
1826 hdev = hci_dev_get(dev);
1827 if (!hdev)
1828 return -ENODEV;
1829
1830 if (!test_bit(HCI_UP, &hdev->flags)) {
1831 err = -ENETDOWN;
1832 goto done;
1833 }
1834
d7a5a11d 1835 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
5c912495
MH
1836 err = -EBUSY;
1837 goto done;
1838 }
1839
d7a5a11d 1840 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
5c912495
MH
1841 err = -EOPNOTSUPP;
1842 goto done;
1843 }
1844
1845 err = hci_dev_do_reset(hdev);
1846
1847done:
1848 hci_dev_put(hdev);
1849 return err;
1850}
1851
1da177e4
LT
1852int hci_dev_reset_stat(__u16 dev)
1853{
1854 struct hci_dev *hdev;
1855 int ret = 0;
1856
70f23020
AE
1857 hdev = hci_dev_get(dev);
1858 if (!hdev)
1da177e4
LT
1859 return -ENODEV;
1860
d7a5a11d 1861 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
0736cfa8
MH
1862 ret = -EBUSY;
1863 goto done;
1864 }
1865
d7a5a11d 1866 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
fee746b0
MH
1867 ret = -EOPNOTSUPP;
1868 goto done;
1869 }
1870
1da177e4
LT
1871 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1872
0736cfa8 1873done:
1da177e4 1874 hci_dev_put(hdev);
1da177e4
LT
1875 return ret;
1876}
1877
123abc08
JH
1878static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1879{
bc6d2d04 1880 bool conn_changed, discov_changed;
123abc08
JH
1881
1882 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1883
1884 if ((scan & SCAN_PAGE))
238be788
MH
1885 conn_changed = !hci_dev_test_and_set_flag(hdev,
1886 HCI_CONNECTABLE);
123abc08 1887 else
a69d8927
MH
1888 conn_changed = hci_dev_test_and_clear_flag(hdev,
1889 HCI_CONNECTABLE);
123abc08 1890
bc6d2d04 1891 if ((scan & SCAN_INQUIRY)) {
238be788
MH
1892 discov_changed = !hci_dev_test_and_set_flag(hdev,
1893 HCI_DISCOVERABLE);
bc6d2d04 1894 } else {
a358dc11 1895 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
a69d8927
MH
1896 discov_changed = hci_dev_test_and_clear_flag(hdev,
1897 HCI_DISCOVERABLE);
bc6d2d04
JH
1898 }
1899
d7a5a11d 1900 if (!hci_dev_test_flag(hdev, HCI_MGMT))
123abc08
JH
1901 return;
1902
bc6d2d04
JH
1903 if (conn_changed || discov_changed) {
1904 /* In case this was disabled through mgmt */
a1536da2 1905 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
bc6d2d04 1906
d7a5a11d 1907 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
bc6d2d04
JH
1908 mgmt_update_adv_data(hdev);
1909
123abc08 1910 mgmt_new_settings(hdev);
bc6d2d04 1911 }
123abc08
JH
1912}
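
/* Illustrative mapping (derived from the logic above, using the standard
 * Write Scan Enable values SCAN_DISABLED 0x00, SCAN_INQUIRY 0x01 and
 * SCAN_PAGE 0x02):
 *
 *	scan = SCAN_PAGE | SCAN_INQUIRY (0x03) -> CONNECTABLE and
 *	       DISCOVERABLE both set
 *	scan = SCAN_PAGE (0x02)                -> CONNECTABLE set,
 *	       DISCOVERABLE and LIMITED_DISCOVERABLE cleared
 *	scan = SCAN_DISABLED (0x00)            -> both cleared
 *
 * mgmt listeners are only notified when one of the flags actually
 * changed value.
 */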

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
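
/* Worked example for the HCISETACLMTU/HCISETSCOMTU unpacking above:
 * dev_opt is a 32-bit value carrying two 16-bit halves. On a
 * little-endian host the pointer arithmetic reads the low half as the
 * packet count and the high half as the MTU, so userspace would pack
 * e.g. an ACL MTU of 1021 with 8 packets as:
 *
 *	dr.dev_opt = (1021 << 16) | 8;	// acl_mtu = 1021, acl_pkts = 8
 */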

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
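
/* Userspace counterpart (illustrative sketch only, not kernel code).
 * The ioctl above is reached through a raw HCI socket, roughly:
 *
 *	struct hci_dev_list_req *dl;
 *	int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	dl = malloc(sizeof(*dl) + 16 * sizeof(struct hci_dev_req));
 *	dl->dev_num = 16;
 *	if (ioctl(sk, HCIGETDEVLIST, dl) == 0)
 *		// dl->dev_num now holds the number of filled entries
 *
 * Error handling and cleanup omitted for brevity.
 */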

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (blocked) {
		hci_dev_set_flag(hdev, HCI_RFKILLED);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG))
			hci_dev_do_close(hdev);
	} else {
		hci_dev_clear_flag(hdev, HCI_RFKILLED);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	BT_DBG("%s", hdev->name);

	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		BT_ERR("%s hardware error 0x%2.2x", hdev->name,
		       hdev->hw_error_code);

	if (hci_dev_do_close(hdev))
		return;

	hci_dev_do_open(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

static void hci_adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, adv_instance_expire.work);

	BT_DBG("%s", hdev->name);

	mgmt_adv_timeout_expired(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry_rcu(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
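
/* Summary of the rules above: legacy keys (types 0x00-0x02) and keys
 * from links that negotiated any form of bonding are kept; debug keys
 * and spurious "changed combination" keys are not. For example, an
 * unauthenticated combination key created on a no-bonding link
 * (auth_type 0x00/0x01 on both sides) is used for the lifetime of the
 * connection but never written to storage.
 */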

static u8 ltk_role(u8 type)
{
	if (type == SMP_LTK)
		return HCI_ROLE_MASTER;

	return HCI_ROLE_SLAVE;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			rcu_read_unlock();
			return irk;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}
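
/* Note on the static random address check above: a bdaddr_t is stored
 * little-endian, so b[5] is the most significant byte, and the two top
 * bits of a static random address must both be 1. For example
 * C0:12:34:56:78:9A qualifies, while 40:12:34:56:78:9A (an RPA-style
 * prefix) is rejected as an identity address.
 */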

struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del_rcu(&key->list);
	kfree_rcu(key, rcu);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k;
	int removed = 0;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct smp_ltk *k;
	struct smp_irk *irk;
	u8 addr_type;

	if (type == BDADDR_BREDR) {
		if (hci_find_link_key(hdev, bdaddr))
			return true;
		return false;
	}

	/* Convert to HCI addr type which struct smp_ltk uses */
	if (type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}
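
/* Because of the hci_get_irk() lookup above, a peer that is currently
 * using a resolvable private address is still reported as paired: the
 * RPA is first mapped back to the identity address stored with the
 * IRK, and the LTK list is then searched with that identity address.
 */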

/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list) {
		if (bacmp(bdaddr, &data->bdaddr) != 0)
			continue;
		if (data->bdaddr_type != bdaddr_type)
			continue;
		return data;
	}

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 bdaddr_type)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
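
/* The data->present field set above is a bitmask describing which OOB
 * values are valid: 0x01 means P-192 data only, 0x02 means P-256 data
 * only, and 0x03 means both sets are present. A caller passing only
 * hash256/rand256 therefore ends up with present = 0x02 and zeroed
 * P-192 fields.
 */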

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		if (adv_instance->instance == instance)
			return adv_instance;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *cur_instance;

	cur_instance = hci_find_adv_instance(hdev, instance);
	if (!cur_instance)
		return NULL;

	if (cur_instance == list_last_entry(&hdev->adv_instances,
					    struct adv_info, list))
		return list_first_entry(&hdev->adv_instances,
					struct adv_info, list);
	else
		return list_next_entry(cur_instance, list);
}
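
/* Example: with instances 1, 2 and 3 on the list in that order,
 * hci_get_next_instance(hdev, 2) returns instance 3 and
 * hci_get_next_instance(hdev, 3) wraps around to instance 1, which is
 * what allows advertising instances to be rotated round-robin.
 */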

/* This function requires the caller holds hdev->lock */
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	BT_DBG("%s removing %d", hdev->name, instance);

	if (hdev->cur_adv_instance == instance && hdev->adv_instance_timeout) {
		cancel_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	list_del(&adv_instance->list);
	kfree(adv_instance);

	hdev->adv_instance_cnt--;

	return 0;
}

/* This function requires the caller holds hdev->lock */
void hci_adv_instances_clear(struct hci_dev *hdev)
{
	struct adv_info *adv_instance, *n;

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		list_del(&adv_instance->list);
		kfree(adv_instance);
	}

	hdev->adv_instance_cnt = 0;
}

/* This function requires the caller holds hdev->lock */
int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
			 u16 adv_data_len, u8 *adv_data,
			 u16 scan_rsp_len, u8 *scan_rsp_data,
			 u16 timeout, u16 duration)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (adv_instance) {
		memset(adv_instance->adv_data, 0,
		       sizeof(adv_instance->adv_data));
		memset(adv_instance->scan_rsp_data, 0,
		       sizeof(adv_instance->scan_rsp_data));
	} else {
		if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
		    instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
			return -EOVERFLOW;

		adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
		if (!adv_instance)
			return -ENOMEM;

		adv_instance->pending = true;
		adv_instance->instance = instance;
		list_add(&adv_instance->list, &hdev->adv_instances);
		hdev->adv_instance_cnt++;
	}

	adv_instance->flags = flags;
	adv_instance->adv_data_len = adv_data_len;
	adv_instance->scan_rsp_len = scan_rsp_len;

	if (adv_data_len)
		memcpy(adv_instance->adv_data, adv_data, adv_data_len);

	if (scan_rsp_len)
		memcpy(adv_instance->scan_rsp_data,
		       scan_rsp_data, scan_rsp_len);

	adv_instance->timeout = timeout;
	adv_instance->remaining_time = timeout;

	if (duration == 0)
		adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
	else
		adv_instance->duration = duration;

	BT_DBG("%s for %d", hdev->name, instance);

	return 0;
}

struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
					   bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, bdaddr_list) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}
}

int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, list);

	return 0;
}

int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
			return params;
		}
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
						  bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *param;

	list_for_each_entry(param, list, action) {
		if (bacmp(&param->addr, addr) == 0 &&
		    param->addr_type == addr_type)
			return param;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	INIT_LIST_HEAD(&params->action);

	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}

static void hci_conn_params_free(struct hci_conn_params *params)
{
	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
	}

	list_del(&params->action);
	list_del(&params->list);
	kfree(params);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_conn_params_free(params);

	hci_update_background_scan(hdev);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;

		/* If trying to establish one time connection to disabled
		 * device, leave the params, but mark them as just once.
		 */
		if (params->explicit_connect) {
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
			continue;
		}

		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_all(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
		hci_conn_params_free(params);

	hci_update_background_scan(hdev);

	BT_DBG("All LE connection parameters were removed");
}

static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
					  u16 opcode)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	hdev->discovery.scan_start = 0;

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_dev_lock(hdev);

		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			/* If we were running LE only scan, change discovery
			 * state. If we were running both LE and BR/EDR inquiry
			 * simultaneously, and BR/EDR inquiry is already
			 * finished, stop discovery, otherwise BR/EDR inquiry
			 * will stop discovery when finished. If we will resolve
			 * remote device name, do not change discovery state.
			 */
			if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
			    hdev->discovery.state != DISCOVERY_RESOLVING)
				hci_discovery_set_state(hdev,
							DISCOVERY_STOPPED);
		} else {
			struct hci_request req;

			hci_inquiry_cache_flush(hdev);

			hci_req_init(&req, hdev);

			memset(&cp, 0, sizeof(cp));
			memcpy(&cp.lap, lap, sizeof(cp.lap));
			cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
			hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

			err = hci_req_run(&req, inquiry_complete);
			if (err) {
				BT_ERR("Inquiry request failed: err %d", err);
				hci_discovery_set_state(hdev,
							DISCOVERY_STOPPED);
			}
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	cancel_delayed_work_sync(&hdev->le_scan_restart);

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}

static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
					  u16 opcode)
{
	unsigned long timeout, duration, scan_start, now;

	BT_DBG("%s", hdev->name);

	if (status) {
		BT_ERR("Failed to restart LE scan: status %d", status);
		return;
	}

	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
	    !hdev->discovery.scan_start)
		return;

	/* When the scan was started, hdev->le_scan_disable has been queued
	 * after duration from scan_start. During scan restart this job
	 * has been canceled, and we need to queue it again after proper
	 * timeout, to make sure that scan does not run indefinitely.
	 */
	duration = hdev->discovery.scan_duration;
	scan_start = hdev->discovery.scan_start;
	now = jiffies;
	if (now - scan_start <= duration) {
		int elapsed;

		if (now >= scan_start)
			elapsed = now - scan_start;
		else
			elapsed = ULONG_MAX - scan_start + now;

		timeout = duration - elapsed;
	} else {
		timeout = 0;
	}
	queue_delayed_work(hdev->workqueue,
			   &hdev->le_scan_disable, timeout);
}
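
/* Worked example of the timeout above, assuming HZ = 1000 purely for
 * illustration: with a scan duration of 10240 jiffies and a restart
 * completing 3000 jiffies after scan_start, elapsed = 3000 and
 * timeout = 7240, so the disable work still fires 10240 jiffies after
 * the original start. The else-branch of the inner check keeps the
 * arithmetic correct when jiffies has wrapped around between
 * scan_start and now.
 */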

static void le_scan_restart_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_restart.work);
	struct hci_request req;
	struct hci_cp_le_set_scan_enable cp;
	int err;

	BT_DBG("%s", hdev->name);

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_ENABLE;
	cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_restart_work_complete);
	if (err)
		BT_ERR("Restart LE scan request failed: err %d", err);
}

/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 *
 * In case BR/EDR has been disabled on a dual-mode controller and
 * userspace has configured a static address, then that address
 * becomes the identity address instead of the public BR/EDR address.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
	hdev->adv_instance_timeout = 0;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);
	INIT_LIST_HEAD(&hdev->adv_instances);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->error_reset, hci_error_reset);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, hci_adv_timeout_expire);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		hci_dev_set_flag(hdev, HCI_RFKILLED);

	hci_dev_set_flag(hdev, HCI_SETUP);
	hci_dev_set_flag(hdev, HCI_AUTO_OFF);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

	hci_sock_dev_event(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
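
/* Minimal driver-side sketch of the registration API above (purely
 * illustrative; the foo_* callbacks are hypothetical, see the real
 * transport drivers under drivers/bluetooth/ for complete examples):
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus = HCI_USB;
 *	hdev->open = foo_open;		// int (*)(struct hci_dev *)
 *	hdev->close = foo_close;	// int (*)(struct hci_dev *)
 *	hdev->send = foo_send;		// int (*)(struct hci_dev *,
 *					//	   struct sk_buff *)
 *
 *	err = hci_register_dev(hdev);	// open/close/send are mandatory
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */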

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hci_dev_set_flag(hdev, HCI_UNREGISTER);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_sock_dev_event(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_instances_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_sock_dev_event(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Reset HCI device */
int hci_reset_dev(struct hci_dev *hdev)
{
	const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
	struct sk_buff *skb;

	skb = bt_skb_alloc(3, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	memcpy(skb_put(skb, 3), hw_err, 3);

	/* Send Hardware Error to upper stack */
	return hci_recv_frame(hdev, skb);
}
EXPORT_SYMBOL(hci_reset_dev);
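
/* The injected packet above is a complete HCI Hardware Error event:
 * { 0x10, 0x01, 0x00 } is the event code (HCI_EV_HARDWARE_ERROR), a
 * one byte parameter length, and an error code of 0x00. Feeding it
 * through hci_recv_frame() makes the event processing path queue the
 * error_reset work defined earlier, so the controller is torn down
 * and reopened just as if the hardware itself had reported the error.
 */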
3530
76bca880 3531/* Receive frame from HCI drivers */
e1a26170 3532int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 3533{
76bca880 3534 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 3535 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
3536 kfree_skb(skb);
3537 return -ENXIO;
3538 }
3539
fe806dce
MH
3540 if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
3541 bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
3542 bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
3543 kfree_skb(skb);
3544 return -EINVAL;
3545 }
3546
d82603c6 3547 /* Incoming skb */
76bca880
MH
3548 bt_cb(skb)->incoming = 1;
3549
3550 /* Time stamp */
3551 __net_timestamp(skb);
3552
76bca880 3553 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 3554 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 3555
76bca880
MH
3556 return 0;
3557}
3558EXPORT_SYMBOL(hci_recv_frame);
3559
e875ff84
MH
3560/* Receive diagnostic message from HCI drivers */
3561int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3562{
581d6fd6
MH
3563 /* Mark as diagnostic packet */
3564 bt_cb(skb)->pkt_type = HCI_DIAG_PKT;
3565
e875ff84
MH
3566 /* Time stamp */
3567 __net_timestamp(skb);
3568
581d6fd6
MH
3569 skb_queue_tail(&hdev->rx_q, skb);
3570 queue_work(hdev->workqueue, &hdev->rx_work);
e875ff84 3571
e875ff84
MH
3572 return 0;
3573}
3574EXPORT_SYMBOL(hci_recv_diag);
3575
1da177e4
LT
3576/* ---- Interface to upper protocols ---- */
3577
1da177e4
LT
3578int hci_register_cb(struct hci_cb *cb)
3579{
3580 BT_DBG("%p name %s", cb, cb->name);
3581
fba7ecf0 3582 mutex_lock(&hci_cb_list_lock);
00629e0f 3583 list_add_tail(&cb->list, &hci_cb_list);
fba7ecf0 3584 mutex_unlock(&hci_cb_list_lock);
1da177e4
LT
3585
3586 return 0;
3587}
3588EXPORT_SYMBOL(hci_register_cb);
3589
3590int hci_unregister_cb(struct hci_cb *cb)
3591{
3592 BT_DBG("%p name %s", cb, cb->name);
3593
fba7ecf0 3594 mutex_lock(&hci_cb_list_lock);
1da177e4 3595 list_del(&cb->list);
fba7ecf0 3596 mutex_unlock(&hci_cb_list_lock);
1da177e4
LT
3597
3598 return 0;
3599}
3600EXPORT_SYMBOL(hci_unregister_cb);
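
/* Illustrative sketch of how an upper protocol hooks into these two
 * calls, modelled on what l2cap_init()/sco_init() do.  The callback
 * body and the exact set of struct hci_cb members used here are
 * assumptions for the example, not a definitive list.
 */
static void example_connect_cfm(struct hci_conn *conn, __u8 status)
{
	BT_DBG("conn %p status 0x%2.2x", conn, status);
}

static struct hci_cb example_cb = {
	.name		= "example",
	.connect_cfm	= example_connect_cfm,
};

/* In module init/exit:
 *	hci_register_cb(&example_cb);
 *	...
 *	hci_unregister_cb(&example_cb);
 */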

static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
		kfree_skb(skb);
		return;
	}

	err = hdev->send(hdev, skb);
	if (err < 0) {
		BT_ERR("%s sending frame failed (%d)", hdev->name, err);
		kfree_skb(skb);
	}
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->hci.req_start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
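
/* Illustrative sketch: hci_send_cmd() only queues the command; the
 * result arrives asynchronously via Command Complete/Status events.
 * The opcode and scan constants exist in hci.h; the wrapper function
 * itself is invented for the example.
 */
static int example_enable_scan(struct hci_dev *hdev)
{
	__u8 scan = SCAN_PAGE | SCAN_INQUIRY;

	return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}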

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
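
/* Illustrative sketch: event handlers use hci_sent_cmd_data() to map a
 * Command Status/Complete event back onto the parameters that were
 * sent, as hci_event.c does for HCI_OP_REMOTE_NAME_REQ:
 *
 *	struct hci_cp_remote_name_req *cp;
 *
 *	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
 *	if (!cp)
 *		return;
 *	// cp->bdaddr identifies the peer whose name was requested
 */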

/* Send HCI command and wait for command complete event */
struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			     const void *param, u32 timeout)
{
	struct sk_buff *skb;

	if (!test_bit(HCI_UP, &hdev->flags))
		return ERR_PTR(-ENETDOWN);

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	hci_req_lock(hdev);
	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
	hci_req_unlock(hdev);

	return skb;
}
EXPORT_SYMBOL(hci_cmd_sync);
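
/* Illustrative sketch of the synchronous variant: the returned skb
 * carries the Command Complete parameters and must be freed by the
 * caller; failures come back as ERR_PTR().  The wrapper function is
 * invented for the example.
 */
static int example_read_local_version(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
			   HCI_CMD_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* skb->data points at struct hci_rp_read_local_version */
	kfree_skb(skb);
	return 0;
}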
3695
1da177e4
LT
3696/* Send ACL data */
3697static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3698{
3699 struct hci_acl_hdr *hdr;
3700 int len = skb->len;
3701
badff6d0
ACM
3702 skb_push(skb, HCI_ACL_HDR_SIZE);
3703 skb_reset_transport_header(skb);
9c70220b 3704 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
3705 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3706 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
3707}
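
/* The 16-bit handle field of the ACL header packs the 12-bit
 * connection handle together with the packet boundary and broadcast
 * flags in the top four bits; hci_handle_pack() is effectively
 *
 *	(handle & 0x0fff) | (flags << 12)
 *
 * so, for example, handle 0x002a sent with ACL_START (0x02) goes out
 * as 0x202a (little endian).
 */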

static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
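
/* Worked example of the quota above: with three ACL connections that
 * all have queued data and acl_cnt == 8 free controller buffers, the
 * least-busy connection wins and gets quote = 8 / 3 = 2 packets; the
 * floor of 1 keeps a connection from starving once cnt < num.
 */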

static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
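
/* Worked example: with a 64-byte block_len, an ACL packet of
 * 4 + 300 bytes costs DIV_ROUND_UP(300, 64) = 5 blocks; the 4-byte
 * ACL header is not charged against the block budget.
 */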

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->hci.req_start;
}

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->hci.req_complete_skb) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->hci.req_start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		*req_complete = bt_cb(skb)->hci.req_complete;
		*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}

static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}