/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)
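
/* A synchronous request moves through a small state machine: it is
 * marked HCI_REQ_PEND while the commands are in flight, and the
 * completion handler flips it to HCI_REQ_DONE (or hci_req_cancel() to
 * HCI_REQ_CANCELED) before waking the waiter on req_wait_q. The
 * req_lock mutex serializes whole requests, so only one synchronous
 * sequence runs against a controller at a time.
 */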

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	kfree_skb(skb);

	hci_dev_change_flag(hdev, HCI_DUT_MODE);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
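
/* With debugfs mounted in the usual location, Device Under Test mode
 * can be toggled from userspace, e.g. (path assumes the default
 * debugfs mount point and device hci0):
 *
 *	echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *	cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 */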

static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	/* When the diagnostic flags are not persistent and the transport
	 * is not active, then there is no need for the vendor callback.
	 *
	 * Instead just store the desired value. If needed the setting
	 * will be programmed when the controller gets powered on.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    !test_bit(HCI_RUNNING, &hdev->flags))
		goto done;

	hci_req_lock(hdev);
	err = hdev->set_diag(hdev, enable);
	hci_req_unlock(hdev);

	if (err < 0)
		return err;

done:
	if (enable)
		hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
	else
		hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

	return count;
}

static const struct file_operations vendor_diag_fops = {
	.open		= simple_open,
	.read		= vendor_diag_read,
	.write		= vendor_diag_write,
	.llseek		= default_llseek,
};

static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
	debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
			    &dut_mode_fops);

	if (hdev->set_diag)
		debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
				    &vendor_diag_fops);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
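
/* Drivers use __hci_cmd_sync() to issue a single HCI command and wait
 * for its completion. A minimal sketch (0xfc01 is a made-up vendor
 * opcode used purely for illustration):
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(param), &param,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	... inspect skb->data ...
 *	kfree_skb(skb);
 *
 * The caller owns the returned skb and must release it with
 * kfree_skb().
 */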

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void amp_init2(struct hci_request *req)
{
	/* Read Local Supported Features. Not all AMP controllers
	 * support this so it's placed conditionally in the second
	 * stage init.
	 */
	if (req->hdev->commands[14] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init1(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
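
/* Controller bring-up is staged: hci_init1_req() resets the controller
 * and reads basic capabilities, hci_init2_req() programs transport
 * level settings, and for BR/EDR controllers hci_init3_req() and
 * hci_init4_req() (further below) configure event masks and optional
 * features. __hci_init() runs these stages in order.
 */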

static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->dev_type == HCI_AMP)
		return amp_init2(req);

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			u8 mode = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI
		 * events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	if (hdev->commands[6] & 0x20 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x0f;

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40;	/* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04;	/* LE Direct Advertising
						 * Report
						 */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80;	/* LE Read Local P-256
						 * Public Key Complete
						 */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01;	/* LE Generate DHKey Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first two stages of init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}

static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	return 0;
}
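
/* The request builders below each queue exactly one HCI command; the
 * ioctl handlers hand them to hci_req_sync() with the value to program
 * passed through the opt argument.
 */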
static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
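
/* The reference taken by hci_dev_get() must be balanced with a
 * hci_dev_put() once the caller is done with the device; the ioctl
 * helpers below follow this get/put pattern.
 */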

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
30883512 1257 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
1258 struct inquiry_info *info = (struct inquiry_info *) buf;
1259 struct inquiry_entry *e;
1260 int copied = 0;
1261
561aafbc 1262 list_for_each_entry(e, &cache->all, all) {
1da177e4 1263 struct inquiry_data *data = &e->data;
b57c1a56
JH
1264
1265 if (copied >= num)
1266 break;
1267
1da177e4
LT
1268 bacpy(&info->bdaddr, &data->bdaddr);
1269 info->pscan_rep_mode = data->pscan_rep_mode;
1270 info->pscan_period_mode = data->pscan_period_mode;
1271 info->pscan_mode = data->pscan_mode;
1272 memcpy(info->dev_class, data->dev_class, 3);
1273 info->clock_offset = data->clock_offset;
b57c1a56 1274
1da177e4 1275 info++;
b57c1a56 1276 copied++;
1da177e4
LT
1277 }
1278
1279 BT_DBG("cache %p, copied %d", cache, copied);
1280 return copied;
1281}
1282
42c6b129 1283static void hci_inq_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1284{
1285 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
42c6b129 1286 struct hci_dev *hdev = req->hdev;
1da177e4
LT
1287 struct hci_cp_inquiry cp;
1288
1289 BT_DBG("%s", hdev->name);
1290
1291 if (test_bit(HCI_INQUIRY, &hdev->flags))
1292 return;
1293
1294 /* Start Inquiry */
1295 memcpy(&cp.lap, &ir->lap, 3);
1296 cp.length = ir->length;
1297 cp.num_rsp = ir->num_rsp;
42c6b129 1298 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
1299}
1300
1301int hci_inquiry(void __user *arg)
1302{
1303 __u8 __user *ptr = arg;
1304 struct hci_inquiry_req ir;
1305 struct hci_dev *hdev;
1306 int err = 0, do_inquiry = 0, max_rsp;
1307 long timeo;
1308 __u8 *buf;
1309
1310 if (copy_from_user(&ir, ptr, sizeof(ir)))
1311 return -EFAULT;
1312
5a08ecce
AE
1313 hdev = hci_dev_get(ir.dev_id);
1314 if (!hdev)
1da177e4
LT
1315 return -ENODEV;
1316
d7a5a11d 1317 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
0736cfa8
MH
1318 err = -EBUSY;
1319 goto done;
1320 }
1321
d7a5a11d 1322 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
fee746b0
MH
1323 err = -EOPNOTSUPP;
1324 goto done;
1325 }
1326
5b69bef5
MH
1327 if (hdev->dev_type != HCI_BREDR) {
1328 err = -EOPNOTSUPP;
1329 goto done;
1330 }
1331
d7a5a11d 1332 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
56f87901
JH
1333 err = -EOPNOTSUPP;
1334 goto done;
1335 }
1336
09fd0de5 1337 hci_dev_lock(hdev);
8e87d142 1338 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 1339 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1f9b9a5d 1340 hci_inquiry_cache_flush(hdev);
1da177e4
LT
1341 do_inquiry = 1;
1342 }
09fd0de5 1343 hci_dev_unlock(hdev);
1da177e4 1344
04837f64 1345 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
1346
1347 if (do_inquiry) {
01178cd4
JH
1348 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1349 timeo);
70f23020
AE
1350 if (err < 0)
1351 goto done;
3e13fa1e
AG
1352
1353 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1354 * cleared). If it is interrupted by a signal, return -EINTR.
1355 */
74316201 1356 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
3e13fa1e
AG
1357 TASK_INTERRUPTIBLE))
1358 return -EINTR;
70f23020 1359 }
1da177e4 1360
8fc9ced3
GP
1361 /* for unlimited number of responses we will use buffer with
1362 * 255 entries
1363 */
1da177e4
LT
1364 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1365
1366 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1367 * copy it to the user space.
1368 */
01df8c31 1369 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 1370 if (!buf) {
1da177e4
LT
1371 err = -ENOMEM;
1372 goto done;
1373 }
1374
09fd0de5 1375 hci_dev_lock(hdev);
1da177e4 1376 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 1377 hci_dev_unlock(hdev);
1da177e4
LT
1378
1379 BT_DBG("num_rsp %d", ir.num_rsp);
1380
1381 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1382 ptr += sizeof(ir);
1383 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 1384 ir.num_rsp))
1da177e4 1385 err = -EFAULT;
8e87d142 1386 } else
1da177e4
LT
1387 err = -EFAULT;
1388
1389 kfree(buf);
1390
1391done:
1392 hci_dev_put(hdev);
1393 return err;
1394}
1395
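
/* hci_inquiry() is reached through the HCIINQUIRY ioctl on an HCI
 * socket. A rough userspace sketch (error handling omitted, sizes
 * chosen only for illustration):
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[255];
 *	} buf = { 0 };
 *
 *	buf.ir.dev_id  = 0;
 *	buf.ir.length  = 8;
 *	buf.ir.num_rsp = 255;
 *	ioctl(sock, HCIINQUIRY, &buf);
 */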

static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		ret = -ENODEV;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	set_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_OPEN);

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
		hci_sock_dev_event(hdev, HCI_DEV_SETUP);

		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			ret = __hci_unconf_init(hdev);
	}

	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			ret = __hci_init(hdev);
			if (!ret && hdev->post_init)
				ret = hdev->post_init(hdev);
		}
	}

	/* If the HCI Reset command is clearing all diagnostic settings,
	 * then they need to be reprogrammed after the init procedure
	 * completed.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
		ret = hdev->set_diag(hdev, true);

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		set_bit(HCI_UP, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_UP);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		clear_bit(HCI_RUNNING, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		if (p->conn) {
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
		list_del_init(&p->action);
	}

	BT_DBG("All LE pending actions cleared");
}

int hci_dev_do_close(struct hci_dev *hdev)
{
	bool auto_off;

	BT_DBG("%s %p", hdev->name, hdev);

	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    test_bit(HCI_UP, &hdev->flags)) {
		/* Execute vendor specific shutdown routine */
		if (hdev->shutdown)
			hdev->shutdown(hdev);
	}

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);

	if (!auto_off && hdev->dev_type == HCI_BREDR)
		mgmt_powered(hdev, 0);

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	smp_unregister(hdev);

	hci_sock_dev_event(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	clear_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
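
/* hci_dev_do_close() above is the internal teardown path;
 * hci_dev_close() is the ioctl-facing wrapper that additionally
 * refuses user-channel devices and cancels pending auto-power-off
 * work first.
 */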

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

	hci_req_unlock(hdev);
	return ret;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	err = hci_dev_do_reset(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		ret = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}
123abc08
JH
1873static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1874{
bc6d2d04 1875 bool conn_changed, discov_changed;
123abc08
JH
1876
1877 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1878
1879 if ((scan & SCAN_PAGE))
238be788
MH
1880 conn_changed = !hci_dev_test_and_set_flag(hdev,
1881 HCI_CONNECTABLE);
123abc08 1882 else
a69d8927
MH
1883 conn_changed = hci_dev_test_and_clear_flag(hdev,
1884 HCI_CONNECTABLE);
123abc08 1885
bc6d2d04 1886 if ((scan & SCAN_INQUIRY)) {
238be788
MH
1887 discov_changed = !hci_dev_test_and_set_flag(hdev,
1888 HCI_DISCOVERABLE);
bc6d2d04 1889 } else {
a358dc11 1890 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
a69d8927
MH
1891 discov_changed = hci_dev_test_and_clear_flag(hdev,
1892 HCI_DISCOVERABLE);
bc6d2d04
JH
1893 }
1894
d7a5a11d 1895 if (!hci_dev_test_flag(hdev, HCI_MGMT))
123abc08
JH
1896 return;
1897
bc6d2d04
JH
1898 if (conn_changed || discov_changed) {
1899 /* In case this was disabled through mgmt */
a1536da2 1900 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
bc6d2d04 1901
d7a5a11d 1902 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
bc6d2d04
JH
1903 mgmt_update_adv_data(hdev);
1904
123abc08 1905 mgmt_new_settings(hdev);
bc6d2d04 1906 }
123abc08
JH
1907}
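/* Illustrative sketch (editorial addition, not part of the original
 * file): hci_update_scan_state() keeps the mgmt HCI_CONNECTABLE and
 * HCI_DISCOVERABLE flags in sync when scan mode is changed through
 * the legacy ioctl path handled below. A minimal user-space request,
 * assuming a raw HCI socket "dd" (e.g. from libbluetooth's
 * hci_open_dev()):
 *
 *	struct hci_dev_req dr;
 *
 *	dr.dev_id  = 0;				// hci0
 *	dr.dev_opt = SCAN_PAGE | SCAN_INQUIRY;	// connectable + discoverable
 *	if (ioctl(dd, HCISETSCAN, (unsigned long) &dr) < 0)
 *		perror("HCISETSCAN");
 */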
1908
1da177e4
LT
1909int hci_dev_cmd(unsigned int cmd, void __user *arg)
1910{
1911 struct hci_dev *hdev;
1912 struct hci_dev_req dr;
1913 int err = 0;
1914
1915 if (copy_from_user(&dr, arg, sizeof(dr)))
1916 return -EFAULT;
1917
70f23020
AE
1918 hdev = hci_dev_get(dr.dev_id);
1919 if (!hdev)
1da177e4
LT
1920 return -ENODEV;
1921
d7a5a11d 1922 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
0736cfa8
MH
1923 err = -EBUSY;
1924 goto done;
1925 }
1926
d7a5a11d 1927 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
fee746b0
MH
1928 err = -EOPNOTSUPP;
1929 goto done;
1930 }
1931
5b69bef5
MH
1932 if (hdev->dev_type != HCI_BREDR) {
1933 err = -EOPNOTSUPP;
1934 goto done;
1935 }
1936
d7a5a11d 1937 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
56f87901
JH
1938 err = -EOPNOTSUPP;
1939 goto done;
1940 }
1941
1da177e4
LT
1942 switch (cmd) {
1943 case HCISETAUTH:
01178cd4
JH
1944 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1945 HCI_INIT_TIMEOUT);
1da177e4
LT
1946 break;
1947
1948 case HCISETENCRYPT:
1949 if (!lmp_encrypt_capable(hdev)) {
1950 err = -EOPNOTSUPP;
1951 break;
1952 }
1953
1954 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1955 /* Auth must be enabled first */
01178cd4
JH
1956 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1957 HCI_INIT_TIMEOUT);
1da177e4
LT
1958 if (err)
1959 break;
1960 }
1961
01178cd4
JH
1962 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1963 HCI_INIT_TIMEOUT);
1da177e4
LT
1964 break;
1965
1966 case HCISETSCAN:
01178cd4
JH
1967 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1968 HCI_INIT_TIMEOUT);
91a668b0 1969
bc6d2d04
JH
1970 /* Ensure that the connectable and discoverable states
1971 * get correctly modified as this was a non-mgmt change.
91a668b0 1972 */
123abc08
JH
1973 if (!err)
1974 hci_update_scan_state(hdev, dr.dev_opt);
1da177e4
LT
1975 break;
1976
1da177e4 1977 case HCISETLINKPOL:
01178cd4
JH
1978 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1979 HCI_INIT_TIMEOUT);
1da177e4
LT
1980 break;
1981
1982 case HCISETLINKMODE:
e4e8e37c
MH
1983 hdev->link_mode = ((__u16) dr.dev_opt) &
1984 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1985 break;
1986
1987 case HCISETPTYPE:
1988 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
1989 break;
1990
1991 case HCISETACLMTU:
e4e8e37c
MH
1992 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1993 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
1994 break;
1995
1996 case HCISETSCOMTU:
e4e8e37c
MH
1997 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1998 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
1999 break;
2000
2001 default:
2002 err = -EINVAL;
2003 break;
2004 }
e4e8e37c 2005
0736cfa8 2006done:
1da177e4
LT
2007 hci_dev_put(hdev);
2008 return err;
2009}
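/* Editorial worked example (not part of the original file): for
 * HCISETACLMTU and HCISETSCOMTU, dev_opt packs two 16-bit values into
 * one __u32. On a little-endian machine the pointer arithmetic above
 * reads the packet count from the low half and the MTU from the high
 * half, so a request of
 *
 *	dr.dev_opt = (1021 << 16) | 8;
 *
 * results in hdev->acl_mtu = 1021 and hdev->acl_pkts = 8.
 */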
2010
2011int hci_get_dev_list(void __user *arg)
2012{
8035ded4 2013 struct hci_dev *hdev;
1da177e4
LT
2014 struct hci_dev_list_req *dl;
2015 struct hci_dev_req *dr;
1da177e4
LT
2016 int n = 0, size, err;
2017 __u16 dev_num;
2018
2019 if (get_user(dev_num, (__u16 __user *) arg))
2020 return -EFAULT;
2021
2022 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2023 return -EINVAL;
2024
2025 size = sizeof(*dl) + dev_num * sizeof(*dr);
2026
70f23020
AE
2027 dl = kzalloc(size, GFP_KERNEL);
2028 if (!dl)
1da177e4
LT
2029 return -ENOMEM;
2030
2031 dr = dl->dev_req;
2032
f20d09d5 2033 read_lock(&hci_dev_list_lock);
8035ded4 2034 list_for_each_entry(hdev, &hci_dev_list, list) {
2e84d8db 2035 unsigned long flags = hdev->flags;
c542a06c 2036
2e84d8db
MH
2037 /* When the auto-off is configured it means the transport
2038 * is running, but in that case still indicate that the
2039 * device is actually down.
2040 */
d7a5a11d 2041 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2e84d8db 2042 flags &= ~BIT(HCI_UP);
c542a06c 2043
1da177e4 2044 (dr + n)->dev_id = hdev->id;
2e84d8db 2045 (dr + n)->dev_opt = flags;
c542a06c 2046
1da177e4
LT
2047 if (++n >= dev_num)
2048 break;
2049 }
f20d09d5 2050 read_unlock(&hci_dev_list_lock);
1da177e4
LT
2051
2052 dl->dev_num = n;
2053 size = sizeof(*dl) + n * sizeof(*dr);
2054
2055 err = copy_to_user(arg, dl, size);
2056 kfree(dl);
2057
2058 return err ? -EFAULT : 0;
2059}
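/* Illustrative user-space sketch (editorial addition, not part of the
 * original file) of driving the handler above via the HCIGETDEVLIST
 * ioctl on an HCI control socket "ctl":
 *
 *	struct hci_dev_list_req *dl;
 *	int i;
 *
 *	dl = malloc(sizeof(*dl) + HCI_MAX_DEV * sizeof(struct hci_dev_req));
 *	dl->dev_num = HCI_MAX_DEV;
 *	if (ioctl(ctl, HCIGETDEVLIST, (void *) dl) == 0)
 *		for (i = 0; i < dl->dev_num; i++)
 *			printf("hci%u flags 0x%x\n",
 *			       (dl->dev_req + i)->dev_id,
 *			       (dl->dev_req + i)->dev_opt);
 *	free(dl);
 */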
2060
2061int hci_get_dev_info(void __user *arg)
2062{
2063 struct hci_dev *hdev;
2064 struct hci_dev_info di;
2e84d8db 2065 unsigned long flags;
1da177e4
LT
2066 int err = 0;
2067
2068 if (copy_from_user(&di, arg, sizeof(di)))
2069 return -EFAULT;
2070
70f23020
AE
2071 hdev = hci_dev_get(di.dev_id);
2072 if (!hdev)
1da177e4
LT
2073 return -ENODEV;
2074
2e84d8db
MH
2075 /* When the auto-off is configured it means the transport
2076 * is running, but in that case still indicate that the
2077 * device is actually down.
2078 */
d7a5a11d 2079 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2e84d8db
MH
2080 flags = hdev->flags & ~BIT(HCI_UP);
2081 else
2082 flags = hdev->flags;
c542a06c 2083
1da177e4
LT
2084 strcpy(di.name, hdev->name);
2085 di.bdaddr = hdev->bdaddr;
60f2a3ed 2086 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2e84d8db 2087 di.flags = flags;
1da177e4 2088 di.pkt_type = hdev->pkt_type;
572c7f84
JH
2089 if (lmp_bredr_capable(hdev)) {
2090 di.acl_mtu = hdev->acl_mtu;
2091 di.acl_pkts = hdev->acl_pkts;
2092 di.sco_mtu = hdev->sco_mtu;
2093 di.sco_pkts = hdev->sco_pkts;
2094 } else {
2095 di.acl_mtu = hdev->le_mtu;
2096 di.acl_pkts = hdev->le_pkts;
2097 di.sco_mtu = 0;
2098 di.sco_pkts = 0;
2099 }
1da177e4
LT
2100 di.link_policy = hdev->link_policy;
2101 di.link_mode = hdev->link_mode;
2102
2103 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2104 memcpy(&di.features, &hdev->features, sizeof(di.features));
2105
2106 if (copy_to_user(arg, &di, sizeof(di)))
2107 err = -EFAULT;
2108
2109 hci_dev_put(hdev);
2110
2111 return err;
2112}
2113
2114/* ---- Interface to HCI drivers ---- */
2115
611b30f7
MH
2116static int hci_rfkill_set_block(void *data, bool blocked)
2117{
2118 struct hci_dev *hdev = data;
2119
2120 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2121
d7a5a11d 2122 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
0736cfa8
MH
2123 return -EBUSY;
2124
5e130367 2125 if (blocked) {
a1536da2 2126 hci_dev_set_flag(hdev, HCI_RFKILLED);
d7a5a11d
MH
2127 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2128 !hci_dev_test_flag(hdev, HCI_CONFIG))
bf543036 2129 hci_dev_do_close(hdev);
5e130367 2130 } else {
a358dc11 2131 hci_dev_clear_flag(hdev, HCI_RFKILLED);
1025c04c 2132 }
611b30f7
MH
2133
2134 return 0;
2135}
2136
2137static const struct rfkill_ops hci_rfkill_ops = {
2138 .set_block = hci_rfkill_set_block,
2139};
2140
ab81cbf9
JH
2141static void hci_power_on(struct work_struct *work)
2142{
2143 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
96570ffc 2144 int err;
ab81cbf9
JH
2145
2146 BT_DBG("%s", hdev->name);
2147
cbed0ca1 2148 err = hci_dev_do_open(hdev);
96570ffc 2149 if (err < 0) {
3ad67582 2150 hci_dev_lock(hdev);
96570ffc 2151 mgmt_set_powered_failed(hdev, err);
3ad67582 2152 hci_dev_unlock(hdev);
ab81cbf9 2153 return;
96570ffc 2154 }
ab81cbf9 2155
a5c8f270
MH
2156 /* During the HCI setup phase, a few error conditions are
2157 * ignored and they need to be checked now. If they are still
2158 * valid, it is important to turn the device back off.
2159 */
d7a5a11d
MH
2160 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2161 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
a5c8f270
MH
2162 (hdev->dev_type == HCI_BREDR &&
2163 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2164 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
a358dc11 2165 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
bf543036 2166 hci_dev_do_close(hdev);
d7a5a11d 2167 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
19202573
JH
2168 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2169 HCI_AUTO_OFF_TIMEOUT);
bf543036 2170 }
ab81cbf9 2171
a69d8927 2172 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
4a964404
MH
2173 /* For unconfigured devices, set the HCI_RAW flag
2174 * so that userspace can easily identify them.
4a964404 2175 */
d7a5a11d 2176 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
4a964404 2177 set_bit(HCI_RAW, &hdev->flags);
0602a8ad
MH
2178
2179 /* For fully configured devices, this will send
2180 * the Index Added event. For unconfigured devices,
2181 * it will send the Unconfigured Index Added event.
2182 *
2183 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2184 * and no event will be sent.
2185 */
2186 mgmt_index_added(hdev);
a69d8927 2187 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
5ea234d3
MH
2188 /* When the controller is now configured, then it
2189 * is important to clear the HCI_RAW flag.
2190 */
d7a5a11d 2191 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
5ea234d3
MH
2192 clear_bit(HCI_RAW, &hdev->flags);
2193
d603b76b
MH
2194 /* Powering on the controller with HCI_CONFIG set only
2195 * happens with the transition from unconfigured to
2196 * configured. This will send the Index Added event.
2197 */
744cf19e 2198 mgmt_index_added(hdev);
fee746b0 2199 }
ab81cbf9
JH
2200}
2201
2202static void hci_power_off(struct work_struct *work)
2203{
3243553f 2204 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 2205 power_off.work);
ab81cbf9
JH
2206
2207 BT_DBG("%s", hdev->name);
2208
8ee56540 2209 hci_dev_do_close(hdev);
ab81cbf9
JH
2210}
2211
c7741d16
MH
2212static void hci_error_reset(struct work_struct *work)
2213{
2214 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2215
2216 BT_DBG("%s", hdev->name);
2217
2218 if (hdev->hw_error)
2219 hdev->hw_error(hdev, hdev->hw_error_code);
2220 else
2221 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2222 hdev->hw_error_code);
2223
2224 if (hci_dev_do_close(hdev))
2225 return;
2226
c7741d16
MH
2227 hci_dev_do_open(hdev);
2228}
2229
16ab91ab
JH
2230static void hci_discov_off(struct work_struct *work)
2231{
2232 struct hci_dev *hdev;
16ab91ab
JH
2233
2234 hdev = container_of(work, struct hci_dev, discov_off.work);
2235
2236 BT_DBG("%s", hdev->name);
2237
d1967ff8 2238 mgmt_discoverable_timeout(hdev);
16ab91ab
JH
2239}
2240
5d900e46
FG
2241static void hci_adv_timeout_expire(struct work_struct *work)
2242{
2243 struct hci_dev *hdev;
2244
2245 hdev = container_of(work, struct hci_dev, adv_instance_expire.work);
2246
2247 BT_DBG("%s", hdev->name);
2248
2249 mgmt_adv_timeout_expired(hdev);
2250}
2251
35f7498a 2252void hci_uuids_clear(struct hci_dev *hdev)
2aeb9a1a 2253{
4821002c 2254 struct bt_uuid *uuid, *tmp;
2aeb9a1a 2255
4821002c
JH
2256 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2257 list_del(&uuid->list);
2aeb9a1a
JH
2258 kfree(uuid);
2259 }
2aeb9a1a
JH
2260}
2261
35f7498a 2262void hci_link_keys_clear(struct hci_dev *hdev)
55ed8ca1 2263{
0378b597 2264 struct link_key *key;
55ed8ca1 2265
0378b597
JH
2266 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2267 list_del_rcu(&key->list);
2268 kfree_rcu(key, rcu);
55ed8ca1 2269 }
55ed8ca1
JH
2270}
2271
35f7498a 2272void hci_smp_ltks_clear(struct hci_dev *hdev)
b899efaf 2273{
970d0f1b 2274 struct smp_ltk *k;
b899efaf 2275
970d0f1b
JH
2276 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2277 list_del_rcu(&k->list);
2278 kfree_rcu(k, rcu);
b899efaf 2279 }
b899efaf
VCG
2280}
2281
970c4e46
JH
2282void hci_smp_irks_clear(struct hci_dev *hdev)
2283{
adae20cb 2284 struct smp_irk *k;
970c4e46 2285
adae20cb
JH
2286 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2287 list_del_rcu(&k->list);
2288 kfree_rcu(k, rcu);
970c4e46
JH
2289 }
2290}
2291
55ed8ca1
JH
2292struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2293{
8035ded4 2294 struct link_key *k;
55ed8ca1 2295
0378b597
JH
2296 rcu_read_lock();
2297 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2298 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2299 rcu_read_unlock();
55ed8ca1 2300 return k;
0378b597
JH
2301 }
2302 }
2303 rcu_read_unlock();
55ed8ca1
JH
2304
2305 return NULL;
2306}
2307
745c0ce3 2308static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 2309 u8 key_type, u8 old_key_type)
d25e28ab
JH
2310{
2311 /* Legacy key */
2312 if (key_type < 0x03)
745c0ce3 2313 return true;
d25e28ab
JH
2314
2315 /* Debug keys are insecure so don't store them persistently */
2316 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 2317 return false;
d25e28ab
JH
2318
2319 /* Changed combination key and there's no previous one */
2320 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 2321 return false;
d25e28ab
JH
2322
2323 /* Security mode 3 case */
2324 if (!conn)
745c0ce3 2325 return true;
d25e28ab 2326
e3befab9
JH
2327 /* BR/EDR key derived using SC from an LE link */
2328 if (conn->type == LE_LINK)
2329 return true;
2330
d25e28ab
JH
2331 /* Neither local nor remote side had no-bonding as requirement */
2332 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 2333 return true;
d25e28ab
JH
2334
2335 /* Local side had dedicated bonding as requirement */
2336 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 2337 return true;
d25e28ab
JH
2338
2339 /* Remote side had dedicated bonding as requirement */
2340 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 2341 return true;
d25e28ab
JH
2342
2343 /* If none of the above criteria match, then don't store the key
2344 * persistently */
745c0ce3 2345 return false;
d25e28ab
JH
2346}
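/* Editorial aside (not part of the original file): the auth_type and
 * remote_auth comparisons above follow the HCI authentication
 * requirements encoding: 0x00/0x01 are "no bonding" (without/with
 * MITM protection), 0x02/0x03 "dedicated bonding" and 0x04/0x05
 * "general bonding". So "> 0x01" means some form of bonding was
 * requested, while "== 0x02 || == 0x03" tests specifically for
 * dedicated bonding.
 */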
2347
e804d25d 2348static u8 ltk_role(u8 type)
98a0b845 2349{
e804d25d
JH
2350 if (type == SMP_LTK)
2351 return HCI_ROLE_MASTER;
98a0b845 2352
e804d25d 2353 return HCI_ROLE_SLAVE;
98a0b845
JH
2354}
2355
f3a73d97
JH
2356struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2357 u8 addr_type, u8 role)
75d262c2 2358{
c9839a11 2359 struct smp_ltk *k;
75d262c2 2360
970d0f1b
JH
2361 rcu_read_lock();
2362 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
5378bc56
JH
2363 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2364 continue;
2365
923e2414 2366 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
970d0f1b 2367 rcu_read_unlock();
75d262c2 2368 return k;
970d0f1b
JH
2369 }
2370 }
2371 rcu_read_unlock();
75d262c2
VCG
2372
2373 return NULL;
2374}
75d262c2 2375
970c4e46
JH
2376struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2377{
2378 struct smp_irk *irk;
2379
adae20cb
JH
2380 rcu_read_lock();
2381 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2382 if (!bacmp(&irk->rpa, rpa)) {
2383 rcu_read_unlock();
970c4e46 2384 return irk;
adae20cb 2385 }
970c4e46
JH
2386 }
2387
adae20cb 2388 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
defce9e8 2389 if (smp_irk_matches(hdev, irk->val, rpa)) {
970c4e46 2390 bacpy(&irk->rpa, rpa);
adae20cb 2391 rcu_read_unlock();
970c4e46
JH
2392 return irk;
2393 }
2394 }
adae20cb 2395 rcu_read_unlock();
970c4e46
JH
2396
2397 return NULL;
2398}
2399
2400struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2401 u8 addr_type)
2402{
2403 struct smp_irk *irk;
2404
6cfc9988
JH
2405 /* Identity Address must be public or static random */
2406 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2407 return NULL;
2408
adae20cb
JH
2409 rcu_read_lock();
2410 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
970c4e46 2411 if (addr_type == irk->addr_type &&
adae20cb
JH
2412 bacmp(bdaddr, &irk->bdaddr) == 0) {
2413 rcu_read_unlock();
970c4e46 2414 return irk;
adae20cb 2415 }
970c4e46 2416 }
adae20cb 2417 rcu_read_unlock();
970c4e46
JH
2418
2419 return NULL;
2420}
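/* Editorial aside (not part of the original file): bdaddr_t stores
 * the address little-endian, so b[5] holds the most significant
 * byte. The Identity Address check above encodes the core-spec rule
 * that a static random address must have its two most significant
 * bits set to 0b11, hence the (bdaddr->b[5] & 0xc0) != 0xc0 test.
 */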
2421
567fa2aa 2422struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
7652ff6a
JH
2423 bdaddr_t *bdaddr, u8 *val, u8 type,
2424 u8 pin_len, bool *persistent)
55ed8ca1
JH
2425{
2426 struct link_key *key, *old_key;
745c0ce3 2427 u8 old_key_type;
55ed8ca1
JH
2428
2429 old_key = hci_find_link_key(hdev, bdaddr);
2430 if (old_key) {
2431 old_key_type = old_key->type;
2432 key = old_key;
2433 } else {
12adcf3a 2434 old_key_type = conn ? conn->key_type : 0xff;
0a14ab41 2435 key = kzalloc(sizeof(*key), GFP_KERNEL);
55ed8ca1 2436 if (!key)
567fa2aa 2437 return NULL;
0378b597 2438 list_add_rcu(&key->list, &hdev->link_keys);
55ed8ca1
JH
2439 }
2440
6ed93dc6 2441 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 2442
d25e28ab
JH
2443 /* Some buggy controller combinations generate a changed
2444 * combination key for legacy pairing even when there's no
2445 * previous key */
2446 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 2447 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 2448 type = HCI_LK_COMBINATION;
655fe6ec
JH
2449 if (conn)
2450 conn->key_type = type;
2451 }
d25e28ab 2452
55ed8ca1 2453 bacpy(&key->bdaddr, bdaddr);
9b3b4460 2454 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
2455 key->pin_len = pin_len;
2456
b6020ba0 2457 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 2458 key->type = old_key_type;
4748fed2
JH
2459 else
2460 key->type = type;
2461
7652ff6a
JH
2462 if (persistent)
2463 *persistent = hci_persistent_key(hdev, conn, type,
2464 old_key_type);
4df378a1 2465
567fa2aa 2466 return key;
55ed8ca1
JH
2467}
2468
ca9142b8 2469struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
35d70271 2470 u8 addr_type, u8 type, u8 authenticated,
fe39c7b2 2471 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
75d262c2 2472{
c9839a11 2473 struct smp_ltk *key, *old_key;
e804d25d 2474 u8 role = ltk_role(type);
75d262c2 2475
f3a73d97 2476 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
c9839a11 2477 if (old_key)
75d262c2 2478 key = old_key;
c9839a11 2479 else {
0a14ab41 2480 key = kzalloc(sizeof(*key), GFP_KERNEL);
75d262c2 2481 if (!key)
ca9142b8 2482 return NULL;
970d0f1b 2483 list_add_rcu(&key->list, &hdev->long_term_keys);
75d262c2
VCG
2484 }
2485
75d262c2 2486 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
2487 key->bdaddr_type = addr_type;
2488 memcpy(key->val, tk, sizeof(key->val));
2489 key->authenticated = authenticated;
2490 key->ediv = ediv;
fe39c7b2 2491 key->rand = rand;
c9839a11
VCG
2492 key->enc_size = enc_size;
2493 key->type = type;
75d262c2 2494
ca9142b8 2495 return key;
75d262c2
VCG
2496}
2497
ca9142b8
JH
2498struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2499 u8 addr_type, u8 val[16], bdaddr_t *rpa)
970c4e46
JH
2500{
2501 struct smp_irk *irk;
2502
2503 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2504 if (!irk) {
2505 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2506 if (!irk)
ca9142b8 2507 return NULL;
970c4e46
JH
2508
2509 bacpy(&irk->bdaddr, bdaddr);
2510 irk->addr_type = addr_type;
2511
adae20cb 2512 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
970c4e46
JH
2513 }
2514
2515 memcpy(irk->val, val, 16);
2516 bacpy(&irk->rpa, rpa);
2517
ca9142b8 2518 return irk;
970c4e46
JH
2519}
2520
55ed8ca1
JH
2521int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2522{
2523 struct link_key *key;
2524
2525 key = hci_find_link_key(hdev, bdaddr);
2526 if (!key)
2527 return -ENOENT;
2528
6ed93dc6 2529 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1 2530
0378b597
JH
2531 list_del_rcu(&key->list);
2532 kfree_rcu(key, rcu);
55ed8ca1
JH
2533
2534 return 0;
2535}
2536
e0b2b27e 2537int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
b899efaf 2538{
970d0f1b 2539 struct smp_ltk *k;
c51ffa0b 2540 int removed = 0;
b899efaf 2541
970d0f1b 2542 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
e0b2b27e 2543 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
b899efaf
VCG
2544 continue;
2545
6ed93dc6 2546 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf 2547
970d0f1b
JH
2548 list_del_rcu(&k->list);
2549 kfree_rcu(k, rcu);
c51ffa0b 2550 removed++;
b899efaf
VCG
2551 }
2552
c51ffa0b 2553 return removed ? 0 : -ENOENT;
b899efaf
VCG
2554}
2555
a7ec7338
JH
2556void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2557{
adae20cb 2558 struct smp_irk *k;
a7ec7338 2559
adae20cb 2560 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
a7ec7338
JH
2561 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2562 continue;
2563
2564 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2565
adae20cb
JH
2566 list_del_rcu(&k->list);
2567 kfree_rcu(k, rcu);
a7ec7338
JH
2568 }
2569}
2570
55e76b38
JH
2571bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2572{
2573 struct smp_ltk *k;
4ba9faf3 2574 struct smp_irk *irk;
55e76b38
JH
2575 u8 addr_type;
2576
2577 if (type == BDADDR_BREDR) {
2578 if (hci_find_link_key(hdev, bdaddr))
2579 return true;
2580 return false;
2581 }
2582
2583 /* Convert to HCI addr type which struct smp_ltk uses */
2584 if (type == BDADDR_LE_PUBLIC)
2585 addr_type = ADDR_LE_DEV_PUBLIC;
2586 else
2587 addr_type = ADDR_LE_DEV_RANDOM;
2588
4ba9faf3
JH
2589 irk = hci_get_irk(hdev, bdaddr, addr_type);
2590 if (irk) {
2591 bdaddr = &irk->bdaddr;
2592 addr_type = irk->addr_type;
2593 }
2594
55e76b38
JH
2595 rcu_read_lock();
2596 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
87c8b28d
JH
2597 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2598 rcu_read_unlock();
55e76b38 2599 return true;
87c8b28d 2600 }
55e76b38
JH
2601 }
2602 rcu_read_unlock();
2603
2604 return false;
2605}
2606
6bd32326 2607/* HCI command timer function */
65cc2b49 2608static void hci_cmd_timeout(struct work_struct *work)
6bd32326 2609{
65cc2b49
MH
2610 struct hci_dev *hdev = container_of(work, struct hci_dev,
2611 cmd_timer.work);
6bd32326 2612
bda4f23a
AE
2613 if (hdev->sent_cmd) {
2614 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2615 u16 opcode = __le16_to_cpu(sent->opcode);
2616
2617 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2618 } else {
2619 BT_ERR("%s command tx timeout", hdev->name);
2620 }
2621
6bd32326 2622 atomic_set(&hdev->cmd_cnt, 1);
c347b765 2623 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
2624}
2625
2763eda6 2626struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
6928a924 2627 bdaddr_t *bdaddr, u8 bdaddr_type)
2763eda6
SJ
2628{
2629 struct oob_data *data;
2630
6928a924
JH
2631 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2632 if (bacmp(bdaddr, &data->bdaddr) != 0)
2633 continue;
2634 if (data->bdaddr_type != bdaddr_type)
2635 continue;
2636 return data;
2637 }
2763eda6
SJ
2638
2639 return NULL;
2640}
2641
6928a924
JH
2642int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2643 u8 bdaddr_type)
2763eda6
SJ
2644{
2645 struct oob_data *data;
2646
6928a924 2647 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2763eda6
SJ
2648 if (!data)
2649 return -ENOENT;
2650
6928a924 2651 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2763eda6
SJ
2652
2653 list_del(&data->list);
2654 kfree(data);
2655
2656 return 0;
2657}
2658
35f7498a 2659void hci_remote_oob_data_clear(struct hci_dev *hdev)
2763eda6
SJ
2660{
2661 struct oob_data *data, *n;
2662
2663 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2664 list_del(&data->list);
2665 kfree(data);
2666 }
2763eda6
SJ
2667}
2668
0798872e 2669int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
6928a924 2670 u8 bdaddr_type, u8 *hash192, u8 *rand192,
81328d5c 2671 u8 *hash256, u8 *rand256)
2763eda6
SJ
2672{
2673 struct oob_data *data;
2674
6928a924 2675 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2763eda6 2676 if (!data) {
0a14ab41 2677 data = kmalloc(sizeof(*data), GFP_KERNEL);
2763eda6
SJ
2678 if (!data)
2679 return -ENOMEM;
2680
2681 bacpy(&data->bdaddr, bdaddr);
6928a924 2682 data->bdaddr_type = bdaddr_type;
2763eda6
SJ
2683 list_add(&data->list, &hdev->remote_oob_data);
2684 }
2685
81328d5c
JH
2686 if (hash192 && rand192) {
2687 memcpy(data->hash192, hash192, sizeof(data->hash192));
2688 memcpy(data->rand192, rand192, sizeof(data->rand192));
f7697b16
MH
2689 if (hash256 && rand256)
2690 data->present = 0x03;
81328d5c
JH
2691 } else {
2692 memset(data->hash192, 0, sizeof(data->hash192));
2693 memset(data->rand192, 0, sizeof(data->rand192));
f7697b16
MH
2694 if (hash256 && rand256)
2695 data->present = 0x02;
2696 else
2697 data->present = 0x00;
0798872e
MH
2698 }
2699
81328d5c
JH
2700 if (hash256 && rand256) {
2701 memcpy(data->hash256, hash256, sizeof(data->hash256));
2702 memcpy(data->rand256, rand256, sizeof(data->rand256));
2703 } else {
2704 memset(data->hash256, 0, sizeof(data->hash256));
2705 memset(data->rand256, 0, sizeof(data->rand256));
f7697b16
MH
2706 if (hash192 && rand192)
2707 data->present = 0x01;
81328d5c 2708 }
0798872e 2709
6ed93dc6 2710 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
2711
2712 return 0;
2713}
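/* Editorial summary (not part of the original file) of the "present"
 * encoding used above: it is a bitmask in which 0x01 means only the
 * P-192 hash/randomizer pair is valid, 0x02 only the P-256 pair, and
 * 0x03 both pairs, matching the OOB data variants for Secure Simple
 * Pairing and Secure Connections.
 */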
2714
d2609b34
FG
2715/* This function requires the caller holds hdev->lock */
2716struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2717{
2718 struct adv_info *adv_instance;
2719
2720 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2721 if (adv_instance->instance == instance)
2722 return adv_instance;
2723 }
2724
2725 return NULL;
2726}
2727
2728/* This function requires the caller holds hdev->lock */
2729 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
{
2730 struct adv_info *cur_instance;
2731
2732 cur_instance = hci_find_adv_instance(hdev, instance);
2733 if (!cur_instance)
2734 return NULL;
2735
2736 if (cur_instance == list_last_entry(&hdev->adv_instances,
2737 struct adv_info, list))
2738 return list_first_entry(&hdev->adv_instances,
2739 struct adv_info, list);
2740 else
2741 return list_next_entry(cur_instance, list);
2742}
2743
2744/* This function requires the caller holds hdev->lock */
2745int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2746{
2747 struct adv_info *adv_instance;
2748
2749 adv_instance = hci_find_adv_instance(hdev, instance);
2750 if (!adv_instance)
2751 return -ENOENT;
2752
2753 BT_DBG("%s removing instance %d", hdev->name, instance);
2754
5d900e46
FG
2755 if (hdev->cur_adv_instance == instance && hdev->adv_instance_timeout) {
2756 cancel_delayed_work(&hdev->adv_instance_expire);
2757 hdev->adv_instance_timeout = 0;
2758 }
2759
d2609b34
FG
2760 list_del(&adv_instance->list);
2761 kfree(adv_instance);
2762
2763 hdev->adv_instance_cnt--;
2764
2765 return 0;
2766}
2767
2768/* This function requires the caller holds hdev->lock */
2769void hci_adv_instances_clear(struct hci_dev *hdev)
2770{
2771 struct adv_info *adv_instance, *n;
2772
5d900e46
FG
2773 if (hdev->adv_instance_timeout) {
2774 cancel_delayed_work(&hdev->adv_instance_expire);
2775 hdev->adv_instance_timeout = 0;
2776 }
2777
d2609b34
FG
2778 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2779 list_del(&adv_instance->list);
2780 kfree(adv_instance);
2781 }
2782
2783 hdev->adv_instance_cnt = 0;
2784}
2785
2786/* This function requires the caller holds hdev->lock */
2787int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2788 u16 adv_data_len, u8 *adv_data,
2789 u16 scan_rsp_len, u8 *scan_rsp_data,
2790 u16 timeout, u16 duration)
2791{
2792 struct adv_info *adv_instance;
2793
2794 adv_instance = hci_find_adv_instance(hdev, instance);
2795 if (adv_instance) {
2796 memset(adv_instance->adv_data, 0,
2797 sizeof(adv_instance->adv_data));
2798 memset(adv_instance->scan_rsp_data, 0,
2799 sizeof(adv_instance->scan_rsp_data));
2800 } else {
2801 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2802 instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2803 return -EOVERFLOW;
2804
39ecfad6 2805 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
d2609b34
FG
2806 if (!adv_instance)
2807 return -ENOMEM;
2808
fffd38bc 2809 adv_instance->pending = true;
d2609b34
FG
2810 adv_instance->instance = instance;
2811 list_add(&adv_instance->list, &hdev->adv_instances);
2812 hdev->adv_instance_cnt++;
2813 }
2814
2815 adv_instance->flags = flags;
2816 adv_instance->adv_data_len = adv_data_len;
2817 adv_instance->scan_rsp_len = scan_rsp_len;
2818
2819 if (adv_data_len)
2820 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2821
2822 if (scan_rsp_len)
2823 memcpy(adv_instance->scan_rsp_data,
2824 scan_rsp_data, scan_rsp_len);
2825
2826 adv_instance->timeout = timeout;
5d900e46 2827 adv_instance->remaining_time = timeout;
d2609b34
FG
2828
2829 if (duration == 0)
2830 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2831 else
2832 adv_instance->duration = duration;
2833
2834 BT_DBG("%s for instance %d", hdev->name, instance);
2835
2836 return 0;
2837}
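/* Illustrative sketch (editorial addition, not part of the original
 * file): registering a simple advertising instance that carries only
 * a Flags AD element, with no timeout and the default duration. The
 * caller must hold hdev->lock:
 *
 *	u8 adv_data[] = { 0x02, 0x01, 0x06 };	// len, type=Flags, value
 *
 *	hci_dev_lock(hdev);
 *	hci_add_adv_instance(hdev, 0x01, 0, sizeof(adv_data), adv_data,
 *			     0, NULL, 0, 0);
 *	hci_dev_unlock(hdev);
 */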
2838
dcc36c16 2839struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
b9ee0a78 2840 bdaddr_t *bdaddr, u8 type)
b2a66aad 2841{
8035ded4 2842 struct bdaddr_list *b;
b2a66aad 2843
dcc36c16 2844 list_for_each_entry(b, bdaddr_list, list) {
b9ee0a78 2845 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 2846 return b;
b9ee0a78 2847 }
b2a66aad
AJ
2848
2849 return NULL;
2850}
2851
dcc36c16 2852void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
b2a66aad
AJ
2853{
2854 struct list_head *p, *n;
2855
dcc36c16 2856 list_for_each_safe(p, n, bdaddr_list) {
b9ee0a78 2857 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
b2a66aad
AJ
2858
2859 list_del(p);
2860 kfree(b);
2861 }
b2a66aad
AJ
2862}
2863
dcc36c16 2864int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2865{
2866 struct bdaddr_list *entry;
b2a66aad 2867
b9ee0a78 2868 if (!bacmp(bdaddr, BDADDR_ANY))
b2a66aad
AJ
2869 return -EBADF;
2870
dcc36c16 2871 if (hci_bdaddr_list_lookup(list, bdaddr, type))
5e762444 2872 return -EEXIST;
b2a66aad 2873
27f70f3e 2874 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
5e762444
AJ
2875 if (!entry)
2876 return -ENOMEM;
b2a66aad
AJ
2877
2878 bacpy(&entry->bdaddr, bdaddr);
b9ee0a78 2879 entry->bdaddr_type = type;
b2a66aad 2880
dcc36c16 2881 list_add(&entry->list, list);
b2a66aad 2882
2a8357f2 2883 return 0;
b2a66aad
AJ
2884}
2885
dcc36c16 2886int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2887{
2888 struct bdaddr_list *entry;
b2a66aad 2889
35f7498a 2890 if (!bacmp(bdaddr, BDADDR_ANY)) {
dcc36c16 2891 hci_bdaddr_list_clear(list);
35f7498a
JH
2892 return 0;
2893 }
b2a66aad 2894
dcc36c16 2895 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
d2ab0ac1
MH
2896 if (!entry)
2897 return -ENOENT;
2898
2899 list_del(&entry->list);
2900 kfree(entry);
2901
2902 return 0;
2903}
2904
15819a70
AG
2905/* This function requires the caller holds hdev->lock */
2906struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2907 bdaddr_t *addr, u8 addr_type)
2908{
2909 struct hci_conn_params *params;
2910
2911 list_for_each_entry(params, &hdev->le_conn_params, list) {
2912 if (bacmp(&params->addr, addr) == 0 &&
2913 params->addr_type == addr_type) {
2914 return params;
2915 }
2916 }
2917
2918 return NULL;
2919}
2920
4b10966f 2921/* This function requires the caller holds hdev->lock */
501f8827
JH
2922struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2923 bdaddr_t *addr, u8 addr_type)
a9b0a04c 2924{
912b42ef 2925 struct hci_conn_params *param;
a9b0a04c 2926
501f8827 2927 list_for_each_entry(param, list, action) {
912b42ef
JH
2928 if (bacmp(&param->addr, addr) == 0 &&
2929 param->addr_type == addr_type)
2930 return param;
4b10966f
MH
2931 }
2932
2933 return NULL;
a9b0a04c
AG
2934}
2935
15819a70 2936/* This function requires the caller holds hdev->lock */
51d167c0
MH
2937struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2938 bdaddr_t *addr, u8 addr_type)
15819a70
AG
2939{
2940 struct hci_conn_params *params;
2941
2942 params = hci_conn_params_lookup(hdev, addr, addr_type);
cef952ce 2943 if (params)
51d167c0 2944 return params;
15819a70
AG
2945
2946 params = kzalloc(sizeof(*params), GFP_KERNEL);
2947 if (!params) {
2948 BT_ERR("Out of memory");
51d167c0 2949 return NULL;
15819a70
AG
2950 }
2951
2952 bacpy(&params->addr, addr);
2953 params->addr_type = addr_type;
cef952ce
AG
2954
2955 list_add(&params->list, &hdev->le_conn_params);
93450c75 2956 INIT_LIST_HEAD(&params->action);
cef952ce 2957
bf5b3c8b
MH
2958 params->conn_min_interval = hdev->le_conn_min_interval;
2959 params->conn_max_interval = hdev->le_conn_max_interval;
2960 params->conn_latency = hdev->le_conn_latency;
2961 params->supervision_timeout = hdev->le_supv_timeout;
2962 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2963
2964 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2965
51d167c0 2966 return params;
bf5b3c8b
MH
2967}
2968
f6c63249 2969static void hci_conn_params_free(struct hci_conn_params *params)
15819a70 2970{
f8aaf9b6 2971 if (params->conn) {
f161dd41 2972 hci_conn_drop(params->conn);
f8aaf9b6
JH
2973 hci_conn_put(params->conn);
2974 }
f161dd41 2975
95305baa 2976 list_del(&params->action);
15819a70
AG
2977 list_del(&params->list);
2978 kfree(params);
f6c63249
JH
2979}
2980
2981/* This function requires the caller holds hdev->lock */
2982void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2983{
2984 struct hci_conn_params *params;
2985
2986 params = hci_conn_params_lookup(hdev, addr, addr_type);
2987 if (!params)
2988 return;
2989
2990 hci_conn_params_free(params);
15819a70 2991
95305baa
JH
2992 hci_update_background_scan(hdev);
2993
15819a70
AG
2994 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2995}
2996
2997/* This function requires the caller holds hdev->lock */
55af49a8 2998void hci_conn_params_clear_disabled(struct hci_dev *hdev)
15819a70
AG
2999{
3000 struct hci_conn_params *params, *tmp;
3001
3002 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
55af49a8
JH
3003 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3004 continue;
f75113a2
JP
3005
3006 /* If trying to establish a one-time connection to a disabled
3007 * device, leave the params, but mark them as just once.
3008 */
3009 if (params->explicit_connect) {
3010 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3011 continue;
3012 }
3013
15819a70
AG
3014 list_del(&params->list);
3015 kfree(params);
3016 }
3017
55af49a8 3018 BT_DBG("All LE disabled connection parameters were removed");
77a77a30
AG
3019}
3020
3021/* This function requires the caller holds hdev->lock */
373110c5 3022void hci_conn_params_clear_all(struct hci_dev *hdev)
77a77a30 3023{
15819a70 3024 struct hci_conn_params *params, *tmp;
77a77a30 3025
f6c63249
JH
3026 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3027 hci_conn_params_free(params);
77a77a30 3028
a4790dbd 3029 hci_update_background_scan(hdev);
77a77a30 3030
15819a70 3031 BT_DBG("All LE connection parameters were removed");
77a77a30
AG
3032}
3033
1904a853 3034static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7ba8b4be 3035{
4c87eaab
AG
3036 if (status) {
3037 BT_ERR("Failed to start inquiry: status %d", status);
7ba8b4be 3038
4c87eaab
AG
3039 hci_dev_lock(hdev);
3040 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3041 hci_dev_unlock(hdev);
3042 return;
3043 }
7ba8b4be
AG
3044}
3045
1904a853
MH
3046static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
3047 u16 opcode)
7ba8b4be 3048{
4c87eaab
AG
3049 /* General inquiry access code (GIAC) */
3050 u8 lap[3] = { 0x33, 0x8b, 0x9e };
4c87eaab 3051 struct hci_cp_inquiry cp;
7ba8b4be
AG
3052 int err;
3053
4c87eaab
AG
3054 if (status) {
3055 BT_ERR("Failed to disable LE scanning: status %d", status);
3056 return;
3057 }
7ba8b4be 3058
2d28cfe7
JP
3059 hdev->discovery.scan_start = 0;
3060
4c87eaab
AG
3061 switch (hdev->discovery.type) {
3062 case DISCOV_TYPE_LE:
3063 hci_dev_lock(hdev);
3064 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3065 hci_dev_unlock(hdev);
3066 break;
7ba8b4be 3067
4c87eaab 3068 case DISCOV_TYPE_INTERLEAVED:
4c87eaab 3069 hci_dev_lock(hdev);
7dbfac1d 3070
07d2334a
JP
3071 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
3072 &hdev->quirks)) {
3073 /* If we were running an LE-only scan, change the discovery
3074 * state. If we were running both LE and BR/EDR inquiry
3075 * simultaneously, and BR/EDR inquiry is already
3076 * finished, stop discovery, otherwise BR/EDR inquiry
177d0506
WK
3077 * will stop discovery when finished. If we are resolving
3078 * a remote device name, do not change the discovery state.
07d2334a 3079 */
177d0506
WK
3080 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
3081 hdev->discovery.state != DISCOVERY_RESOLVING)
07d2334a
JP
3082 hci_discovery_set_state(hdev,
3083 DISCOVERY_STOPPED);
3084 } else {
baf880a9
JH
3085 struct hci_request req;
3086
07d2334a
JP
3087 hci_inquiry_cache_flush(hdev);
3088
baf880a9
JH
3089 hci_req_init(&req, hdev);
3090
3091 memset(&cp, 0, sizeof(cp));
3092 memcpy(&cp.lap, lap, sizeof(cp.lap));
3093 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3094 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3095
07d2334a
JP
3096 err = hci_req_run(&req, inquiry_complete);
3097 if (err) {
3098 BT_ERR("Inquiry request failed: err %d", err);
3099 hci_discovery_set_state(hdev,
3100 DISCOVERY_STOPPED);
3101 }
4c87eaab 3102 }
7dbfac1d 3103
4c87eaab
AG
3104 hci_dev_unlock(hdev);
3105 break;
7dbfac1d 3106 }
7dbfac1d
AG
3107}
3108
7ba8b4be
AG
3109static void le_scan_disable_work(struct work_struct *work)
3110{
3111 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 3112 le_scan_disable.work);
4c87eaab
AG
3113 struct hci_request req;
3114 int err;
7ba8b4be
AG
3115
3116 BT_DBG("%s", hdev->name);
3117
2d28cfe7
JP
3118 cancel_delayed_work_sync(&hdev->le_scan_restart);
3119
4c87eaab 3120 hci_req_init(&req, hdev);
28b75a89 3121
b1efcc28 3122 hci_req_add_le_scan_disable(&req);
28b75a89 3123
4c87eaab
AG
3124 err = hci_req_run(&req, le_scan_disable_work_complete);
3125 if (err)
3126 BT_ERR("Disable LE scanning request failed: err %d", err);
28b75a89
AG
3127}
3128
2d28cfe7
JP
3129static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
3130 u16 opcode)
3131{
3132 unsigned long timeout, duration, scan_start, now;
3133
3134 BT_DBG("%s", hdev->name);
3135
3136 if (status) {
3137 BT_ERR("Failed to restart LE scan: status %d", status);
3138 return;
3139 }
3140
3141 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
3142 !hdev->discovery.scan_start)
3143 return;
3144
3145 /* When the scan was started, hdev->le_scan_disable was queued to
3146 * run "duration" jiffies after scan_start. During a scan restart
3147 * this job has been canceled, and it must be queued again after the
3148 * proper timeout to make sure the scan does not run indefinitely.
3149 */
3150 duration = hdev->discovery.scan_duration;
3151 scan_start = hdev->discovery.scan_start;
3152 now = jiffies;
3153 if (now - scan_start <= duration) {
3154 int elapsed;
3155
3156 if (now >= scan_start)
3157 elapsed = now - scan_start;
3158 else
3159 elapsed = ULONG_MAX - scan_start + now;
3160
3161 timeout = duration - elapsed;
3162 } else {
3163 timeout = 0;
3164 }
3165 queue_delayed_work(hdev->workqueue,
3166 &hdev->le_scan_disable, timeout);
3167}
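/* Editorial worked example (not part of the original file): with
 * duration = 10240 jiffies, scan_start = 1000 and now = 4000, the
 * elapsed time is 3000, so the disable job is re-queued after
 * timeout = 10240 - 3000 = 7240 jiffies. The ULONG_MAX branch above
 * only matters when the jiffies counter has wrapped between
 * scan_start and now.
 */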
3168
3169static void le_scan_restart_work(struct work_struct *work)
3170{
3171 struct hci_dev *hdev = container_of(work, struct hci_dev,
3172 le_scan_restart.work);
3173 struct hci_request req;
3174 struct hci_cp_le_set_scan_enable cp;
3175 int err;
3176
3177 BT_DBG("%s", hdev->name);
3178
3179 /* If controller is not scanning we are done. */
d7a5a11d 3180 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2d28cfe7
JP
3181 return;
3182
3183 hci_req_init(&req, hdev);
3184
3185 hci_req_add_le_scan_disable(&req);
3186
3187 memset(&cp, 0, sizeof(cp));
3188 cp.enable = LE_SCAN_ENABLE;
3189 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3190 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3191
3192 err = hci_req_run(&req, le_scan_restart_work_complete);
3193 if (err)
3194 BT_ERR("Restart LE scan request failed: err %d", err);
3195}
3196
a1f4c318
JH
3197/* Copy the Identity Address of the controller.
3198 *
3199 * If the controller has a public BD_ADDR, then by default use that one.
3200 * If this is an LE-only controller without a public address, default to
3201 * the static random address.
3202 *
3203 * For debugging purposes it is possible to force controllers with a
3204 * public address to use the static random address instead.
50b5b952
MH
3205 *
3206 * In case BR/EDR has been disabled on a dual-mode controller and
3207 * userspace has configured a static address, then that address
3208 * becomes the identity address instead of the public BR/EDR address.
a1f4c318
JH
3209 */
3210void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3211 u8 *bdaddr_type)
3212{
b7cb93e5 3213 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
50b5b952 3214 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
d7a5a11d 3215 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
50b5b952 3216 bacmp(&hdev->static_addr, BDADDR_ANY))) {
a1f4c318
JH
3217 bacpy(bdaddr, &hdev->static_addr);
3218 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3219 } else {
3220 bacpy(bdaddr, &hdev->bdaddr);
3221 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3222 }
3223}
3224
9be0dab7
DH
3225/* Alloc HCI device */
3226struct hci_dev *hci_alloc_dev(void)
3227{
3228 struct hci_dev *hdev;
3229
27f70f3e 3230 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
9be0dab7
DH
3231 if (!hdev)
3232 return NULL;
3233
b1b813d4
DH
3234 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3235 hdev->esco_type = (ESCO_HV1);
3236 hdev->link_mode = (HCI_LM_ACCEPT);
b4cb9fb2
MH
3237 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3238 hdev->io_capability = 0x03; /* No Input No Output */
96c2103a 3239 hdev->manufacturer = 0xffff; /* Default to internal use */
bbaf444a
JH
3240 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3241 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
d2609b34
FG
3242 hdev->adv_instance_cnt = 0;
3243 hdev->cur_adv_instance = 0x00;
5d900e46 3244 hdev->adv_instance_timeout = 0;
b1b813d4 3245
b1b813d4
DH
3246 hdev->sniff_max_interval = 800;
3247 hdev->sniff_min_interval = 80;
3248
3f959d46 3249 hdev->le_adv_channel_map = 0x07;
628531c9
GL
3250 hdev->le_adv_min_interval = 0x0800;
3251 hdev->le_adv_max_interval = 0x0800;
bef64738
MH
3252 hdev->le_scan_interval = 0x0060;
3253 hdev->le_scan_window = 0x0030;
4e70c7e7
MH
3254 hdev->le_conn_min_interval = 0x0028;
3255 hdev->le_conn_max_interval = 0x0038;
04fb7d90
MH
3256 hdev->le_conn_latency = 0x0000;
3257 hdev->le_supv_timeout = 0x002a;
a8e1bfaa
MH
3258 hdev->le_def_tx_len = 0x001b;
3259 hdev->le_def_tx_time = 0x0148;
3260 hdev->le_max_tx_len = 0x001b;
3261 hdev->le_max_tx_time = 0x0148;
3262 hdev->le_max_rx_len = 0x001b;
3263 hdev->le_max_rx_time = 0x0148;
bef64738 3264
d6bfd59c 3265 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
b9a7a61e 3266 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
31ad1691
AK
3267 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3268 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
d6bfd59c 3269
b1b813d4
DH
3270 mutex_init(&hdev->lock);
3271 mutex_init(&hdev->req_lock);
3272
3273 INIT_LIST_HEAD(&hdev->mgmt_pending);
3274 INIT_LIST_HEAD(&hdev->blacklist);
6659358e 3275 INIT_LIST_HEAD(&hdev->whitelist);
b1b813d4
DH
3276 INIT_LIST_HEAD(&hdev->uuids);
3277 INIT_LIST_HEAD(&hdev->link_keys);
3278 INIT_LIST_HEAD(&hdev->long_term_keys);
970c4e46 3279 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
b1b813d4 3280 INIT_LIST_HEAD(&hdev->remote_oob_data);
d2ab0ac1 3281 INIT_LIST_HEAD(&hdev->le_white_list);
15819a70 3282 INIT_LIST_HEAD(&hdev->le_conn_params);
77a77a30 3283 INIT_LIST_HEAD(&hdev->pend_le_conns);
66f8455a 3284 INIT_LIST_HEAD(&hdev->pend_le_reports);
6b536b5e 3285 INIT_LIST_HEAD(&hdev->conn_hash.list);
d2609b34 3286 INIT_LIST_HEAD(&hdev->adv_instances);
b1b813d4
DH
3287
3288 INIT_WORK(&hdev->rx_work, hci_rx_work);
3289 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3290 INIT_WORK(&hdev->tx_work, hci_tx_work);
3291 INIT_WORK(&hdev->power_on, hci_power_on);
c7741d16 3292 INIT_WORK(&hdev->error_reset, hci_error_reset);
b1b813d4 3293
b1b813d4
DH
3294 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3295 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3296 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2d28cfe7 3297 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
5d900e46 3298 INIT_DELAYED_WORK(&hdev->adv_instance_expire, hci_adv_timeout_expire);
b1b813d4 3299
b1b813d4
DH
3300 skb_queue_head_init(&hdev->rx_q);
3301 skb_queue_head_init(&hdev->cmd_q);
3302 skb_queue_head_init(&hdev->raw_q);
3303
3304 init_waitqueue_head(&hdev->req_wait_q);
3305
65cc2b49 3306 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
b1b813d4 3307
b1b813d4
DH
3308 hci_init_sysfs(hdev);
3309 discovery_init(hdev);
9be0dab7
DH
3310
3311 return hdev;
3312}
3313EXPORT_SYMBOL(hci_alloc_dev);
3314
3315/* Free HCI device */
3316void hci_free_dev(struct hci_dev *hdev)
3317{
9be0dab7
DH
3318 /* will free via device release */
3319 put_device(&hdev->dev);
3320}
3321EXPORT_SYMBOL(hci_free_dev);
3322
1da177e4
LT
3323/* Register HCI device */
3324int hci_register_dev(struct hci_dev *hdev)
3325{
b1b813d4 3326 int id, error;
1da177e4 3327
74292d5a 3328 if (!hdev->open || !hdev->close || !hdev->send)
1da177e4
LT
3329 return -EINVAL;
3330
08add513
MM
3331 /* Do not allow HCI_AMP devices to register at index 0,
3332 * so the index can be used as the AMP controller ID.
3333 */
3df92b31
SL
3334 switch (hdev->dev_type) {
3335 case HCI_BREDR:
3336 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3337 break;
3338 case HCI_AMP:
3339 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3340 break;
3341 default:
3342 return -EINVAL;
1da177e4 3343 }
8e87d142 3344
3df92b31
SL
3345 if (id < 0)
3346 return id;
3347
1da177e4
LT
3348 sprintf(hdev->name, "hci%d", id);
3349 hdev->id = id;
2d8b3a11
AE
3350
3351 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3352
d8537548
KC
3353 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3354 WQ_MEM_RECLAIM, 1, hdev->name);
33ca954d
DH
3355 if (!hdev->workqueue) {
3356 error = -ENOMEM;
3357 goto err;
3358 }
f48fd9c8 3359
d8537548
KC
3360 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3361 WQ_MEM_RECLAIM, 1, hdev->name);
6ead1bbc
JH
3362 if (!hdev->req_workqueue) {
3363 destroy_workqueue(hdev->workqueue);
3364 error = -ENOMEM;
3365 goto err;
3366 }
3367
0153e2ec
MH
3368 if (!IS_ERR_OR_NULL(bt_debugfs))
3369 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3370
bdc3e0f1
MH
3371 dev_set_name(&hdev->dev, "%s", hdev->name);
3372
3373 error = device_add(&hdev->dev);
33ca954d 3374 if (error < 0)
54506918 3375 goto err_wqueue;
1da177e4 3376
611b30f7 3377 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
3378 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3379 hdev);
611b30f7
MH
3380 if (hdev->rfkill) {
3381 if (rfkill_register(hdev->rfkill) < 0) {
3382 rfkill_destroy(hdev->rfkill);
3383 hdev->rfkill = NULL;
3384 }
3385 }
3386
5e130367 3387 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
a1536da2 3388 hci_dev_set_flag(hdev, HCI_RFKILLED);
5e130367 3389
a1536da2
MH
3390 hci_dev_set_flag(hdev, HCI_SETUP);
3391 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
ce2be9ac 3392
01cd3404 3393 if (hdev->dev_type == HCI_BREDR) {
56f87901
JH
3394 /* Assume BR/EDR support until proven otherwise (such as
3395 * through reading supported features during init).
3396 */
a1536da2 3397 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
56f87901 3398 }
ce2be9ac 3399
fcee3377
GP
3400 write_lock(&hci_dev_list_lock);
3401 list_add(&hdev->list, &hci_dev_list);
3402 write_unlock(&hci_dev_list_lock);
3403
4a964404
MH
3404 /* Devices that are marked for raw-only usage are unconfigured
3405 * and should not be included in normal operation.
fee746b0
MH
3406 */
3407 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
a1536da2 3408 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
fee746b0 3409
05fcd4c4 3410 hci_sock_dev_event(hdev, HCI_DEV_REG);
dc946bd8 3411 hci_dev_hold(hdev);
1da177e4 3412
19202573 3413 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 3414
1da177e4 3415 return id;
f48fd9c8 3416
33ca954d
DH
3417err_wqueue:
3418 destroy_workqueue(hdev->workqueue);
6ead1bbc 3419 destroy_workqueue(hdev->req_workqueue);
33ca954d 3420err:
3df92b31 3421 ida_simple_remove(&hci_index_ida, hdev->id);
f48fd9c8 3422
33ca954d 3423 return error;
1da177e4
LT
3424}
3425EXPORT_SYMBOL(hci_register_dev);
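/* Illustrative driver-side sketch (editorial addition, not part of
 * the original file; my_open, my_close and my_send are hypothetical
 * transport callbacks):
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	if (hci_register_dev(hdev) < 0) {
 *		hci_free_dev(hdev);
 *		return -ENODEV;
 *	}
 */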
3426
3427/* Unregister HCI device */
59735631 3428void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 3429{
2d7cc19e 3430 int id;
ef222013 3431
c13854ce 3432 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 3433
a1536da2 3434 hci_dev_set_flag(hdev, HCI_UNREGISTER);
94324962 3435
3df92b31
SL
3436 id = hdev->id;
3437
f20d09d5 3438 write_lock(&hci_dev_list_lock);
1da177e4 3439 list_del(&hdev->list);
f20d09d5 3440 write_unlock(&hci_dev_list_lock);
1da177e4
LT
3441
3442 hci_dev_do_close(hdev);
3443
b9b5ef18
GP
3444 cancel_work_sync(&hdev->power_on);
3445
ab81cbf9 3446 if (!test_bit(HCI_INIT, &hdev->flags) &&
d7a5a11d
MH
3447 !hci_dev_test_flag(hdev, HCI_SETUP) &&
3448 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
09fd0de5 3449 hci_dev_lock(hdev);
744cf19e 3450 mgmt_index_removed(hdev);
09fd0de5 3451 hci_dev_unlock(hdev);
56e5cb86 3452 }
ab81cbf9 3453
2e58ef3e
JH
3454 /* mgmt_index_removed should take care of emptying the
3455 * pending list */
3456 BUG_ON(!list_empty(&hdev->mgmt_pending));
3457
05fcd4c4 3458 hci_sock_dev_event(hdev, HCI_DEV_UNREG);
1da177e4 3459
611b30f7
MH
3460 if (hdev->rfkill) {
3461 rfkill_unregister(hdev->rfkill);
3462 rfkill_destroy(hdev->rfkill);
3463 }
3464
bdc3e0f1 3465 device_del(&hdev->dev);
147e2d59 3466
0153e2ec
MH
3467 debugfs_remove_recursive(hdev->debugfs);
3468
f48fd9c8 3469 destroy_workqueue(hdev->workqueue);
6ead1bbc 3470 destroy_workqueue(hdev->req_workqueue);
f48fd9c8 3471
09fd0de5 3472 hci_dev_lock(hdev);
dcc36c16 3473 hci_bdaddr_list_clear(&hdev->blacklist);
6659358e 3474 hci_bdaddr_list_clear(&hdev->whitelist);
2aeb9a1a 3475 hci_uuids_clear(hdev);
55ed8ca1 3476 hci_link_keys_clear(hdev);
b899efaf 3477 hci_smp_ltks_clear(hdev);
970c4e46 3478 hci_smp_irks_clear(hdev);
2763eda6 3479 hci_remote_oob_data_clear(hdev);
d2609b34 3480 hci_adv_instances_clear(hdev);
dcc36c16 3481 hci_bdaddr_list_clear(&hdev->le_white_list);
373110c5 3482 hci_conn_params_clear_all(hdev);
22078800 3483 hci_discovery_filter_clear(hdev);
09fd0de5 3484 hci_dev_unlock(hdev);
e2e0cacb 3485
dc946bd8 3486 hci_dev_put(hdev);
3df92b31
SL
3487
3488 ida_simple_remove(&hci_index_ida, id);
1da177e4
LT
3489}
3490EXPORT_SYMBOL(hci_unregister_dev);
3491
3492/* Suspend HCI device */
3493int hci_suspend_dev(struct hci_dev *hdev)
3494{
05fcd4c4 3495 hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
1da177e4
LT
3496 return 0;
3497}
3498EXPORT_SYMBOL(hci_suspend_dev);
3499
3500/* Resume HCI device */
3501int hci_resume_dev(struct hci_dev *hdev)
3502{
05fcd4c4 3503 hci_sock_dev_event(hdev, HCI_DEV_RESUME);
1da177e4
LT
3504 return 0;
3505}
3506EXPORT_SYMBOL(hci_resume_dev);
3507
75e0569f
MH
3508/* Reset HCI device */
3509int hci_reset_dev(struct hci_dev *hdev)
3510{
3511 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3512 struct sk_buff *skb;
3513
3514 skb = bt_skb_alloc(3, GFP_ATOMIC);
3515 if (!skb)
3516 return -ENOMEM;
3517
3518 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3519 memcpy(skb_put(skb, 3), hw_err, 3);
3520
3521 /* Send Hardware Error to upper stack */
3522 return hci_recv_frame(hdev, skb);
3523}
3524EXPORT_SYMBOL(hci_reset_dev);
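/* Editorial note (not part of the original file): the injected frame
 * above is a complete HCI event packet: event code
 * HCI_EV_HARDWARE_ERROR (0x10), parameter length 0x01 and hardware
 * error code 0x00. Feeding it through hci_recv_frame() makes the
 * stack take its normal hardware-error path (hci_error_reset above)
 * as if the controller itself had reported the fault.
 */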
3525
76bca880 3526/* Receive frame from HCI drivers */
e1a26170 3527int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 3528{
76bca880 3529 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 3530 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
3531 kfree_skb(skb);
3532 return -ENXIO;
3533 }
3534
fe806dce
MH
3535 if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
3536 bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
3537 bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
3538 kfree_skb(skb);
3539 return -EINVAL;
3540 }
3541
d82603c6 3542 /* Incoming skb */
76bca880
MH
3543 bt_cb(skb)->incoming = 1;
3544
3545 /* Time stamp */
3546 __net_timestamp(skb);
3547
76bca880 3548 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 3549 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 3550
76bca880
MH
3551 return 0;
3552}
3553EXPORT_SYMBOL(hci_recv_frame);
3554
e875ff84
MH
3555/* Receive diagnostic message from HCI drivers */
3556int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3557{
581d6fd6
MH
3558 /* Mark as diagnostic packet */
3559 bt_cb(skb)->pkt_type = HCI_DIAG_PKT;
3560
e875ff84
MH
3561 /* Time stamp */
3562 __net_timestamp(skb);
3563
581d6fd6
MH
3564 skb_queue_tail(&hdev->rx_q, skb);
3565 queue_work(hdev->workqueue, &hdev->rx_work);
e875ff84 3566
e875ff84
MH
3567 return 0;
3568}
3569EXPORT_SYMBOL(hci_recv_diag);
3570
1da177e4
LT
3571/* ---- Interface to upper protocols ---- */
3572
1da177e4
LT
3573int hci_register_cb(struct hci_cb *cb)
3574{
3575 BT_DBG("%p name %s", cb, cb->name);
3576
fba7ecf0 3577 mutex_lock(&hci_cb_list_lock);
00629e0f 3578 list_add_tail(&cb->list, &hci_cb_list);
fba7ecf0 3579 mutex_unlock(&hci_cb_list_lock);
1da177e4
LT
3580
3581 return 0;
3582}
3583EXPORT_SYMBOL(hci_register_cb);
3584
3585int hci_unregister_cb(struct hci_cb *cb)
3586{
3587 BT_DBG("%p name %s", cb, cb->name);
3588
fba7ecf0 3589 mutex_lock(&hci_cb_list_lock);
1da177e4 3590 list_del(&cb->list);
fba7ecf0 3591 mutex_unlock(&hci_cb_list_lock);
1da177e4
LT
3592
3593 return 0;
3594}
3595EXPORT_SYMBOL(hci_unregister_cb);
3596
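/* Illustrative: the registration pattern used by upper protocols
 * such as L2CAP and SMP. The example callback names are hypothetical.
 */
static struct hci_cb example_cb = {
	.name		= "example",
	.connect_cfm	= example_connect_cfm,
	.disconn_cfm	= example_disconn_cfm,
};

static int __init example_init(void)
{
	return hci_register_cb(&example_cb);
}

static void __exit example_exit(void)
{
	hci_unregister_cb(&example_cb);
}
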
51086991 3597static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4 3598{
cdc52faa
MH
3599 int err;
3600
0d48d939 3601 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 3602
cd82e61c
MH
3603 /* Time stamp */
3604 __net_timestamp(skb);
1da177e4 3605
cd82e61c
MH
3606 /* Send copy to monitor */
3607 hci_send_to_monitor(hdev, skb);
3608
3609 if (atomic_read(&hdev->promisc)) {
3610 /* Send copy to the sockets */
470fe1b5 3611 hci_send_to_sock(hdev, skb);
1da177e4
LT
3612 }
3613
3614 /* Get rid of skb owner, prior to sending to the driver. */
3615 skb_orphan(skb);
3616
73d0d3c8
MH
3617 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3618 kfree_skb(skb);
3619 return;
3620 }
3621
cdc52faa
MH
3622 err = hdev->send(hdev, skb);
3623 if (err < 0) {
3624 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3625 kfree_skb(skb);
3626 }
1da177e4
LT
3627}
3628
1ca3a9d0 3629/* Send HCI command */
07dc93dd
JH
3630int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3631 const void *param)
1ca3a9d0
JH
3632{
3633 struct sk_buff *skb;
3634
3635 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3636
3637 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3638 if (!skb) {
3639 BT_ERR("%s no memory for command", hdev->name);
3640 return -ENOMEM;
3641 }
3642
49c922bb 3643 /* Stand-alone HCI commands must be flagged as
11714b3d
JH
3644 * single-command requests.
3645 */
242c0ebd 3646 bt_cb(skb)->hci.req_start = true;
11714b3d 3647
1da177e4 3648 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 3649 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
3650
3651 return 0;
3652}
1da177e4
LT
3653
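/* Illustrative: queueing a single fire-and-forget command with
 * hci_send_cmd(). The parameter buffer is copied into the skb by
 * hci_prepare_cmd(), so it may live in automatic storage.
 */
static int example_enable_page_scan(struct hci_dev *hdev)
{
	__u8 scan = SCAN_PAGE;

	return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
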
3654/* Get data from the previously sent command */
a9de9248 3655void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
3656{
3657 struct hci_command_hdr *hdr;
3658
3659 if (!hdev->sent_cmd)
3660 return NULL;
3661
3662 hdr = (void *) hdev->sent_cmd->data;
3663
a9de9248 3664 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
3665 return NULL;
3666
f0e09510 3667 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
3668
3669 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3670}
3671
fbef168f
LP
3672/* Send HCI command and wait for command complete event */
3673struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3674 const void *param, u32 timeout)
3675{
3676 struct sk_buff *skb;
3677
3678 if (!test_bit(HCI_UP, &hdev->flags))
3679 return ERR_PTR(-ENETDOWN);
3680
3681 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3682
3683 hci_req_lock(hdev);
3684 skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3685 hci_req_unlock(hdev);
3686
3687 return skb;
3688}
3689EXPORT_SYMBOL(hci_cmd_sync);
3690
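/* Illustrative: a synchronous command round trip. On success the
 * returned skb carries the Command Complete parameters and must be
 * freed by the caller. Error checking is kept minimal here.
 */
static int example_read_bd_addr(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct hci_rp_read_bd_addr *rp;
	struct sk_buff *skb;

	skb = hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
			   HCI_CMD_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	if (skb->len < sizeof(*rp)) {
		kfree_skb(skb);
		return -EIO;
	}

	rp = (struct hci_rp_read_bd_addr *)skb->data;
	if (!rp->status)
		bacpy(bdaddr, &rp->bdaddr);

	kfree_skb(skb);
	return 0;
}
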
1da177e4
LT
3691/* Send ACL data */
3692static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3693{
3694 struct hci_acl_hdr *hdr;
3695 int len = skb->len;
3696
badff6d0
ACM
3697 skb_push(skb, HCI_ACL_HDR_SIZE);
3698 skb_reset_transport_header(skb);
9c70220b 3699 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
3700 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3701 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
3702}
3703
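/* Illustrative: the 16-bit handle field packs a 12-bit connection
 * handle with 4 flag bits (packet boundary + broadcast), e.g.
 *
 *	hci_handle_pack(0x002a, ACL_START) == 0x002a | (0x02 << 12)
 *	                                   == 0x202a
 *	hci_handle(0x202a) == 0x002a, hci_flags(0x202a) == 0x02
 */
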
ee22be7e 3704static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 3705 struct sk_buff *skb, __u16 flags)
1da177e4 3706{
ee22be7e 3707 struct hci_conn *conn = chan->conn;
1da177e4
LT
3708 struct hci_dev *hdev = conn->hdev;
3709 struct sk_buff *list;
3710
087bfd99
GP
3711 skb->len = skb_headlen(skb);
3712 skb->data_len = 0;
3713
3714 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
3715
3716 switch (hdev->dev_type) {
3717 case HCI_BREDR:
3718 hci_add_acl_hdr(skb, conn->handle, flags);
3719 break;
3720 case HCI_AMP:
3721 hci_add_acl_hdr(skb, chan->handle, flags);
3722 break;
3723 default:
3724 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3725 return;
3726 }
087bfd99 3727
70f23020
AE
3728 list = skb_shinfo(skb)->frag_list;
3729 if (!list) {
1da177e4
LT
3730 /* Non-fragmented */
3731 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3732
73d80deb 3733 skb_queue_tail(queue, skb);
1da177e4
LT
3734 } else {
3735 /* Fragmented */
3736 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3737
3738 skb_shinfo(skb)->frag_list = NULL;
3739
9cfd5a23
JR
3740 /* Queue all fragments atomically. We need spin_lock_bh here
3741 * because with 6LoWPAN links this function can be called from
3742 * softirq context, and taking a plain spin lock there could
3743 * deadlock.
3744 */
3745 spin_lock_bh(&queue->lock);
1da177e4 3746
73d80deb 3747 __skb_queue_tail(queue, skb);
e702112f
AE
3748
3749 flags &= ~ACL_START;
3750 flags |= ACL_CONT;
1da177e4
LT
3751 do {
3752 skb = list; list = list->next;
8e87d142 3753
0d48d939 3754 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 3755 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
3756
3757 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3758
73d80deb 3759 __skb_queue_tail(queue, skb);
1da177e4
LT
3760 } while (list);
3761
9cfd5a23 3762 spin_unlock_bh(&queue->lock);
1da177e4 3763 }
73d80deb
LAD
3764}
3765
3766void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3767{
ee22be7e 3768 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 3769
f0e09510 3770 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb 3771
ee22be7e 3772 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 3773
3eff45ea 3774 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 3775}
1da177e4
LT
3776
3777/* Send SCO data */
0d861d8b 3778void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
3779{
3780 struct hci_dev *hdev = conn->hdev;
3781 struct hci_sco_hdr hdr;
3782
3783 BT_DBG("%s len %d", hdev->name, skb->len);
3784
aca3192c 3785 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
3786 hdr.dlen = skb->len;
3787
badff6d0
ACM
3788 skb_push(skb, HCI_SCO_HDR_SIZE);
3789 skb_reset_transport_header(skb);
9c70220b 3790 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4 3791
0d48d939 3792 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 3793
1da177e4 3794 skb_queue_tail(&conn->data_q, skb);
3eff45ea 3795 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 3796}
1da177e4
LT
3797
3798/* ---- HCI TX task (outgoing data) ---- */
3799
3800/* HCI Connection scheduler */
6039aa73
GP
3801static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3802 int *quote)
1da177e4
LT
3803{
3804 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 3805 struct hci_conn *conn = NULL, *c;
abc5de8f 3806 unsigned int num = 0, min = ~0;
1da177e4 3807
8e87d142 3808 /* We don't have to lock device here. Connections are always
1da177e4 3809 * added and removed with TX task disabled. */
bf4c6325
GP
3810
3811 rcu_read_lock();
3812
3813 list_for_each_entry_rcu(c, &h->list, list) {
769be974 3814 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 3815 continue;
769be974
MH
3816
3817 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3818 continue;
3819
1da177e4
LT
3820 num++;
3821
3822 if (c->sent < min) {
3823 min = c->sent;
3824 conn = c;
3825 }
52087a79
LAD
3826
3827 if (hci_conn_num(hdev, type) == num)
3828 break;
1da177e4
LT
3829 }
3830
bf4c6325
GP
3831 rcu_read_unlock();
3832
1da177e4 3833 if (conn) {
6ed58ec5
VT
3834 int cnt, q;
3835
3836 switch (conn->type) {
3837 case ACL_LINK:
3838 cnt = hdev->acl_cnt;
3839 break;
3840 case SCO_LINK:
3841 case ESCO_LINK:
3842 cnt = hdev->sco_cnt;
3843 break;
3844 case LE_LINK:
3845 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3846 break;
3847 default:
3848 cnt = 0;
3849 BT_ERR("Unknown link type");
3850 }
3851
3852 q = cnt / num;
1da177e4
LT
3853 *quote = q ? q : 1;
3854 } else
3855 *quote = 0;
3856
3857 BT_DBG("conn %p quote %d", conn, *quote);
3858 return conn;
3859}
3860
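/* Illustrative: with hdev->acl_cnt == 8 free buffers and num == 3
 * ACL connections with queued data, the chosen connection gets a
 * quote of 8 / 3 == 2 packets per round; the "q ? q : 1" fallback
 * guarantees forward progress when cnt < num would yield 0.
 */
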
6039aa73 3861static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
3862{
3863 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 3864 struct hci_conn *c;
1da177e4 3865
bae1f5d9 3866 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 3867
bf4c6325
GP
3868 rcu_read_lock();
3869
1da177e4 3870 /* Kill stalled connections */
bf4c6325 3871 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 3872 if (c->type == type && c->sent) {
6ed93dc6
AE
3873 BT_ERR("%s killing stalled connection %pMR",
3874 hdev->name, &c->dst);
bed71748 3875 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
3876 }
3877 }
bf4c6325
GP
3878
3879 rcu_read_unlock();
1da177e4
LT
3880}
3881
6039aa73
GP
3882static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3883 int *quote)
1da177e4 3884{
73d80deb
LAD
3885 struct hci_conn_hash *h = &hdev->conn_hash;
3886 struct hci_chan *chan = NULL;
abc5de8f 3887 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 3888 struct hci_conn *conn;
73d80deb
LAD
3889 int cnt, q, conn_num = 0;
3890
3891 BT_DBG("%s", hdev->name);
3892
bf4c6325
GP
3893 rcu_read_lock();
3894
3895 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
3896 struct hci_chan *tmp;
3897
3898 if (conn->type != type)
3899 continue;
3900
3901 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3902 continue;
3903
3904 conn_num++;
3905
8192edef 3906 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
3907 struct sk_buff *skb;
3908
3909 if (skb_queue_empty(&tmp->data_q))
3910 continue;
3911
3912 skb = skb_peek(&tmp->data_q);
3913 if (skb->priority < cur_prio)
3914 continue;
3915
3916 if (skb->priority > cur_prio) {
3917 num = 0;
3918 min = ~0;
3919 cur_prio = skb->priority;
3920 }
3921
3922 num++;
3923
3924 if (conn->sent < min) {
3925 min = conn->sent;
3926 chan = tmp;
3927 }
3928 }
3929
3930 if (hci_conn_num(hdev, type) == conn_num)
3931 break;
3932 }
3933
bf4c6325
GP
3934 rcu_read_unlock();
3935
73d80deb
LAD
3936 if (!chan)
3937 return NULL;
3938
3939 switch (chan->conn->type) {
3940 case ACL_LINK:
3941 cnt = hdev->acl_cnt;
3942 break;
bd1eb66b
AE
3943 case AMP_LINK:
3944 cnt = hdev->block_cnt;
3945 break;
73d80deb
LAD
3946 case SCO_LINK:
3947 case ESCO_LINK:
3948 cnt = hdev->sco_cnt;
3949 break;
3950 case LE_LINK:
3951 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3952 break;
3953 default:
3954 cnt = 0;
3955 BT_ERR("Unknown link type");
3956 }
3957
3958 q = cnt / num;
3959 *quote = q ? q : 1;
3960 BT_DBG("chan %p quote %d", chan, *quote);
3961 return chan;
3962}
3963
02b20f0b
LAD
3964static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3965{
3966 struct hci_conn_hash *h = &hdev->conn_hash;
3967 struct hci_conn *conn;
3968 int num = 0;
3969
3970 BT_DBG("%s", hdev->name);
3971
bf4c6325
GP
3972 rcu_read_lock();
3973
3974 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
3975 struct hci_chan *chan;
3976
3977 if (conn->type != type)
3978 continue;
3979
3980 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3981 continue;
3982
3983 num++;
3984
8192edef 3985 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
3986 struct sk_buff *skb;
3987
3988 if (chan->sent) {
3989 chan->sent = 0;
3990 continue;
3991 }
3992
3993 if (skb_queue_empty(&chan->data_q))
3994 continue;
3995
3996 skb = skb_peek(&chan->data_q);
3997 if (skb->priority >= HCI_PRIO_MAX - 1)
3998 continue;
3999
4000 skb->priority = HCI_PRIO_MAX - 1;
4001
4002 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 4003 skb->priority);
02b20f0b
LAD
4004 }
4005
4006 if (hci_conn_num(hdev, type) == num)
4007 break;
4008 }
bf4c6325
GP
4009
4010 rcu_read_unlock();
4011
02b20f0b
LAD
4012}
4013
b71d385a
AE
4014static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4015{
4016 /* Calculate count of blocks used by this packet */
4017 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4018}
4019
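/* Illustrative: with hdev->block_len == 64, an ACL packet of
 * skb->len == 1021 bytes (1017 bytes of payload after the 4-byte
 * ACL header) occupies DIV_ROUND_UP(1017, 64) == 16 data blocks.
 */
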
6039aa73 4020static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 4021{
d7a5a11d 4022 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1da177e4
LT
4023 /* ACL tx timeout must be longer than maximum
4024 * link supervision timeout (40.9 seconds) */
63d2bc1b 4025 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 4026 HCI_ACL_TX_TIMEOUT))
bae1f5d9 4027 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 4028 }
63d2bc1b 4029}
1da177e4 4030
6039aa73 4031static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
4032{
4033 unsigned int cnt = hdev->acl_cnt;
4034 struct hci_chan *chan;
4035 struct sk_buff *skb;
4036 int quote;
4037
4038 __check_timeout(hdev, cnt);
04837f64 4039
73d80deb 4040 while (hdev->acl_cnt &&
a8c5fb1a 4041 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
4042 u32 priority = (skb_peek(&chan->data_q))->priority;
4043 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4044 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4045 skb->len, skb->priority);
73d80deb 4046
ec1cce24
LAD
4047 /* Stop if priority has changed */
4048 if (skb->priority < priority)
4049 break;
4050
4051 skb = skb_dequeue(&chan->data_q);
4052
73d80deb 4053 hci_conn_enter_active_mode(chan->conn,
04124681 4054 bt_cb(skb)->force_active);
04837f64 4055
57d17d70 4056 hci_send_frame(hdev, skb);
1da177e4
LT
4057 hdev->acl_last_tx = jiffies;
4058
4059 hdev->acl_cnt--;
73d80deb
LAD
4060 chan->sent++;
4061 chan->conn->sent++;
1da177e4
LT
4062 }
4063 }
02b20f0b
LAD
4064
4065 if (cnt != hdev->acl_cnt)
4066 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
4067}
4068
6039aa73 4069static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 4070{
63d2bc1b 4071 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
4072 struct hci_chan *chan;
4073 struct sk_buff *skb;
4074 int quote;
bd1eb66b 4075 u8 type;
b71d385a 4076
63d2bc1b 4077 __check_timeout(hdev, cnt);
b71d385a 4078
bd1eb66b
AE
4079 BT_DBG("%s", hdev->name);
4080
4081 if (hdev->dev_type == HCI_AMP)
4082 type = AMP_LINK;
4083 else
4084 type = ACL_LINK;
4085
b71d385a 4086 while (hdev->block_cnt > 0 &&
bd1eb66b 4087 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
4088 u32 priority = (skb_peek(&chan->data_q))->priority;
4089 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4090 int blocks;
4091
4092 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4093 skb->len, skb->priority);
b71d385a
AE
4094
4095 /* Stop if priority has changed */
4096 if (skb->priority < priority)
4097 break;
4098
4099 skb = skb_dequeue(&chan->data_q);
4100
4101 blocks = __get_blocks(hdev, skb);
4102 if (blocks > hdev->block_cnt)
4103 return;
4104
4105 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 4106 bt_cb(skb)->force_active);
b71d385a 4107
57d17d70 4108 hci_send_frame(hdev, skb);
b71d385a
AE
4109 hdev->acl_last_tx = jiffies;
4110
4111 hdev->block_cnt -= blocks;
4112 quote -= blocks;
4113
4114 chan->sent += blocks;
4115 chan->conn->sent += blocks;
4116 }
4117 }
4118
4119 if (cnt != hdev->block_cnt)
bd1eb66b 4120 hci_prio_recalculate(hdev, type);
b71d385a
AE
4121}
4122
6039aa73 4123static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
4124{
4125 BT_DBG("%s", hdev->name);
4126
bd1eb66b
AE
4127 /* No ACL link over BR/EDR controller */
4128 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4129 return;
4130
4131 /* No AMP link over AMP controller */
4132 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
4133 return;
4134
4135 switch (hdev->flow_ctl_mode) {
4136 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4137 hci_sched_acl_pkt(hdev);
4138 break;
4139
4140 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4141 hci_sched_acl_blk(hdev);
4142 break;
4143 }
4144}
4145
1da177e4 4146/* Schedule SCO */
6039aa73 4147static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
4148{
4149 struct hci_conn *conn;
4150 struct sk_buff *skb;
4151 int quote;
4152
4153 BT_DBG("%s", hdev->name);
4154
52087a79
LAD
4155 if (!hci_conn_num(hdev, SCO_LINK))
4156 return;
4157
1da177e4
LT
4158 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4159 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4160 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4161 hci_send_frame(hdev, skb);
1da177e4
LT
4162
4163 conn->sent++;
4164 if (conn->sent == ~0)
4165 conn->sent = 0;
4166 }
4167 }
4168}
4169
6039aa73 4170static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
4171{
4172 struct hci_conn *conn;
4173 struct sk_buff *skb;
4174 int quote;
4175
4176 BT_DBG("%s", hdev->name);
4177
52087a79
LAD
4178 if (!hci_conn_num(hdev, ESCO_LINK))
4179 return;
4180
8fc9ced3
GP
4181 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4182 &quote))) {
b6a0dc82
MH
4183 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4184 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4185 hci_send_frame(hdev, skb);
b6a0dc82
MH
4186
4187 conn->sent++;
4188 if (conn->sent == ~0)
4189 conn->sent = 0;
4190 }
4191 }
4192}
4193
6039aa73 4194static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 4195{
73d80deb 4196 struct hci_chan *chan;
6ed58ec5 4197 struct sk_buff *skb;
02b20f0b 4198 int quote, cnt, tmp;
6ed58ec5
VT
4199
4200 BT_DBG("%s", hdev->name);
4201
52087a79
LAD
4202 if (!hci_conn_num(hdev, LE_LINK))
4203 return;
4204
d7a5a11d 4205 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
6ed58ec5
VT
4206 /* LE tx timeout must be longer than maximum
4207 * link supervision timeout (40.9 seconds) */
bae1f5d9 4208 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 4209 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 4210 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
4211 }
4212
4213 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 4214 tmp = cnt;
73d80deb 4215 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
4216 u32 priority = (skb_peek(&chan->data_q))->priority;
4217 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4218 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4219 skb->len, skb->priority);
6ed58ec5 4220
ec1cce24
LAD
4221 /* Stop if priority has changed */
4222 if (skb->priority < priority)
4223 break;
4224
4225 skb = skb_dequeue(&chan->data_q);
4226
57d17d70 4227 hci_send_frame(hdev, skb);
6ed58ec5
VT
4228 hdev->le_last_tx = jiffies;
4229
4230 cnt--;
73d80deb
LAD
4231 chan->sent++;
4232 chan->conn->sent++;
6ed58ec5
VT
4233 }
4234 }
73d80deb 4235
6ed58ec5
VT
4236 if (hdev->le_pkts)
4237 hdev->le_cnt = cnt;
4238 else
4239 hdev->acl_cnt = cnt;
02b20f0b
LAD
4240
4241 if (cnt != tmp)
4242 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
4243}
4244
3eff45ea 4245static void hci_tx_work(struct work_struct *work)
1da177e4 4246{
3eff45ea 4247 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
4248 struct sk_buff *skb;
4249
6ed58ec5 4250 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 4251 hdev->sco_cnt, hdev->le_cnt);
1da177e4 4252
d7a5a11d 4253 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
52de599e
MH
4254 /* Schedule queues and send pending data to the HCI driver */
4255 hci_sched_acl(hdev);
4256 hci_sched_sco(hdev);
4257 hci_sched_esco(hdev);
4258 hci_sched_le(hdev);
4259 }
6ed58ec5 4260
1da177e4
LT
4261 /* Send next queued raw (unknown type) packet */
4262 while ((skb = skb_dequeue(&hdev->raw_q)))
57d17d70 4263 hci_send_frame(hdev, skb);
1da177e4
LT
4264}
4265
25985edc 4266/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
4267
4268/* ACL data packet */
6039aa73 4269static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
4270{
4271 struct hci_acl_hdr *hdr = (void *) skb->data;
4272 struct hci_conn *conn;
4273 __u16 handle, flags;
4274
4275 skb_pull(skb, HCI_ACL_HDR_SIZE);
4276
4277 handle = __le16_to_cpu(hdr->handle);
4278 flags = hci_flags(handle);
4279 handle = hci_handle(handle);
4280
f0e09510 4281 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 4282 handle, flags);
1da177e4
LT
4283
4284 hdev->stat.acl_rx++;
4285
4286 hci_dev_lock(hdev);
4287 conn = hci_conn_hash_lookup_handle(hdev, handle);
4288 hci_dev_unlock(hdev);
8e87d142 4289
1da177e4 4290 if (conn) {
65983fc7 4291 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 4292
1da177e4 4293 /* Send to upper protocol */
686ebf28
UF
4294 l2cap_recv_acldata(conn, skb, flags);
4295 return;
1da177e4 4296 } else {
8e87d142 4297 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 4298 hdev->name, handle);
1da177e4
LT
4299 }
4300
4301 kfree_skb(skb);
4302}
4303
4304/* SCO data packet */
6039aa73 4305static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
4306{
4307 struct hci_sco_hdr *hdr = (void *) skb->data;
4308 struct hci_conn *conn;
4309 __u16 handle;
4310
4311 skb_pull(skb, HCI_SCO_HDR_SIZE);
4312
4313 handle = __le16_to_cpu(hdr->handle);
4314
f0e09510 4315 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
4316
4317 hdev->stat.sco_rx++;
4318
4319 hci_dev_lock(hdev);
4320 conn = hci_conn_hash_lookup_handle(hdev, handle);
4321 hci_dev_unlock(hdev);
4322
4323 if (conn) {
1da177e4 4324 /* Send to upper protocol */
686ebf28
UF
4325 sco_recv_scodata(conn, skb);
4326 return;
1da177e4 4327 } else {
8e87d142 4328 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 4329 hdev->name, handle);
1da177e4
LT
4330 }
4331
4332 kfree_skb(skb);
4333}
4334
9238f36a
JH
4335static bool hci_req_is_complete(struct hci_dev *hdev)
4336{
4337 struct sk_buff *skb;
4338
4339 skb = skb_peek(&hdev->cmd_q);
4340 if (!skb)
4341 return true;
4342
242c0ebd 4343 return bt_cb(skb)->hci.req_start;
9238f36a
JH
4344}
4345
42c6b129
JH
4346static void hci_resend_last(struct hci_dev *hdev)
4347{
4348 struct hci_command_hdr *sent;
4349 struct sk_buff *skb;
4350 u16 opcode;
4351
4352 if (!hdev->sent_cmd)
4353 return;
4354
4355 sent = (void *) hdev->sent_cmd->data;
4356 opcode = __le16_to_cpu(sent->opcode);
4357 if (opcode == HCI_OP_RESET)
4358 return;
4359
4360 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4361 if (!skb)
4362 return;
4363
4364 skb_queue_head(&hdev->cmd_q, skb);
4365 queue_work(hdev->workqueue, &hdev->cmd_work);
4366}
4367
e6214487
JH
4368void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4369 hci_req_complete_t *req_complete,
4370 hci_req_complete_skb_t *req_complete_skb)
9238f36a 4371{
9238f36a
JH
4372 struct sk_buff *skb;
4373 unsigned long flags;
4374
4375 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4376
42c6b129
JH
4377 /* If the completed command doesn't match the last one that was
4378 * sent we need to do special handling of it.
9238f36a 4379 */
42c6b129
JH
4380 if (!hci_sent_cmd_data(hdev, opcode)) {
4381 /* Some CSR based controllers generate a spontaneous
4382 * reset complete event during init and any pending
4383 * command will never be completed. In such a case we
4384 * need to resend whatever was the last sent
4385 * command.
4386 */
4387 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4388 hci_resend_last(hdev);
4389
9238f36a 4390 return;
42c6b129 4391 }
9238f36a
JH
4392
4393 /* If the command succeeded and there's still more commands in
4394 * this request the request is not yet complete.
4395 */
4396 if (!status && !hci_req_is_complete(hdev))
4397 return;
4398
4399 /* If this was the last command in a request the complete
4400 * callback would be found in hdev->sent_cmd instead of the
4401 * command queue (hdev->cmd_q).
4402 */
242c0ebd
MH
4403 if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
4404 *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
e6214487
JH
4405 return;
4406 }
53e21fbc 4407
242c0ebd
MH
4408 if (bt_cb(hdev->sent_cmd)->hci.req_complete_skb) {
4409 *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
e6214487 4410 return;
9238f36a
JH
4411 }
4412
4413 /* Remove all pending commands belonging to this request */
4414 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4415 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
242c0ebd 4416 if (bt_cb(skb)->hci.req_start) {
9238f36a
JH
4417 __skb_queue_head(&hdev->cmd_q, skb);
4418 break;
4419 }
4420
242c0ebd
MH
4421 *req_complete = bt_cb(skb)->hci.req_complete;
4422 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
9238f36a
JH
4423 kfree_skb(skb);
4424 }
4425 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
9238f36a
JH
4426}
4427
b78752cc 4428static void hci_rx_work(struct work_struct *work)
1da177e4 4429{
b78752cc 4430 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
4431 struct sk_buff *skb;
4432
4433 BT_DBG("%s", hdev->name);
4434
1da177e4 4435 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
4436 /* Send copy to monitor */
4437 hci_send_to_monitor(hdev, skb);
4438
1da177e4
LT
4439 if (atomic_read(&hdev->promisc)) {
4440 /* Send copy to the sockets */
470fe1b5 4441 hci_send_to_sock(hdev, skb);
1da177e4
LT
4442 }
4443
d7a5a11d 4444 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1da177e4
LT
4445 kfree_skb(skb);
4446 continue;
4447 }
4448
4449 if (test_bit(HCI_INIT, &hdev->flags)) {
4450 /* Don't process data packets in this state. */
0d48d939 4451 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
4452 case HCI_ACLDATA_PKT:
4453 case HCI_SCODATA_PKT:
4454 kfree_skb(skb);
4455 continue;
3ff50b79 4456 }
1da177e4
LT
4457 }
4458
4459 /* Process frame */
0d48d939 4460 switch (bt_cb(skb)->pkt_type) {
1da177e4 4461 case HCI_EVENT_PKT:
b78752cc 4462 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
4463 hci_event_packet(hdev, skb);
4464 break;
4465
4466 case HCI_ACLDATA_PKT:
4467 BT_DBG("%s ACL data packet", hdev->name);
4468 hci_acldata_packet(hdev, skb);
4469 break;
4470
4471 case HCI_SCODATA_PKT:
4472 BT_DBG("%s SCO data packet", hdev->name);
4473 hci_scodata_packet(hdev, skb);
4474 break;
4475
4476 default:
4477 kfree_skb(skb);
4478 break;
4479 }
4480 }
1da177e4
LT
4481}
4482
c347b765 4483static void hci_cmd_work(struct work_struct *work)
1da177e4 4484{
c347b765 4485 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
4486 struct sk_buff *skb;
4487
2104786b
AE
4488 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4489 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 4490
1da177e4 4491 /* Send queued commands */
5a08ecce
AE
4492 if (atomic_read(&hdev->cmd_cnt)) {
4493 skb = skb_dequeue(&hdev->cmd_q);
4494 if (!skb)
4495 return;
4496
7585b97a 4497 kfree_skb(hdev->sent_cmd);
1da177e4 4498
a675d7f1 4499 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 4500 if (hdev->sent_cmd) {
1da177e4 4501 atomic_dec(&hdev->cmd_cnt);
57d17d70 4502 hci_send_frame(hdev, skb);
7bdb8a5c 4503 if (test_bit(HCI_RESET, &hdev->flags))
65cc2b49 4504 cancel_delayed_work(&hdev->cmd_timer);
7bdb8a5c 4505 else
65cc2b49
MH
4506 schedule_delayed_work(&hdev->cmd_timer,
4507 HCI_CMD_TIMEOUT);
1da177e4
LT
4508 } else {
4509 skb_queue_head(&hdev->cmd_q, skb);
c347b765 4510 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
4511 }
4512 }
4513}