/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	kfree_skb(skb);

	hci_dev_change_flag(hdev, HCI_DUT_MODE);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	/* When the diagnostic flags are not persistent and the transport
	 * is not active, then there is no need for the vendor callback.
	 *
	 * Instead just store the desired value. If needed the setting
	 * will be programmed when the controller gets powered on.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    !test_bit(HCI_RUNNING, &hdev->flags))
		goto done;

	hci_req_lock(hdev);
	err = hdev->set_diag(hdev, enable);
	hci_req_unlock(hdev);

	if (err < 0)
		return err;

done:
	if (enable)
		hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
	else
		hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

	return count;
}

static const struct file_operations vendor_diag_fops = {
	.open		= simple_open,
	.read		= vendor_diag_read,
	.write		= vendor_diag_write,
	.llseek		= default_llseek,
};

static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
	debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
			    &dut_mode_fops);

	if (hdev->set_diag)
		debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
				    &vendor_diag_fops);
}
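
/* Usage sketch (an assumption about the environment: debugfs mounted at
 * its usual location, device named hci0): with the controller up, the
 * entries created above can be driven from userspace, e.g.
 *
 *	echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *	cat /sys/kernel/debug/bluetooth/hci0/vendor_diag
 *
 * Note that vendor_diag only exists when the driver provides a
 * set_diag callback.
 */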

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
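
/* Usage sketch: callers issue a command synchronously and get the
 * resulting event skb back. The opcode below is a hypothetical vendor
 * command, used purely for illustration:
 *
 *	skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(param), &param,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	...
 *	kfree_skb(skb);
 *
 * dut_mode_write() above follows exactly this pattern.
 */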

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void amp_init2(struct hci_request *req)
{
	/* Read Local Supported Features. Not all AMP controllers
	 * support this so it's placed conditionally in the second
	 * stage init.
	 */
	if (req->hdev->commands[14] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
}
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init1(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->dev_type == HCI_AMP)
		return amp_init2(req);

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			u8 mode = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI
		 * events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	if (hdev->commands[6] & 0x20 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x0f;

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40;	/* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04;	/* LE Direct Advertising
						 * Report
						 */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80;	/* LE Read Local P-256
						 * Public Key Complete
						 */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01;	/* LE Generate DHKey Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first two stages of init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}
static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	return 0;
}
static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
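
/* The small request builders above are handed to hci_req_sync() by the
 * HCI ioctl helpers later in this file. As a sketch, servicing
 * HCISETSCAN essentially boils down to:
 *
 *	hci_req_sync(hdev, hci_scan_req, dr.dev_opt, HCI_INIT_TIMEOUT);
 *
 * where dr is the struct hci_dev_req copied in from userspace.
 */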

/* Get HCI device by index.
 * Device is held on return; the caller must release the reference
 * again with hci_dev_put(). */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}
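
/* A typical discovery run moves through the states as
 *
 *	STOPPED -> STARTING -> FINDING [-> RESOLVING] -> STOPPING -> STOPPED
 *
 * where the optional RESOLVING detour performs remote name resolution.
 * Note that mgmt_discovering() only signals userspace on the FINDING
 * and STOPPED transitions handled above.
 */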
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
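
/* The MGMT_DEV_FOUND_* flags returned by hci_inquiry_cache_update() are
 * meant for the mgmt Device Found event: the event processing code
 * forwards them to mgmt_device_found() so that userspace learns whether
 * name confirmation is still required and whether only legacy pairing
 * is possible.
 */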
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
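
/* Userspace sketch (illustrative only, not part of this file):
 * hci_inquiry() backs the HCIINQUIRY ioctl, which takes a
 * struct hci_inquiry_req immediately followed by room for the
 * inquiry_info results:
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[255];
 *	} buf = {
 *		.ir = {
 *			.dev_id  = 0,			// hci0
 *			.flags   = IREQ_CACHE_FLUSH,
 *			.lap     = { 0x33, 0x8b, 0x9e },	// GIAC
 *			.length  = 8,			// 8 * 1.28s
 *			.num_rsp = 0,			// unlimited -> 255
 *		},
 *	};
 *
 *	ioctl(hci_sock_fd, HCIINQUIRY, &buf);
 */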
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		ret = -ENODEV;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	set_bit(HCI_RUNNING, &hdev->flags);
	hci_notify(hdev, HCI_DEV_OPEN);

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
		hci_sock_dev_event(hdev, HCI_DEV_SETUP);

		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			ret = __hci_unconf_init(hdev);
	}

	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			ret = __hci_init(hdev);
			if (!ret && hdev->post_init)
				ret = hdev->post_init(hdev);
		}
	}

	/* If the HCI Reset command is clearing all diagnostic settings,
	 * then they need to be reprogrammed after the init procedure
	 * completed.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
		ret = hdev->set_diag(hdev, true);

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		clear_bit(HCI_RUNNING, &hdev->flags);
		hci_notify(hdev, HCI_DEV_CLOSE);

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		if (p->conn) {
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
		list_del_init(&p->action);
	}

	BT_DBG("All LE pending actions cleared");
}
1639
6b3cc1db 1640int hci_dev_do_close(struct hci_dev *hdev)
1da177e4 1641{
acc649c6
MH
1642 bool auto_off;
1643
1da177e4
LT
1644 BT_DBG("%s %p", hdev->name, hdev);
1645
d24d8144 1646 if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
867146a0 1647 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
d24d8144 1648 test_bit(HCI_UP, &hdev->flags)) {
a44fecbd
THJA
1649 /* Execute vendor specific shutdown routine */
1650 if (hdev->shutdown)
1651 hdev->shutdown(hdev);
1652 }
1653
78c04c0b
VCG
1654 cancel_delayed_work(&hdev->power_off);
1655
1da177e4
LT
1656 hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);

	if (!auto_off && hdev->dev_type == HCI_BREDR)
		mgmt_powered(hdev, 0);

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	smp_unregister(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	clear_bit(HCI_RUNNING, &hdev->flags);
	hci_notify(hdev, HCI_DEV_CLOSE);

	/* After this point our queues are empty and no tasks are
	 * scheduled.
	 */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
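
/* hci_dev_close() is the by-index wrapper around hci_dev_do_close():
 * it resolves the device, refuses to touch one claimed for exclusive
 * user-channel access and cancels a pending auto-power-off first.
 */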
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

	hci_req_unlock(hdev);
	return ret;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	err = hci_dev_do_reset(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		ret = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}
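
/* The HCI write-scan-enable bits map onto the mgmt-level flags:
 * SCAN_PAGE (0x02) toggles HCI_CONNECTABLE and SCAN_INQUIRY (0x01)
 * toggles HCI_DISCOVERABLE. For example, scan = 0x03 makes the
 * controller both connectable and discoverable.
 */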
static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			mgmt_update_adv_data(hdev);

		mgmt_new_settings(hdev);
	}
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;
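
	/* HCISETACLMTU and HCISETSCOMTU pack two values into the single
	 * __u32 dev_opt: on a little-endian host the upper 16 bits carry
	 * the MTU and the lower 16 bits the packet count. A hypothetical
	 * caller would encode ACL MTU 1021 with 8 packets as
	 * dev_opt = (1021 << 16) | 8.
	 */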
	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;
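
	/* The bound above caps the allocation below at two pages; with
	 * an 8-byte struct hci_dev_req that is roughly 1024 entries on
	 * a system with 4K pages.
	 */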

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (blocked) {
		hci_dev_set_flag(hdev, HCI_RFKILLED);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG))
			hci_dev_do_close(hdev);
	} else {
		hci_dev_clear_flag(hdev, HCI_RFKILLED);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send the Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	BT_DBG("%s", hdev->name);

	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		BT_ERR("%s hardware error 0x%2.2x", hdev->name,
		       hdev->hw_error_code);

	if (hci_dev_do_close(hdev))
		return;

	hci_dev_do_open(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

static void hci_adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, adv_instance_expire.work);

	BT_DBG("%s", hdev->name);

	mgmt_adv_timeout_expired(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry_rcu(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}
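
/* Decide whether a link key created during pairing should survive the
 * connection. Legacy (pre-SSP) keys are always kept, debug keys never
 * are, and for the remaining types the bonding requirements both sides
 * declared during pairing decide the outcome.
 */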
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither the local nor the remote side requested no-bonding */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently.
	 */
	return false;
}

static u8 ltk_role(u8 type)
{
	if (type == SMP_LTK)
		return HCI_ROLE_MASTER;

	return HCI_ROLE_SLAVE;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			rcu_read_unlock();
			return irk;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}
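
/* bdaddr_t stores the address little-endian, so b[5] holds the most
 * significant byte. Static random addresses have their two top bits
 * set, which is what the (b[5] & 0xc0) != 0xc0 rejection below checks.
 */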
struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key.
	 */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del_rcu(&key->list);
	kfree_rcu(key, rcu);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k;
	int removed = 0;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
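
/* Report whether any kind of key is shared with the given address.
 * For LE the address is first resolved through the IRK list, so a peer
 * currently known by a resolvable private address is still reported as
 * paired under its identity address.
 */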
bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct smp_ltk *k;
	struct smp_irk *irk;
	u8 addr_type;

	if (type == BDADDR_BREDR) {
		if (hci_find_link_key(hdev, bdaddr))
			return true;
		return false;
	}

	/* Convert to HCI addr type which struct smp_ltk uses */
	if (type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}

/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list) {
		if (bacmp(bdaddr, &data->bdaddr) != 0)
			continue;
		if (data->bdaddr_type != bdaddr_type)
			continue;
		return data;
	}

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 bdaddr_type)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}
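
/* data->present encodes which OOB values are valid: 0x01 means P-192
 * values only, 0x02 means P-256 only and 0x03 means both. For example,
 * passing hash192/rand192 as NULL with valid hash256/rand256 leaves
 * present = 0x02.
 */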
0798872e 2676int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
6928a924 2677 u8 bdaddr_type, u8 *hash192, u8 *rand192,
81328d5c 2678 u8 *hash256, u8 *rand256)
2763eda6
SJ
2679{
2680 struct oob_data *data;
2681
6928a924 2682 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2763eda6 2683 if (!data) {
0a14ab41 2684 data = kmalloc(sizeof(*data), GFP_KERNEL);
2763eda6
SJ
2685 if (!data)
2686 return -ENOMEM;
2687
2688 bacpy(&data->bdaddr, bdaddr);
6928a924 2689 data->bdaddr_type = bdaddr_type;
2763eda6
SJ
2690 list_add(&data->list, &hdev->remote_oob_data);
2691 }
2692
81328d5c
JH
2693 if (hash192 && rand192) {
2694 memcpy(data->hash192, hash192, sizeof(data->hash192));
2695 memcpy(data->rand192, rand192, sizeof(data->rand192));
f7697b16
MH
2696 if (hash256 && rand256)
2697 data->present = 0x03;
81328d5c
JH
2698 } else {
2699 memset(data->hash192, 0, sizeof(data->hash192));
2700 memset(data->rand192, 0, sizeof(data->rand192));
f7697b16
MH
2701 if (hash256 && rand256)
2702 data->present = 0x02;
2703 else
2704 data->present = 0x00;
0798872e
MH
2705 }
2706
81328d5c
JH
2707 if (hash256 && rand256) {
2708 memcpy(data->hash256, hash256, sizeof(data->hash256));
2709 memcpy(data->rand256, rand256, sizeof(data->rand256));
2710 } else {
2711 memset(data->hash256, 0, sizeof(data->hash256));
2712 memset(data->rand256, 0, sizeof(data->rand256));
f7697b16
MH
2713 if (hash192 && rand192)
2714 data->present = 0x01;
81328d5c 2715 }
0798872e 2716
6ed93dc6 2717 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
2718
2719 return 0;
2720}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		if (adv_instance->instance == instance)
			return adv_instance;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *cur_instance;

	cur_instance = hci_find_adv_instance(hdev, instance);
	if (!cur_instance)
		return NULL;

	if (cur_instance == list_last_entry(&hdev->adv_instances,
					    struct adv_info, list))
		return list_first_entry(&hdev->adv_instances,
					struct adv_info, list);
	else
		return list_next_entry(cur_instance, list);
}

/* This function requires the caller holds hdev->lock */
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	BT_DBG("%s removing instance %d", hdev->name, instance);

	if (hdev->cur_adv_instance == instance && hdev->adv_instance_timeout) {
		cancel_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	list_del(&adv_instance->list);
	kfree(adv_instance);

	hdev->adv_instance_cnt--;

	return 0;
}

/* This function requires the caller holds hdev->lock */
void hci_adv_instances_clear(struct hci_dev *hdev)
{
	struct adv_info *adv_instance, *n;

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		list_del(&adv_instance->list);
		kfree(adv_instance);
	}

	hdev->adv_instance_cnt = 0;
}

/* This function requires the caller holds hdev->lock */
int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
			 u16 adv_data_len, u8 *adv_data,
			 u16 scan_rsp_len, u8 *scan_rsp_data,
			 u16 timeout, u16 duration)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (adv_instance) {
		memset(adv_instance->adv_data, 0,
		       sizeof(adv_instance->adv_data));
		memset(adv_instance->scan_rsp_data, 0,
		       sizeof(adv_instance->scan_rsp_data));
	} else {
		if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
		    instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
			return -EOVERFLOW;

		adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
		if (!adv_instance)
			return -ENOMEM;

		adv_instance->pending = true;
		adv_instance->instance = instance;
		list_add(&adv_instance->list, &hdev->adv_instances);
		hdev->adv_instance_cnt++;
	}

	adv_instance->flags = flags;
	adv_instance->adv_data_len = adv_data_len;
	adv_instance->scan_rsp_len = scan_rsp_len;

	if (adv_data_len)
		memcpy(adv_instance->adv_data, adv_data, adv_data_len);

	if (scan_rsp_len)
		memcpy(adv_instance->scan_rsp_data,
		       scan_rsp_data, scan_rsp_len);

	adv_instance->timeout = timeout;
	adv_instance->remaining_time = timeout;

	if (duration == 0)
		adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
	else
		adv_instance->duration = duration;

	BT_DBG("%s for instance %d", hdev->name, instance);

	return 0;
}

struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
					   bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, bdaddr_list) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}
}

int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, list);

	return 0;
}

int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
			return params;
		}
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
						  bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *param;

	list_for_each_entry(param, list, action) {
		if (bacmp(&param->addr, addr) == 0 &&
		    param->addr_type == addr_type)
			return param;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	INIT_LIST_HEAD(&params->action);

	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}

static void hci_conn_params_free(struct hci_conn_params *params)
{
	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
	}

	list_del(&params->action);
	list_del(&params->list);
	kfree(params);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_conn_params_free(params);

	hci_update_background_scan(hdev);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;

		/* If trying to establish a one-time connection to a
		 * disabled device, leave the params in place but mark
		 * them as explicit, one-shot entries.
		 */
		if (params->explicit_connect) {
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
			continue;
		}

		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_all(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
		hci_conn_params_free(params);

	hci_update_background_scan(hdev);

	BT_DBG("All LE connection parameters were removed");
}

static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
					  u16 opcode)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	hdev->discovery.scan_start = 0;

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_dev_lock(hdev);

		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			/* If we were running an LE only scan, change the
			 * discovery state. If we were running both LE and
			 * BR/EDR inquiry simultaneously, and BR/EDR inquiry
			 * is already finished, stop discovery; otherwise
			 * BR/EDR inquiry will stop discovery when it
			 * finishes. If we are resolving a remote device
			 * name, do not change the discovery state.
			 */
			if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
			    hdev->discovery.state != DISCOVERY_RESOLVING)
				hci_discovery_set_state(hdev,
							DISCOVERY_STOPPED);
		} else {
			struct hci_request req;

			hci_inquiry_cache_flush(hdev);

			hci_req_init(&req, hdev);

			memset(&cp, 0, sizeof(cp));
			memcpy(&cp.lap, lap, sizeof(cp.lap));
			cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
			hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

			err = hci_req_run(&req, inquiry_complete);
			if (err) {
				BT_ERR("Inquiry request failed: err %d", err);
				hci_discovery_set_state(hdev,
							DISCOVERY_STOPPED);
			}
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	cancel_delayed_work_sync(&hdev->le_scan_restart);

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}

static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
					  u16 opcode)
{
	unsigned long timeout, duration, scan_start, now;

	BT_DBG("%s", hdev->name);

	if (status) {
		BT_ERR("Failed to restart LE scan: status %d", status);
		return;
	}

	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
	    !hdev->discovery.scan_start)
		return;

	/* When the scan was started, hdev->le_scan_disable has been queued
	 * after duration from scan_start. During scan restart this job
	 * has been canceled, and we need to queue it again after proper
	 * timeout, to make sure that scan does not run indefinitely.
	 */
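	/* Worked example: with a 10240 ms scan duration, if 4000 ms have
	 * already elapsed since scan_start the disable job is re-queued
	 * with a 6240 ms timeout; the else branch below handles jiffies
	 * wrapping past ULONG_MAX between scan_start and now.
	 */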
	duration = hdev->discovery.scan_duration;
	scan_start = hdev->discovery.scan_start;
	now = jiffies;
	if (now - scan_start <= duration) {
		int elapsed;

		if (now >= scan_start)
			elapsed = now - scan_start;
		else
			elapsed = ULONG_MAX - scan_start + now;

		timeout = duration - elapsed;
	} else {
		timeout = 0;
	}
	queue_delayed_work(hdev->workqueue,
			   &hdev->le_scan_disable, timeout);
}

static void le_scan_restart_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_restart.work);
	struct hci_request req;
	struct hci_cp_le_set_scan_enable cp;
	int err;

	BT_DBG("%s", hdev->name);

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_ENABLE;
	cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_restart_work_complete);
	if (err)
		BT_ERR("Restart LE scan request failed: err %d", err);
}

/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 *
 * In case BR/EDR has been disabled on a dual-mode controller and
 * userspace has configured a static address, then that address
 * becomes the identity address instead of the public BR/EDR address.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
	hdev->adv_instance_timeout = 0;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;
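
	/* The LE defaults below are in controller units: advertising and
	 * scan intervals are multiples of 0.625 ms (0x0800 = 1.28 s,
	 * 0x0060 = 60 ms), connection intervals are multiples of 1.25 ms
	 * (0x0028 = 50 ms) and the supervision timeout is in 10 ms units
	 * (0x002a = 420 ms).
	 */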
	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);
	INIT_LIST_HEAD(&hdev->adv_instances);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->error_reset, hci_error_reset);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, hci_adv_timeout_expire);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		hci_dev_set_flag(hdev, HCI_RFKILLED);

	hci_dev_set_flag(hdev, HCI_SETUP);
	hci_dev_set_flag(hdev, HCI_AUTO_OFF);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hci_dev_set_flag(hdev, HCI_UNREGISTER);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list.
	 */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_instances_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Reset HCI device */
int hci_reset_dev(struct hci_dev *hdev)
{
	const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
	struct sk_buff *skb;

	skb = bt_skb_alloc(3, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	memcpy(skb_put(skb, 3), hw_err, 3);
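
	/* Injecting this synthetic event (error code 0x00) makes the core
	 * run its normal hardware-error handling (the hci_error_reset
	 * work above), i.e. close and reopen the device, exactly as if
	 * the controller itself had reported the error.
	 */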
	/* Send Hardware Error to upper stack */
	return hci_recv_frame(hdev, skb);
}
EXPORT_SYMBOL(hci_reset_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
	    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
	    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

/* Receive diagnostic message from HCI drivers */
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Mark as diagnostic packet */
	bt_cb(skb)->pkt_type = HCI_DIAG_PKT;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_diag);

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_add_tail(&cb->list, &hci_cb_list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
3603
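/* A minimal sketch of an upper protocol hooking into the core via
 * hci_register_cb(). The field names follow struct hci_cb as used by
 * L2CAP and SCO in this tree; the callback body is hypothetical.
 */
static void example_connect_cfm(struct hci_conn *conn, __u8 status)
{
	BT_DBG("conn %p status 0x%2.2x", conn, status);
}

static struct hci_cb example_cb = {
	.name		= "example",
	.connect_cfm	= example_connect_cfm,
};

/* Typically paired in the protocol's module init/exit:
 *	hci_register_cb(&example_cb);
 *	hci_unregister_cb(&example_cb);
 */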
51086991 3604static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4 3605{
cdc52faa
MH
3606 int err;
3607
0d48d939 3608 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 3609
cd82e61c
MH
3610 /* Time stamp */
3611 __net_timestamp(skb);
1da177e4 3612
cd82e61c
MH
3613 /* Send copy to monitor */
3614 hci_send_to_monitor(hdev, skb);
3615
3616 if (atomic_read(&hdev->promisc)) {
3617 /* Send copy to the sockets */
470fe1b5 3618 hci_send_to_sock(hdev, skb);
1da177e4
LT
3619 }
3620
3621 /* Get rid of skb owner, prior to sending to the driver. */
3622 skb_orphan(skb);
3623
73d0d3c8
MH
3624 if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3625 kfree_skb(skb);
3626 return;
3627 }
3628
cdc52faa
MH
3629 err = hdev->send(hdev, skb);
3630 if (err < 0) {
3631 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3632 kfree_skb(skb);
3633 }
1da177e4
LT
3634}
3635
1ca3a9d0 3636/* Send HCI command */
07dc93dd
JH
3637int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3638 const void *param)
1ca3a9d0
JH
3639{
3640 struct sk_buff *skb;
3641
3642 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3643
3644 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3645 if (!skb) {
3646 BT_ERR("%s no memory for command", hdev->name);
3647 return -ENOMEM;
3648 }
3649
49c922bb 3650 /* Stand-alone HCI commands must be flagged as
11714b3d
JH
3651 * single-command requests.
3652 */
db6e3e8d 3653 bt_cb(skb)->req.start = true;
11714b3d 3654
1da177e4 3655 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 3656 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
3657
3658 return 0;
3659}
1da177e4
LT
3660
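/* Hedged usage sketch for hci_send_cmd(): queueing a Write Scan
 * Enable command with a one-byte parameter. Opcode and parameter
 * layout are the standard definitions from hci.h; the chosen scan
 * value is only an example.
 */
static int example_enable_page_scan(struct hci_dev *hdev)
{
	__u8 scan = SCAN_PAGE;

	return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE,
			    sizeof(scan), &scan);
}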
3661/* Get data from the previously sent command */
a9de9248 3662void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
3663{
3664 struct hci_command_hdr *hdr;
3665
3666 if (!hdev->sent_cmd)
3667 return NULL;
3668
3669 hdr = (void *) hdev->sent_cmd->data;
3670
a9de9248 3671 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
3672 return NULL;
3673
f0e09510 3674 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
3675
3676 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3677}
3678
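/* Sketch of the typical consumer of hci_sent_cmd_data(): a command
 * complete handler fetching back the parameters of the command it is
 * completing. The command/handler pairing shown is illustrative.
 */
static void example_cc_write_scan_enable(struct hci_dev *hdev, __u8 status)
{
	void *sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);

	if (!sent)
		return;

	BT_DBG("%s status 0x%2.2x scan 0x%2.2x", hdev->name, status,
	       *((__u8 *) sent));
}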
fbef168f
LP
3679/* Send HCI command and wait for command complete event */
3680struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3681 const void *param, u32 timeout)
3682{
3683 struct sk_buff *skb;
3684
3685 if (!test_bit(HCI_UP, &hdev->flags))
3686 return ERR_PTR(-ENETDOWN);
3687
3688 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3689
3690 hci_req_lock(hdev);
3691 skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3692 hci_req_unlock(hdev);
3693
3694 return skb;
3695}
3696EXPORT_SYMBOL(hci_cmd_sync);
3697
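/* Illustrative sketch for hci_cmd_sync(): issuing a synchronous Read
 * Local Version Information command and consuming the returned event
 * skb. The response layout is the hci_rp_read_local_version
 * definition from hci.h; error handling is kept minimal.
 */
static int example_read_local_version(struct hci_dev *hdev)
{
	struct hci_rp_read_local_version *rp;
	struct sk_buff *skb;

	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
			   HCI_CMD_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	rp = (struct hci_rp_read_local_version *) skb->data;
	BT_DBG("%s hci_ver %u manufacturer %u", hdev->name, rp->hci_ver,
	       __le16_to_cpu(rp->manufacturer));

	kfree_skb(skb);
	return 0;
}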
1da177e4
LT
3698/* Send ACL data */
3699static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3700{
3701 struct hci_acl_hdr *hdr;
3702 int len = skb->len;
3703
badff6d0
ACM
3704 skb_push(skb, HCI_ACL_HDR_SIZE);
3705 skb_reset_transport_header(skb);
9c70220b 3706 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
3707 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3708 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
3709}
3710
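/* Worked example of the packing done by hci_add_acl_hdr() above: for
 * connection handle 0x002a sent with ACL_START (0x02) in the flag
 * bits, hci_handle_pack() keeps the handle in bits 0-11 and shifts
 * the packet boundary/broadcast flags into bits 12-15, yielding
 * 0x202a on the wire (little endian).
 */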
ee22be7e 3711static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 3712 struct sk_buff *skb, __u16 flags)
1da177e4 3713{
ee22be7e 3714 struct hci_conn *conn = chan->conn;
1da177e4
LT
3715 struct hci_dev *hdev = conn->hdev;
3716 struct sk_buff *list;
3717
087bfd99
GP
3718 skb->len = skb_headlen(skb);
3719 skb->data_len = 0;
3720
3721 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
3722
3723 switch (hdev->dev_type) {
3724 case HCI_BREDR:
3725 hci_add_acl_hdr(skb, conn->handle, flags);
3726 break;
3727 case HCI_AMP:
3728 hci_add_acl_hdr(skb, chan->handle, flags);
3729 break;
3730 default:
3731 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3732 return;
3733 }
087bfd99 3734
70f23020
AE
3735 list = skb_shinfo(skb)->frag_list;
3736 if (!list) {
1da177e4
LT
3737 /* Non-fragmented */
3738 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3739
73d80deb 3740 skb_queue_tail(queue, skb);
1da177e4
LT
3741 } else {
3742 /* Fragmented */
3743 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3744
3745 skb_shinfo(skb)->frag_list = NULL;
3746
9cfd5a23
JR
3747 /* Queue all fragments atomically. We need to use spin_lock_bh
3748 * here because on 6LoWPAN links this function can be
3749 * called from softirq context, and using a normal spin lock
3750 * could cause deadlocks.
3751 */
3752 spin_lock_bh(&queue->lock);
1da177e4 3753
73d80deb 3754 __skb_queue_tail(queue, skb);
e702112f
AE
3755
3756 flags &= ~ACL_START;
3757 flags |= ACL_CONT;
1da177e4
LT
3758 do {
3759 skb = list; list = list->next;
8e87d142 3760
0d48d939 3761 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 3762 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
3763
3764 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3765
73d80deb 3766 __skb_queue_tail(queue, skb);
1da177e4
LT
3767 } while (list);
3768
9cfd5a23 3769 spin_unlock_bh(&queue->lock);
1da177e4 3770 }
73d80deb
LAD
3771}
3772
3773void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3774{
ee22be7e 3775 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 3776
f0e09510 3777 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb 3778
ee22be7e 3779 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 3780
3eff45ea 3781 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 3782}
1da177e4
LT
3783
3784/* Send SCO data */
0d861d8b 3785void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
3786{
3787 struct hci_dev *hdev = conn->hdev;
3788 struct hci_sco_hdr hdr;
3789
3790 BT_DBG("%s len %d", hdev->name, skb->len);
3791
aca3192c 3792 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
3793 hdr.dlen = skb->len;
3794
badff6d0
ACM
3795 skb_push(skb, HCI_SCO_HDR_SIZE);
3796 skb_reset_transport_header(skb);
9c70220b 3797 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4 3798
0d48d939 3799 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 3800
1da177e4 3801 skb_queue_tail(&conn->data_q, skb);
3eff45ea 3802 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 3803}
1da177e4
LT
3804
3805/* ---- HCI TX task (outgoing data) ---- */
3806
3807/* HCI Connection scheduler */
6039aa73
GP
3808static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3809 int *quote)
1da177e4
LT
3810{
3811 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 3812 struct hci_conn *conn = NULL, *c;
abc5de8f 3813 unsigned int num = 0, min = ~0;
1da177e4 3814
8e87d142 3815 /* We don't have to lock the device here. Connections are always
1da177e4 3816 * added and removed with TX task disabled. */
bf4c6325
GP
3817
3818 rcu_read_lock();
3819
3820 list_for_each_entry_rcu(c, &h->list, list) {
769be974 3821 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 3822 continue;
769be974
MH
3823
3824 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3825 continue;
3826
1da177e4
LT
3827 num++;
3828
3829 if (c->sent < min) {
3830 min = c->sent;
3831 conn = c;
3832 }
52087a79
LAD
3833
3834 if (hci_conn_num(hdev, type) == num)
3835 break;
1da177e4
LT
3836 }
3837
bf4c6325
GP
3838 rcu_read_unlock();
3839
1da177e4 3840 if (conn) {
6ed58ec5
VT
3841 int cnt, q;
3842
3843 switch (conn->type) {
3844 case ACL_LINK:
3845 cnt = hdev->acl_cnt;
3846 break;
3847 case SCO_LINK:
3848 case ESCO_LINK:
3849 cnt = hdev->sco_cnt;
3850 break;
3851 case LE_LINK:
3852 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3853 break;
3854 default:
3855 cnt = 0;
3856 BT_ERR("Unknown link type");
3857 }
3858
3859 q = cnt / num;
1da177e4
LT
3860 *quote = q ? q : 1;
3861 } else
3862 *quote = 0;
3863
3864 BT_DBG("conn %p quote %d", conn, *quote);
3865 return conn;
3866}
3867
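/* Worked example for the quota computed by hci_low_sent(): with
 * hdev->acl_cnt == 7 free controller buffers shared by num == 3
 * eligible connections, q = 7 / 3 = 2, so the least-busy connection
 * may send two packets this round; a zero quotient is bumped to 1 so
 * the scheduler always makes progress.
 */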
6039aa73 3868static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
3869{
3870 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 3871 struct hci_conn *c;
1da177e4 3872
bae1f5d9 3873 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 3874
bf4c6325
GP
3875 rcu_read_lock();
3876
1da177e4 3877 /* Kill stalled connections */
bf4c6325 3878 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 3879 if (c->type == type && c->sent) {
6ed93dc6
AE
3880 BT_ERR("%s killing stalled connection %pMR",
3881 hdev->name, &c->dst);
bed71748 3882 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
3883 }
3884 }
bf4c6325
GP
3885
3886 rcu_read_unlock();
1da177e4
LT
3887}
3888
6039aa73
GP
3889static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3890 int *quote)
1da177e4 3891{
73d80deb
LAD
3892 struct hci_conn_hash *h = &hdev->conn_hash;
3893 struct hci_chan *chan = NULL;
abc5de8f 3894 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 3895 struct hci_conn *conn;
73d80deb
LAD
3896 int cnt, q, conn_num = 0;
3897
3898 BT_DBG("%s", hdev->name);
3899
bf4c6325
GP
3900 rcu_read_lock();
3901
3902 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
3903 struct hci_chan *tmp;
3904
3905 if (conn->type != type)
3906 continue;
3907
3908 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3909 continue;
3910
3911 conn_num++;
3912
8192edef 3913 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
3914 struct sk_buff *skb;
3915
3916 if (skb_queue_empty(&tmp->data_q))
3917 continue;
3918
3919 skb = skb_peek(&tmp->data_q);
3920 if (skb->priority < cur_prio)
3921 continue;
3922
3923 if (skb->priority > cur_prio) {
3924 num = 0;
3925 min = ~0;
3926 cur_prio = skb->priority;
3927 }
3928
3929 num++;
3930
3931 if (conn->sent < min) {
3932 min = conn->sent;
3933 chan = tmp;
3934 }
3935 }
3936
3937 if (hci_conn_num(hdev, type) == conn_num)
3938 break;
3939 }
3940
bf4c6325
GP
3941 rcu_read_unlock();
3942
73d80deb
LAD
3943 if (!chan)
3944 return NULL;
3945
3946 switch (chan->conn->type) {
3947 case ACL_LINK:
3948 cnt = hdev->acl_cnt;
3949 break;
bd1eb66b
AE
3950 case AMP_LINK:
3951 cnt = hdev->block_cnt;
3952 break;
73d80deb
LAD
3953 case SCO_LINK:
3954 case ESCO_LINK:
3955 cnt = hdev->sco_cnt;
3956 break;
3957 case LE_LINK:
3958 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3959 break;
3960 default:
3961 cnt = 0;
3962 BT_ERR("Unknown link type");
3963 }
3964
3965 q = cnt / num;
3966 *quote = q ? q : 1;
3967 BT_DBG("chan %p quote %d", chan, *quote);
3968 return chan;
3969}
3970
02b20f0b
LAD
3971static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3972{
3973 struct hci_conn_hash *h = &hdev->conn_hash;
3974 struct hci_conn *conn;
3975 int num = 0;
3976
3977 BT_DBG("%s", hdev->name);
3978
bf4c6325
GP
3979 rcu_read_lock();
3980
3981 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
3982 struct hci_chan *chan;
3983
3984 if (conn->type != type)
3985 continue;
3986
3987 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3988 continue;
3989
3990 num++;
3991
8192edef 3992 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
3993 struct sk_buff *skb;
3994
3995 if (chan->sent) {
3996 chan->sent = 0;
3997 continue;
3998 }
3999
4000 if (skb_queue_empty(&chan->data_q))
4001 continue;
4002
4003 skb = skb_peek(&chan->data_q);
4004 if (skb->priority >= HCI_PRIO_MAX - 1)
4005 continue;
4006
4007 skb->priority = HCI_PRIO_MAX - 1;
4008
4009 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 4010 skb->priority);
02b20f0b
LAD
4011 }
4012
4013 if (hci_conn_num(hdev, type) == num)
4014 break;
4015 }
bf4c6325
GP
4016
4017 rcu_read_unlock();
4018
02b20f0b
LAD
4019}
4020
b71d385a
AE
4021static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4022{
4023 /* Calculate count of blocks used by this packet */
4024 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4025}
4026
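/* Worked example for __get_blocks(): with skb->len == 1024,
 * HCI_ACL_HDR_SIZE == 4 and hdev->block_len == 256, the 1020-byte
 * payload occupies DIV_ROUND_UP(1020, 256) == 4 controller blocks.
 */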
6039aa73 4027static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 4028{
d7a5a11d 4029 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1da177e4
LT
4030 /* ACL tx timeout must be longer than maximum
4031 * link supervision timeout (40.9 seconds) */
63d2bc1b 4032 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 4033 HCI_ACL_TX_TIMEOUT))
bae1f5d9 4034 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 4035 }
63d2bc1b 4036}
1da177e4 4037
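/* Worked numbers for __check_timeout() above: HCI_ACL_TX_TIMEOUT is
 * 45 seconds in this tree, deliberately above the worst-case link
 * supervision timeout of 0xffff slots * 0.625 ms = 40.9 seconds.
 */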
6039aa73 4038static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
4039{
4040 unsigned int cnt = hdev->acl_cnt;
4041 struct hci_chan *chan;
4042 struct sk_buff *skb;
4043 int quote;
4044
4045 __check_timeout(hdev, cnt);
04837f64 4046
73d80deb 4047 while (hdev->acl_cnt &&
a8c5fb1a 4048 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
4049 u32 priority = (skb_peek(&chan->data_q))->priority;
4050 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4051 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4052 skb->len, skb->priority);
73d80deb 4053
ec1cce24
LAD
4054 /* Stop if priority has changed */
4055 if (skb->priority < priority)
4056 break;
4057
4058 skb = skb_dequeue(&chan->data_q);
4059
73d80deb 4060 hci_conn_enter_active_mode(chan->conn,
04124681 4061 bt_cb(skb)->force_active);
04837f64 4062
57d17d70 4063 hci_send_frame(hdev, skb);
1da177e4
LT
4064 hdev->acl_last_tx = jiffies;
4065
4066 hdev->acl_cnt--;
73d80deb
LAD
4067 chan->sent++;
4068 chan->conn->sent++;
1da177e4
LT
4069 }
4070 }
02b20f0b
LAD
4071
4072 if (cnt != hdev->acl_cnt)
4073 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
4074}
4075
6039aa73 4076static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 4077{
63d2bc1b 4078 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
4079 struct hci_chan *chan;
4080 struct sk_buff *skb;
4081 int quote;
bd1eb66b 4082 u8 type;
b71d385a 4083
63d2bc1b 4084 __check_timeout(hdev, cnt);
b71d385a 4085
bd1eb66b
AE
4086 BT_DBG("%s", hdev->name);
4087
4088 if (hdev->dev_type == HCI_AMP)
4089 type = AMP_LINK;
4090 else
4091 type = ACL_LINK;
4092
b71d385a 4093 while (hdev->block_cnt > 0 &&
bd1eb66b 4094 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
4095 u32 priority = (skb_peek(&chan->data_q))->priority;
4096 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4097 int blocks;
4098
4099 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4100 skb->len, skb->priority);
b71d385a
AE
4101
4102 /* Stop if priority has changed */
4103 if (skb->priority < priority)
4104 break;
4105
4106 skb = skb_dequeue(&chan->data_q);
4107
4108 blocks = __get_blocks(hdev, skb);
4109 if (blocks > hdev->block_cnt)
4110 return;
4111
4112 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 4113 bt_cb(skb)->force_active);
b71d385a 4114
57d17d70 4115 hci_send_frame(hdev, skb);
b71d385a
AE
4116 hdev->acl_last_tx = jiffies;
4117
4118 hdev->block_cnt -= blocks;
4119 quote -= blocks;
4120
4121 chan->sent += blocks;
4122 chan->conn->sent += blocks;
4123 }
4124 }
4125
4126 if (cnt != hdev->block_cnt)
bd1eb66b 4127 hci_prio_recalculate(hdev, type);
b71d385a
AE
4128}
4129
6039aa73 4130static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
4131{
4132 BT_DBG("%s", hdev->name);
4133
bd1eb66b
AE
4134 /* No ACL link over BR/EDR controller */
4135 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4136 return;
4137
4138 /* No AMP link over AMP controller */
4139 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
4140 return;
4141
4142 switch (hdev->flow_ctl_mode) {
4143 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4144 hci_sched_acl_pkt(hdev);
4145 break;
4146
4147 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4148 hci_sched_acl_blk(hdev);
4149 break;
4150 }
4151}
4152
1da177e4 4153/* Schedule SCO */
6039aa73 4154static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
4155{
4156 struct hci_conn *conn;
4157 struct sk_buff *skb;
4158 int quote;
4159
4160 BT_DBG("%s", hdev->name);
4161
52087a79
LAD
4162 if (!hci_conn_num(hdev, SCO_LINK))
4163 return;
4164
1da177e4
LT
4165 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4166 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4167 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4168 hci_send_frame(hdev, skb);
1da177e4
LT
4169
4170 conn->sent++;
4171 if (conn->sent == ~0)
4172 conn->sent = 0;
4173 }
4174 }
4175}
4176
6039aa73 4177static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
4178{
4179 struct hci_conn *conn;
4180 struct sk_buff *skb;
4181 int quote;
4182
4183 BT_DBG("%s", hdev->name);
4184
52087a79
LAD
4185 if (!hci_conn_num(hdev, ESCO_LINK))
4186 return;
4187
8fc9ced3
GP
4188 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4189 &quote))) {
b6a0dc82
MH
4190 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4191 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4192 hci_send_frame(hdev, skb);
b6a0dc82
MH
4193
4194 conn->sent++;
4195 if (conn->sent == ~0)
4196 conn->sent = 0;
4197 }
4198 }
4199}
4200
6039aa73 4201static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 4202{
73d80deb 4203 struct hci_chan *chan;
6ed58ec5 4204 struct sk_buff *skb;
02b20f0b 4205 int quote, cnt, tmp;
6ed58ec5
VT
4206
4207 BT_DBG("%s", hdev->name);
4208
52087a79
LAD
4209 if (!hci_conn_num(hdev, LE_LINK))
4210 return;
4211
d7a5a11d 4212 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
6ed58ec5
VT
4213 /* LE tx timeout must be longer than maximum
4214 * link supervision timeout (40.9 seconds) */
bae1f5d9 4215 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 4216 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 4217 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
4218 }
4219
4220 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 4221 tmp = cnt;
73d80deb 4222 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
4223 u32 priority = (skb_peek(&chan->data_q))->priority;
4224 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4225 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4226 skb->len, skb->priority);
6ed58ec5 4227
ec1cce24
LAD
4228 /* Stop if priority has changed */
4229 if (skb->priority < priority)
4230 break;
4231
4232 skb = skb_dequeue(&chan->data_q);
4233
57d17d70 4234 hci_send_frame(hdev, skb);
6ed58ec5
VT
4235 hdev->le_last_tx = jiffies;
4236
4237 cnt--;
73d80deb
LAD
4238 chan->sent++;
4239 chan->conn->sent++;
6ed58ec5
VT
4240 }
4241 }
73d80deb 4242
6ed58ec5
VT
4243 if (hdev->le_pkts)
4244 hdev->le_cnt = cnt;
4245 else
4246 hdev->acl_cnt = cnt;
02b20f0b
LAD
4247
4248 if (cnt != tmp)
4249 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
4250}
4251
3eff45ea 4252static void hci_tx_work(struct work_struct *work)
1da177e4 4253{
3eff45ea 4254 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
4255 struct sk_buff *skb;
4256
6ed58ec5 4257 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 4258 hdev->sco_cnt, hdev->le_cnt);
1da177e4 4259
d7a5a11d 4260 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
52de599e
MH
4261 /* Schedule queues and send stuff to HCI driver */
4262 hci_sched_acl(hdev);
4263 hci_sched_sco(hdev);
4264 hci_sched_esco(hdev);
4265 hci_sched_le(hdev);
4266 }
6ed58ec5 4267
1da177e4
LT
4268 /* Send next queued raw (unknown type) packet */
4269 while ((skb = skb_dequeue(&hdev->raw_q)))
57d17d70 4270 hci_send_frame(hdev, skb);
1da177e4
LT
4271}
4272
25985edc 4273/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
4274
4275/* ACL data packet */
6039aa73 4276static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
4277{
4278 struct hci_acl_hdr *hdr = (void *) skb->data;
4279 struct hci_conn *conn;
4280 __u16 handle, flags;
4281
4282 skb_pull(skb, HCI_ACL_HDR_SIZE);
4283
4284 handle = __le16_to_cpu(hdr->handle);
4285 flags = hci_flags(handle);
4286 handle = hci_handle(handle);
4287
f0e09510 4288 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 4289 handle, flags);
1da177e4
LT
4290
4291 hdev->stat.acl_rx++;
4292
4293 hci_dev_lock(hdev);
4294 conn = hci_conn_hash_lookup_handle(hdev, handle);
4295 hci_dev_unlock(hdev);
8e87d142 4296
1da177e4 4297 if (conn) {
65983fc7 4298 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 4299
1da177e4 4300 /* Send to upper protocol */
686ebf28
UF
4301 l2cap_recv_acldata(conn, skb, flags);
4302 return;
1da177e4 4303 } else {
8e87d142 4304 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 4305 hdev->name, handle);
1da177e4
LT
4306 }
4307
4308 kfree_skb(skb);
4309}
4310
4311/* SCO data packet */
6039aa73 4312static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
4313{
4314 struct hci_sco_hdr *hdr = (void *) skb->data;
4315 struct hci_conn *conn;
4316 __u16 handle;
4317
4318 skb_pull(skb, HCI_SCO_HDR_SIZE);
4319
4320 handle = __le16_to_cpu(hdr->handle);
4321
f0e09510 4322 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
4323
4324 hdev->stat.sco_rx++;
4325
4326 hci_dev_lock(hdev);
4327 conn = hci_conn_hash_lookup_handle(hdev, handle);
4328 hci_dev_unlock(hdev);
4329
4330 if (conn) {
1da177e4 4331 /* Send to upper protocol */
686ebf28
UF
4332 sco_recv_scodata(conn, skb);
4333 return;
1da177e4 4334 } else {
8e87d142 4335 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 4336 hdev->name, handle);
1da177e4
LT
4337 }
4338
4339 kfree_skb(skb);
4340}
4341
9238f36a
JH
4342static bool hci_req_is_complete(struct hci_dev *hdev)
4343{
4344 struct sk_buff *skb;
4345
4346 skb = skb_peek(&hdev->cmd_q);
4347 if (!skb)
4348 return true;
4349
db6e3e8d 4350 return bt_cb(skb)->req.start;
9238f36a
JH
4351}
4352
42c6b129
JH
4353static void hci_resend_last(struct hci_dev *hdev)
4354{
4355 struct hci_command_hdr *sent;
4356 struct sk_buff *skb;
4357 u16 opcode;
4358
4359 if (!hdev->sent_cmd)
4360 return;
4361
4362 sent = (void *) hdev->sent_cmd->data;
4363 opcode = __le16_to_cpu(sent->opcode);
4364 if (opcode == HCI_OP_RESET)
4365 return;
4366
4367 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4368 if (!skb)
4369 return;
4370
4371 skb_queue_head(&hdev->cmd_q, skb);
4372 queue_work(hdev->workqueue, &hdev->cmd_work);
4373}
4374
e6214487
JH
4375void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4376 hci_req_complete_t *req_complete,
4377 hci_req_complete_skb_t *req_complete_skb)
9238f36a 4378{
9238f36a
JH
4379 struct sk_buff *skb;
4380 unsigned long flags;
4381
4382 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4383
42c6b129
JH
4384 /* If the completed command doesn't match the last one that was
4385 * sent, we need to do special handling of it.
9238f36a 4386 */
42c6b129
JH
4387 if (!hci_sent_cmd_data(hdev, opcode)) {
4388 /* Some CSR based controllers generate a spontaneous
4389 * reset complete event during init and any pending
4390 * command will never be completed. In such a case we
4391 * need to resend whatever was the last sent
4392 * command.
4393 */
4394 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4395 hci_resend_last(hdev);
4396
9238f36a 4397 return;
42c6b129 4398 }
9238f36a
JH
4399
4400 /* If the command succeeded and there are still more commands in
4401 * this request, the request is not yet complete.
4402 */
4403 if (!status && !hci_req_is_complete(hdev))
4404 return;
4405
4406 /* If this was the last command in a request, the complete
4407 * callback would be found in hdev->sent_cmd instead of the
4408 * command queue (hdev->cmd_q).
4409 */
e6214487
JH
4410 if (bt_cb(hdev->sent_cmd)->req.complete) {
4411 *req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4412 return;
4413 }
53e21fbc 4414
e6214487
JH
4415 if (bt_cb(hdev->sent_cmd)->req.complete_skb) {
4416 *req_complete_skb = bt_cb(hdev->sent_cmd)->req.complete_skb;
4417 return;
9238f36a
JH
4418 }
4419
4420 /* Remove all pending commands belonging to this request */
4421 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4422 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
db6e3e8d 4423 if (bt_cb(skb)->req.start) {
9238f36a
JH
4424 __skb_queue_head(&hdev->cmd_q, skb);
4425 break;
4426 }
4427
e6214487
JH
4428 *req_complete = bt_cb(skb)->req.complete;
4429 *req_complete_skb = bt_cb(skb)->req.complete_skb;
9238f36a
JH
4430 kfree_skb(skb);
4431 }
4432 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
9238f36a
JH
4433}
4434
b78752cc 4435static void hci_rx_work(struct work_struct *work)
1da177e4 4436{
b78752cc 4437 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
4438 struct sk_buff *skb;
4439
4440 BT_DBG("%s", hdev->name);
4441
1da177e4 4442 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
4443 /* Send copy to monitor */
4444 hci_send_to_monitor(hdev, skb);
4445
1da177e4
LT
4446 if (atomic_read(&hdev->promisc)) {
4447 /* Send copy to the sockets */
470fe1b5 4448 hci_send_to_sock(hdev, skb);
1da177e4
LT
4449 }
4450
d7a5a11d 4451 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1da177e4
LT
4452 kfree_skb(skb);
4453 continue;
4454 }
4455
4456 if (test_bit(HCI_INIT, &hdev->flags)) {
4457 /* Don't process data packets in these states. */
0d48d939 4458 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
4459 case HCI_ACLDATA_PKT:
4460 case HCI_SCODATA_PKT:
4461 kfree_skb(skb);
4462 continue;
3ff50b79 4463 }
1da177e4
LT
4464 }
4465
4466 /* Process frame */
0d48d939 4467 switch (bt_cb(skb)->pkt_type) {
1da177e4 4468 case HCI_EVENT_PKT:
b78752cc 4469 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
4470 hci_event_packet(hdev, skb);
4471 break;
4472
4473 case HCI_ACLDATA_PKT:
4474 BT_DBG("%s ACL data packet", hdev->name);
4475 hci_acldata_packet(hdev, skb);
4476 break;
4477
4478 case HCI_SCODATA_PKT:
4479 BT_DBG("%s SCO data packet", hdev->name);
4480 hci_scodata_packet(hdev, skb);
4481 break;
4482
4483 default:
4484 kfree_skb(skb);
4485 break;
4486 }
4487 }
1da177e4
LT
4488}
4489
c347b765 4490static void hci_cmd_work(struct work_struct *work)
1da177e4 4491{
c347b765 4492 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
4493 struct sk_buff *skb;
4494
2104786b
AE
4495 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4496 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 4497
1da177e4 4498 /* Send queued commands */
5a08ecce
AE
4499 if (atomic_read(&hdev->cmd_cnt)) {
4500 skb = skb_dequeue(&hdev->cmd_q);
4501 if (!skb)
4502 return;
4503
7585b97a 4504 kfree_skb(hdev->sent_cmd);
1da177e4 4505
a675d7f1 4506 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 4507 if (hdev->sent_cmd) {
1da177e4 4508 atomic_dec(&hdev->cmd_cnt);
57d17d70 4509 hci_send_frame(hdev, skb);
7bdb8a5c 4510 if (test_bit(HCI_RESET, &hdev->flags))
65cc2b49 4511 cancel_delayed_work(&hdev->cmd_timer);
7bdb8a5c 4512 else
65cc2b49
MH
4513 schedule_delayed_work(&hdev->cmd_timer,
4514 HCI_CMD_TIMEOUT);
1da177e4
LT
4515 } else {
4516 skb_queue_head(&hdev->cmd_q, skb);
c347b765 4517 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
4518 }
4519 }
4520}