]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - net/bluetooth/hci_core.c
Bluetooth: Add support setup stage internal notification event
[mirror_ubuntu-bionic-kernel.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
8c520a59 28#include <linux/export.h>
3df92b31 29#include <linux/idr.h>
8c520a59 30#include <linux/rfkill.h>
baf27f6e 31#include <linux/debugfs.h>
99780a7b 32#include <linux/crypto.h>
47219839 33#include <asm/unaligned.h>
1da177e4
LT
34
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
4bc58f51 37#include <net/bluetooth/l2cap.h>
af58925c 38#include <net/bluetooth/mgmt.h>
1da177e4 39
0857dd3b 40#include "hci_request.h"
60c5f5fb 41#include "hci_debugfs.h"
970c4e46
JH
42#include "smp.h"
43
b78752cc 44static void hci_rx_work(struct work_struct *work);
c347b765 45static void hci_cmd_work(struct work_struct *work);
3eff45ea 46static void hci_tx_work(struct work_struct *work);
1da177e4 47
1da177e4
LT
48/* HCI device list */
49LIST_HEAD(hci_dev_list);
50DEFINE_RWLOCK(hci_dev_list_lock);
51
52/* HCI callback list */
53LIST_HEAD(hci_cb_list);
fba7ecf0 54DEFINE_MUTEX(hci_cb_list_lock);
1da177e4 55
3df92b31
SL
56/* HCI ID Numbering */
57static DEFINE_IDA(hci_index_ida);
58
899de765
MH
59/* ----- HCI requests ----- */
60
61#define HCI_REQ_DONE 0
62#define HCI_REQ_PEND 1
63#define HCI_REQ_CANCELED 2
64
65#define hci_req_lock(d) mutex_lock(&d->req_lock)
66#define hci_req_unlock(d) mutex_unlock(&d->req_lock)
67
1da177e4
LT
68/* ---- HCI notifications ---- */
69
/* Forward a device state event to the HCI socket layer so that
 * listening sockets (e.g. monitors) are informed about it.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
74
baf27f6e
MH
75/* ---- HCI debugfs entries ---- */
76
4b4148e9
MH
77static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
78 size_t count, loff_t *ppos)
79{
80 struct hci_dev *hdev = file->private_data;
81 char buf[3];
82
b7cb93e5 83 buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y': 'N';
4b4148e9
MH
84 buf[1] = '\n';
85 buf[2] = '\0';
86 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
87}
88
/* debugfs write handler for "dut_mode": accepts a boolean string and
 * switches the controller in or out of Device Under Test mode.
 *
 * Entering DUT mode sends HCI_OP_ENABLE_DUT_MODE; since there is no
 * dedicated "disable" command, leaving it is done via HCI_OP_RESET.
 * Returns the number of bytes consumed or a negative errno.
 */
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	/* The controller must be up before any HCI command can be sent */
	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	/* Nothing to do if the requested state is already active */
	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
		return -EALREADY;

	/* Serialize with other synchronous requests on this device */
	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* The command complete skb itself carries no needed payload here */
	kfree_skb(skb);

	hci_dev_change_flag(hdev, HCI_DUT_MODE);

	return count;
}
129
/* File operations backing the "dut_mode" debugfs entry */
static const struct file_operations dut_mode_fops = {
	.open = simple_open,
	.read = dut_mode_read,
	.write = dut_mode_write,
	.llseek = default_llseek,
};
136
4b4113d6
MH
137static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
138 size_t count, loff_t *ppos)
139{
140 struct hci_dev *hdev = file->private_data;
141 char buf[3];
142
143 buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y': 'N';
144 buf[1] = '\n';
145 buf[2] = '\0';
146 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
147}
148
/* debugfs write handler for "vendor_diag": accepts a boolean string and
 * enables or disables vendor-specific diagnostic reporting via the
 * driver's set_diag callback, then mirrors the result in the
 * HCI_VENDOR_DIAG flag. Returns bytes consumed or a negative errno.
 */
static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	/* When the diagnostic flags are not persistent and the transport
	 * is not active, then there is no need for the vendor callback.
	 *
	 * Instead just store the desired value. If needed the setting
	 * will be programmed when the controller gets powered on.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    !test_bit(HCI_RUNNING, &hdev->flags))
		goto done;

	/* Serialize the vendor callback with synchronous HCI requests */
	hci_req_lock(hdev);
	err = hdev->set_diag(hdev, enable);
	hci_req_unlock(hdev);

	if (err < 0)
		return err;

done:
	if (enable)
		hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
	else
		hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

	return count;
}
190
/* File operations backing the "vendor_diag" debugfs entry */
static const struct file_operations vendor_diag_fops = {
	.open = simple_open,
	.read = vendor_diag_read,
	.write = vendor_diag_write,
	.llseek = default_llseek,
};
197
f640ee98
MH
/* Create the basic debugfs entries for a controller: "dut_mode" always,
 * and "vendor_diag" only when the driver provides a set_diag callback.
 */
static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
	debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
			    &dut_mode_fops);

	if (hdev->set_diag)
		debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
				    &vendor_diag_fops);
}
207
1da177e4
LT
208/* ---- HCI requests ---- */
209
f60cb305
JH
/* Completion callback for synchronous requests: records the result (and
 * the response skb, if any) on the hci_dev and wakes up the waiter that
 * is sleeping in __hci_req_sync()/__hci_cmd_sync_ev().
 */
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		/* Keep a reference for the waiter; it owns req_skb from here */
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
223
/* Abort a pending synchronous request with the given errno value and
 * wake up the waiter so it can return the error to its caller.
 */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
234
/* Send a single HCI command and sleep until the matching event arrives
 * or the timeout expires.
 *
 * @event: the event code that completes the command (0 means the default
 *         Command Complete/Status handling via hci_req_add_ev()).
 *
 * Returns the response skb on success (caller must kfree_skb() it), or
 * an ERR_PTR: -EINTR on signal, -ETIMEDOUT on timeout, -ENODATA when no
 * response payload was captured.
 *
 * NOTE(review): callers appear to serialize via hci_req_lock() since
 * req_status/req_skb are per-device state — confirm at call sites.
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	/* Queue ourselves before running the request so the completion
	 * wakeup cannot be missed.
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	/* Reset the per-device request state and take ownership of the
	 * response skb stored by hci_req_sync_complete().
	 */
	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
299
/* Convenience wrapper around __hci_cmd_sync_ev() with event 0, i.e. the
 * default Command Complete/Status completion. Same return contract.
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
306
1da177e4 307/* Execute request and wait for completion. */
/* Execute request and wait for completion.
 *
 * Builds a request by invoking @func, runs it, and sleeps until
 * hci_req_sync_complete() signals completion or @timeout expires.
 * Returns 0 on success or a negative errno (-EINTR, -ETIMEDOUT, or the
 * translated HCI status). Callers must hold the request lock; see
 * hci_req_sync() for the locked variant.
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	/* Queue on the wait queue before running the request so that the
	 * completion wakeup cannot race with going to sleep.
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
373
/* Locked wrapper around __hci_req_sync(): refuses to run when the device
 * is down and serializes all synchronous requests via the request lock.
 */
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
391
/* Request builder: queue an HCI Reset and mark the device as resetting */
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}
400
/* Stage-1 init for BR/EDR controllers: packet-based flow control and the
 * basic identity reads (features, version, address).
 */
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
414
/* Stage-1 init for AMP controllers: block-based flow control and the
 * AMP-specific capability reads.
 */
static void amp_init1(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
437
/* Stage-2 init for AMP controllers */
static void amp_init2(struct hci_request *req)
{
	/* Read Local Supported Features. Not all AMP controllers
	 * support this so it's placed conditionally in the second
	 * stage init.
	 */
	if (req->hdev->commands[14] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
}
447
/* Stage-1 init request: optional reset, then dispatch to the transport
 * specific init (BR/EDR or AMP) based on the device type.
 */
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init1(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
472
/* Stage-2 BR/EDR setup: read basic controller parameters and configure
 * event filtering and the connection accept timeout.
 */
static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 slots) */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}
504
/* Stage-2 LE setup: read LE controller parameters and clear the white
 * list; LE-only controllers get HCI_LE_ENABLED set implicitly.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}
528
/* Build the Set Event Mask command based on the controller's LMP
 * feature bits: start from a defensive default and enable exactly the
 * events the controller can generate.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
605
/* Stage-2 init request: transport-specific setup plus the feature reads
 * and writes that depend on the stage-1 results (features/version).
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->dev_type == HCI_AMP)
		return amp_init2(req);

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			u8 mode = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			/* SSP disabled: clear the EIR data on the controller */
			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI
		 * events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
687
/* Build the default link policy from the controller's LMP capabilities
 * (role switch, hold, sniff, park) and write it to the controller.
 */
static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}
706
/* Write the LE Host Supported setting on dual-mode controllers when the
 * desired state differs from what the controller currently reports.
 */
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	/* Only send the command when the host LE state actually changes */
	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
727
/* Build and send the second page of the event mask, covering the
 * Connectionless Slave Broadcast roles and Authenticated Payload
 * Timeout, based on controller capabilities.
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
759
/* Stage-3 init request: event masks, link policy, page scan parameters,
 * the LE event mask and LE parameter reads, and extended feature pages.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	if (hdev->commands[6] & 0x20 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x0f;

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10; /* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20; /* LE Remote Connection
					    * Parameter Request
					    */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40; /* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04; /* LE Direct Advertising
					    * Report
					    */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80; /* LE Read Local P-256
					    * Public Key Complete
					    */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01; /* LE Generate DHKey Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
862
/* Stage-4 init request: clean up stored link keys, set event mask page
 * 2, read codec/MWS/sync-train information, and enable Secure
 * Connections — each step gated on actual controller support.
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
915
2177bab5
JH
/* Run the full four-stage init sequence for a configured controller and
 * create the debugfs entries. AMP controllers stop after stage 2.
 * Returns 0 on success or the first stage's negative errno.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first two stages of init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}
972
0ebca7d6
MH
/* Minimal init request used for unconfigured controllers: optional
 * reset, then read the local version and - when the driver is able to
 * change the address - the public BD address.
 */
static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
990
/* Initialization for controllers in the unconfigured state: runs only
 * the hci_init0_req stage (version and optionally BD address). Raw
 * devices are skipped entirely.
 *
 * Returns 0 on success or a negative errno from __hci_req_sync().
 */
static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	return 0;
}
1007
42c6b129 1008static void hci_scan_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1009{
1010 __u8 scan = opt;
1011
42c6b129 1012 BT_DBG("%s %x", req->hdev->name, scan);
1da177e4
LT
1013
1014 /* Inquiry and Page scans */
42c6b129 1015 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
1016}
1017
42c6b129 1018static void hci_auth_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1019{
1020 __u8 auth = opt;
1021
42c6b129 1022 BT_DBG("%s %x", req->hdev->name, auth);
1da177e4
LT
1023
1024 /* Authentication */
42c6b129 1025 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
1026}
1027
42c6b129 1028static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1029{
1030 __u8 encrypt = opt;
1031
42c6b129 1032 BT_DBG("%s %x", req->hdev->name, encrypt);
1da177e4 1033
e4e8e37c 1034 /* Encryption */
42c6b129 1035 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
1036}
1037
42c6b129 1038static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
e4e8e37c
MH
1039{
1040 __le16 policy = cpu_to_le16(opt);
1041
42c6b129 1042 BT_DBG("%s %x", req->hdev->name, policy);
e4e8e37c
MH
1043
1044 /* Default link policy */
42c6b129 1045 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
e4e8e37c
MH
1046}
1047
8e87d142 1048/* Get HCI device by index.
1da177e4
LT
1049 * Device is held on return. */
1050struct hci_dev *hci_dev_get(int index)
1051{
8035ded4 1052 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
1053
1054 BT_DBG("%d", index);
1055
1056 if (index < 0)
1057 return NULL;
1058
1059 read_lock(&hci_dev_list_lock);
8035ded4 1060 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
1061 if (d->id == index) {
1062 hdev = hci_dev_hold(d);
1063 break;
1064 }
1065 }
1066 read_unlock(&hci_dev_list_lock);
1067 return hdev;
1068}
1da177e4
LT
1069
1070/* ---- Inquiry support ---- */
ff9ef578 1071
30dc78e1
JH
1072bool hci_discovery_active(struct hci_dev *hdev)
1073{
1074 struct discovery_state *discov = &hdev->discovery;
1075
6fbe195d 1076 switch (discov->state) {
343f935b 1077 case DISCOVERY_FINDING:
6fbe195d 1078 case DISCOVERY_RESOLVING:
30dc78e1
JH
1079 return true;
1080
6fbe195d
AG
1081 default:
1082 return false;
1083 }
30dc78e1
JH
1084}
1085
ff9ef578
JH
/* Transition the discovery state machine and emit the matching mgmt
 * "discovering" events. A transition to DISCOVERY_STOPPED also
 * re-evaluates background scanning; no event is sent when stopping a
 * discovery that never left the STARTING state.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	/* No-op transitions must not re-send mgmt events */
	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}
1115
1f9b9a5d 1116void hci_inquiry_cache_flush(struct hci_dev *hdev)
1da177e4 1117{
30883512 1118 struct discovery_state *cache = &hdev->discovery;
b57c1a56 1119 struct inquiry_entry *p, *n;
1da177e4 1120
561aafbc
JH
1121 list_for_each_entry_safe(p, n, &cache->all, all) {
1122 list_del(&p->all);
b57c1a56 1123 kfree(p);
1da177e4 1124 }
561aafbc
JH
1125
1126 INIT_LIST_HEAD(&cache->unknown);
1127 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
1128}
1129
a8c5fb1a
GP
1130struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1131 bdaddr_t *bdaddr)
1da177e4 1132{
30883512 1133 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
1134 struct inquiry_entry *e;
1135
6ed93dc6 1136 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 1137
561aafbc
JH
1138 list_for_each_entry(e, &cache->all, all) {
1139 if (!bacmp(&e->data.bdaddr, bdaddr))
1140 return e;
1141 }
1142
1143 return NULL;
1144}
1145
1146struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 1147 bdaddr_t *bdaddr)
561aafbc 1148{
30883512 1149 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
1150 struct inquiry_entry *e;
1151
6ed93dc6 1152 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
1153
1154 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 1155 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
1156 return e;
1157 }
1158
1159 return NULL;
1da177e4
LT
1160}
1161
/* Look up an entry on the name-resolve list. When @bdaddr is
 * BDADDR_ANY, the first entry whose name_state equals @state is
 * returned; otherwise the entry with the matching address. Returns
 * NULL when nothing matches.
 */
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		/* Wildcard lookup: match on name_state instead of address */
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
1180
/* Re-position @ie on the resolve list, which is kept ordered by
 * descending RSSI magnitude (strongest signal first) so that name
 * resolution proceeds from the closest devices. Entries already in
 * NAME_PENDING state are not displaced.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	/* Unlink first; the entry is re-inserted at its sorted position */
	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
1199
af58925c
MH
/* Insert or refresh an inquiry cache entry for the device described by
 * @data. @name_known indicates whether the remote name is already
 * available (e.g. from an extended inquiry result).
 *
 * Returns MGMT_DEV_FOUND_* flags for the device-found event:
 * LEGACY_PAIRING when neither old nor new data indicates SSP support,
 * CONFIRM_NAME when the name still needs to be resolved (or the entry
 * could not be allocated).
 */
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* A fresh inquiry response invalidates any stored OOB data */
	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		/* RSSI changed while waiting for the name: re-sort the
		 * resolve list so stronger devices are resolved first.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Promote an existing entry once the name becomes known, unless
	 * resolution for it is already pending.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
1261
1262static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1263{
30883512 1264 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
1265 struct inquiry_info *info = (struct inquiry_info *) buf;
1266 struct inquiry_entry *e;
1267 int copied = 0;
1268
561aafbc 1269 list_for_each_entry(e, &cache->all, all) {
1da177e4 1270 struct inquiry_data *data = &e->data;
b57c1a56
JH
1271
1272 if (copied >= num)
1273 break;
1274
1da177e4
LT
1275 bacpy(&info->bdaddr, &data->bdaddr);
1276 info->pscan_rep_mode = data->pscan_rep_mode;
1277 info->pscan_period_mode = data->pscan_period_mode;
1278 info->pscan_mode = data->pscan_mode;
1279 memcpy(info->dev_class, data->dev_class, 3);
1280 info->clock_offset = data->clock_offset;
b57c1a56 1281
1da177e4 1282 info++;
b57c1a56 1283 copied++;
1da177e4
LT
1284 }
1285
1286 BT_DBG("cache %p, copied %d", cache, copied);
1287 return copied;
1288}
1289
42c6b129 1290static void hci_inq_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1291{
1292 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
42c6b129 1293 struct hci_dev *hdev = req->hdev;
1da177e4
LT
1294 struct hci_cp_inquiry cp;
1295
1296 BT_DBG("%s", hdev->name);
1297
1298 if (test_bit(HCI_INQUIRY, &hdev->flags))
1299 return;
1300
1301 /* Start Inquiry */
1302 memcpy(&cp.lap, &ir->lap, 3);
1303 cp.length = ir->length;
1304 cp.num_rsp = ir->num_rsp;
42c6b129 1305 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
1306}
1307
1308int hci_inquiry(void __user *arg)
1309{
1310 __u8 __user *ptr = arg;
1311 struct hci_inquiry_req ir;
1312 struct hci_dev *hdev;
1313 int err = 0, do_inquiry = 0, max_rsp;
1314 long timeo;
1315 __u8 *buf;
1316
1317 if (copy_from_user(&ir, ptr, sizeof(ir)))
1318 return -EFAULT;
1319
5a08ecce
AE
1320 hdev = hci_dev_get(ir.dev_id);
1321 if (!hdev)
1da177e4
LT
1322 return -ENODEV;
1323
d7a5a11d 1324 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
0736cfa8
MH
1325 err = -EBUSY;
1326 goto done;
1327 }
1328
d7a5a11d 1329 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
fee746b0
MH
1330 err = -EOPNOTSUPP;
1331 goto done;
1332 }
1333
5b69bef5
MH
1334 if (hdev->dev_type != HCI_BREDR) {
1335 err = -EOPNOTSUPP;
1336 goto done;
1337 }
1338
d7a5a11d 1339 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
56f87901
JH
1340 err = -EOPNOTSUPP;
1341 goto done;
1342 }
1343
09fd0de5 1344 hci_dev_lock(hdev);
8e87d142 1345 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 1346 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1f9b9a5d 1347 hci_inquiry_cache_flush(hdev);
1da177e4
LT
1348 do_inquiry = 1;
1349 }
09fd0de5 1350 hci_dev_unlock(hdev);
1da177e4 1351
04837f64 1352 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
1353
1354 if (do_inquiry) {
01178cd4
JH
1355 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1356 timeo);
70f23020
AE
1357 if (err < 0)
1358 goto done;
3e13fa1e
AG
1359
1360 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1361 * cleared). If it is interrupted by a signal, return -EINTR.
1362 */
74316201 1363 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
3e13fa1e
AG
1364 TASK_INTERRUPTIBLE))
1365 return -EINTR;
70f23020 1366 }
1da177e4 1367
8fc9ced3
GP
1368 /* for unlimited number of responses we will use buffer with
1369 * 255 entries
1370 */
1da177e4
LT
1371 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1372
1373 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1374 * copy it to the user space.
1375 */
01df8c31 1376 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 1377 if (!buf) {
1da177e4
LT
1378 err = -ENOMEM;
1379 goto done;
1380 }
1381
09fd0de5 1382 hci_dev_lock(hdev);
1da177e4 1383 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 1384 hci_dev_unlock(hdev);
1da177e4
LT
1385
1386 BT_DBG("num_rsp %d", ir.num_rsp);
1387
1388 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1389 ptr += sizeof(ir);
1390 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 1391 ir.num_rsp))
1da177e4 1392 err = -EFAULT;
8e87d142 1393 } else
1da177e4
LT
1394 err = -EFAULT;
1395
1396 kfree(buf);
1397
1398done:
1399 hci_dev_put(hdev);
1400 return err;
1401}
1402
/* Power-on path shared by the ioctl, mgmt and user-channel callers.
 * Called with no locks held; takes hdev->req_lock for the duration.
 *
 * Performs pre-flight checks (rfkill, address availability), opens the
 * transport, runs the driver setup callback during the HCI_SETUP
 * phase, and then the appropriate init sequence. On failure everything
 * is torn down again. Returns 0 on success or a negative errno.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		ret = -ENODEV;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random adddress, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	set_bit(HCI_RUNNING, &hdev->flags);
	hci_notify(hdev, HCI_DEV_OPEN);

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
		hci_sock_dev_event(hdev, HCI_DEV_SETUP);

		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			ret = __hci_unconf_init(hdev);
	}

	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
			ret = __hci_init(hdev);
	}

	/* If the HCI Reset command is clearing all diagnostic settings,
	 * then they need to be reprogrammed after the init procedure
	 * completed.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
		ret = hdev->set_diag(hdev, true);

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* Only announce power-on over mgmt once the device is
		 * fully configured and not driven via the user channel.
		 */
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		clear_bit(HCI_RUNNING, &hdev->flags);
		hci_notify(hdev, HCI_DEV_CLOSE);

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
1562
cbed0ca1
JH
1563/* ---- HCI ioctl helpers ---- */
1564
/* HCIDEVUP ioctl entry point: resolve the device index, apply the
 * legacy-ioctl policy checks, then call hci_dev_do_open().
 * Returns 0 on success or a negative errno.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
1619
d7347f3c
JH
1620/* This function requires the caller holds hdev->lock */
1621static void hci_pend_le_actions_clear(struct hci_dev *hdev)
1622{
1623 struct hci_conn_params *p;
1624
f161dd41
JH
1625 list_for_each_entry(p, &hdev->le_conn_params, list) {
1626 if (p->conn) {
1627 hci_conn_drop(p->conn);
f8aaf9b6 1628 hci_conn_put(p->conn);
f161dd41
JH
1629 p->conn = NULL;
1630 }
d7347f3c 1631 list_del_init(&p->action);
f161dd41 1632 }
d7347f3c
JH
1633
1634 BT_DBG("All LE pending actions cleared");
1635}
1636
/* Power-off path shared by the ioctl, mgmt and unregister callers.
 * Cancels all delayed work, flushes queues and work items in a
 * specific order (see drain_workqueue comment), optionally resets the
 * controller, closes the transport and clears volatile state.
 * Always returns 0.
 */
int hci_dev_do_close(struct hci_dev *hdev)
{
	bool auto_off;

	BT_DBG("%s %p", hdev->name, hdev);

	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    test_bit(HCI_UP, &hdev->flags)) {
		/* Execute vendor specific shutdown routine */
		if (hdev->shutdown)
			hdev->shutdown(hdev);
	}

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Device was not up: only the command timer needs stopping */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);

	if (!auto_off && hdev->dev_type == HCI_BREDR)
		mgmt_powered(hdev, 0);

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	smp_unregister(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	clear_bit(HCI_RUNNING, &hdev->flags);
	hci_notify(hdev, HCI_DEV_CLOSE);

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
1761
1762int hci_dev_close(__u16 dev)
1763{
1764 struct hci_dev *hdev;
1765 int err;
1766
70f23020
AE
1767 hdev = hci_dev_get(dev);
1768 if (!hdev)
1da177e4 1769 return -ENODEV;
8ee56540 1770
d7a5a11d 1771 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
0736cfa8
MH
1772 err = -EBUSY;
1773 goto done;
1774 }
1775
a69d8927 1776 if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
8ee56540
MH
1777 cancel_delayed_work(&hdev->power_off);
1778
1da177e4 1779 err = hci_dev_do_close(hdev);
8ee56540 1780
0736cfa8 1781done:
1da177e4
LT
1782 hci_dev_put(hdev);
1783 return err;
1784}
1785
/* Perform the actual HCI reset: drop pending traffic, flush caches
 * and connections, then issue a synchronous HCI Reset command.
 * Called with no locks held; takes hdev->req_lock for the duration.
 */
static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore command credit and zero the per-type packet counters */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

	hci_req_unlock(hdev);
	return ret;
}
1819
5c912495
MH
1820int hci_dev_reset(__u16 dev)
1821{
1822 struct hci_dev *hdev;
1823 int err;
1824
1825 hdev = hci_dev_get(dev);
1826 if (!hdev)
1827 return -ENODEV;
1828
1829 if (!test_bit(HCI_UP, &hdev->flags)) {
1830 err = -ENETDOWN;
1831 goto done;
1832 }
1833
d7a5a11d 1834 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
5c912495
MH
1835 err = -EBUSY;
1836 goto done;
1837 }
1838
d7a5a11d 1839 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
5c912495
MH
1840 err = -EOPNOTSUPP;
1841 goto done;
1842 }
1843
1844 err = hci_dev_do_reset(hdev);
1845
1846done:
1847 hci_dev_put(hdev);
1848 return err;
1849}
1850
1da177e4
LT
1851int hci_dev_reset_stat(__u16 dev)
1852{
1853 struct hci_dev *hdev;
1854 int ret = 0;
1855
70f23020
AE
1856 hdev = hci_dev_get(dev);
1857 if (!hdev)
1da177e4
LT
1858 return -ENODEV;
1859
d7a5a11d 1860 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
0736cfa8
MH
1861 ret = -EBUSY;
1862 goto done;
1863 }
1864
d7a5a11d 1865 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
fee746b0
MH
1866 ret = -EOPNOTSUPP;
1867 goto done;
1868 }
1869
1da177e4
LT
1870 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1871
0736cfa8 1872done:
1da177e4 1873 hci_dev_put(hdev);
1da177e4
LT
1874 return ret;
1875}
1876
123abc08
JH
/* Mirror a scan-enable change done through the legacy HCISETSCAN
 * ioctl into the mgmt flags (HCI_CONNECTABLE/HCI_DISCOVERABLE) and, if
 * the management interface is active, emit the corresponding settings
 * events.
 */
static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	/* test_and_set/clear return the previous value, so "changed" is
	 * true only when the flag actually flipped.
	 */
	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			mgmt_update_adv_data(hdev);

		mgmt_new_settings(hdev);
	}
}
1912
1da177e4
LT
/* Handle the legacy per-device HCI ioctls (HCISETAUTH, HCISETSCAN,
 * HCISETLINKPOL, ...). Userspace passes a struct hci_dev_req; each
 * command either runs a synchronous HCI request or updates fields of
 * the hci_dev directly. Only valid for configured BR/EDR controllers
 * not owned by a user channel.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	/* For the MTU ioctls, dev_opt packs two __u16 values: the
	 * half at offset 1 is the MTU, the half at offset 0 is the
	 * packet count (host byte order).
	 */
	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
2014
/* HCIGETDEVLIST ioctl handler: copy the list of registered HCI
 * devices (id + flags) to userspace. Userspace supplies the maximum
 * entry count in the leading __u16 of the buffer.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the kernel buffer stays at two pages */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Shrink the copy to the number of entries actually filled */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
2064
2065int hci_get_dev_info(void __user *arg)
2066{
2067 struct hci_dev *hdev;
2068 struct hci_dev_info di;
2e84d8db 2069 unsigned long flags;
1da177e4
LT
2070 int err = 0;
2071
2072 if (copy_from_user(&di, arg, sizeof(di)))
2073 return -EFAULT;
2074
70f23020
AE
2075 hdev = hci_dev_get(di.dev_id);
2076 if (!hdev)
1da177e4
LT
2077 return -ENODEV;
2078
2e84d8db
MH
2079 /* When the auto-off is configured it means the transport
2080 * is running, but in that case still indicate that the
2081 * device is actually down.
2082 */
d7a5a11d 2083 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2e84d8db
MH
2084 flags = hdev->flags & ~BIT(HCI_UP);
2085 else
2086 flags = hdev->flags;
c542a06c 2087
1da177e4
LT
2088 strcpy(di.name, hdev->name);
2089 di.bdaddr = hdev->bdaddr;
60f2a3ed 2090 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2e84d8db 2091 di.flags = flags;
1da177e4 2092 di.pkt_type = hdev->pkt_type;
572c7f84
JH
2093 if (lmp_bredr_capable(hdev)) {
2094 di.acl_mtu = hdev->acl_mtu;
2095 di.acl_pkts = hdev->acl_pkts;
2096 di.sco_mtu = hdev->sco_mtu;
2097 di.sco_pkts = hdev->sco_pkts;
2098 } else {
2099 di.acl_mtu = hdev->le_mtu;
2100 di.acl_pkts = hdev->le_pkts;
2101 di.sco_mtu = 0;
2102 di.sco_pkts = 0;
2103 }
1da177e4
LT
2104 di.link_policy = hdev->link_policy;
2105 di.link_mode = hdev->link_mode;
2106
2107 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2108 memcpy(&di.features, &hdev->features, sizeof(di.features));
2109
2110 if (copy_to_user(arg, &di, sizeof(di)))
2111 err = -EFAULT;
2112
2113 hci_dev_put(hdev);
2114
2115 return err;
2116}
2117
2118/* ---- Interface to HCI drivers ---- */
2119
611b30f7
MH
2120static int hci_rfkill_set_block(void *data, bool blocked)
2121{
2122 struct hci_dev *hdev = data;
2123
2124 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2125
d7a5a11d 2126 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
0736cfa8
MH
2127 return -EBUSY;
2128
5e130367 2129 if (blocked) {
a1536da2 2130 hci_dev_set_flag(hdev, HCI_RFKILLED);
d7a5a11d
MH
2131 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2132 !hci_dev_test_flag(hdev, HCI_CONFIG))
bf543036 2133 hci_dev_do_close(hdev);
5e130367 2134 } else {
a358dc11 2135 hci_dev_clear_flag(hdev, HCI_RFKILLED);
1025c04c 2136 }
611b30f7
MH
2137
2138 return 0;
2139}
2140
2141static const struct rfkill_ops hci_rfkill_ops = {
2142 .set_block = hci_rfkill_set_block,
2143};
2144
ab81cbf9
JH
2145static void hci_power_on(struct work_struct *work)
2146{
2147 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
96570ffc 2148 int err;
ab81cbf9
JH
2149
2150 BT_DBG("%s", hdev->name);
2151
cbed0ca1 2152 err = hci_dev_do_open(hdev);
96570ffc 2153 if (err < 0) {
3ad67582 2154 hci_dev_lock(hdev);
96570ffc 2155 mgmt_set_powered_failed(hdev, err);
3ad67582 2156 hci_dev_unlock(hdev);
ab81cbf9 2157 return;
96570ffc 2158 }
ab81cbf9 2159
a5c8f270
MH
2160 /* During the HCI setup phase, a few error conditions are
2161 * ignored and they need to be checked now. If they are still
2162 * valid, it is important to turn the device back off.
2163 */
d7a5a11d
MH
2164 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2165 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
a5c8f270
MH
2166 (hdev->dev_type == HCI_BREDR &&
2167 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2168 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
a358dc11 2169 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
bf543036 2170 hci_dev_do_close(hdev);
d7a5a11d 2171 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
19202573
JH
2172 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2173 HCI_AUTO_OFF_TIMEOUT);
bf543036 2174 }
ab81cbf9 2175
a69d8927 2176 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
4a964404
MH
2177 /* For unconfigured devices, set the HCI_RAW flag
2178 * so that userspace can easily identify them.
4a964404 2179 */
d7a5a11d 2180 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
4a964404 2181 set_bit(HCI_RAW, &hdev->flags);
0602a8ad
MH
2182
2183 /* For fully configured devices, this will send
2184 * the Index Added event. For unconfigured devices,
2185 * it will send Unconfigued Index Added event.
2186 *
2187 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2188 * and no event will be send.
2189 */
2190 mgmt_index_added(hdev);
a69d8927 2191 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
5ea234d3
MH
2192 /* When the controller is now configured, then it
2193 * is important to clear the HCI_RAW flag.
2194 */
d7a5a11d 2195 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
5ea234d3
MH
2196 clear_bit(HCI_RAW, &hdev->flags);
2197
d603b76b
MH
2198 /* Powering on the controller with HCI_CONFIG set only
2199 * happens with the transition from unconfigured to
2200 * configured. This will send the Index Added event.
2201 */
744cf19e 2202 mgmt_index_added(hdev);
fee746b0 2203 }
ab81cbf9
JH
2204}
2205
2206static void hci_power_off(struct work_struct *work)
2207{
3243553f 2208 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 2209 power_off.work);
ab81cbf9
JH
2210
2211 BT_DBG("%s", hdev->name);
2212
8ee56540 2213 hci_dev_do_close(hdev);
ab81cbf9
JH
2214}
2215
c7741d16
MH
2216static void hci_error_reset(struct work_struct *work)
2217{
2218 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2219
2220 BT_DBG("%s", hdev->name);
2221
2222 if (hdev->hw_error)
2223 hdev->hw_error(hdev, hdev->hw_error_code);
2224 else
2225 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2226 hdev->hw_error_code);
2227
2228 if (hci_dev_do_close(hdev))
2229 return;
2230
c7741d16
MH
2231 hci_dev_do_open(hdev);
2232}
2233
16ab91ab
JH
2234static void hci_discov_off(struct work_struct *work)
2235{
2236 struct hci_dev *hdev;
16ab91ab
JH
2237
2238 hdev = container_of(work, struct hci_dev, discov_off.work);
2239
2240 BT_DBG("%s", hdev->name);
2241
d1967ff8 2242 mgmt_discoverable_timeout(hdev);
16ab91ab
JH
2243}
2244
5d900e46
FG
2245static void hci_adv_timeout_expire(struct work_struct *work)
2246{
2247 struct hci_dev *hdev;
2248
2249 hdev = container_of(work, struct hci_dev, adv_instance_expire.work);
2250
2251 BT_DBG("%s", hdev->name);
2252
2253 mgmt_adv_timeout_expired(hdev);
2254}
2255
35f7498a 2256void hci_uuids_clear(struct hci_dev *hdev)
2aeb9a1a 2257{
4821002c 2258 struct bt_uuid *uuid, *tmp;
2aeb9a1a 2259
4821002c
JH
2260 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2261 list_del(&uuid->list);
2aeb9a1a
JH
2262 kfree(uuid);
2263 }
2aeb9a1a
JH
2264}
2265
35f7498a 2266void hci_link_keys_clear(struct hci_dev *hdev)
55ed8ca1 2267{
0378b597 2268 struct link_key *key;
55ed8ca1 2269
0378b597
JH
2270 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2271 list_del_rcu(&key->list);
2272 kfree_rcu(key, rcu);
55ed8ca1 2273 }
55ed8ca1
JH
2274}
2275
35f7498a 2276void hci_smp_ltks_clear(struct hci_dev *hdev)
b899efaf 2277{
970d0f1b 2278 struct smp_ltk *k;
b899efaf 2279
970d0f1b
JH
2280 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2281 list_del_rcu(&k->list);
2282 kfree_rcu(k, rcu);
b899efaf 2283 }
b899efaf
VCG
2284}
2285
970c4e46
JH
2286void hci_smp_irks_clear(struct hci_dev *hdev)
2287{
adae20cb 2288 struct smp_irk *k;
970c4e46 2289
adae20cb
JH
2290 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2291 list_del_rcu(&k->list);
2292 kfree_rcu(k, rcu);
970c4e46
JH
2293 }
2294}
2295
55ed8ca1
JH
2296struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2297{
8035ded4 2298 struct link_key *k;
55ed8ca1 2299
0378b597
JH
2300 rcu_read_lock();
2301 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2302 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2303 rcu_read_unlock();
55ed8ca1 2304 return k;
0378b597
JH
2305 }
2306 }
2307 rcu_read_unlock();
55ed8ca1
JH
2308
2309 return NULL;
2310}
2311
745c0ce3 2312static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 2313 u8 key_type, u8 old_key_type)
d25e28ab
JH
2314{
2315 /* Legacy key */
2316 if (key_type < 0x03)
745c0ce3 2317 return true;
d25e28ab
JH
2318
2319 /* Debug keys are insecure so don't store them persistently */
2320 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 2321 return false;
d25e28ab
JH
2322
2323 /* Changed combination key and there's no previous one */
2324 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 2325 return false;
d25e28ab
JH
2326
2327 /* Security mode 3 case */
2328 if (!conn)
745c0ce3 2329 return true;
d25e28ab 2330
e3befab9
JH
2331 /* BR/EDR key derived using SC from an LE link */
2332 if (conn->type == LE_LINK)
2333 return true;
2334
d25e28ab
JH
2335 /* Neither local nor remote side had no-bonding as requirement */
2336 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 2337 return true;
d25e28ab
JH
2338
2339 /* Local side had dedicated bonding as requirement */
2340 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 2341 return true;
d25e28ab
JH
2342
2343 /* Remote side had dedicated bonding as requirement */
2344 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 2345 return true;
d25e28ab
JH
2346
2347 /* If none of the above criteria match, then don't store the key
2348 * persistently */
745c0ce3 2349 return false;
d25e28ab
JH
2350}
2351
e804d25d 2352static u8 ltk_role(u8 type)
98a0b845 2353{
e804d25d
JH
2354 if (type == SMP_LTK)
2355 return HCI_ROLE_MASTER;
98a0b845 2356
e804d25d 2357 return HCI_ROLE_SLAVE;
98a0b845
JH
2358}
2359
f3a73d97
JH
2360struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2361 u8 addr_type, u8 role)
75d262c2 2362{
c9839a11 2363 struct smp_ltk *k;
75d262c2 2364
970d0f1b
JH
2365 rcu_read_lock();
2366 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
5378bc56
JH
2367 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2368 continue;
2369
923e2414 2370 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
970d0f1b 2371 rcu_read_unlock();
75d262c2 2372 return k;
970d0f1b
JH
2373 }
2374 }
2375 rcu_read_unlock();
75d262c2
VCG
2376
2377 return NULL;
2378}
75d262c2 2379
970c4e46
JH
2380struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2381{
2382 struct smp_irk *irk;
2383
adae20cb
JH
2384 rcu_read_lock();
2385 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2386 if (!bacmp(&irk->rpa, rpa)) {
2387 rcu_read_unlock();
970c4e46 2388 return irk;
adae20cb 2389 }
970c4e46
JH
2390 }
2391
adae20cb 2392 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
defce9e8 2393 if (smp_irk_matches(hdev, irk->val, rpa)) {
970c4e46 2394 bacpy(&irk->rpa, rpa);
adae20cb 2395 rcu_read_unlock();
970c4e46
JH
2396 return irk;
2397 }
2398 }
adae20cb 2399 rcu_read_unlock();
970c4e46
JH
2400
2401 return NULL;
2402}
2403
2404struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2405 u8 addr_type)
2406{
2407 struct smp_irk *irk;
2408
6cfc9988
JH
2409 /* Identity Address must be public or static random */
2410 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2411 return NULL;
2412
adae20cb
JH
2413 rcu_read_lock();
2414 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
970c4e46 2415 if (addr_type == irk->addr_type &&
adae20cb
JH
2416 bacmp(bdaddr, &irk->bdaddr) == 0) {
2417 rcu_read_unlock();
970c4e46 2418 return irk;
adae20cb 2419 }
970c4e46 2420 }
adae20cb 2421 rcu_read_unlock();
970c4e46
JH
2422
2423 return NULL;
2424}
2425
567fa2aa 2426struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
7652ff6a
JH
2427 bdaddr_t *bdaddr, u8 *val, u8 type,
2428 u8 pin_len, bool *persistent)
55ed8ca1
JH
2429{
2430 struct link_key *key, *old_key;
745c0ce3 2431 u8 old_key_type;
55ed8ca1
JH
2432
2433 old_key = hci_find_link_key(hdev, bdaddr);
2434 if (old_key) {
2435 old_key_type = old_key->type;
2436 key = old_key;
2437 } else {
12adcf3a 2438 old_key_type = conn ? conn->key_type : 0xff;
0a14ab41 2439 key = kzalloc(sizeof(*key), GFP_KERNEL);
55ed8ca1 2440 if (!key)
567fa2aa 2441 return NULL;
0378b597 2442 list_add_rcu(&key->list, &hdev->link_keys);
55ed8ca1
JH
2443 }
2444
6ed93dc6 2445 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 2446
d25e28ab
JH
2447 /* Some buggy controller combinations generate a changed
2448 * combination key for legacy pairing even when there's no
2449 * previous key */
2450 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 2451 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 2452 type = HCI_LK_COMBINATION;
655fe6ec
JH
2453 if (conn)
2454 conn->key_type = type;
2455 }
d25e28ab 2456
55ed8ca1 2457 bacpy(&key->bdaddr, bdaddr);
9b3b4460 2458 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
2459 key->pin_len = pin_len;
2460
b6020ba0 2461 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 2462 key->type = old_key_type;
4748fed2
JH
2463 else
2464 key->type = type;
2465
7652ff6a
JH
2466 if (persistent)
2467 *persistent = hci_persistent_key(hdev, conn, type,
2468 old_key_type);
4df378a1 2469
567fa2aa 2470 return key;
55ed8ca1
JH
2471}
2472
ca9142b8 2473struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
35d70271 2474 u8 addr_type, u8 type, u8 authenticated,
fe39c7b2 2475 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
75d262c2 2476{
c9839a11 2477 struct smp_ltk *key, *old_key;
e804d25d 2478 u8 role = ltk_role(type);
75d262c2 2479
f3a73d97 2480 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
c9839a11 2481 if (old_key)
75d262c2 2482 key = old_key;
c9839a11 2483 else {
0a14ab41 2484 key = kzalloc(sizeof(*key), GFP_KERNEL);
75d262c2 2485 if (!key)
ca9142b8 2486 return NULL;
970d0f1b 2487 list_add_rcu(&key->list, &hdev->long_term_keys);
75d262c2
VCG
2488 }
2489
75d262c2 2490 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
2491 key->bdaddr_type = addr_type;
2492 memcpy(key->val, tk, sizeof(key->val));
2493 key->authenticated = authenticated;
2494 key->ediv = ediv;
fe39c7b2 2495 key->rand = rand;
c9839a11
VCG
2496 key->enc_size = enc_size;
2497 key->type = type;
75d262c2 2498
ca9142b8 2499 return key;
75d262c2
VCG
2500}
2501
ca9142b8
JH
2502struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2503 u8 addr_type, u8 val[16], bdaddr_t *rpa)
970c4e46
JH
2504{
2505 struct smp_irk *irk;
2506
2507 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2508 if (!irk) {
2509 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2510 if (!irk)
ca9142b8 2511 return NULL;
970c4e46
JH
2512
2513 bacpy(&irk->bdaddr, bdaddr);
2514 irk->addr_type = addr_type;
2515
adae20cb 2516 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
970c4e46
JH
2517 }
2518
2519 memcpy(irk->val, val, 16);
2520 bacpy(&irk->rpa, rpa);
2521
ca9142b8 2522 return irk;
970c4e46
JH
2523}
2524
55ed8ca1
JH
2525int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2526{
2527 struct link_key *key;
2528
2529 key = hci_find_link_key(hdev, bdaddr);
2530 if (!key)
2531 return -ENOENT;
2532
6ed93dc6 2533 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1 2534
0378b597
JH
2535 list_del_rcu(&key->list);
2536 kfree_rcu(key, rcu);
55ed8ca1
JH
2537
2538 return 0;
2539}
2540
e0b2b27e 2541int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
b899efaf 2542{
970d0f1b 2543 struct smp_ltk *k;
c51ffa0b 2544 int removed = 0;
b899efaf 2545
970d0f1b 2546 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
e0b2b27e 2547 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
b899efaf
VCG
2548 continue;
2549
6ed93dc6 2550 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf 2551
970d0f1b
JH
2552 list_del_rcu(&k->list);
2553 kfree_rcu(k, rcu);
c51ffa0b 2554 removed++;
b899efaf
VCG
2555 }
2556
c51ffa0b 2557 return removed ? 0 : -ENOENT;
b899efaf
VCG
2558}
2559
a7ec7338
JH
2560void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2561{
adae20cb 2562 struct smp_irk *k;
a7ec7338 2563
adae20cb 2564 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
a7ec7338
JH
2565 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2566 continue;
2567
2568 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2569
adae20cb
JH
2570 list_del_rcu(&k->list);
2571 kfree_rcu(k, rcu);
a7ec7338
JH
2572 }
2573}
2574
55e76b38
JH
2575bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2576{
2577 struct smp_ltk *k;
4ba9faf3 2578 struct smp_irk *irk;
55e76b38
JH
2579 u8 addr_type;
2580
2581 if (type == BDADDR_BREDR) {
2582 if (hci_find_link_key(hdev, bdaddr))
2583 return true;
2584 return false;
2585 }
2586
2587 /* Convert to HCI addr type which struct smp_ltk uses */
2588 if (type == BDADDR_LE_PUBLIC)
2589 addr_type = ADDR_LE_DEV_PUBLIC;
2590 else
2591 addr_type = ADDR_LE_DEV_RANDOM;
2592
4ba9faf3
JH
2593 irk = hci_get_irk(hdev, bdaddr, addr_type);
2594 if (irk) {
2595 bdaddr = &irk->bdaddr;
2596 addr_type = irk->addr_type;
2597 }
2598
55e76b38
JH
2599 rcu_read_lock();
2600 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
87c8b28d
JH
2601 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2602 rcu_read_unlock();
55e76b38 2603 return true;
87c8b28d 2604 }
55e76b38
JH
2605 }
2606 rcu_read_unlock();
2607
2608 return false;
2609}
2610
6bd32326 2611/* HCI command timer function */
65cc2b49 2612static void hci_cmd_timeout(struct work_struct *work)
6bd32326 2613{
65cc2b49
MH
2614 struct hci_dev *hdev = container_of(work, struct hci_dev,
2615 cmd_timer.work);
6bd32326 2616
bda4f23a
AE
2617 if (hdev->sent_cmd) {
2618 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2619 u16 opcode = __le16_to_cpu(sent->opcode);
2620
2621 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2622 } else {
2623 BT_ERR("%s command tx timeout", hdev->name);
2624 }
2625
6bd32326 2626 atomic_set(&hdev->cmd_cnt, 1);
c347b765 2627 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
2628}
2629
2763eda6 2630struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
6928a924 2631 bdaddr_t *bdaddr, u8 bdaddr_type)
2763eda6
SJ
2632{
2633 struct oob_data *data;
2634
6928a924
JH
2635 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2636 if (bacmp(bdaddr, &data->bdaddr) != 0)
2637 continue;
2638 if (data->bdaddr_type != bdaddr_type)
2639 continue;
2640 return data;
2641 }
2763eda6
SJ
2642
2643 return NULL;
2644}
2645
6928a924
JH
2646int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2647 u8 bdaddr_type)
2763eda6
SJ
2648{
2649 struct oob_data *data;
2650
6928a924 2651 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2763eda6
SJ
2652 if (!data)
2653 return -ENOENT;
2654
6928a924 2655 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2763eda6
SJ
2656
2657 list_del(&data->list);
2658 kfree(data);
2659
2660 return 0;
2661}
2662
35f7498a 2663void hci_remote_oob_data_clear(struct hci_dev *hdev)
2763eda6
SJ
2664{
2665 struct oob_data *data, *n;
2666
2667 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2668 list_del(&data->list);
2669 kfree(data);
2670 }
2763eda6
SJ
2671}
2672
0798872e 2673int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
6928a924 2674 u8 bdaddr_type, u8 *hash192, u8 *rand192,
81328d5c 2675 u8 *hash256, u8 *rand256)
2763eda6
SJ
2676{
2677 struct oob_data *data;
2678
6928a924 2679 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2763eda6 2680 if (!data) {
0a14ab41 2681 data = kmalloc(sizeof(*data), GFP_KERNEL);
2763eda6
SJ
2682 if (!data)
2683 return -ENOMEM;
2684
2685 bacpy(&data->bdaddr, bdaddr);
6928a924 2686 data->bdaddr_type = bdaddr_type;
2763eda6
SJ
2687 list_add(&data->list, &hdev->remote_oob_data);
2688 }
2689
81328d5c
JH
2690 if (hash192 && rand192) {
2691 memcpy(data->hash192, hash192, sizeof(data->hash192));
2692 memcpy(data->rand192, rand192, sizeof(data->rand192));
f7697b16
MH
2693 if (hash256 && rand256)
2694 data->present = 0x03;
81328d5c
JH
2695 } else {
2696 memset(data->hash192, 0, sizeof(data->hash192));
2697 memset(data->rand192, 0, sizeof(data->rand192));
f7697b16
MH
2698 if (hash256 && rand256)
2699 data->present = 0x02;
2700 else
2701 data->present = 0x00;
0798872e
MH
2702 }
2703
81328d5c
JH
2704 if (hash256 && rand256) {
2705 memcpy(data->hash256, hash256, sizeof(data->hash256));
2706 memcpy(data->rand256, rand256, sizeof(data->rand256));
2707 } else {
2708 memset(data->hash256, 0, sizeof(data->hash256));
2709 memset(data->rand256, 0, sizeof(data->rand256));
f7697b16
MH
2710 if (hash192 && rand192)
2711 data->present = 0x01;
81328d5c 2712 }
0798872e 2713
6ed93dc6 2714 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
2715
2716 return 0;
2717}
2718
d2609b34
FG
2719/* This function requires the caller holds hdev->lock */
2720struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
2721{
2722 struct adv_info *adv_instance;
2723
2724 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
2725 if (adv_instance->instance == instance)
2726 return adv_instance;
2727 }
2728
2729 return NULL;
2730}
2731
2732/* This function requires the caller holds hdev->lock */
2733struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance) {
2734 struct adv_info *cur_instance;
2735
2736 cur_instance = hci_find_adv_instance(hdev, instance);
2737 if (!cur_instance)
2738 return NULL;
2739
2740 if (cur_instance == list_last_entry(&hdev->adv_instances,
2741 struct adv_info, list))
2742 return list_first_entry(&hdev->adv_instances,
2743 struct adv_info, list);
2744 else
2745 return list_next_entry(cur_instance, list);
2746}
2747
2748/* This function requires the caller holds hdev->lock */
2749int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
2750{
2751 struct adv_info *adv_instance;
2752
2753 adv_instance = hci_find_adv_instance(hdev, instance);
2754 if (!adv_instance)
2755 return -ENOENT;
2756
2757 BT_DBG("%s removing %dMR", hdev->name, instance);
2758
5d900e46
FG
2759 if (hdev->cur_adv_instance == instance && hdev->adv_instance_timeout) {
2760 cancel_delayed_work(&hdev->adv_instance_expire);
2761 hdev->adv_instance_timeout = 0;
2762 }
2763
d2609b34
FG
2764 list_del(&adv_instance->list);
2765 kfree(adv_instance);
2766
2767 hdev->adv_instance_cnt--;
2768
2769 return 0;
2770}
2771
2772/* This function requires the caller holds hdev->lock */
2773void hci_adv_instances_clear(struct hci_dev *hdev)
2774{
2775 struct adv_info *adv_instance, *n;
2776
5d900e46
FG
2777 if (hdev->adv_instance_timeout) {
2778 cancel_delayed_work(&hdev->adv_instance_expire);
2779 hdev->adv_instance_timeout = 0;
2780 }
2781
d2609b34
FG
2782 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
2783 list_del(&adv_instance->list);
2784 kfree(adv_instance);
2785 }
2786
2787 hdev->adv_instance_cnt = 0;
2788}
2789
2790/* This function requires the caller holds hdev->lock */
2791int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
2792 u16 adv_data_len, u8 *adv_data,
2793 u16 scan_rsp_len, u8 *scan_rsp_data,
2794 u16 timeout, u16 duration)
2795{
2796 struct adv_info *adv_instance;
2797
2798 adv_instance = hci_find_adv_instance(hdev, instance);
2799 if (adv_instance) {
2800 memset(adv_instance->adv_data, 0,
2801 sizeof(adv_instance->adv_data));
2802 memset(adv_instance->scan_rsp_data, 0,
2803 sizeof(adv_instance->scan_rsp_data));
2804 } else {
2805 if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
2806 instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
2807 return -EOVERFLOW;
2808
39ecfad6 2809 adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
d2609b34
FG
2810 if (!adv_instance)
2811 return -ENOMEM;
2812
fffd38bc 2813 adv_instance->pending = true;
d2609b34
FG
2814 adv_instance->instance = instance;
2815 list_add(&adv_instance->list, &hdev->adv_instances);
2816 hdev->adv_instance_cnt++;
2817 }
2818
2819 adv_instance->flags = flags;
2820 adv_instance->adv_data_len = adv_data_len;
2821 adv_instance->scan_rsp_len = scan_rsp_len;
2822
2823 if (adv_data_len)
2824 memcpy(adv_instance->adv_data, adv_data, adv_data_len);
2825
2826 if (scan_rsp_len)
2827 memcpy(adv_instance->scan_rsp_data,
2828 scan_rsp_data, scan_rsp_len);
2829
2830 adv_instance->timeout = timeout;
5d900e46 2831 adv_instance->remaining_time = timeout;
d2609b34
FG
2832
2833 if (duration == 0)
2834 adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
2835 else
2836 adv_instance->duration = duration;
2837
2838 BT_DBG("%s for %dMR", hdev->name, instance);
2839
2840 return 0;
2841}
2842
dcc36c16 2843struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
b9ee0a78 2844 bdaddr_t *bdaddr, u8 type)
b2a66aad 2845{
8035ded4 2846 struct bdaddr_list *b;
b2a66aad 2847
dcc36c16 2848 list_for_each_entry(b, bdaddr_list, list) {
b9ee0a78 2849 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 2850 return b;
b9ee0a78 2851 }
b2a66aad
AJ
2852
2853 return NULL;
2854}
2855
dcc36c16 2856void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
b2a66aad
AJ
2857{
2858 struct list_head *p, *n;
2859
dcc36c16 2860 list_for_each_safe(p, n, bdaddr_list) {
b9ee0a78 2861 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
b2a66aad
AJ
2862
2863 list_del(p);
2864 kfree(b);
2865 }
b2a66aad
AJ
2866}
2867
dcc36c16 2868int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2869{
2870 struct bdaddr_list *entry;
b2a66aad 2871
b9ee0a78 2872 if (!bacmp(bdaddr, BDADDR_ANY))
b2a66aad
AJ
2873 return -EBADF;
2874
dcc36c16 2875 if (hci_bdaddr_list_lookup(list, bdaddr, type))
5e762444 2876 return -EEXIST;
b2a66aad 2877
27f70f3e 2878 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
5e762444
AJ
2879 if (!entry)
2880 return -ENOMEM;
b2a66aad
AJ
2881
2882 bacpy(&entry->bdaddr, bdaddr);
b9ee0a78 2883 entry->bdaddr_type = type;
b2a66aad 2884
dcc36c16 2885 list_add(&entry->list, list);
b2a66aad 2886
2a8357f2 2887 return 0;
b2a66aad
AJ
2888}
2889
dcc36c16 2890int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2891{
2892 struct bdaddr_list *entry;
b2a66aad 2893
35f7498a 2894 if (!bacmp(bdaddr, BDADDR_ANY)) {
dcc36c16 2895 hci_bdaddr_list_clear(list);
35f7498a
JH
2896 return 0;
2897 }
b2a66aad 2898
dcc36c16 2899 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
d2ab0ac1
MH
2900 if (!entry)
2901 return -ENOENT;
2902
2903 list_del(&entry->list);
2904 kfree(entry);
2905
2906 return 0;
2907}
2908
15819a70
AG
2909/* This function requires the caller holds hdev->lock */
2910struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2911 bdaddr_t *addr, u8 addr_type)
2912{
2913 struct hci_conn_params *params;
2914
2915 list_for_each_entry(params, &hdev->le_conn_params, list) {
2916 if (bacmp(&params->addr, addr) == 0 &&
2917 params->addr_type == addr_type) {
2918 return params;
2919 }
2920 }
2921
2922 return NULL;
2923}
2924
4b10966f 2925/* This function requires the caller holds hdev->lock */
501f8827
JH
2926struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2927 bdaddr_t *addr, u8 addr_type)
a9b0a04c 2928{
912b42ef 2929 struct hci_conn_params *param;
a9b0a04c 2930
501f8827 2931 list_for_each_entry(param, list, action) {
912b42ef
JH
2932 if (bacmp(&param->addr, addr) == 0 &&
2933 param->addr_type == addr_type)
2934 return param;
4b10966f
MH
2935 }
2936
2937 return NULL;
a9b0a04c
AG
2938}
2939
f75113a2
JP
2940/* This function requires the caller holds hdev->lock */
2941struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev,
2942 bdaddr_t *addr,
2943 u8 addr_type)
2944{
2945 struct hci_conn_params *param;
2946
2947 list_for_each_entry(param, &hdev->pend_le_conns, action) {
2948 if (bacmp(&param->addr, addr) == 0 &&
2949 param->addr_type == addr_type &&
2950 param->explicit_connect)
2951 return param;
2952 }
2953
f75113a2
JP
2954 return NULL;
2955}
2956
15819a70 2957/* This function requires the caller holds hdev->lock */
51d167c0
MH
2958struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2959 bdaddr_t *addr, u8 addr_type)
15819a70
AG
2960{
2961 struct hci_conn_params *params;
2962
2963 params = hci_conn_params_lookup(hdev, addr, addr_type);
cef952ce 2964 if (params)
51d167c0 2965 return params;
15819a70
AG
2966
2967 params = kzalloc(sizeof(*params), GFP_KERNEL);
2968 if (!params) {
2969 BT_ERR("Out of memory");
51d167c0 2970 return NULL;
15819a70
AG
2971 }
2972
2973 bacpy(&params->addr, addr);
2974 params->addr_type = addr_type;
cef952ce
AG
2975
2976 list_add(&params->list, &hdev->le_conn_params);
93450c75 2977 INIT_LIST_HEAD(&params->action);
cef952ce 2978
bf5b3c8b
MH
2979 params->conn_min_interval = hdev->le_conn_min_interval;
2980 params->conn_max_interval = hdev->le_conn_max_interval;
2981 params->conn_latency = hdev->le_conn_latency;
2982 params->supervision_timeout = hdev->le_supv_timeout;
2983 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2984
2985 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2986
51d167c0 2987 return params;
bf5b3c8b
MH
2988}
2989
f6c63249 2990static void hci_conn_params_free(struct hci_conn_params *params)
15819a70 2991{
f8aaf9b6 2992 if (params->conn) {
f161dd41 2993 hci_conn_drop(params->conn);
f8aaf9b6
JH
2994 hci_conn_put(params->conn);
2995 }
f161dd41 2996
95305baa 2997 list_del(&params->action);
15819a70
AG
2998 list_del(&params->list);
2999 kfree(params);
f6c63249
JH
3000}
3001
3002/* This function requires the caller holds hdev->lock */
3003void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3004{
3005 struct hci_conn_params *params;
3006
3007 params = hci_conn_params_lookup(hdev, addr, addr_type);
3008 if (!params)
3009 return;
3010
3011 hci_conn_params_free(params);
15819a70 3012
95305baa
JH
3013 hci_update_background_scan(hdev);
3014
15819a70
AG
3015 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3016}
3017
3018/* This function requires the caller holds hdev->lock */
55af49a8 3019void hci_conn_params_clear_disabled(struct hci_dev *hdev)
15819a70
AG
3020{
3021 struct hci_conn_params *params, *tmp;
3022
3023 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
55af49a8
JH
3024 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3025 continue;
f75113a2
JP
3026
3027 /* If trying to estabilish one time connection to disabled
3028 * device, leave the params, but mark them as just once.
3029 */
3030 if (params->explicit_connect) {
3031 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
3032 continue;
3033 }
3034
15819a70
AG
3035 list_del(&params->list);
3036 kfree(params);
3037 }
3038
55af49a8 3039 BT_DBG("All LE disabled connection parameters were removed");
77a77a30
AG
3040}
3041
3042/* This function requires the caller holds hdev->lock */
373110c5 3043void hci_conn_params_clear_all(struct hci_dev *hdev)
77a77a30 3044{
15819a70 3045 struct hci_conn_params *params, *tmp;
77a77a30 3046
f6c63249
JH
3047 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3048 hci_conn_params_free(params);
77a77a30 3049
a4790dbd 3050 hci_update_background_scan(hdev);
77a77a30 3051
15819a70 3052 BT_DBG("All LE connection parameters were removed");
77a77a30
AG
3053}
3054
1904a853 3055static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7ba8b4be 3056{
4c87eaab
AG
3057 if (status) {
3058 BT_ERR("Failed to start inquiry: status %d", status);
7ba8b4be 3059
4c87eaab
AG
3060 hci_dev_lock(hdev);
3061 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3062 hci_dev_unlock(hdev);
3063 return;
3064 }
7ba8b4be
AG
3065}
3066
/* Request-complete callback for the LE-scan-disable request. Depending
 * on the active discovery type either stop discovery (LE only) or, for
 * interleaved discovery, hand over to a classic BR/EDR inquiry.
 */
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
					  u16 opcode)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	/* Scan is no longer running; clear the start timestamp so the
	 * restart path knows there is nothing to re-arm.
	 */
	hdev->discovery.scan_start = 0;

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_dev_lock(hdev);

		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			/* If we were running LE only scan, change discovery
			 * state. If we were running both LE and BR/EDR inquiry
			 * simultaneously, and BR/EDR inquiry is already
			 * finished, stop discovery, otherwise BR/EDR inquiry
			 * will stop discovery when finished. If we will resolve
			 * remote device name, do not change discovery state.
			 */
			if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
			    hdev->discovery.state != DISCOVERY_RESOLVING)
				hci_discovery_set_state(hdev,
							DISCOVERY_STOPPED);
		} else {
			struct hci_request req;

			/* Controller cannot scan and inquire at the same
			 * time, so start the BR/EDR inquiry phase now.
			 */
			hci_inquiry_cache_flush(hdev);

			hci_req_init(&req, hdev);

			memset(&cp, 0, sizeof(cp));
			memcpy(&cp.lap, lap, sizeof(cp.lap));
			cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
			hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

			err = hci_req_run(&req, inquiry_complete);
			if (err) {
				BT_ERR("Inquiry request failed: err %d", err);
				hci_discovery_set_state(hdev,
							DISCOVERY_STOPPED);
			}
		}

		hci_dev_unlock(hdev);
		break;
	}
}
3129
7ba8b4be
AG
3130static void le_scan_disable_work(struct work_struct *work)
3131{
3132 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 3133 le_scan_disable.work);
4c87eaab
AG
3134 struct hci_request req;
3135 int err;
7ba8b4be
AG
3136
3137 BT_DBG("%s", hdev->name);
3138
2d28cfe7
JP
3139 cancel_delayed_work_sync(&hdev->le_scan_restart);
3140
4c87eaab 3141 hci_req_init(&req, hdev);
28b75a89 3142
b1efcc28 3143 hci_req_add_le_scan_disable(&req);
28b75a89 3144
4c87eaab
AG
3145 err = hci_req_run(&req, le_scan_disable_work_complete);
3146 if (err)
3147 BT_ERR("Disable LE scanning request failed: err %d", err);
28b75a89
AG
3148}
3149
/* Request-complete callback for the scan-restart request. Re-arms the
 * le_scan_disable delayed work with the remaining portion of the
 * original scan duration.
 */
static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
					  u16 opcode)
{
	unsigned long timeout, duration, scan_start, now;

	BT_DBG("%s", hdev->name);

	if (status) {
		BT_ERR("Failed to restart LE scan: status %d", status);
		return;
	}

	/* Only relevant when the restart was done to defeat the
	 * controller's duplicate filter and a scan is actually running
	 * (scan_start != 0).
	 */
	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
	    !hdev->discovery.scan_start)
		return;

	/* When the scan was started, hdev->le_scan_disable has been queued
	 * after duration from scan_start. During scan restart this job
	 * has been canceled, and we need to queue it again after proper
	 * timeout, to make sure that scan does not run indefinitely.
	 */
	duration = hdev->discovery.scan_duration;
	scan_start = hdev->discovery.scan_start;
	now = jiffies;
	if (now - scan_start <= duration) {
		int elapsed;

		/* Compute elapsed jiffies, accounting for a jiffies
		 * counter wrap between scan_start and now.
		 */
		if (now >= scan_start)
			elapsed = now - scan_start;
		else
			elapsed = ULONG_MAX - scan_start + now;

		timeout = duration - elapsed;
	} else {
		/* Duration already exceeded - disable immediately */
		timeout = 0;
	}
	queue_delayed_work(hdev->workqueue,
			   &hdev->le_scan_disable, timeout);
}
3189
3190static void le_scan_restart_work(struct work_struct *work)
3191{
3192 struct hci_dev *hdev = container_of(work, struct hci_dev,
3193 le_scan_restart.work);
3194 struct hci_request req;
3195 struct hci_cp_le_set_scan_enable cp;
3196 int err;
3197
3198 BT_DBG("%s", hdev->name);
3199
3200 /* If controller is not scanning we are done. */
d7a5a11d 3201 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2d28cfe7
JP
3202 return;
3203
3204 hci_req_init(&req, hdev);
3205
3206 hci_req_add_le_scan_disable(&req);
3207
3208 memset(&cp, 0, sizeof(cp));
3209 cp.enable = LE_SCAN_ENABLE;
3210 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3211 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
3212
3213 err = hci_req_run(&req, le_scan_restart_work_complete);
3214 if (err)
3215 BT_ERR("Restart LE scan request failed: err %d", err);
3216}
3217
/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 *
 * In case BR/EDR has been disabled on a dual-mode controller and
 * userspace has configured a static address, then that address
 * becomes the identity address instead of the public BR/EDR address.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	/* Use the static random address when (1) it is forced for
	 * debugging, (2) no public address exists, or (3) BR/EDR is
	 * disabled and a static address has been configured.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}
3245
/* Alloc HCI device.
 *
 * Allocates a zeroed hci_dev and fills in defaults, lists, work items
 * and queues. Returns NULL on allocation failure. The caller later
 * registers it with hci_register_dev() and releases it with
 * hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Baseline BR/EDR packet types and capabilities */
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
	hdev->adv_instance_timeout = 0;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* Default LE advertising, scanning and connection parameters
	 * (units per the Bluetooth Core Specification).
	 */
	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	/* Key stores, filter lists and per-device bookkeeping lists */
	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);
	INIT_LIST_HEAD(&hdev->adv_instances);

	/* RX/TX/command processing and power management work items */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->error_reset, hci_error_reset);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, hci_adv_timeout_expire);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
3335
/* Free HCI device.
 *
 * Drops the final device reference; the actual memory is released by
 * the driver-core release callback once all references are gone.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
3343
1da177e4
LT
3344/* Register HCI device */
3345int hci_register_dev(struct hci_dev *hdev)
3346{
b1b813d4 3347 int id, error;
1da177e4 3348
74292d5a 3349 if (!hdev->open || !hdev->close || !hdev->send)
1da177e4
LT
3350 return -EINVAL;
3351
08add513
MM
3352 /* Do not allow HCI_AMP devices to register at index 0,
3353 * so the index can be used as the AMP controller ID.
3354 */
3df92b31
SL
3355 switch (hdev->dev_type) {
3356 case HCI_BREDR:
3357 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3358 break;
3359 case HCI_AMP:
3360 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3361 break;
3362 default:
3363 return -EINVAL;
1da177e4 3364 }
8e87d142 3365
3df92b31
SL
3366 if (id < 0)
3367 return id;
3368
1da177e4
LT
3369 sprintf(hdev->name, "hci%d", id);
3370 hdev->id = id;
2d8b3a11
AE
3371
3372 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3373
d8537548
KC
3374 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3375 WQ_MEM_RECLAIM, 1, hdev->name);
33ca954d
DH
3376 if (!hdev->workqueue) {
3377 error = -ENOMEM;
3378 goto err;
3379 }
f48fd9c8 3380
d8537548
KC
3381 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3382 WQ_MEM_RECLAIM, 1, hdev->name);
6ead1bbc
JH
3383 if (!hdev->req_workqueue) {
3384 destroy_workqueue(hdev->workqueue);
3385 error = -ENOMEM;
3386 goto err;
3387 }
3388
0153e2ec
MH
3389 if (!IS_ERR_OR_NULL(bt_debugfs))
3390 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3391
bdc3e0f1
MH
3392 dev_set_name(&hdev->dev, "%s", hdev->name);
3393
3394 error = device_add(&hdev->dev);
33ca954d 3395 if (error < 0)
54506918 3396 goto err_wqueue;
1da177e4 3397
611b30f7 3398 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
3399 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3400 hdev);
611b30f7
MH
3401 if (hdev->rfkill) {
3402 if (rfkill_register(hdev->rfkill) < 0) {
3403 rfkill_destroy(hdev->rfkill);
3404 hdev->rfkill = NULL;
3405 }
3406 }
3407
5e130367 3408 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
a1536da2 3409 hci_dev_set_flag(hdev, HCI_RFKILLED);
5e130367 3410
a1536da2
MH
3411 hci_dev_set_flag(hdev, HCI_SETUP);
3412 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
ce2be9ac 3413
01cd3404 3414 if (hdev->dev_type == HCI_BREDR) {
56f87901
JH
3415 /* Assume BR/EDR support until proven otherwise (such as
3416 * through reading supported features during init.
3417 */
a1536da2 3418 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
56f87901 3419 }
ce2be9ac 3420
fcee3377
GP
3421 write_lock(&hci_dev_list_lock);
3422 list_add(&hdev->list, &hci_dev_list);
3423 write_unlock(&hci_dev_list_lock);
3424
4a964404
MH
3425 /* Devices that are marked for raw-only usage are unconfigured
3426 * and should not be included in normal operation.
fee746b0
MH
3427 */
3428 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
a1536da2 3429 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
fee746b0 3430
1da177e4 3431 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 3432 hci_dev_hold(hdev);
1da177e4 3433
19202573 3434 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 3435
1da177e4 3436 return id;
f48fd9c8 3437
33ca954d
DH
3438err_wqueue:
3439 destroy_workqueue(hdev->workqueue);
6ead1bbc 3440 destroy_workqueue(hdev->req_workqueue);
33ca954d 3441err:
3df92b31 3442 ida_simple_remove(&hci_index_ida, hdev->id);
f48fd9c8 3443
33ca954d 3444 return error;
1da177e4
LT
3445}
3446EXPORT_SYMBOL(hci_register_dev);
3447
/* Unregister HCI device.
 *
 * Tears down everything hci_register_dev() created, in reverse order:
 * remove from the global list, close the device, notify mgmt, undo
 * rfkill/sysfs/debugfs, destroy the work queues, purge all per-device
 * state and finally drop the registration reference and index.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Mark first so concurrent paths can see the device going away */
	hci_dev_set_flag(hdev, HCI_UNREGISTER);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	cancel_work_sync(&hdev->power_on);

	/* Only announce removal to mgmt for fully set-up controllers */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Purge all remaining per-device state under the device lock */
	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_instances_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
3512
/* Suspend HCI device.
 *
 * Only broadcasts the suspend notification to registered listeners;
 * always succeeds.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
3520
/* Resume HCI device.
 *
 * Only broadcasts the resume notification to registered listeners;
 * always succeeds.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
3528
75e0569f
MH
3529/* Reset HCI device */
3530int hci_reset_dev(struct hci_dev *hdev)
3531{
3532 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3533 struct sk_buff *skb;
3534
3535 skb = bt_skb_alloc(3, GFP_ATOMIC);
3536 if (!skb)
3537 return -ENOMEM;
3538
3539 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3540 memcpy(skb_put(skb, 3), hw_err, 3);
3541
3542 /* Send Hardware Error to upper stack */
3543 return hci_recv_frame(hdev, skb);
3544}
3545EXPORT_SYMBOL(hci_reset_dev);
3546
76bca880 3547/* Receive frame from HCI drivers */
e1a26170 3548int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 3549{
76bca880 3550 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 3551 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
3552 kfree_skb(skb);
3553 return -ENXIO;
3554 }
3555
fe806dce
MH
3556 if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
3557 bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
3558 bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
3559 kfree_skb(skb);
3560 return -EINVAL;
3561 }
3562
d82603c6 3563 /* Incoming skb */
76bca880
MH
3564 bt_cb(skb)->incoming = 1;
3565
3566 /* Time stamp */
3567 __net_timestamp(skb);
3568
76bca880 3569 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 3570 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 3571
76bca880
MH
3572 return 0;
3573}
3574EXPORT_SYMBOL(hci_recv_frame);
3575
e875ff84
MH
3576/* Receive diagnostic message from HCI drivers */
3577int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3578{
581d6fd6
MH
3579 /* Mark as diagnostic packet */
3580 bt_cb(skb)->pkt_type = HCI_DIAG_PKT;
3581
e875ff84
MH
3582 /* Time stamp */
3583 __net_timestamp(skb);
3584
581d6fd6
MH
3585 skb_queue_tail(&hdev->rx_q, skb);
3586 queue_work(hdev->workqueue, &hdev->rx_work);
e875ff84 3587
e875ff84
MH
3588 return 0;
3589}
3590EXPORT_SYMBOL(hci_recv_diag);
3591
1da177e4
LT
3592/* ---- Interface to upper protocols ---- */
3593
1da177e4
LT
/* Register an upper-protocol callback structure; it is appended to the
 * global hci_cb_list under hci_cb_list_lock. Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_add_tail(&cb->list, &hci_cb_list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
3605
/* Unregister an upper-protocol callback structure previously added
 * with hci_register_cb(). Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
3617
/* Hand a single frame to the driver. The skb is consumed: it is either
 * passed to hdev->send() or freed here. Copies are delivered to the
 * monitor and (in promiscuous mode) raw sockets before sending.
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	/* Drop the frame if the transport is not running; the monitor
	 * and socket copies above were still delivered.
	 */
	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
		kfree_skb(skb);
		return;
	}

	err = hdev->send(hdev, skb);
	if (err < 0) {
		BT_ERR("%s sending frame failed (%d)", hdev->name, err);
		kfree_skb(skb);
	}
}
3649
/* Send HCI command.
 *
 * Builds a command skb for @opcode with @plen bytes of @param, marks it
 * as a stand-alone (single-command) request and queues it for the
 * command worker. Returns 0 on success or -ENOMEM.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
1da177e4
LT
3674
3675/* Get data from the previously sent command */
a9de9248 3676void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
3677{
3678 struct hci_command_hdr *hdr;
3679
3680 if (!hdev->sent_cmd)
3681 return NULL;
3682
3683 hdr = (void *) hdev->sent_cmd->data;
3684
a9de9248 3685 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
3686 return NULL;
3687
f0e09510 3688 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
3689
3690 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3691}
3692
/* Send HCI command and wait for command complete event.
 *
 * Synchronous wrapper around __hci_cmd_sync() that takes the request
 * lock. Returns the event skb on success or an ERR_PTR; -ENETDOWN if
 * the device is not up.
 */
struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			     const void *param, u32 timeout)
{
	struct sk_buff *skb;

	if (!test_bit(HCI_UP, &hdev->flags))
		return ERR_PTR(-ENETDOWN);

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	hci_req_lock(hdev);
	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
	hci_req_unlock(hdev);

	return skb;
}
EXPORT_SYMBOL(hci_cmd_sync);
3711
/* Send ACL data */

/* Prepend an ACL header (handle+flags, data length) to @skb. The
 * length field is taken from skb->len before the push.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
3724
/* Add ACL headers to @skb (and each fragment on its frag_list) and
 * queue everything on @queue. For AMP controllers the channel handle
 * is used instead of the connection handle.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT instead of
		 * ACL_START.
		 */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
3786
/* Queue ACL data on the channel's data queue and kick the TX worker. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
3797
3798/* Send SCO data */
0d861d8b 3799void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
3800{
3801 struct hci_dev *hdev = conn->hdev;
3802 struct hci_sco_hdr hdr;
3803
3804 BT_DBG("%s len %d", hdev->name, skb->len);
3805
aca3192c 3806 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
3807 hdr.dlen = skb->len;
3808
badff6d0
ACM
3809 skb_push(skb, HCI_SCO_HDR_SIZE);
3810 skb_reset_transport_header(skb);
9c70220b 3811 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4 3812
0d48d939 3813 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 3814
1da177e4 3815 skb_queue_tail(&conn->data_q, skb);
3eff45ea 3816 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 3817}
1da177e4
LT
3818
3819/* ---- HCI TX task (outgoing data) ---- */
3820
/* HCI Connection scheduler */

/* Pick the connection of @type with pending data and the fewest
 * in-flight packets (round-robin fairness) and compute its TX quota
 * from the available controller buffer count. Returns NULL and a zero
 * quota when nothing is ready to send.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the connection with the fewest packets in flight */
		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Choose the buffer pool matching the link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Share the available buffers between ready connections,
		 * always granting at least one packet.
		 */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
3881
/* TX timeout handling: disconnect every connection of @type that still
 * has unacknowledged packets outstanding.
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
3902
/* Channel-level scheduler: among all channels of connections of @type,
 * pick one whose head packet has the highest priority, breaking ties
 * by fewest packets in flight on the owning connection. Also computes
 * the TX quota from the matching controller buffer pool. Returns NULL
 * when no channel is ready.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the fairness
			 * bookkeeping at this priority level.
			 */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Pick the buffer pool matching the link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Share available buffers, granting at least one packet */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
3984
/* Anti-starvation pass: for every channel of @type that sent nothing
 * in the last scheduling round, promote its head packet to just below
 * the maximum priority; channels that did send get their counter
 * reset instead.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* This channel got serviced; reset and skip */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
4034
b71d385a
AE
4035static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4036{
4037 /* Calculate count of blocks used by this packet */
4038 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4039}
4040
/* If no ACL buffer credits remain (@cnt == 0) and the last ACL TX is
 * older than HCI_ACL_TX_TIMEOUT, kill stalled ACL links. Skipped for
 * unconfigured controllers.
 */
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
1da177e4 4051
/* Packet-based ACL scheduler: drain channel queues while controller
 * buffer credits (acl_cnt) remain, within each channel's fair quota,
 * and recalculate priorities afterwards if anything was sent.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			/* One controller buffer consumed */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
4089
/* Block-based ACL scheduler (flow control by data blocks rather than
 * packets): send queued packets while enough blocks remain, charging
 * each packet its block count. AMP controllers schedule AMP_LINK.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* Packet does not fit into the remaining blocks;
			 * stop scheduling entirely for this round.
			 */
			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			/* Charge the quota and credit pool in blocks */
			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
4143
6039aa73 4144static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
4145{
4146 BT_DBG("%s", hdev->name);
4147
bd1eb66b
AE
4148 /* No ACL link over BR/EDR controller */
4149 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4150 return;
4151
4152 /* No AMP link over AMP controller */
4153 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
4154 return;
4155
4156 switch (hdev->flow_ctl_mode) {
4157 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4158 hci_sched_acl_pkt(hdev);
4159 break;
4160
4161 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4162 hci_sched_acl_blk(hdev);
4163 break;
4164 }
4165}
4166
1da177e4 4167/* Schedule SCO */
6039aa73 4168static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
4169{
4170 struct hci_conn *conn;
4171 struct sk_buff *skb;
4172 int quote;
4173
4174 BT_DBG("%s", hdev->name);
4175
52087a79
LAD
4176 if (!hci_conn_num(hdev, SCO_LINK))
4177 return;
4178
1da177e4
LT
4179 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4180 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4181 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4182 hci_send_frame(hdev, skb);
1da177e4
LT
4183
4184 conn->sent++;
4185 if (conn->sent == ~0)
4186 conn->sent = 0;
4187 }
4188 }
4189}
4190
6039aa73 4191static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
4192{
4193 struct hci_conn *conn;
4194 struct sk_buff *skb;
4195 int quote;
4196
4197 BT_DBG("%s", hdev->name);
4198
52087a79
LAD
4199 if (!hci_conn_num(hdev, ESCO_LINK))
4200 return;
4201
8fc9ced3
GP
4202 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4203 &quote))) {
b6a0dc82
MH
4204 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4205 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4206 hci_send_frame(hdev, skb);
b6a0dc82
MH
4207
4208 conn->sent++;
4209 if (conn->sent == ~0)
4210 conn->sent = 0;
4211 }
4212 }
4213}
4214
6039aa73 4215static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 4216{
73d80deb 4217 struct hci_chan *chan;
6ed58ec5 4218 struct sk_buff *skb;
02b20f0b 4219 int quote, cnt, tmp;
6ed58ec5
VT
4220
4221 BT_DBG("%s", hdev->name);
4222
52087a79
LAD
4223 if (!hci_conn_num(hdev, LE_LINK))
4224 return;
4225
d7a5a11d 4226 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
6ed58ec5
VT
4227 /* LE tx timeout must be longer than maximum
4228 * link supervision timeout (40.9 seconds) */
bae1f5d9 4229 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 4230 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 4231 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
4232 }
4233
4234 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 4235 tmp = cnt;
73d80deb 4236 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
4237 u32 priority = (skb_peek(&chan->data_q))->priority;
4238 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4239 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4240 skb->len, skb->priority);
6ed58ec5 4241
ec1cce24
LAD
4242 /* Stop if priority has changed */
4243 if (skb->priority < priority)
4244 break;
4245
4246 skb = skb_dequeue(&chan->data_q);
4247
57d17d70 4248 hci_send_frame(hdev, skb);
6ed58ec5
VT
4249 hdev->le_last_tx = jiffies;
4250
4251 cnt--;
73d80deb
LAD
4252 chan->sent++;
4253 chan->conn->sent++;
6ed58ec5
VT
4254 }
4255 }
73d80deb 4256
6ed58ec5
VT
4257 if (hdev->le_pkts)
4258 hdev->le_cnt = cnt;
4259 else
4260 hdev->acl_cnt = cnt;
02b20f0b
LAD
4261
4262 if (cnt != tmp)
4263 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
4264}
4265
/* Deferred TX worker for hdev->tx_work.
 *
 * Runs the per-link schedulers (ACL, SCO, eSCO, LE) and then flushes
 * any raw packets queued on hdev->raw_q directly to the driver.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* With a user channel bound, userspace owns the device and the
	 * kernel schedulers must stay out of the way; raw packets are
	 * still flushed below.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
4286
25985edc 4287/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
4288
4289/* ACL data packet */
6039aa73 4290static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
4291{
4292 struct hci_acl_hdr *hdr = (void *) skb->data;
4293 struct hci_conn *conn;
4294 __u16 handle, flags;
4295
4296 skb_pull(skb, HCI_ACL_HDR_SIZE);
4297
4298 handle = __le16_to_cpu(hdr->handle);
4299 flags = hci_flags(handle);
4300 handle = hci_handle(handle);
4301
f0e09510 4302 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 4303 handle, flags);
1da177e4
LT
4304
4305 hdev->stat.acl_rx++;
4306
4307 hci_dev_lock(hdev);
4308 conn = hci_conn_hash_lookup_handle(hdev, handle);
4309 hci_dev_unlock(hdev);
8e87d142 4310
1da177e4 4311 if (conn) {
65983fc7 4312 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 4313
1da177e4 4314 /* Send to upper protocol */
686ebf28
UF
4315 l2cap_recv_acldata(conn, skb, flags);
4316 return;
1da177e4 4317 } else {
8e87d142 4318 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 4319 hdev->name, handle);
1da177e4
LT
4320 }
4321
4322 kfree_skb(skb);
4323}
4324
4325/* SCO data packet */
6039aa73 4326static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
4327{
4328 struct hci_sco_hdr *hdr = (void *) skb->data;
4329 struct hci_conn *conn;
4330 __u16 handle;
4331
4332 skb_pull(skb, HCI_SCO_HDR_SIZE);
4333
4334 handle = __le16_to_cpu(hdr->handle);
4335
f0e09510 4336 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
4337
4338 hdev->stat.sco_rx++;
4339
4340 hci_dev_lock(hdev);
4341 conn = hci_conn_hash_lookup_handle(hdev, handle);
4342 hci_dev_unlock(hdev);
4343
4344 if (conn) {
1da177e4 4345 /* Send to upper protocol */
686ebf28
UF
4346 sco_recv_scodata(conn, skb);
4347 return;
1da177e4 4348 } else {
8e87d142 4349 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 4350 hdev->name, handle);
1da177e4
LT
4351 }
4352
4353 kfree_skb(skb);
4354}
4355
9238f36a
JH
4356static bool hci_req_is_complete(struct hci_dev *hdev)
4357{
4358 struct sk_buff *skb;
4359
4360 skb = skb_peek(&hdev->cmd_q);
4361 if (!skb)
4362 return true;
4363
db6e3e8d 4364 return bt_cb(skb)->req.start;
9238f36a
JH
4365}
4366
42c6b129
JH
4367static void hci_resend_last(struct hci_dev *hdev)
4368{
4369 struct hci_command_hdr *sent;
4370 struct sk_buff *skb;
4371 u16 opcode;
4372
4373 if (!hdev->sent_cmd)
4374 return;
4375
4376 sent = (void *) hdev->sent_cmd->data;
4377 opcode = __le16_to_cpu(sent->opcode);
4378 if (opcode == HCI_OP_RESET)
4379 return;
4380
4381 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4382 if (!skb)
4383 return;
4384
4385 skb_queue_head(&hdev->cmd_q, skb);
4386 queue_work(hdev->workqueue, &hdev->cmd_work);
4387}
4388
e6214487
JH
/* Resolve the completion callback(s) for the HCI command @opcode that
 * finished with @status.
 *
 * On success one of the out parameters @req_complete or
 * @req_complete_skb is set to the request's callback; the caller is
 * expected to have initialized both to NULL and to invoke whichever is
 * set.  Commands of an aborted request are dequeued and freed here.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	/* NOTE: hdev->sent_cmd is non-NULL here because
	 * hci_sent_cmd_data() above already returned non-NULL.
	 */
	if (bt_cb(hdev->sent_cmd)->req.complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->req.complete;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->req.complete_skb) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->req.complete_skb;
		return;
	}

	/* Remove all pending commands belonging to this request */
	/* The cmd_q lock is taken directly because the queue is also
	 * manipulated from other contexts; stop at the start marker of
	 * the next request so it stays intact.
	 */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		*req_complete = bt_cb(skb)->req.complete;
		*req_complete_skb = bt_cb(skb)->req.complete_skb;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}
4448
b78752cc 4449static void hci_rx_work(struct work_struct *work)
1da177e4 4450{
b78752cc 4451 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
4452 struct sk_buff *skb;
4453
4454 BT_DBG("%s", hdev->name);
4455
1da177e4 4456 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
4457 /* Send copy to monitor */
4458 hci_send_to_monitor(hdev, skb);
4459
1da177e4
LT
4460 if (atomic_read(&hdev->promisc)) {
4461 /* Send copy to the sockets */
470fe1b5 4462 hci_send_to_sock(hdev, skb);
1da177e4
LT
4463 }
4464
d7a5a11d 4465 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1da177e4
LT
4466 kfree_skb(skb);
4467 continue;
4468 }
4469
4470 if (test_bit(HCI_INIT, &hdev->flags)) {
4471 /* Don't process data packets in this states. */
0d48d939 4472 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
4473 case HCI_ACLDATA_PKT:
4474 case HCI_SCODATA_PKT:
4475 kfree_skb(skb);
4476 continue;
3ff50b79 4477 }
1da177e4
LT
4478 }
4479
4480 /* Process frame */
0d48d939 4481 switch (bt_cb(skb)->pkt_type) {
1da177e4 4482 case HCI_EVENT_PKT:
b78752cc 4483 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
4484 hci_event_packet(hdev, skb);
4485 break;
4486
4487 case HCI_ACLDATA_PKT:
4488 BT_DBG("%s ACL data packet", hdev->name);
4489 hci_acldata_packet(hdev, skb);
4490 break;
4491
4492 case HCI_SCODATA_PKT:
4493 BT_DBG("%s SCO data packet", hdev->name);
4494 hci_scodata_packet(hdev, skb);
4495 break;
4496
4497 default:
4498 kfree_skb(skb);
4499 break;
4500 }
4501 }
1da177e4
LT
4502}
4503
c347b765 4504static void hci_cmd_work(struct work_struct *work)
1da177e4 4505{
c347b765 4506 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
4507 struct sk_buff *skb;
4508
2104786b
AE
4509 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4510 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 4511
1da177e4 4512 /* Send queued commands */
5a08ecce
AE
4513 if (atomic_read(&hdev->cmd_cnt)) {
4514 skb = skb_dequeue(&hdev->cmd_q);
4515 if (!skb)
4516 return;
4517
7585b97a 4518 kfree_skb(hdev->sent_cmd);
1da177e4 4519
a675d7f1 4520 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 4521 if (hdev->sent_cmd) {
1da177e4 4522 atomic_dec(&hdev->cmd_cnt);
57d17d70 4523 hci_send_frame(hdev, skb);
7bdb8a5c 4524 if (test_bit(HCI_RESET, &hdev->flags))
65cc2b49 4525 cancel_delayed_work(&hdev->cmd_timer);
7bdb8a5c 4526 else
65cc2b49
MH
4527 schedule_delayed_work(&hdev->cmd_timer,
4528 HCI_CMD_TIMEOUT);
1da177e4
LT
4529 } else {
4530 skb_queue_head(&hdev->cmd_q, skb);
c347b765 4531 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
4532 }
4533 }
4534}