/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	hci_dev_change_flag(hdev, HCI_DUT_MODE);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
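
/* The "dut_mode" entry is created per controller in __hci_init() below.
 * With debugfs mounted at the conventional /sys/kernel/debug location,
 * Device Under Test mode can then be toggled from userspace, e.g.:
 *
 *	echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * Writing 'N' issues HCI_OP_RESET to leave test mode again, as
 * implemented in dut_mode_write() above.
 */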

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
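
/* A minimal sketch of how callers drive the synchronous helpers,
 * mirroring dut_mode_write() above: send one command, block until the
 * completion arrives or the timeout fires, then consume the response:
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	status = skb->data[0];
 *	kfree_skb(skb);
 *
 * The returned skb holds the command response parameters and must be
 * freed by the caller with kfree_skb().
 */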

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
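
/* The *_req() helpers below all follow the same pattern: they run
 * under hci_req_sync(), queue one or more commands on the request via
 * hci_req_add(), and the calling thread sleeps on req_wait_q until
 * hci_req_sync_complete() records the result.
 */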

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void amp_init2(struct hci_request *req)
{
	/* Read Local Supported Features. Not all AMP controllers
	 * support this so it's placed conditionally in the second
	 * stage init.
	 */
	if (req->hdev->commands[14] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init1(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
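
/* In the mask built above, byte n of events[] covers event mask bits
 * 8*n..8*n+7 as defined in the Bluetooth Core Specification; for
 * example, events[7] |= 0x20 sets bit 61, enabling the LE Meta Event.
 */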

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->dev_type == HCI_AMP)
		return amp_init2(req);

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			u8 mode = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI
		 * events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	if (hdev->commands[6] & 0x20) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x0f;

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40;	/* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04;	/* LE Direct Advertising
						 * Report
						 */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80;	/* LE Read Local P-256
						 * Public Key Complete
						 */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01;	/* LE Generate DHKey Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first two stages of init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}
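
/* Bringing up a configured controller thus runs through up to four
 * synchronous request stages: init1 resets the controller and reads
 * its basic capabilities, init2 performs BR/EDR and LE specific setup,
 * init3 programs the event masks and link policy based on what the
 * earlier stages discovered, and init4 enables optional features such
 * as Secure Connections.
 */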

static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
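
/* Every successful hci_dev_get() takes a reference on the returned
 * device, so callers pair it with hci_dev_put(), as the ioctl helpers
 * in this file do:
 *
 *	hdev = hci_dev_get(dev_id);
 *	if (!hdev)
 *		return -ENODEV;
 *	...
 *	hci_dev_put(hdev);
 */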

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
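
/* hci_inquiry_cache_update_resolve() keeps the resolve list ordered by
 * descending signal strength so that remote names are resolved for the
 * strongest devices first; entries whose name request is already in
 * flight (NAME_PENDING) are never displaced.
 */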

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
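
/* hci_inquiry() above backs the legacy HCIINQUIRY ioctl (the path used
 * by classic tools such as hcitool): it flushes a stale cache, runs
 * the inquiry synchronously via hci_req_sync() and then copies the
 * cached results back to userspace.
 */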

static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		ret = -ENODEV;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			ret = __hci_unconf_init(hdev);
	}

	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

/* This function requires that the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		if (p->conn) {
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
		list_del_init(&p->action);
	}

	BT_DBG("All LE pending actions cleared");
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		/* Execute vendor specific shutdown routine */
		if (hdev->shutdown)
			hdev->shutdown(hdev);
	}

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
		if (hdev->dev_type == HCI_BREDR)
			mgmt_powered(hdev, 0);
	}

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	smp_unregister(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
	    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hci_dev_clear_volatile_flags(hdev);

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
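
/* After hci_dev_do_close() returns, all pending work has been flushed,
 * every queue is empty and the transport is closed; only the HCI_RAW
 * bit may survive in hdev->flags.
 */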

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_reset(struct hci_dev *hdev)
{
	int ret;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

	hci_req_unlock(hdev);
	return ret;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	err = hci_dev_do_reset(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		ret = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}
123abc08
JH
1785static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
1786{
bc6d2d04 1787 bool conn_changed, discov_changed;
123abc08
JH
1788
1789 BT_DBG("%s scan 0x%02x", hdev->name, scan);
1790
1791 if ((scan & SCAN_PAGE))
238be788
MH
1792 conn_changed = !hci_dev_test_and_set_flag(hdev,
1793 HCI_CONNECTABLE);
123abc08 1794 else
a69d8927
MH
1795 conn_changed = hci_dev_test_and_clear_flag(hdev,
1796 HCI_CONNECTABLE);
123abc08 1797
bc6d2d04 1798 if ((scan & SCAN_INQUIRY)) {
238be788
MH
1799 discov_changed = !hci_dev_test_and_set_flag(hdev,
1800 HCI_DISCOVERABLE);
bc6d2d04 1801 } else {
a358dc11 1802 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
a69d8927
MH
1803 discov_changed = hci_dev_test_and_clear_flag(hdev,
1804 HCI_DISCOVERABLE);
bc6d2d04
JH
1805 }
1806
d7a5a11d 1807 if (!hci_dev_test_flag(hdev, HCI_MGMT))
123abc08
JH
1808 return;
1809
bc6d2d04
JH
1810 if (conn_changed || discov_changed) {
1811 /* In case this was disabled through mgmt */
a1536da2 1812 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
bc6d2d04 1813
d7a5a11d 1814 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
bc6d2d04
JH
1815 mgmt_update_adv_data(hdev);
1816
123abc08 1817 mgmt_new_settings(hdev);
bc6d2d04 1818 }
123abc08
JH
1819}
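
/* Illustrative note, not part of the original file: the scan value
 * follows the HCI Write Scan Enable encoding, so the updates above
 * behave as in this hedged sketch:
 *
 *	hci_update_scan_state(hdev, SCAN_DISABLED);	// 0x00: clears both flags
 *	hci_update_scan_state(hdev, SCAN_PAGE);		// 0x02: HCI_CONNECTABLE only
 *	hci_update_scan_state(hdev, SCAN_PAGE | SCAN_INQUIRY);
 *						// 0x03: connectable + discoverable
 *
 * mgmt listeners are only notified when one of the flags actually
 * changed and the HCI_MGMT flag is set.
 */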
1820
1da177e4
LT
1821int hci_dev_cmd(unsigned int cmd, void __user *arg)
1822{
1823 struct hci_dev *hdev;
1824 struct hci_dev_req dr;
1825 int err = 0;
1826
1827 if (copy_from_user(&dr, arg, sizeof(dr)))
1828 return -EFAULT;
1829
70f23020
AE
1830 hdev = hci_dev_get(dr.dev_id);
1831 if (!hdev)
1da177e4
LT
1832 return -ENODEV;
1833
d7a5a11d 1834 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
0736cfa8
MH
1835 err = -EBUSY;
1836 goto done;
1837 }
1838
d7a5a11d 1839 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
fee746b0
MH
1840 err = -EOPNOTSUPP;
1841 goto done;
1842 }
1843
5b69bef5
MH
1844 if (hdev->dev_type != HCI_BREDR) {
1845 err = -EOPNOTSUPP;
1846 goto done;
1847 }
1848
d7a5a11d 1849 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
56f87901
JH
1850 err = -EOPNOTSUPP;
1851 goto done;
1852 }
1853
1da177e4
LT
1854 switch (cmd) {
1855 case HCISETAUTH:
01178cd4
JH
1856 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1857 HCI_INIT_TIMEOUT);
1da177e4
LT
1858 break;
1859
1860 case HCISETENCRYPT:
1861 if (!lmp_encrypt_capable(hdev)) {
1862 err = -EOPNOTSUPP;
1863 break;
1864 }
1865
1866 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1867 /* Auth must be enabled first */
01178cd4
JH
1868 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1869 HCI_INIT_TIMEOUT);
1da177e4
LT
1870 if (err)
1871 break;
1872 }
1873
01178cd4
JH
1874 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1875 HCI_INIT_TIMEOUT);
1da177e4
LT
1876 break;
1877
1878 case HCISETSCAN:
01178cd4
JH
1879 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1880 HCI_INIT_TIMEOUT);
91a668b0 1881
bc6d2d04
JH
1882 /* Ensure that the connectable and discoverable states
1883 * get correctly modified as this was a non-mgmt change.
91a668b0 1884 */
123abc08
JH
1885 if (!err)
1886 hci_update_scan_state(hdev, dr.dev_opt);
1da177e4
LT
1887 break;
1888
1da177e4 1889 case HCISETLINKPOL:
01178cd4
JH
1890 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1891 HCI_INIT_TIMEOUT);
1da177e4
LT
1892 break;
1893
1894 case HCISETLINKMODE:
e4e8e37c
MH
1895 hdev->link_mode = ((__u16) dr.dev_opt) &
1896 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1897 break;
1898
1899 case HCISETPTYPE:
1900 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
1901 break;
1902
1903 case HCISETACLMTU:
e4e8e37c
MH
1904 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1905 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
1906 break;
1907
1908 case HCISETSCOMTU:
e4e8e37c
MH
1909 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1910 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
1911 break;
1912
1913 default:
1914 err = -EINVAL;
1915 break;
1916 }
e4e8e37c 1917
0736cfa8 1918done:
1da177e4
LT
1919 hci_dev_put(hdev);
1920 return err;
1921}
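
/* Illustrative userspace sketch, an assumption rather than code from
 * this file: the HCISETACLMTU/HCISETSCOMTU cases above read dev_opt as
 * two 16-bit halves, so a caller on a little-endian host packs it as:
 *
 *	struct hci_dev_req dr = { .dev_id = 0 };
 *
 *	dr.dev_opt = (512 << 16) | 8;	// acl_mtu = 512, acl_pkts = 8
 *	err = ioctl(hci_sock_fd, HCISETACLMTU, (unsigned long) &dr);
 *
 * with hci_sock_fd being a previously opened AF_BLUETOOTH raw HCI
 * socket (hypothetical name).
 */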
1922
1923int hci_get_dev_list(void __user *arg)
1924{
8035ded4 1925 struct hci_dev *hdev;
1da177e4
LT
1926 struct hci_dev_list_req *dl;
1927 struct hci_dev_req *dr;
1da177e4
LT
1928 int n = 0, size, err;
1929 __u16 dev_num;
1930
1931 if (get_user(dev_num, (__u16 __user *) arg))
1932 return -EFAULT;
1933
1934 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1935 return -EINVAL;
1936
1937 size = sizeof(*dl) + dev_num * sizeof(*dr);
1938
70f23020
AE
1939 dl = kzalloc(size, GFP_KERNEL);
1940 if (!dl)
1da177e4
LT
1941 return -ENOMEM;
1942
1943 dr = dl->dev_req;
1944
f20d09d5 1945 read_lock(&hci_dev_list_lock);
8035ded4 1946 list_for_each_entry(hdev, &hci_dev_list, list) {
2e84d8db 1947 unsigned long flags = hdev->flags;
c542a06c 1948
2e84d8db
MH
1949 /* When auto-off is configured the transport is actually
1950 * running, but in that case still report the device as
1951 * down.
1952 */
d7a5a11d 1953 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2e84d8db 1954 flags &= ~BIT(HCI_UP);
c542a06c 1955
1da177e4 1956 (dr + n)->dev_id = hdev->id;
2e84d8db 1957 (dr + n)->dev_opt = flags;
c542a06c 1958
1da177e4
LT
1959 if (++n >= dev_num)
1960 break;
1961 }
f20d09d5 1962 read_unlock(&hci_dev_list_lock);
1da177e4
LT
1963
1964 dl->dev_num = n;
1965 size = sizeof(*dl) + n * sizeof(*dr);
1966
1967 err = copy_to_user(arg, dl, size);
1968 kfree(dl);
1969
1970 return err ? -EFAULT : 0;
1971}
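
/* Illustrative userspace sketch, an assumption rather than code from
 * this file: a HCIGETDEVLIST caller sizes its buffer the same way the
 * handler above does:
 *
 *	struct hci_dev_list_req *dl;
 *	__u16 i, dev_num = 16;
 *
 *	dl = calloc(1, sizeof(*dl) + dev_num * sizeof(struct hci_dev_req));
 *	dl->dev_num = dev_num;
 *	if (!ioctl(hci_sock_fd, HCIGETDEVLIST, (unsigned long) dl))
 *		for (i = 0; i < dl->dev_num; i++)
 *			printf("hci%u flags 0x%x\n", dl->dev_req[i].dev_id,
 *			       dl->dev_req[i].dev_opt);
 *	free(dl);
 */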
1972
1973int hci_get_dev_info(void __user *arg)
1974{
1975 struct hci_dev *hdev;
1976 struct hci_dev_info di;
2e84d8db 1977 unsigned long flags;
1da177e4
LT
1978 int err = 0;
1979
1980 if (copy_from_user(&di, arg, sizeof(di)))
1981 return -EFAULT;
1982
70f23020
AE
1983 hdev = hci_dev_get(di.dev_id);
1984 if (!hdev)
1da177e4
LT
1985 return -ENODEV;
1986
2e84d8db
MH
1987 /* When auto-off is configured the transport is actually
1988 * running, but in that case still report the device as
1989 * down.
1990 */
d7a5a11d 1991 if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
2e84d8db
MH
1992 flags = hdev->flags & ~BIT(HCI_UP);
1993 else
1994 flags = hdev->flags;
c542a06c 1995
1da177e4
LT
1996 strcpy(di.name, hdev->name);
1997 di.bdaddr = hdev->bdaddr;
60f2a3ed 1998 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2e84d8db 1999 di.flags = flags;
1da177e4 2000 di.pkt_type = hdev->pkt_type;
572c7f84
JH
2001 if (lmp_bredr_capable(hdev)) {
2002 di.acl_mtu = hdev->acl_mtu;
2003 di.acl_pkts = hdev->acl_pkts;
2004 di.sco_mtu = hdev->sco_mtu;
2005 di.sco_pkts = hdev->sco_pkts;
2006 } else {
2007 di.acl_mtu = hdev->le_mtu;
2008 di.acl_pkts = hdev->le_pkts;
2009 di.sco_mtu = 0;
2010 di.sco_pkts = 0;
2011 }
1da177e4
LT
2012 di.link_policy = hdev->link_policy;
2013 di.link_mode = hdev->link_mode;
2014
2015 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2016 memcpy(&di.features, &hdev->features, sizeof(di.features));
2017
2018 if (copy_to_user(arg, &di, sizeof(di)))
2019 err = -EFAULT;
2020
2021 hci_dev_put(hdev);
2022
2023 return err;
2024}
2025
2026/* ---- Interface to HCI drivers ---- */
2027
611b30f7
MH
2028static int hci_rfkill_set_block(void *data, bool blocked)
2029{
2030 struct hci_dev *hdev = data;
2031
2032 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2033
d7a5a11d 2034 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
0736cfa8
MH
2035 return -EBUSY;
2036
5e130367 2037 if (blocked) {
a1536da2 2038 hci_dev_set_flag(hdev, HCI_RFKILLED);
d7a5a11d
MH
2039 if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
2040 !hci_dev_test_flag(hdev, HCI_CONFIG))
bf543036 2041 hci_dev_do_close(hdev);
5e130367 2042 } else {
a358dc11 2043 hci_dev_clear_flag(hdev, HCI_RFKILLED);
1025c04c 2044 }
611b30f7
MH
2045
2046 return 0;
2047}
2048
2049static const struct rfkill_ops hci_rfkill_ops = {
2050 .set_block = hci_rfkill_set_block,
2051};
2052
ab81cbf9
JH
2053static void hci_power_on(struct work_struct *work)
2054{
2055 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
96570ffc 2056 int err;
ab81cbf9
JH
2057
2058 BT_DBG("%s", hdev->name);
2059
cbed0ca1 2060 err = hci_dev_do_open(hdev);
96570ffc 2061 if (err < 0) {
3ad67582 2062 hci_dev_lock(hdev);
96570ffc 2063 mgmt_set_powered_failed(hdev, err);
3ad67582 2064 hci_dev_unlock(hdev);
ab81cbf9 2065 return;
96570ffc 2066 }
ab81cbf9 2067
a5c8f270
MH
2068 /* During the HCI setup phase, a few error conditions are
2069 * ignored and they need to be checked now. If they are still
2070 * valid, it is important to turn the device back off.
2071 */
d7a5a11d
MH
2072 if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
2073 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
a5c8f270
MH
2074 (hdev->dev_type == HCI_BREDR &&
2075 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2076 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
a358dc11 2077 hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
bf543036 2078 hci_dev_do_close(hdev);
d7a5a11d 2079 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
19202573
JH
2080 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2081 HCI_AUTO_OFF_TIMEOUT);
bf543036 2082 }
ab81cbf9 2083
a69d8927 2084 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
4a964404
MH
2085 /* For unconfigured devices, set the HCI_RAW flag
2086 * so that userspace can easily identify them.
4a964404 2087 */
d7a5a11d 2088 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
4a964404 2089 set_bit(HCI_RAW, &hdev->flags);
0602a8ad
MH
2090
2091 /* For fully configured devices, this will send
2092 * the Index Added event. For unconfigured devices,
2093 * it will send the Unconfigured Index Added event.
2094 *
2095 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2096 * and no event will be sent.
2097 */
2098 mgmt_index_added(hdev);
a69d8927 2099 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
5ea234d3
MH
2100 /* Once the controller is configured, it is
2101 * important to clear the HCI_RAW flag.
2102 */
d7a5a11d 2103 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
5ea234d3
MH
2104 clear_bit(HCI_RAW, &hdev->flags);
2105
d603b76b
MH
2106 /* Powering on the controller with HCI_CONFIG set only
2107 * happens with the transition from unconfigured to
2108 * configured. This will send the Index Added event.
2109 */
744cf19e 2110 mgmt_index_added(hdev);
fee746b0 2111 }
ab81cbf9
JH
2112}
2113
2114static void hci_power_off(struct work_struct *work)
2115{
3243553f 2116 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 2117 power_off.work);
ab81cbf9
JH
2118
2119 BT_DBG("%s", hdev->name);
2120
8ee56540 2121 hci_dev_do_close(hdev);
ab81cbf9
JH
2122}
2123
c7741d16
MH
2124static void hci_error_reset(struct work_struct *work)
2125{
2126 struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
2127
2128 BT_DBG("%s", hdev->name);
2129
2130 if (hdev->hw_error)
2131 hdev->hw_error(hdev, hdev->hw_error_code);
2132 else
2133 BT_ERR("%s hardware error 0x%2.2x", hdev->name,
2134 hdev->hw_error_code);
2135
2136 if (hci_dev_do_close(hdev))
2137 return;
2138
c7741d16
MH
2139 hci_dev_do_open(hdev);
2140}
2141
16ab91ab
JH
2142static void hci_discov_off(struct work_struct *work)
2143{
2144 struct hci_dev *hdev;
16ab91ab
JH
2145
2146 hdev = container_of(work, struct hci_dev, discov_off.work);
2147
2148 BT_DBG("%s", hdev->name);
2149
d1967ff8 2150 mgmt_discoverable_timeout(hdev);
16ab91ab
JH
2151}
2152
35f7498a 2153void hci_uuids_clear(struct hci_dev *hdev)
2aeb9a1a 2154{
4821002c 2155 struct bt_uuid *uuid, *tmp;
2aeb9a1a 2156
4821002c
JH
2157 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2158 list_del(&uuid->list);
2aeb9a1a
JH
2159 kfree(uuid);
2160 }
2aeb9a1a
JH
2161}
2162
35f7498a 2163void hci_link_keys_clear(struct hci_dev *hdev)
55ed8ca1 2164{
0378b597 2165 struct link_key *key;
55ed8ca1 2166
0378b597
JH
2167 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
2168 list_del_rcu(&key->list);
2169 kfree_rcu(key, rcu);
55ed8ca1 2170 }
55ed8ca1
JH
2171}
2172
35f7498a 2173void hci_smp_ltks_clear(struct hci_dev *hdev)
b899efaf 2174{
970d0f1b 2175 struct smp_ltk *k;
b899efaf 2176
970d0f1b
JH
2177 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
2178 list_del_rcu(&k->list);
2179 kfree_rcu(k, rcu);
b899efaf 2180 }
b899efaf
VCG
2181}
2182
970c4e46
JH
2183void hci_smp_irks_clear(struct hci_dev *hdev)
2184{
adae20cb 2185 struct smp_irk *k;
970c4e46 2186
adae20cb
JH
2187 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
2188 list_del_rcu(&k->list);
2189 kfree_rcu(k, rcu);
970c4e46
JH
2190 }
2191}
2192
55ed8ca1
JH
2193struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2194{
8035ded4 2195 struct link_key *k;
55ed8ca1 2196
0378b597
JH
2197 rcu_read_lock();
2198 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
2199 if (bacmp(bdaddr, &k->bdaddr) == 0) {
2200 rcu_read_unlock();
55ed8ca1 2201 return k;
0378b597
JH
2202 }
2203 }
2204 rcu_read_unlock();
55ed8ca1
JH
2205
2206 return NULL;
2207}
2208
745c0ce3 2209static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 2210 u8 key_type, u8 old_key_type)
d25e28ab
JH
2211{
2212 /* Legacy key */
2213 if (key_type < 0x03)
745c0ce3 2214 return true;
d25e28ab
JH
2215
2216 /* Debug keys are insecure so don't store them persistently */
2217 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 2218 return false;
d25e28ab
JH
2219
2220 /* Changed combination key and there's no previous one */
2221 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 2222 return false;
d25e28ab
JH
2223
2224 /* Security mode 3 case */
2225 if (!conn)
745c0ce3 2226 return true;
d25e28ab 2227
e3befab9
JH
2228 /* BR/EDR key derived using SC from an LE link */
2229 if (conn->type == LE_LINK)
2230 return true;
2231
d25e28ab
JH
2232 /* Neither the local nor the remote side requested no-bonding */
2233 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 2234 return true;
d25e28ab
JH
2235
2236 /* Local side had dedicated bonding as requirement */
2237 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 2238 return true;
d25e28ab
JH
2239
2240 /* Remote side had dedicated bonding as requirement */
2241 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 2242 return true;
d25e28ab
JH
2243
2244 /* If none of the above criteria match, then don't store the key
2245 * persistently */
745c0ce3 2246 return false;
d25e28ab
JH
2247}
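
/* Illustrative examples, not part of the original file, of how the
 * persistence checks above combine:
 *
 *	legacy key (type < 0x03)			-> stored
 *	HCI_LK_DEBUG_COMBINATION			-> never stored
 *	changed combination key, old_key_type == 0xff	-> not stored
 *	both sides general bonding (auth > 0x01)	-> stored
 *	no-bonding both sides, no dedicated bonding	-> not stored
 */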
2248
e804d25d 2249static u8 ltk_role(u8 type)
98a0b845 2250{
e804d25d
JH
2251 if (type == SMP_LTK)
2252 return HCI_ROLE_MASTER;
98a0b845 2253
e804d25d 2254 return HCI_ROLE_SLAVE;
98a0b845
JH
2255}
2256
f3a73d97
JH
2257struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2258 u8 addr_type, u8 role)
75d262c2 2259{
c9839a11 2260 struct smp_ltk *k;
75d262c2 2261
970d0f1b
JH
2262 rcu_read_lock();
2263 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
5378bc56
JH
2264 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
2265 continue;
2266
923e2414 2267 if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
970d0f1b 2268 rcu_read_unlock();
75d262c2 2269 return k;
970d0f1b
JH
2270 }
2271 }
2272 rcu_read_unlock();
75d262c2
VCG
2273
2274 return NULL;
2275}
75d262c2 2276
970c4e46
JH
2277struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2278{
2279 struct smp_irk *irk;
2280
adae20cb
JH
2281 rcu_read_lock();
2282 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
2283 if (!bacmp(&irk->rpa, rpa)) {
2284 rcu_read_unlock();
970c4e46 2285 return irk;
adae20cb 2286 }
970c4e46
JH
2287 }
2288
adae20cb 2289 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
defce9e8 2290 if (smp_irk_matches(hdev, irk->val, rpa)) {
970c4e46 2291 bacpy(&irk->rpa, rpa);
adae20cb 2292 rcu_read_unlock();
970c4e46
JH
2293 return irk;
2294 }
2295 }
adae20cb 2296 rcu_read_unlock();
970c4e46
JH
2297
2298 return NULL;
2299}
2300
2301struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2302 u8 addr_type)
2303{
2304 struct smp_irk *irk;
2305
6cfc9988
JH
2306 /* Identity Address must be public or static random */
2307 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2308 return NULL;
2309
adae20cb
JH
2310 rcu_read_lock();
2311 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
970c4e46 2312 if (addr_type == irk->addr_type &&
adae20cb
JH
2313 bacmp(bdaddr, &irk->bdaddr) == 0) {
2314 rcu_read_unlock();
970c4e46 2315 return irk;
adae20cb 2316 }
970c4e46 2317 }
adae20cb 2318 rcu_read_unlock();
970c4e46
JH
2319
2320 return NULL;
2321}
2322
567fa2aa 2323struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
7652ff6a
JH
2324 bdaddr_t *bdaddr, u8 *val, u8 type,
2325 u8 pin_len, bool *persistent)
55ed8ca1
JH
2326{
2327 struct link_key *key, *old_key;
745c0ce3 2328 u8 old_key_type;
55ed8ca1
JH
2329
2330 old_key = hci_find_link_key(hdev, bdaddr);
2331 if (old_key) {
2332 old_key_type = old_key->type;
2333 key = old_key;
2334 } else {
12adcf3a 2335 old_key_type = conn ? conn->key_type : 0xff;
0a14ab41 2336 key = kzalloc(sizeof(*key), GFP_KERNEL);
55ed8ca1 2337 if (!key)
567fa2aa 2338 return NULL;
0378b597 2339 list_add_rcu(&key->list, &hdev->link_keys);
55ed8ca1
JH
2340 }
2341
6ed93dc6 2342 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 2343
d25e28ab
JH
2344 /* Some buggy controller combinations generate a changed
2345 * combination key for legacy pairing even when there's no
2346 * previous key */
2347 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 2348 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 2349 type = HCI_LK_COMBINATION;
655fe6ec
JH
2350 if (conn)
2351 conn->key_type = type;
2352 }
d25e28ab 2353
55ed8ca1 2354 bacpy(&key->bdaddr, bdaddr);
9b3b4460 2355 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
2356 key->pin_len = pin_len;
2357
b6020ba0 2358 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 2359 key->type = old_key_type;
4748fed2
JH
2360 else
2361 key->type = type;
2362
7652ff6a
JH
2363 if (persistent)
2364 *persistent = hci_persistent_key(hdev, conn, type,
2365 old_key_type);
4df378a1 2366
567fa2aa 2367 return key;
55ed8ca1
JH
2368}
2369
ca9142b8 2370struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
35d70271 2371 u8 addr_type, u8 type, u8 authenticated,
fe39c7b2 2372 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
75d262c2 2373{
c9839a11 2374 struct smp_ltk *key, *old_key;
e804d25d 2375 u8 role = ltk_role(type);
75d262c2 2376
f3a73d97 2377 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
c9839a11 2378 if (old_key)
75d262c2 2379 key = old_key;
c9839a11 2380 else {
0a14ab41 2381 key = kzalloc(sizeof(*key), GFP_KERNEL);
75d262c2 2382 if (!key)
ca9142b8 2383 return NULL;
970d0f1b 2384 list_add_rcu(&key->list, &hdev->long_term_keys);
75d262c2
VCG
2385 }
2386
75d262c2 2387 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
2388 key->bdaddr_type = addr_type;
2389 memcpy(key->val, tk, sizeof(key->val));
2390 key->authenticated = authenticated;
2391 key->ediv = ediv;
fe39c7b2 2392 key->rand = rand;
c9839a11
VCG
2393 key->enc_size = enc_size;
2394 key->type = type;
75d262c2 2395
ca9142b8 2396 return key;
75d262c2
VCG
2397}
2398
ca9142b8
JH
2399struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2400 u8 addr_type, u8 val[16], bdaddr_t *rpa)
970c4e46
JH
2401{
2402 struct smp_irk *irk;
2403
2404 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2405 if (!irk) {
2406 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2407 if (!irk)
ca9142b8 2408 return NULL;
970c4e46
JH
2409
2410 bacpy(&irk->bdaddr, bdaddr);
2411 irk->addr_type = addr_type;
2412
adae20cb 2413 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
970c4e46
JH
2414 }
2415
2416 memcpy(irk->val, val, 16);
2417 bacpy(&irk->rpa, rpa);
2418
ca9142b8 2419 return irk;
970c4e46
JH
2420}
2421
55ed8ca1
JH
2422int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2423{
2424 struct link_key *key;
2425
2426 key = hci_find_link_key(hdev, bdaddr);
2427 if (!key)
2428 return -ENOENT;
2429
6ed93dc6 2430 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1 2431
0378b597
JH
2432 list_del_rcu(&key->list);
2433 kfree_rcu(key, rcu);
55ed8ca1
JH
2434
2435 return 0;
2436}
2437
e0b2b27e 2438int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
b899efaf 2439{
970d0f1b 2440 struct smp_ltk *k;
c51ffa0b 2441 int removed = 0;
b899efaf 2442
970d0f1b 2443 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
e0b2b27e 2444 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
b899efaf
VCG
2445 continue;
2446
6ed93dc6 2447 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf 2448
970d0f1b
JH
2449 list_del_rcu(&k->list);
2450 kfree_rcu(k, rcu);
c51ffa0b 2451 removed++;
b899efaf
VCG
2452 }
2453
c51ffa0b 2454 return removed ? 0 : -ENOENT;
b899efaf
VCG
2455}
2456
a7ec7338
JH
2457void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2458{
adae20cb 2459 struct smp_irk *k;
a7ec7338 2460
adae20cb 2461 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
a7ec7338
JH
2462 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2463 continue;
2464
2465 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2466
adae20cb
JH
2467 list_del_rcu(&k->list);
2468 kfree_rcu(k, rcu);
a7ec7338
JH
2469 }
2470}
2471
55e76b38
JH
2472bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2473{
2474 struct smp_ltk *k;
4ba9faf3 2475 struct smp_irk *irk;
55e76b38
JH
2476 u8 addr_type;
2477
2478 if (type == BDADDR_BREDR) {
2479 if (hci_find_link_key(hdev, bdaddr))
2480 return true;
2481 return false;
2482 }
2483
2484 /* Convert to HCI addr type which struct smp_ltk uses */
2485 if (type == BDADDR_LE_PUBLIC)
2486 addr_type = ADDR_LE_DEV_PUBLIC;
2487 else
2488 addr_type = ADDR_LE_DEV_RANDOM;
2489
4ba9faf3
JH
2490 irk = hci_get_irk(hdev, bdaddr, addr_type);
2491 if (irk) {
2492 bdaddr = &irk->bdaddr;
2493 addr_type = irk->addr_type;
2494 }
2495
55e76b38
JH
2496 rcu_read_lock();
2497 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
87c8b28d
JH
2498 if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
2499 rcu_read_unlock();
55e76b38 2500 return true;
87c8b28d 2501 }
55e76b38
JH
2502 }
2503 rcu_read_unlock();
2504
2505 return false;
2506}
2507
6bd32326 2508/* HCI command timer function */
65cc2b49 2509static void hci_cmd_timeout(struct work_struct *work)
6bd32326 2510{
65cc2b49
MH
2511 struct hci_dev *hdev = container_of(work, struct hci_dev,
2512 cmd_timer.work);
6bd32326 2513
bda4f23a
AE
2514 if (hdev->sent_cmd) {
2515 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2516 u16 opcode = __le16_to_cpu(sent->opcode);
2517
2518 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2519 } else {
2520 BT_ERR("%s command tx timeout", hdev->name);
2521 }
2522
6bd32326 2523 atomic_set(&hdev->cmd_cnt, 1);
c347b765 2524 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
2525}
2526
2763eda6 2527struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
6928a924 2528 bdaddr_t *bdaddr, u8 bdaddr_type)
2763eda6
SJ
2529{
2530 struct oob_data *data;
2531
6928a924
JH
2532 list_for_each_entry(data, &hdev->remote_oob_data, list) {
2533 if (bacmp(bdaddr, &data->bdaddr) != 0)
2534 continue;
2535 if (data->bdaddr_type != bdaddr_type)
2536 continue;
2537 return data;
2538 }
2763eda6
SJ
2539
2540 return NULL;
2541}
2542
6928a924
JH
2543int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2544 u8 bdaddr_type)
2763eda6
SJ
2545{
2546 struct oob_data *data;
2547
6928a924 2548 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2763eda6
SJ
2549 if (!data)
2550 return -ENOENT;
2551
6928a924 2552 BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);
2763eda6
SJ
2553
2554 list_del(&data->list);
2555 kfree(data);
2556
2557 return 0;
2558}
2559
35f7498a 2560void hci_remote_oob_data_clear(struct hci_dev *hdev)
2763eda6
SJ
2561{
2562 struct oob_data *data, *n;
2563
2564 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2565 list_del(&data->list);
2566 kfree(data);
2567 }
2763eda6
SJ
2568}
2569
0798872e 2570int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
6928a924 2571 u8 bdaddr_type, u8 *hash192, u8 *rand192,
81328d5c 2572 u8 *hash256, u8 *rand256)
2763eda6
SJ
2573{
2574 struct oob_data *data;
2575
6928a924 2576 data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
2763eda6 2577 if (!data) {
0a14ab41 2578 data = kmalloc(sizeof(*data), GFP_KERNEL);
2763eda6
SJ
2579 if (!data)
2580 return -ENOMEM;
2581
2582 bacpy(&data->bdaddr, bdaddr);
6928a924 2583 data->bdaddr_type = bdaddr_type;
2763eda6
SJ
2584 list_add(&data->list, &hdev->remote_oob_data);
2585 }
2586
81328d5c
JH
2587 if (hash192 && rand192) {
2588 memcpy(data->hash192, hash192, sizeof(data->hash192));
2589 memcpy(data->rand192, rand192, sizeof(data->rand192));
f7697b16
MH
2590 if (hash256 && rand256)
2591 data->present = 0x03;
81328d5c
JH
2592 } else {
2593 memset(data->hash192, 0, sizeof(data->hash192));
2594 memset(data->rand192, 0, sizeof(data->rand192));
f7697b16
MH
2595 if (hash256 && rand256)
2596 data->present = 0x02;
2597 else
2598 data->present = 0x00;
0798872e
MH
2599 }
2600
81328d5c
JH
2601 if (hash256 && rand256) {
2602 memcpy(data->hash256, hash256, sizeof(data->hash256));
2603 memcpy(data->rand256, rand256, sizeof(data->rand256));
2604 } else {
2605 memset(data->hash256, 0, sizeof(data->hash256));
2606 memset(data->rand256, 0, sizeof(data->rand256));
f7697b16
MH
2607 if (hash192 && rand192)
2608 data->present = 0x01;
81328d5c 2609 }
0798872e 2610
6ed93dc6 2611 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
2612
2613 return 0;
2614}
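
/* Illustrative summary, not part of the original file, of the
 * data->present values assigned above:
 *
 *	hash192/rand192 only	-> present = 0x01 (P-192 values valid)
 *	hash256/rand256 only	-> present = 0x02 (P-256 values valid)
 *	both pairs		-> present = 0x03
 *	neither			-> present = 0x00
 */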
2615
dcc36c16 2616struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
b9ee0a78 2617 bdaddr_t *bdaddr, u8 type)
b2a66aad 2618{
8035ded4 2619 struct bdaddr_list *b;
b2a66aad 2620
dcc36c16 2621 list_for_each_entry(b, bdaddr_list, list) {
b9ee0a78 2622 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 2623 return b;
b9ee0a78 2624 }
b2a66aad
AJ
2625
2626 return NULL;
2627}
2628
dcc36c16 2629void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
b2a66aad
AJ
2630{
2631 struct list_head *p, *n;
2632
dcc36c16 2633 list_for_each_safe(p, n, bdaddr_list) {
b9ee0a78 2634 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
b2a66aad
AJ
2635
2636 list_del(p);
2637 kfree(b);
2638 }
b2a66aad
AJ
2639}
2640
dcc36c16 2641int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2642{
2643 struct bdaddr_list *entry;
b2a66aad 2644
b9ee0a78 2645 if (!bacmp(bdaddr, BDADDR_ANY))
b2a66aad
AJ
2646 return -EBADF;
2647
dcc36c16 2648 if (hci_bdaddr_list_lookup(list, bdaddr, type))
5e762444 2649 return -EEXIST;
b2a66aad 2650
27f70f3e 2651 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
5e762444
AJ
2652 if (!entry)
2653 return -ENOMEM;
b2a66aad
AJ
2654
2655 bacpy(&entry->bdaddr, bdaddr);
b9ee0a78 2656 entry->bdaddr_type = type;
b2a66aad 2657
dcc36c16 2658 list_add(&entry->list, list);
b2a66aad 2659
2a8357f2 2660 return 0;
b2a66aad
AJ
2661}
2662
dcc36c16 2663int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2664{
2665 struct bdaddr_list *entry;
b2a66aad 2666
35f7498a 2667 if (!bacmp(bdaddr, BDADDR_ANY)) {
dcc36c16 2668 hci_bdaddr_list_clear(list);
35f7498a
JH
2669 return 0;
2670 }
b2a66aad 2671
dcc36c16 2672 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
d2ab0ac1
MH
2673 if (!entry)
2674 return -ENOENT;
2675
2676 list_del(&entry->list);
2677 kfree(entry);
2678
2679 return 0;
2680}
2681
15819a70
AG
2682/* This function requires the caller holds hdev->lock */
2683struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2684 bdaddr_t *addr, u8 addr_type)
2685{
2686 struct hci_conn_params *params;
2687
738f6185
JH
2688 /* The conn params list only contains identity addresses */
2689 if (!hci_is_identity_address(addr, addr_type))
2690 return NULL;
2691
15819a70
AG
2692 list_for_each_entry(params, &hdev->le_conn_params, list) {
2693 if (bacmp(&params->addr, addr) == 0 &&
2694 params->addr_type == addr_type) {
2695 return params;
2696 }
2697 }
2698
2699 return NULL;
2700}
2701
4b10966f 2702/* This function requires the caller holds hdev->lock */
501f8827
JH
2703struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2704 bdaddr_t *addr, u8 addr_type)
a9b0a04c 2705{
912b42ef 2706 struct hci_conn_params *param;
a9b0a04c 2707
738f6185
JH
2708 /* The list only contains identity addresses */
2709 if (!hci_is_identity_address(addr, addr_type))
2710 return NULL;
a9b0a04c 2711
501f8827 2712 list_for_each_entry(param, list, action) {
912b42ef
JH
2713 if (bacmp(&param->addr, addr) == 0 &&
2714 param->addr_type == addr_type)
2715 return param;
4b10966f
MH
2716 }
2717
2718 return NULL;
a9b0a04c
AG
2719}
2720
15819a70 2721/* This function requires the caller holds hdev->lock */
51d167c0
MH
2722struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2723 bdaddr_t *addr, u8 addr_type)
15819a70
AG
2724{
2725 struct hci_conn_params *params;
2726
c46245b3 2727 if (!hci_is_identity_address(addr, addr_type))
51d167c0 2728 return NULL;
a9b0a04c 2729
15819a70 2730 params = hci_conn_params_lookup(hdev, addr, addr_type);
cef952ce 2731 if (params)
51d167c0 2732 return params;
15819a70
AG
2733
2734 params = kzalloc(sizeof(*params), GFP_KERNEL);
2735 if (!params) {
2736 BT_ERR("Out of memory");
51d167c0 2737 return NULL;
15819a70
AG
2738 }
2739
2740 bacpy(&params->addr, addr);
2741 params->addr_type = addr_type;
cef952ce
AG
2742
2743 list_add(&params->list, &hdev->le_conn_params);
93450c75 2744 INIT_LIST_HEAD(&params->action);
cef952ce 2745
bf5b3c8b
MH
2746 params->conn_min_interval = hdev->le_conn_min_interval;
2747 params->conn_max_interval = hdev->le_conn_max_interval;
2748 params->conn_latency = hdev->le_conn_latency;
2749 params->supervision_timeout = hdev->le_supv_timeout;
2750 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2751
2752 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2753
51d167c0 2754 return params;
bf5b3c8b
MH
2755}
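
/* Illustrative sketch, an assumption rather than code from this file:
 * with hdev->lock held, a caller wanting a device auto-connected could
 * build on hci_conn_params_add() roughly like this:
 *
 *	struct hci_conn_params *params;
 *
 *	params = hci_conn_params_add(hdev, &addr, ADDR_LE_DEV_PUBLIC);
 *	if (!params)
 *		return -ENOMEM;
 *
 *	params->auto_connect = HCI_AUTO_CONN_ALWAYS;
 *	list_add(&params->action, &hdev->pend_le_conns);
 *	hci_update_background_scan(hdev);
 *
 * The real mgmt code adds further state checks around this.
 */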
2756
f6c63249 2757static void hci_conn_params_free(struct hci_conn_params *params)
15819a70 2758{
f8aaf9b6 2759 if (params->conn) {
f161dd41 2760 hci_conn_drop(params->conn);
f8aaf9b6
JH
2761 hci_conn_put(params->conn);
2762 }
f161dd41 2763
95305baa 2764 list_del(&params->action);
15819a70
AG
2765 list_del(&params->list);
2766 kfree(params);
f6c63249
JH
2767}
2768
2769/* This function requires the caller holds hdev->lock */
2770void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2771{
2772 struct hci_conn_params *params;
2773
2774 params = hci_conn_params_lookup(hdev, addr, addr_type);
2775 if (!params)
2776 return;
2777
2778 hci_conn_params_free(params);
15819a70 2779
95305baa
JH
2780 hci_update_background_scan(hdev);
2781
15819a70
AG
2782 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2783}
2784
2785/* This function requires the caller holds hdev->lock */
55af49a8 2786void hci_conn_params_clear_disabled(struct hci_dev *hdev)
15819a70
AG
2787{
2788 struct hci_conn_params *params, *tmp;
2789
2790 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
55af49a8
JH
2791 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2792 continue;
15819a70
AG
2793 list_del(&params->list);
2794 kfree(params);
2795 }
2796
55af49a8 2797 BT_DBG("All LE disabled connection parameters were removed");
77a77a30
AG
2798}
2799
2800/* This function requires the caller holds hdev->lock */
373110c5 2801void hci_conn_params_clear_all(struct hci_dev *hdev)
77a77a30 2802{
15819a70 2803 struct hci_conn_params *params, *tmp;
77a77a30 2804
f6c63249
JH
2805 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2806 hci_conn_params_free(params);
77a77a30 2807
a4790dbd 2808 hci_update_background_scan(hdev);
77a77a30 2809
15819a70 2810 BT_DBG("All LE connection parameters were removed");
77a77a30
AG
2811}
2812
1904a853 2813static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
7ba8b4be 2814{
4c87eaab
AG
2815 if (status) {
2816 BT_ERR("Failed to start inquiry: status %d", status);
7ba8b4be 2817
4c87eaab
AG
2818 hci_dev_lock(hdev);
2819 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2820 hci_dev_unlock(hdev);
2821 return;
2822 }
7ba8b4be
AG
2823}
2824
1904a853
MH
2825static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
2826 u16 opcode)
7ba8b4be 2827{
4c87eaab
AG
2828 /* General inquiry access code (GIAC) */
2829 u8 lap[3] = { 0x33, 0x8b, 0x9e };
4c87eaab 2830 struct hci_cp_inquiry cp;
7ba8b4be
AG
2831 int err;
2832
4c87eaab
AG
2833 if (status) {
2834 BT_ERR("Failed to disable LE scanning: status %d", status);
2835 return;
2836 }
7ba8b4be 2837
2d28cfe7
JP
2838 hdev->discovery.scan_start = 0;
2839
4c87eaab
AG
2840 switch (hdev->discovery.type) {
2841 case DISCOV_TYPE_LE:
2842 hci_dev_lock(hdev);
2843 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2844 hci_dev_unlock(hdev);
2845 break;
7ba8b4be 2846
4c87eaab 2847 case DISCOV_TYPE_INTERLEAVED:
4c87eaab 2848 hci_dev_lock(hdev);
7dbfac1d 2849
07d2334a
JP
2850 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2851 &hdev->quirks)) {
2852 /* If we were running LE only scan, change discovery
2853 * state. If we were running both LE and BR/EDR inquiry
2854 * simultaneously, and BR/EDR inquiry is already
2855 * finished, stop discovery, otherwise BR/EDR inquiry
2856 * will stop discovery when finished.
2857 */
2858 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2859 hci_discovery_set_state(hdev,
2860 DISCOVERY_STOPPED);
2861 } else {
baf880a9
JH
2862 struct hci_request req;
2863
07d2334a
JP
2864 hci_inquiry_cache_flush(hdev);
2865
baf880a9
JH
2866 hci_req_init(&req, hdev);
2867
2868 memset(&cp, 0, sizeof(cp));
2869 memcpy(&cp.lap, lap, sizeof(cp.lap));
2870 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2871 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2872
07d2334a
JP
2873 err = hci_req_run(&req, inquiry_complete);
2874 if (err) {
2875 BT_ERR("Inquiry request failed: err %d", err);
2876 hci_discovery_set_state(hdev,
2877 DISCOVERY_STOPPED);
2878 }
4c87eaab 2879 }
7dbfac1d 2880
4c87eaab
AG
2881 hci_dev_unlock(hdev);
2882 break;
7dbfac1d 2883 }
7dbfac1d
AG
2884}
2885
7ba8b4be
AG
2886static void le_scan_disable_work(struct work_struct *work)
2887{
2888 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 2889 le_scan_disable.work);
4c87eaab
AG
2890 struct hci_request req;
2891 int err;
7ba8b4be
AG
2892
2893 BT_DBG("%s", hdev->name);
2894
2d28cfe7
JP
2895 cancel_delayed_work_sync(&hdev->le_scan_restart);
2896
4c87eaab 2897 hci_req_init(&req, hdev);
28b75a89 2898
b1efcc28 2899 hci_req_add_le_scan_disable(&req);
28b75a89 2900
4c87eaab
AG
2901 err = hci_req_run(&req, le_scan_disable_work_complete);
2902 if (err)
2903 BT_ERR("Disable LE scanning request failed: err %d", err);
28b75a89
AG
2904}
2905
2d28cfe7
JP
2906static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
2907 u16 opcode)
2908{
2909 unsigned long timeout, duration, scan_start, now;
2910
2911 BT_DBG("%s", hdev->name);
2912
2913 if (status) {
2914 BT_ERR("Failed to restart LE scan: status %d", status);
2915 return;
2916 }
2917
2918 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2919 !hdev->discovery.scan_start)
2920 return;
2921
2922 /* When the scan was started, hdev->le_scan_disable was queued to
2923 * run "duration" jiffies after scan_start. The restart canceled
2924 * that job, so queue it again with the remaining timeout to make
2925 * sure the scan does not run indefinitely.
2926 */
2927 duration = hdev->discovery.scan_duration;
2928 scan_start = hdev->discovery.scan_start;
2929 now = jiffies;
2930 if (now - scan_start <= duration) {
2931 int elapsed;
2932
2933 if (now >= scan_start)
2934 elapsed = now - scan_start;
2935 else
2936 elapsed = ULONG_MAX - scan_start + now;
2937
2938 timeout = duration - elapsed;
2939 } else {
2940 timeout = 0;
2941 }
2942 queue_delayed_work(hdev->workqueue,
2943 &hdev->le_scan_disable, timeout);
2944}
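
/* Worked example, not part of the original file, of the rescheduling
 * math above: with scan_duration = 10240 jiffies and a restart that
 * completes 4000 jiffies after scan_start, elapsed = 4000 and the
 * disable work is re-queued with timeout = 10240 - 4000 = 6240
 * jiffies, so the overall scan window length is preserved across the
 * restart. The ULONG_MAX branch keeps elapsed meaningful when jiffies
 * wrapped between scan_start and now.
 */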
2945
2946static void le_scan_restart_work(struct work_struct *work)
2947{
2948 struct hci_dev *hdev = container_of(work, struct hci_dev,
2949 le_scan_restart.work);
2950 struct hci_request req;
2951 struct hci_cp_le_set_scan_enable cp;
2952 int err;
2953
2954 BT_DBG("%s", hdev->name);
2955
2956 /* If controller is not scanning we are done. */
d7a5a11d 2957 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2d28cfe7
JP
2958 return;
2959
2960 hci_req_init(&req, hdev);
2961
2962 hci_req_add_le_scan_disable(&req);
2963
2964 memset(&cp, 0, sizeof(cp));
2965 cp.enable = LE_SCAN_ENABLE;
2966 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2967 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2968
2969 err = hci_req_run(&req, le_scan_restart_work_complete);
2970 if (err)
2971 BT_ERR("Restart LE scan request failed: err %d", err);
2972}
2973
a1f4c318
JH
2974/* Copy the Identity Address of the controller.
2975 *
2976 * If the controller has a public BD_ADDR, then by default use that one.
2977 * If this is a LE only controller without a public address, default to
2978 * the static random address.
2979 *
2980 * For debugging purposes it is possible to force controllers with a
2981 * public address to use the static random address instead.
50b5b952
MH
2982 *
2983 * In case BR/EDR has been disabled on a dual-mode controller and
2984 * userspace has configured a static address, then that address
2985 * becomes the identity address instead of the public BR/EDR address.
a1f4c318
JH
2986 */
2987void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2988 u8 *bdaddr_type)
2989{
b7cb93e5 2990 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
50b5b952 2991 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
d7a5a11d 2992 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
50b5b952 2993 bacmp(&hdev->static_addr, BDADDR_ANY))) {
a1f4c318
JH
2994 bacpy(bdaddr, &hdev->static_addr);
2995 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2996 } else {
2997 bacpy(bdaddr, &hdev->bdaddr);
2998 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2999 }
3000}
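
/* Illustrative examples, not part of the original file, of the address
 * selection above:
 *
 *	public BD_ADDR, BR/EDR enabled		-> public address
 *	LE-only (BD_ADDR == BDADDR_ANY),
 *	    static_addr configured		-> static random address
 *	dual-mode with BR/EDR disabled and a
 *	    configured static_addr		-> static random address
 *	HCI_FORCE_STATIC_ADDR set (debugfs)	-> static random address
 */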
3001
9be0dab7
DH
3002/* Alloc HCI device */
3003struct hci_dev *hci_alloc_dev(void)
3004{
3005 struct hci_dev *hdev;
3006
27f70f3e 3007 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
9be0dab7
DH
3008 if (!hdev)
3009 return NULL;
3010
b1b813d4
DH
3011 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3012 hdev->esco_type = (ESCO_HV1);
3013 hdev->link_mode = (HCI_LM_ACCEPT);
b4cb9fb2
MH
3014 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3015 hdev->io_capability = 0x03; /* No Input No Output */
96c2103a 3016 hdev->manufacturer = 0xffff; /* Default to internal use */
bbaf444a
JH
3017 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3018 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
b1b813d4 3019
b1b813d4
DH
3020 hdev->sniff_max_interval = 800;
3021 hdev->sniff_min_interval = 80;
3022
3f959d46 3023 hdev->le_adv_channel_map = 0x07;
628531c9
GL
3024 hdev->le_adv_min_interval = 0x0800;
3025 hdev->le_adv_max_interval = 0x0800;
bef64738
MH
3026 hdev->le_scan_interval = 0x0060;
3027 hdev->le_scan_window = 0x0030;
4e70c7e7
MH
3028 hdev->le_conn_min_interval = 0x0028;
3029 hdev->le_conn_max_interval = 0x0038;
04fb7d90
MH
3030 hdev->le_conn_latency = 0x0000;
3031 hdev->le_supv_timeout = 0x002a;
a8e1bfaa
MH
3032 hdev->le_def_tx_len = 0x001b;
3033 hdev->le_def_tx_time = 0x0148;
3034 hdev->le_max_tx_len = 0x001b;
3035 hdev->le_max_tx_time = 0x0148;
3036 hdev->le_max_rx_len = 0x001b;
3037 hdev->le_max_rx_time = 0x0148;
bef64738 3038
d6bfd59c 3039 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
b9a7a61e 3040 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
31ad1691
AK
3041 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3042 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
d6bfd59c 3043
b1b813d4
DH
3044 mutex_init(&hdev->lock);
3045 mutex_init(&hdev->req_lock);
3046
3047 INIT_LIST_HEAD(&hdev->mgmt_pending);
3048 INIT_LIST_HEAD(&hdev->blacklist);
6659358e 3049 INIT_LIST_HEAD(&hdev->whitelist);
b1b813d4
DH
3050 INIT_LIST_HEAD(&hdev->uuids);
3051 INIT_LIST_HEAD(&hdev->link_keys);
3052 INIT_LIST_HEAD(&hdev->long_term_keys);
970c4e46 3053 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
b1b813d4 3054 INIT_LIST_HEAD(&hdev->remote_oob_data);
d2ab0ac1 3055 INIT_LIST_HEAD(&hdev->le_white_list);
15819a70 3056 INIT_LIST_HEAD(&hdev->le_conn_params);
77a77a30 3057 INIT_LIST_HEAD(&hdev->pend_le_conns);
66f8455a 3058 INIT_LIST_HEAD(&hdev->pend_le_reports);
6b536b5e 3059 INIT_LIST_HEAD(&hdev->conn_hash.list);
b1b813d4
DH
3060
3061 INIT_WORK(&hdev->rx_work, hci_rx_work);
3062 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3063 INIT_WORK(&hdev->tx_work, hci_tx_work);
3064 INIT_WORK(&hdev->power_on, hci_power_on);
c7741d16 3065 INIT_WORK(&hdev->error_reset, hci_error_reset);
b1b813d4 3066
b1b813d4
DH
3067 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3068 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3069 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2d28cfe7 3070 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
b1b813d4 3071
b1b813d4
DH
3072 skb_queue_head_init(&hdev->rx_q);
3073 skb_queue_head_init(&hdev->cmd_q);
3074 skb_queue_head_init(&hdev->raw_q);
3075
3076 init_waitqueue_head(&hdev->req_wait_q);
3077
65cc2b49 3078 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
b1b813d4 3079
b1b813d4
DH
3080 hci_init_sysfs(hdev);
3081 discovery_init(hdev);
203fea01 3082 adv_info_init(hdev);
9be0dab7
DH
3083
3084 return hdev;
3085}
3086EXPORT_SYMBOL(hci_alloc_dev);
3087
3088/* Free HCI device */
3089void hci_free_dev(struct hci_dev *hdev)
3090{
9be0dab7
DH
3091 /* will free via device release */
3092 put_device(&hdev->dev);
3093}
3094EXPORT_SYMBOL(hci_free_dev);
3095
1da177e4
LT
3096/* Register HCI device */
3097int hci_register_dev(struct hci_dev *hdev)
3098{
b1b813d4 3099 int id, error;
1da177e4 3100
74292d5a 3101 if (!hdev->open || !hdev->close || !hdev->send)
1da177e4
LT
3102 return -EINVAL;
3103
08add513
MM
3104 /* Do not allow HCI_AMP devices to register at index 0,
3105 * so the index can be used as the AMP controller ID.
3106 */
3df92b31
SL
3107 switch (hdev->dev_type) {
3108 case HCI_BREDR:
3109 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3110 break;
3111 case HCI_AMP:
3112 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3113 break;
3114 default:
3115 return -EINVAL;
1da177e4 3116 }
8e87d142 3117
3df92b31
SL
3118 if (id < 0)
3119 return id;
3120
1da177e4
LT
3121 sprintf(hdev->name, "hci%d", id);
3122 hdev->id = id;
2d8b3a11
AE
3123
3124 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3125
d8537548
KC
3126 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3127 WQ_MEM_RECLAIM, 1, hdev->name);
33ca954d
DH
3128 if (!hdev->workqueue) {
3129 error = -ENOMEM;
3130 goto err;
3131 }
f48fd9c8 3132
d8537548
KC
3133 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3134 WQ_MEM_RECLAIM, 1, hdev->name);
6ead1bbc
JH
3135 if (!hdev->req_workqueue) {
3136 destroy_workqueue(hdev->workqueue);
3137 error = -ENOMEM;
3138 goto err;
3139 }
3140
0153e2ec
MH
3141 if (!IS_ERR_OR_NULL(bt_debugfs))
3142 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3143
bdc3e0f1
MH
3144 dev_set_name(&hdev->dev, "%s", hdev->name);
3145
3146 error = device_add(&hdev->dev);
33ca954d 3147 if (error < 0)
54506918 3148 goto err_wqueue;
1da177e4 3149
611b30f7 3150 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
3151 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3152 hdev);
611b30f7
MH
3153 if (hdev->rfkill) {
3154 if (rfkill_register(hdev->rfkill) < 0) {
3155 rfkill_destroy(hdev->rfkill);
3156 hdev->rfkill = NULL;
3157 }
3158 }
3159
5e130367 3160 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
a1536da2 3161 hci_dev_set_flag(hdev, HCI_RFKILLED);
5e130367 3162
a1536da2
MH
3163 hci_dev_set_flag(hdev, HCI_SETUP);
3164 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
ce2be9ac 3165
01cd3404 3166 if (hdev->dev_type == HCI_BREDR) {
56f87901
JH
3167 /* Assume BR/EDR support until proven otherwise (such as
3168 * through reading supported features during init.
3169 */
a1536da2 3170 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
56f87901 3171 }
ce2be9ac 3172
fcee3377
GP
3173 write_lock(&hci_dev_list_lock);
3174 list_add(&hdev->list, &hci_dev_list);
3175 write_unlock(&hci_dev_list_lock);
3176
4a964404
MH
3177 /* Devices that are marked for raw-only usage are unconfigured
3178 * and should not be included in normal operation.
fee746b0
MH
3179 */
3180 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
a1536da2 3181 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
fee746b0 3182
1da177e4 3183 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 3184 hci_dev_hold(hdev);
1da177e4 3185
19202573 3186 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 3187
1da177e4 3188 return id;
f48fd9c8 3189
33ca954d
DH
3190err_wqueue:
3191 destroy_workqueue(hdev->workqueue);
6ead1bbc 3192 destroy_workqueue(hdev->req_workqueue);
33ca954d 3193err:
3df92b31 3194 ida_simple_remove(&hci_index_ida, hdev->id);
f48fd9c8 3195
33ca954d 3196 return error;
1da177e4
LT
3197}
3198EXPORT_SYMBOL(hci_register_dev);
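
/* Illustrative driver sketch, an assumption rather than code from this
 * file: a transport driver pairs hci_alloc_dev() with
 * hci_register_dev() along these lines:
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus = HCI_USB;
 *	hdev->open = my_open;	// hypothetical driver callbacks
 *	hdev->close = my_close;
 *	hdev->send = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 *
 * open, close and send are mandatory, matching the check at the top of
 * hci_register_dev().
 */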
3199
3200/* Unregister HCI device */
59735631 3201void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 3202{
2d7cc19e 3203 int id;
ef222013 3204
c13854ce 3205 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 3206
a1536da2 3207 hci_dev_set_flag(hdev, HCI_UNREGISTER);
94324962 3208
3df92b31
SL
3209 id = hdev->id;
3210
f20d09d5 3211 write_lock(&hci_dev_list_lock);
1da177e4 3212 list_del(&hdev->list);
f20d09d5 3213 write_unlock(&hci_dev_list_lock);
1da177e4
LT
3214
3215 hci_dev_do_close(hdev);
3216
b9b5ef18
GP
3217 cancel_work_sync(&hdev->power_on);
3218
ab81cbf9 3219 if (!test_bit(HCI_INIT, &hdev->flags) &&
d7a5a11d
MH
3220 !hci_dev_test_flag(hdev, HCI_SETUP) &&
3221 !hci_dev_test_flag(hdev, HCI_CONFIG)) {
09fd0de5 3222 hci_dev_lock(hdev);
744cf19e 3223 mgmt_index_removed(hdev);
09fd0de5 3224 hci_dev_unlock(hdev);
56e5cb86 3225 }
ab81cbf9 3226
2e58ef3e
JH
3227 /* mgmt_index_removed should take care of emptying the
3228 * pending list */
3229 BUG_ON(!list_empty(&hdev->mgmt_pending));
3230
1da177e4
LT
3231 hci_notify(hdev, HCI_DEV_UNREG);
3232
611b30f7
MH
3233 if (hdev->rfkill) {
3234 rfkill_unregister(hdev->rfkill);
3235 rfkill_destroy(hdev->rfkill);
3236 }
3237
bdc3e0f1 3238 device_del(&hdev->dev);
147e2d59 3239
0153e2ec
MH
3240 debugfs_remove_recursive(hdev->debugfs);
3241
f48fd9c8 3242 destroy_workqueue(hdev->workqueue);
6ead1bbc 3243 destroy_workqueue(hdev->req_workqueue);
f48fd9c8 3244
09fd0de5 3245 hci_dev_lock(hdev);
dcc36c16 3246 hci_bdaddr_list_clear(&hdev->blacklist);
6659358e 3247 hci_bdaddr_list_clear(&hdev->whitelist);
2aeb9a1a 3248 hci_uuids_clear(hdev);
55ed8ca1 3249 hci_link_keys_clear(hdev);
b899efaf 3250 hci_smp_ltks_clear(hdev);
970c4e46 3251 hci_smp_irks_clear(hdev);
2763eda6 3252 hci_remote_oob_data_clear(hdev);
dcc36c16 3253 hci_bdaddr_list_clear(&hdev->le_white_list);
373110c5 3254 hci_conn_params_clear_all(hdev);
22078800 3255 hci_discovery_filter_clear(hdev);
09fd0de5 3256 hci_dev_unlock(hdev);
e2e0cacb 3257
dc946bd8 3258 hci_dev_put(hdev);
3df92b31
SL
3259
3260 ida_simple_remove(&hci_index_ida, id);
1da177e4
LT
3261}
3262EXPORT_SYMBOL(hci_unregister_dev);
3263
3264/* Suspend HCI device */
3265int hci_suspend_dev(struct hci_dev *hdev)
3266{
3267 hci_notify(hdev, HCI_DEV_SUSPEND);
3268 return 0;
3269}
3270EXPORT_SYMBOL(hci_suspend_dev);
3271
3272/* Resume HCI device */
3273int hci_resume_dev(struct hci_dev *hdev)
3274{
3275 hci_notify(hdev, HCI_DEV_RESUME);
3276 return 0;
3277}
3278EXPORT_SYMBOL(hci_resume_dev);
3279
75e0569f
MH
3280/* Reset HCI device */
3281int hci_reset_dev(struct hci_dev *hdev)
3282{
3283 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
3284 struct sk_buff *skb;
3285
3286 skb = bt_skb_alloc(3, GFP_ATOMIC);
3287 if (!skb)
3288 return -ENOMEM;
3289
3290 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3291 memcpy(skb_put(skb, 3), hw_err, 3);
3292
3293 /* Send Hardware Error to upper stack */
3294 return hci_recv_frame(hdev, skb);
3295}
3296EXPORT_SYMBOL(hci_reset_dev);
3297
76bca880 3298/* Receive frame from HCI drivers */
e1a26170 3299int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 3300{
76bca880 3301 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 3302 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
3303 kfree_skb(skb);
3304 return -ENXIO;
3305 }
3306
d82603c6 3307 /* Incoming skb */
76bca880
MH
3308 bt_cb(skb)->incoming = 1;
3309
3310 /* Time stamp */
3311 __net_timestamp(skb);
3312
76bca880 3313 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 3314 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 3315
76bca880
MH
3316 return 0;
3317}
3318EXPORT_SYMBOL(hci_recv_frame);
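
/* Illustrative driver sketch, an assumption rather than code from this
 * file: a transport driver's receive path hands completed packets to
 * the core much like hci_reset_dev() above does for its synthetic
 * Hardware Error event:
 *
 *	skb = bt_skb_alloc(len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;	// or ACL/SCO data
 *	memcpy(skb_put(skb, len), data, len);
 *
 *	return hci_recv_frame(hdev, skb);	// frees skb on error
 */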
3319
1da177e4
LT
3320/* ---- Interface to upper protocols ---- */
3321
1da177e4
LT
3322int hci_register_cb(struct hci_cb *cb)
3323{
3324 BT_DBG("%p name %s", cb, cb->name);
3325
fba7ecf0 3326 mutex_lock(&hci_cb_list_lock);
00629e0f 3327 list_add_tail(&cb->list, &hci_cb_list);
fba7ecf0 3328 mutex_unlock(&hci_cb_list_lock);
1da177e4
LT
3329
3330 return 0;
3331}
3332EXPORT_SYMBOL(hci_register_cb);
3333
3334int hci_unregister_cb(struct hci_cb *cb)
3335{
3336 BT_DBG("%p name %s", cb, cb->name);
3337
fba7ecf0 3338 mutex_lock(&hci_cb_list_lock);
1da177e4 3339 list_del(&cb->list);
fba7ecf0 3340 mutex_unlock(&hci_cb_list_lock);
1da177e4
LT
3341
3342 return 0;
3343}
3344EXPORT_SYMBOL(hci_unregister_cb);
3345
51086991 3346static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4 3347{
cdc52faa
MH
3348 int err;
3349
0d48d939 3350 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 3351
cd82e61c
MH
3352 /* Time stamp */
3353 __net_timestamp(skb);
1da177e4 3354
cd82e61c
MH
3355 /* Send copy to monitor */
3356 hci_send_to_monitor(hdev, skb);
3357
3358 if (atomic_read(&hdev->promisc)) {
3359 /* Send copy to the sockets */
470fe1b5 3360 hci_send_to_sock(hdev, skb);
1da177e4
LT
3361 }
3362
3363 /* Get rid of skb owner, prior to sending to the driver. */
3364 skb_orphan(skb);
3365
cdc52faa
MH
3366 err = hdev->send(hdev, skb);
3367 if (err < 0) {
3368 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
3369 kfree_skb(skb);
3370 }
1da177e4
LT
3371}
3372
1ca3a9d0 3373/* Send HCI command */
07dc93dd
JH
3374int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3375 const void *param)
1ca3a9d0
JH
3376{
3377 struct sk_buff *skb;
3378
3379 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3380
3381 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3382 if (!skb) {
3383 BT_ERR("%s no memory for command", hdev->name);
3384 return -ENOMEM;
3385 }
3386
49c922bb 3387 /* Stand-alone HCI commands must be flagged as
11714b3d
JH
3388 * single-command requests.
3389 */
db6e3e8d 3390 bt_cb(skb)->req.start = true;
11714b3d 3391
1da177e4 3392 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 3393 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
3394
3395 return 0;
3396}
1da177e4
LT
3397
3398/* Get data from the previously sent command */
a9de9248 3399void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
3400{
3401 struct hci_command_hdr *hdr;
3402
3403 if (!hdev->sent_cmd)
3404 return NULL;
3405
3406 hdr = (void *) hdev->sent_cmd->data;
3407
a9de9248 3408 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
3409 return NULL;
3410
f0e09510 3411 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
3412
3413 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3414}
3415
3416/* Send ACL data */
3417static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3418{
3419 struct hci_acl_hdr *hdr;
3420 int len = skb->len;
3421
badff6d0
ACM
3422 skb_push(skb, HCI_ACL_HDR_SIZE);
3423 skb_reset_transport_header(skb);
9c70220b 3424 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
3425 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3426 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
3427}
3428
ee22be7e 3429static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 3430 struct sk_buff *skb, __u16 flags)
1da177e4 3431{
ee22be7e 3432 struct hci_conn *conn = chan->conn;
1da177e4
LT
3433 struct hci_dev *hdev = conn->hdev;
3434 struct sk_buff *list;
3435
087bfd99
GP
3436 skb->len = skb_headlen(skb);
3437 skb->data_len = 0;
3438
3439 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
3440
3441 switch (hdev->dev_type) {
3442 case HCI_BREDR:
3443 hci_add_acl_hdr(skb, conn->handle, flags);
3444 break;
3445 case HCI_AMP:
3446 hci_add_acl_hdr(skb, chan->handle, flags);
3447 break;
3448 default:
3449 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3450 return;
3451 }
087bfd99 3452
70f23020
AE
3453 list = skb_shinfo(skb)->frag_list;
3454 if (!list) {
1da177e4
LT
3455 /* Non-fragmented */
3456 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3457
73d80deb 3458 skb_queue_tail(queue, skb);
1da177e4
LT
3459 } else {
3460 /* Fragmented */
3461 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3462
3463 skb_shinfo(skb)->frag_list = NULL;
3464
9cfd5a23
JR
3465 /* Queue all fragments atomically. We need to use spin_lock_bh
3466 * here because of 6LoWPAN links, as there this function is
3467 * called from softirq and using normal spin lock could cause
3468 * deadlocks.
3469 */
3470 spin_lock_bh(&queue->lock);
1da177e4 3471
73d80deb 3472 __skb_queue_tail(queue, skb);
e702112f
AE
3473
3474 flags &= ~ACL_START;
3475 flags |= ACL_CONT;
1da177e4
LT
3476 do {
3477 skb = list; list = list->next;
8e87d142 3478
0d48d939 3479 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 3480 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
3481
3482 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3483
73d80deb 3484 __skb_queue_tail(queue, skb);
1da177e4
LT
3485 } while (list);
3486
9cfd5a23 3487 spin_unlock_bh(&queue->lock);
1da177e4 3488 }
73d80deb
LAD
3489}
3490
3491void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3492{
ee22be7e 3493 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 3494
f0e09510 3495 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb 3496
ee22be7e 3497 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 3498
3eff45ea 3499 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 3500}
1da177e4
LT
3501
3502/* Send SCO data */
0d861d8b 3503void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
3504{
3505 struct hci_dev *hdev = conn->hdev;
3506 struct hci_sco_hdr hdr;
3507
3508 BT_DBG("%s len %d", hdev->name, skb->len);
3509
aca3192c 3510 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
3511 hdr.dlen = skb->len;
3512
badff6d0
ACM
3513 skb_push(skb, HCI_SCO_HDR_SIZE);
3514 skb_reset_transport_header(skb);
9c70220b 3515 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4 3516
0d48d939 3517 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 3518
1da177e4 3519 skb_queue_tail(&conn->data_q, skb);
3eff45ea 3520 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 3521}
1da177e4
LT
3522
3523/* ---- HCI TX task (outgoing data) ---- */
3524
3525/* HCI Connection scheduler */
6039aa73
GP
3526static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3527 int *quote)
1da177e4
LT
3528{
3529 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 3530 struct hci_conn *conn = NULL, *c;
abc5de8f 3531 unsigned int num = 0, min = ~0;
1da177e4 3532
8e87d142 3533 /* We don't have to lock device here. Connections are always
1da177e4 3534 * added and removed with TX task disabled. */
bf4c6325
GP
3535
3536 rcu_read_lock();
3537
3538 list_for_each_entry_rcu(c, &h->list, list) {
769be974 3539 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 3540 continue;
769be974
MH
3541
3542 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3543 continue;
3544
1da177e4
LT
3545 num++;
3546
3547 if (c->sent < min) {
3548 min = c->sent;
3549 conn = c;
3550 }
52087a79
LAD
3551
3552 if (hci_conn_num(hdev, type) == num)
3553 break;
1da177e4
LT
3554 }
3555
bf4c6325
GP
3556 rcu_read_unlock();
3557
1da177e4 3558 if (conn) {
6ed58ec5
VT
3559 int cnt, q;
3560
3561 switch (conn->type) {
3562 case ACL_LINK:
3563 cnt = hdev->acl_cnt;
3564 break;
3565 case SCO_LINK:
3566 case ESCO_LINK:
3567 cnt = hdev->sco_cnt;
3568 break;
3569 case LE_LINK:
3570 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3571 break;
3572 default:
3573 cnt = 0;
3574 BT_ERR("Unknown link type");
3575 }
3576
3577 q = cnt / num;
1da177e4
LT
3578 *quote = q ? q : 1;
3579 } else
3580 *quote = 0;
3581
3582 BT_DBG("conn %p quote %d", conn, *quote);
3583 return conn;
3584}
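
/* Worked example, not part of the original file, of the quota math
 * above: with three ACL connections holding queued data and
 * hdev->acl_cnt == 8 free buffers, num == 3 and the least-used
 * connection is granted q = 8 / 3 = 2 packets this round; when
 * cnt / num rounds down to zero the quote is clamped to 1 so the
 * scheduler still makes progress.
 */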
3585
6039aa73 3586static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
3587{
3588 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 3589 struct hci_conn *c;
1da177e4 3590
bae1f5d9 3591 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 3592
bf4c6325
GP
3593 rcu_read_lock();
3594
1da177e4 3595 /* Kill stalled connections */
bf4c6325 3596 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 3597 if (c->type == type && c->sent) {
6ed93dc6
AE
3598 BT_ERR("%s killing stalled connection %pMR",
3599 hdev->name, &c->dst);
bed71748 3600 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
3601 }
3602 }
bf4c6325
GP
3603
3604 rcu_read_unlock();
1da177e4
LT
3605}
3606
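/* Channel-level scheduler: only channels whose head-of-queue skb is at
 * the highest priority currently pending are considered; among those,
 * the channel on the connection with the fewest unacked packets wins.
 * The quota is that connection's equal share of the controller's free
 * buffer slots, minimum one.
 */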
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

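/* After a scheduling round, reset the per-round counters and promote
 * channels that had queued data but sent nothing to HCI_PRIO_MAX - 1,
 * so low-priority traffic cannot be starved indefinitely.
 */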
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

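/* Detect an ACL stall: if the controller reports no free buffer slots
 * and nothing has been sent for HCI_ACL_TX_TIMEOUT, kill the stalled
 * links. Skipped while the controller is still unconfigured.
 */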
6039aa73 3745static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 3746{
d7a5a11d 3747 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1da177e4
LT
3748 /* ACL tx timeout must be longer than maximum
3749 * link supervision timeout (40.9 seconds) */
63d2bc1b 3750 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 3751 HCI_ACL_TX_TIMEOUT))
bae1f5d9 3752 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 3753 }
63d2bc1b 3754}
1da177e4 3755
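/* Packet-based ACL scheduling: serve channels in priority order, one
 * packet per free controller slot, ending a channel's run early when a
 * lower-priority packet reaches the head of its queue.
 */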
6039aa73 3756static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
3757{
3758 unsigned int cnt = hdev->acl_cnt;
3759 struct hci_chan *chan;
3760 struct sk_buff *skb;
3761 int quote;
3762
3763 __check_timeout(hdev, cnt);
04837f64 3764
73d80deb 3765 while (hdev->acl_cnt &&
a8c5fb1a 3766 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
3767 u32 priority = (skb_peek(&chan->data_q))->priority;
3768 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 3769 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3770 skb->len, skb->priority);
73d80deb 3771
ec1cce24
LAD
3772 /* Stop if priority has changed */
3773 if (skb->priority < priority)
3774 break;
3775
3776 skb = skb_dequeue(&chan->data_q);
3777
73d80deb 3778 hci_conn_enter_active_mode(chan->conn,
04124681 3779 bt_cb(skb)->force_active);
04837f64 3780
57d17d70 3781 hci_send_frame(hdev, skb);
1da177e4
LT
3782 hdev->acl_last_tx = jiffies;
3783
3784 hdev->acl_cnt--;
73d80deb
LAD
3785 chan->sent++;
3786 chan->conn->sent++;
1da177e4
LT
3787 }
3788 }
02b20f0b
LAD
3789
3790 if (cnt != hdev->acl_cnt)
3791 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
3792}
3793
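/* Block-based ACL scheduling: same structure as the packet-based path,
 * but flow control is accounted in buffer blocks, so each frame costs
 * __get_blocks(hdev, skb) units of both the quota and block_cnt. On an
 * AMP controller the data links are AMP_LINK instead of ACL_LINK.
 */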
6039aa73 3794static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 3795{
63d2bc1b 3796 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
3797 struct hci_chan *chan;
3798 struct sk_buff *skb;
3799 int quote;
bd1eb66b 3800 u8 type;
b71d385a 3801
63d2bc1b 3802 __check_timeout(hdev, cnt);
b71d385a 3803
bd1eb66b
AE
3804 BT_DBG("%s", hdev->name);
3805
3806 if (hdev->dev_type == HCI_AMP)
3807 type = AMP_LINK;
3808 else
3809 type = ACL_LINK;
3810
b71d385a 3811 while (hdev->block_cnt > 0 &&
bd1eb66b 3812 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
3813 u32 priority = (skb_peek(&chan->data_q))->priority;
3814 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3815 int blocks;
3816
3817 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3818 skb->len, skb->priority);
b71d385a
AE
3819
3820 /* Stop if priority has changed */
3821 if (skb->priority < priority)
3822 break;
3823
3824 skb = skb_dequeue(&chan->data_q);
3825
3826 blocks = __get_blocks(hdev, skb);
3827 if (blocks > hdev->block_cnt)
3828 return;
3829
3830 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 3831 bt_cb(skb)->force_active);
b71d385a 3832
57d17d70 3833 hci_send_frame(hdev, skb);
b71d385a
AE
3834 hdev->acl_last_tx = jiffies;
3835
3836 hdev->block_cnt -= blocks;
3837 quote -= blocks;
3838
3839 chan->sent += blocks;
3840 chan->conn->sent += blocks;
3841 }
3842 }
3843
3844 if (cnt != hdev->block_cnt)
bd1eb66b 3845 hci_prio_recalculate(hdev, type);
b71d385a
AE
3846}
3847
6039aa73 3848static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
3849{
3850 BT_DBG("%s", hdev->name);
3851
bd1eb66b
AE
3852 /* No ACL link over BR/EDR controller */
3853 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3854 return;
3855
3856 /* No AMP link over AMP controller */
3857 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
3858 return;
3859
3860 switch (hdev->flow_ctl_mode) {
3861 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3862 hci_sched_acl_pkt(hdev);
3863 break;
3864
3865 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3866 hci_sched_acl_blk(hdev);
3867 break;
3868 }
3869}
3870
/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

6039aa73 3895static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
3896{
3897 struct hci_conn *conn;
3898 struct sk_buff *skb;
3899 int quote;
3900
3901 BT_DBG("%s", hdev->name);
3902
52087a79
LAD
3903 if (!hci_conn_num(hdev, ESCO_LINK))
3904 return;
3905
8fc9ced3
GP
3906 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3907 &quote))) {
b6a0dc82
MH
3908 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3909 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 3910 hci_send_frame(hdev, skb);
b6a0dc82
MH
3911
3912 conn->sent++;
3913 if (conn->sent == ~0)
3914 conn->sent = 0;
3915 }
3916 }
3917}
3918
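/* LE scheduling mirrors the ACL packet path. Controllers without a
 * dedicated LE buffer pool (le_pkts == 0) borrow the ACL credits, and
 * a stalled LE link is killed after the 45 second timeout check.
 */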
6039aa73 3919static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 3920{
73d80deb 3921 struct hci_chan *chan;
6ed58ec5 3922 struct sk_buff *skb;
02b20f0b 3923 int quote, cnt, tmp;
6ed58ec5
VT
3924
3925 BT_DBG("%s", hdev->name);
3926
52087a79
LAD
3927 if (!hci_conn_num(hdev, LE_LINK))
3928 return;
3929
d7a5a11d 3930 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
6ed58ec5
VT
3931 /* LE tx timeout must be longer than maximum
3932 * link supervision timeout (40.9 seconds) */
bae1f5d9 3933 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 3934 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 3935 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
3936 }
3937
3938 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 3939 tmp = cnt;
73d80deb 3940 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
3941 u32 priority = (skb_peek(&chan->data_q))->priority;
3942 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 3943 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3944 skb->len, skb->priority);
6ed58ec5 3945
ec1cce24
LAD
3946 /* Stop if priority has changed */
3947 if (skb->priority < priority)
3948 break;
3949
3950 skb = skb_dequeue(&chan->data_q);
3951
57d17d70 3952 hci_send_frame(hdev, skb);
6ed58ec5
VT
3953 hdev->le_last_tx = jiffies;
3954
3955 cnt--;
73d80deb
LAD
3956 chan->sent++;
3957 chan->conn->sent++;
6ed58ec5
VT
3958 }
3959 }
73d80deb 3960
6ed58ec5
VT
3961 if (hdev->le_pkts)
3962 hdev->le_cnt = cnt;
3963 else
3964 hdev->acl_cnt = cnt;
02b20f0b
LAD
3965
3966 if (cnt != tmp)
3967 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
3968}
3969
3eff45ea 3970static void hci_tx_work(struct work_struct *work)
1da177e4 3971{
3eff45ea 3972 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
3973 struct sk_buff *skb;
3974
6ed58ec5 3975 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 3976 hdev->sco_cnt, hdev->le_cnt);
1da177e4 3977
d7a5a11d 3978 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
52de599e
MH
3979 /* Schedule queues and send stuff to HCI driver */
3980 hci_sched_acl(hdev);
3981 hci_sched_sco(hdev);
3982 hci_sched_esco(hdev);
3983 hci_sched_le(hdev);
3984 }
6ed58ec5 3985
1da177e4
LT
3986 /* Send next queued raw (unknown type) packet */
3987 while ((skb = skb_dequeue(&hdev->raw_q)))
57d17d70 3988 hci_send_frame(hdev, skb);
1da177e4
LT
3989}
3990
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

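/* A request is complete once the head of the command queue starts a
 * new request; an empty queue also means the request is complete.
 */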
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

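/* Resolve the completion callbacks for the request that the completed
 * opcode belongs to. If the command failed part-way through a request,
 * the rest of that request's queued commands are dropped.
 */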
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->req.complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->req.complete;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->req.complete_skb) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->req.complete_skb;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		*req_complete = bt_cb(skb)->req.complete;
		*req_complete_skb = bt_cb(skb)->req.complete_skb;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}

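/* RX work: drain hdev->rx_q, mirroring each frame to the monitor and,
 * in promiscuous mode, to raw sockets, before dispatching it to the
 * event, ACL or SCO handler.
 */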
b78752cc 4153static void hci_rx_work(struct work_struct *work)
1da177e4 4154{
b78752cc 4155 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
4156 struct sk_buff *skb;
4157
4158 BT_DBG("%s", hdev->name);
4159
1da177e4 4160 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
4161 /* Send copy to monitor */
4162 hci_send_to_monitor(hdev, skb);
4163
1da177e4
LT
4164 if (atomic_read(&hdev->promisc)) {
4165 /* Send copy to the sockets */
470fe1b5 4166 hci_send_to_sock(hdev, skb);
1da177e4
LT
4167 }
4168
d7a5a11d 4169 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1da177e4
LT
4170 kfree_skb(skb);
4171 continue;
4172 }
4173
		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

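/* Command work: if the controller has a free command slot (cmd_cnt),
 * dequeue the next command, keep a clone in hdev->sent_cmd for
 * completion matching, send it, and arm the command timeout unless a
 * reset is in flight.
 */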
c347b765 4208static void hci_cmd_work(struct work_struct *work)
1da177e4 4209{
c347b765 4210 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
4211 struct sk_buff *skb;
4212
2104786b
AE
4213 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4214 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 4215
1da177e4 4216 /* Send queued commands */
5a08ecce
AE
4217 if (atomic_read(&hdev->cmd_cnt)) {
4218 skb = skb_dequeue(&hdev->cmd_q);
4219 if (!skb)
4220 return;
4221
7585b97a 4222 kfree_skb(hdev->sent_cmd);
1da177e4 4223
a675d7f1 4224 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 4225 if (hdev->sent_cmd) {
1da177e4 4226 atomic_dec(&hdev->cmd_cnt);
57d17d70 4227 hci_send_frame(hdev, skb);
7bdb8a5c 4228 if (test_bit(HCI_RESET, &hdev->flags))
65cc2b49 4229 cancel_delayed_work(&hdev->cmd_timer);
7bdb8a5c 4230 else
65cc2b49
MH
4231 schedule_delayed_work(&hdev->cmd_timer,
4232 HCI_CMD_TIMEOUT);
1da177e4
LT
4233 } else {
4234 skb_queue_head(&hdev->cmd_q, skb);
c347b765 4235 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
4236 }
4237 }
4238}