/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)         mutex_lock(&d->req_lock)
#define hci_req_unlock(d)       mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        kfree_skb(skb);

        hci_dev_change_flag(hdev, HCI_DUT_MODE);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};

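/* Note: the dut_mode attribute created from these fops (see __hci_init()
 * below) lives in debugfs, typically at
 * /sys/kernel/debug/bluetooth/<hciX>/dut_mode depending on where debugfs
 * is mounted. Writing "Y"/"N" enters or leaves Device Under Test mode;
 * leaving it is implemented by resetting the controller.
 */
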
/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                                  struct sk_buff *skb)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        struct sk_buff *skb;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;
        skb = hdev->req_skb;
        hdev->req_skb = NULL;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0) {
                kfree_skb(skb);
                return ERR_PTR(err);
        }

        if (!skb)
                return ERR_PTR(-ENODATA);

        return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

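/* Illustrative use of the synchronous command helpers (a sketch only;
 * a real caller must hold a valid hdev, as dut_mode_write() above does):
 *
 *      skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *      kfree_skb(skb);
 *
 * The returned skb carries the command's return parameters; -ENODATA
 * means the command completed without any.
 */
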
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void amp_init2(struct hci_request *req)
{
        /* Read Local Supported Features. Not all AMP controllers
         * support this so it's placed conditionally in the second
         * stage init.
         */
        if (req->hdev->commands[14] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init1(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs
         * (0x7d00 = 32000 slots * 0.625 ms = 20 s)
         */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01; /* IO Capability Request */
                events[6] |= 0x02; /* IO Capability Response */
                events[6] |= 0x04; /* User Confirmation Request */
                events[6] |= 0x08; /* User Passkey Request */
                events[6] |= 0x10; /* Remote OOB Data Request */
                events[6] |= 0x20; /* Simple Pairing Complete */
                events[7] |= 0x04; /* User Passkey Notification */
                events[7] |= 0x08; /* Keypress Notification */
                events[7] |= 0x10; /* Remote Host Supported
                                    * Features Notification
                                    */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20; /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

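/* Event mask layout used above: bit b of events[n] enables the event
 * with code (8 * n) + b + 1 in the Set Event Mask command's bit table.
 * For example, events[0] |= 0x10 is event 0x05 (Disconnection Complete)
 * and events[7] |= 0x20 is event 0x3e (LE Meta event).
 */
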
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->dev_type == HCI_AMP)
                return amp_init2(req);

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

        if (lmp_le_capable(hdev))
                le_setup(req);

        /* All Bluetooth 1.2 and later controllers should support the
         * HCI command for reading the local supported commands.
         *
         * Unfortunately some controllers indicate Bluetooth 1.2 support,
         * but do not have support for this command. If that is the case,
         * the driver can quirk the behavior and skip reading the local
         * supported commands.
         */
        if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
            !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, the host features page should
                 * be available as well. However some controllers list
                 * the max_page as 0 as long as SSP has not been enabled.
                 * To achieve proper debugging output, force the minimum
                 * max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
                        u8 mode = 0x01;

                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev) ||
            test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
                u8 mode;

                /* If Extended Inquiry Result events are supported, then
                 * they are clearly preferred over Inquiry Result with RSSI
                 * events.
                 */
                mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

                hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
        }

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
                u8 enable = 1;

                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
                cp.le = 0x01;
                cp.simul = 0x00;
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40; /* Triggered Clock Capture */
                events[1] |= 0x80; /* Synchronization Train Complete */
                events[2] |= 0x10; /* Slave Page Response Timeout */
                events[2] |= 0x20; /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01; /* Synchronization Train Received */
                events[2] |= 0x02; /* CSB Receive */
                events[2] |= 0x04; /* CSB Timeout */
                events[2] |= 0x08; /* Truncated Page Complete */
        }

        /* Enable Authenticated Payload Timeout Expired event if supported */
        if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
                events[2] |= 0x80;

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        hci_setup_event_mask(req);

        if (hdev->commands[6] & 0x20 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_read_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.read_all = 0x01;
                hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (hdev->commands[8] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

        /* Some older Broadcom based Bluetooth 1.2 controllers do not
         * support the Read Page Scan Type command. Check support for
         * this command in the bit mask of supported commands.
         */
        if (hdev->commands[13] & 0x01)
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

        if (lmp_le_capable(hdev)) {
                u8 events[8];

                memset(events, 0, sizeof(events));
                events[0] = 0x0f;

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
                        events[0] |= 0x10; /* LE Long Term Key Request */

                /* If controller supports the Connection Parameters Request
                 * Link Layer Procedure, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
                        events[0] |= 0x20; /* LE Remote Connection
                                            * Parameter Request
                                            */

                /* If the controller supports the Data Length Extension
                 * feature, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
                        events[0] |= 0x40; /* LE Data Length Change */

                /* If the controller supports Extended Scanner Filter
                 * Policies, enable the corresponding event.
                 */
                if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
                        events[1] |= 0x04; /* LE Direct Advertising
                                            * Report
                                            */

                /* If the controller supports the LE Read Local P-256
                 * Public Key command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x02)
                        events[0] |= 0x80; /* LE Read Local P-256
                                            * Public Key Complete
                                            */

                /* If the controller supports the LE Generate DHKey
                 * command, enable the corresponding event.
                 */
                if (hdev->commands[34] & 0x04)
                        events[1] |= 0x01; /* LE Generate DHKey Complete */

                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
                            events);

                if (hdev->commands[25] & 0x40) {
                        /* Read LE Advertising Channel TX Power */
                        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
                }

                if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
                        /* Read LE Maximum Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

                        /* Read LE Suggested Default Data Length */
                        hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

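/* The events[0] = 0x0f default in hci_init3_req() enables the first four
 * LE meta events: LE Connection Complete, LE Advertising Report,
 * LE Connection Update Complete and LE Read Remote Used Features
 * Complete (bits 0-3 of the LE event mask).
 */
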
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Read local codec list if the HCI command is supported */
        if (hdev->commands[29] & 0x20)
                hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

        /* Get MWS transport configuration if the HCI command is supported */
        if (hdev->commands[30] & 0x08)
                hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

        /* Enable Secure Connections if supported and configured */
        if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
            bredr_sc_enabled(hdev)) {
                u8 support = 0x01;

                hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
                            sizeof(support), &support);
        }
}

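/* Note on the hdev->commands[] tests used during init: this array is the
 * 64-octet bitmap returned by Read Local Supported Commands, so
 * hdev->commands[n] & (1 << b) checks octet n, bit b of that table. For
 * example, commands[6] & 0x80 above is octet 6 bit 7, the Delete Stored
 * Link Key command.
 */
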
static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* The Device Under Test (DUT) mode is special and available for
         * all controller types. So just create it early on.
         */
        if (hci_dev_test_flag(hdev, HCI_SETUP)) {
                debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                                    &dut_mode_fops);
        }

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
         * BR/EDR/LE type controllers. AMP controllers only need the
         * first two stages of init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* This function is only called when the controller is actually in
         * configured state. When the controller is marked as unconfigured,
         * this initialization procedure is not run.
         *
         * It means that it is possible that a controller runs through its
         * setup phase and then discovers missing settings. If that is the
         * case, then this function will not be called. It then will only
         * be called during the config phase.
         *
         * So only when in setup phase or config phase, create the debugfs
         * entries and register the SMP channels.
         */
        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG))
                return 0;

        hci_debugfs_create_common(hdev);

        if (lmp_bredr_capable(hdev))
                hci_debugfs_create_bredr(hdev);

        if (lmp_le_capable(hdev))
                hci_debugfs_create_le(hdev);

        return 0;
}

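/* The staged bring-up driven by __hci_init():
 *
 *   stage 1 - reset plus transport-type basics (bredr_init() or
 *             amp_init1()),
 *   stage 2 - capability discovery and BR/EDR vs LE setup,
 *   stage 3 - event masks, link policy and LE configuration,
 *   stage 4 - optional commands gated on the supported-commands bitmap.
 *
 * Each stage runs as one synchronous request with HCI_INIT_TIMEOUT, and
 * AMP controllers stop after stage 2.
 */
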
static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        if (hdev->set_bdaddr)
                hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
        int err;

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                return 0;

        err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

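/* Callers of hci_dev_get() own a reference on the returned hdev and must
 * drop it with hci_dev_put() when done, as the ioctl helpers below do on
 * their done: paths.
 */
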
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_background_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

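/* The discovery state machine normally moves STOPPED -> STARTING ->
 * FINDING -> (optionally RESOLVING) -> STOPPING -> STOPPED; userspace is
 * notified via mgmt_discovering() only on the transitions into FINDING
 * and back to STOPPED.
 */
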
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

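/* The resolve list is kept ordered by ascending abs(rssi), skipping
 * entries whose name lookup is already pending, so that devices with the
 * strongest signal get their names resolved first.
 */
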
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_BREDR) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate a temporary buffer
         * and then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

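/* Note: ir.length counts inquiry-length units of 1.28 s as defined for
 * the HCI Inquiry command; using 2000 ms per unit for timeo above gives
 * the synchronous request headroom beyond the nominal inquiry duration.
 */
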
static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
                ret = -ENODEV;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
            !hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for valid public address or a configured static
                 * random address, but let the HCI setup proceed to
                 * be able to determine if there is a public address
                 * or not.
                 *
                 * In case of user channel usage, it is not important
                 * if a public address or static random address is
                 * available.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_BREDR &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        set_bit(HCI_RUNNING, &hdev->flags);
        hci_notify(hdev, HCI_DEV_OPEN);

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hci_dev_test_flag(hdev, HCI_SETUP)) {
                if (hdev->setup)
                        ret = hdev->setup(hdev);

                /* The transport driver can set these quirks before
                 * creating the HCI device or in its setup callback.
                 *
                 * In case any of them is set, the controller has to
                 * start up as unconfigured.
                 */
                if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
                    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
                        hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

                /* For an unconfigured controller it is required to
                 * read at least the version information provided by
                 * the Read Local Version Information command.
                 *
                 * If the set_bdaddr driver callback is provided, then
                 * also the original Bluetooth public device address
                 * will be read using the Read BD Address command.
                 */
                if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        ret = __hci_unconf_init(hdev);
        }

        if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
                /* If public address change is configured, ensure that
                 * the address gets programmed. If the driver does not
                 * support changing the public address, fail the power
                 * on procedure.
                 */
                if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
                    hdev->set_bdaddr)
                        ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
                else
                        ret = -EADDRNOTAVAIL;
        }

        if (!ret) {
                if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
                        ret = __hci_init(hdev);
        }

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
                    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
                    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
                    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
                    hdev->dev_type == HCI_BREDR) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                clear_bit(HCI_RUNNING, &hdev->flags);
                hci_notify(hdev, HCI_DEV_CLOSE);

                hdev->close(hdev);
                hdev->flags &= BIT(HCI_RAW);
        }

done:
        hci_req_unlock(hdev);
        return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        /* Devices that are marked as unconfigured can only be powered
         * up as user channel. Trying to bring them up as normal devices
         * will result in a failure. Only user channel operation is
         * possible.
         *
         * When this function is called for a user channel, the flag
         * HCI_USER_CHANNEL will be set first before attempting to
         * open the device.
         */
        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
            !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        /* We need to ensure that no other power on/off work is pending
         * before proceeding to call hci_dev_do_open. This is
         * particularly important if the setup procedure has not yet
         * completed.
         */
        if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
                cancel_delayed_work(&hdev->power_off);

        /* After this call it is guaranteed that the setup procedure
         * has finished. This means that error conditions like RFKILL
         * or no valid public or static random address apply.
         */
        flush_workqueue(hdev->req_workqueue);

        /* For controllers not using the management interface and that
         * are brought up using legacy ioctl, set the HCI_BONDABLE bit
         * so that pairing works for them. Once the management interface
         * is in use this bit will be cleared again and userspace has
         * to explicitly enable it.
         */
        if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
            !hci_dev_test_flag(hdev, HCI_MGMT))
                hci_dev_set_flag(hdev, HCI_BONDABLE);

        err = hci_dev_do_open(hdev);

done:
        hci_dev_put(hdev);
        return err;
}

/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
        struct hci_conn_params *p;

        list_for_each_entry(p, &hdev->le_conn_params, list) {
                if (p->conn) {
                        hci_conn_drop(p->conn);
                        hci_conn_put(p->conn);
                        p->conn = NULL;
                }
                list_del_init(&p->action);
        }

        BT_DBG("All LE pending actions cleared");
}

int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
            !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
            test_bit(HCI_UP, &hdev->flags)) {
                /* Execute vendor specific shutdown routine */
                if (hdev->shutdown)
                        hdev->shutdown(hdev);
        }

        cancel_delayed_work(&hdev->power_off);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                cancel_delayed_work_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);
                return 0;
        }

        /* Flush RX and TX works */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->rx_work);

        if (hdev->discov_timeout > 0) {
                cancel_delayed_work(&hdev->discov_off);
                hdev->discov_timeout = 0;
                hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
                hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
        }

        if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
                cancel_delayed_work(&hdev->service_cache);

        cancel_delayed_work_sync(&hdev->le_scan_disable);
        cancel_delayed_work_sync(&hdev->le_scan_restart);

        if (hci_dev_test_flag(hdev, HCI_MGMT))
                cancel_delayed_work_sync(&hdev->rpa_expired);

        if (hdev->adv_instance_timeout) {
                cancel_delayed_work_sync(&hdev->adv_instance_expire);
                hdev->adv_instance_timeout = 0;
        }

        /* Avoid potential lockdep warnings from the *_flush() calls by
         * ensuring the workqueue is empty up front.
         */
        drain_workqueue(hdev->workqueue);

        hci_dev_lock(hdev);

        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

        if (!hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
                if (hdev->dev_type == HCI_BREDR)
                        mgmt_powered(hdev, 0);
        }

        hci_inquiry_cache_flush(hdev);
        hci_pend_le_actions_clear(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        smp_unregister(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
            !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
            test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* flush cmd work */
        flush_work(&hdev->cmd_work);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                cancel_delayed_work_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        clear_bit(HCI_RUNNING, &hdev->flags);
        hci_notify(hdev, HCI_DEV_CLOSE);

        /* After this point our queues are empty
         * and no tasks are scheduled.
         */
        hdev->close(hdev);

        /* Clear flags */
        hdev->flags &= BIT(HCI_RAW);
        hci_dev_clear_volatile_flags(hdev);

        /* Controller radio is available but is currently powered down */
        hdev->amp_status = AMP_STATUS_POWERED_DOWN;

        memset(hdev->eir, 0, sizeof(hdev->eir));
        memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
        bacpy(&hdev->random_addr, BDADDR_ANY);

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
                cancel_delayed_work(&hdev->power_off);

        err = hci_dev_do_close(hdev);

done:
        hci_dev_put(hdev);
        return err;
}

static int hci_dev_do_reset(struct hci_dev *hdev)
{
        int ret;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        /* Avoid potential lockdep warnings from the *_flush() calls by
         * ensuring the workqueue is empty up front.
         */
        drain_workqueue(hdev->workqueue);

        hci_dev_lock(hdev);
        hci_inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

        hci_req_unlock(hdev);
        return ret;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (!test_bit(HCI_UP, &hdev->flags)) {
                err = -ENETDOWN;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        err = hci_dev_do_reset(hdev);

done:
        hci_dev_put(hdev);
        return err;
}

1da177e4
LT
1771int hci_dev_reset_stat(__u16 dev)
1772{
1773 struct hci_dev *hdev;
1774 int ret = 0;
1775
70f23020
AE
1776 hdev = hci_dev_get(dev);
1777 if (!hdev)
1da177e4
LT
1778 return -ENODEV;
1779
d7a5a11d 1780 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
0736cfa8
MH
1781 ret = -EBUSY;
1782 goto done;
1783 }
1784
d7a5a11d 1785 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
fee746b0
MH
1786 ret = -EOPNOTSUPP;
1787 goto done;
1788 }
1789
1da177e4
LT
1790 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1791
0736cfa8 1792done:
1da177e4 1793 hci_dev_put(hdev);
1da177e4
LT
1794 return ret;
1795}

static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !hci_dev_test_and_set_flag(hdev,
							  HCI_CONNECTABLE);
	else
		conn_changed = hci_dev_test_and_clear_flag(hdev,
							   HCI_CONNECTABLE);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !hci_dev_test_and_set_flag(hdev,
							    HCI_DISCOVERABLE);
	} else {
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		discov_changed = hci_dev_test_and_clear_flag(hdev,
							     HCI_DISCOVERABLE);
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
			mgmt_update_adv_data(hdev);

		mgmt_new_settings(hdev);
	}
}
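
/* Illustration (an assumption about typical usage, not code from this file):
 * the scan parameter is the raw HCI Write_Scan_Enable value, so a legacy
 * tool such as "hciconfig hci0 piscan" ends up here with both bits set.
 *
 *	hci_update_scan_state(hdev, SCAN_PAGE | SCAN_INQUIRY);
 *	// -> HCI_CONNECTABLE and HCI_DISCOVERABLE both transition to set,
 *	//    and mgmt_new_settings() notifies management sockets.
 */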

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
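
/* Worked example (illustrative; assumes a little-endian host): HCISETACLMTU
 * packs two 16-bit values into the 32-bit dr.dev_opt, which the code above
 * unpacks pointer-wise: the low half-word is the packet count and the high
 * half-word the MTU.
 *
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,
 *		.dev_opt = (310 << 16) | 10,	// acl_mtu = 310, acl_pkts = 10
 *	};
 *	ioctl(dd, HCISETACLMTU, &dr);	// dd: hypothetical HCI raw socket
 */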

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (blocked) {
		hci_dev_set_flag(hdev, HCI_RFKILLED);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG))
			hci_dev_do_close(hdev);
	} else {
		hci_dev_clear_flag(hdev, HCI_RFKILLED);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		hci_dev_lock(hdev);
		mgmt_set_powered_failed(hdev, err);
		hci_dev_unlock(hdev);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
		hci_dev_do_close(hdev);
	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send the Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_error_reset(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

	BT_DBG("%s", hdev->name);

	if (hdev->hw_error)
		hdev->hw_error(hdev, hdev->hw_error_code);
	else
		BT_ERR("%s hardware error 0x%2.2x", hdev->name,
		       hdev->hw_error_code);

	if (hci_dev_do_close(hdev))
		return;

	hci_dev_do_open(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

static void hci_adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, adv_instance_expire.work);

	BT_DBG("%s", hdev->name);

	mgmt_adv_timeout_expired(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry_rcu(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* BR/EDR key derived using SC from an LE link */
	if (conn->type == LE_LINK)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
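
/* Worked example (illustrative, values assumed): a changed combination key
 * (HCI_LK_CHANGED_COMBINATION, 0x06) arriving on a connection where both
 * sides required general bonding (auth_type 0x04, remote_auth 0x04) passes
 * the "> 0x01" check above, so hci_persistent_key() returns true and the
 * key survives a power cycle. The same key type with old_key_type == 0xff
 * (no previous key) is rejected, which filters out controllers that report
 * spurious key changes during legacy pairing.
 */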

static u8 ltk_role(u8 type)
{
	if (type == SMP_LTK)
		return HCI_ROLE_MASTER;

	return HCI_ROLE_SLAVE;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
			continue;

		if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			rcu_read_unlock();
			return irk;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del_rcu(&key->list);
	kfree_rcu(key, rcu);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k;
	int removed = 0;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct smp_ltk *k;
	struct smp_irk *irk;
	u8 addr_type;

	if (type == BDADDR_BREDR) {
		if (hci_find_link_key(hdev, bdaddr))
			return true;
		return false;
	}

	/* Convert to HCI addr type which struct smp_ltk uses */
	if (type == BDADDR_LE_PUBLIC)
		addr_type = ADDR_LE_DEV_PUBLIC;
	else
		addr_type = ADDR_LE_DEV_RANDOM;

	irk = hci_get_irk(hdev, bdaddr, addr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		addr_type = irk->addr_type;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}

/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list) {
		if (bacmp(bdaddr, &data->bdaddr) != 0)
			continue;
		if (data->bdaddr_type != bdaddr_type)
			continue;
		return data;
	}

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 bdaddr_type)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 bdaddr_type, u8 *hash192, u8 *rand192,
			    u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		data->bdaddr_type = bdaddr_type;
		list_add(&data->list, &hdev->remote_oob_data);
	}

	if (hash192 && rand192) {
		memcpy(data->hash192, hash192, sizeof(data->hash192));
		memcpy(data->rand192, rand192, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x03;
	} else {
		memset(data->hash192, 0, sizeof(data->hash192));
		memset(data->rand192, 0, sizeof(data->rand192));
		if (hash256 && rand256)
			data->present = 0x02;
		else
			data->present = 0x00;
	}

	if (hash256 && rand256) {
		memcpy(data->hash256, hash256, sizeof(data->hash256));
		memcpy(data->rand256, rand256, sizeof(data->rand256));
	} else {
		memset(data->hash256, 0, sizeof(data->hash256));
		memset(data->rand256, 0, sizeof(data->rand256));
		if (hash192 && rand192)
			data->present = 0x01;
	}

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
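
/* Summary (added commentary, derived from the branches above): "present"
 * ends up as a two-bit mask of which OOB value pairs were supplied:
 *
 *	0x00: neither pair		0x01: P-192 hash/rand only
 *	0x02: P-256 hash/rand only	0x03: both pairs
 *
 * e.g. hci_add_remote_oob_data(hdev, &ba, BDADDR_BREDR, h192, r192,
 * NULL, NULL) leaves data->present == 0x01.
 */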

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		if (adv_instance->instance == instance)
			return adv_instance;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *cur_instance;

	cur_instance = hci_find_adv_instance(hdev, instance);
	if (!cur_instance)
		return NULL;

	if (cur_instance == list_last_entry(&hdev->adv_instances,
					    struct adv_info, list))
		return list_first_entry(&hdev->adv_instances,
					struct adv_info, list);
	else
		return list_next_entry(cur_instance, list);
}

/* This function requires the caller holds hdev->lock */
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	BT_DBG("%s removing instance %d", hdev->name, instance);

	if (hdev->cur_adv_instance == instance && hdev->adv_instance_timeout) {
		cancel_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	list_del(&adv_instance->list);
	kfree(adv_instance);

	hdev->adv_instance_cnt--;

	return 0;
}

/* This function requires the caller holds hdev->lock */
void hci_adv_instances_clear(struct hci_dev *hdev)
{
	struct adv_info *adv_instance, *n;

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}

	list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
		list_del(&adv_instance->list);
		kfree(adv_instance);
	}

	hdev->adv_instance_cnt = 0;
}

/* This function requires the caller holds hdev->lock */
int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
			 u16 adv_data_len, u8 *adv_data,
			 u16 scan_rsp_len, u8 *scan_rsp_data,
			 u16 timeout, u16 duration)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (adv_instance) {
		memset(adv_instance->adv_data, 0,
		       sizeof(adv_instance->adv_data));
		memset(adv_instance->scan_rsp_data, 0,
		       sizeof(adv_instance->scan_rsp_data));
	} else {
		if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES ||
		    instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
			return -EOVERFLOW;

		adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
		if (!adv_instance)
			return -ENOMEM;

		adv_instance->pending = true;
		adv_instance->instance = instance;
		list_add(&adv_instance->list, &hdev->adv_instances);
		hdev->adv_instance_cnt++;
	}

	adv_instance->flags = flags;
	adv_instance->adv_data_len = adv_data_len;
	adv_instance->scan_rsp_len = scan_rsp_len;

	if (adv_data_len)
		memcpy(adv_instance->adv_data, adv_data, adv_data_len);

	if (scan_rsp_len)
		memcpy(adv_instance->scan_rsp_data,
		       scan_rsp_data, scan_rsp_len);

	adv_instance->timeout = timeout;
	adv_instance->remaining_time = timeout;

	if (duration == 0)
		adv_instance->duration = HCI_DEFAULT_ADV_DURATION;
	else
		adv_instance->duration = duration;

	BT_DBG("%s for instance %d", hdev->name, instance);

	return 0;
}
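
/* Usage sketch (illustrative; the advertising payload here is hypothetical):
 * register instance 1 carrying a Complete Local Name AD element, no scan
 * response, no overall timeout and the default per-rotation duration.
 *
 *	u8 ad[] = { 0x05, 0x09, 't', 'e', 's', 't' };	// len, type, "test"
 *
 *	hci_dev_lock(hdev);
 *	err = hci_add_adv_instance(hdev, 0x01, 0, sizeof(ad), ad,
 *				   0, NULL, 0, 0);
 *	hci_dev_unlock(hdev);
 */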

struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
					   bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, bdaddr_list) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}
}

int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, list);

	return 0;
}

int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
			return params;
		}
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
						  bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *param;

	list_for_each_entry(param, list, action) {
		if (bacmp(&param->addr, addr) == 0 &&
		    param->addr_type == addr_type)
			return param;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev,
						    bdaddr_t *addr,
						    u8 addr_type)
{
	struct hci_conn_params *param;

	list_for_each_entry(param, &hdev->pend_le_conns, action) {
		if (bacmp(&param->addr, addr) == 0 &&
		    param->addr_type == addr_type &&
		    param->explicit_connect)
			return param;
	}

	list_for_each_entry(param, &hdev->pend_le_reports, action) {
		if (bacmp(&param->addr, addr) == 0 &&
		    param->addr_type == addr_type &&
		    param->explicit_connect)
			return param;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	INIT_LIST_HEAD(&params->action);

	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}

static void hci_conn_params_free(struct hci_conn_params *params)
{
	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
	}

	list_del(&params->action);
	list_del(&params->list);
	kfree(params);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_conn_params_free(params);

	hci_update_background_scan(hdev);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;

		/* If trying to establish a one time connection to a disabled
		 * device, leave the params, but mark them as just once.
		 */
		if (params->explicit_connect) {
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
			continue;
		}

		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_all(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
		hci_conn_params_free(params);

	hci_update_background_scan(hdev);

	BT_DBG("All LE connection parameters were removed");
}

static void inquiry_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
					  u16 opcode)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	hdev->discovery.scan_start = 0;

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_dev_lock(hdev);

		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			/* If we were running an LE only scan, change the
			 * discovery state. If we were running both LE and
			 * BR/EDR inquiry simultaneously, and BR/EDR inquiry
			 * is already finished, stop discovery; otherwise
			 * BR/EDR inquiry will stop discovery when finished.
			 * If we are resolving a remote device name, do not
			 * change the discovery state.
			 */
			if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
			    hdev->discovery.state != DISCOVERY_RESOLVING)
				hci_discovery_set_state(hdev,
							DISCOVERY_STOPPED);
		} else {
			struct hci_request req;

			hci_inquiry_cache_flush(hdev);

			hci_req_init(&req, hdev);

			memset(&cp, 0, sizeof(cp));
			memcpy(&cp.lap, lap, sizeof(cp.lap));
			cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
			hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

			err = hci_req_run(&req, inquiry_complete);
			if (err) {
				BT_ERR("Inquiry request failed: err %d", err);
				hci_discovery_set_state(hdev,
							DISCOVERY_STOPPED);
			}
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	cancel_delayed_work_sync(&hdev->le_scan_restart);

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}

static void le_scan_restart_work_complete(struct hci_dev *hdev, u8 status,
					  u16 opcode)
{
	unsigned long timeout, duration, scan_start, now;

	BT_DBG("%s", hdev->name);

	if (status) {
		BT_ERR("Failed to restart LE scan: status %d", status);
		return;
	}

	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
	    !hdev->discovery.scan_start)
		return;

	/* When the scan was started, hdev->le_scan_disable has been queued
	 * after duration from scan_start. During scan restart this job
	 * has been canceled, and we need to queue it again after proper
	 * timeout, to make sure that scan does not run indefinitely.
	 */
	duration = hdev->discovery.scan_duration;
	scan_start = hdev->discovery.scan_start;
	now = jiffies;
	if (now - scan_start <= duration) {
		int elapsed;

		if (now >= scan_start)
			elapsed = now - scan_start;
		else
			elapsed = ULONG_MAX - scan_start + now;

		timeout = duration - elapsed;
	} else {
		timeout = 0;
	}
	queue_delayed_work(hdev->workqueue,
			   &hdev->le_scan_disable, timeout);
}
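
/* Worked example (hypothetical numbers): with duration = 10240 jiffies and
 * a restart happening 4000 jiffies into the scan, elapsed = 4000 and the
 * disable work is re-queued after timeout = 10240 - 4000 = 6240 jiffies.
 * The ULONG_MAX branch only covers the rare case where the jiffies counter
 * wrapped between scan_start and now, so the plain subtraction would
 * otherwise underflow.
 */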

static void le_scan_restart_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_restart.work);
	struct hci_request req;
	struct hci_cp_le_set_scan_enable cp;
	int err;

	BT_DBG("%s", hdev->name);

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_ENABLE;
	cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_restart_work_complete);
	if (err)
		BT_ERR("Restart LE scan request failed: err %d", err);
}

/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 *
 * In case BR/EDR has been disabled on a dual-mode controller and
 * userspace has configured a static address, then that address
 * becomes the identity address instead of the public BR/EDR address.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}
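
/* Call sketch (illustrative): a caller such as the SMP identity distribution
 * code can use this helper to pick the address handed to a peer.
 *
 *	bdaddr_t ia;
 *	u8 ia_type;
 *
 *	hci_copy_identity_address(hdev, &ia, &ia_type);
 *	// LE-only controller with no public address: ia == static_addr,
 *	// ia_type == ADDR_LE_DEV_RANDOM.
 */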

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_instance_cnt = 0;
	hdev->cur_adv_instance = 0x00;
	hdev->adv_instance_timeout = 0;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;
	hdev->le_def_tx_len = 0x001b;
	hdev->le_def_tx_time = 0x0148;
	hdev->le_max_tx_len = 0x001b;
	hdev->le_max_tx_time = 0x0148;
	hdev->le_max_rx_len = 0x001b;
	hdev->le_max_rx_time = 0x0148;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);
	INIT_LIST_HEAD(&hdev->adv_instances);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->error_reset, hci_error_reset);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, hci_adv_timeout_expire);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		hci_dev_set_flag(hdev, HCI_RFKILLED);

	hci_dev_set_flag(hdev, HCI_SETUP);
	hci_dev_set_flag(hdev, HCI_AUTO_OFF);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hci_dev_set_flag(hdev, HCI_UNREGISTER);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_instances_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_conn_params_clear_all(hdev);
	hci_discovery_filter_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Reset HCI device */
int hci_reset_dev(struct hci_dev *hdev)
{
	const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
	struct sk_buff *skb;

	skb = bt_skb_alloc(3, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	memcpy(skb_put(skb, 3), hw_err, 3);

	/* Send Hardware Error to upper stack */
	return hci_recv_frame(hdev, skb);
}
EXPORT_SYMBOL(hci_reset_dev);
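
/* Note (added commentary): the three bytes injected above form a complete
 * HCI event packet: event code HCI_EV_HARDWARE_ERROR (0x10), parameter
 * length 0x01, and hardware error code 0x00. A transport driver that
 * detects a wedged controller can simply call
 *
 *	hci_reset_dev(hdev);
 *
 * and let the core's hardware-error handling close and reopen the device.
 */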

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

/* Receive diagnostic message from HCI drivers */
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Time stamp */
	__net_timestamp(skb);

	/* Mark as diagnostic packet and send to monitor */
	bt_cb(skb)->pkt_type = HCI_DIAG_PKT;
	hci_send_to_monitor(hdev, skb);

	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL(hci_recv_diag);

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_add_tail(&cb->list, &hci_cb_list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	mutex_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	mutex_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
		kfree_skb(skb);
		return;
	}

	err = hdev->send(hdev, skb);
	if (err < 0) {
		BT_ERR("%s sending frame failed (%d)", hdev->name, err);
		kfree_skb(skb);
	}
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
3611
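/* A sketch of the typical caller: a command-complete handler (modelled
 * on those in hci_event.c) using hci_sent_cmd_data() to recover the
 * parameters of the command being acknowledged. The function name is
 * illustrative.
 */
static void example_cc_write_scan_enable(struct hci_dev *hdev, __u8 status)
{
	__u8 *sent;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	if (!status)
		BT_DBG("%s scan mode set to 0x%2.2x", hdev->name, *sent);
}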
fbef168f
LP
3612/* Send HCI command and wait for command complete event */
3613struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
3614 const void *param, u32 timeout)
3615{
3616 struct sk_buff *skb;
3617
3618 if (!test_bit(HCI_UP, &hdev->flags))
3619 return ERR_PTR(-ENETDOWN);
3620
3621 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
3622
3623 hci_req_lock(hdev);
3624 skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
3625 hci_req_unlock(hdev);
3626
3627 return skb;
3628}
3629EXPORT_SYMBOL(hci_cmd_sync);
3630
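/* A usage sketch for hci_cmd_sync(): issue Read Local Version and parse
 * the command-complete payload. The caller owns the returned skb and
 * must free it; example_read_version() is illustrative, and real code
 * should also validate skb->len before casting.
 */
static int example_read_version(struct hci_dev *hdev)
{
	struct hci_rp_read_local_version *rp;
	struct sk_buff *skb;

	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
			   HCI_INIT_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	rp = (struct hci_rp_read_local_version *)skb->data;
	BT_DBG("%s hci_ver %u hci_rev %u", hdev->name, rp->hci_ver,
	       __le16_to_cpu(rp->hci_rev));

	kfree_skb(skb);
	return 0;
}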
1da177e4
LT
3631/* Send ACL data */
3632static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3633{
3634 struct hci_acl_hdr *hdr;
3635 int len = skb->len;
3636
badff6d0
ACM
3637 skb_push(skb, HCI_ACL_HDR_SIZE);
3638 skb_reset_transport_header(skb);
9c70220b 3639 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
3640 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3641 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
3642}
3643
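/* Worked example of the packing above: for connection handle 0x002a
 * with flags ACL_START (0x02), hci_handle_pack() yields
 * (0x002a & 0x0fff) | (0x02 << 12) = 0x202a, i.e. the 12-bit handle in
 * the low bits and the packet-boundary/broadcast flags in the top four.
 */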
ee22be7e 3644static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 3645 struct sk_buff *skb, __u16 flags)
1da177e4 3646{
ee22be7e 3647 struct hci_conn *conn = chan->conn;
1da177e4
LT
3648 struct hci_dev *hdev = conn->hdev;
3649 struct sk_buff *list;
3650
087bfd99
GP
3651 skb->len = skb_headlen(skb);
3652 skb->data_len = 0;
3653
3654 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
3655
3656 switch (hdev->dev_type) {
3657 case HCI_BREDR:
3658 hci_add_acl_hdr(skb, conn->handle, flags);
3659 break;
3660 case HCI_AMP:
3661 hci_add_acl_hdr(skb, chan->handle, flags);
3662 break;
3663 default:
3664 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3665 return;
3666 }
087bfd99 3667
70f23020
AE
3668 list = skb_shinfo(skb)->frag_list;
3669 if (!list) {
1da177e4
LT
3670 /* Non-fragmented */
3671 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3672
73d80deb 3673 skb_queue_tail(queue, skb);
1da177e4
LT
3674 } else {
3675 /* Fragmented */
3676 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3677
3678 skb_shinfo(skb)->frag_list = NULL;
3679
9cfd5a23
JR
3680 /* Queue all fragments atomically. We need to use spin_lock_bh
3681 * here because of 6LoWPAN links, as there this function is
3682 * called from softirq and using normal spin lock could cause
3683 * deadlocks.
3684 */
3685 spin_lock_bh(&queue->lock);
1da177e4 3686
73d80deb 3687 __skb_queue_tail(queue, skb);
e702112f
AE
3688
3689 flags &= ~ACL_START;
3690 flags |= ACL_CONT;
1da177e4
LT
3691 do {
3692 skb = list; list = list->next;
8e87d142 3693
0d48d939 3694 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 3695 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
3696
3697 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3698
73d80deb 3699 __skb_queue_tail(queue, skb);
1da177e4
LT
3700 } while (list);
3701
9cfd5a23 3702 spin_unlock_bh(&queue->lock);
1da177e4 3703 }
73d80deb
LAD
3704}
3705
3706void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3707{
ee22be7e 3708 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 3709
f0e09510 3710 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb 3711
ee22be7e 3712 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 3713
3eff45ea 3714 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 3715}
1da177e4
LT
3716
3717/* Send SCO data */
0d861d8b 3718void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
3719{
3720 struct hci_dev *hdev = conn->hdev;
3721 struct hci_sco_hdr hdr;
3722
3723 BT_DBG("%s len %d", hdev->name, skb->len);
3724
aca3192c 3725 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
3726 hdr.dlen = skb->len;
3727
badff6d0
ACM
3728 skb_push(skb, HCI_SCO_HDR_SIZE);
3729 skb_reset_transport_header(skb);
9c70220b 3730 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4 3731
0d48d939 3732 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 3733
1da177e4 3734 skb_queue_tail(&conn->data_q, skb);
3eff45ea 3735 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 3736}
1da177e4
LT
3737
3738/* ---- HCI TX task (outgoing data) ---- */
3739
3740/* HCI Connection scheduler */
6039aa73
GP
3741static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3742 int *quote)
1da177e4
LT
3743{
3744 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 3745 struct hci_conn *conn = NULL, *c;
abc5de8f 3746 unsigned int num = 0, min = ~0;
1da177e4 3747
8e87d142 3748 /* We don't have to lock device here. Connections are always
1da177e4 3749 * added and removed with TX task disabled. */
bf4c6325
GP
3750
3751 rcu_read_lock();
3752
3753 list_for_each_entry_rcu(c, &h->list, list) {
769be974 3754 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 3755 continue;
769be974
MH
3756
3757 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3758 continue;
3759
1da177e4
LT
3760 num++;
3761
3762 if (c->sent < min) {
3763 min = c->sent;
3764 conn = c;
3765 }
52087a79
LAD
3766
3767 if (hci_conn_num(hdev, type) == num)
3768 break;
1da177e4
LT
3769 }
3770
bf4c6325
GP
3771 rcu_read_unlock();
3772
1da177e4 3773 if (conn) {
6ed58ec5
VT
3774 int cnt, q;
3775
3776 switch (conn->type) {
3777 case ACL_LINK:
3778 cnt = hdev->acl_cnt;
3779 break;
3780 case SCO_LINK:
3781 case ESCO_LINK:
3782 cnt = hdev->sco_cnt;
3783 break;
3784 case LE_LINK:
3785 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3786 break;
3787 default:
3788 cnt = 0;
3789 BT_ERR("Unknown link type");
3790 }
3791
3792 q = cnt / num;
1da177e4
LT
3793 *quote = q ? q : 1;
3794 } else
3795 *quote = 0;
3796
3797 BT_DBG("conn %p quote %d", conn, *quote);
3798 return conn;
3799}
3800
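/* Worked example of the quota computation above: with 8 free ACL
 * buffers (cnt = 8) shared by 3 busy connections (num = 3), the
 * connection with the fewest outstanding packets (lowest c->sent) gets
 * q = 8 / 3 = 2 packets this round; q = 0 is clamped to 1 so no
 * connection is ever starved.
 */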
6039aa73 3801static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
3802{
3803 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 3804 struct hci_conn *c;
1da177e4 3805
bae1f5d9 3806 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 3807
bf4c6325
GP
3808 rcu_read_lock();
3809
1da177e4 3810 /* Kill stalled connections */
bf4c6325 3811 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 3812 if (c->type == type && c->sent) {
6ed93dc6
AE
3813 BT_ERR("%s killing stalled connection %pMR",
3814 hdev->name, &c->dst);
bed71748 3815 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
3816 }
3817 }
bf4c6325
GP
3818
3819 rcu_read_unlock();
1da177e4
LT
3820}
3821
6039aa73
GP
3822static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3823 int *quote)
1da177e4 3824{
73d80deb
LAD
3825 struct hci_conn_hash *h = &hdev->conn_hash;
3826 struct hci_chan *chan = NULL;
abc5de8f 3827 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 3828 struct hci_conn *conn;
73d80deb
LAD
3829 int cnt, q, conn_num = 0;
3830
3831 BT_DBG("%s", hdev->name);
3832
bf4c6325
GP
3833 rcu_read_lock();
3834
3835 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
3836 struct hci_chan *tmp;
3837
3838 if (conn->type != type)
3839 continue;
3840
3841 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3842 continue;
3843
3844 conn_num++;
3845
8192edef 3846 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
3847 struct sk_buff *skb;
3848
3849 if (skb_queue_empty(&tmp->data_q))
3850 continue;
3851
3852 skb = skb_peek(&tmp->data_q);
3853 if (skb->priority < cur_prio)
3854 continue;
3855
3856 if (skb->priority > cur_prio) {
3857 num = 0;
3858 min = ~0;
3859 cur_prio = skb->priority;
3860 }
3861
3862 num++;
3863
3864 if (conn->sent < min) {
3865 min = conn->sent;
3866 chan = tmp;
3867 }
3868 }
3869
3870 if (hci_conn_num(hdev, type) == conn_num)
3871 break;
3872 }
3873
bf4c6325
GP
3874 rcu_read_unlock();
3875
73d80deb
LAD
3876 if (!chan)
3877 return NULL;
3878
3879 switch (chan->conn->type) {
3880 case ACL_LINK:
3881 cnt = hdev->acl_cnt;
3882 break;
bd1eb66b
AE
3883 case AMP_LINK:
3884 cnt = hdev->block_cnt;
3885 break;
73d80deb
LAD
3886 case SCO_LINK:
3887 case ESCO_LINK:
3888 cnt = hdev->sco_cnt;
3889 break;
3890 case LE_LINK:
3891 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3892 break;
3893 default:
3894 cnt = 0;
3895 BT_ERR("Unknown link type");
3896 }
3897
3898 q = cnt / num;
3899 *quote = q ? q : 1;
3900 BT_DBG("chan %p quote %d", chan, *quote);
3901 return chan;
3902}
3903
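/* Worked example of the selection above: if one channel's head skb has
 * priority 7 and another's priority 5, the priority-5 channel is
 * ignored outright (num and min are reset whenever a higher priority
 * appears); among the priority-7 channels, the one whose connection has
 * the fewest packets in flight (lowest conn->sent) is chosen.
 */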
02b20f0b
LAD
3904static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3905{
3906 struct hci_conn_hash *h = &hdev->conn_hash;
3907 struct hci_conn *conn;
3908 int num = 0;
3909
3910 BT_DBG("%s", hdev->name);
3911
bf4c6325
GP
3912 rcu_read_lock();
3913
3914 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
3915 struct hci_chan *chan;
3916
3917 if (conn->type != type)
3918 continue;
3919
3920 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3921 continue;
3922
3923 num++;
3924
8192edef 3925 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
3926 struct sk_buff *skb;
3927
3928 if (chan->sent) {
3929 chan->sent = 0;
3930 continue;
3931 }
3932
3933 if (skb_queue_empty(&chan->data_q))
3934 continue;
3935
3936 skb = skb_peek(&chan->data_q);
3937 if (skb->priority >= HCI_PRIO_MAX - 1)
3938 continue;
3939
3940 skb->priority = HCI_PRIO_MAX - 1;
3941
3942 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 3943 skb->priority);
02b20f0b
LAD
3944 }
3945
3946 if (hci_conn_num(hdev, type) == num)
3947 break;
3948 }
bf4c6325
GP
3949
3950 rcu_read_unlock();
3951
02b20f0b
LAD
3952}
3953
b71d385a
AE
3954static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3955{
3956 /* Calculate count of blocks used by this packet */
3957 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3958}
3959
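/* Worked example: with hdev->block_len = 339 and a 700-byte skb
 * (696 bytes of payload after the 4-byte ACL header), the packet costs
 * DIV_ROUND_UP(696, 339) = 3 controller blocks, since two blocks
 * (678 bytes) would not be enough.
 */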
6039aa73 3960static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 3961{
d7a5a11d 3962 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
1da177e4
LT
3963 /* ACL tx timeout must be longer than maximum
3964 * link supervision timeout (40.9 seconds) */
63d2bc1b 3965 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 3966 HCI_ACL_TX_TIMEOUT))
bae1f5d9 3967 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 3968 }
63d2bc1b 3969}
1da177e4 3970
6039aa73 3971static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
3972{
3973 unsigned int cnt = hdev->acl_cnt;
3974 struct hci_chan *chan;
3975 struct sk_buff *skb;
3976 int quote;
3977
3978 __check_timeout(hdev, cnt);
04837f64 3979
73d80deb 3980 while (hdev->acl_cnt &&
a8c5fb1a 3981 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
3982 u32 priority = (skb_peek(&chan->data_q))->priority;
3983 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 3984 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 3985 skb->len, skb->priority);
73d80deb 3986
ec1cce24
LAD
3987 /* Stop if priority has changed */
3988 if (skb->priority < priority)
3989 break;
3990
3991 skb = skb_dequeue(&chan->data_q);
3992
73d80deb 3993 hci_conn_enter_active_mode(chan->conn,
04124681 3994 bt_cb(skb)->force_active);
04837f64 3995
57d17d70 3996 hci_send_frame(hdev, skb);
1da177e4
LT
3997 hdev->acl_last_tx = jiffies;
3998
3999 hdev->acl_cnt--;
73d80deb
LAD
4000 chan->sent++;
4001 chan->conn->sent++;
1da177e4
LT
4002 }
4003 }
02b20f0b
LAD
4004
4005 if (cnt != hdev->acl_cnt)
4006 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
4007}
4008
6039aa73 4009static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 4010{
63d2bc1b 4011 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
4012 struct hci_chan *chan;
4013 struct sk_buff *skb;
4014 int quote;
bd1eb66b 4015 u8 type;
b71d385a 4016
63d2bc1b 4017 __check_timeout(hdev, cnt);
b71d385a 4018
bd1eb66b
AE
4019 BT_DBG("%s", hdev->name);
4020
4021 if (hdev->dev_type == HCI_AMP)
4022 type = AMP_LINK;
4023 else
4024 type = ACL_LINK;
4025
b71d385a 4026 while (hdev->block_cnt > 0 &&
bd1eb66b 4027 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
4028 u32 priority = (skb_peek(&chan->data_q))->priority;
4029 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4030 int blocks;
4031
4032 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4033 skb->len, skb->priority);
b71d385a
AE
4034
4035 /* Stop if priority has changed */
4036 if (skb->priority < priority)
4037 break;
4038
4039 skb = skb_dequeue(&chan->data_q);
4040
4041 blocks = __get_blocks(hdev, skb);
4042 if (blocks > hdev->block_cnt)
4043 return;
4044
4045 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 4046 bt_cb(skb)->force_active);
b71d385a 4047
57d17d70 4048 hci_send_frame(hdev, skb);
b71d385a
AE
4049 hdev->acl_last_tx = jiffies;
4050
4051 hdev->block_cnt -= blocks;
4052 quote -= blocks;
4053
4054 chan->sent += blocks;
4055 chan->conn->sent += blocks;
4056 }
4057 }
4058
4059 if (cnt != hdev->block_cnt)
bd1eb66b 4060 hci_prio_recalculate(hdev, type);
b71d385a
AE
4061}
4062
6039aa73 4063static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
4064{
4065 BT_DBG("%s", hdev->name);
4066
bd1eb66b
AE
4067 /* No ACL link over BR/EDR controller */
4068 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4069 return;
4070
4071 /* No AMP link over AMP controller */
4072 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
4073 return;
4074
4075 switch (hdev->flow_ctl_mode) {
4076 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4077 hci_sched_acl_pkt(hdev);
4078 break;
4079
4080 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4081 hci_sched_acl_blk(hdev);
4082 break;
4083 }
4084}
4085
1da177e4 4086/* Schedule SCO */
6039aa73 4087static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
4088{
4089 struct hci_conn *conn;
4090 struct sk_buff *skb;
4091 int quote;
4092
4093 BT_DBG("%s", hdev->name);
4094
52087a79
LAD
4095 if (!hci_conn_num(hdev, SCO_LINK))
4096 return;
4097
1da177e4
LT
4098 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4099 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4100 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4101 hci_send_frame(hdev, skb);
1da177e4
LT
4102
4103 conn->sent++;
4104 if (conn->sent == ~0)
4105 conn->sent = 0;
4106 }
4107 }
4108}
4109
6039aa73 4110static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
4111{
4112 struct hci_conn *conn;
4113 struct sk_buff *skb;
4114 int quote;
4115
4116 BT_DBG("%s", hdev->name);
4117
52087a79
LAD
4118 if (!hci_conn_num(hdev, ESCO_LINK))
4119 return;
4120
8fc9ced3
GP
4121 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4122 &quote))) {
b6a0dc82
MH
4123 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4124 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4125 hci_send_frame(hdev, skb);
b6a0dc82
MH
4126
4127 conn->sent++;
4128 if (conn->sent == ~0)
4129 conn->sent = 0;
4130 }
4131 }
4132}
4133
6039aa73 4134static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 4135{
73d80deb 4136 struct hci_chan *chan;
6ed58ec5 4137 struct sk_buff *skb;
02b20f0b 4138 int quote, cnt, tmp;
6ed58ec5
VT
4139
4140 BT_DBG("%s", hdev->name);
4141
52087a79
LAD
4142 if (!hci_conn_num(hdev, LE_LINK))
4143 return;
4144
d7a5a11d 4145 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
6ed58ec5
VT
4146 /* LE tx timeout must be longer than maximum
4147 * link supervision timeout (40.9 seconds) */
bae1f5d9 4148 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 4149 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 4150 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
4151 }
4152
4153 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 4154 tmp = cnt;
73d80deb 4155 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
4156 u32 priority = (skb_peek(&chan->data_q))->priority;
4157 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4158 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4159 skb->len, skb->priority);
6ed58ec5 4160
ec1cce24
LAD
4161 /* Stop if priority has changed */
4162 if (skb->priority < priority)
4163 break;
4164
4165 skb = skb_dequeue(&chan->data_q);
4166
57d17d70 4167 hci_send_frame(hdev, skb);
6ed58ec5
VT
4168 hdev->le_last_tx = jiffies;
4169
4170 cnt--;
73d80deb
LAD
4171 chan->sent++;
4172 chan->conn->sent++;
6ed58ec5
VT
4173 }
4174 }
73d80deb 4175
6ed58ec5
VT
4176 if (hdev->le_pkts)
4177 hdev->le_cnt = cnt;
4178 else
4179 hdev->acl_cnt = cnt;
02b20f0b
LAD
4180
4181 if (cnt != tmp)
4182 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
4183}
4184
3eff45ea 4185static void hci_tx_work(struct work_struct *work)
1da177e4 4186{
3eff45ea 4187 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
4188 struct sk_buff *skb;
4189
6ed58ec5 4190 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 4191 hdev->sco_cnt, hdev->le_cnt);
1da177e4 4192
d7a5a11d 4193 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
52de599e
MH
4194 /* Schedule queues and send stuff to HCI driver */
4195 hci_sched_acl(hdev);
4196 hci_sched_sco(hdev);
4197 hci_sched_esco(hdev);
4198 hci_sched_le(hdev);
4199 }
6ed58ec5 4200
1da177e4
LT
4201 /* Send next queued raw (unknown type) packet */
4202 while ((skb = skb_dequeue(&hdev->raw_q)))
57d17d70 4203 hci_send_frame(hdev, skb);
1da177e4
LT
4204}
4205
25985edc 4206/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
4207
4208/* ACL data packet */
6039aa73 4209static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
4210{
4211 struct hci_acl_hdr *hdr = (void *) skb->data;
4212 struct hci_conn *conn;
4213 __u16 handle, flags;
4214
4215 skb_pull(skb, HCI_ACL_HDR_SIZE);
4216
4217 handle = __le16_to_cpu(hdr->handle);
4218 flags = hci_flags(handle);
4219 handle = hci_handle(handle);
4220
f0e09510 4221 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 4222 handle, flags);
1da177e4
LT
4223
4224 hdev->stat.acl_rx++;
4225
4226 hci_dev_lock(hdev);
4227 conn = hci_conn_hash_lookup_handle(hdev, handle);
4228 hci_dev_unlock(hdev);
8e87d142 4229
1da177e4 4230 if (conn) {
65983fc7 4231 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 4232
1da177e4 4233 /* Send to upper protocol */
686ebf28
UF
4234 l2cap_recv_acldata(conn, skb, flags);
4235 return;
1da177e4 4236 } else {
8e87d142 4237 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 4238 hdev->name, handle);
1da177e4
LT
4239 }
4240
4241 kfree_skb(skb);
4242}
4243
4244/* SCO data packet */
6039aa73 4245static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
4246{
4247 struct hci_sco_hdr *hdr = (void *) skb->data;
4248 struct hci_conn *conn;
4249 __u16 handle;
4250
4251 skb_pull(skb, HCI_SCO_HDR_SIZE);
4252
4253 handle = __le16_to_cpu(hdr->handle);
4254
f0e09510 4255 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
4256
4257 hdev->stat.sco_rx++;
4258
4259 hci_dev_lock(hdev);
4260 conn = hci_conn_hash_lookup_handle(hdev, handle);
4261 hci_dev_unlock(hdev);
4262
4263 if (conn) {
1da177e4 4264 /* Send to upper protocol */
686ebf28
UF
4265 sco_recv_scodata(conn, skb);
4266 return;
1da177e4 4267 } else {
8e87d142 4268 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 4269 hdev->name, handle);
1da177e4
LT
4270 }
4271
4272 kfree_skb(skb);
4273}
4274
9238f36a
JH
4275static bool hci_req_is_complete(struct hci_dev *hdev)
4276{
4277 struct sk_buff *skb;
4278
4279 skb = skb_peek(&hdev->cmd_q);
4280 if (!skb)
4281 return true;
4282
db6e3e8d 4283 return bt_cb(skb)->req.start;
9238f36a
JH
4284}
4285
42c6b129
JH
4286static void hci_resend_last(struct hci_dev *hdev)
4287{
4288 struct hci_command_hdr *sent;
4289 struct sk_buff *skb;
4290 u16 opcode;
4291
4292 if (!hdev->sent_cmd)
4293 return;
4294
4295 sent = (void *) hdev->sent_cmd->data;
4296 opcode = __le16_to_cpu(sent->opcode);
4297 if (opcode == HCI_OP_RESET)
4298 return;
4299
4300 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4301 if (!skb)
4302 return;
4303
4304 skb_queue_head(&hdev->cmd_q, skb);
4305 queue_work(hdev->workqueue, &hdev->cmd_work);
4306}
4307
e6214487
JH
4308void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4309 hci_req_complete_t *req_complete,
4310 hci_req_complete_skb_t *req_complete_skb)
9238f36a 4311{
9238f36a
JH
4312 struct sk_buff *skb;
4313 unsigned long flags;
4314
4315 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4316
42c6b129
JH
4317 /* If the completed command doesn't match the last one that was
4318 * sent we need to do special handling of it.
9238f36a 4319 */
42c6b129
JH
4320 if (!hci_sent_cmd_data(hdev, opcode)) {
4321 /* Some CSR-based controllers generate a spontaneous
4322 * reset complete event during init and any pending
4323 * command will never be completed. In such a case we
4324 * need to resend whatever was the last sent
4325 * command.
4326 */
4327 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4328 hci_resend_last(hdev);
4329
9238f36a 4330 return;
42c6b129 4331 }
9238f36a
JH
4332
4333 /* If the command succeeded and there's still more commands in
4334 * this request the request is not yet complete.
4335 */
4336 if (!status && !hci_req_is_complete(hdev))
4337 return;
4338
4339 /* If this was the last command in a request the complete
4340 * callback would be found in hdev->sent_cmd instead of the
4341 * command queue (hdev->cmd_q).
4342 */
e6214487
JH
4343 if (bt_cb(hdev->sent_cmd)->req.complete) {
4344 *req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4345 return;
4346 }
53e21fbc 4347
e6214487
JH
4348 if (bt_cb(hdev->sent_cmd)->req.complete_skb) {
4349 *req_complete_skb = bt_cb(hdev->sent_cmd)->req.complete_skb;
4350 return;
9238f36a
JH
4351 }
4352
4353 /* Remove all pending commands belonging to this request */
4354 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4355 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
db6e3e8d 4356 if (bt_cb(skb)->req.start) {
9238f36a
JH
4357 __skb_queue_head(&hdev->cmd_q, skb);
4358 break;
4359 }
4360
e6214487
JH
4361 *req_complete = bt_cb(skb)->req.complete;
4362 *req_complete_skb = bt_cb(skb)->req.complete_skb;
9238f36a
JH
4363 kfree_skb(skb);
4364 }
4365 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
9238f36a
JH
4366}
4367
b78752cc 4368static void hci_rx_work(struct work_struct *work)
1da177e4 4369{
b78752cc 4370 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
4371 struct sk_buff *skb;
4372
4373 BT_DBG("%s", hdev->name);
4374
1da177e4 4375 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
4376 /* Send copy to monitor */
4377 hci_send_to_monitor(hdev, skb);
4378
1da177e4
LT
4379 if (atomic_read(&hdev->promisc)) {
4380 /* Send copy to the sockets */
470fe1b5 4381 hci_send_to_sock(hdev, skb);
1da177e4
LT
4382 }
4383
d7a5a11d 4384 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
1da177e4
LT
4385 kfree_skb(skb);
4386 continue;
4387 }
4388
4389 if (test_bit(HCI_INIT, &hdev->flags)) {
4390 /* Don't process data packets in this state. */
0d48d939 4391 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
4392 case HCI_ACLDATA_PKT:
4393 case HCI_SCODATA_PKT:
4394 kfree_skb(skb);
4395 continue;
3ff50b79 4396 }
1da177e4
LT
4397 }
4398
4399 /* Process frame */
0d48d939 4400 switch (bt_cb(skb)->pkt_type) {
1da177e4 4401 case HCI_EVENT_PKT:
b78752cc 4402 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
4403 hci_event_packet(hdev, skb);
4404 break;
4405
4406 case HCI_ACLDATA_PKT:
4407 BT_DBG("%s ACL data packet", hdev->name);
4408 hci_acldata_packet(hdev, skb);
4409 break;
4410
4411 case HCI_SCODATA_PKT:
4412 BT_DBG("%s SCO data packet", hdev->name);
4413 hci_scodata_packet(hdev, skb);
4414 break;
4415
4416 default:
4417 kfree_skb(skb);
4418 break;
4419 }
4420 }
1da177e4
LT
4421}
4422
c347b765 4423static void hci_cmd_work(struct work_struct *work)
1da177e4 4424{
c347b765 4425 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
4426 struct sk_buff *skb;
4427
2104786b
AE
4428 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4429 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 4430
1da177e4 4431 /* Send queued commands */
5a08ecce
AE
4432 if (atomic_read(&hdev->cmd_cnt)) {
4433 skb = skb_dequeue(&hdev->cmd_q);
4434 if (!skb)
4435 return;
4436
7585b97a 4437 kfree_skb(hdev->sent_cmd);
1da177e4 4438
a675d7f1 4439 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 4440 if (hdev->sent_cmd) {
1da177e4 4441 atomic_dec(&hdev->cmd_cnt);
57d17d70 4442 hci_send_frame(hdev, skb);
7bdb8a5c 4443 if (test_bit(HCI_RESET, &hdev->flags))
65cc2b49 4444 cancel_delayed_work(&hdev->cmd_timer);
7bdb8a5c 4445 else
65cc2b49
MH
4446 schedule_delayed_work(&hdev->cmd_timer,
4447 HCI_CMD_TIMEOUT);
1da177e4
LT
4448 } else {
4449 skb_queue_head(&hdev->cmd_q, skb);
c347b765 4450 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
4451 }
4452 }
4453}