]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - net/bluetooth/hci_core.c
Bluetooth: Refactor hci_dev_open to a separate hci_dev_do_open function
[mirror_ubuntu-bionic-kernel.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
8c520a59 28#include <linux/export.h>
3df92b31 29#include <linux/idr.h>
1da177e4 30
8c520a59 31#include <linux/rfkill.h>
1da177e4
LT
32
33#include <net/bluetooth/bluetooth.h>
34#include <net/bluetooth/hci_core.h>
35
b78752cc 36static void hci_rx_work(struct work_struct *work);
c347b765 37static void hci_cmd_work(struct work_struct *work);
3eff45ea 38static void hci_tx_work(struct work_struct *work);
1da177e4 39
1da177e4
LT
40/* HCI device list */
41LIST_HEAD(hci_dev_list);
42DEFINE_RWLOCK(hci_dev_list_lock);
43
44/* HCI callback list */
45LIST_HEAD(hci_cb_list);
46DEFINE_RWLOCK(hci_cb_list_lock);
47
3df92b31
SL
48/* HCI ID Numbering */
49static DEFINE_IDA(hci_index_ida);
50
1da177e4
LT
51/* ---- HCI notifications ---- */
52
/* Forward a device state event to the HCI socket layer. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
57
58/* ---- HCI requests ---- */
59
42c6b129 60static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
1da177e4 61{
42c6b129 62 BT_DBG("%s result 0x%2.2x", hdev->name, result);
1da177e4
LT
63
64 if (hdev->req_status == HCI_REQ_PEND) {
65 hdev->req_result = result;
66 hdev->req_status = HCI_REQ_DONE;
67 wake_up_interruptible(&hdev->req_wait_q);
68 }
69}
70
71static void hci_req_cancel(struct hci_dev *hdev, int err)
72{
73 BT_DBG("%s err 0x%2.2x", hdev->name, err);
74
75 if (hdev->req_status == HCI_REQ_PEND) {
76 hdev->req_result = err;
77 hdev->req_status = HCI_REQ_CANCELED;
78 wake_up_interruptible(&hdev->req_wait_q);
79 }
80}
81
/* Detach the last received event from @hdev and return it when it matches
 * what the caller expects: the given @event type when @event is non-zero,
 * otherwise a Command Complete for @opcode.  On any mismatch or malformed
 * event the skb is freed and ERR_PTR(-ENODATA) is returned.  On success
 * ownership of the returned skb passes to the caller.
 */
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	/* Take ownership of the stashed event under the device lock */
	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Caller asked for a specific event: only the type must match */
	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	/* Only return the event if it completes the expected command */
	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
136
/* Send a single HCI command synchronously and wait up to @timeout jiffies
 * for @event (or, when @event is zero, for the matching Command Complete).
 * Must be called with the request lock held.  Returns the resulting event
 * skb (caller owns it) or an ERR_PTR on failure, signal or timeout.
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	/* Sleep until hci_req_sync_complete() wakes us, the timeout
	 * expires, or a signal arrives.
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* req_result holds an HCI status code; map it to errno */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result already holds a positive errno value */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
190
191struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
07dc93dd 192 const void *param, u32 timeout)
7b1abbbe
JH
193{
194 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
75e84b7c
JH
195}
196EXPORT_SYMBOL(__hci_cmd_sync);
197
/* Execute request and wait for completion.
 *
 * @func builds the request by queuing commands on the passed hci_request;
 * the calling thread then sleeps until hci_req_sync_complete() fires, the
 * @timeout (in jiffies) expires, or a signal arrives.  Must be called with
 * the request lock held.  Returns 0 on success or a negative errno.
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	/* Let the caller queue its commands into the request */
	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* req_result holds an HCI status code; map it to errno */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result already holds a positive errno value */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
261
01178cd4 262static int hci_req_sync(struct hci_dev *hdev,
42c6b129
JH
263 void (*req)(struct hci_request *req,
264 unsigned long opt),
01178cd4 265 unsigned long opt, __u32 timeout)
1da177e4
LT
266{
267 int ret;
268
7c6a329e
MH
269 if (!test_bit(HCI_UP, &hdev->flags))
270 return -ENETDOWN;
271
1da177e4
LT
272 /* Serialize all requests */
273 hci_req_lock(hdev);
01178cd4 274 ret = __hci_req_sync(hdev, req, opt, timeout);
1da177e4
LT
275 hci_req_unlock(hdev);
276
277 return ret;
278}
279
42c6b129 280static void hci_reset_req(struct hci_request *req, unsigned long opt)
1da177e4 281{
42c6b129 282 BT_DBG("%s %ld", req->hdev->name, opt);
1da177e4
LT
283
284 /* Reset device */
42c6b129
JH
285 set_bit(HCI_RESET, &req->hdev->flags);
286 hci_req_add(req, HCI_OP_RESET, 0, NULL);
1da177e4
LT
287}
288
/* Stage-one init for BR/EDR (and dual-mode) controllers: switch to
 * packet-based flow control and read basic controller identity.
 */
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
302
/* Stage-one init for AMP controllers: AMP links use block-based flow
 * control and need the AMP info and data block size instead of the
 * BR/EDR identity commands.
 */
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}
316
/* First init stage, common to all controller types: optionally reset the
 * controller, then dispatch to the type-specific identity sequence.
 */
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset, unless the driver asked to skip it (e.g. because the
	 * reset is done on close instead)
	 */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
341
/* Stage-two BR/EDR setup: read controller parameters and apply sane
 * defaults (no event filters, ~20 s connection accept timeout).
 */
static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 * 0.625 ms) */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Read page scan parameters; these commands only exist from
	 * Bluetooth 1.2 onwards.
	 */
	if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
373
/* Stage-two LE setup: read the LE controller parameters and, for LE-only
 * controllers, mark LE as enabled.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
397
398static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
399{
400 if (lmp_ext_inq_capable(hdev))
401 return 0x02;
402
403 if (lmp_inq_rssi_capable(hdev))
404 return 0x01;
405
406 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
407 hdev->lmp_subver == 0x0757)
408 return 0x01;
409
410 if (hdev->manufacturer == 15) {
411 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
412 return 0x01;
413 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
414 return 0x01;
415 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
416 return 0x01;
417 }
418
419 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
420 hdev->lmp_subver == 0x1805)
421 return 0x01;
422
423 return 0x00;
424}
425
42c6b129 426static void hci_setup_inquiry_mode(struct hci_request *req)
2177bab5
JH
427{
428 u8 mode;
429
42c6b129 430 mode = hci_get_inquiry_mode(req->hdev);
2177bab5 431
42c6b129 432 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
2177bab5
JH
433}
434
/* Build and send the HCI event mask (and the LE event mask when LE is
 * supported), enabling only the events the controller's feature set can
 * actually generate.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		/* Enable the five defined LE meta events */
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
515
/* Second init stage: per-transport setup, event masks, and feature
 * dependent configuration (SSP, EIR, inquiry mode, extended features,
 * link-level security).
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			/* SSP capable but disabled: clear any stale EIR
			 * data on the controller.
			 */
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
569
42c6b129 570static void hci_setup_link_policy(struct hci_request *req)
2177bab5 571{
42c6b129 572 struct hci_dev *hdev = req->hdev;
2177bab5
JH
573 struct hci_cp_write_def_link_policy cp;
574 u16 link_policy = 0;
575
576 if (lmp_rswitch_capable(hdev))
577 link_policy |= HCI_LP_RSWITCH;
578 if (lmp_hold_capable(hdev))
579 link_policy |= HCI_LP_HOLD;
580 if (lmp_sniff_capable(hdev))
581 link_policy |= HCI_LP_SNIFF;
582 if (lmp_park_capable(hdev))
583 link_policy |= HCI_LP_PARK;
584
585 cp.policy = cpu_to_le16(link_policy);
42c6b129 586 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
2177bab5
JH
587}
588
/* Sync the controller's LE host support setting with the HCI_LE_ENABLED
 * flag, sending Write LE Host Supported only when the value would change.
 */
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	/* Avoid a redundant write when the host setting already matches */
	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
609
/* Build and send the second page of the event mask, enabling the
 * Connectionless Slave Broadcast events matching the controller's
 * advertised CSB master/slave support (features page 2, byte 0).
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x01) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x02) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
637
/* Third init stage: stored-link-key cleanup, default link policy, LE
 * host support, and reading the remaining extended feature pages.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Write Default Link Policy supported? (commands page 5, bit 4) */
	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		hci_set_le_support(req);
		hci_update_ad(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
678
/* Fourth init stage: optional features that depend on results read in
 * the earlier stages (event mask page 2, synchronization train).
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support (features page 2) */
	if (hdev->features[2][0] & 0x04)
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}
691
2177bab5
JH
692static int __hci_init(struct hci_dev *hdev)
693{
694 int err;
695
696 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
697 if (err < 0)
698 return err;
699
700 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
701 * BR/EDR/LE type controllers. AMP controllers only need the
702 * first stage init.
703 */
704 if (hdev->dev_type != HCI_BREDR)
705 return 0;
706
707 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
708 if (err < 0)
709 return err;
710
5d4e7e8d
JH
711 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
712 if (err < 0)
713 return err;
714
715 return __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
2177bab5
JH
716}
717
42c6b129 718static void hci_scan_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
719{
720 __u8 scan = opt;
721
42c6b129 722 BT_DBG("%s %x", req->hdev->name, scan);
1da177e4
LT
723
724 /* Inquiry and Page scans */
42c6b129 725 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
726}
727
42c6b129 728static void hci_auth_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
729{
730 __u8 auth = opt;
731
42c6b129 732 BT_DBG("%s %x", req->hdev->name, auth);
1da177e4
LT
733
734 /* Authentication */
42c6b129 735 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
736}
737
42c6b129 738static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
739{
740 __u8 encrypt = opt;
741
42c6b129 742 BT_DBG("%s %x", req->hdev->name, encrypt);
1da177e4 743
e4e8e37c 744 /* Encryption */
42c6b129 745 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
746}
747
42c6b129 748static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
e4e8e37c
MH
749{
750 __le16 policy = cpu_to_le16(opt);
751
42c6b129 752 BT_DBG("%s %x", req->hdev->name, policy);
e4e8e37c
MH
753
754 /* Default link policy */
42c6b129 755 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
e4e8e37c
MH
756}
757
8e87d142 758/* Get HCI device by index.
1da177e4
LT
759 * Device is held on return. */
760struct hci_dev *hci_dev_get(int index)
761{
8035ded4 762 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
763
764 BT_DBG("%d", index);
765
766 if (index < 0)
767 return NULL;
768
769 read_lock(&hci_dev_list_lock);
8035ded4 770 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
771 if (d->id == index) {
772 hdev = hci_dev_hold(d);
773 break;
774 }
775 }
776 read_unlock(&hci_dev_list_lock);
777 return hdev;
778}
1da177e4
LT
779
780/* ---- Inquiry support ---- */
ff9ef578 781
30dc78e1
JH
782bool hci_discovery_active(struct hci_dev *hdev)
783{
784 struct discovery_state *discov = &hdev->discovery;
785
6fbe195d 786 switch (discov->state) {
343f935b 787 case DISCOVERY_FINDING:
6fbe195d 788 case DISCOVERY_RESOLVING:
30dc78e1
JH
789 return true;
790
6fbe195d
AG
791 default:
792 return false;
793 }
30dc78e1
JH
794}
795
/* Transition the discovery state machine and emit the mgmt "discovering"
 * notifications on the STOPPED and FINDING edges.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		/* STARTING -> STOPPED means discovery never actually ran,
		 * so userspace was never told it started.
		 */
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
821
1f9b9a5d 822void hci_inquiry_cache_flush(struct hci_dev *hdev)
1da177e4 823{
30883512 824 struct discovery_state *cache = &hdev->discovery;
b57c1a56 825 struct inquiry_entry *p, *n;
1da177e4 826
561aafbc
JH
827 list_for_each_entry_safe(p, n, &cache->all, all) {
828 list_del(&p->all);
b57c1a56 829 kfree(p);
1da177e4 830 }
561aafbc
JH
831
832 INIT_LIST_HEAD(&cache->unknown);
833 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
834}
835
a8c5fb1a
GP
836struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
837 bdaddr_t *bdaddr)
1da177e4 838{
30883512 839 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
840 struct inquiry_entry *e;
841
6ed93dc6 842 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 843
561aafbc
JH
844 list_for_each_entry(e, &cache->all, all) {
845 if (!bacmp(&e->data.bdaddr, bdaddr))
846 return e;
847 }
848
849 return NULL;
850}
851
852struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 853 bdaddr_t *bdaddr)
561aafbc 854{
30883512 855 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
856 struct inquiry_entry *e;
857
6ed93dc6 858 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
859
860 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 861 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
862 return e;
863 }
864
865 return NULL;
1da177e4
LT
866}
867
30dc78e1 868struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
869 bdaddr_t *bdaddr,
870 int state)
30dc78e1
JH
871{
872 struct discovery_state *cache = &hdev->discovery;
873 struct inquiry_entry *e;
874
6ed93dc6 875 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
876
877 list_for_each_entry(e, &cache->resolve, list) {
878 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
879 return e;
880 if (!bacmp(&e->data.bdaddr, bdaddr))
881 return e;
882 }
883
884 return NULL;
885}
886
a3d4e20a 887void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
04124681 888 struct inquiry_entry *ie)
a3d4e20a
JH
889{
890 struct discovery_state *cache = &hdev->discovery;
891 struct list_head *pos = &cache->resolve;
892 struct inquiry_entry *p;
893
894 list_del(&ie->list);
895
896 list_for_each_entry(p, &cache->resolve, list) {
897 if (p->name_state != NAME_PENDING &&
a8c5fb1a 898 abs(p->data.rssi) >= abs(ie->data.rssi))
a3d4e20a
JH
899 break;
900 pos = &p->list;
901 }
902
903 list_add(&ie->list, pos);
904}
905
/* Insert or refresh an inquiry cache entry from fresh inquiry @data.
 *
 * @name_known: whether the remote name is already known to the caller.
 * @ssp: out-parameter, set when the remote indicated SSP support.
 *
 * Returns true when the entry's name is (now) known, false when name
 * resolution is still outstanding or the entry could not be allocated.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* A fresh inquiry response invalidates any stored OOB data */
	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* Keep the resolve list RSSI-ordered when the signal
		 * strength of a pending entry changed.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Promote the entry off the unknown list once its name is known */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
963
964static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
965{
30883512 966 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
967 struct inquiry_info *info = (struct inquiry_info *) buf;
968 struct inquiry_entry *e;
969 int copied = 0;
970
561aafbc 971 list_for_each_entry(e, &cache->all, all) {
1da177e4 972 struct inquiry_data *data = &e->data;
b57c1a56
JH
973
974 if (copied >= num)
975 break;
976
1da177e4
LT
977 bacpy(&info->bdaddr, &data->bdaddr);
978 info->pscan_rep_mode = data->pscan_rep_mode;
979 info->pscan_period_mode = data->pscan_period_mode;
980 info->pscan_mode = data->pscan_mode;
981 memcpy(info->dev_class, data->dev_class, 3);
982 info->clock_offset = data->clock_offset;
b57c1a56 983
1da177e4 984 info++;
b57c1a56 985 copied++;
1da177e4
LT
986 }
987
988 BT_DBG("cache %p, copied %d", cache, copied);
989 return copied;
990}
991
42c6b129 992static void hci_inq_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
993{
994 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
42c6b129 995 struct hci_dev *hdev = req->hdev;
1da177e4
LT
996 struct hci_cp_inquiry cp;
997
998 BT_DBG("%s", hdev->name);
999
1000 if (test_bit(HCI_INQUIRY, &hdev->flags))
1001 return;
1002
1003 /* Start Inquiry */
1004 memcpy(&cp.lap, &ir->lap, 3);
1005 cp.length = ir->length;
1006 cp.num_rsp = ir->num_rsp;
42c6b129 1007 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
1008}
1009
3e13fa1e
AG
1010static int wait_inquiry(void *word)
1011{
1012 schedule();
1013 return signal_pending(current);
1014}
1015
1da177e4
LT
1016int hci_inquiry(void __user *arg)
1017{
1018 __u8 __user *ptr = arg;
1019 struct hci_inquiry_req ir;
1020 struct hci_dev *hdev;
1021 int err = 0, do_inquiry = 0, max_rsp;
1022 long timeo;
1023 __u8 *buf;
1024
1025 if (copy_from_user(&ir, ptr, sizeof(ir)))
1026 return -EFAULT;
1027
5a08ecce
AE
1028 hdev = hci_dev_get(ir.dev_id);
1029 if (!hdev)
1da177e4
LT
1030 return -ENODEV;
1031
0736cfa8
MH
1032 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1033 err = -EBUSY;
1034 goto done;
1035 }
1036
09fd0de5 1037 hci_dev_lock(hdev);
8e87d142 1038 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 1039 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1f9b9a5d 1040 hci_inquiry_cache_flush(hdev);
1da177e4
LT
1041 do_inquiry = 1;
1042 }
09fd0de5 1043 hci_dev_unlock(hdev);
1da177e4 1044
04837f64 1045 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
1046
1047 if (do_inquiry) {
01178cd4
JH
1048 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1049 timeo);
70f23020
AE
1050 if (err < 0)
1051 goto done;
3e13fa1e
AG
1052
1053 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1054 * cleared). If it is interrupted by a signal, return -EINTR.
1055 */
1056 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1057 TASK_INTERRUPTIBLE))
1058 return -EINTR;
70f23020 1059 }
1da177e4 1060
8fc9ced3
GP
1061 /* for unlimited number of responses we will use buffer with
1062 * 255 entries
1063 */
1da177e4
LT
1064 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1065
1066 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1067 * copy it to the user space.
1068 */
01df8c31 1069 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 1070 if (!buf) {
1da177e4
LT
1071 err = -ENOMEM;
1072 goto done;
1073 }
1074
09fd0de5 1075 hci_dev_lock(hdev);
1da177e4 1076 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 1077 hci_dev_unlock(hdev);
1da177e4
LT
1078
1079 BT_DBG("num_rsp %d", ir.num_rsp);
1080
1081 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1082 ptr += sizeof(ir);
1083 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 1084 ir.num_rsp))
1da177e4 1085 err = -EFAULT;
8e87d142 1086 } else
1da177e4
LT
1087 err = -EFAULT;
1088
1089 kfree(buf);
1090
1091done:
1092 hci_dev_put(hdev);
1093 return err;
1094}
1095
3f0f524b
JH
1096static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
1097{
1098 u8 ad_len = 0, flags = 0;
1099 size_t name_len;
1100
1101 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
1102 flags |= LE_AD_GENERAL;
1103
1104 if (!lmp_bredr_capable(hdev))
1105 flags |= LE_AD_NO_BREDR;
1106
1107 if (lmp_le_br_capable(hdev))
1108 flags |= LE_AD_SIM_LE_BREDR_CTRL;
1109
1110 if (lmp_host_le_br_capable(hdev))
1111 flags |= LE_AD_SIM_LE_BREDR_HOST;
1112
1113 if (flags) {
1114 BT_DBG("adv flags 0x%02x", flags);
1115
1116 ptr[0] = 2;
1117 ptr[1] = EIR_FLAGS;
1118 ptr[2] = flags;
1119
1120 ad_len += 3;
1121 ptr += 3;
1122 }
1123
1124 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
1125 ptr[0] = 2;
1126 ptr[1] = EIR_TX_POWER;
1127 ptr[2] = (u8) hdev->adv_tx_power;
1128
1129 ad_len += 3;
1130 ptr += 3;
1131 }
1132
1133 name_len = strlen(hdev->dev_name);
1134 if (name_len > 0) {
1135 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
1136
1137 if (name_len > max_len) {
1138 name_len = max_len;
1139 ptr[1] = EIR_NAME_SHORT;
1140 } else
1141 ptr[1] = EIR_NAME_COMPLETE;
1142
1143 ptr[0] = name_len + 1;
1144
1145 memcpy(ptr + 2, hdev->dev_name, name_len);
1146
1147 ad_len += (name_len + 2);
1148 ptr += (name_len + 2);
1149 }
1150
1151 return ad_len;
1152}
1153
/* Queue an LE Set Advertising Data command on @req if the freshly
 * generated advertising data differs from what was last programmed.
 * No-op on controllers without LE support.
 */
void hci_update_ad(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!lmp_le_capable(hdev))
		return;

	/* Zero the whole command so unused tail bytes are deterministic */
	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	/* Nothing changed since the last update; skip the command */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	/* Cache the full (zero-padded) buffer, not just len bytes, so the
	 * memcmp above stays valid for future comparisons */
	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
1178
/* Bring a controller up: open the transport, run the vendor setup stage
 * (HCI_SETUP only), initialize the HCI layer, and notify listeners.
 * On any init failure everything done so far is unwound before return.
 *
 * Returns 0 on success or a negative errno (-ENODEV, -ERFKILL,
 * -EALREADY, -EIO, or an init error).
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Device is being unregistered; refuse to power it up */
	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	/* Check for rfkill but allow the HCI setup stage to proceed
	 * (which in itself doesn't cause any RF activity).
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Open the underlying transport (USB, UART, ...) */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	/* Allow one outstanding command while HCI_INIT is set */
	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	/* Driver-specific setup runs only during the initial SETUP stage */
	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		/* Full HCI init is skipped for raw and user-channel devices */
		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* Tell mgmt the device is powered, unless it is still in
		 * setup, owned by a user channel, or not mgmt-visible */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup: quiesce all work, drop queued
		 * packets and the last sent command, then close the
		 * transport again */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
1264
cbed0ca1
JH
1265/* ---- HCI ioctl helpers ---- */
1266
1267int hci_dev_open(__u16 dev)
1268{
1269 struct hci_dev *hdev;
1270 int err;
1271
1272 hdev = hci_dev_get(dev);
1273 if (!hdev)
1274 return -ENODEV;
1275
1276 err = hci_dev_do_open(hdev);
1277
1278 hci_dev_put(hdev);
1279
1280 return err;
1281}
1282
1da177e4
LT
/* Tear a controller down: cancel pending work, flush caches and
 * connections, optionally reset the hardware, drain every queue, close
 * the transport and clear non-persistent state.  The ordering below is
 * deliberate (works before queues, reset before final cmd flush).
 * Always returns 0.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	/* An auto-power-off may still be pending; it is now redundant */
	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Already down: just stop the command timer and bail out */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* Stop a running discoverable timeout and clear the mode flag */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	/* Report power-off to mgmt unless this was an automatic off */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	/* Drop the reference taken by hci_dev_do_open() */
	hci_dev_put(hdev);
	return 0;
}
1377
1378int hci_dev_close(__u16 dev)
1379{
1380 struct hci_dev *hdev;
1381 int err;
1382
70f23020
AE
1383 hdev = hci_dev_get(dev);
1384 if (!hdev)
1da177e4 1385 return -ENODEV;
8ee56540 1386
0736cfa8
MH
1387 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1388 err = -EBUSY;
1389 goto done;
1390 }
1391
8ee56540
MH
1392 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1393 cancel_delayed_work(&hdev->power_off);
1394
1da177e4 1395 err = hci_dev_do_close(hdev);
8ee56540 1396
0736cfa8 1397done:
1da177e4
LT
1398 hci_dev_put(hdev);
1399 return err;
1400}
1401
/* HCIDEVRESET ioctl handler: flush queues, caches and connections of a
 * running controller and (for non-raw devices) issue an HCI Reset.
 * Returns 0 or a negative errno (-ENODEV, -ENETDOWN, -EBUSY).
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	/* Reset only makes sense on a powered-up device */
	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	/* Exclusive user-channel owner controls the device */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset flow-control counters to their power-on state */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
1446
1447int hci_dev_reset_stat(__u16 dev)
1448{
1449 struct hci_dev *hdev;
1450 int ret = 0;
1451
70f23020
AE
1452 hdev = hci_dev_get(dev);
1453 if (!hdev)
1da177e4
LT
1454 return -ENODEV;
1455
0736cfa8
MH
1456 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1457 ret = -EBUSY;
1458 goto done;
1459 }
1460
1da177e4
LT
1461 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1462
0736cfa8 1463done:
1da177e4 1464 hci_dev_put(hdev);
1da177e4
LT
1465 return ret;
1466}
1467
/* Handler for the HCISET* family of ioctls: copy a hci_dev_req from
 * user space and apply the requested setting, either by issuing a
 * synchronous HCI request or by updating hdev fields directly.
 * Returns 0 or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Exclusive user-channel owner controls the device */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		/* Host-side setting only; no command goes to the radio */
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs MTU in the high half, packet count low */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
1548
/* HCIGETDEVLIST ioctl handler: fill a user-supplied hci_dev_list_req
 * with the id and flags of up to dev_num registered controllers.
 * Returns 0 or a negative errno.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the allocation: at most two pages worth of entries */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Legacy (non-mgmt) usage cancels auto-off and implies
		 * pairable mode */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy back what was actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1595
/* HCIGETDEVINFO ioctl handler: fill a hci_dev_info for the requested
 * controller.  For LE-only controllers the ACL fields carry the LE
 * buffer settings.  Returns 0 or a negative errno.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Legacy (non-mgmt) usage cancels auto-off and implies pairable */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* type packs bus in the low nibble, dev_type in the next two bits */
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		/* LE-only: report LE buffer info in the ACL fields */
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1644
1645/* ---- Interface to HCI drivers ---- */
1646
611b30f7
MH
1647static int hci_rfkill_set_block(void *data, bool blocked)
1648{
1649 struct hci_dev *hdev = data;
1650
1651 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1652
0736cfa8
MH
1653 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1654 return -EBUSY;
1655
5e130367
JH
1656 if (blocked) {
1657 set_bit(HCI_RFKILLED, &hdev->dev_flags);
bf543036
JH
1658 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1659 hci_dev_do_close(hdev);
5e130367
JH
1660 } else {
1661 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
1025c04c 1662 }
611b30f7
MH
1663
1664 return 0;
1665}
1666
/* rfkill operations registered for each controller's rfkill switch */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1670
ab81cbf9
JH
/* Work handler that powers a controller on (used during registration
 * and by mgmt).  If rfkill blocked the radio in the meantime the
 * device is closed again; if mgmt auto-off is active a delayed
 * power-off is scheduled.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
		/* Radio got blocked while opening; undo the power-up and
		 * make sure mgmt sees the power-off */
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	/* First successful power-up finishes the SETUP stage */
	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1695
/* Delayed-work handler that powers a controller down (scheduled e.g.
 * by the mgmt auto-off timeout).
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
1705
16ab91ab
JH
/* Delayed-work handler that ends a timed discoverable period by
 * turning inquiry scan off (page scan stays enabled).
 */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1723
2aeb9a1a
JH
1724int hci_uuids_clear(struct hci_dev *hdev)
1725{
4821002c 1726 struct bt_uuid *uuid, *tmp;
2aeb9a1a 1727
4821002c
JH
1728 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1729 list_del(&uuid->list);
2aeb9a1a
JH
1730 kfree(uuid);
1731 }
1732
1733 return 0;
1734}
1735
55ed8ca1
JH
1736int hci_link_keys_clear(struct hci_dev *hdev)
1737{
1738 struct list_head *p, *n;
1739
1740 list_for_each_safe(p, n, &hdev->link_keys) {
1741 struct link_key *key;
1742
1743 key = list_entry(p, struct link_key, list);
1744
1745 list_del(p);
1746 kfree(key);
1747 }
1748
1749 return 0;
1750}
1751
b899efaf
VCG
1752int hci_smp_ltks_clear(struct hci_dev *hdev)
1753{
1754 struct smp_ltk *k, *tmp;
1755
1756 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1757 list_del(&k->list);
1758 kfree(k);
1759 }
1760
1761 return 0;
1762}
1763
55ed8ca1
JH
1764struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1765{
8035ded4 1766 struct link_key *k;
55ed8ca1 1767
8035ded4 1768 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1769 if (bacmp(bdaddr, &k->bdaddr) == 0)
1770 return k;
55ed8ca1
JH
1771
1772 return NULL;
1773}
1774
/* Decide whether a newly created link key should be stored across
 * power cycles, based on the key type and the bonding requirements
 * both sides advertised during pairing.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
1810
c9839a11 1811struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
75d262c2 1812{
c9839a11 1813 struct smp_ltk *k;
75d262c2 1814
c9839a11
VCG
1815 list_for_each_entry(k, &hdev->long_term_keys, list) {
1816 if (k->ediv != ediv ||
a8c5fb1a 1817 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
1818 continue;
1819
c9839a11 1820 return k;
75d262c2
VCG
1821 }
1822
1823 return NULL;
1824}
75d262c2 1825
c9839a11 1826struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
04124681 1827 u8 addr_type)
75d262c2 1828{
c9839a11 1829 struct smp_ltk *k;
75d262c2 1830
c9839a11
VCG
1831 list_for_each_entry(k, &hdev->long_term_keys, list)
1832 if (addr_type == k->bdaddr_type &&
a8c5fb1a 1833 bacmp(bdaddr, &k->bdaddr) == 0)
75d262c2
VCG
1834 return k;
1835
1836 return NULL;
1837}
75d262c2 1838
/* Store (or update) the BR/EDR link key for @bdaddr.  When @new_key is
 * set, mgmt is informed and the key's persistence is decided via
 * hci_persistent_key().  Returns 0 or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	/* Reuse the existing entry for this address if there is one */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for the heuristics below */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed-combination key keeps the previous key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are flushed when the connection drops */
	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
1891
/* Store (or update) an SMP key for @bdaddr/@addr_type.  Only STK and
 * LTK types are accepted; for a new LTK mgmt is notified.  Returns 0
 * or -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	/* Silently ignore key types we don't store */
	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	/* Only long term keys (not STKs) are announced to mgmt */
	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1928
55ed8ca1
JH
1929int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1930{
1931 struct link_key *key;
1932
1933 key = hci_find_link_key(hdev, bdaddr);
1934 if (!key)
1935 return -ENOENT;
1936
6ed93dc6 1937 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
1938
1939 list_del(&key->list);
1940 kfree(key);
1941
1942 return 0;
1943}
1944
b899efaf
VCG
1945int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1946{
1947 struct smp_ltk *k, *tmp;
1948
1949 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1950 if (bacmp(bdaddr, &k->bdaddr))
1951 continue;
1952
6ed93dc6 1953 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf
VCG
1954
1955 list_del(&k->list);
1956 kfree(k);
1957 }
1958
1959 return 0;
1960}
1961
/* HCI command timer function: fires when the controller failed to
 * answer the last command in time.  Logs the stuck opcode (if the sent
 * command is still around) and unblocks the command queue so further
 * commands can be sent.
 */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	/* Release the credit the lost command consumed and restart TX */
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1979
2763eda6 1980struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 1981 bdaddr_t *bdaddr)
2763eda6
SJ
1982{
1983 struct oob_data *data;
1984
1985 list_for_each_entry(data, &hdev->remote_oob_data, list)
1986 if (bacmp(bdaddr, &data->bdaddr) == 0)
1987 return data;
1988
1989 return NULL;
1990}
1991
1992int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1993{
1994 struct oob_data *data;
1995
1996 data = hci_find_remote_oob_data(hdev, bdaddr);
1997 if (!data)
1998 return -ENOENT;
1999
6ed93dc6 2000 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
2001
2002 list_del(&data->list);
2003 kfree(data);
2004
2005 return 0;
2006}
2007
2008int hci_remote_oob_data_clear(struct hci_dev *hdev)
2009{
2010 struct oob_data *data, *n;
2011
2012 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2013 list_del(&data->list);
2014 kfree(data);
2015 }
2016
2017 return 0;
2018}
2019
2020int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
04124681 2021 u8 *randomizer)
2763eda6
SJ
2022{
2023 struct oob_data *data;
2024
2025 data = hci_find_remote_oob_data(hdev, bdaddr);
2026
2027 if (!data) {
2028 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2029 if (!data)
2030 return -ENOMEM;
2031
2032 bacpy(&data->bdaddr, bdaddr);
2033 list_add(&data->list, &hdev->remote_oob_data);
2034 }
2035
2036 memcpy(data->hash, hash, sizeof(data->hash));
2037 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
2038
6ed93dc6 2039 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
2040
2041 return 0;
2042}
2043
04124681 2044struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
b2a66aad 2045{
8035ded4 2046 struct bdaddr_list *b;
b2a66aad 2047
8035ded4 2048 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
2049 if (bacmp(bdaddr, &b->bdaddr) == 0)
2050 return b;
b2a66aad
AJ
2051
2052 return NULL;
2053}
2054
2055int hci_blacklist_clear(struct hci_dev *hdev)
2056{
2057 struct list_head *p, *n;
2058
2059 list_for_each_safe(p, n, &hdev->blacklist) {
2060 struct bdaddr_list *b;
2061
2062 b = list_entry(p, struct bdaddr_list, list);
2063
2064 list_del(p);
2065 kfree(b);
2066 }
2067
2068 return 0;
2069}
2070
88c1fe4b 2071int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2072{
2073 struct bdaddr_list *entry;
b2a66aad
AJ
2074
2075 if (bacmp(bdaddr, BDADDR_ANY) == 0)
2076 return -EBADF;
2077
5e762444
AJ
2078 if (hci_blacklist_lookup(hdev, bdaddr))
2079 return -EEXIST;
b2a66aad
AJ
2080
2081 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
2082 if (!entry)
2083 return -ENOMEM;
b2a66aad
AJ
2084
2085 bacpy(&entry->bdaddr, bdaddr);
2086
2087 list_add(&entry->list, &hdev->blacklist);
2088
88c1fe4b 2089 return mgmt_device_blocked(hdev, bdaddr, type);
b2a66aad
AJ
2090}
2091
88c1fe4b 2092int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2093{
2094 struct bdaddr_list *entry;
b2a66aad 2095
1ec918ce 2096 if (bacmp(bdaddr, BDADDR_ANY) == 0)
5e762444 2097 return hci_blacklist_clear(hdev);
b2a66aad
AJ
2098
2099 entry = hci_blacklist_lookup(hdev, bdaddr);
1ec918ce 2100 if (!entry)
5e762444 2101 return -ENOENT;
b2a66aad
AJ
2102
2103 list_del(&entry->list);
2104 kfree(entry);
2105
88c1fe4b 2106 return mgmt_device_unblocked(hdev, bdaddr, type);
b2a66aad
AJ
2107}
2108
4c87eaab 2109static void inquiry_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 2110{
4c87eaab
AG
2111 if (status) {
2112 BT_ERR("Failed to start inquiry: status %d", status);
7ba8b4be 2113
4c87eaab
AG
2114 hci_dev_lock(hdev);
2115 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2116 hci_dev_unlock(hdev);
2117 return;
2118 }
7ba8b4be
AG
2119}
2120
/* Completion callback after LE scanning was disabled.  For pure LE
 * discovery we are done; for interleaved discovery the BR/EDR inquiry
 * phase is started next.
 */
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		/* LE-only discovery: scanning off means discovery done */
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		/* Continue with the BR/EDR inquiry phase */
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}
2163
7ba8b4be
AG
2164static void le_scan_disable_work(struct work_struct *work)
2165{
2166 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 2167 le_scan_disable.work);
7ba8b4be 2168 struct hci_cp_le_set_scan_enable cp;
4c87eaab
AG
2169 struct hci_request req;
2170 int err;
7ba8b4be
AG
2171
2172 BT_DBG("%s", hdev->name);
2173
4c87eaab 2174 hci_req_init(&req, hdev);
28b75a89 2175
7ba8b4be 2176 memset(&cp, 0, sizeof(cp));
4c87eaab
AG
2177 cp.enable = LE_SCAN_DISABLE;
2178 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
28b75a89 2179
4c87eaab
AG
2180 err = hci_req_run(&req, le_scan_disable_work_complete);
2181 if (err)
2182 BT_ERR("Disable LE scanning request failed: err %d", err);
28b75a89
AG
2183}
2184
9be0dab7
DH
/* Alloc HCI device: allocate and initialize a struct hci_dev with
 * default packet types, locks, lists, work items, queues, timers and
 * sysfs state.  Returns the new device or NULL on allocation failure;
 * the caller registers it with hci_register_dev() and releases it with
 * hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Default sniff interval bounds (slots) */
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	/* Watchdog for commands the controller never answers */
	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
2238
2239/* Free HCI device */
/* Drop the final device reference; the hci_dev itself is freed by the
 * driver-core release callback, not here directly.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
2245EXPORT_SYMBOL(hci_free_dev);
2246
1da177e4
LT
2247/* Register HCI device */
2248int hci_register_dev(struct hci_dev *hdev)
2249{
b1b813d4 2250 int id, error;
1da177e4 2251
010666a1 2252 if (!hdev->open || !hdev->close)
1da177e4
LT
2253 return -EINVAL;
2254
08add513
MM
2255 /* Do not allow HCI_AMP devices to register at index 0,
2256 * so the index can be used as the AMP controller ID.
2257 */
3df92b31
SL
2258 switch (hdev->dev_type) {
2259 case HCI_BREDR:
2260 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2261 break;
2262 case HCI_AMP:
2263 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2264 break;
2265 default:
2266 return -EINVAL;
1da177e4 2267 }
8e87d142 2268
3df92b31
SL
2269 if (id < 0)
2270 return id;
2271
1da177e4
LT
2272 sprintf(hdev->name, "hci%d", id);
2273 hdev->id = id;
2d8b3a11
AE
2274
2275 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2276
d8537548
KC
2277 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2278 WQ_MEM_RECLAIM, 1, hdev->name);
33ca954d
DH
2279 if (!hdev->workqueue) {
2280 error = -ENOMEM;
2281 goto err;
2282 }
f48fd9c8 2283
d8537548
KC
2284 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2285 WQ_MEM_RECLAIM, 1, hdev->name);
6ead1bbc
JH
2286 if (!hdev->req_workqueue) {
2287 destroy_workqueue(hdev->workqueue);
2288 error = -ENOMEM;
2289 goto err;
2290 }
2291
33ca954d
DH
2292 error = hci_add_sysfs(hdev);
2293 if (error < 0)
2294 goto err_wqueue;
1da177e4 2295
611b30f7 2296 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
2297 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2298 hdev);
611b30f7
MH
2299 if (hdev->rfkill) {
2300 if (rfkill_register(hdev->rfkill) < 0) {
2301 rfkill_destroy(hdev->rfkill);
2302 hdev->rfkill = NULL;
2303 }
2304 }
2305
5e130367
JH
2306 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2307 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2308
a8b2d5c2 2309 set_bit(HCI_SETUP, &hdev->dev_flags);
ce2be9ac
AE
2310
2311 if (hdev->dev_type != HCI_AMP)
2312 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2313
fcee3377
GP
2314 write_lock(&hci_dev_list_lock);
2315 list_add(&hdev->list, &hci_dev_list);
2316 write_unlock(&hci_dev_list_lock);
2317
1da177e4 2318 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 2319 hci_dev_hold(hdev);
1da177e4 2320
19202573 2321 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 2322
1da177e4 2323 return id;
f48fd9c8 2324
33ca954d
DH
2325err_wqueue:
2326 destroy_workqueue(hdev->workqueue);
6ead1bbc 2327 destroy_workqueue(hdev->req_workqueue);
33ca954d 2328err:
3df92b31 2329 ida_simple_remove(&hci_index_ida, hdev->id);
f48fd9c8 2330
33ca954d 2331 return error;
1da177e4
LT
2332}
2333EXPORT_SYMBOL(hci_register_dev);
2334
2335/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Mark first so concurrent paths see the device going away */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Remember the index; hdev may be freed by hci_dev_put() below
	 * before the IDA entry is released.
	 */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled driver frames */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Release per-device state under the device lock */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
2392EXPORT_SYMBOL(hci_unregister_dev);
2393
2394/* Suspend HCI device */
/* Notify listeners that the device is suspending; always succeeds. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
2400EXPORT_SYMBOL(hci_suspend_dev);
2401
2402/* Resume HCI device */
/* Notify listeners that the device has resumed; always succeeds. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
2408EXPORT_SYMBOL(hci_resume_dev);
2409
76bca880
MH
2410/* Receive frame from HCI drivers */
/* Entry point for HCI drivers handing a complete frame to the stack.
 * Takes ownership of the skb (frees it on rejection) and defers all
 * processing to the per-device RX work item.
 */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	/* Accept frames only while the device is up or initializing */
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
2431EXPORT_SYMBOL(hci_recv_frame);
2432
/* Incrementally reassemble one HCI packet from a driver byte stream.
 * State lives in hdev->reassembly[index]; the skb's control block
 * tracks how many bytes are still expected ("expect"). Returns the
 * number of input bytes NOT consumed (>= 0), or a negative errno.
 * A completed packet is handed to hci_recv_frame().
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: size the buffer for the worst
		 * case and expect the packet header first.
		 */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		/* Copy no more than what is still expected */
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, learn the payload length
		 * from it; bail out if it would not fit the buffer.
		 */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
2541
ef222013
MH
2542int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2543{
f39a3c06
SS
2544 int rem = 0;
2545
ef222013
MH
2546 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2547 return -EILSEQ;
2548
da5f6c37 2549 while (count) {
1e429f38 2550 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
2551 if (rem < 0)
2552 return rem;
ef222013 2553
f39a3c06
SS
2554 data += (count - rem);
2555 count = rem;
f81c6224 2556 }
ef222013 2557
f39a3c06 2558 return rem;
ef222013
MH
2559}
2560EXPORT_SYMBOL(hci_recv_fragment);
2561
99811510
SS
2562#define STREAM_REASSEMBLY 0
2563
/* Feed an untyped byte stream (e.g. from a UART driver) into
 * reassembly. The first byte of each packet carries the H:4 packet
 * type indicator; all packets share the single STREAM_REASSEMBLY slot.
 * Returns leftover byte count or a negative errno.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			/* Consume the type indicator byte */
			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
2595EXPORT_SYMBOL(hci_recv_stream_fragment);
2596
1da177e4
LT
2597/* ---- Interface to upper protocols ---- */
2598
1da177e4
LT
/* Register an upper-protocol callback set; always succeeds. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
2609EXPORT_SYMBOL(hci_register_cb);
2610
/* Remove a previously registered upper-protocol callback set. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
2621EXPORT_SYMBOL(hci_unregister_cb);
2622
/* Hand one outgoing frame to the driver, mirroring a copy to the
 * monitor channel and (in promiscuous mode) to raw HCI sockets.
 * Ownership of the skb passes to hdev->send().
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2650
3119ae95
JH
/* Initialize an asynchronous HCI request: empty command queue, bound
 * to hdev, no sticky build error yet.
 */
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}
2657
/* Submit a built request: attach the completion callback to the last
 * queued command and splice all commands onto the device command
 * queue atomically. Returns 0, a sticky build error, or -ENODATA for
 * an empty request.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occured during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	/* The completion callback fires after the final command */
	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	/* Splice under the cmd_q lock so the request stays contiguous */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
2689
/* Allocate and fill an skb holding one HCI command packet (header +
 * optional parameters). Returns NULL on allocation failure.
 */
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	/* Opcode goes on the wire little-endian */
	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	return skb;
}
2715
2716/* Send HCI command */
07dc93dd
JH
/* Send a stand-alone HCI command (outside any hci_request). The
 * command is queued and transmitted by the command work item.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
1da177e4 2740
71c76a17 2741/* Queue a command to an asynchronous HCI request */
07dc93dd
JH
/* Queue a command onto an asynchronous HCI request, optionally naming
 * the event that completes it. Allocation failure is recorded in
 * req->err instead of being returned, so request building can
 * continue and the error surfaces in hci_req_run().
 */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occured during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	/* The first command of a request marks the request start */
	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}
2771
07dc93dd
JH
/* Queue a command on a request, completed by the default Command
 * Complete/Status event (event == 0).
 */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
2777
1da177e4 2778/* Get data from the previously sent command */
/* Return a pointer to the parameter area of the last sent command,
 * or NULL if nothing was sent or the opcode does not match.
 */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	/* Stored opcode is little-endian; compare in wire order */
	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
2795
2796/* Send ACL data */
/* Prepend an ACL data header (handle+flags, length) to the skb.
 * The current skb->len becomes the header's data length field.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	/* Handle and packet-boundary/broadcast flags share one field */
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
2808
/* Add ACL headers to an (optionally fragmented) skb and queue it on
 * the channel's data queue. Fragments in the frag_list are unlinked,
 * given continuation headers and queued atomically so the TX
 * scheduler never sees a partial packet.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Head skb carries only its linear part; fragments follow as
	 * separate packets.
	 */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	/* AMP links use the channel handle, BR/EDR the connection handle */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments never carry the start flag */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2867
/* Queue ACL data on a channel and kick the TX scheduler. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
2880
2881/* Send SCO data */
/* Prepend a SCO header and queue the packet for transmission.
 * NOTE(review): skb->len is assumed to fit the header's length field;
 * callers appear to cap SCO payloads elsewhere — confirm.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
2902
2903/* ---- HCI TX task (outgoing data) ---- */
2904
2905/* HCI Connection scheduler */
6039aa73
GP
/* Pick the connection of the given link type with pending data that
 * has the fewest in-flight packets (simple fairness), and compute its
 * share of the available controller buffers in *quote.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the connection with the fewest packets in flight */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once every active connection was seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Pick the buffer pool that matches the link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Fair share, at least one packet */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2965
/* TX watchdog: the controller stopped returning buffer credits for
 * this link type, so forcibly disconnect every connection of that
 * type that still has packets outstanding.
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
2986
6039aa73
GP
/* Channel-level scheduler: among all channels of the given link type
 * with queued data, pick one whose head packet has the highest
 * priority, breaking ties by fewest packets in flight on its
 * connection. *quote receives that channel's share of the buffers.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart fairness bookkeeping
			 * at this new priority level.
			 */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Select the buffer pool matching the link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
3068
02b20f0b
LAD
/* Anti-starvation pass: after a scheduling round, promote the head
 * packet of every channel that sent nothing to just below the top
 * priority, so lower-priority channels eventually get airtime.
 * Channels that did send have their round counter reset instead.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* This channel got to send: reset and skip */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
3118
b71d385a
AE
3119static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3120{
3121 /* Calculate count of blocks used by this packet */
3122 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3123}
3124
6039aa73 3125static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 3126{
1da177e4
LT
3127 if (!test_bit(HCI_RAW, &hdev->flags)) {
3128 /* ACL tx timeout must be longer than maximum
3129 * link supervision timeout (40.9 seconds) */
63d2bc1b 3130 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 3131 HCI_ACL_TX_TIMEOUT))
bae1f5d9 3132 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 3133 }
63d2bc1b 3134}
1da177e4 3135
/* Packet-based ACL scheduler: drain channels by priority while
 * controller buffer credits (acl_cnt) remain.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* If anything was sent, rebalance priorities for fairness */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
3173
/* Block-based ACL scheduler (AMP-style flow control): like the
 * packet-based variant but accounting in controller data blocks
 * rather than whole packets.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	/* AMP controllers carry AMP links, BR/EDR controllers ACL links */
	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* Don't send a packet that exceeds remaining blocks */
			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
3227
/* Dispatch ACL scheduling to the flow-control mode the controller
 * advertises (per-packet or per-block credits).
 */
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
3250
1da177e4 3251/* Schedule SCO */
/* Drain SCO connections while SCO buffer credits remain. */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			/* Wrap the in-flight counter instead of overflowing */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
3274
/* Drain eSCO connections; shares the SCO buffer credit pool. */
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			/* Wrap the in-flight counter instead of overflowing */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
3298
/* LE scheduler: drain LE channels by priority. Controllers without a
 * dedicated LE buffer pool (le_pkts == 0) borrow ACL credits.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Use the LE pool if present, otherwise share ACL credits */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to whichever pool was used */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
3349
3eff45ea 3350static void hci_tx_work(struct work_struct *work)
1da177e4 3351{
3eff45ea 3352 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
3353 struct sk_buff *skb;
3354
6ed58ec5 3355 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 3356 hdev->sco_cnt, hdev->le_cnt);
1da177e4 3357
52de599e
MH
3358 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3359 /* Schedule queues and send stuff to HCI driver */
3360 hci_sched_acl(hdev);
3361 hci_sched_sco(hdev);
3362 hci_sched_esco(hdev);
3363 hci_sched_le(hdev);
3364 }
6ed58ec5 3365
1da177e4
LT
3366 /* Send next queued raw (unknown type) packet */
3367 while ((skb = skb_dequeue(&hdev->raw_q)))
3368 hci_send_frame(skb);
1da177e4
LT
3369}
3370
25985edc 3371/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
3372
3373/* ACL data packet */
6039aa73 3374static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
3375{
3376 struct hci_acl_hdr *hdr = (void *) skb->data;
3377 struct hci_conn *conn;
3378 __u16 handle, flags;
3379
3380 skb_pull(skb, HCI_ACL_HDR_SIZE);
3381
3382 handle = __le16_to_cpu(hdr->handle);
3383 flags = hci_flags(handle);
3384 handle = hci_handle(handle);
3385
f0e09510 3386 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 3387 handle, flags);
1da177e4
LT
3388
3389 hdev->stat.acl_rx++;
3390
3391 hci_dev_lock(hdev);
3392 conn = hci_conn_hash_lookup_handle(hdev, handle);
3393 hci_dev_unlock(hdev);
8e87d142 3394
1da177e4 3395 if (conn) {
65983fc7 3396 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 3397
1da177e4 3398 /* Send to upper protocol */
686ebf28
UF
3399 l2cap_recv_acldata(conn, skb, flags);
3400 return;
1da177e4 3401 } else {
8e87d142 3402 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 3403 hdev->name, handle);
1da177e4
LT
3404 }
3405
3406 kfree_skb(skb);
3407}
3408
3409/* SCO data packet */
6039aa73 3410static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
3411{
3412 struct hci_sco_hdr *hdr = (void *) skb->data;
3413 struct hci_conn *conn;
3414 __u16 handle;
3415
3416 skb_pull(skb, HCI_SCO_HDR_SIZE);
3417
3418 handle = __le16_to_cpu(hdr->handle);
3419
f0e09510 3420 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
3421
3422 hdev->stat.sco_rx++;
3423
3424 hci_dev_lock(hdev);
3425 conn = hci_conn_hash_lookup_handle(hdev, handle);
3426 hci_dev_unlock(hdev);
3427
3428 if (conn) {
1da177e4 3429 /* Send to upper protocol */
686ebf28
UF
3430 sco_recv_scodata(conn, skb);
3431 return;
1da177e4 3432 } else {
8e87d142 3433 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 3434 hdev->name, handle);
1da177e4
LT
3435 }
3436
3437 kfree_skb(skb);
3438}
3439
9238f36a
JH
3440static bool hci_req_is_complete(struct hci_dev *hdev)
3441{
3442 struct sk_buff *skb;
3443
3444 skb = skb_peek(&hdev->cmd_q);
3445 if (!skb)
3446 return true;
3447
3448 return bt_cb(skb)->req.start;
3449}
3450
42c6b129
JH
3451static void hci_resend_last(struct hci_dev *hdev)
3452{
3453 struct hci_command_hdr *sent;
3454 struct sk_buff *skb;
3455 u16 opcode;
3456
3457 if (!hdev->sent_cmd)
3458 return;
3459
3460 sent = (void *) hdev->sent_cmd->data;
3461 opcode = __le16_to_cpu(sent->opcode);
3462 if (opcode == HCI_OP_RESET)
3463 return;
3464
3465 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3466 if (!skb)
3467 return;
3468
3469 skb_queue_head(&hdev->cmd_q, skb);
3470 queue_work(hdev->workqueue, &hdev->cmd_work);
3471}
3472
/* Called when a command identified by @opcode has completed with the
 * given @status. Decides whether the whole request this command
 * belonged to is now finished and, if so, runs the request's complete
 * callback exactly once and discards the request's remaining queued
 * commands on failure.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* Stop at the first command of the next request and put
		 * it back at the head of the queue.
		 */
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		/* The last dequeued command's callback (if any) will be
		 * the one invoked below.
		 */
		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
3538
b78752cc 3539static void hci_rx_work(struct work_struct *work)
1da177e4 3540{
b78752cc 3541 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
3542 struct sk_buff *skb;
3543
3544 BT_DBG("%s", hdev->name);
3545
1da177e4 3546 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
3547 /* Send copy to monitor */
3548 hci_send_to_monitor(hdev, skb);
3549
1da177e4
LT
3550 if (atomic_read(&hdev->promisc)) {
3551 /* Send copy to the sockets */
470fe1b5 3552 hci_send_to_sock(hdev, skb);
1da177e4
LT
3553 }
3554
0736cfa8
MH
3555 if (test_bit(HCI_RAW, &hdev->flags) ||
3556 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1da177e4
LT
3557 kfree_skb(skb);
3558 continue;
3559 }
3560
3561 if (test_bit(HCI_INIT, &hdev->flags)) {
3562 /* Don't process data packets in this states. */
0d48d939 3563 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
3564 case HCI_ACLDATA_PKT:
3565 case HCI_SCODATA_PKT:
3566 kfree_skb(skb);
3567 continue;
3ff50b79 3568 }
1da177e4
LT
3569 }
3570
3571 /* Process frame */
0d48d939 3572 switch (bt_cb(skb)->pkt_type) {
1da177e4 3573 case HCI_EVENT_PKT:
b78752cc 3574 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
3575 hci_event_packet(hdev, skb);
3576 break;
3577
3578 case HCI_ACLDATA_PKT:
3579 BT_DBG("%s ACL data packet", hdev->name);
3580 hci_acldata_packet(hdev, skb);
3581 break;
3582
3583 case HCI_SCODATA_PKT:
3584 BT_DBG("%s SCO data packet", hdev->name);
3585 hci_scodata_packet(hdev, skb);
3586 break;
3587
3588 default:
3589 kfree_skb(skb);
3590 break;
3591 }
3592 }
1da177e4
LT
3593}
3594
c347b765 3595static void hci_cmd_work(struct work_struct *work)
1da177e4 3596{
c347b765 3597 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
3598 struct sk_buff *skb;
3599
2104786b
AE
3600 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3601 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 3602
1da177e4 3603 /* Send queued commands */
5a08ecce
AE
3604 if (atomic_read(&hdev->cmd_cnt)) {
3605 skb = skb_dequeue(&hdev->cmd_q);
3606 if (!skb)
3607 return;
3608
7585b97a 3609 kfree_skb(hdev->sent_cmd);
1da177e4 3610
a675d7f1 3611 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 3612 if (hdev->sent_cmd) {
1da177e4
LT
3613 atomic_dec(&hdev->cmd_cnt);
3614 hci_send_frame(skb);
7bdb8a5c
SJ
3615 if (test_bit(HCI_RESET, &hdev->flags))
3616 del_timer(&hdev->cmd_timer);
3617 else
3618 mod_timer(&hdev->cmd_timer,
5f246e89 3619 jiffies + HCI_CMD_TIMEOUT);
1da177e4
LT
3620 } else {
3621 skb_queue_head(&hdev->cmd_q, skb);
c347b765 3622 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
3623 }
3624 }
3625}
2519a1fc 3626
31f7956c
AG
3627u8 bdaddr_to_le(u8 bdaddr_type)
3628{
3629 switch (bdaddr_type) {
3630 case BDADDR_LE_PUBLIC:
3631 return ADDR_LE_DEV_PUBLIC;
3632
3633 default:
3634 /* Fallback to LE Random address type */
3635 return ADDR_LE_DEV_RANDOM;
3636 }
3637}