/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE))
		return -EALREADY;

	hci_req_sync_lock(hdev);
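	/* Device Under Test mode can only be left by resetting the
	 * controller, which is why HCI_Reset is sent when disabling it.
	 */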
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_sync_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	kfree_skb(skb);

	hci_dev_change_flag(hdev, HCI_DUT_MODE);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

static ssize_t vendor_diag_read(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	/* When the diagnostic flags are not persistent and the transport
	 * is not active or in user channel operation, then there is no need
	 * for the vendor callback. Instead just store the desired value and
	 * the setting will be programmed when the controller gets powered on.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    (!test_bit(HCI_RUNNING, &hdev->flags) ||
	     hci_dev_test_flag(hdev, HCI_USER_CHANNEL)))
		goto done;

	hci_req_sync_lock(hdev);
	err = hdev->set_diag(hdev, enable);
	hci_req_sync_unlock(hdev);

	if (err < 0)
		return err;

done:
	if (enable)
		hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
	else
		hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG);

	return count;
}

static const struct file_operations vendor_diag_fops = {
	.open		= simple_open,
	.read		= vendor_diag_read,
	.write		= vendor_diag_write,
	.llseek		= default_llseek,
};

static void hci_debugfs_create_basic(struct hci_dev *hdev)
{
	debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
			    &dut_mode_fops);

	if (hdev->set_diag)
		debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev,
				    &vendor_diag_fops);
}

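/* Example userspace interaction with the entries created above, assuming
 * debugfs is mounted at /sys/kernel/debug (hci0 standing in for the
 * controller's name):
 *
 *   echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *   cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * A write toggles the mode through the write handlers above, a read
 * reports the current flag state as 'Y' or 'N'.
 */
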
static int hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
	return 0;
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init1(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

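/* hdev->commands holds the Supported Commands bitmask returned by the
 * Read Local Supported Commands command: commands[n] & (1 << m) tests
 * octet n, bit m as laid out in the Bluetooth Core specification.
 */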
static int amp_init2(struct hci_request *req)
{
	/* Read Local Supported Features. Not all AMP controllers
	 * support this so it's placed conditionally in the second
	 * stage init.
	 */
	if (req->hdev->commands[14] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	return 0;
}

static int hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		bredr_init(req);
		break;
	case HCI_AMP:
		amp_init1(req);
		break;
	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}

	return 0;
}

static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout: 0x7d00 slots * 0.625 ms = 20 seconds */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
}

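/* The Set Event Mask parameter is a 64-bit little-endian bitmask: bit N
 * of the mask (byte N / 8, bit N % 8 of events[]) enables the event with
 * event code N + 1 from the Bluetooth Core specification.
 */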
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre-1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */

		/* If the controller supports the Disconnect command, enable
		 * the corresponding event. In addition enable packet flow
		 * control related events.
		 */
		if (hdev->commands[0] & 0x20) {
			events[0] |= 0x10; /* Disconnection Complete */
			events[2] |= 0x04; /* Number of Completed Packets */
			events[3] |= 0x02; /* Data Buffer Overflow */
		}

		/* If the controller supports the Read Remote Version
		 * Information command, enable the corresponding event.
		 */
		if (hdev->commands[2] & 0x80)
			events[1] |= 0x08; /* Read Remote Version Information
					    * Complete
					    */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_ext_feat_capable(hdev))
		events[4] |= 0x04; /* Read Remote Extended Features Complete */

	if (lmp_esco_capable(hdev)) {
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static int hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->dev_type == HCI_AMP)
		return amp_init2(req);

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* All Bluetooth 1.2 and later controllers should support the
	 * HCI command for reading the local supported commands.
	 *
	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
	 * but do not have support for this command. If that is the case,
	 * the driver can quirk the behavior and skip reading the local
	 * supported commands.
	 */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			u8 mode = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev) ||
	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) {
		u8 mode;

		/* If Extended Inquiry Result events are supported, then
		 * they are clearly preferred over Inquiry Result with RSSI
		 * events.
		 */
		mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;

		hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
	}

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}

	return 0;
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
	bool changed = false;

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
		changed = true;
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
		changed = true;
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
		events[2] |= 0x80;
		changed = true;
	}

	/* Some Broadcom based controllers indicate support for Set Event
	 * Mask Page 2 command, but then actually do not support it. Since
	 * the default value is all bits set to zero, the command is only
	 * required if the event mask has to be changed. In case no change
	 * to the event mask is needed, skip this command.
	 */
	if (changed)
		hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
			    sizeof(events), events);
}

static int hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	if (hdev->commands[6] & 0x20 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_read_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.read_all = 0x01;
		hci_req_add(req, HCI_OP_READ_STORED_LINK_KEY, sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (hdev->commands[8] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);

	/* Some older Broadcom based Bluetooth 1.2 controllers do not
	 * support the Read Page Scan Type command. Check support for
	 * this command in the bit mask of supported commands.
	 */
	if (hdev->commands[13] & 0x01)
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));

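		/* The LE event mask works the same way as the main one:
		 * bit N of the little-endian mask enables the LE meta
		 * event with subevent code N + 1.
		 */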
		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		/* If the controller supports the Data Length Extension
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
			events[0] |= 0x40;	/* LE Data Length Change */

		/* If the controller supports Extended Scanner Filter
		 * Policies, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
			events[1] |= 0x04;	/* LE Direct Advertising
						 * Report
						 */

		/* If the controller supports Channel Selection Algorithm #2
		 * feature, enable the corresponding event.
		 */
		if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
			events[2] |= 0x08;	/* LE Channel Selection
						 * Algorithm
						 */

		/* If the controller supports the LE Set Scan Enable command,
		 * enable the corresponding advertising report event.
		 */
		if (hdev->commands[26] & 0x08)
			events[0] |= 0x02;	/* LE Advertising Report */

		/* If the controller supports the LE Create Connection
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[26] & 0x10)
			events[0] |= 0x01;	/* LE Connection Complete */

		/* If the controller supports the LE Connection Update
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x04)
			events[0] |= 0x04;	/* LE Connection Update
						 * Complete
						 */

		/* If the controller supports the LE Read Remote Used Features
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[27] & 0x20)
			events[0] |= 0x08;	/* LE Read Remote Used
						 * Features Complete
						 */

		/* If the controller supports the LE Read Local P-256
		 * Public Key command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x02)
			events[0] |= 0x80;	/* LE Read Local P-256
						 * Public Key Complete
						 */

		/* If the controller supports the LE Generate DHKey
		 * command, enable the corresponding event.
		 */
		if (hdev->commands[34] & 0x04)
			events[1] |= 0x01;	/* LE Generate DHKey Complete */

		/* If the controller supports the LE Set Default PHY or
		 * LE Set PHY commands, enable the corresponding event.
		 */
		if (hdev->commands[35] & (0x20 | 0x40))
			events[1] |= 0x08;	/* LE PHY Update Complete */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		if (hdev->commands[26] & 0x40) {
			/* Read LE White List Size */
			hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE,
				    0, NULL);
		}

		if (hdev->commands[26] & 0x80) {
			/* Clear LE White List */
			hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
		}

		if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
			/* Read LE Maximum Data Length */
			hci_req_add(req, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL);

			/* Read LE Suggested Default Data Length */
			hci_req_add(req, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	return 0;
}

static int hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    bredr_sc_enabled(hdev)) {
		u8 support = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}

	/* Set Suggested Default Data Length to maximum if supported */
	if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
		struct hci_cp_le_write_def_data_len cp;

		cp.tx_len = hdev->le_max_tx_len;
		cp.tx_time = hdev->le_max_tx_time;
		hci_req_add(req, HCI_OP_LE_WRITE_DEF_DATA_LEN, sizeof(cp), &cp);
	}

	/* Set Default PHY parameters if command is supported */
	if (hdev->commands[35] & 0x20) {
		struct hci_cp_le_set_default_phy cp;

		/* No transmitter PHY or receiver PHY preferences */
		cp.all_phys = 0x03;
		cp.tx_phys = 0;
		cp.rx_phys = 0;

		hci_req_add(req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp);
	}

	return 0;
}

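/* Run the staged initialization sequence: init1 resets the controller and
 * reads the basic capabilities, init2 does transport specific setup, init3
 * programs the event masks and LE settings, and init4 covers the remaining
 * optional commands.
 */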
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* HCI_PRIMARY covers single-mode LE, single-mode BR/EDR and
	 * dual-mode BR/EDR/LE controllers. AMP controllers only need
	 * the first two stages of init.
	 */
	if (hdev->dev_type != HCI_PRIMARY)
		return 0;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	/* This function is only called when the controller is actually in
	 * configured state. When the controller is marked as unconfigured,
	 * this initialization procedure is not run.
	 *
	 * It means that it is possible that a controller runs through its
	 * setup phase and then discovers missing settings. If that is the
	 * case, then this function will not be called. It then will only
	 * be called during the config phase.
	 *
	 * So only when in setup phase or config phase, create the debugfs
	 * entries and register the SMP channels.
	 */
	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
		hci_debugfs_create_bredr(hdev);

	if (lmp_le_capable(hdev))
		hci_debugfs_create_le(hdev);

	return 0;
}

static int hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

	return 0;
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT, NULL);
	if (err < 0)
		return err;

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		hci_debugfs_create_basic(hdev);

	return 0;
}

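/* opt carries the Write Scan Enable parameter: 0x00 disables both scans,
 * 0x01 enables inquiry scan only, 0x02 enables page scan only and 0x03
 * enables both.
 */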
static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
	return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
	return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
	return 0;
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

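/* Re-insert an entry into the resolve list, keeping the list sorted by
 * ascending RSSI magnitude so that names of the devices with the
 * strongest signal are resolved first.
 */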
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return 0;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EBUSY;
		goto done;
	}

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_PRIMARY) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

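	/* ir.length is in units of 1.28 seconds per the HCI specification;
	 * budgeting 2000 ms per unit gives the synchronous request some
	 * headroom over the controller's own inquiry timeout.
	 */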
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo, NULL);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore, allocate a temporary buffer
	 * and then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_sync_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		ret = -ENODEV;
		goto done;
	}

	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hdev->dev_type == HCI_PRIMARY &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	set_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_OPEN);

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hci_dev_test_flag(hdev, HCI_SETUP)) {
		hci_sock_dev_event(hdev, HCI_DEV_SETUP);

		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			hci_dev_set_flag(hdev, HCI_UNCONFIGURED);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
			ret = __hci_unconf_init(hdev);
	}

	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			ret = __hci_init(hdev);
			if (!ret && hdev->post_init)
				ret = hdev->post_init(hdev);
		}
	}

	/* If the HCI Reset command is clearing all diagnostic settings,
	 * then they need to be reprogrammed after the init procedure
	 * completed.
	 */
	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
		ret = hdev->set_diag(hdev, true);

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		set_bit(HCI_UP, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_UP);
		hci_leds_update_powered(hdev, true);
		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
		    hci_dev_test_flag(hdev, HCI_MGMT) &&
		    hdev->dev_type == HCI_PRIMARY) {
			ret = __hci_req_hci_power_on(hdev);
			mgmt_power_on(hdev, ret);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		clear_bit(HCI_RUNNING, &hdev->flags);
		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_sync_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    !hci_dev_test_flag(hdev, HCI_MGMT))
		hci_dev_set_flag(hdev, HCI_BONDABLE);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		if (p->conn) {
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
		list_del_init(&p->action);
	}

	BT_DBG("All LE pending actions cleared");
}

int hci_dev_do_close(struct hci_dev *hdev)
{
	bool auto_off;

	BT_DBG("%s %p", hdev->name, hdev);

	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    test_bit(HCI_UP, &hdev->flags)) {
		/* Execute vendor specific shutdown routine */
		if (hdev->shutdown)
			hdev->shutdown(hdev);
	}

	cancel_delayed_work(&hdev->power_off);

	hci_request_cancel_all(hdev);
	hci_req_sync_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_sync_unlock(hdev);
		return 0;
	}

	hci_leds_update_powered(hdev, false);

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		hdev->discov_timeout = 0;
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	}

	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		cancel_delayed_work(&hdev->service_cache);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);

	if (!auto_off && hdev->dev_type == HCI_PRIMARY &&
	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
	    hci_dev_test_flag(hdev, HCI_MGMT))
		__mgmt_power_off(hdev);

	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	smp_unregister(hdev);

	hci_sock_dev_event(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT, NULL);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	clear_bit(HCI_RUNNING, &hdev->flags);
	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

1662 | /* Clear flags */ | |
1663 | hdev->flags &= BIT(HCI_RAW); | |
1664 | hci_dev_clear_volatile_flags(hdev); | |
1665 | ||
1666 | /* Controller radio is available but is currently powered down */ | |
1667 | hdev->amp_status = AMP_STATUS_POWERED_DOWN; | |
1668 | ||
1669 | memset(hdev->eir, 0, sizeof(hdev->eir)); | |
1670 | memset(hdev->dev_class, 0, sizeof(hdev->dev_class)); | |
1671 | bacpy(&hdev->random_addr, BDADDR_ANY); | |
1672 | ||
1673 | hci_req_sync_unlock(hdev); | |
1674 | ||
1675 | hci_dev_put(hdev); | |
1676 | return 0; | |
1677 | } | |
1678 | ||
1679 | int hci_dev_close(__u16 dev) | |
1680 | { | |
1681 | struct hci_dev *hdev; | |
1682 | int err; | |
1683 | ||
1684 | hdev = hci_dev_get(dev); | |
1685 | if (!hdev) | |
1686 | return -ENODEV; | |
1687 | ||
1688 | if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { | |
1689 | err = -EBUSY; | |
1690 | goto done; | |
1691 | } | |
1692 | ||
1693 | if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) | |
1694 | cancel_delayed_work(&hdev->power_off); | |
1695 | ||
1696 | err = hci_dev_do_close(hdev); | |
1697 | ||
1698 | done: | |
1699 | hci_dev_put(hdev); | |
1700 | return err; | |
1701 | } | |
1702 | ||
1703 | static int hci_dev_do_reset(struct hci_dev *hdev) | |
1704 | { | |
1705 | int ret; | |
1706 | ||
1707 | BT_DBG("%s %p", hdev->name, hdev); | |
1708 | ||
1709 | hci_req_sync_lock(hdev); | |
1710 | ||
1711 | /* Drop queues */ | |
1712 | skb_queue_purge(&hdev->rx_q); | |
1713 | skb_queue_purge(&hdev->cmd_q); | |
1714 | ||
1715 | /* Avoid potential lockdep warnings from the *_flush() calls by | |
1716 | * ensuring the workqueue is empty up front. | |
1717 | */ | |
1718 | drain_workqueue(hdev->workqueue); | |
1719 | ||
1720 | hci_dev_lock(hdev); | |
1721 | hci_inquiry_cache_flush(hdev); | |
1722 | hci_conn_hash_flush(hdev); | |
1723 | hci_dev_unlock(hdev); | |
1724 | ||
1725 | if (hdev->flush) | |
1726 | hdev->flush(hdev); | |
1727 | ||
1728 | atomic_set(&hdev->cmd_cnt, 1); | |
1729 | hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0; | |
1730 | ||
1731 | ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT, NULL); | |
1732 | ||
1733 | hci_req_sync_unlock(hdev); | |
1734 | return ret; | |
1735 | } | |
1736 | ||
1737 | int hci_dev_reset(__u16 dev) | |
1738 | { | |
1739 | struct hci_dev *hdev; | |
1740 | int err; | |
1741 | ||
1742 | hdev = hci_dev_get(dev); | |
1743 | if (!hdev) | |
1744 | return -ENODEV; | |
1745 | ||
1746 | if (!test_bit(HCI_UP, &hdev->flags)) { | |
1747 | err = -ENETDOWN; | |
1748 | goto done; | |
1749 | } | |
1750 | ||
1751 | if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { | |
1752 | err = -EBUSY; | |
1753 | goto done; | |
1754 | } | |
1755 | ||
1756 | if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { | |
1757 | err = -EOPNOTSUPP; | |
1758 | goto done; | |
1759 | } | |
1760 | ||
1761 | err = hci_dev_do_reset(hdev); | |
1762 | ||
1763 | done: | |
1764 | hci_dev_put(hdev); | |
1765 | return err; | |
1766 | } | |
1767 | ||
1768 | int hci_dev_reset_stat(__u16 dev) | |
1769 | { | |
1770 | struct hci_dev *hdev; | |
1771 | int ret = 0; | |
1772 | ||
1773 | hdev = hci_dev_get(dev); | |
1774 | if (!hdev) | |
1775 | return -ENODEV; | |
1776 | ||
1777 | if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { | |
1778 | ret = -EBUSY; | |
1779 | goto done; | |
1780 | } | |
1781 | ||
1782 | if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { | |
1783 | ret = -EOPNOTSUPP; | |
1784 | goto done; | |
1785 | } | |
1786 | ||
1787 | memset(&hdev->stat, 0, sizeof(struct hci_dev_stats)); | |
1788 | ||
1789 | done: | |
1790 | hci_dev_put(hdev); | |
1791 | return ret; | |
1792 | } | |
1793 | ||
1794 | static void hci_update_scan_state(struct hci_dev *hdev, u8 scan) | |
1795 | { | |
1796 | bool conn_changed, discov_changed; | |
1797 | ||
1798 | BT_DBG("%s scan 0x%02x", hdev->name, scan); | |
1799 | ||
1800 | if ((scan & SCAN_PAGE)) | |
1801 | conn_changed = !hci_dev_test_and_set_flag(hdev, | |
1802 | HCI_CONNECTABLE); | |
1803 | else | |
1804 | conn_changed = hci_dev_test_and_clear_flag(hdev, | |
1805 | HCI_CONNECTABLE); | |
1806 | ||
1807 | if ((scan & SCAN_INQUIRY)) { | |
1808 | discov_changed = !hci_dev_test_and_set_flag(hdev, | |
1809 | HCI_DISCOVERABLE); | |
1810 | } else { | |
1811 | hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); | |
1812 | discov_changed = hci_dev_test_and_clear_flag(hdev, | |
1813 | HCI_DISCOVERABLE); | |
1814 | } | |
1815 | ||
1816 | if (!hci_dev_test_flag(hdev, HCI_MGMT)) | |
1817 | return; | |
1818 | ||
1819 | if (conn_changed || discov_changed) { | |
1820 | /* In case this was disabled through mgmt */ | |
1821 | hci_dev_set_flag(hdev, HCI_BREDR_ENABLED); | |
1822 | ||
1823 | if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) | |
1824 | hci_req_update_adv_data(hdev, hdev->cur_adv_instance); | |
1825 | ||
1826 | mgmt_new_settings(hdev); | |
1827 | } | |
1828 | } | |
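 | ||
 | /* Note (editor's addition): the scan parameter above is the Write | |
 |  * Scan Enable value from the HCI specification: SCAN_DISABLED (0x00), | |
 |  * SCAN_INQUIRY (0x01), SCAN_PAGE (0x02) or SCAN_PAGE | SCAN_INQUIRY | |
 |  * (0x03). | |
 |  */ | |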
1829 | ||
1830 | int hci_dev_cmd(unsigned int cmd, void __user *arg) | |
1831 | { | |
1832 | struct hci_dev *hdev; | |
1833 | struct hci_dev_req dr; | |
1834 | int err = 0; | |
1835 | ||
1836 | if (copy_from_user(&dr, arg, sizeof(dr))) | |
1837 | return -EFAULT; | |
1838 | ||
1839 | hdev = hci_dev_get(dr.dev_id); | |
1840 | if (!hdev) | |
1841 | return -ENODEV; | |
1842 | ||
1843 | if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { | |
1844 | err = -EBUSY; | |
1845 | goto done; | |
1846 | } | |
1847 | ||
1848 | if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { | |
1849 | err = -EOPNOTSUPP; | |
1850 | goto done; | |
1851 | } | |
1852 | ||
1853 | if (hdev->dev_type != HCI_PRIMARY) { | |
1854 | err = -EOPNOTSUPP; | |
1855 | goto done; | |
1856 | } | |
1857 | ||
1858 | if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { | |
1859 | err = -EOPNOTSUPP; | |
1860 | goto done; | |
1861 | } | |
1862 | ||
1863 | switch (cmd) { | |
1864 | case HCISETAUTH: | |
1865 | err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt, | |
1866 | HCI_INIT_TIMEOUT, NULL); | |
1867 | break; | |
1868 | ||
1869 | case HCISETENCRYPT: | |
1870 | if (!lmp_encrypt_capable(hdev)) { | |
1871 | err = -EOPNOTSUPP; | |
1872 | break; | |
1873 | } | |
1874 | ||
1875 | if (!test_bit(HCI_AUTH, &hdev->flags)) { | |
1876 | /* Auth must be enabled first */ | |
1877 | err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt, | |
1878 | HCI_INIT_TIMEOUT, NULL); | |
1879 | if (err) | |
1880 | break; | |
1881 | } | |
1882 | ||
1883 | err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt, | |
1884 | HCI_INIT_TIMEOUT, NULL); | |
1885 | break; | |
1886 | ||
1887 | case HCISETSCAN: | |
1888 | err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt, | |
1889 | HCI_INIT_TIMEOUT, NULL); | |
1890 | ||
1891 | /* Ensure that the connectable and discoverable states | |
1892 | * get correctly modified as this was a non-mgmt change. | |
1893 | */ | |
1894 | if (!err) | |
1895 | hci_update_scan_state(hdev, dr.dev_opt); | |
1896 | break; | |
1897 | ||
1898 | case HCISETLINKPOL: | |
1899 | err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt, | |
1900 | HCI_INIT_TIMEOUT, NULL); | |
1901 | break; | |
1902 | ||
1903 | case HCISETLINKMODE: | |
1904 | hdev->link_mode = ((__u16) dr.dev_opt) & | |
1905 | (HCI_LM_MASTER | HCI_LM_ACCEPT); | |
1906 | break; | |
1907 | ||
1908 | case HCISETPTYPE: | |
1909 | hdev->pkt_type = (__u16) dr.dev_opt; | |
1910 | break; | |
1911 | ||
1912 | case HCISETACLMTU: | |
1913 | hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1); | |
1914 | hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0); | |
1915 | break; | |
1916 | ||
1917 | case HCISETSCOMTU: | |
1918 | hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1); | |
1919 | hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0); | |
1920 | break; | |
1921 | ||
1922 | default: | |
1923 | err = -EINVAL; | |
1924 | break; | |
1925 | } | |
1926 | ||
1927 | done: | |
1928 | hci_dev_put(hdev); | |
1929 | return err; | |
1930 | } | |
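 | ||
 | /* Userspace sketch (editor's illustration, not part of the original | |
 |  * file): HCISETACLMTU packs both values into the 32-bit dev_opt word, | |
 |  * mirroring the pointer arithmetic above - the upper 16 bits carry | |
 |  * the MTU and the lower 16 bits the packet count. The device id 0 and | |
 |  * the set_acl_mtu() helper are assumptions for the example. | |
 |  */ | |
 | #include <stdint.h> | |
 | #include <sys/ioctl.h> | |
 | #include <bluetooth/bluetooth.h> | |
 | #include <bluetooth/hci.h> | |
 | ||
 | static int set_acl_mtu(int ctl, uint16_t mtu, uint16_t pkts) | |
 | { | |
 |         struct hci_dev_req dr = { .dev_id = 0 }; | |
 | ||
 |         *((uint16_t *) &dr.dev_opt + 1) = mtu;  /* upper half: MTU */ | |
 |         *((uint16_t *) &dr.dev_opt + 0) = pkts; /* lower half: packets */ | |
 | ||
 |         return ioctl(ctl, HCISETACLMTU, (unsigned long) &dr); | |
 | } | |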
1931 | ||
1932 | int hci_get_dev_list(void __user *arg) | |
1933 | { | |
1934 | struct hci_dev *hdev; | |
1935 | struct hci_dev_list_req *dl; | |
1936 | struct hci_dev_req *dr; | |
1937 | int n = 0, size, err; | |
1938 | __u16 dev_num; | |
1939 | ||
1940 | if (get_user(dev_num, (__u16 __user *) arg)) | |
1941 | return -EFAULT; | |
1942 | ||
1943 | if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr)) | |
1944 | return -EINVAL; | |
1945 | ||
1946 | size = sizeof(*dl) + dev_num * sizeof(*dr); | |
1947 | ||
1948 | dl = kzalloc(size, GFP_KERNEL); | |
1949 | if (!dl) | |
1950 | return -ENOMEM; | |
1951 | ||
1952 | dr = dl->dev_req; | |
1953 | ||
1954 | read_lock(&hci_dev_list_lock); | |
1955 | list_for_each_entry(hdev, &hci_dev_list, list) { | |
1956 | unsigned long flags = hdev->flags; | |
1957 | ||
1958 | /* When auto-off is configured the transport is actually | |
1959 | * running, but in that case still indicate to userspace | |
1960 | * that the device is down. | |
1961 | */ | |
1962 | if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) | |
1963 | flags &= ~BIT(HCI_UP); | |
1964 | ||
1965 | (dr + n)->dev_id = hdev->id; | |
1966 | (dr + n)->dev_opt = flags; | |
1967 | ||
1968 | if (++n >= dev_num) | |
1969 | break; | |
1970 | } | |
1971 | read_unlock(&hci_dev_list_lock); | |
1972 | ||
1973 | dl->dev_num = n; | |
1974 | size = sizeof(*dl) + n * sizeof(*dr); | |
1975 | ||
1976 | err = copy_to_user(arg, dl, size); | |
1977 | kfree(dl); | |
1978 | ||
1979 | return err ? -EFAULT : 0; | |
1980 | } | |
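 | ||
 | /* Userspace counterpart sketch (editor's illustration, not part of | |
 |  * the original file): querying the device list through the | |
 |  * HCIGETDEVLIST ioctl on a raw HCI socket. Error handling is | |
 |  * abbreviated. | |
 |  */ | |
 | #include <stdio.h> | |
 | #include <stdlib.h> | |
 | #include <unistd.h> | |
 | #include <sys/ioctl.h> | |
 | #include <sys/socket.h> | |
 | #include <bluetooth/bluetooth.h> | |
 | #include <bluetooth/hci.h> | |
 | ||
 | static int list_hci_devices(void) | |
 | { | |
 |         struct hci_dev_list_req *dl; | |
 |         struct hci_dev_req *dr; | |
 |         int i, sk; | |
 | ||
 |         sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI); | |
 |         if (sk < 0) | |
 |                 return -1; | |
 | ||
 |         dl = calloc(1, sizeof(*dl) + HCI_MAX_DEV * sizeof(*dr)); | |
 |         if (!dl) { | |
 |                 close(sk); | |
 |                 return -1; | |
 |         } | |
 |         dl->dev_num = HCI_MAX_DEV; | |
 |         dr = dl->dev_req; | |
 | ||
 |         if (ioctl(sk, HCIGETDEVLIST, (void *) dl) == 0) | |
 |                 for (i = 0; i < dl->dev_num; i++) | |
 |                         printf("hci%u flags 0x%08x\n", dr[i].dev_id, | |
 |                                (unsigned int) dr[i].dev_opt); | |
 | ||
 |         free(dl); | |
 |         close(sk); | |
 |         return 0; | |
 | } | |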
1981 | ||
1982 | int hci_get_dev_info(void __user *arg) | |
1983 | { | |
1984 | struct hci_dev *hdev; | |
1985 | struct hci_dev_info di; | |
1986 | unsigned long flags; | |
1987 | int err = 0; | |
1988 | ||
1989 | if (copy_from_user(&di, arg, sizeof(di))) | |
1990 | return -EFAULT; | |
1991 | ||
1992 | hdev = hci_dev_get(di.dev_id); | |
1993 | if (!hdev) | |
1994 | return -ENODEV; | |
1995 | ||
1996 | /* When auto-off is configured the transport is actually | |
1997 | * running, but in that case still indicate to userspace | |
1998 | * that the device is down. | |
1999 | */ | |
2000 | if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) | |
2001 | flags = hdev->flags & ~BIT(HCI_UP); | |
2002 | else | |
2003 | flags = hdev->flags; | |
2004 | ||
2005 | strcpy(di.name, hdev->name); | |
2006 | di.bdaddr = hdev->bdaddr; | |
2007 | di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4); | |
2008 | di.flags = flags; | |
2009 | di.pkt_type = hdev->pkt_type; | |
2010 | if (lmp_bredr_capable(hdev)) { | |
2011 | di.acl_mtu = hdev->acl_mtu; | |
2012 | di.acl_pkts = hdev->acl_pkts; | |
2013 | di.sco_mtu = hdev->sco_mtu; | |
2014 | di.sco_pkts = hdev->sco_pkts; | |
2015 | } else { | |
2016 | di.acl_mtu = hdev->le_mtu; | |
2017 | di.acl_pkts = hdev->le_pkts; | |
2018 | di.sco_mtu = 0; | |
2019 | di.sco_pkts = 0; | |
2020 | } | |
2021 | di.link_policy = hdev->link_policy; | |
2022 | di.link_mode = hdev->link_mode; | |
2023 | ||
2024 | memcpy(&di.stat, &hdev->stat, sizeof(di.stat)); | |
2025 | memcpy(&di.features, &hdev->features, sizeof(di.features)); | |
2026 | ||
2027 | if (copy_to_user(arg, &di, sizeof(di))) | |
2028 | err = -EFAULT; | |
2029 | ||
2030 | hci_dev_put(hdev); | |
2031 | ||
2032 | return err; | |
2033 | } | |
2034 | ||
2035 | /* ---- Interface to HCI drivers ---- */ | |
2036 | ||
2037 | static int hci_rfkill_set_block(void *data, bool blocked) | |
2038 | { | |
2039 | struct hci_dev *hdev = data; | |
2040 | ||
2041 | BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked); | |
2042 | ||
2043 | if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) | |
2044 | return -EBUSY; | |
2045 | ||
2046 | if (blocked) { | |
2047 | hci_dev_set_flag(hdev, HCI_RFKILLED); | |
2048 | if (!hci_dev_test_flag(hdev, HCI_SETUP) && | |
2049 | !hci_dev_test_flag(hdev, HCI_CONFIG)) | |
2050 | hci_dev_do_close(hdev); | |
2051 | } else { | |
2052 | hci_dev_clear_flag(hdev, HCI_RFKILLED); | |
2053 | } | |
2054 | ||
2055 | return 0; | |
2056 | } | |
2057 | ||
2058 | static const struct rfkill_ops hci_rfkill_ops = { | |
2059 | .set_block = hci_rfkill_set_block, | |
2060 | }; | |
2061 | ||
2062 | static void hci_power_on(struct work_struct *work) | |
2063 | { | |
2064 | struct hci_dev *hdev = container_of(work, struct hci_dev, power_on); | |
2065 | int err; | |
2066 | ||
2067 | BT_DBG("%s", hdev->name); | |
2068 | ||
2069 | if (test_bit(HCI_UP, &hdev->flags) && | |
2070 | hci_dev_test_flag(hdev, HCI_MGMT) && | |
2071 | hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) { | |
2072 | cancel_delayed_work(&hdev->power_off); | |
2073 | hci_req_sync_lock(hdev); | |
2074 | err = __hci_req_hci_power_on(hdev); | |
2075 | hci_req_sync_unlock(hdev); | |
2076 | mgmt_power_on(hdev, err); | |
2077 | return; | |
2078 | } | |
2079 | ||
2080 | err = hci_dev_do_open(hdev); | |
2081 | if (err < 0) { | |
2082 | hci_dev_lock(hdev); | |
2083 | mgmt_set_powered_failed(hdev, err); | |
2084 | hci_dev_unlock(hdev); | |
2085 | return; | |
2086 | } | |
2087 | ||
2088 | /* During the HCI setup phase, a few error conditions are | |
2089 | * ignored and they need to be checked now. If they are still | |
2090 | * valid, it is important to turn the device back off. | |
2091 | */ | |
2092 | if (hci_dev_test_flag(hdev, HCI_RFKILLED) || | |
2093 | hci_dev_test_flag(hdev, HCI_UNCONFIGURED) || | |
2094 | (hdev->dev_type == HCI_PRIMARY && | |
2095 | !bacmp(&hdev->bdaddr, BDADDR_ANY) && | |
2096 | !bacmp(&hdev->static_addr, BDADDR_ANY))) { | |
2097 | hci_dev_clear_flag(hdev, HCI_AUTO_OFF); | |
2098 | hci_dev_do_close(hdev); | |
2099 | } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) { | |
2100 | queue_delayed_work(hdev->req_workqueue, &hdev->power_off, | |
2101 | HCI_AUTO_OFF_TIMEOUT); | |
2102 | } | |
2103 | ||
2104 | if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) { | |
2105 | /* For unconfigured devices, set the HCI_RAW flag | |
2106 | * so that userspace can easily identify them. | |
2107 | */ | |
2108 | if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) | |
2109 | set_bit(HCI_RAW, &hdev->flags); | |
2110 | ||
2111 | /* For fully configured devices, this will send | |
2112 | * the Index Added event. For unconfigured devices, | |
2113 | * it will send the Unconfigured Index Added event. | |
2114 | * | |
2115 | * Devices with HCI_QUIRK_RAW_DEVICE are ignored | |
2116 | * and no event will be sent. | |
2117 | */ | |
2118 | mgmt_index_added(hdev); | |
2119 | } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) { | |
2120 | /* When the controller is now configured, then it | |
2121 | * is important to clear the HCI_RAW flag. | |
2122 | */ | |
2123 | if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) | |
2124 | clear_bit(HCI_RAW, &hdev->flags); | |
2125 | ||
2126 | /* Powering on the controller with HCI_CONFIG set only | |
2127 | * happens with the transition from unconfigured to | |
2128 | * configured. This will send the Index Added event. | |
2129 | */ | |
2130 | mgmt_index_added(hdev); | |
2131 | } | |
2132 | } | |
2133 | ||
2134 | static void hci_power_off(struct work_struct *work) | |
2135 | { | |
2136 | struct hci_dev *hdev = container_of(work, struct hci_dev, | |
2137 | power_off.work); | |
2138 | ||
2139 | BT_DBG("%s", hdev->name); | |
2140 | ||
2141 | hci_dev_do_close(hdev); | |
2142 | } | |
2143 | ||
2144 | static void hci_error_reset(struct work_struct *work) | |
2145 | { | |
2146 | struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset); | |
2147 | ||
2148 | BT_DBG("%s", hdev->name); | |
2149 | ||
2150 | if (hdev->hw_error) | |
2151 | hdev->hw_error(hdev, hdev->hw_error_code); | |
2152 | else | |
2153 | BT_ERR("%s hardware error 0x%2.2x", hdev->name, | |
2154 | hdev->hw_error_code); | |
2155 | ||
2156 | if (hci_dev_do_close(hdev)) | |
2157 | return; | |
2158 | ||
2159 | hci_dev_do_open(hdev); | |
2160 | } | |
2161 | ||
2162 | void hci_uuids_clear(struct hci_dev *hdev) | |
2163 | { | |
2164 | struct bt_uuid *uuid, *tmp; | |
2165 | ||
2166 | list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) { | |
2167 | list_del(&uuid->list); | |
2168 | kfree(uuid); | |
2169 | } | |
2170 | } | |
2171 | ||
2172 | void hci_link_keys_clear(struct hci_dev *hdev) | |
2173 | { | |
2174 | struct link_key *key; | |
2175 | ||
2176 | list_for_each_entry_rcu(key, &hdev->link_keys, list) { | |
2177 | list_del_rcu(&key->list); | |
2178 | kfree_rcu(key, rcu); | |
2179 | } | |
2180 | } | |
2181 | ||
2182 | void hci_smp_ltks_clear(struct hci_dev *hdev) | |
2183 | { | |
2184 | struct smp_ltk *k; | |
2185 | ||
2186 | list_for_each_entry_rcu(k, &hdev->long_term_keys, list) { | |
2187 | list_del_rcu(&k->list); | |
2188 | kfree_rcu(k, rcu); | |
2189 | } | |
2190 | } | |
2191 | ||
2192 | void hci_smp_irks_clear(struct hci_dev *hdev) | |
2193 | { | |
2194 | struct smp_irk *k; | |
2195 | ||
2196 | list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) { | |
2197 | list_del_rcu(&k->list); | |
2198 | kfree_rcu(k, rcu); | |
2199 | } | |
2200 | } | |
2201 | ||
2202 | struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) | |
2203 | { | |
2204 | struct link_key *k; | |
2205 | ||
2206 | rcu_read_lock(); | |
2207 | list_for_each_entry_rcu(k, &hdev->link_keys, list) { | |
2208 | if (bacmp(bdaddr, &k->bdaddr) == 0) { | |
2209 | rcu_read_unlock(); | |
2210 | return k; | |
2211 | } | |
2212 | } | |
2213 | rcu_read_unlock(); | |
2214 | ||
2215 | return NULL; | |
2216 | } | |
2217 | ||
2218 | static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn, | |
2219 | u8 key_type, u8 old_key_type) | |
2220 | { | |
2221 | /* Legacy key */ | |
2222 | if (key_type < 0x03) | |
2223 | return true; | |
2224 | ||
2225 | /* Debug keys are insecure so don't store them persistently */ | |
2226 | if (key_type == HCI_LK_DEBUG_COMBINATION) | |
2227 | return false; | |
2228 | ||
2229 | /* Changed combination key and there's no previous one */ | |
2230 | if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff) | |
2231 | return false; | |
2232 | ||
2233 | /* Security mode 3 case */ | |
2234 | if (!conn) | |
2235 | return true; | |
2236 | ||
2237 | /* BR/EDR key derived using SC from an LE link */ | |
2238 | if (conn->type == LE_LINK) | |
2239 | return true; | |
2240 | ||
2241 | /* Neither local nor remote side had no-bonding as requirement */ | |
2242 | if (conn->auth_type > 0x01 && conn->remote_auth > 0x01) | |
2243 | return true; | |
2244 | ||
2245 | /* Local side had dedicated bonding as requirement */ | |
2246 | if (conn->auth_type == 0x02 || conn->auth_type == 0x03) | |
2247 | return true; | |
2248 | ||
2249 | /* Remote side had dedicated bonding as requirement */ | |
2250 | if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) | |
2251 | return true; | |
2252 | ||
2253 | /* If none of the above criteria match, then don't store the key | |
2254 | * persistently */ | |
2255 | return false; | |
2256 | } | |
2257 | ||
2258 | static u8 ltk_role(u8 type) | |
2259 | { | |
2260 | if (type == SMP_LTK) | |
2261 | return HCI_ROLE_MASTER; | |
2262 | ||
2263 | return HCI_ROLE_SLAVE; | |
2264 | } | |
2265 | ||
2266 | struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, | |
2267 | u8 addr_type, u8 role) | |
2268 | { | |
2269 | struct smp_ltk *k; | |
2270 | ||
2271 | rcu_read_lock(); | |
2272 | list_for_each_entry_rcu(k, &hdev->long_term_keys, list) { | |
2273 | if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr)) | |
2274 | continue; | |
2275 | ||
2276 | if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) { | |
2277 | rcu_read_unlock(); | |
2278 | return k; | |
2279 | } | |
2280 | } | |
2281 | rcu_read_unlock(); | |
2282 | ||
2283 | return NULL; | |
2284 | } | |
2285 | ||
2286 | struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa) | |
2287 | { | |
2288 | struct smp_irk *irk; | |
2289 | ||
2290 | rcu_read_lock(); | |
2291 | list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) { | |
2292 | if (!bacmp(&irk->rpa, rpa)) { | |
2293 | rcu_read_unlock(); | |
2294 | return irk; | |
2295 | } | |
2296 | } | |
2297 | ||
2298 | list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) { | |
2299 | if (smp_irk_matches(hdev, irk->val, rpa)) { | |
2300 | bacpy(&irk->rpa, rpa); | |
2301 | rcu_read_unlock(); | |
2302 | return irk; | |
2303 | } | |
2304 | } | |
2305 | rcu_read_unlock(); | |
2306 | ||
2307 | return NULL; | |
2308 | } | |
2309 | ||
2310 | struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, | |
2311 | u8 addr_type) | |
2312 | { | |
2313 | struct smp_irk *irk; | |
2314 | ||
2315 | /* Identity Address must be public or static random */ | |
2316 | if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0) | |
2317 | return NULL; | |
2318 | ||
2319 | rcu_read_lock(); | |
2320 | list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) { | |
2321 | if (addr_type == irk->addr_type && | |
2322 | bacmp(bdaddr, &irk->bdaddr) == 0) { | |
2323 | rcu_read_unlock(); | |
2324 | return irk; | |
2325 | } | |
2326 | } | |
2327 | rcu_read_unlock(); | |
2328 | ||
2329 | return NULL; | |
2330 | } | |
2331 | ||
2332 | struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, | |
2333 | bdaddr_t *bdaddr, u8 *val, u8 type, | |
2334 | u8 pin_len, bool *persistent) | |
2335 | { | |
2336 | struct link_key *key, *old_key; | |
2337 | u8 old_key_type; | |
2338 | ||
2339 | old_key = hci_find_link_key(hdev, bdaddr); | |
2340 | if (old_key) { | |
2341 | old_key_type = old_key->type; | |
2342 | key = old_key; | |
2343 | } else { | |
2344 | old_key_type = conn ? conn->key_type : 0xff; | |
2345 | key = kzalloc(sizeof(*key), GFP_KERNEL); | |
2346 | if (!key) | |
2347 | return NULL; | |
2348 | list_add_rcu(&key->list, &hdev->link_keys); | |
2349 | } | |
2350 | ||
2351 | BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type); | |
2352 | ||
2353 | /* Some buggy controller combinations generate a changed | |
2354 | * combination key for legacy pairing even when there's no | |
2355 | * previous key */ | |
2356 | if (type == HCI_LK_CHANGED_COMBINATION && | |
2357 | (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) { | |
2358 | type = HCI_LK_COMBINATION; | |
2359 | if (conn) | |
2360 | conn->key_type = type; | |
2361 | } | |
2362 | ||
2363 | bacpy(&key->bdaddr, bdaddr); | |
2364 | memcpy(key->val, val, HCI_LINK_KEY_SIZE); | |
2365 | key->pin_len = pin_len; | |
2366 | ||
2367 | if (type == HCI_LK_CHANGED_COMBINATION) | |
2368 | key->type = old_key_type; | |
2369 | else | |
2370 | key->type = type; | |
2371 | ||
2372 | if (persistent) | |
2373 | *persistent = hci_persistent_key(hdev, conn, type, | |
2374 | old_key_type); | |
2375 | ||
2376 | return key; | |
2377 | } | |
2378 | ||
2379 | struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, | |
2380 | u8 addr_type, u8 type, u8 authenticated, | |
2381 | u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand) | |
2382 | { | |
2383 | struct smp_ltk *key, *old_key; | |
2384 | u8 role = ltk_role(type); | |
2385 | ||
2386 | old_key = hci_find_ltk(hdev, bdaddr, addr_type, role); | |
2387 | if (old_key) { | |
2388 | key = old_key; | |
2389 | } else { | |
2390 | key = kzalloc(sizeof(*key), GFP_KERNEL); | |
2391 | if (!key) | |
2392 | return NULL; | |
2393 | list_add_rcu(&key->list, &hdev->long_term_keys); | |
2394 | } | |
2395 | ||
2396 | bacpy(&key->bdaddr, bdaddr); | |
2397 | key->bdaddr_type = addr_type; | |
2398 | memcpy(key->val, tk, sizeof(key->val)); | |
2399 | key->authenticated = authenticated; | |
2400 | key->ediv = ediv; | |
2401 | key->rand = rand; | |
2402 | key->enc_size = enc_size; | |
2403 | key->type = type; | |
2404 | ||
2405 | return key; | |
2406 | } | |
2407 | ||
2408 | struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, | |
2409 | u8 addr_type, u8 val[16], bdaddr_t *rpa) | |
2410 | { | |
2411 | struct smp_irk *irk; | |
2412 | ||
2413 | irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type); | |
2414 | if (!irk) { | |
2415 | irk = kzalloc(sizeof(*irk), GFP_KERNEL); | |
2416 | if (!irk) | |
2417 | return NULL; | |
2418 | ||
2419 | bacpy(&irk->bdaddr, bdaddr); | |
2420 | irk->addr_type = addr_type; | |
2421 | ||
2422 | list_add_rcu(&irk->list, &hdev->identity_resolving_keys); | |
2423 | } | |
2424 | ||
2425 | memcpy(irk->val, val, 16); | |
2426 | bacpy(&irk->rpa, rpa); | |
2427 | ||
2428 | return irk; | |
2429 | } | |
2430 | ||
2431 | int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) | |
2432 | { | |
2433 | struct link_key *key; | |
2434 | ||
2435 | key = hci_find_link_key(hdev, bdaddr); | |
2436 | if (!key) | |
2437 | return -ENOENT; | |
2438 | ||
2439 | BT_DBG("%s removing %pMR", hdev->name, bdaddr); | |
2440 | ||
2441 | list_del_rcu(&key->list); | |
2442 | kfree_rcu(key, rcu); | |
2443 | ||
2444 | return 0; | |
2445 | } | |
2446 | ||
2447 | int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type) | |
2448 | { | |
2449 | struct smp_ltk *k; | |
2450 | int removed = 0; | |
2451 | ||
2452 | list_for_each_entry_rcu(k, &hdev->long_term_keys, list) { | |
2453 | if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type) | |
2454 | continue; | |
2455 | ||
2456 | BT_DBG("%s removing %pMR", hdev->name, bdaddr); | |
2457 | ||
2458 | list_del_rcu(&k->list); | |
2459 | kfree_rcu(k, rcu); | |
2460 | removed++; | |
2461 | } | |
2462 | ||
2463 | return removed ? 0 : -ENOENT; | |
2464 | } | |
2465 | ||
2466 | void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type) | |
2467 | { | |
2468 | struct smp_irk *k; | |
2469 | ||
2470 | list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) { | |
2471 | if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type) | |
2472 | continue; | |
2473 | ||
2474 | BT_DBG("%s removing %pMR", hdev->name, bdaddr); | |
2475 | ||
2476 | list_del_rcu(&k->list); | |
2477 | kfree_rcu(k, rcu); | |
2478 | } | |
2479 | } | |
2480 | ||
2481 | bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) | |
2482 | { | |
2483 | struct smp_ltk *k; | |
2484 | struct smp_irk *irk; | |
2485 | u8 addr_type; | |
2486 | ||
2487 | if (type == BDADDR_BREDR) { | |
2488 | if (hci_find_link_key(hdev, bdaddr)) | |
2489 | return true; | |
2490 | return false; | |
2491 | } | |
2492 | ||
2493 | /* Convert to HCI addr type which struct smp_ltk uses */ | |
2494 | if (type == BDADDR_LE_PUBLIC) | |
2495 | addr_type = ADDR_LE_DEV_PUBLIC; | |
2496 | else | |
2497 | addr_type = ADDR_LE_DEV_RANDOM; | |
2498 | ||
2499 | irk = hci_get_irk(hdev, bdaddr, addr_type); | |
2500 | if (irk) { | |
2501 | bdaddr = &irk->bdaddr; | |
2502 | addr_type = irk->addr_type; | |
2503 | } | |
2504 | ||
2505 | rcu_read_lock(); | |
2506 | list_for_each_entry_rcu(k, &hdev->long_term_keys, list) { | |
2507 | if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) { | |
2508 | rcu_read_unlock(); | |
2509 | return true; | |
2510 | } | |
2511 | } | |
2512 | rcu_read_unlock(); | |
2513 | ||
2514 | return false; | |
2515 | } | |
2516 | ||
2517 | /* HCI command timer function */ | |
2518 | static void hci_cmd_timeout(struct work_struct *work) | |
2519 | { | |
2520 | struct hci_dev *hdev = container_of(work, struct hci_dev, | |
2521 | cmd_timer.work); | |
2522 | ||
2523 | if (hdev->sent_cmd) { | |
2524 | struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data; | |
2525 | u16 opcode = __le16_to_cpu(sent->opcode); | |
2526 | ||
2527 | BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode); | |
2528 | } else { | |
2529 | BT_ERR("%s command tx timeout", hdev->name); | |
2530 | } | |
2531 | ||
2532 | atomic_set(&hdev->cmd_cnt, 1); | |
2533 | queue_work(hdev->workqueue, &hdev->cmd_work); | |
2534 | } | |
2535 | ||
2536 | struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev, | |
2537 | bdaddr_t *bdaddr, u8 bdaddr_type) | |
2538 | { | |
2539 | struct oob_data *data; | |
2540 | ||
2541 | list_for_each_entry(data, &hdev->remote_oob_data, list) { | |
2542 | if (bacmp(bdaddr, &data->bdaddr) != 0) | |
2543 | continue; | |
2544 | if (data->bdaddr_type != bdaddr_type) | |
2545 | continue; | |
2546 | return data; | |
2547 | } | |
2548 | ||
2549 | return NULL; | |
2550 | } | |
2551 | ||
2552 | int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, | |
2553 | u8 bdaddr_type) | |
2554 | { | |
2555 | struct oob_data *data; | |
2556 | ||
2557 | data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type); | |
2558 | if (!data) | |
2559 | return -ENOENT; | |
2560 | ||
2561 | BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type); | |
2562 | ||
2563 | list_del(&data->list); | |
2564 | kfree(data); | |
2565 | ||
2566 | return 0; | |
2567 | } | |
2568 | ||
2569 | void hci_remote_oob_data_clear(struct hci_dev *hdev) | |
2570 | { | |
2571 | struct oob_data *data, *n; | |
2572 | ||
2573 | list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) { | |
2574 | list_del(&data->list); | |
2575 | kfree(data); | |
2576 | } | |
2577 | } | |
2578 | ||
2579 | int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, | |
2580 | u8 bdaddr_type, u8 *hash192, u8 *rand192, | |
2581 | u8 *hash256, u8 *rand256) | |
2582 | { | |
2583 | struct oob_data *data; | |
2584 | ||
2585 | data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type); | |
2586 | if (!data) { | |
2587 | data = kmalloc(sizeof(*data), GFP_KERNEL); | |
2588 | if (!data) | |
2589 | return -ENOMEM; | |
2590 | ||
2591 | bacpy(&data->bdaddr, bdaddr); | |
2592 | data->bdaddr_type = bdaddr_type; | |
2593 | list_add(&data->list, &hdev->remote_oob_data); | |
2594 | } | |
2595 | ||
2596 | if (hash192 && rand192) { | |
2597 | memcpy(data->hash192, hash192, sizeof(data->hash192)); | |
2598 | memcpy(data->rand192, rand192, sizeof(data->rand192)); | |
2599 | if (hash256 && rand256) | |
2600 | data->present = 0x03; | |
2601 | } else { | |
2602 | memset(data->hash192, 0, sizeof(data->hash192)); | |
2603 | memset(data->rand192, 0, sizeof(data->rand192)); | |
2604 | if (hash256 && rand256) | |
2605 | data->present = 0x02; | |
2606 | else | |
2607 | data->present = 0x00; | |
2608 | } | |
2609 | ||
2610 | if (hash256 && rand256) { | |
2611 | memcpy(data->hash256, hash256, sizeof(data->hash256)); | |
2612 | memcpy(data->rand256, rand256, sizeof(data->rand256)); | |
2613 | } else { | |
2614 | memset(data->hash256, 0, sizeof(data->hash256)); | |
2615 | memset(data->rand256, 0, sizeof(data->rand256)); | |
2616 | if (hash192 && rand192) | |
2617 | data->present = 0x01; | |
2618 | } | |
2619 | ||
2620 | BT_DBG("%s for %pMR", hdev->name, bdaddr); | |
2621 | ||
2622 | return 0; | |
2623 | } | |
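 | ||
 | /* Note (editor's addition): summary of the data->present encoding | |
 |  * set above: | |
 |  *   0x00 - neither P-192 nor P-256 values available | |
 |  *   0x01 - only the P-192 hash/randomizer pair present | |
 |  *   0x02 - only the P-256 hash/randomizer pair present | |
 |  *   0x03 - both the P-192 and P-256 pairs present | |
 |  */ | |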
2624 | ||
2625 | /* This function requires the caller holds hdev->lock */ | |
2626 | struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance) | |
2627 | { | |
2628 | struct adv_info *adv_instance; | |
2629 | ||
2630 | list_for_each_entry(adv_instance, &hdev->adv_instances, list) { | |
2631 | if (adv_instance->instance == instance) | |
2632 | return adv_instance; | |
2633 | } | |
2634 | ||
2635 | return NULL; | |
2636 | } | |
2637 | ||
2638 | /* This function requires the caller holds hdev->lock */ | |
2639 | struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance) | |
2640 | { | |
2641 | struct adv_info *cur_instance; | |
2642 | ||
2643 | cur_instance = hci_find_adv_instance(hdev, instance); | |
2644 | if (!cur_instance) | |
2645 | return NULL; | |
2646 | ||
2647 | if (cur_instance == list_last_entry(&hdev->adv_instances, | |
2648 | struct adv_info, list)) | |
2649 | return list_first_entry(&hdev->adv_instances, | |
2650 | struct adv_info, list); | |
2651 | else | |
2652 | return list_next_entry(cur_instance, list); | |
2653 | } | |
2654 | ||
2655 | /* This function requires the caller holds hdev->lock */ | |
2656 | int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance) | |
2657 | { | |
2658 | struct adv_info *adv_instance; | |
2659 | ||
2660 | adv_instance = hci_find_adv_instance(hdev, instance); | |
2661 | if (!adv_instance) | |
2662 | return -ENOENT; | |
2663 | ||
2664 | BT_DBG("%s removing instance %d", hdev->name, instance); | |
2665 | ||
2666 | if (hdev->cur_adv_instance == instance) { | |
2667 | if (hdev->adv_instance_timeout) { | |
2668 | cancel_delayed_work(&hdev->adv_instance_expire); | |
2669 | hdev->adv_instance_timeout = 0; | |
2670 | } | |
2671 | hdev->cur_adv_instance = 0x00; | |
2672 | } | |
2673 | ||
2674 | list_del(&adv_instance->list); | |
2675 | kfree(adv_instance); | |
2676 | ||
2677 | hdev->adv_instance_cnt--; | |
2678 | ||
2679 | return 0; | |
2680 | } | |
2681 | ||
2682 | /* This function requires the caller holds hdev->lock */ | |
2683 | void hci_adv_instances_clear(struct hci_dev *hdev) | |
2684 | { | |
2685 | struct adv_info *adv_instance, *n; | |
2686 | ||
2687 | if (hdev->adv_instance_timeout) { | |
2688 | cancel_delayed_work(&hdev->adv_instance_expire); | |
2689 | hdev->adv_instance_timeout = 0; | |
2690 | } | |
2691 | ||
2692 | list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) { | |
2693 | list_del(&adv_instance->list); | |
2694 | kfree(adv_instance); | |
2695 | } | |
2696 | ||
2697 | hdev->adv_instance_cnt = 0; | |
2698 | hdev->cur_adv_instance = 0x00; | |
2699 | } | |
2700 | ||
2701 | /* This function requires the caller holds hdev->lock */ | |
2702 | int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags, | |
2703 | u16 adv_data_len, u8 *adv_data, | |
2704 | u16 scan_rsp_len, u8 *scan_rsp_data, | |
2705 | u16 timeout, u16 duration) | |
2706 | { | |
2707 | struct adv_info *adv_instance; | |
2708 | ||
2709 | adv_instance = hci_find_adv_instance(hdev, instance); | |
2710 | if (adv_instance) { | |
2711 | memset(adv_instance->adv_data, 0, | |
2712 | sizeof(adv_instance->adv_data)); | |
2713 | memset(adv_instance->scan_rsp_data, 0, | |
2714 | sizeof(adv_instance->scan_rsp_data)); | |
2715 | } else { | |
2716 | if (hdev->adv_instance_cnt >= HCI_MAX_ADV_INSTANCES || | |
2717 | instance < 1 || instance > HCI_MAX_ADV_INSTANCES) | |
2718 | return -EOVERFLOW; | |
2719 | ||
2720 | adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL); | |
2721 | if (!adv_instance) | |
2722 | return -ENOMEM; | |
2723 | ||
2724 | adv_instance->pending = true; | |
2725 | adv_instance->instance = instance; | |
2726 | list_add(&adv_instance->list, &hdev->adv_instances); | |
2727 | hdev->adv_instance_cnt++; | |
2728 | } | |
2729 | ||
2730 | adv_instance->flags = flags; | |
2731 | adv_instance->adv_data_len = adv_data_len; | |
2732 | adv_instance->scan_rsp_len = scan_rsp_len; | |
2733 | ||
2734 | if (adv_data_len) | |
2735 | memcpy(adv_instance->adv_data, adv_data, adv_data_len); | |
2736 | ||
2737 | if (scan_rsp_len) | |
2738 | memcpy(adv_instance->scan_rsp_data, | |
2739 | scan_rsp_data, scan_rsp_len); | |
2740 | ||
2741 | adv_instance->timeout = timeout; | |
2742 | adv_instance->remaining_time = timeout; | |
2743 | ||
2744 | if (duration == 0) | |
2745 | adv_instance->duration = HCI_DEFAULT_ADV_DURATION; | |
2746 | else | |
2747 | adv_instance->duration = duration; | |
2748 | ||
2749 | BT_DBG("%s for instance %d", hdev->name, instance); | |
2750 | ||
2751 | return 0; | |
2752 | } | |
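 | ||
 | /* A minimal sketch (editor's illustration) of registering an | |
 |  * advertising instance carrying a single Flags AD structure. The | |
 |  * instance number 0x01 and zero timeout are assumptions for the | |
 |  * example; a zero duration falls back to HCI_DEFAULT_ADV_DURATION | |
 |  * as shown above, and hdev->lock must be held around the call. | |
 |  */ | |
 | static int example_add_adv(struct hci_dev *hdev) | |
 | { | |
 |         /* AD: len 0x02, type 0x01 (Flags), LE General Discoverable */ | |
 |         u8 adv_data[] = { 0x02, 0x01, 0x06 }; | |
 |         int err; | |
 | ||
 |         hci_dev_lock(hdev); | |
 |         err = hci_add_adv_instance(hdev, 0x01, 0, sizeof(adv_data), | |
 |                                    adv_data, 0, NULL, 0, 0); | |
 |         hci_dev_unlock(hdev); | |
 | ||
 |         return err; | |
 | } | |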
2753 | ||
2754 | struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list, | |
2755 | bdaddr_t *bdaddr, u8 type) | |
2756 | { | |
2757 | struct bdaddr_list *b; | |
2758 | ||
2759 | list_for_each_entry(b, bdaddr_list, list) { | |
2760 | if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type) | |
2761 | return b; | |
2762 | } | |
2763 | ||
2764 | return NULL; | |
2765 | } | |
2766 | ||
2767 | void hci_bdaddr_list_clear(struct list_head *bdaddr_list) | |
2768 | { | |
2769 | struct bdaddr_list *b, *n; | |
2770 | ||
2771 | list_for_each_entry_safe(b, n, bdaddr_list, list) { | |
2772 | list_del(&b->list); | |
2773 | kfree(b); | |
2774 | } | |
2775 | } | |
2776 | ||
2777 | int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type) | |
2778 | { | |
2779 | struct bdaddr_list *entry; | |
2780 | ||
2781 | if (!bacmp(bdaddr, BDADDR_ANY)) | |
2782 | return -EBADF; | |
2783 | ||
2784 | if (hci_bdaddr_list_lookup(list, bdaddr, type)) | |
2785 | return -EEXIST; | |
2786 | ||
2787 | entry = kzalloc(sizeof(*entry), GFP_KERNEL); | |
2788 | if (!entry) | |
2789 | return -ENOMEM; | |
2790 | ||
2791 | bacpy(&entry->bdaddr, bdaddr); | |
2792 | entry->bdaddr_type = type; | |
2793 | ||
2794 | list_add(&entry->list, list); | |
2795 | ||
2796 | return 0; | |
2797 | } | |
2798 | ||
2799 | int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type) | |
2800 | { | |
2801 | struct bdaddr_list *entry; | |
2802 | ||
2803 | if (!bacmp(bdaddr, BDADDR_ANY)) { | |
2804 | hci_bdaddr_list_clear(list); | |
2805 | return 0; | |
2806 | } | |
2807 | ||
2808 | entry = hci_bdaddr_list_lookup(list, bdaddr, type); | |
2809 | if (!entry) | |
2810 | return -ENOENT; | |
2811 | ||
2812 | list_del(&entry->list); | |
2813 | kfree(entry); | |
2814 | ||
2815 | return 0; | |
2816 | } | |
2817 | ||
2818 | /* This function requires the caller holds hdev->lock */ | |
2819 | struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev, | |
2820 | bdaddr_t *addr, u8 addr_type) | |
2821 | { | |
2822 | struct hci_conn_params *params; | |
2823 | ||
2824 | list_for_each_entry(params, &hdev->le_conn_params, list) { | |
2825 | if (bacmp(¶ms->addr, addr) == 0 && | |
2826 | params->addr_type == addr_type) { | |
2827 | return params; | |
2828 | } | |
2829 | } | |
2830 | ||
2831 | return NULL; | |
2832 | } | |
2833 | ||
2834 | /* This function requires the caller holds hdev->lock */ | |
2835 | struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list, | |
2836 | bdaddr_t *addr, u8 addr_type) | |
2837 | { | |
2838 | struct hci_conn_params *param; | |
2839 | ||
2840 | list_for_each_entry(param, list, action) { | |
2841 | if (bacmp(¶m->addr, addr) == 0 && | |
2842 | param->addr_type == addr_type) | |
2843 | return param; | |
2844 | } | |
2845 | ||
2846 | return NULL; | |
2847 | } | |
2848 | ||
2849 | /* This function requires the caller holds hdev->lock */ | |
2850 | struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev, | |
2851 | bdaddr_t *addr, u8 addr_type) | |
2852 | { | |
2853 | struct hci_conn_params *params; | |
2854 | ||
2855 | params = hci_conn_params_lookup(hdev, addr, addr_type); | |
2856 | if (params) | |
2857 | return params; | |
2858 | ||
2859 | params = kzalloc(sizeof(*params), GFP_KERNEL); | |
2860 | if (!params) { | |
2861 | BT_ERR("Out of memory"); | |
2862 | return NULL; | |
2863 | } | |
2864 | ||
2865 | bacpy(¶ms->addr, addr); | |
2866 | params->addr_type = addr_type; | |
2867 | ||
2868 | list_add(¶ms->list, &hdev->le_conn_params); | |
2869 | INIT_LIST_HEAD(¶ms->action); | |
2870 | ||
2871 | params->conn_min_interval = hdev->le_conn_min_interval; | |
2872 | params->conn_max_interval = hdev->le_conn_max_interval; | |
2873 | params->conn_latency = hdev->le_conn_latency; | |
2874 | params->supervision_timeout = hdev->le_supv_timeout; | |
2875 | params->auto_connect = HCI_AUTO_CONN_DISABLED; | |
2876 | ||
2877 | BT_DBG("addr %pMR (type %u)", addr, addr_type); | |
2878 | ||
2879 | return params; | |
2880 | } | |
2881 | ||
2882 | static void hci_conn_params_free(struct hci_conn_params *params) | |
2883 | { | |
2884 | if (params->conn) { | |
2885 | hci_conn_drop(params->conn); | |
2886 | hci_conn_put(params->conn); | |
2887 | } | |
2888 | ||
2889 | list_del(¶ms->action); | |
2890 | list_del(¶ms->list); | |
2891 | kfree(params); | |
2892 | } | |
2893 | ||
2894 | /* This function requires the caller holds hdev->lock */ | |
2895 | void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type) | |
2896 | { | |
2897 | struct hci_conn_params *params; | |
2898 | ||
2899 | params = hci_conn_params_lookup(hdev, addr, addr_type); | |
2900 | if (!params) | |
2901 | return; | |
2902 | ||
2903 | hci_conn_params_free(params); | |
2904 | ||
2905 | hci_update_background_scan(hdev); | |
2906 | ||
2907 | BT_DBG("addr %pMR (type %u)", addr, addr_type); | |
2908 | } | |
2909 | ||
2910 | /* This function requires the caller holds hdev->lock */ | |
2911 | void hci_conn_params_clear_disabled(struct hci_dev *hdev) | |
2912 | { | |
2913 | struct hci_conn_params *params, *tmp; | |
2914 | ||
2915 | list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) { | |
2916 | if (params->auto_connect != HCI_AUTO_CONN_DISABLED) | |
2917 | continue; | |
2918 | ||
2919 | /* If trying to establish a one-time connection to a disabled | |
2920 | * device, leave the params, but mark them as just once. | |
2921 | */ | |
2922 | if (params->explicit_connect) { | |
2923 | params->auto_connect = HCI_AUTO_CONN_EXPLICIT; | |
2924 | continue; | |
2925 | } | |
2926 | ||
2927 | list_del(¶ms->list); | |
2928 | kfree(params); | |
2929 | } | |
2930 | ||
2931 | BT_DBG("All LE disabled connection parameters were removed"); | |
2932 | } | |
2933 | ||
2934 | /* This function requires the caller holds hdev->lock */ | |
2935 | static void hci_conn_params_clear_all(struct hci_dev *hdev) | |
2936 | { | |
2937 | struct hci_conn_params *params, *tmp; | |
2938 | ||
2939 | list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) | |
2940 | hci_conn_params_free(params); | |
2941 | ||
2942 | BT_DBG("All LE connection parameters were removed"); | |
2943 | } | |
2944 | ||
2945 | /* Copy the Identity Address of the controller. | |
2946 | * | |
2947 | * If the controller has a public BD_ADDR, then by default use that one. | |
2948 | * If this is an LE-only controller without a public address, default to | |
2949 | * the static random address. | |
2950 | * | |
2951 | * For debugging purposes it is possible to force controllers with a | |
2952 | * public address to use the static random address instead. | |
2953 | * | |
2954 | * In case BR/EDR has been disabled on a dual-mode controller and | |
2955 | * userspace has configured a static address, then that address | |
2956 | * becomes the identity address instead of the public BR/EDR address. | |
2957 | */ | |
2958 | void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr, | |
2959 | u8 *bdaddr_type) | |
2960 | { | |
2961 | if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || | |
2962 | !bacmp(&hdev->bdaddr, BDADDR_ANY) || | |
2963 | (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && | |
2964 | bacmp(&hdev->static_addr, BDADDR_ANY))) { | |
2965 | bacpy(bdaddr, &hdev->static_addr); | |
2966 | *bdaddr_type = ADDR_LE_DEV_RANDOM; | |
2967 | } else { | |
2968 | bacpy(bdaddr, &hdev->bdaddr); | |
2969 | *bdaddr_type = ADDR_LE_DEV_PUBLIC; | |
2970 | } | |
2971 | } | |
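 | ||
 | /* Usage sketch (editor's illustration): callers typically fetch the | |
 |  * identity address when filling own-address fields, e.g. for mgmt | |
 |  * or advertising setup. | |
 |  */ | |
 | static void example_identity(struct hci_dev *hdev) | |
 | { | |
 |         bdaddr_t id_addr; | |
 |         u8 id_addr_type; | |
 | ||
 |         hci_copy_identity_address(hdev, &id_addr, &id_addr_type); | |
 |         BT_DBG("identity %pMR (type %u)", &id_addr, id_addr_type); | |
 | } | |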
2972 | ||
2973 | /* Alloc HCI device */ | |
2974 | struct hci_dev *hci_alloc_dev(void) | |
2975 | { | |
2976 | struct hci_dev *hdev; | |
2977 | ||
2978 | hdev = kzalloc(sizeof(*hdev), GFP_KERNEL); | |
2979 | if (!hdev) | |
2980 | return NULL; | |
2981 | ||
2982 | hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1); | |
2983 | hdev->esco_type = (ESCO_HV1); | |
2984 | hdev->link_mode = (HCI_LM_ACCEPT); | |
2985 | hdev->num_iac = 0x01; /* One IAC support is mandatory */ | |
2986 | hdev->io_capability = 0x03; /* No Input No Output */ | |
2987 | hdev->manufacturer = 0xffff; /* Default to internal use */ | |
2988 | hdev->inq_tx_power = HCI_TX_POWER_INVALID; | |
2989 | hdev->adv_tx_power = HCI_TX_POWER_INVALID; | |
2990 | hdev->adv_instance_cnt = 0; | |
2991 | hdev->cur_adv_instance = 0x00; | |
2992 | hdev->adv_instance_timeout = 0; | |
2993 | ||
2994 | hdev->sniff_max_interval = 800; | |
2995 | hdev->sniff_min_interval = 80; | |
2996 | ||
2997 | hdev->le_adv_channel_map = 0x07; | |
2998 | hdev->le_adv_min_interval = 0x0800; | |
2999 | hdev->le_adv_max_interval = 0x0800; | |
3000 | hdev->le_scan_interval = 0x0060; | |
3001 | hdev->le_scan_window = 0x0030; | |
3002 | hdev->le_conn_min_interval = 0x0018; | |
3003 | hdev->le_conn_max_interval = 0x0028; | |
3004 | hdev->le_conn_latency = 0x0000; | |
3005 | hdev->le_supv_timeout = 0x002a; | |
3006 | hdev->le_def_tx_len = 0x001b; | |
3007 | hdev->le_def_tx_time = 0x0148; | |
3008 | hdev->le_max_tx_len = 0x001b; | |
3009 | hdev->le_max_tx_time = 0x0148; | |
3010 | hdev->le_max_rx_len = 0x001b; | |
3011 | hdev->le_max_rx_time = 0x0148; | |
3012 | ||
3013 | hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT; | |
3014 | hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT; | |
3015 | hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE; | |
3016 | hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE; | |
3017 | ||
3018 | mutex_init(&hdev->lock); | |
3019 | mutex_init(&hdev->req_lock); | |
3020 | ||
3021 | INIT_LIST_HEAD(&hdev->mgmt_pending); | |
3022 | INIT_LIST_HEAD(&hdev->blacklist); | |
3023 | INIT_LIST_HEAD(&hdev->whitelist); | |
3024 | INIT_LIST_HEAD(&hdev->uuids); | |
3025 | INIT_LIST_HEAD(&hdev->link_keys); | |
3026 | INIT_LIST_HEAD(&hdev->long_term_keys); | |
3027 | INIT_LIST_HEAD(&hdev->identity_resolving_keys); | |
3028 | INIT_LIST_HEAD(&hdev->remote_oob_data); | |
3029 | INIT_LIST_HEAD(&hdev->le_white_list); | |
3030 | INIT_LIST_HEAD(&hdev->le_conn_params); | |
3031 | INIT_LIST_HEAD(&hdev->pend_le_conns); | |
3032 | INIT_LIST_HEAD(&hdev->pend_le_reports); | |
3033 | INIT_LIST_HEAD(&hdev->conn_hash.list); | |
3034 | INIT_LIST_HEAD(&hdev->adv_instances); | |
3035 | ||
3036 | INIT_WORK(&hdev->rx_work, hci_rx_work); | |
3037 | INIT_WORK(&hdev->cmd_work, hci_cmd_work); | |
3038 | INIT_WORK(&hdev->tx_work, hci_tx_work); | |
3039 | INIT_WORK(&hdev->power_on, hci_power_on); | |
3040 | INIT_WORK(&hdev->error_reset, hci_error_reset); | |
3041 | ||
3042 | INIT_DELAYED_WORK(&hdev->power_off, hci_power_off); | |
3043 | ||
3044 | skb_queue_head_init(&hdev->rx_q); | |
3045 | skb_queue_head_init(&hdev->cmd_q); | |
3046 | skb_queue_head_init(&hdev->raw_q); | |
3047 | ||
3048 | init_waitqueue_head(&hdev->req_wait_q); | |
3049 | ||
3050 | INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout); | |
3051 | ||
3052 | hci_request_setup(hdev); | |
3053 | ||
3054 | hci_init_sysfs(hdev); | |
3055 | discovery_init(hdev); | |
3056 | ||
3057 | return hdev; | |
3058 | } | |
3059 | EXPORT_SYMBOL(hci_alloc_dev); | |
3060 | ||
3061 | /* Free HCI device */ | |
3062 | void hci_free_dev(struct hci_dev *hdev) | |
3063 | { | |
3064 | /* will free via device release */ | |
3065 | put_device(&hdev->dev); | |
3066 | } | |
3067 | EXPORT_SYMBOL(hci_free_dev); | |
3068 | ||
3069 | /* Register HCI device */ | |
3070 | int hci_register_dev(struct hci_dev *hdev) | |
3071 | { | |
3072 | int id, error; | |
3073 | ||
3074 | if (!hdev->open || !hdev->close || !hdev->send) | |
3075 | return -EINVAL; | |
3076 | ||
3077 | /* Do not allow HCI_AMP devices to register at index 0, | |
3078 | * so the index can be used as the AMP controller ID. | |
3079 | */ | |
3080 | switch (hdev->dev_type) { | |
3081 | case HCI_PRIMARY: | |
3082 | id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL); | |
3083 | break; | |
3084 | case HCI_AMP: | |
3085 | id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL); | |
3086 | break; | |
3087 | default: | |
3088 | return -EINVAL; | |
3089 | } | |
3090 | ||
3091 | if (id < 0) | |
3092 | return id; | |
3093 | ||
3094 | sprintf(hdev->name, "hci%d", id); | |
3095 | hdev->id = id; | |
3096 | ||
3097 | BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); | |
3098 | ||
3099 | hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name); | |
3100 | if (!hdev->workqueue) { | |
3101 | error = -ENOMEM; | |
3102 | goto err; | |
3103 | } | |
3104 | ||
3105 | hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, | |
3106 | hdev->name); | |
3107 | if (!hdev->req_workqueue) { | |
3108 | destroy_workqueue(hdev->workqueue); | |
3109 | error = -ENOMEM; | |
3110 | goto err; | |
3111 | } | |
3112 | ||
3113 | if (!IS_ERR_OR_NULL(bt_debugfs)) | |
3114 | hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs); | |
3115 | ||
3116 | dev_set_name(&hdev->dev, "%s", hdev->name); | |
3117 | ||
3118 | error = device_add(&hdev->dev); | |
3119 | if (error < 0) | |
3120 | goto err_wqueue; | |
3121 | ||
3122 | hci_leds_init(hdev); | |
3123 | ||
3124 | hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev, | |
3125 | RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, | |
3126 | hdev); | |
3127 | if (hdev->rfkill) { | |
3128 | if (rfkill_register(hdev->rfkill) < 0) { | |
3129 | rfkill_destroy(hdev->rfkill); | |
3130 | hdev->rfkill = NULL; | |
3131 | } | |
3132 | } | |
3133 | ||
3134 | if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) | |
3135 | hci_dev_set_flag(hdev, HCI_RFKILLED); | |
3136 | ||
3137 | hci_dev_set_flag(hdev, HCI_SETUP); | |
3138 | hci_dev_set_flag(hdev, HCI_AUTO_OFF); | |
3139 | ||
3140 | if (hdev->dev_type == HCI_PRIMARY) { | |
3141 | /* Assume BR/EDR support until proven otherwise (such as | |
3142 | * through reading supported features during init). | |
3143 | */ | |
3144 | hci_dev_set_flag(hdev, HCI_BREDR_ENABLED); | |
3145 | } | |
3146 | ||
3147 | write_lock(&hci_dev_list_lock); | |
3148 | list_add(&hdev->list, &hci_dev_list); | |
3149 | write_unlock(&hci_dev_list_lock); | |
3150 | ||
3151 | /* Devices that are marked for raw-only usage are unconfigured | |
3152 | * and should not be included in normal operation. | |
3153 | */ | |
3154 | if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) | |
3155 | hci_dev_set_flag(hdev, HCI_UNCONFIGURED); | |
3156 | ||
3157 | hci_sock_dev_event(hdev, HCI_DEV_REG); | |
3158 | hci_dev_hold(hdev); | |
3159 | ||
3160 | queue_work(hdev->req_workqueue, &hdev->power_on); | |
3161 | ||
3162 | return id; | |
3163 | ||
3164 | err_wqueue: | |
3165 | destroy_workqueue(hdev->workqueue); | |
3166 | destroy_workqueue(hdev->req_workqueue); | |
3167 | err: | |
3168 | ida_simple_remove(&hci_index_ida, hdev->id); | |
3169 | ||
3170 | return error; | |
3171 | } | |
3172 | EXPORT_SYMBOL(hci_register_dev); | |
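 | ||
 | /* A minimal driver-side sketch (editor's illustration, modeled on the | |
 |  * virtual-driver pattern): hci_register_dev() insists on open, close | |
 |  * and send being set, as checked above. The my_* callbacks are | |
 |  * hypothetical. | |
 |  */ | |
 | static int my_open(struct hci_dev *hdev); | |
 | static int my_close(struct hci_dev *hdev); | |
 | static int my_send(struct hci_dev *hdev, struct sk_buff *skb); | |
 | ||
 | static int example_probe(void) | |
 | { | |
 |         struct hci_dev *hdev; | |
 |         int err; | |
 | ||
 |         hdev = hci_alloc_dev(); | |
 |         if (!hdev) | |
 |                 return -ENOMEM; | |
 | ||
 |         hdev->bus = HCI_VIRTUAL; | |
 |         hdev->open = my_open; | |
 |         hdev->close = my_close; | |
 |         hdev->send = my_send; | |
 | ||
 |         err = hci_register_dev(hdev); | |
 |         if (err < 0) | |
 |                 hci_free_dev(hdev); | |
 | ||
 |         return err; | |
 | } | |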
3173 | ||
3174 | /* Unregister HCI device */ | |
3175 | void hci_unregister_dev(struct hci_dev *hdev) | |
3176 | { | |
3177 | int id; | |
3178 | ||
3179 | BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); | |
3180 | ||
3181 | hci_dev_set_flag(hdev, HCI_UNREGISTER); | |
3182 | ||
3183 | id = hdev->id; | |
3184 | ||
3185 | write_lock(&hci_dev_list_lock); | |
3186 | list_del(&hdev->list); | |
3187 | write_unlock(&hci_dev_list_lock); | |
3188 | ||
3189 | cancel_work_sync(&hdev->power_on); | |
3190 | ||
3191 | hci_dev_do_close(hdev); | |
3192 | ||
3193 | if (!test_bit(HCI_INIT, &hdev->flags) && | |
3194 | !hci_dev_test_flag(hdev, HCI_SETUP) && | |
3195 | !hci_dev_test_flag(hdev, HCI_CONFIG)) { | |
3196 | hci_dev_lock(hdev); | |
3197 | mgmt_index_removed(hdev); | |
3198 | hci_dev_unlock(hdev); | |
3199 | } | |
3200 | ||
3201 | /* mgmt_index_removed should take care of emptying the | |
3202 | * pending list */ | |
3203 | BUG_ON(!list_empty(&hdev->mgmt_pending)); | |
3204 | ||
3205 | hci_sock_dev_event(hdev, HCI_DEV_UNREG); | |
3206 | ||
3207 | if (hdev->rfkill) { | |
3208 | rfkill_unregister(hdev->rfkill); | |
3209 | rfkill_destroy(hdev->rfkill); | |
3210 | } | |
3211 | ||
3212 | device_del(&hdev->dev); | |
3213 | ||
3214 | debugfs_remove_recursive(hdev->debugfs); | |
3215 | kfree_const(hdev->hw_info); | |
3216 | kfree_const(hdev->fw_info); | |
3217 | ||
3218 | destroy_workqueue(hdev->workqueue); | |
3219 | destroy_workqueue(hdev->req_workqueue); | |
3220 | ||
3221 | hci_dev_lock(hdev); | |
3222 | hci_bdaddr_list_clear(&hdev->blacklist); | |
3223 | hci_bdaddr_list_clear(&hdev->whitelist); | |
3224 | hci_uuids_clear(hdev); | |
3225 | hci_link_keys_clear(hdev); | |
3226 | hci_smp_ltks_clear(hdev); | |
3227 | hci_smp_irks_clear(hdev); | |
3228 | hci_remote_oob_data_clear(hdev); | |
3229 | hci_adv_instances_clear(hdev); | |
3230 | hci_bdaddr_list_clear(&hdev->le_white_list); | |
3231 | hci_conn_params_clear_all(hdev); | |
3232 | hci_discovery_filter_clear(hdev); | |
3233 | hci_dev_unlock(hdev); | |
3234 | ||
3235 | hci_dev_put(hdev); | |
3236 | ||
3237 | ida_simple_remove(&hci_index_ida, id); | |
3238 | } | |
3239 | EXPORT_SYMBOL(hci_unregister_dev); | |
3240 | ||
3241 | /* Suspend HCI device */ | |
3242 | int hci_suspend_dev(struct hci_dev *hdev) | |
3243 | { | |
3244 | hci_sock_dev_event(hdev, HCI_DEV_SUSPEND); | |
3245 | return 0; | |
3246 | } | |
3247 | EXPORT_SYMBOL(hci_suspend_dev); | |
3248 | ||
3249 | /* Resume HCI device */ | |
3250 | int hci_resume_dev(struct hci_dev *hdev) | |
3251 | { | |
3252 | hci_sock_dev_event(hdev, HCI_DEV_RESUME); | |
3253 | return 0; | |
3254 | } | |
3255 | EXPORT_SYMBOL(hci_resume_dev); | |
3256 | ||
3257 | /* Reset HCI device */ | |
3258 | int hci_reset_dev(struct hci_dev *hdev) | |
3259 | { | |
3260 | const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 }; | |
3261 | struct sk_buff *skb; | |
3262 | ||
3263 | skb = bt_skb_alloc(3, GFP_ATOMIC); | |
3264 | if (!skb) | |
3265 | return -ENOMEM; | |
3266 | ||
3267 | hci_skb_pkt_type(skb) = HCI_EVENT_PKT; | |
3268 | skb_put_data(skb, hw_err, 3); | |
3269 | ||
3270 | /* Send Hardware Error to upper stack */ | |
3271 | return hci_recv_frame(hdev, skb); | |
3272 | } | |
3273 | EXPORT_SYMBOL(hci_reset_dev); | |
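 | ||
 | /* Note (editor's addition): the synthetic frame above is a complete | |
 |  * HCI event packet - event code HCI_EV_HARDWARE_ERROR (0x10), | |
 |  * parameter length 0x01 and hardware code 0x00 - so the event path | |
 |  * processes it like a driver-reported hardware error and schedules | |
 |  * the error_reset work seen earlier. | |
 |  */ | |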
3274 | ||
3275 | /* Receive frame from HCI drivers */ | |
3276 | int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb) | |
3277 | { | |
3278 | if (!hdev || (!test_bit(HCI_UP, &hdev->flags) && | |
3279 | !test_bit(HCI_INIT, &hdev->flags))) { | |
3280 | kfree_skb(skb); | |
3281 | return -ENXIO; | |
3282 | } | |
3283 | ||
3284 | if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT && | |
3285 | hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT && | |
3286 | hci_skb_pkt_type(skb) != HCI_SCODATA_PKT) { | |
3287 | kfree_skb(skb); | |
3288 | return -EINVAL; | |
3289 | } | |
3290 | ||
3291 | /* Incoming skb */ | |
3292 | bt_cb(skb)->incoming = 1; | |
3293 | ||
3294 | /* Time stamp */ | |
3295 | __net_timestamp(skb); | |
3296 | ||
3297 | skb_queue_tail(&hdev->rx_q, skb); | |
3298 | queue_work(hdev->workqueue, &hdev->rx_work); | |
3299 | ||
3300 | return 0; | |
3301 | } | |
3302 | EXPORT_SYMBOL(hci_recv_frame); | |
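 | ||
 | /* Driver-side RX sketch (editor's illustration): a transport driver | |
 |  * allocates an skb, tags the packet type and hands the frame over. | |
 |  * The buf/len parameters are assumptions for the example; note that | |
 |  * hci_recv_frame() consumes the skb even on error. | |
 |  */ | |
 | static int example_rx_event(struct hci_dev *hdev, const void *buf, | |
 |                             size_t len) | |
 | { | |
 |         struct sk_buff *skb; | |
 | ||
 |         skb = bt_skb_alloc(len, GFP_ATOMIC); | |
 |         if (!skb) | |
 |                 return -ENOMEM; | |
 | ||
 |         hci_skb_pkt_type(skb) = HCI_EVENT_PKT; | |
 |         skb_put_data(skb, buf, len); | |
 | ||
 |         return hci_recv_frame(hdev, skb); | |
 | } | |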
3303 | ||
3304 | /* Receive diagnostic message from HCI drivers */ | |
3305 | int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb) | |
3306 | { | |
3307 | /* Mark as diagnostic packet */ | |
3308 | hci_skb_pkt_type(skb) = HCI_DIAG_PKT; | |
3309 | ||
3310 | /* Time stamp */ | |
3311 | __net_timestamp(skb); | |
3312 | ||
3313 | skb_queue_tail(&hdev->rx_q, skb); | |
3314 | queue_work(hdev->workqueue, &hdev->rx_work); | |
3315 | ||
3316 | return 0; | |
3317 | } | |
3318 | EXPORT_SYMBOL(hci_recv_diag); | |
3319 | ||
3320 | void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...) | |
3321 | { | |
3322 | va_list vargs; | |
3323 | ||
3324 | va_start(vargs, fmt); | |
3325 | kfree_const(hdev->hw_info); | |
3326 | hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs); | |
3327 | va_end(vargs); | |
3328 | } | |
3329 | EXPORT_SYMBOL(hci_set_hw_info); | |
3330 | ||
3331 | void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...) | |
3332 | { | |
3333 | va_list vargs; | |
3334 | ||
3335 | va_start(vargs, fmt); | |
3336 | kfree_const(hdev->fw_info); | |
3337 | hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs); | |
3338 | va_end(vargs); | |
3339 | } | |
3340 | EXPORT_SYMBOL(hci_set_fw_info); | |
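 | ||
 | /* Usage sketch (editor's illustration): drivers typically record the | |
 |  * hardware and firmware revision strings during setup; the format | |
 |  * strings and values here are hypothetical. | |
 |  */ | |
 | static void example_set_info(struct hci_dev *hdev, u32 hw_rev, u32 fw_build) | |
 | { | |
 |         hci_set_hw_info(hdev, "rev %u", hw_rev); | |
 |         hci_set_fw_info(hdev, "build %u", fw_build); | |
 | } | |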
3341 | ||
3342 | /* ---- Interface to upper protocols ---- */ | |
3343 | ||
3344 | int hci_register_cb(struct hci_cb *cb) | |
3345 | { | |
3346 | BT_DBG("%p name %s", cb, cb->name); | |
3347 | ||
3348 | mutex_lock(&hci_cb_list_lock); | |
3349 | list_add_tail(&cb->list, &hci_cb_list); | |
3350 | mutex_unlock(&hci_cb_list_lock); | |
3351 | ||
3352 | return 0; | |
3353 | } | |
3354 | EXPORT_SYMBOL(hci_register_cb); | |
3355 | ||
3356 | int hci_unregister_cb(struct hci_cb *cb) | |
3357 | { | |
3358 | BT_DBG("%p name %s", cb, cb->name); | |
3359 | ||
3360 | mutex_lock(&hci_cb_list_lock); | |
3361 | list_del(&cb->list); | |
3362 | mutex_unlock(&hci_cb_list_lock); | |
3363 | ||
3364 | return 0; | |
3365 | } | |
3366 | EXPORT_SYMBOL(hci_unregister_cb); | |
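 | ||
 | /* Upper-protocol sketch (editor's illustration, modeled on how L2CAP | |
 |  * hooks in): register a struct hci_cb to be notified about connection | |
 |  * events. The example_connect_cfm callback is hypothetical; pair | |
 |  * hci_register_cb() at init with hci_unregister_cb() at exit. | |
 |  */ | |
 | static void example_connect_cfm(struct hci_conn *conn, __u8 status); | |
 | ||
 | static struct hci_cb example_cb = { | |
 |         .name           = "example", | |
 |         .connect_cfm    = example_connect_cfm, | |
 | }; | |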
3367 | ||
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
	       skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (!test_bit(HCI_RUNNING, &hdev->flags)) {
		kfree_skb(skb);
		return;
	}

	err = hdev->send(hdev, skb);
	if (err < 0) {
		BT_ERR("%s sending frame failed (%d)", hdev->name, err);
		kfree_skb(skb);
	}
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

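/* Illustrative sketch: queueing a single fire-and-forget command, here
 * Write Scan Enable to make the controller connectable and
 * discoverable. Completion arrives asynchronously through the event
 * path; callers that need the result should use hci_cmd_sync() below.
 */
static int example_write_scan_enable(struct hci_dev *hdev)
{
	__u8 scan = SCAN_INQUIRY | SCAN_PAGE;

	return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
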
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send HCI command and wait for command complete event */
struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			     const void *param, u32 timeout)
{
	struct sk_buff *skb;

	if (!test_bit(HCI_UP, &hdev->flags))
		return ERR_PTR(-ENETDOWN);

	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);

	hci_req_sync_lock(hdev);
	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
	hci_req_sync_unlock(hdev);

	return skb;
}
EXPORT_SYMBOL(hci_cmd_sync);

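/* Illustrative sketch: issuing a command synchronously and reading the
 * Command Complete parameters. The returned skb holds the response
 * payload and must be freed by the caller.
 */
static int example_read_local_version(struct hci_dev *hdev)
{
	struct hci_rp_read_local_version *rp;
	struct sk_buff *skb;

	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
			   HCI_INIT_TIMEOUT);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	rp = (void *)skb->data;
	BT_DBG("hci_ver %u hci_rev %u", rp->hci_ver,
	       __le16_to_cpu(rp->hci_rev));

	kfree_skb(skb);
	return 0;
}
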
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}

static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

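/* Illustrative note: L2CAP is the primary caller of hci_send_acl(). A
 * typical call, with ACL_START marking the first fragment of a PDU on
 * a flushable channel, looks like
 *
 *	hci_send_acl(chan->conn->hchan, skb, ACL_START);
 *
 * where any remaining fragments are chained on the skb's frag_list and
 * relabelled ACL_CONT by hci_queue_acl() above.
 */
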
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

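/* Illustrative note: the SCO socket layer calls this once per audio
 * frame, e.g. sco_send_frame() ends in hci_send_sco(conn->hcon, skb).
 * Since hdr.dlen is a single byte, callers size their frames to the
 * controller's SCO MTU.
 */
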
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

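/* Worked example of the quota above (illustrative numbers): with
 * hdev->acl_cnt = 9 free packet slots and num = 4 ACL connections
 * holding queued data, the least-used connection is picked and gets
 * q = 9 / 4 = 2 packets this round; a q of 0 is rounded up to 1 so
 * the chosen connection always makes progress.
 */
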
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

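/* Worked example (illustrative numbers): with hdev->block_len = 339
 * and an ACL frame of skb->len = 1021 (4 byte ACL header plus 1017
 * bytes of payload), DIV_ROUND_UP(1017, 339) = 3 data blocks are
 * consumed from hdev->block_cnt.
 */
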
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
}

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
			  hci_req_complete_t *req_complete,
			  hci_req_complete_skb_t *req_complete_skb)
{
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
		return;
	}

	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
		return;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
			*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
		else
			*req_complete = bt_cb(skb)->hci.req_complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
}

static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (hci_skb_pkt_type(skb)) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (hci_skb_pkt_type(skb)) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
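
/* Illustrative note on the flow control above: hdev->cmd_cnt mirrors
 * the Num_HCI_Command_Packets credit reported by the controller in
 * Command Complete/Command Status events, so at most that many
 * commands are ever in flight; the event path restores the credit and
 * re-queues this work when a response arrives.
 */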