/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#ifndef __HCI_CORE_H
#define __HCI_CORE_H

#include <net/bluetooth/hci.h>

/* HCI upper protocols */
#define HCI_PROTO_L2CAP	0
#define HCI_PROTO_SCO	1

/* HCI Core structures */
struct inquiry_data {
	bdaddr_t	bdaddr;
	__u8		pscan_rep_mode;
	__u8		pscan_period_mode;
	__u8		pscan_mode;
	__u8		dev_class[3];
	__le16		clock_offset;
	__s8		rssi;
};

struct inquiry_entry {
	struct inquiry_entry	*next;
	__u32			timestamp;
	struct inquiry_data	data;
};

struct inquiry_cache {
	spinlock_t		lock;
	__u32			timestamp;
	struct inquiry_entry	*list;
};

struct hci_conn_hash {
	struct list_head list;
	spinlock_t	lock;
	unsigned int	acl_num;
	unsigned int	sco_num;
};

struct hci_dev {
	struct list_head list;
	spinlock_t	lock;
	atomic_t	refcnt;

	char		name[8];
	unsigned long	flags;
	__u16		id;
	__u8		type;
	bdaddr_t	bdaddr;
	__u8		features[8];
	__u8		hci_ver;
	__u16		hci_rev;
	__u16		manufacturer;
	__u16		voice_setting;

	__u16		pkt_type;
	__u16		esco_type;
	__u16		link_policy;
	__u16		link_mode;

	__u32		idle_timeout;
	__u16		sniff_min_interval;
	__u16		sniff_max_interval;

	unsigned long	quirks;

	atomic_t	cmd_cnt;
	unsigned int	acl_cnt;
	unsigned int	sco_cnt;

	unsigned int	acl_mtu;
	unsigned int	sco_mtu;
	unsigned int	acl_pkts;
	unsigned int	sco_pkts;

	unsigned long	cmd_last_tx;
	unsigned long	acl_last_tx;
	unsigned long	sco_last_tx;

	struct tasklet_struct	cmd_task;
	struct tasklet_struct	rx_task;
	struct tasklet_struct	tx_task;

	struct sk_buff_head	rx_q;
	struct sk_buff_head	raw_q;
	struct sk_buff_head	cmd_q;

	struct sk_buff		*sent_cmd;
	struct sk_buff		*reassembly[3];

	struct semaphore	req_lock;
	wait_queue_head_t	req_wait_q;
	__u32			req_status;
	__u32			req_result;

	struct inquiry_cache	inq_cache;
	struct hci_conn_hash	conn_hash;

	struct hci_dev_stats	stat;

	struct sk_buff_head	driver_init;

	void			*driver_data;
	void			*core_data;

	atomic_t		promisc;

	struct device		*parent;
	struct device		dev;

	struct module		*owner;

	int (*open)(struct hci_dev *hdev);
	int (*close)(struct hci_dev *hdev);
	int (*flush)(struct hci_dev *hdev);
	int (*send)(struct sk_buff *skb);
	void (*destruct)(struct hci_dev *hdev);
	void (*notify)(struct hci_dev *hdev, unsigned int evt);
	int (*ioctl)(struct hci_dev *hdev, unsigned int cmd, unsigned long arg);
};

struct hci_conn {
	struct list_head list;

	atomic_t	refcnt;
	spinlock_t	lock;

	bdaddr_t	dst;
	__u16		handle;
	__u16		state;
	__u8		mode;
	__u8		type;
	__u8		out;
	__u8		attempt;
	__u8		dev_class[3];
	__u8		features[8];
	__u16		interval;
	__u16		link_policy;
	__u32		link_mode;
	__u8		power_save;
	unsigned long	pend;

	unsigned int	sent;

	struct sk_buff_head data_q;

	struct timer_list disc_timer;
	struct timer_list idle_timer;

	struct work_struct work;

	struct device	dev;

	struct hci_dev	*hdev;
	void		*l2cap_data;
	void		*sco_data;
	void		*priv;

	struct hci_conn	*link;
};

extern struct hci_proto *hci_proto[];
extern struct list_head hci_dev_list;
extern struct list_head hci_cb_list;
extern rwlock_t hci_dev_list_lock;
extern rwlock_t hci_cb_list_lock;

/* ----- Inquiry cache ----- */
#define INQUIRY_CACHE_AGE_MAX	(HZ*30)	// 30 seconds
#define INQUIRY_ENTRY_AGE_MAX	(HZ*60)	// 60 seconds

#define inquiry_cache_lock(c)		spin_lock(&c->lock)
#define inquiry_cache_unlock(c)		spin_unlock(&c->lock)
#define inquiry_cache_lock_bh(c)	spin_lock_bh(&c->lock)
#define inquiry_cache_unlock_bh(c)	spin_unlock_bh(&c->lock)

static inline void inquiry_cache_init(struct hci_dev *hdev)
{
	struct inquiry_cache *c = &hdev->inq_cache;
	spin_lock_init(&c->lock);
	c->list = NULL;
}

static inline int inquiry_cache_empty(struct hci_dev *hdev)
{
	struct inquiry_cache *c = &hdev->inq_cache;
	return (c->list == NULL);
}

static inline long inquiry_cache_age(struct hci_dev *hdev)
{
	struct inquiry_cache *c = &hdev->inq_cache;
	return jiffies - c->timestamp;
}

static inline long inquiry_entry_age(struct inquiry_entry *e)
{
	return jiffies - e->timestamp;
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr);
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data);
228 /* ----- HCI Connections ----- */
229 enum {
230 HCI_CONN_AUTH_PEND,
231 HCI_CONN_ENCRYPT_PEND,
232 HCI_CONN_RSWITCH_PEND,
233 HCI_CONN_MODE_CHANGE_PEND,
234 };
235
236 static inline void hci_conn_hash_init(struct hci_dev *hdev)
237 {
238 struct hci_conn_hash *h = &hdev->conn_hash;
239 INIT_LIST_HEAD(&h->list);
240 spin_lock_init(&h->lock);
241 h->acl_num = 0;
242 h->sco_num = 0;
243 }
244
245 static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
246 {
247 struct hci_conn_hash *h = &hdev->conn_hash;
248 list_add(&c->list, &h->list);
249 if (c->type == ACL_LINK)
250 h->acl_num++;
251 else
252 h->sco_num++;
253 }
254
255 static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
256 {
257 struct hci_conn_hash *h = &hdev->conn_hash;
258 list_del(&c->list);
259 if (c->type == ACL_LINK)
260 h->acl_num--;
261 else
262 h->sco_num--;
263 }
264
static inline struct hci_conn *hci_conn_hash_lookup_handle(struct hci_dev *hdev,
					__u16 handle)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn  *c;

	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->handle == handle)
			return c;
	}
	return NULL;
}

static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
					__u8 type, bdaddr_t *ba)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn  *c;

	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == type && !bacmp(&c->dst, ba))
			return c;
	}
	return NULL;
}

static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
					__u8 type, __u16 state)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn  *c;

	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == type && c->state == state)
			return c;
	}
	return NULL;
}
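
/*
 * Usage sketch (illustrative, not part of the original header): the lookup
 * helpers do no locking themselves, so callers walk the connection list
 * under the device lock and pin the result before using it; hdev and bdaddr
 * are assumed to come from the surrounding code.
 *
 *	struct hci_conn *conn;
 *
 *	hci_dev_lock(hdev);
 *	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &bdaddr);
 *	if (conn)
 *		hci_conn_hold(conn);
 *	hci_dev_unlock(hdev);
 */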

void hci_acl_connect(struct hci_conn *conn);
void hci_acl_disconn(struct hci_conn *conn, __u8 reason);
void hci_add_sco(struct hci_conn *conn, __u16 handle);

struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
int hci_conn_del(struct hci_conn *conn);
void hci_conn_hash_flush(struct hci_dev *hdev);

struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *src);
int hci_conn_auth(struct hci_conn *conn);
int hci_conn_encrypt(struct hci_conn *conn);
int hci_conn_change_link_key(struct hci_conn *conn);
int hci_conn_switch_role(struct hci_conn *conn, uint8_t role);

void hci_conn_enter_active_mode(struct hci_conn *conn);
void hci_conn_enter_sniff_mode(struct hci_conn *conn);

static inline void hci_conn_hold(struct hci_conn *conn)
{
	atomic_inc(&conn->refcnt);
	del_timer(&conn->disc_timer);
}

static inline void hci_conn_put(struct hci_conn *conn)
{
	if (atomic_dec_and_test(&conn->refcnt)) {
		unsigned long timeo;
		if (conn->type == ACL_LINK) {
			del_timer(&conn->idle_timer);
			if (conn->state == BT_CONNECTED) {
				timeo = msecs_to_jiffies(HCI_DISCONN_TIMEOUT);
				if (!conn->out)
					timeo *= 2;
			} else
				timeo = msecs_to_jiffies(10);
		} else
			timeo = msecs_to_jiffies(10);
		mod_timer(&conn->disc_timer, jiffies + timeo);
	}
}
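
/*
 * Reference counting sketch (illustrative, not part of the original header):
 * hci_conn_hold() pins the connection and cancels the pending disconnect
 * timer; dropping the last reference with hci_conn_put() re-arms it, so an
 * unused ACL link is torn down after HCI_DISCONN_TIMEOUT rather than
 * immediately.
 *
 *	hci_conn_hold(conn);
 *	... use conn (queue data, issue commands, ...) ...
 *	hci_conn_put(conn);
 */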

/* ----- HCI tasks ----- */
static inline void hci_sched_cmd(struct hci_dev *hdev)
{
	tasklet_schedule(&hdev->cmd_task);
}

static inline void hci_sched_rx(struct hci_dev *hdev)
{
	tasklet_schedule(&hdev->rx_task);
}

static inline void hci_sched_tx(struct hci_dev *hdev)
{
	tasklet_schedule(&hdev->tx_task);
}

/* ----- HCI Devices ----- */
static inline void __hci_dev_put(struct hci_dev *d)
{
	if (atomic_dec_and_test(&d->refcnt))
		d->destruct(d);
}

static inline void hci_dev_put(struct hci_dev *d)
{
	__hci_dev_put(d);
	module_put(d->owner);
}

static inline struct hci_dev *__hci_dev_hold(struct hci_dev *d)
{
	atomic_inc(&d->refcnt);
	return d;
}

static inline struct hci_dev *hci_dev_hold(struct hci_dev *d)
{
	if (try_module_get(d->owner))
		return __hci_dev_hold(d);
	return NULL;
}

#define hci_dev_lock(d)		spin_lock(&d->lock)
#define hci_dev_unlock(d)	spin_unlock(&d->lock)
#define hci_dev_lock_bh(d)	spin_lock_bh(&d->lock)
#define hci_dev_unlock_bh(d)	spin_unlock_bh(&d->lock)

struct hci_dev *hci_dev_get(int index);
struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst);
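
/*
 * Usage sketch (illustrative, not part of the original header): both helpers
 * return a held device, so the caller must balance the reference with
 * hci_dev_put() when done; the index value is assumed to come from the
 * caller, e.g. a user space request.
 *
 *	struct hci_dev *hdev = hci_dev_get(index);
 *	if (!hdev)
 *		return -ENODEV;
 *	... use hdev, e.g. under hci_dev_lock_bh(hdev) ...
 *	hci_dev_put(hdev);
 */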

struct hci_dev *hci_alloc_dev(void);
void hci_free_dev(struct hci_dev *hdev);
int hci_register_dev(struct hci_dev *hdev);
int hci_unregister_dev(struct hci_dev *hdev);
int hci_suspend_dev(struct hci_dev *hdev);
int hci_resume_dev(struct hci_dev *hdev);
int hci_dev_open(__u16 dev);
int hci_dev_close(__u16 dev);
int hci_dev_reset(__u16 dev);
int hci_dev_reset_stat(__u16 dev);
int hci_dev_cmd(unsigned int cmd, void __user *arg);
int hci_get_dev_list(void __user *arg);
int hci_get_dev_info(void __user *arg);
int hci_get_conn_list(void __user *arg);
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg);
int hci_inquiry(void __user *arg);

void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);

/* Receive frame from HCI drivers */
static inline int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
			&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	hci_sched_rx(hdev);
	return 0;
}
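
/*
 * Driver-side sketch (illustrative, not part of the original header): a
 * transport driver that has reassembled a complete packet hands it to the
 * core roughly like this; the skb and hdev variables and the HCI_EVENT_PKT
 * packet type are assumptions about the caller.
 *
 *	skb->dev = (void *) hdev;
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	hci_recv_frame(skb);
 */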

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count);

int hci_register_sysfs(struct hci_dev *hdev);
void hci_unregister_sysfs(struct hci_dev *hdev);
void hci_conn_add_sysfs(struct hci_conn *conn);
void hci_conn_del_sysfs(struct hci_conn *conn);

#define SET_HCIDEV_DEV(hdev, pdev) ((hdev)->parent = (pdev))

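/*
 * Registration sketch (illustrative, not part of the original header): a
 * typical transport driver allocates a device, wires up its callbacks and
 * registers it with the core. The my_open/my_close/my_flush/my_send/
 * my_destruct functions and parent_dev are hypothetical.
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->type     = HCI_USB;
 *	hdev->owner    = THIS_MODULE;
 *	hdev->open     = my_open;
 *	hdev->close    = my_close;
 *	hdev->flush    = my_flush;
 *	hdev->send     = my_send;
 *	hdev->destruct = my_destruct;
 *	SET_HCIDEV_DEV(hdev, parent_dev);
 *
 *	if (hci_register_dev(hdev) < 0) {
 *		hci_free_dev(hdev);
 *		return -EBUSY;
 *	}
 */
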
/* ----- LMP capabilities ----- */
#define lmp_rswitch_capable(dev)   ((dev)->features[0] & LMP_RSWITCH)
#define lmp_encrypt_capable(dev)   ((dev)->features[0] & LMP_ENCRYPT)
#define lmp_sniff_capable(dev)     ((dev)->features[0] & LMP_SNIFF)
#define lmp_sniffsubr_capable(dev) ((dev)->features[5] & LMP_SNIFF_SUBR)
#define lmp_esco_capable(dev)      ((dev)->features[3] & LMP_ESCO)
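
/*
 * Example (illustrative, not part of the original header): these feature
 * checks gate optional behaviour, e.g. only considering eSCO packet types
 * when the local controller advertises eSCO support:
 *
 *	if (lmp_esco_capable(hdev))
 *		... include hdev->esco_type in the packet type selection ...
 */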

/* ----- HCI protocols ----- */
struct hci_proto {
	char		*name;
	unsigned int	id;
	unsigned long	flags;

	void		*priv;

	int (*connect_ind)	(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type);
	int (*connect_cfm)	(struct hci_conn *conn, __u8 status);
	int (*disconn_ind)	(struct hci_conn *conn, __u8 reason);
	int (*recv_acldata)	(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
	int (*recv_scodata)	(struct hci_conn *conn, struct sk_buff *skb);
	int (*auth_cfm)		(struct hci_conn *conn, __u8 status);
	int (*encrypt_cfm)	(struct hci_conn *conn, __u8 status);
};

static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type)
{
	register struct hci_proto *hp;
	int mask = 0;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->connect_ind)
		mask |= hp->connect_ind(hdev, bdaddr, type);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->connect_ind)
		mask |= hp->connect_ind(hdev, bdaddr, type);

	return mask;
}

static inline void hci_proto_connect_cfm(struct hci_conn *conn, __u8 status)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->connect_cfm)
		hp->connect_cfm(conn, status);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->connect_cfm)
		hp->connect_cfm(conn, status);
}

static inline void hci_proto_disconn_ind(struct hci_conn *conn, __u8 reason)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->disconn_ind)
		hp->disconn_ind(conn, reason);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->disconn_ind)
		hp->disconn_ind(conn, reason);
}

static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->auth_cfm)
		hp->auth_cfm(conn, status);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->auth_cfm)
		hp->auth_cfm(conn, status);
}

static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status)
{
	register struct hci_proto *hp;

	hp = hci_proto[HCI_PROTO_L2CAP];
	if (hp && hp->encrypt_cfm)
		hp->encrypt_cfm(conn, status);

	hp = hci_proto[HCI_PROTO_SCO];
	if (hp && hp->encrypt_cfm)
		hp->encrypt_cfm(conn, status);
}

int hci_register_proto(struct hci_proto *hproto);
int hci_unregister_proto(struct hci_proto *hproto);
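
/*
 * Registration sketch (illustrative, not part of the original header): an
 * upper protocol such as L2CAP fills in a struct hci_proto and registers it
 * at module init time; the callback names below are hypothetical.
 *
 *	static struct hci_proto my_proto = {
 *		.name		= "L2CAP",
 *		.id		= HCI_PROTO_L2CAP,
 *		.connect_ind	= my_connect_ind,
 *		.connect_cfm	= my_connect_cfm,
 *		.disconn_ind	= my_disconn_ind,
 *		.recv_acldata	= my_recv_acldata,
 *	};
 *
 *	err = hci_register_proto(&my_proto);
 *	...
 *	hci_unregister_proto(&my_proto);
 */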

/* ----- HCI callbacks ----- */
struct hci_cb {
	struct list_head list;

	char *name;

	void (*auth_cfm)	(struct hci_conn *conn, __u8 status);
	void (*encrypt_cfm)	(struct hci_conn *conn, __u8 status, __u8 encrypt);
	void (*key_change_cfm)	(struct hci_conn *conn, __u8 status);
	void (*role_switch_cfm)	(struct hci_conn *conn, __u8 status, __u8 role);
};

static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
{
	struct list_head *p;

	hci_proto_auth_cfm(conn, status);

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->auth_cfm)
			cb->auth_cfm(conn, status);
	}
	read_unlock_bh(&hci_cb_list_lock);
}

static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
{
	struct list_head *p;

	hci_proto_encrypt_cfm(conn, status);

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->encrypt_cfm)
			cb->encrypt_cfm(conn, status, encrypt);
	}
	read_unlock_bh(&hci_cb_list_lock);
}

static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
{
	struct list_head *p;

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->key_change_cfm)
			cb->key_change_cfm(conn, status);
	}
	read_unlock_bh(&hci_cb_list_lock);
}

static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status, __u8 role)
{
	struct list_head *p;

	read_lock_bh(&hci_cb_list_lock);
	list_for_each(p, &hci_cb_list) {
		struct hci_cb *cb = list_entry(p, struct hci_cb, list);
		if (cb->role_switch_cfm)
			cb->role_switch_cfm(conn, status, role);
	}
	read_unlock_bh(&hci_cb_list_lock);
}

int hci_register_cb(struct hci_cb *hcb);
int hci_unregister_cb(struct hci_cb *hcb);
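
/*
 * Registration sketch (illustrative, not part of the original header): a
 * module interested in security events registers a struct hci_cb; only the
 * callbacks it cares about need to be set. The names are hypothetical.
 *
 *	static struct hci_cb my_cb = {
 *		.name		= "my_module",
 *		.auth_cfm	= my_auth_cfm,
 *		.encrypt_cfm	= my_encrypt_cfm,
 *	};
 *
 *	hci_register_cb(&my_cb);
 *	...
 *	hci_unregister_cb(&my_cb);
 */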

int hci_register_notifier(struct notifier_block *nb);
int hci_unregister_notifier(struct notifier_block *nb);

int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param);
int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags);
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb);
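
/*
 * Usage sketch (illustrative, not part of the original header): commands are
 * addressed by their OGF/OCF pair plus an optional parameter block, e.g.
 * issuing a remote name request for a connection's peer address.
 *
 *	struct hci_cp_remote_name_req cp;
 *
 *	memset(&cp, 0, sizeof(cp));
 *	bacpy(&cp.bdaddr, &conn->dst);
 *	hci_send_cmd(conn->hdev, OGF_LINK_CTL, OCF_REMOTE_NAME_REQ,
 *							sizeof(cp), &cp);
 */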

void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf);

void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data);

/* ----- HCI Sockets ----- */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);

/* HCI info for socket */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock		bt;
	struct hci_dev		*hdev;
	struct hci_filter	filter;
	__u32			cmsg_mask;
};

/* HCI security filter */
#define HCI_SFLT_MAX_OGF  5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

/* ----- HCI requests ----- */
#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		down(&d->req_lock)
#define hci_req_unlock(d)	up(&d->req_lock)

void hci_req_complete(struct hci_dev *hdev, int result);

#endif /* __HCI_CORE_H */