1 // SPDX-License-Identifier: GPL-2.0-only
3 * Bluetooth Software UART Qualcomm protocol
5 * HCI_IBS (HCI In-Band Sleep) is Qualcomm's power management
6 * protocol extension to H4.
8 * Copyright (C) 2007 Texas Instruments, Inc.
9 * Copyright (c) 2010, 2012, 2018 The Linux Foundation. All rights reserved.
12 * This file is based on hci_ll.c, which was...
13 * Written by Ohad Ben-Cohen <ohad@bencohen.org>
14 * which was in turn based on hci_h4.c, which was written
15 * by Maxim Krasnyansky and Marcel Holtmann.
18 #include <linux/kernel.h>
19 #include <linux/clk.h>
20 #include <linux/completion.h>
21 #include <linux/debugfs.h>
22 #include <linux/delay.h>
23 #include <linux/device.h>
24 #include <linux/gpio/consumer.h>
25 #include <linux/mod_devicetable.h>
26 #include <linux/module.h>
27 #include <linux/of_device.h>
28 #include <linux/platform_device.h>
29 #include <linux/regulator/consumer.h>
30 #include <linux/serdev.h>
31 #include <asm/unaligned.h>
33 #include <net/bluetooth/bluetooth.h>
34 #include <net/bluetooth/hci_core.h>
39 /* HCI_IBS protocol messages */
40 #define HCI_IBS_SLEEP_IND 0xFE
41 #define HCI_IBS_WAKE_IND 0xFD
42 #define HCI_IBS_WAKE_ACK 0xFC
43 #define HCI_MAX_IBS_SIZE 10
45 #define IBS_WAKE_RETRANS_TIMEOUT_MS 100
46 #define IBS_TX_IDLE_TIMEOUT_MS 2000
47 #define CMD_TRANS_TIMEOUT_MS 100
50 #define SUSCLK_RATE_32KHZ 32768
52 /* Controller debug log header */
53 #define QCA_DEBUG_HANDLE 0x2EDC
57 QCA_DROP_VENDOR_EVENT
,
60 /* HCI_IBS transmit side sleep protocol states */
67 /* HCI_IBS receive side sleep protocol states */
73 /* HCI_IBS transmit and receive side clock state vote */
74 enum hci_ibs_clock_state_vote
{
75 HCI_IBS_VOTE_STATS_UPDATE
,
76 HCI_IBS_TX_VOTE_CLOCK_ON
,
77 HCI_IBS_TX_VOTE_CLOCK_OFF
,
78 HCI_IBS_RX_VOTE_CLOCK_ON
,
79 HCI_IBS_RX_VOTE_CLOCK_OFF
,
84 struct sk_buff
*rx_skb
;
85 struct sk_buff_head txq
;
86 struct sk_buff_head tx_wait_q
; /* HCI_IBS wait queue */
87 spinlock_t hci_ibs_lock
; /* HCI_IBS state lock */
88 u8 tx_ibs_state
; /* HCI_IBS transmit side power state*/
89 u8 rx_ibs_state
; /* HCI_IBS receive side power state */
90 bool tx_vote
; /* Clock must be on for TX */
91 bool rx_vote
; /* Clock must be on for RX */
92 struct timer_list tx_idle_timer
;
94 struct timer_list wake_retrans_timer
;
96 struct workqueue_struct
*workqueue
;
97 struct work_struct ws_awake_rx
;
98 struct work_struct ws_awake_device
;
99 struct work_struct ws_rx_vote_off
;
100 struct work_struct ws_tx_vote_off
;
102 struct completion drop_ev_comp
;
104 /* For debugging purpose */
122 enum qca_speed_type
{
128 * Voltage regulator information required for configuring the
129 * QCA Bluetooth chipset
135 unsigned int load_uA
;
138 struct qca_vreg_data
{
139 enum qca_btsoc_type soc_type
;
140 struct qca_vreg
*vregs
;
145 * Platform data for the QCA Bluetooth power driver.
149 const struct qca_vreg_data
*vreg_data
;
150 struct regulator_bulk_data
*vreg_bulk
;
155 struct hci_uart serdev_hu
;
156 struct gpio_desc
*bt_en
;
158 enum qca_btsoc_type btsoc_type
;
159 struct qca_power
*bt_power
;
162 const char *firmware_name
;
165 static int qca_power_setup(struct hci_uart
*hu
, bool on
);
166 static void qca_power_shutdown(struct hci_uart
*hu
);
167 static int qca_power_off(struct hci_dev
*hdev
);
169 static enum qca_btsoc_type
qca_soc_type(struct hci_uart
*hu
)
171 enum qca_btsoc_type soc_type
;
174 struct qca_serdev
*qsd
= serdev_device_get_drvdata(hu
->serdev
);
176 soc_type
= qsd
->btsoc_type
;
184 static const char *qca_get_firmware_name(struct hci_uart
*hu
)
187 struct qca_serdev
*qsd
= serdev_device_get_drvdata(hu
->serdev
);
189 return qsd
->firmware_name
;
/* Host-side UART clock enable hook — intentionally a no-op. */
static void __serial_clock_on(struct tty_struct *tty)
{
	/* TODO: Some chipset requires to enable UART clock on client
	 * side to save power consumption or manual work is required.
	 * Please put your code to control UART clock here if needed
	 */
}
/* Host-side UART clock disable hook — intentionally a no-op. */
static void __serial_clock_off(struct tty_struct *tty)
{
	/* TODO: Some chipset requires to disable UART clock on client
	 * side to save power consumption or manual work is required.
	 * Please put your code to control UART clock off here if needed
	 */
}
211 /* serial_clock_vote needs to be called with the ibs lock held */
212 static void serial_clock_vote(unsigned long vote
, struct hci_uart
*hu
)
214 struct qca_data
*qca
= hu
->priv
;
217 bool old_vote
= (qca
->tx_vote
| qca
->rx_vote
);
221 case HCI_IBS_VOTE_STATS_UPDATE
:
222 diff
= jiffies_to_msecs(jiffies
- qca
->vote_last_jif
);
225 qca
->vote_off_ms
+= diff
;
227 qca
->vote_on_ms
+= diff
;
230 case HCI_IBS_TX_VOTE_CLOCK_ON
:
236 case HCI_IBS_RX_VOTE_CLOCK_ON
:
242 case HCI_IBS_TX_VOTE_CLOCK_OFF
:
243 qca
->tx_vote
= false;
245 new_vote
= qca
->rx_vote
| qca
->tx_vote
;
248 case HCI_IBS_RX_VOTE_CLOCK_OFF
:
249 qca
->rx_vote
= false;
251 new_vote
= qca
->rx_vote
| qca
->tx_vote
;
255 BT_ERR("Voting irregularity");
259 if (new_vote
!= old_vote
) {
261 __serial_clock_on(hu
->tty
);
263 __serial_clock_off(hu
->tty
);
265 BT_DBG("Vote serial clock %s(%s)", new_vote
? "true" : "false",
266 vote
? "true" : "false");
268 diff
= jiffies_to_msecs(jiffies
- qca
->vote_last_jif
);
272 qca
->vote_off_ms
+= diff
;
275 qca
->vote_on_ms
+= diff
;
277 qca
->vote_last_jif
= jiffies
;
281 /* Builds and sends an HCI_IBS command packet.
282 * These are very simple packets with only 1 cmd byte.
284 static int send_hci_ibs_cmd(u8 cmd
, struct hci_uart
*hu
)
287 struct sk_buff
*skb
= NULL
;
288 struct qca_data
*qca
= hu
->priv
;
290 BT_DBG("hu %p send hci ibs cmd 0x%x", hu
, cmd
);
292 skb
= bt_skb_alloc(1, GFP_ATOMIC
);
294 BT_ERR("Failed to allocate memory for HCI_IBS packet");
298 /* Assign HCI_IBS type */
299 skb_put_u8(skb
, cmd
);
301 skb_queue_tail(&qca
->txq
, skb
);
306 static void qca_wq_awake_device(struct work_struct
*work
)
308 struct qca_data
*qca
= container_of(work
, struct qca_data
,
310 struct hci_uart
*hu
= qca
->hu
;
311 unsigned long retrans_delay
;
314 BT_DBG("hu %p wq awake device", hu
);
316 /* Vote for serial clock */
317 serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON
, hu
);
319 spin_lock_irqsave(&qca
->hci_ibs_lock
, flags
);
321 /* Send wake indication to device */
322 if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND
, hu
) < 0)
323 BT_ERR("Failed to send WAKE to device");
325 qca
->ibs_sent_wakes
++;
327 /* Start retransmit timer */
328 retrans_delay
= msecs_to_jiffies(qca
->wake_retrans
);
329 mod_timer(&qca
->wake_retrans_timer
, jiffies
+ retrans_delay
);
331 spin_unlock_irqrestore(&qca
->hci_ibs_lock
, flags
);
333 /* Actually send the packets */
334 hci_uart_tx_wakeup(hu
);
337 static void qca_wq_awake_rx(struct work_struct
*work
)
339 struct qca_data
*qca
= container_of(work
, struct qca_data
,
341 struct hci_uart
*hu
= qca
->hu
;
344 BT_DBG("hu %p wq awake rx", hu
);
346 serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON
, hu
);
348 spin_lock_irqsave(&qca
->hci_ibs_lock
, flags
);
349 qca
->rx_ibs_state
= HCI_IBS_RX_AWAKE
;
351 /* Always acknowledge device wake up,
352 * sending IBS message doesn't count as TX ON.
354 if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK
, hu
) < 0)
355 BT_ERR("Failed to acknowledge device wake up");
357 qca
->ibs_sent_wacks
++;
359 spin_unlock_irqrestore(&qca
->hci_ibs_lock
, flags
);
361 /* Actually send the packets */
362 hci_uart_tx_wakeup(hu
);
365 static void qca_wq_serial_rx_clock_vote_off(struct work_struct
*work
)
367 struct qca_data
*qca
= container_of(work
, struct qca_data
,
369 struct hci_uart
*hu
= qca
->hu
;
371 BT_DBG("hu %p rx clock vote off", hu
);
373 serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_OFF
, hu
);
376 static void qca_wq_serial_tx_clock_vote_off(struct work_struct
*work
)
378 struct qca_data
*qca
= container_of(work
, struct qca_data
,
380 struct hci_uart
*hu
= qca
->hu
;
382 BT_DBG("hu %p tx clock vote off", hu
);
384 /* Run HCI tx handling unlocked */
385 hci_uart_tx_wakeup(hu
);
387 /* Now that message queued to tty driver, vote for tty clocks off.
388 * It is up to the tty driver to pend the clocks off until tx done.
390 serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF
, hu
);
393 static void hci_ibs_tx_idle_timeout(struct timer_list
*t
)
395 struct qca_data
*qca
= from_timer(qca
, t
, tx_idle_timer
);
396 struct hci_uart
*hu
= qca
->hu
;
399 BT_DBG("hu %p idle timeout in %d state", hu
, qca
->tx_ibs_state
);
401 spin_lock_irqsave_nested(&qca
->hci_ibs_lock
,
402 flags
, SINGLE_DEPTH_NESTING
);
404 switch (qca
->tx_ibs_state
) {
405 case HCI_IBS_TX_AWAKE
:
406 /* TX_IDLE, go to SLEEP */
407 if (send_hci_ibs_cmd(HCI_IBS_SLEEP_IND
, hu
) < 0) {
408 BT_ERR("Failed to send SLEEP to device");
411 qca
->tx_ibs_state
= HCI_IBS_TX_ASLEEP
;
412 qca
->ibs_sent_slps
++;
413 queue_work(qca
->workqueue
, &qca
->ws_tx_vote_off
);
416 case HCI_IBS_TX_ASLEEP
:
417 case HCI_IBS_TX_WAKING
:
421 BT_ERR("Spurious timeout tx state %d", qca
->tx_ibs_state
);
425 spin_unlock_irqrestore(&qca
->hci_ibs_lock
, flags
);
428 static void hci_ibs_wake_retrans_timeout(struct timer_list
*t
)
430 struct qca_data
*qca
= from_timer(qca
, t
, wake_retrans_timer
);
431 struct hci_uart
*hu
= qca
->hu
;
432 unsigned long flags
, retrans_delay
;
433 bool retransmit
= false;
435 BT_DBG("hu %p wake retransmit timeout in %d state",
436 hu
, qca
->tx_ibs_state
);
438 spin_lock_irqsave_nested(&qca
->hci_ibs_lock
,
439 flags
, SINGLE_DEPTH_NESTING
);
441 switch (qca
->tx_ibs_state
) {
442 case HCI_IBS_TX_WAKING
:
443 /* No WAKE_ACK, retransmit WAKE */
445 if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND
, hu
) < 0) {
446 BT_ERR("Failed to acknowledge device wake up");
449 qca
->ibs_sent_wakes
++;
450 retrans_delay
= msecs_to_jiffies(qca
->wake_retrans
);
451 mod_timer(&qca
->wake_retrans_timer
, jiffies
+ retrans_delay
);
454 case HCI_IBS_TX_ASLEEP
:
455 case HCI_IBS_TX_AWAKE
:
459 BT_ERR("Spurious timeout tx state %d", qca
->tx_ibs_state
);
463 spin_unlock_irqrestore(&qca
->hci_ibs_lock
, flags
);
466 hci_uart_tx_wakeup(hu
);
469 /* Initialize protocol */
470 static int qca_open(struct hci_uart
*hu
)
472 struct qca_serdev
*qcadev
;
473 struct qca_data
*qca
;
476 BT_DBG("hu %p qca_open", hu
);
478 if (!hci_uart_has_flow_control(hu
))
481 qca
= kzalloc(sizeof(struct qca_data
), GFP_KERNEL
);
485 skb_queue_head_init(&qca
->txq
);
486 skb_queue_head_init(&qca
->tx_wait_q
);
487 spin_lock_init(&qca
->hci_ibs_lock
);
488 qca
->workqueue
= alloc_ordered_workqueue("qca_wq", 0);
489 if (!qca
->workqueue
) {
490 BT_ERR("QCA Workqueue not initialized properly");
495 INIT_WORK(&qca
->ws_awake_rx
, qca_wq_awake_rx
);
496 INIT_WORK(&qca
->ws_awake_device
, qca_wq_awake_device
);
497 INIT_WORK(&qca
->ws_rx_vote_off
, qca_wq_serial_rx_clock_vote_off
);
498 INIT_WORK(&qca
->ws_tx_vote_off
, qca_wq_serial_tx_clock_vote_off
);
501 init_completion(&qca
->drop_ev_comp
);
503 /* Assume we start with both sides asleep -- extra wakes OK */
504 qca
->tx_ibs_state
= HCI_IBS_TX_ASLEEP
;
505 qca
->rx_ibs_state
= HCI_IBS_RX_ASLEEP
;
507 /* clocks actually on, but we start votes off */
508 qca
->tx_vote
= false;
509 qca
->rx_vote
= false;
512 qca
->ibs_sent_wacks
= 0;
513 qca
->ibs_sent_slps
= 0;
514 qca
->ibs_sent_wakes
= 0;
515 qca
->ibs_recv_wacks
= 0;
516 qca
->ibs_recv_slps
= 0;
517 qca
->ibs_recv_wakes
= 0;
518 qca
->vote_last_jif
= jiffies
;
520 qca
->vote_off_ms
= 0;
523 qca
->tx_votes_on
= 0;
524 qca
->tx_votes_off
= 0;
525 qca
->rx_votes_on
= 0;
526 qca
->rx_votes_off
= 0;
532 qcadev
= serdev_device_get_drvdata(hu
->serdev
);
533 if (!qca_is_wcn399x(qcadev
->btsoc_type
)) {
534 gpiod_set_value_cansleep(qcadev
->bt_en
, 1);
535 /* Controller needs time to bootup. */
538 hu
->init_speed
= qcadev
->init_speed
;
539 hu
->oper_speed
= qcadev
->oper_speed
;
540 ret
= qca_power_setup(hu
, true);
542 destroy_workqueue(qca
->workqueue
);
543 kfree_skb(qca
->rx_skb
);
551 timer_setup(&qca
->wake_retrans_timer
, hci_ibs_wake_retrans_timeout
, 0);
552 qca
->wake_retrans
= IBS_WAKE_RETRANS_TIMEOUT_MS
;
554 timer_setup(&qca
->tx_idle_timer
, hci_ibs_tx_idle_timeout
, 0);
555 qca
->tx_idle_delay
= IBS_TX_IDLE_TIMEOUT_MS
;
557 BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u",
558 qca
->tx_idle_delay
, qca
->wake_retrans
);
563 static void qca_debugfs_init(struct hci_dev
*hdev
)
565 struct hci_uart
*hu
= hci_get_drvdata(hdev
);
566 struct qca_data
*qca
= hu
->priv
;
567 struct dentry
*ibs_dir
;
573 ibs_dir
= debugfs_create_dir("ibs", hdev
->debugfs
);
577 debugfs_create_u8("tx_ibs_state", mode
, ibs_dir
, &qca
->tx_ibs_state
);
578 debugfs_create_u8("rx_ibs_state", mode
, ibs_dir
, &qca
->rx_ibs_state
);
579 debugfs_create_u64("ibs_sent_sleeps", mode
, ibs_dir
,
580 &qca
->ibs_sent_slps
);
581 debugfs_create_u64("ibs_sent_wakes", mode
, ibs_dir
,
582 &qca
->ibs_sent_wakes
);
583 debugfs_create_u64("ibs_sent_wake_acks", mode
, ibs_dir
,
584 &qca
->ibs_sent_wacks
);
585 debugfs_create_u64("ibs_recv_sleeps", mode
, ibs_dir
,
586 &qca
->ibs_recv_slps
);
587 debugfs_create_u64("ibs_recv_wakes", mode
, ibs_dir
,
588 &qca
->ibs_recv_wakes
);
589 debugfs_create_u64("ibs_recv_wake_acks", mode
, ibs_dir
,
590 &qca
->ibs_recv_wacks
);
591 debugfs_create_bool("tx_vote", mode
, ibs_dir
, &qca
->tx_vote
);
592 debugfs_create_u64("tx_votes_on", mode
, ibs_dir
, &qca
->tx_votes_on
);
593 debugfs_create_u64("tx_votes_off", mode
, ibs_dir
, &qca
->tx_votes_off
);
594 debugfs_create_bool("rx_vote", mode
, ibs_dir
, &qca
->rx_vote
);
595 debugfs_create_u64("rx_votes_on", mode
, ibs_dir
, &qca
->rx_votes_on
);
596 debugfs_create_u64("rx_votes_off", mode
, ibs_dir
, &qca
->rx_votes_off
);
597 debugfs_create_u64("votes_on", mode
, ibs_dir
, &qca
->votes_on
);
598 debugfs_create_u64("votes_off", mode
, ibs_dir
, &qca
->votes_off
);
599 debugfs_create_u32("vote_on_ms", mode
, ibs_dir
, &qca
->vote_on_ms
);
600 debugfs_create_u32("vote_off_ms", mode
, ibs_dir
, &qca
->vote_off_ms
);
603 mode
= S_IRUGO
| S_IWUSR
;
604 debugfs_create_u32("wake_retrans", mode
, ibs_dir
, &qca
->wake_retrans
);
605 debugfs_create_u32("tx_idle_delay", mode
, ibs_dir
,
606 &qca
->tx_idle_delay
);
609 /* Flush protocol data */
610 static int qca_flush(struct hci_uart
*hu
)
612 struct qca_data
*qca
= hu
->priv
;
614 BT_DBG("hu %p qca flush", hu
);
616 skb_queue_purge(&qca
->tx_wait_q
);
617 skb_queue_purge(&qca
->txq
);
623 static int qca_close(struct hci_uart
*hu
)
625 struct qca_serdev
*qcadev
;
626 struct qca_data
*qca
= hu
->priv
;
628 BT_DBG("hu %p qca close", hu
);
630 serial_clock_vote(HCI_IBS_VOTE_STATS_UPDATE
, hu
);
632 skb_queue_purge(&qca
->tx_wait_q
);
633 skb_queue_purge(&qca
->txq
);
634 del_timer(&qca
->tx_idle_timer
);
635 del_timer(&qca
->wake_retrans_timer
);
636 destroy_workqueue(qca
->workqueue
);
640 qcadev
= serdev_device_get_drvdata(hu
->serdev
);
641 if (qca_is_wcn399x(qcadev
->btsoc_type
))
642 qca_power_shutdown(hu
);
644 gpiod_set_value_cansleep(qcadev
->bt_en
, 0);
648 kfree_skb(qca
->rx_skb
);
657 /* Called upon a wake-up-indication from the device.
659 static void device_want_to_wakeup(struct hci_uart
*hu
)
662 struct qca_data
*qca
= hu
->priv
;
664 BT_DBG("hu %p want to wake up", hu
);
666 spin_lock_irqsave(&qca
->hci_ibs_lock
, flags
);
668 qca
->ibs_recv_wakes
++;
670 switch (qca
->rx_ibs_state
) {
671 case HCI_IBS_RX_ASLEEP
:
672 /* Make sure clock is on - we may have turned clock off since
673 * receiving the wake up indicator awake rx clock.
675 queue_work(qca
->workqueue
, &qca
->ws_awake_rx
);
676 spin_unlock_irqrestore(&qca
->hci_ibs_lock
, flags
);
679 case HCI_IBS_RX_AWAKE
:
680 /* Always acknowledge device wake up,
681 * sending IBS message doesn't count as TX ON.
683 if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK
, hu
) < 0) {
684 BT_ERR("Failed to acknowledge device wake up");
687 qca
->ibs_sent_wacks
++;
691 /* Any other state is illegal */
692 BT_ERR("Received HCI_IBS_WAKE_IND in rx state %d",
697 spin_unlock_irqrestore(&qca
->hci_ibs_lock
, flags
);
699 /* Actually send the packets */
700 hci_uart_tx_wakeup(hu
);
703 /* Called upon a sleep-indication from the device.
705 static void device_want_to_sleep(struct hci_uart
*hu
)
708 struct qca_data
*qca
= hu
->priv
;
710 BT_DBG("hu %p want to sleep in %d state", hu
, qca
->rx_ibs_state
);
712 spin_lock_irqsave(&qca
->hci_ibs_lock
, flags
);
714 qca
->ibs_recv_slps
++;
716 switch (qca
->rx_ibs_state
) {
717 case HCI_IBS_RX_AWAKE
:
719 qca
->rx_ibs_state
= HCI_IBS_RX_ASLEEP
;
720 /* Vote off rx clock under workqueue */
721 queue_work(qca
->workqueue
, &qca
->ws_rx_vote_off
);
724 case HCI_IBS_RX_ASLEEP
:
728 /* Any other state is illegal */
729 BT_ERR("Received HCI_IBS_SLEEP_IND in rx state %d",
734 spin_unlock_irqrestore(&qca
->hci_ibs_lock
, flags
);
737 /* Called upon wake-up-acknowledgement from the device
739 static void device_woke_up(struct hci_uart
*hu
)
741 unsigned long flags
, idle_delay
;
742 struct qca_data
*qca
= hu
->priv
;
743 struct sk_buff
*skb
= NULL
;
745 BT_DBG("hu %p woke up", hu
);
747 spin_lock_irqsave(&qca
->hci_ibs_lock
, flags
);
749 qca
->ibs_recv_wacks
++;
751 switch (qca
->tx_ibs_state
) {
752 case HCI_IBS_TX_AWAKE
:
753 /* Expect one if we send 2 WAKEs */
754 BT_DBG("Received HCI_IBS_WAKE_ACK in tx state %d",
758 case HCI_IBS_TX_WAKING
:
759 /* Send pending packets */
760 while ((skb
= skb_dequeue(&qca
->tx_wait_q
)))
761 skb_queue_tail(&qca
->txq
, skb
);
763 /* Switch timers and change state to HCI_IBS_TX_AWAKE */
764 del_timer(&qca
->wake_retrans_timer
);
765 idle_delay
= msecs_to_jiffies(qca
->tx_idle_delay
);
766 mod_timer(&qca
->tx_idle_timer
, jiffies
+ idle_delay
);
767 qca
->tx_ibs_state
= HCI_IBS_TX_AWAKE
;
770 case HCI_IBS_TX_ASLEEP
:
774 BT_ERR("Received HCI_IBS_WAKE_ACK in tx state %d",
779 spin_unlock_irqrestore(&qca
->hci_ibs_lock
, flags
);
781 /* Actually send the packets */
782 hci_uart_tx_wakeup(hu
);
785 /* Enqueue frame for transmittion (padding, crc, etc) may be called from
786 * two simultaneous tasklets.
788 static int qca_enqueue(struct hci_uart
*hu
, struct sk_buff
*skb
)
790 unsigned long flags
= 0, idle_delay
;
791 struct qca_data
*qca
= hu
->priv
;
793 BT_DBG("hu %p qca enq skb %p tx_ibs_state %d", hu
, skb
,
796 /* Prepend skb with frame type */
797 memcpy(skb_push(skb
, 1), &hci_skb_pkt_type(skb
), 1);
799 spin_lock_irqsave(&qca
->hci_ibs_lock
, flags
);
801 /* Don't go to sleep in middle of patch download or
802 * Out-Of-Band(GPIOs control) sleep is selected.
804 if (!test_bit(QCA_IBS_ENABLED
, &qca
->flags
)) {
805 skb_queue_tail(&qca
->txq
, skb
);
806 spin_unlock_irqrestore(&qca
->hci_ibs_lock
, flags
);
810 /* Act according to current state */
811 switch (qca
->tx_ibs_state
) {
812 case HCI_IBS_TX_AWAKE
:
813 BT_DBG("Device awake, sending normally");
814 skb_queue_tail(&qca
->txq
, skb
);
815 idle_delay
= msecs_to_jiffies(qca
->tx_idle_delay
);
816 mod_timer(&qca
->tx_idle_timer
, jiffies
+ idle_delay
);
819 case HCI_IBS_TX_ASLEEP
:
820 BT_DBG("Device asleep, waking up and queueing packet");
821 /* Save packet for later */
822 skb_queue_tail(&qca
->tx_wait_q
, skb
);
824 qca
->tx_ibs_state
= HCI_IBS_TX_WAKING
;
825 /* Schedule a work queue to wake up device */
826 queue_work(qca
->workqueue
, &qca
->ws_awake_device
);
829 case HCI_IBS_TX_WAKING
:
830 BT_DBG("Device waking up, queueing packet");
831 /* Transient state; just keep packet for later */
832 skb_queue_tail(&qca
->tx_wait_q
, skb
);
836 BT_ERR("Illegal tx state: %d (losing packet)",
842 spin_unlock_irqrestore(&qca
->hci_ibs_lock
, flags
);
847 static int qca_ibs_sleep_ind(struct hci_dev
*hdev
, struct sk_buff
*skb
)
849 struct hci_uart
*hu
= hci_get_drvdata(hdev
);
851 BT_DBG("hu %p recv hci ibs cmd 0x%x", hu
, HCI_IBS_SLEEP_IND
);
853 device_want_to_sleep(hu
);
859 static int qca_ibs_wake_ind(struct hci_dev
*hdev
, struct sk_buff
*skb
)
861 struct hci_uart
*hu
= hci_get_drvdata(hdev
);
863 BT_DBG("hu %p recv hci ibs cmd 0x%x", hu
, HCI_IBS_WAKE_IND
);
865 device_want_to_wakeup(hu
);
871 static int qca_ibs_wake_ack(struct hci_dev
*hdev
, struct sk_buff
*skb
)
873 struct hci_uart
*hu
= hci_get_drvdata(hdev
);
875 BT_DBG("hu %p recv hci ibs cmd 0x%x", hu
, HCI_IBS_WAKE_ACK
);
883 static int qca_recv_acl_data(struct hci_dev
*hdev
, struct sk_buff
*skb
)
885 /* We receive debug logs from chip as an ACL packets.
886 * Instead of sending the data to ACL to decode the
887 * received data, we are pushing them to the above layers
888 * as a diagnostic packet.
890 if (get_unaligned_le16(skb
->data
) == QCA_DEBUG_HANDLE
)
891 return hci_recv_diag(hdev
, skb
);
893 return hci_recv_frame(hdev
, skb
);
896 static int qca_recv_event(struct hci_dev
*hdev
, struct sk_buff
*skb
)
898 struct hci_uart
*hu
= hci_get_drvdata(hdev
);
899 struct qca_data
*qca
= hu
->priv
;
901 if (test_bit(QCA_DROP_VENDOR_EVENT
, &qca
->flags
)) {
902 struct hci_event_hdr
*hdr
= (void *)skb
->data
;
904 /* For the WCN3990 the vendor command for a baudrate change
905 * isn't sent as synchronous HCI command, because the
906 * controller sends the corresponding vendor event with the
907 * new baudrate. The event is received and properly decoded
908 * after changing the baudrate of the host port. It needs to
909 * be dropped, otherwise it can be misinterpreted as
910 * response to a later firmware download command (also a
914 if (hdr
->evt
== HCI_EV_VENDOR
)
915 complete(&qca
->drop_ev_comp
);
922 return hci_recv_frame(hdev
, skb
);
925 #define QCA_IBS_SLEEP_IND_EVENT \
926 .type = HCI_IBS_SLEEP_IND, \
930 .maxlen = HCI_MAX_IBS_SIZE
932 #define QCA_IBS_WAKE_IND_EVENT \
933 .type = HCI_IBS_WAKE_IND, \
937 .maxlen = HCI_MAX_IBS_SIZE
939 #define QCA_IBS_WAKE_ACK_EVENT \
940 .type = HCI_IBS_WAKE_ACK, \
944 .maxlen = HCI_MAX_IBS_SIZE
946 static const struct h4_recv_pkt qca_recv_pkts
[] = {
947 { H4_RECV_ACL
, .recv
= qca_recv_acl_data
},
948 { H4_RECV_SCO
, .recv
= hci_recv_frame
},
949 { H4_RECV_EVENT
, .recv
= qca_recv_event
},
950 { QCA_IBS_WAKE_IND_EVENT
, .recv
= qca_ibs_wake_ind
},
951 { QCA_IBS_WAKE_ACK_EVENT
, .recv
= qca_ibs_wake_ack
},
952 { QCA_IBS_SLEEP_IND_EVENT
, .recv
= qca_ibs_sleep_ind
},
955 static int qca_recv(struct hci_uart
*hu
, const void *data
, int count
)
957 struct qca_data
*qca
= hu
->priv
;
959 if (!test_bit(HCI_UART_REGISTERED
, &hu
->flags
))
962 qca
->rx_skb
= h4_recv_buf(hu
->hdev
, qca
->rx_skb
, data
, count
,
963 qca_recv_pkts
, ARRAY_SIZE(qca_recv_pkts
));
964 if (IS_ERR(qca
->rx_skb
)) {
965 int err
= PTR_ERR(qca
->rx_skb
);
966 bt_dev_err(hu
->hdev
, "Frame reassembly failed (%d)", err
);
974 static struct sk_buff
*qca_dequeue(struct hci_uart
*hu
)
976 struct qca_data
*qca
= hu
->priv
;
978 return skb_dequeue(&qca
->txq
);
981 static uint8_t qca_get_baudrate_value(int speed
)
985 return QCA_BAUDRATE_9600
;
987 return QCA_BAUDRATE_19200
;
989 return QCA_BAUDRATE_38400
;
991 return QCA_BAUDRATE_57600
;
993 return QCA_BAUDRATE_115200
;
995 return QCA_BAUDRATE_230400
;
997 return QCA_BAUDRATE_460800
;
999 return QCA_BAUDRATE_500000
;
1001 return QCA_BAUDRATE_921600
;
1003 return QCA_BAUDRATE_1000000
;
1005 return QCA_BAUDRATE_2000000
;
1007 return QCA_BAUDRATE_3000000
;
1009 return QCA_BAUDRATE_3200000
;
1011 return QCA_BAUDRATE_3500000
;
1013 return QCA_BAUDRATE_115200
;
1017 static int qca_set_baudrate(struct hci_dev
*hdev
, uint8_t baudrate
)
1019 struct hci_uart
*hu
= hci_get_drvdata(hdev
);
1020 struct qca_data
*qca
= hu
->priv
;
1021 struct sk_buff
*skb
;
1022 u8 cmd
[] = { 0x01, 0x48, 0xFC, 0x01, 0x00 };
1024 if (baudrate
> QCA_BAUDRATE_3200000
)
1029 skb
= bt_skb_alloc(sizeof(cmd
), GFP_KERNEL
);
1031 bt_dev_err(hdev
, "Failed to allocate baudrate packet");
1035 /* Assign commands to change baudrate and packet type. */
1036 skb_put_data(skb
, cmd
, sizeof(cmd
));
1037 hci_skb_pkt_type(skb
) = HCI_COMMAND_PKT
;
1039 skb_queue_tail(&qca
->txq
, skb
);
1040 hci_uart_tx_wakeup(hu
);
1042 /* Wait for the baudrate change request to be sent */
1044 while (!skb_queue_empty(&qca
->txq
))
1045 usleep_range(100, 200);
1048 serdev_device_wait_until_sent(hu
->serdev
,
1049 msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS
));
1051 /* Give the controller time to process the request */
1052 if (qca_is_wcn399x(qca_soc_type(hu
)))
1060 static inline void host_set_baudrate(struct hci_uart
*hu
, unsigned int speed
)
1063 serdev_device_set_baudrate(hu
->serdev
, speed
);
1065 hci_uart_set_baudrate(hu
, speed
);
1068 static int qca_send_power_pulse(struct hci_uart
*hu
, bool on
)
1071 int timeout
= msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS
);
1072 u8 cmd
= on
? QCA_WCN3990_POWERON_PULSE
: QCA_WCN3990_POWEROFF_PULSE
;
1074 /* These power pulses are single byte command which are sent
1075 * at required baudrate to wcn3990. On wcn3990, we have an external
1076 * circuit at Tx pin which decodes the pulse sent at specific baudrate.
1077 * For example, wcn3990 supports RF COEX antenna for both Wi-Fi/BT
1078 * and also we use the same power inputs to turn on and off for
1079 * Wi-Fi/BT. Powering up the power sources will not enable BT, until
1080 * we send a power on pulse at 115200 bps. This algorithm will help to
1081 * save power. Disabling hardware flow control is mandatory while
1082 * sending power pulses to SoC.
1084 bt_dev_dbg(hu
->hdev
, "sending power pulse %02x to controller", cmd
);
1086 serdev_device_write_flush(hu
->serdev
);
1087 hci_uart_set_flow_control(hu
, true);
1088 ret
= serdev_device_write_buf(hu
->serdev
, &cmd
, sizeof(cmd
));
1090 bt_dev_err(hu
->hdev
, "failed to send power pulse %02x", cmd
);
1094 serdev_device_wait_until_sent(hu
->serdev
, timeout
);
1095 hci_uart_set_flow_control(hu
, false);
1097 /* Give to controller time to boot/shutdown */
1106 static unsigned int qca_get_speed(struct hci_uart
*hu
,
1107 enum qca_speed_type speed_type
)
1109 unsigned int speed
= 0;
1111 if (speed_type
== QCA_INIT_SPEED
) {
1113 speed
= hu
->init_speed
;
1114 else if (hu
->proto
->init_speed
)
1115 speed
= hu
->proto
->init_speed
;
1118 speed
= hu
->oper_speed
;
1119 else if (hu
->proto
->oper_speed
)
1120 speed
= hu
->proto
->oper_speed
;
1126 static int qca_check_speeds(struct hci_uart
*hu
)
1128 if (qca_is_wcn399x(qca_soc_type(hu
))) {
1129 if (!qca_get_speed(hu
, QCA_INIT_SPEED
) &&
1130 !qca_get_speed(hu
, QCA_OPER_SPEED
))
1133 if (!qca_get_speed(hu
, QCA_INIT_SPEED
) ||
1134 !qca_get_speed(hu
, QCA_OPER_SPEED
))
1141 static int qca_set_speed(struct hci_uart
*hu
, enum qca_speed_type speed_type
)
1143 unsigned int speed
, qca_baudrate
;
1144 struct qca_data
*qca
= hu
->priv
;
1147 if (speed_type
== QCA_INIT_SPEED
) {
1148 speed
= qca_get_speed(hu
, QCA_INIT_SPEED
);
1150 host_set_baudrate(hu
, speed
);
1152 enum qca_btsoc_type soc_type
= qca_soc_type(hu
);
1154 speed
= qca_get_speed(hu
, QCA_OPER_SPEED
);
1158 /* Disable flow control for wcn3990 to deassert RTS while
1159 * changing the baudrate of chip and host.
1161 if (qca_is_wcn399x(soc_type
))
1162 hci_uart_set_flow_control(hu
, true);
1164 if (soc_type
== QCA_WCN3990
) {
1165 reinit_completion(&qca
->drop_ev_comp
);
1166 set_bit(QCA_DROP_VENDOR_EVENT
, &qca
->flags
);
1169 qca_baudrate
= qca_get_baudrate_value(speed
);
1170 bt_dev_dbg(hu
->hdev
, "Set UART speed to %d", speed
);
1171 ret
= qca_set_baudrate(hu
->hdev
, qca_baudrate
);
1175 host_set_baudrate(hu
, speed
);
1178 if (qca_is_wcn399x(soc_type
))
1179 hci_uart_set_flow_control(hu
, false);
1181 if (soc_type
== QCA_WCN3990
) {
1182 /* Wait for the controller to send the vendor event
1183 * for the baudrate change command.
1185 if (!wait_for_completion_timeout(&qca
->drop_ev_comp
,
1186 msecs_to_jiffies(100))) {
1187 bt_dev_err(hu
->hdev
,
1188 "Failed to change controller baudrate\n");
1192 clear_bit(QCA_DROP_VENDOR_EVENT
, &qca
->flags
);
1199 static int qca_wcn3990_init(struct hci_uart
*hu
)
1201 struct qca_serdev
*qcadev
;
1204 /* Check for vregs status, may be hci down has turned
1205 * off the voltage regulator.
1207 qcadev
= serdev_device_get_drvdata(hu
->serdev
);
1208 if (!qcadev
->bt_power
->vregs_on
) {
1209 serdev_device_close(hu
->serdev
);
1210 ret
= qca_power_setup(hu
, true);
1214 ret
= serdev_device_open(hu
->serdev
);
1216 bt_dev_err(hu
->hdev
, "failed to open port");
1221 /* Forcefully enable wcn3990 to enter in to boot mode. */
1222 host_set_baudrate(hu
, 2400);
1223 ret
= qca_send_power_pulse(hu
, false);
1227 qca_set_speed(hu
, QCA_INIT_SPEED
);
1228 ret
= qca_send_power_pulse(hu
, true);
1232 /* Now the device is in ready state to communicate with host.
1233 * To sync host with device we need to reopen port.
1234 * Without this, we will have RTS and CTS synchronization
1237 serdev_device_close(hu
->serdev
);
1238 ret
= serdev_device_open(hu
->serdev
);
1240 bt_dev_err(hu
->hdev
, "failed to open port");
1244 hci_uart_set_flow_control(hu
, false);
1249 static int qca_setup(struct hci_uart
*hu
)
1251 struct hci_dev
*hdev
= hu
->hdev
;
1252 struct qca_data
*qca
= hu
->priv
;
1253 unsigned int speed
, qca_baudrate
= QCA_BAUDRATE_115200
;
1254 enum qca_btsoc_type soc_type
= qca_soc_type(hu
);
1255 const char *firmware_name
= qca_get_firmware_name(hu
);
1259 ret
= qca_check_speeds(hu
);
1263 /* Patch downloading has to be done without IBS mode */
1264 clear_bit(QCA_IBS_ENABLED
, &qca
->flags
);
1266 if (qca_is_wcn399x(soc_type
)) {
1267 bt_dev_info(hdev
, "setting up wcn3990");
1269 /* Enable NON_PERSISTENT_SETUP QUIRK to ensure to execute
1270 * setup for every hci up.
1272 set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP
, &hdev
->quirks
);
1273 set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY
, &hdev
->quirks
);
1274 hu
->hdev
->shutdown
= qca_power_off
;
1275 ret
= qca_wcn3990_init(hu
);
1279 ret
= qca_read_soc_version(hdev
, &soc_ver
);
1283 bt_dev_info(hdev
, "ROME setup");
1284 qca_set_speed(hu
, QCA_INIT_SPEED
);
1287 /* Setup user speed if needed */
1288 speed
= qca_get_speed(hu
, QCA_OPER_SPEED
);
1290 ret
= qca_set_speed(hu
, QCA_OPER_SPEED
);
1294 qca_baudrate
= qca_get_baudrate_value(speed
);
1297 if (!qca_is_wcn399x(soc_type
)) {
1298 /* Get QCA version information */
1299 ret
= qca_read_soc_version(hdev
, &soc_ver
);
1304 bt_dev_info(hdev
, "QCA controller version 0x%08x", soc_ver
);
1305 /* Setup patch / NVM configurations */
1306 ret
= qca_uart_setup(hdev
, qca_baudrate
, soc_type
, soc_ver
,
1309 set_bit(QCA_IBS_ENABLED
, &qca
->flags
);
1310 qca_debugfs_init(hdev
);
1311 } else if (ret
== -ENOENT
) {
1312 /* No patch/nvm-config found, run with original fw/config */
1314 } else if (ret
== -EAGAIN
) {
1316 * Userspace firmware loader will return -EAGAIN in case no
1317 * patch/nvm-config is found, so run with original fw/config.
1323 if (qca_is_wcn399x(soc_type
))
1324 hu
->hdev
->set_bdaddr
= qca_set_bdaddr
;
1326 hu
->hdev
->set_bdaddr
= qca_set_bdaddr_rome
;
1331 static struct hci_uart_proto qca_proto
= {
1335 .init_speed
= 115200,
1336 .oper_speed
= 3000000,
1342 .enqueue
= qca_enqueue
,
1343 .dequeue
= qca_dequeue
,
1346 static const struct qca_vreg_data qca_soc_data_wcn3990
= {
1347 .soc_type
= QCA_WCN3990
,
1348 .vregs
= (struct qca_vreg
[]) {
1349 { "vddio", 1800000, 1900000, 15000 },
1350 { "vddxo", 1800000, 1900000, 80000 },
1351 { "vddrf", 1300000, 1350000, 300000 },
1352 { "vddch0", 3300000, 3400000, 450000 },
1357 static const struct qca_vreg_data qca_soc_data_wcn3998
= {
1358 .soc_type
= QCA_WCN3998
,
1359 .vregs
= (struct qca_vreg
[]) {
1360 { "vddio", 1800000, 1900000, 10000 },
1361 { "vddxo", 1800000, 1900000, 80000 },
1362 { "vddrf", 1300000, 1352000, 300000 },
1363 { "vddch0", 3300000, 3300000, 450000 },
1368 static void qca_power_shutdown(struct hci_uart
*hu
)
1370 struct qca_data
*qca
= hu
->priv
;
1371 unsigned long flags
;
1373 /* From this point we go into power off state. But serial port is
1374 * still open, stop queueing the IBS data and flush all the buffered
1377 spin_lock_irqsave(&qca
->hci_ibs_lock
, flags
);
1378 clear_bit(QCA_IBS_ENABLED
, &qca
->flags
);
1380 spin_unlock_irqrestore(&qca
->hci_ibs_lock
, flags
);
1382 host_set_baudrate(hu
, 2400);
1383 qca_send_power_pulse(hu
, false);
1384 qca_power_setup(hu
, false);
/* hdev->shutdown hook for wcn399x: issue the pre-shutdown command, then
 * power the SoC down. Always reports success.
 */
static int qca_power_off(struct hci_dev *hdev)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);

	/* Perform pre shutdown command */
	qca_send_pre_shutdown_cmd(hdev);

	qca_power_shutdown(hu);
	return 0;
}
1398 static int qca_enable_regulator(struct qca_vreg vregs
,
1399 struct regulator
*regulator
)
1403 ret
= regulator_set_voltage(regulator
, vregs
.min_uV
,
1409 ret
= regulator_set_load(regulator
,
1415 return regulator_enable(regulator
);
1419 static void qca_disable_regulator(struct qca_vreg vregs
,
1420 struct regulator
*regulator
)
1422 regulator_disable(regulator
);
1423 regulator_set_voltage(regulator
, 0, vregs
.max_uV
);
1425 regulator_set_load(regulator
, 0);
1429 static int qca_power_setup(struct hci_uart
*hu
, bool on
)
1431 struct qca_vreg
*vregs
;
1432 struct regulator_bulk_data
*vreg_bulk
;
1433 struct qca_serdev
*qcadev
;
1434 int i
, num_vregs
, ret
= 0;
1436 qcadev
= serdev_device_get_drvdata(hu
->serdev
);
1437 if (!qcadev
|| !qcadev
->bt_power
|| !qcadev
->bt_power
->vreg_data
||
1438 !qcadev
->bt_power
->vreg_bulk
)
1441 vregs
= qcadev
->bt_power
->vreg_data
->vregs
;
1442 vreg_bulk
= qcadev
->bt_power
->vreg_bulk
;
1443 num_vregs
= qcadev
->bt_power
->vreg_data
->num_vregs
;
1444 BT_DBG("on: %d", on
);
1445 if (on
&& !qcadev
->bt_power
->vregs_on
) {
1446 for (i
= 0; i
< num_vregs
; i
++) {
1447 ret
= qca_enable_regulator(vregs
[i
],
1448 vreg_bulk
[i
].consumer
);
1454 BT_ERR("failed to enable regulator:%s", vregs
[i
].name
);
1455 /* turn off regulators which are enabled */
1456 for (i
= i
- 1; i
>= 0; i
--)
1457 qca_disable_regulator(vregs
[i
],
1458 vreg_bulk
[i
].consumer
);
1460 qcadev
->bt_power
->vregs_on
= true;
1462 } else if (!on
&& qcadev
->bt_power
->vregs_on
) {
1463 /* turn off regulator in reverse order */
1464 i
= qcadev
->bt_power
->vreg_data
->num_vregs
- 1;
1465 for ( ; i
>= 0; i
--)
1466 qca_disable_regulator(vregs
[i
], vreg_bulk
[i
].consumer
);
1468 qcadev
->bt_power
->vregs_on
= false;
1474 static int qca_init_regulators(struct qca_power
*qca
,
1475 const struct qca_vreg
*vregs
, size_t num_vregs
)
1479 qca
->vreg_bulk
= devm_kcalloc(qca
->dev
, num_vregs
,
1480 sizeof(struct regulator_bulk_data
),
1482 if (!qca
->vreg_bulk
)
1485 for (i
= 0; i
< num_vregs
; i
++)
1486 qca
->vreg_bulk
[i
].supply
= vregs
[i
].name
;
1488 return devm_regulator_bulk_get(qca
->dev
, num_vregs
, qca
->vreg_bulk
);
1491 static int qca_serdev_probe(struct serdev_device
*serdev
)
1493 struct qca_serdev
*qcadev
;
1494 const struct qca_vreg_data
*data
;
1497 qcadev
= devm_kzalloc(&serdev
->dev
, sizeof(*qcadev
), GFP_KERNEL
);
1501 qcadev
->serdev_hu
.serdev
= serdev
;
1502 data
= of_device_get_match_data(&serdev
->dev
);
1503 serdev_device_set_drvdata(serdev
, qcadev
);
1504 device_property_read_string(&serdev
->dev
, "firmware-name",
1505 &qcadev
->firmware_name
);
1506 if (data
&& qca_is_wcn399x(data
->soc_type
)) {
1507 qcadev
->btsoc_type
= data
->soc_type
;
1508 qcadev
->bt_power
= devm_kzalloc(&serdev
->dev
,
1509 sizeof(struct qca_power
),
1511 if (!qcadev
->bt_power
)
1514 qcadev
->bt_power
->dev
= &serdev
->dev
;
1515 qcadev
->bt_power
->vreg_data
= data
;
1516 err
= qca_init_regulators(qcadev
->bt_power
, data
->vregs
,
1519 BT_ERR("Failed to init regulators:%d", err
);
1523 qcadev
->bt_power
->vregs_on
= false;
1525 device_property_read_u32(&serdev
->dev
, "max-speed",
1526 &qcadev
->oper_speed
);
1527 if (!qcadev
->oper_speed
)
1528 BT_DBG("UART will pick default operating speed");
1530 err
= hci_uart_register_device(&qcadev
->serdev_hu
, &qca_proto
);
1532 BT_ERR("wcn3990 serdev registration failed");
1536 qcadev
->btsoc_type
= QCA_ROME
;
1537 qcadev
->bt_en
= devm_gpiod_get(&serdev
->dev
, "enable",
1539 if (IS_ERR(qcadev
->bt_en
)) {
1540 dev_err(&serdev
->dev
, "failed to acquire enable gpio\n");
1541 return PTR_ERR(qcadev
->bt_en
);
1544 qcadev
->susclk
= devm_clk_get(&serdev
->dev
, NULL
);
1545 if (IS_ERR(qcadev
->susclk
)) {
1546 dev_err(&serdev
->dev
, "failed to acquire clk\n");
1547 return PTR_ERR(qcadev
->susclk
);
1550 err
= clk_set_rate(qcadev
->susclk
, SUSCLK_RATE_32KHZ
);
1554 err
= clk_prepare_enable(qcadev
->susclk
);
1558 err
= hci_uart_register_device(&qcadev
->serdev_hu
, &qca_proto
);
1560 clk_disable_unprepare(qcadev
->susclk
);
1567 static void qca_serdev_remove(struct serdev_device
*serdev
)
1569 struct qca_serdev
*qcadev
= serdev_device_get_drvdata(serdev
);
1571 if (qca_is_wcn399x(qcadev
->btsoc_type
))
1572 qca_power_shutdown(&qcadev
->serdev_hu
);
1574 clk_disable_unprepare(qcadev
->susclk
);
1576 hci_uart_unregister_device(&qcadev
->serdev_hu
);
1579 static const struct of_device_id qca_bluetooth_of_match
[] = {
1580 { .compatible
= "qcom,qca6174-bt" },
1581 { .compatible
= "qcom,wcn3990-bt", .data
= &qca_soc_data_wcn3990
},
1582 { .compatible
= "qcom,wcn3998-bt", .data
= &qca_soc_data_wcn3998
},
1585 MODULE_DEVICE_TABLE(of
, qca_bluetooth_of_match
);
1587 static struct serdev_device_driver qca_serdev_driver
= {
1588 .probe
= qca_serdev_probe
,
1589 .remove
= qca_serdev_remove
,
1591 .name
= "hci_uart_qca",
1592 .of_match_table
= qca_bluetooth_of_match
,
1596 int __init
qca_init(void)
1598 serdev_device_driver_register(&qca_serdev_driver
);
1600 return hci_uart_register_proto(&qca_proto
);
1603 int __exit
qca_deinit(void)
1605 serdev_device_driver_unregister(&qca_serdev_driver
);
1607 return hci_uart_unregister_proto(&qca_proto
);