// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 */
6 #include <linux/clk/tegra.h>
7 #include <linux/genalloc.h>
8 #include <linux/mailbox_client.h>
9 #include <linux/module.h>
11 #include <linux/of_address.h>
12 #include <linux/of_device.h>
13 #include <linux/platform_device.h>
15 #include <linux/semaphore.h>
16 #include <linux/sched/clock.h>
18 #include <soc/tegra/bpmp.h>
19 #include <soc/tegra/bpmp-abi.h>
20 #include <soc/tegra/ivc.h>
22 #include "bpmp-private.h"
24 #define MSG_ACK BIT(0)
25 #define MSG_RING BIT(1)
28 static inline struct tegra_bpmp
*
29 mbox_client_to_bpmp(struct mbox_client
*client
)
31 return container_of(client
, struct tegra_bpmp
, mbox
.client
);
34 static inline const struct tegra_bpmp_ops
*
35 channel_to_ops(struct tegra_bpmp_channel
*channel
)
37 struct tegra_bpmp
*bpmp
= channel
->bpmp
;
39 return bpmp
->soc
->ops
;
42 struct tegra_bpmp
*tegra_bpmp_get(struct device
*dev
)
44 struct platform_device
*pdev
;
45 struct tegra_bpmp
*bpmp
;
46 struct device_node
*np
;
48 np
= of_parse_phandle(dev
->of_node
, "nvidia,bpmp", 0);
50 return ERR_PTR(-ENOENT
);
52 pdev
= of_find_device_by_node(np
);
54 bpmp
= ERR_PTR(-ENODEV
);
58 bpmp
= platform_get_drvdata(pdev
);
60 bpmp
= ERR_PTR(-EPROBE_DEFER
);
61 put_device(&pdev
->dev
);
69 EXPORT_SYMBOL_GPL(tegra_bpmp_get
);
71 void tegra_bpmp_put(struct tegra_bpmp
*bpmp
)
74 put_device(bpmp
->dev
);
76 EXPORT_SYMBOL_GPL(tegra_bpmp_put
);
79 tegra_bpmp_channel_get_thread_index(struct tegra_bpmp_channel
*channel
)
81 struct tegra_bpmp
*bpmp
= channel
->bpmp
;
85 count
= bpmp
->soc
->channels
.thread
.count
;
87 index
= channel
- channel
->bpmp
->threaded_channels
;
88 if (index
< 0 || index
>= count
)
94 static bool tegra_bpmp_message_valid(const struct tegra_bpmp_message
*msg
)
96 return (msg
->tx
.size
<= MSG_DATA_MIN_SZ
) &&
97 (msg
->rx
.size
<= MSG_DATA_MIN_SZ
) &&
98 (msg
->tx
.size
== 0 || msg
->tx
.data
) &&
99 (msg
->rx
.size
== 0 || msg
->rx
.data
);
102 static bool tegra_bpmp_is_response_ready(struct tegra_bpmp_channel
*channel
)
104 const struct tegra_bpmp_ops
*ops
= channel_to_ops(channel
);
106 return ops
->is_response_ready(channel
);
109 static bool tegra_bpmp_is_request_ready(struct tegra_bpmp_channel
*channel
)
111 const struct tegra_bpmp_ops
*ops
= channel_to_ops(channel
);
113 return ops
->is_request_ready(channel
);
116 static int tegra_bpmp_wait_response(struct tegra_bpmp_channel
*channel
)
118 unsigned long timeout
= channel
->bpmp
->soc
->channels
.cpu_tx
.timeout
;
121 end
= ktime_add_us(ktime_get(), timeout
);
124 if (tegra_bpmp_is_response_ready(channel
))
126 } while (ktime_before(ktime_get(), end
));
131 static int tegra_bpmp_ack_response(struct tegra_bpmp_channel
*channel
)
133 const struct tegra_bpmp_ops
*ops
= channel_to_ops(channel
);
135 return ops
->ack_response(channel
);
138 static int tegra_bpmp_ack_request(struct tegra_bpmp_channel
*channel
)
140 const struct tegra_bpmp_ops
*ops
= channel_to_ops(channel
);
142 return ops
->ack_request(channel
);
146 tegra_bpmp_is_request_channel_free(struct tegra_bpmp_channel
*channel
)
148 const struct tegra_bpmp_ops
*ops
= channel_to_ops(channel
);
150 return ops
->is_request_channel_free(channel
);
154 tegra_bpmp_is_response_channel_free(struct tegra_bpmp_channel
*channel
)
156 const struct tegra_bpmp_ops
*ops
= channel_to_ops(channel
);
158 return ops
->is_response_channel_free(channel
);
162 tegra_bpmp_wait_request_channel_free(struct tegra_bpmp_channel
*channel
)
164 unsigned long timeout
= channel
->bpmp
->soc
->channels
.cpu_tx
.timeout
;
167 start
= ns_to_ktime(local_clock());
170 if (tegra_bpmp_is_request_channel_free(channel
))
173 now
= ns_to_ktime(local_clock());
174 } while (ktime_us_delta(now
, start
) < timeout
);
179 static int tegra_bpmp_post_request(struct tegra_bpmp_channel
*channel
)
181 const struct tegra_bpmp_ops
*ops
= channel_to_ops(channel
);
183 return ops
->post_request(channel
);
186 static int tegra_bpmp_post_response(struct tegra_bpmp_channel
*channel
)
188 const struct tegra_bpmp_ops
*ops
= channel_to_ops(channel
);
190 return ops
->post_response(channel
);
193 static int tegra_bpmp_ring_doorbell(struct tegra_bpmp
*bpmp
)
195 return bpmp
->soc
->ops
->ring_doorbell(bpmp
);
198 static ssize_t
__tegra_bpmp_channel_read(struct tegra_bpmp_channel
*channel
,
199 void *data
, size_t size
, int *ret
)
203 if (data
&& size
> 0)
204 memcpy(data
, channel
->ib
->data
, size
);
206 err
= tegra_bpmp_ack_response(channel
);
210 *ret
= channel
->ib
->code
;
215 static ssize_t
tegra_bpmp_channel_read(struct tegra_bpmp_channel
*channel
,
216 void *data
, size_t size
, int *ret
)
218 struct tegra_bpmp
*bpmp
= channel
->bpmp
;
223 index
= tegra_bpmp_channel_get_thread_index(channel
);
229 spin_lock_irqsave(&bpmp
->lock
, flags
);
230 err
= __tegra_bpmp_channel_read(channel
, data
, size
, ret
);
231 clear_bit(index
, bpmp
->threaded
.allocated
);
232 spin_unlock_irqrestore(&bpmp
->lock
, flags
);
235 up(&bpmp
->threaded
.lock
);
240 static ssize_t
__tegra_bpmp_channel_write(struct tegra_bpmp_channel
*channel
,
241 unsigned int mrq
, unsigned long flags
,
242 const void *data
, size_t size
)
244 channel
->ob
->code
= mrq
;
245 channel
->ob
->flags
= flags
;
247 if (data
&& size
> 0)
248 memcpy(channel
->ob
->data
, data
, size
);
250 return tegra_bpmp_post_request(channel
);
253 static struct tegra_bpmp_channel
*
254 tegra_bpmp_write_threaded(struct tegra_bpmp
*bpmp
, unsigned int mrq
,
255 const void *data
, size_t size
)
257 unsigned long timeout
= bpmp
->soc
->channels
.thread
.timeout
;
258 unsigned int count
= bpmp
->soc
->channels
.thread
.count
;
259 struct tegra_bpmp_channel
*channel
;
264 err
= down_timeout(&bpmp
->threaded
.lock
, usecs_to_jiffies(timeout
));
268 spin_lock_irqsave(&bpmp
->lock
, flags
);
270 index
= find_first_zero_bit(bpmp
->threaded
.allocated
, count
);
271 if (index
== count
) {
276 channel
= &bpmp
->threaded_channels
[index
];
278 if (!tegra_bpmp_is_request_channel_free(channel
)) {
283 set_bit(index
, bpmp
->threaded
.allocated
);
285 err
= __tegra_bpmp_channel_write(channel
, mrq
, MSG_ACK
| MSG_RING
,
288 goto clear_allocated
;
290 set_bit(index
, bpmp
->threaded
.busy
);
292 spin_unlock_irqrestore(&bpmp
->lock
, flags
);
296 clear_bit(index
, bpmp
->threaded
.allocated
);
298 spin_unlock_irqrestore(&bpmp
->lock
, flags
);
299 up(&bpmp
->threaded
.lock
);
304 static ssize_t
tegra_bpmp_channel_write(struct tegra_bpmp_channel
*channel
,
305 unsigned int mrq
, unsigned long flags
,
306 const void *data
, size_t size
)
310 err
= tegra_bpmp_wait_request_channel_free(channel
);
314 return __tegra_bpmp_channel_write(channel
, mrq
, flags
, data
, size
);
317 int tegra_bpmp_transfer_atomic(struct tegra_bpmp
*bpmp
,
318 struct tegra_bpmp_message
*msg
)
320 struct tegra_bpmp_channel
*channel
;
323 if (WARN_ON(!irqs_disabled()))
326 if (!tegra_bpmp_message_valid(msg
))
329 channel
= bpmp
->tx_channel
;
331 spin_lock(&bpmp
->atomic_tx_lock
);
333 err
= tegra_bpmp_channel_write(channel
, msg
->mrq
, MSG_ACK
,
334 msg
->tx
.data
, msg
->tx
.size
);
336 spin_unlock(&bpmp
->atomic_tx_lock
);
340 spin_unlock(&bpmp
->atomic_tx_lock
);
342 err
= tegra_bpmp_ring_doorbell(bpmp
);
346 err
= tegra_bpmp_wait_response(channel
);
350 return __tegra_bpmp_channel_read(channel
, msg
->rx
.data
, msg
->rx
.size
,
353 EXPORT_SYMBOL_GPL(tegra_bpmp_transfer_atomic
);
355 int tegra_bpmp_transfer(struct tegra_bpmp
*bpmp
,
356 struct tegra_bpmp_message
*msg
)
358 struct tegra_bpmp_channel
*channel
;
359 unsigned long timeout
;
362 if (WARN_ON(irqs_disabled()))
365 if (!tegra_bpmp_message_valid(msg
))
368 channel
= tegra_bpmp_write_threaded(bpmp
, msg
->mrq
, msg
->tx
.data
,
371 return PTR_ERR(channel
);
373 err
= tegra_bpmp_ring_doorbell(bpmp
);
377 timeout
= usecs_to_jiffies(bpmp
->soc
->channels
.thread
.timeout
);
379 err
= wait_for_completion_timeout(&channel
->completion
, timeout
);
383 return tegra_bpmp_channel_read(channel
, msg
->rx
.data
, msg
->rx
.size
,
386 EXPORT_SYMBOL_GPL(tegra_bpmp_transfer
);
388 static struct tegra_bpmp_mrq
*tegra_bpmp_find_mrq(struct tegra_bpmp
*bpmp
,
391 struct tegra_bpmp_mrq
*entry
;
393 list_for_each_entry(entry
, &bpmp
->mrqs
, list
)
394 if (entry
->mrq
== mrq
)
400 void tegra_bpmp_mrq_return(struct tegra_bpmp_channel
*channel
, int code
,
401 const void *data
, size_t size
)
403 unsigned long flags
= channel
->ib
->flags
;
404 struct tegra_bpmp
*bpmp
= channel
->bpmp
;
407 if (WARN_ON(size
> MSG_DATA_MIN_SZ
))
410 err
= tegra_bpmp_ack_request(channel
);
411 if (WARN_ON(err
< 0))
414 if ((flags
& MSG_ACK
) == 0)
417 if (WARN_ON(!tegra_bpmp_is_response_channel_free(channel
)))
420 channel
->ob
->code
= code
;
422 if (data
&& size
> 0)
423 memcpy(channel
->ob
->data
, data
, size
);
425 err
= tegra_bpmp_post_response(channel
);
426 if (WARN_ON(err
< 0))
429 if (flags
& MSG_RING
) {
430 err
= tegra_bpmp_ring_doorbell(bpmp
);
431 if (WARN_ON(err
< 0))
435 EXPORT_SYMBOL_GPL(tegra_bpmp_mrq_return
);
437 static void tegra_bpmp_handle_mrq(struct tegra_bpmp
*bpmp
,
439 struct tegra_bpmp_channel
*channel
)
441 struct tegra_bpmp_mrq
*entry
;
444 spin_lock(&bpmp
->lock
);
446 entry
= tegra_bpmp_find_mrq(bpmp
, mrq
);
448 spin_unlock(&bpmp
->lock
);
449 tegra_bpmp_mrq_return(channel
, -EINVAL
, &zero
, sizeof(zero
));
453 entry
->handler(mrq
, channel
, entry
->data
);
455 spin_unlock(&bpmp
->lock
);
458 int tegra_bpmp_request_mrq(struct tegra_bpmp
*bpmp
, unsigned int mrq
,
459 tegra_bpmp_mrq_handler_t handler
, void *data
)
461 struct tegra_bpmp_mrq
*entry
;
467 entry
= devm_kzalloc(bpmp
->dev
, sizeof(*entry
), GFP_KERNEL
);
471 spin_lock_irqsave(&bpmp
->lock
, flags
);
474 entry
->handler
= handler
;
476 list_add(&entry
->list
, &bpmp
->mrqs
);
478 spin_unlock_irqrestore(&bpmp
->lock
, flags
);
482 EXPORT_SYMBOL_GPL(tegra_bpmp_request_mrq
);
484 void tegra_bpmp_free_mrq(struct tegra_bpmp
*bpmp
, unsigned int mrq
, void *data
)
486 struct tegra_bpmp_mrq
*entry
;
489 spin_lock_irqsave(&bpmp
->lock
, flags
);
491 entry
= tegra_bpmp_find_mrq(bpmp
, mrq
);
495 list_del(&entry
->list
);
496 devm_kfree(bpmp
->dev
, entry
);
499 spin_unlock_irqrestore(&bpmp
->lock
, flags
);
501 EXPORT_SYMBOL_GPL(tegra_bpmp_free_mrq
);
503 bool tegra_bpmp_mrq_is_supported(struct tegra_bpmp
*bpmp
, unsigned int mrq
)
505 struct mrq_query_abi_request req
= { .mrq
= cpu_to_le32(mrq
) };
506 struct mrq_query_abi_response resp
;
507 struct tegra_bpmp_message msg
= {
508 .mrq
= MRQ_QUERY_ABI
,
515 .size
= sizeof(resp
),
520 err
= tegra_bpmp_transfer(bpmp
, &msg
);
521 if (err
|| msg
.rx
.ret
)
524 return resp
.status
== 0;
526 EXPORT_SYMBOL_GPL(tegra_bpmp_mrq_is_supported
);
528 static void tegra_bpmp_mrq_handle_ping(unsigned int mrq
,
529 struct tegra_bpmp_channel
*channel
,
532 struct mrq_ping_request
*request
;
533 struct mrq_ping_response response
;
535 request
= (struct mrq_ping_request
*)channel
->ib
->data
;
537 memset(&response
, 0, sizeof(response
));
538 response
.reply
= request
->challenge
<< 1;
540 tegra_bpmp_mrq_return(channel
, 0, &response
, sizeof(response
));
543 static int tegra_bpmp_ping(struct tegra_bpmp
*bpmp
)
545 struct mrq_ping_response response
;
546 struct mrq_ping_request request
;
547 struct tegra_bpmp_message msg
;
552 memset(&request
, 0, sizeof(request
));
553 request
.challenge
= 1;
555 memset(&response
, 0, sizeof(response
));
557 memset(&msg
, 0, sizeof(msg
));
559 msg
.tx
.data
= &request
;
560 msg
.tx
.size
= sizeof(request
);
561 msg
.rx
.data
= &response
;
562 msg
.rx
.size
= sizeof(response
);
564 local_irq_save(flags
);
566 err
= tegra_bpmp_transfer_atomic(bpmp
, &msg
);
568 local_irq_restore(flags
);
572 "ping ok: challenge: %u, response: %u, time: %lld\n",
573 request
.challenge
, response
.reply
,
574 ktime_to_us(ktime_sub(end
, start
)));
579 /* deprecated version of tag query */
580 static int tegra_bpmp_get_firmware_tag_old(struct tegra_bpmp
*bpmp
, char *tag
,
583 struct mrq_query_tag_request request
;
584 struct tegra_bpmp_message msg
;
593 virt
= dma_alloc_coherent(bpmp
->dev
, TAG_SZ
, &phys
,
594 GFP_KERNEL
| GFP_DMA32
);
598 memset(&request
, 0, sizeof(request
));
601 memset(&msg
, 0, sizeof(msg
));
602 msg
.mrq
= MRQ_QUERY_TAG
;
603 msg
.tx
.data
= &request
;
604 msg
.tx
.size
= sizeof(request
);
606 local_irq_save(flags
);
607 err
= tegra_bpmp_transfer_atomic(bpmp
, &msg
);
608 local_irq_restore(flags
);
611 memcpy(tag
, virt
, TAG_SZ
);
613 dma_free_coherent(bpmp
->dev
, TAG_SZ
, virt
, phys
);
618 static int tegra_bpmp_get_firmware_tag(struct tegra_bpmp
*bpmp
, char *tag
,
621 if (tegra_bpmp_mrq_is_supported(bpmp
, MRQ_QUERY_FW_TAG
)) {
622 struct mrq_query_fw_tag_response resp
;
623 struct tegra_bpmp_message msg
= {
624 .mrq
= MRQ_QUERY_FW_TAG
,
627 .size
= sizeof(resp
),
632 if (size
!= sizeof(resp
.tag
))
635 err
= tegra_bpmp_transfer(bpmp
, &msg
);
642 memcpy(tag
, resp
.tag
, sizeof(resp
.tag
));
646 return tegra_bpmp_get_firmware_tag_old(bpmp
, tag
, size
);
649 static void tegra_bpmp_channel_signal(struct tegra_bpmp_channel
*channel
)
651 unsigned long flags
= channel
->ob
->flags
;
653 if ((flags
& MSG_RING
) == 0)
656 complete(&channel
->completion
);
659 void tegra_bpmp_handle_rx(struct tegra_bpmp
*bpmp
)
661 struct tegra_bpmp_channel
*channel
;
662 unsigned int i
, count
;
665 channel
= bpmp
->rx_channel
;
666 count
= bpmp
->soc
->channels
.thread
.count
;
667 busy
= bpmp
->threaded
.busy
;
669 if (tegra_bpmp_is_request_ready(channel
))
670 tegra_bpmp_handle_mrq(bpmp
, channel
->ib
->code
, channel
);
672 spin_lock(&bpmp
->lock
);
674 for_each_set_bit(i
, busy
, count
) {
675 struct tegra_bpmp_channel
*channel
;
677 channel
= &bpmp
->threaded_channels
[i
];
679 if (tegra_bpmp_is_response_ready(channel
)) {
680 tegra_bpmp_channel_signal(channel
);
685 spin_unlock(&bpmp
->lock
);
688 static int tegra_bpmp_probe(struct platform_device
*pdev
)
690 struct tegra_bpmp
*bpmp
;
695 bpmp
= devm_kzalloc(&pdev
->dev
, sizeof(*bpmp
), GFP_KERNEL
);
699 bpmp
->soc
= of_device_get_match_data(&pdev
->dev
);
700 bpmp
->dev
= &pdev
->dev
;
702 INIT_LIST_HEAD(&bpmp
->mrqs
);
703 spin_lock_init(&bpmp
->lock
);
705 bpmp
->threaded
.count
= bpmp
->soc
->channels
.thread
.count
;
706 sema_init(&bpmp
->threaded
.lock
, bpmp
->threaded
.count
);
708 size
= BITS_TO_LONGS(bpmp
->threaded
.count
) * sizeof(long);
710 bpmp
->threaded
.allocated
= devm_kzalloc(&pdev
->dev
, size
, GFP_KERNEL
);
711 if (!bpmp
->threaded
.allocated
)
714 bpmp
->threaded
.busy
= devm_kzalloc(&pdev
->dev
, size
, GFP_KERNEL
);
715 if (!bpmp
->threaded
.busy
)
718 spin_lock_init(&bpmp
->atomic_tx_lock
);
719 bpmp
->tx_channel
= devm_kzalloc(&pdev
->dev
, sizeof(*bpmp
->tx_channel
),
721 if (!bpmp
->tx_channel
)
724 bpmp
->rx_channel
= devm_kzalloc(&pdev
->dev
, sizeof(*bpmp
->rx_channel
),
726 if (!bpmp
->rx_channel
)
729 bpmp
->threaded_channels
= devm_kcalloc(&pdev
->dev
, bpmp
->threaded
.count
,
730 sizeof(*bpmp
->threaded_channels
),
732 if (!bpmp
->threaded_channels
)
735 err
= bpmp
->soc
->ops
->init(bpmp
);
739 err
= tegra_bpmp_request_mrq(bpmp
, MRQ_PING
,
740 tegra_bpmp_mrq_handle_ping
, bpmp
);
744 err
= tegra_bpmp_ping(bpmp
);
746 dev_err(&pdev
->dev
, "failed to ping BPMP: %d\n", err
);
750 err
= tegra_bpmp_get_firmware_tag(bpmp
, tag
, sizeof(tag
));
752 dev_err(&pdev
->dev
, "failed to get firmware tag: %d\n", err
);
756 dev_info(&pdev
->dev
, "firmware: %.*s\n", (int)sizeof(tag
), tag
);
758 platform_set_drvdata(pdev
, bpmp
);
760 err
= of_platform_default_populate(pdev
->dev
.of_node
, NULL
, &pdev
->dev
);
764 if (of_find_property(pdev
->dev
.of_node
, "#clock-cells", NULL
)) {
765 err
= tegra_bpmp_init_clocks(bpmp
);
770 if (of_find_property(pdev
->dev
.of_node
, "#reset-cells", NULL
)) {
771 err
= tegra_bpmp_init_resets(bpmp
);
776 if (of_find_property(pdev
->dev
.of_node
, "#power-domain-cells", NULL
)) {
777 err
= tegra_bpmp_init_powergates(bpmp
);
782 err
= tegra_bpmp_init_debugfs(bpmp
);
784 dev_err(&pdev
->dev
, "debugfs initialization failed: %d\n", err
);
789 tegra_bpmp_free_mrq(bpmp
, MRQ_PING
, bpmp
);
791 if (bpmp
->soc
->ops
->deinit
)
792 bpmp
->soc
->ops
->deinit(bpmp
);
797 static int __maybe_unused
tegra_bpmp_resume(struct device
*dev
)
799 struct tegra_bpmp
*bpmp
= dev_get_drvdata(dev
);
801 if (bpmp
->soc
->ops
->resume
)
802 return bpmp
->soc
->ops
->resume(bpmp
);
807 static const struct dev_pm_ops tegra_bpmp_pm_ops
= {
808 .resume_noirq
= tegra_bpmp_resume
,
811 #if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
812 IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
813 static const struct tegra_bpmp_soc tegra186_soc
= {
817 .timeout
= 60 * USEC_PER_SEC
,
822 .timeout
= 600 * USEC_PER_SEC
,
829 .ops
= &tegra186_bpmp_ops
,
834 #if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
835 static const struct tegra_bpmp_soc tegra210_soc
= {
840 .timeout
= 60 * USEC_PER_SEC
,
845 .timeout
= 600 * USEC_PER_SEC
,
853 .ops
= &tegra210_bpmp_ops
,
857 static const struct of_device_id tegra_bpmp_match
[] = {
858 #if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
859 IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \
860 IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
861 { .compatible
= "nvidia,tegra186-bpmp", .data
= &tegra186_soc
},
863 #if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
864 { .compatible
= "nvidia,tegra210-bpmp", .data
= &tegra210_soc
},
869 static struct platform_driver tegra_bpmp_driver
= {
871 .name
= "tegra-bpmp",
872 .of_match_table
= tegra_bpmp_match
,
873 .pm
= &tegra_bpmp_pm_ops
,
874 .suppress_bind_attrs
= true,
876 .probe
= tegra_bpmp_probe
,
878 builtin_platform_driver(tegra_bpmp_driver
);