// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Message Protocol driver
 *
 * SCMI Message Protocol is used between the System Control Processor(SCP)
 * and the Application Processors(AP). The Message Handling Unit(MHU)
 * provides a mechanism for inter-processor communication between SCP's
 * Cortex M3 and AP.
 *
 * SCP offers control and management of the core/cluster power states,
 * various power domain DVFS including the core/cluster, certain system
 * clocks configuration, thermal sensors and many others.
 *
 * Copyright (C) 2018 ARM Ltd.
 */

#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/processor.h>
#include <linux/slab.h>

#include "common.h"
#include "notify.h"

#define CREATE_TRACE_POINTS
#include <trace/events/scmi.h>

enum scmi_error_codes {
	SCMI_SUCCESS = 0,	/* Success */
	SCMI_ERR_SUPPORT = -1,	/* Not supported */
	SCMI_ERR_PARAMS = -2,	/* Invalid Parameters */
	SCMI_ERR_ACCESS = -3,	/* Invalid access/permission denied */
	SCMI_ERR_ENTRY = -4,	/* Not found */
	SCMI_ERR_RANGE = -5,	/* Value out of range */
	SCMI_ERR_BUSY = -6,	/* Device busy */
	SCMI_ERR_COMMS = -7,	/* Communication Error */
	SCMI_ERR_GENERIC = -8,	/* Generic Error */
	SCMI_ERR_HARDWARE = -9,	/* Hardware Error */
	SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
	SCMI_ERR_MAX
};

/* List of all SCMI devices active in system */
static LIST_HEAD(scmi_list);
/* Protection for the entire list */
static DEFINE_MUTEX(scmi_list_mutex);
/* Track the unique id for the transfers for debug & profiling purpose */
static atomic_t transfer_last_id;

/**
 * struct scmi_xfers_info - Structure to manage transfer information
 *
 * @xfer_block: Preallocated Message array
 * @xfer_alloc_table: Bitmap table for allocated messages.
 *	Index of this bitmap table is also used for message
 *	sequence identifier.
 * @xfer_lock: Protection for message allocation
 */
struct scmi_xfers_info {
	struct scmi_xfer *xfer_block;
	unsigned long *xfer_alloc_table;
	spinlock_t xfer_lock;
};

/**
 * struct scmi_info - Structure representing a SCMI instance
 *
 * @dev: Device pointer
 * @desc: SoC description for this instance
 * @version: SCMI revision information containing protocol version,
 *	implementation version and (sub-)vendor identification.
 * @handle: Instance of SCMI handle to send to clients
 * @tx_minfo: Universal Transmit Message management info
 * @rx_minfo: Universal Receive Message management info
 * @tx_idr: IDR object to map protocol id to Tx channel info pointer
 * @rx_idr: IDR object to map protocol id to Rx channel info pointer
 * @protocols_imp: List of protocols implemented, currently maximum of
 *	MAX_PROTOCOLS_IMP elements allocated by the base protocol
 * @node: List head
 * @users: Number of users of this instance
 */
struct scmi_info {
	struct device *dev;
	const struct scmi_desc *desc;
	struct scmi_revision_info version;
	struct scmi_handle handle;
	struct scmi_xfers_info tx_minfo;
	struct scmi_xfers_info rx_minfo;
	struct idr tx_idr;
	struct idr rx_idr;
	u8 *protocols_imp;
	struct list_head node;
	int users;
};

#define handle_to_scmi_info(h)	container_of(h, struct scmi_info, handle)

static const int scmi_linux_errmap[] = {
	/* better than switch case as long as return value is continuous */
	0,			/* SCMI_SUCCESS */
	-EOPNOTSUPP,		/* SCMI_ERR_SUPPORT */
	-EINVAL,		/* SCMI_ERR_PARAMS */
	-EACCES,		/* SCMI_ERR_ACCESS */
	-ENOENT,		/* SCMI_ERR_ENTRY */
	-ERANGE,		/* SCMI_ERR_RANGE */
	-EBUSY,			/* SCMI_ERR_BUSY */
	-ECOMM,			/* SCMI_ERR_COMMS */
	-EIO,			/* SCMI_ERR_GENERIC */
	-EREMOTEIO,		/* SCMI_ERR_HARDWARE */
	-EPROTO,		/* SCMI_ERR_PROTOCOL */
};

static inline int scmi_to_linux_errno(int errno)
{
	if (errno < SCMI_SUCCESS && errno > SCMI_ERR_MAX)
		return scmi_linux_errmap[-errno];
	return -EIO;
}

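/*
 * Illustrative example of the mapping above: a platform replying with
 * status SCMI_ERR_ENTRY (-4) yields
 *
 *	scmi_to_linux_errno(-4) == scmi_linux_errmap[4] == -ENOENT
 *
 * which is why scmi_linux_errmap[] must stay ordered exactly as
 * enum scmi_error_codes.
 */
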
/**
 * scmi_dump_header_dbg() - Helper to dump a message header.
 *
 * @dev: Device pointer corresponding to the SCMI entity
 * @hdr: pointer to header.
 */
static inline void scmi_dump_header_dbg(struct device *dev,
					struct scmi_msg_hdr *hdr)
{
	dev_dbg(dev, "Message ID: %x Sequence ID: %x Protocol: %x\n",
		hdr->id, hdr->seq, hdr->protocol_id);
}

/**
 * scmi_xfer_get() - Allocate one message
 *
 * @handle: Pointer to SCMI entity handle
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 *
 * Helper function which is used by various message functions that are
 * exposed to clients of this driver for allocating a message traffic event.
 *
 * This function can sleep depending on pending requests already in the system
 * for the SCMI entity. Further, this also holds a spinlock to maintain
 * integrity of internal data structures.
 *
 * Return: a valid message reference on success, else an ERR_PTR()-encoded
 *	error.
 */
static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
				       struct scmi_xfers_info *minfo)
{
	u16 xfer_id;
	struct scmi_xfer *xfer;
	unsigned long flags, bit_pos;
	struct scmi_info *info = handle_to_scmi_info(handle);

	/* Keep the locked section as small as possible */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
				      info->desc->max_msg);
	if (bit_pos == info->desc->max_msg) {
		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
		return ERR_PTR(-ENOMEM);
	}
	set_bit(bit_pos, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	xfer_id = bit_pos;

	xfer = &minfo->xfer_block[xfer_id];
	xfer->hdr.seq = xfer_id;
	reinit_completion(&xfer->done);
	xfer->transfer_id = atomic_inc_return(&transfer_last_id);

	return xfer;
}

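/*
 * A sketch of the slot lifecycle implemented above, assuming a transport
 * descriptor with max_msg == 2 (the walk-through below is purely
 * illustrative):
 *
 *	xfer_alloc_table 00 -> scmi_xfer_get()        -> 01 (seq 0 in flight)
 *	xfer_alloc_table 01 -> scmi_xfer_get()        -> 11 (seq 1 in flight)
 *	xfer_alloc_table 11 -> scmi_xfer_get()        -> ERR_PTR(-ENOMEM)
 *	xfer_alloc_table 11 -> __scmi_xfer_put(seq 0) -> 10
 *
 * The bitmap position doubles as hdr.seq, so the token echoed back by the
 * platform indexes the matching xfer in xfer_block directly.
 */
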
/**
 * __scmi_xfer_put() - Release a message
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer: message that was reserved by scmi_xfer_get
 *
 * This holds a spinlock to maintain integrity of internal data structures.
 */
static void
__scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
{
	unsigned long flags;

	/*
	 * Keep the locked section as small as possible
	 * NOTE: we might escape with smp_mb and no lock here..
	 * but just be conservative and symmetric.
	 */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
}

static void scmi_handle_notification(struct scmi_chan_info *cinfo, u32 msg_hdr)
{
	struct scmi_xfer *xfer;
	struct device *dev = cinfo->dev;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct scmi_xfers_info *minfo = &info->rx_minfo;
	ktime_t ts;

	ts = ktime_get_boottime_ns();
	xfer = scmi_xfer_get(cinfo->handle, minfo);
	if (IS_ERR(xfer)) {
		dev_err(dev, "failed to get free message slot (%ld)\n",
			PTR_ERR(xfer));
		info->desc->ops->clear_channel(cinfo);
		return;
	}

	unpack_scmi_header(msg_hdr, &xfer->hdr);
	scmi_dump_header_dbg(dev, &xfer->hdr);
	info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
					    xfer);
	scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
		    xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);

	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
			   xfer->hdr.protocol_id, xfer->hdr.seq,
			   MSG_TYPE_NOTIFICATION);

	__scmi_xfer_put(minfo, xfer);

	info->desc->ops->clear_channel(cinfo);
}

static void scmi_handle_response(struct scmi_chan_info *cinfo,
				 u16 xfer_id, u8 msg_type)
{
	struct scmi_xfer *xfer;
	struct device *dev = cinfo->dev;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct scmi_xfers_info *minfo = &info->tx_minfo;

	/* Are we even expecting this? */
	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
		dev_err(dev, "message for %d is not expected!\n", xfer_id);
		info->desc->ops->clear_channel(cinfo);
		return;
	}

	xfer = &minfo->xfer_block[xfer_id];
	/*
	 * Even if a response was indeed expected on this slot at this point,
	 * a buggy platform could wrongly reply feeding us an unexpected
	 * delayed response we're not prepared to handle: bail-out safely
	 * blaming firmware.
	 */
	if (unlikely(msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done)) {
		dev_err(dev,
			"Delayed Response for %d not expected! Buggy F/W ?\n",
			xfer_id);
		info->desc->ops->clear_channel(cinfo);
		/* It was unexpected, so nobody will clear the xfer if not us */
		__scmi_xfer_put(minfo, xfer);
		return;
	}

	scmi_dump_header_dbg(dev, &xfer->hdr);

	info->desc->ops->fetch_response(cinfo, xfer);

	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
			   xfer->hdr.protocol_id, xfer->hdr.seq,
			   msg_type);

	if (msg_type == MSG_TYPE_DELAYED_RESP) {
		info->desc->ops->clear_channel(cinfo);
		complete(xfer->async_done);
	} else {
		complete(&xfer->done);
	}
}

/**
 * scmi_rx_callback() - callback for receiving messages
 *
 * @cinfo: SCMI channel info
 * @msg_hdr: Message header
 *
 * Processes one received message to appropriate transfer information and
 * signals completion of the transfer.
 *
 * NOTE: This function will be invoked in IRQ context, hence should be
 * as optimal as possible.
 */
void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr)
{
	u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
	u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);

	switch (msg_type) {
	case MSG_TYPE_NOTIFICATION:
		scmi_handle_notification(cinfo, msg_hdr);
		break;
	case MSG_TYPE_COMMAND:
	case MSG_TYPE_DELAYED_RESP:
		scmi_handle_response(cinfo, xfer_id, msg_type);
		break;
	default:
		WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
		break;
	}
}

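/*
 * For reference, the 32-bit message header decoded here is laid out as
 * below (bit positions assumed per the MSG_* masks in common.h):
 *
 *	| 31..28   | 27..18 | 17..10      | 9..8 | 7..0       |
 *	| reserved | token  | protocol id | type | message id |
 *
 * MSG_XTRACT_TOKEN() and MSG_XTRACT_TYPE() above are FIELD_GET() helpers
 * over the token and type fields respectively.
 */
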
/**
 * scmi_xfer_put() - Release a transmit message
 *
 * @handle: Pointer to SCMI entity handle
 * @xfer: message that was reserved by scmi_xfer_get
 */
void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	__scmi_xfer_put(&info->tx_minfo, xfer);
}

#define SCMI_MAX_POLL_TO_NS	(100 * NSEC_PER_USEC)

static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
				      struct scmi_xfer *xfer, ktime_t stop)
{
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);

	return info->desc->ops->poll_done(cinfo, xfer) ||
	       ktime_after(ktime_get(), stop);
}

/**
 * scmi_do_xfer() - Do one transfer
 *
 * @handle: Pointer to SCMI entity handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Return: -ETIMEDOUT in case of no response, if transmit error,
 *	return corresponding error, else if all goes well,
 *	return 0.
 */
int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
{
	int ret;
	int timeout;
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct device *dev = info->dev;
	struct scmi_chan_info *cinfo;

	cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id);
	if (unlikely(!cinfo))
		return -EINVAL;

	trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
			      xfer->hdr.protocol_id, xfer->hdr.seq,
			      xfer->hdr.poll_completion);

	ret = info->desc->ops->send_message(cinfo, xfer);
	if (ret < 0) {
		dev_dbg(dev, "Failed to send message %d\n", ret);
		return ret;
	}

	if (xfer->hdr.poll_completion) {
		ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS);

		spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));

		if (ktime_before(ktime_get(), stop))
			info->desc->ops->fetch_response(cinfo, xfer);
		else
			ret = -ETIMEDOUT;
	} else {
		/* And we wait for the response. */
		timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
		if (!wait_for_completion_timeout(&xfer->done, timeout)) {
			dev_err(dev, "timed out in resp(caller: %pS)\n",
				(void *)_RET_IP_);
			ret = -ETIMEDOUT;
		}
	}

	if (!ret && xfer->hdr.status)
		ret = scmi_to_linux_errno(xfer->hdr.status);

	if (info->desc->ops->mark_txdone)
		info->desc->ops->mark_txdone(cinfo, ret);

	trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
			    xfer->hdr.protocol_id, xfer->hdr.seq, ret);

	return ret;
}

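/*
 * A minimal usage sketch of the synchronous path, mirroring what the
 * protocol implementations in this directory do; MY_MSG_ID, MY_PROT_ID
 * and domain are hypothetical placeholders:
 *
 *	struct scmi_xfer *t;
 *	__le32 *msg;
 *	int ret;
 *
 *	ret = scmi_xfer_get_init(handle, MY_MSG_ID, MY_PROT_ID,
 *				 sizeof(*msg), 0, &t);
 *	if (ret)
 *		return ret;
 *
 *	msg = t->tx.buf;
 *	*msg = cpu_to_le32(domain);
 *	ret = scmi_do_xfer(handle, t);
 *	if (!ret)
 *		... parse t->rx.buf ...
 *	scmi_xfer_put(handle, t);
 */
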
#define SCMI_MAX_RESPONSE_TIMEOUT	(2 * MSEC_PER_SEC)

/**
 * scmi_do_xfer_with_response() - Do one transfer and wait until the delayed
 *	response is received
 *
 * @handle: Pointer to SCMI entity handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Return: -ETIMEDOUT in case of no delayed response, if transmit error,
 *	return corresponding error, else if all goes well, return 0.
 */
int scmi_do_xfer_with_response(const struct scmi_handle *handle,
			       struct scmi_xfer *xfer)
{
	int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
	DECLARE_COMPLETION_ONSTACK(async_response);

	xfer->async_done = &async_response;

	ret = scmi_do_xfer(handle, xfer);
	if (!ret && !wait_for_completion_timeout(xfer->async_done, timeout))
		ret = -ETIMEDOUT;

	xfer->async_done = NULL;

	return ret;
}

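/*
 * Sketch of the asynchronous variant (used by commands that complete with
 * a delayed response, e.g. asynchronous resets); the message itself is
 * hypothetical:
 *
 *	ret = scmi_xfer_get_init(handle, MY_ASYNC_MSG, MY_PROT_ID,
 *				 tx_size, 0, &t);
 *	...
 *	ret = scmi_do_xfer_with_response(handle, t);
 *
 * The immediate response completes xfer->done inside scmi_do_xfer(),
 * while the later MSG_TYPE_DELAYED_RESP completes the on-stack
 * async_response declared above.
 */
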
/**
 * scmi_xfer_get_init() - Allocate and initialise one message for transmit
 *
 * @handle: Pointer to SCMI entity handle
 * @msg_id: Message identifier
 * @prot_id: Protocol identifier for the message
 * @tx_size: transmit message size
 * @rx_size: receive message size
 * @p: pointer to the allocated and initialised message
 *
 * This function allocates the message using @scmi_xfer_get and
 * initialises the header.
 *
 * Return: 0 if all went fine with @p pointing to message, else
 *	corresponding error.
 */
int scmi_xfer_get_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id,
		       size_t tx_size, size_t rx_size, struct scmi_xfer **p)
{
	int ret;
	struct scmi_xfer *xfer;
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct scmi_xfers_info *minfo = &info->tx_minfo;
	struct device *dev = info->dev;

	/* Ensure we have sane transfer sizes */
	if (rx_size > info->desc->max_msg_size ||
	    tx_size > info->desc->max_msg_size)
		return -ERANGE;

	xfer = scmi_xfer_get(handle, minfo);
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "failed to get free message slot(%d)\n", ret);
		return ret;
	}

	xfer->tx.len = tx_size;
	xfer->rx.len = rx_size ? : info->desc->max_msg_size;
	xfer->hdr.id = msg_id;
	xfer->hdr.protocol_id = prot_id;
	xfer->hdr.poll_completion = false;

	*p = xfer;

	return 0;
}

/**
 * scmi_version_get() - command to get the revision of the SCMI entity
 *
 * @handle: Pointer to SCMI entity handle
 * @protocol: Protocol identifier for the message
 * @version: Holds returned version of protocol.
 *
 * Updates the SCMI information in the internal data structure.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
int scmi_version_get(const struct scmi_handle *handle, u8 protocol,
		     u32 *version)
{
	int ret;
	__le32 *rev_info;
	struct scmi_xfer *t;

	ret = scmi_xfer_get_init(handle, PROTOCOL_VERSION, protocol, 0,
				 sizeof(*version), &t);
	if (ret)
		return ret;

	ret = scmi_do_xfer(handle, t);
	if (!ret) {
		rev_info = t->rx.buf;
		*version = le32_to_cpu(*rev_info);
	}

	scmi_xfer_put(handle, t);
	return ret;
}

void scmi_setup_protocol_implemented(const struct scmi_handle *handle,
				     u8 *prot_imp)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	info->protocols_imp = prot_imp;
}

static bool
scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
{
	int i;
	struct scmi_info *info = handle_to_scmi_info(handle);

	if (!info->protocols_imp)
		return false;

	for (i = 0; i < MAX_PROTOCOLS_IMP; i++)
		if (info->protocols_imp[i] == prot_id)
			return true;
	return false;
}

/**
 * scmi_handle_get() - Get the SCMI handle for a device
 *
 * @dev: pointer to device for which we want SCMI handle
 *
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by caller of SCMI protocol library.
 * scmi_handle_put must be balanced with successful scmi_handle_get
 *
 * Return: pointer to handle if successful, NULL on error
 */
struct scmi_handle *scmi_handle_get(struct device *dev)
{
	struct list_head *p;
	struct scmi_info *info;
	struct scmi_handle *handle = NULL;

	mutex_lock(&scmi_list_mutex);
	list_for_each(p, &scmi_list) {
		info = list_entry(p, struct scmi_info, node);
		if (dev->parent == info->dev) {
			handle = &info->handle;
			info->users++;
			break;
		}
	}
	mutex_unlock(&scmi_list_mutex);

	return handle;
}

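/*
 * Typical (illustrative) pairing for a client of this library, noting
 * that the lookup matches on dev->parent, i.e. the caller's device is
 * expected to be a child of the SCMI instance device:
 *
 *	handle = scmi_handle_get(&sdev->dev);
 *	if (!handle)
 *		return -ENODEV;
 *	...
 *	scmi_handle_put(handle);
 */
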
/**
 * scmi_handle_put() - Release the handle acquired by scmi_handle_get
 *
 * @handle: handle acquired by scmi_handle_get
 *
 * NOTE: The function does not track individual clients of the framework
 * and is expected to be maintained by caller of SCMI protocol library.
 * scmi_handle_put must be balanced with successful scmi_handle_get
 *
 * Return: 0 if successfully released;
 *	if null was passed, it returns -EINVAL
 */
int scmi_handle_put(const struct scmi_handle *handle)
{
	struct scmi_info *info;

	if (!handle)
		return -EINVAL;

	info = handle_to_scmi_info(handle);
	mutex_lock(&scmi_list_mutex);
	if (!WARN_ON(!info->users))
		info->users--;
	mutex_unlock(&scmi_list_mutex);

	return 0;
}

static int __scmi_xfer_info_init(struct scmi_info *sinfo,
				 struct scmi_xfers_info *info)
{
	int i;
	struct scmi_xfer *xfer;
	struct device *dev = sinfo->dev;
	const struct scmi_desc *desc = sinfo->desc;

	/* Pre-allocated messages, no more than what hdr.seq can support */
	if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) {
		dev_err(dev, "Maximum message of %d exceeds supported %ld\n",
			desc->max_msg, MSG_TOKEN_MAX);
		return -EINVAL;
	}

	info->xfer_block = devm_kcalloc(dev, desc->max_msg,
					sizeof(*info->xfer_block), GFP_KERNEL);
	if (!info->xfer_block)
		return -ENOMEM;

	info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(desc->max_msg),
					      sizeof(long), GFP_KERNEL);
	if (!info->xfer_alloc_table)
		return -ENOMEM;

	/* Pre-initialize the buffer pointer to pre-allocated buffers */
	for (i = 0, xfer = info->xfer_block; i < desc->max_msg; i++, xfer++) {
		xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
					    GFP_KERNEL);
		if (!xfer->rx.buf)
			return -ENOMEM;

		xfer->tx.buf = xfer->rx.buf;
		init_completion(&xfer->done);
	}

	spin_lock_init(&info->xfer_lock);

	return 0;
}

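/*
 * Sizing note (derived from the MSG_TOKEN_MAX check above): hdr.seq is
 * carried in the 10-bit header token field, so at most 1024 messages can
 * be pre-allocated per channel; actual transport descriptors are far
 * smaller, e.g. the mailbox transport in this tree limits max_msg to 20.
 */
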
static int scmi_xfer_info_init(struct scmi_info *sinfo)
{
	int ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);

	if (!ret && idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE))
		ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);

	return ret;
}

static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
			   int prot_id, bool tx)
{
	int ret, idx;
	struct scmi_chan_info *cinfo;
	struct idr *idr;

	/* Transmit channel is first entry i.e. index 0 */
	idx = tx ? 0 : 1;
	idr = tx ? &info->tx_idr : &info->rx_idr;

	/* check if already allocated, used for multiple device per protocol */
	cinfo = idr_find(idr, prot_id);
	if (cinfo)
		return 0;

	if (!info->desc->ops->chan_available(dev, idx)) {
		cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
		if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
			return -EINVAL;
		goto idr_alloc;
	}

	cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
	if (!cinfo)
		return -ENOMEM;

	cinfo->dev = dev;

	ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
	if (ret)
		return ret;

idr_alloc:
	ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
	if (ret != prot_id) {
		dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
		return ret;
	}

	cinfo->handle = &info->handle;
	return 0;
}

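/*
 * Resolution order implemented above for a given (protocol, direction):
 *
 *	1. an idr entry already exists      -> reuse it (multiple devices
 *					       per protocol)
 *	2. no dedicated channel is declared -> fall back to the base
 *					       protocol channel
 *	3. otherwise                        -> allocate and chan_setup()
 *
 * so every protocol id always resolves to some channel in tx_idr/rx_idr.
 */
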
static int
scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
{
	int ret = scmi_chan_setup(info, dev, prot_id, true);

	if (!ret) /* Rx is optional, hence no error check */
		scmi_chan_setup(info, dev, prot_id, false);

	return ret;
}

static void
scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
			    int prot_id, const char *name)
{
	struct scmi_device *sdev;

	sdev = scmi_device_create(np, info->dev, prot_id, name);
	if (!sdev) {
		dev_err(info->dev, "failed to create %d protocol device\n",
			prot_id);
		return;
	}

	if (scmi_txrx_setup(info, &sdev->dev, prot_id)) {
		dev_err(&sdev->dev, "failed to setup transport\n");
		scmi_device_destroy(sdev);
		return;
	}

	/* setup handle now as the transport is ready */
	scmi_set_handle(sdev);
}

#define MAX_SCMI_DEV_PER_PROTOCOL	2
struct scmi_prot_devnames {
	int protocol_id;
	char *names[MAX_SCMI_DEV_PER_PROTOCOL];
};

static struct scmi_prot_devnames devnames[] = {
	{ SCMI_PROTOCOL_POWER,  { "genpd" },},
	{ SCMI_PROTOCOL_PERF,   { "cpufreq" },},
	{ SCMI_PROTOCOL_CLOCK,  { "clocks" },},
	{ SCMI_PROTOCOL_SENSOR, { "hwmon" },},
	{ SCMI_PROTOCOL_RESET,  { "reset" },},
};

static void
scmi_create_protocol_devices(struct device_node *np, struct scmi_info *info,
			     int prot_id)
{
	int loop, cnt;

	for (loop = 0; loop < ARRAY_SIZE(devnames); loop++) {
		if (devnames[loop].protocol_id != prot_id)
			continue;

		for (cnt = 0; cnt < ARRAY_SIZE(devnames[loop].names); cnt++) {
			const char *name = devnames[loop].names[cnt];

			if (name)
				scmi_create_protocol_device(np, info, prot_id,
							    name);
		}
	}
}

static int scmi_probe(struct platform_device *pdev)
{
	int ret;
	struct scmi_handle *handle;
	const struct scmi_desc *desc;
	struct scmi_info *info;
	struct device *dev = &pdev->dev;
	struct device_node *child, *np = dev->of_node;

	desc = of_device_get_match_data(dev);
	if (!desc)
		return -EINVAL;

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->dev = dev;
	info->desc = desc;
	INIT_LIST_HEAD(&info->node);

	platform_set_drvdata(pdev, info);
	idr_init(&info->tx_idr);
	idr_init(&info->rx_idr);

	handle = &info->handle;
	handle->dev = info->dev;
	handle->version = &info->version;

	ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
	if (ret)
		return ret;

	ret = scmi_xfer_info_init(info);
	if (ret)
		return ret;

	if (scmi_notification_init(handle))
		dev_err(dev, "SCMI Notifications NOT available.\n");

	ret = scmi_base_protocol_init(handle);
	if (ret) {
		dev_err(dev, "unable to communicate with SCMI(%d)\n", ret);
		return ret;
	}

	mutex_lock(&scmi_list_mutex);
	list_add_tail(&info->node, &scmi_list);
	mutex_unlock(&scmi_list_mutex);

	for_each_available_child_of_node(np, child) {
		u32 prot_id;

		if (of_property_read_u32(child, "reg", &prot_id))
			continue;

		if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
			dev_err(dev, "Out of range protocol %d\n", prot_id);

		if (!scmi_is_protocol_implemented(handle, prot_id)) {
			dev_err(dev, "SCMI protocol %d not implemented\n",
				prot_id);
			continue;
		}

		scmi_create_protocol_devices(child, info, prot_id);
	}

	return 0;
}

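/*
 * The child scan above matches protocol nodes like the following
 * illustrative devicetree fragment, where "reg" holds the protocol
 * identifier (0x14 being the clock protocol):
 *
 *	firmware {
 *		scmi {
 *			compatible = "arm,scmi";
 *			...
 *			scmi_clk: protocol@14 {
 *				reg = <0x14>;
 *			};
 *		};
 *	};
 */
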
void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id)
{
	idr_remove(idr, id);
}

static int scmi_remove(struct platform_device *pdev)
{
	int ret = 0;
	struct scmi_info *info = platform_get_drvdata(pdev);
	struct idr *idr = &info->tx_idr;

	scmi_notification_exit(&info->handle);

	mutex_lock(&scmi_list_mutex);
	if (info->users)
		ret = -EBUSY;
	else
		list_del(&info->node);
	mutex_unlock(&scmi_list_mutex);

	if (ret)
		return ret;

	/* Safe to free channels since no more users */
	ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
	idr_destroy(&info->tx_idr);

	idr = &info->rx_idr;
	ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
	idr_destroy(&info->rx_idr);

	return ret;
}

static ssize_t protocol_version_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "%u.%u\n", info->version.major_ver,
		       info->version.minor_ver);
}
static DEVICE_ATTR_RO(protocol_version);

static ssize_t firmware_version_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "0x%x\n", info->version.impl_ver);
}
static DEVICE_ATTR_RO(firmware_version);

static ssize_t vendor_id_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", info->version.vendor_id);
}
static DEVICE_ATTR_RO(vendor_id);

static ssize_t sub_vendor_id_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", info->version.sub_vendor_id);
}
static DEVICE_ATTR_RO(sub_vendor_id);

static struct attribute *versions_attrs[] = {
	&dev_attr_firmware_version.attr,
	&dev_attr_protocol_version.attr,
	&dev_attr_vendor_id.attr,
	&dev_attr_sub_vendor_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(versions);

/* Each compatible listed below must have descriptor associated with it */
static const struct of_device_id scmi_of_match[] = {
	{ .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
#ifdef CONFIG_HAVE_ARM_SMCCC_DISCOVERY
	{ .compatible = "arm,scmi-smc", .data = &scmi_smc_desc },
#endif
	{ /* Sentinel */ },
};

MODULE_DEVICE_TABLE(of, scmi_of_match);

static struct platform_driver scmi_driver = {
	.driver = {
		   .name = "arm-scmi",
		   .of_match_table = scmi_of_match,
		   .dev_groups = versions_groups,
	},
	.probe = scmi_probe,
	.remove = scmi_remove,
};

module_platform_driver(scmi_driver);

933 MODULE_ALIAS("platform: arm-scmi");
934 MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
935 MODULE_DESCRIPTION("ARM SCMI protocol driver");
936 MODULE_LICENSE("GPL v2");