// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Message Protocol driver
 *
 * SCMI Message Protocol is used between the System Control Processor(SCP)
 * and the Application Processors(AP). The Message Handling Unit(MHU)
 * provides a mechanism for inter-processor communication between SCP's
 * Cortex M3 and AP.
 *
 * SCP offers control and management of the core/cluster power states,
 * various power domain DVFS including the core/cluster, certain system
 * clocks configuration, thermal sensors and many others.
 *
 * Copyright (C) 2018 ARM Ltd.
 */

#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/processor.h>
#include <linux/slab.h>

#include "common.h"
#include "notify.h"

#define CREATE_TRACE_POINTS
#include <trace/events/scmi.h>

enum scmi_error_codes {
	SCMI_SUCCESS = 0,	/* Success */
	SCMI_ERR_SUPPORT = -1,	/* Not supported */
	SCMI_ERR_PARAMS = -2,	/* Invalid Parameters */
	SCMI_ERR_ACCESS = -3,	/* Invalid access/permission denied */
	SCMI_ERR_ENTRY = -4,	/* Not found */
	SCMI_ERR_RANGE = -5,	/* Value out of range */
	SCMI_ERR_BUSY = -6,	/* Device busy */
	SCMI_ERR_COMMS = -7,	/* Communication Error */
	SCMI_ERR_GENERIC = -8,	/* Generic Error */
	SCMI_ERR_HARDWARE = -9,	/* Hardware Error */
	SCMI_ERR_PROTOCOL = -10,/* Protocol Error */
	SCMI_ERR_MAX
};

/* List of all SCMI devices active in the system */
static LIST_HEAD(scmi_list);
/* Protection for the entire list */
static DEFINE_MUTEX(scmi_list_mutex);
/* Track the unique id for the transfers for debug & profiling purpose */
static atomic_t transfer_last_id;

/**
 * struct scmi_xfers_info - Structure to manage transfer information
 *
 * @xfer_block: Preallocated Message array
 * @xfer_alloc_table: Bitmap table for allocated messages.
 *	Index of this bitmap table is also used for message
 *	sequence identifier.
 * @xfer_lock: Protection for message allocation
 */
struct scmi_xfers_info {
	struct scmi_xfer *xfer_block;
	unsigned long *xfer_alloc_table;
	spinlock_t xfer_lock;
};

/**
 * struct scmi_info - Structure representing a SCMI instance
 *
 * @dev: Device pointer
 * @desc: SoC description for this instance
 * @version: SCMI revision information containing protocol version,
 *	implementation version and (sub-)vendor identification.
 * @handle: Instance of SCMI handle to send to clients
 * @tx_minfo: Universal Transmit Message management info
 * @rx_minfo: Universal Receive Message management info
 * @tx_idr: IDR object to map protocol id to Tx channel info pointer
 * @rx_idr: IDR object to map protocol id to Rx channel info pointer
 * @protocols_imp: List of protocols implemented, currently maximum of
 *	MAX_PROTOCOLS_IMP elements allocated by the base protocol
 * @node: List head
 * @users: Number of users of this instance
 */
struct scmi_info {
	struct device *dev;
	const struct scmi_desc *desc;
	struct scmi_revision_info version;
	struct scmi_handle handle;
	struct scmi_xfers_info tx_minfo;
	struct scmi_xfers_info rx_minfo;
	struct idr tx_idr;
	struct idr rx_idr;
	u8 *protocols_imp;
	struct list_head node;
	int users;
};

#define handle_to_scmi_info(h)	container_of(h, struct scmi_info, handle)

static const int scmi_linux_errmap[] = {
	/* better than switch case as long as return value is continuous */
	0,			/* SCMI_SUCCESS */
	-EOPNOTSUPP,		/* SCMI_ERR_SUPPORT */
	-EINVAL,		/* SCMI_ERR_PARAMS */
	-EACCES,		/* SCMI_ERR_ACCESS */
	-ENOENT,		/* SCMI_ERR_ENTRY */
	-ERANGE,		/* SCMI_ERR_RANGE */
	-EBUSY,			/* SCMI_ERR_BUSY */
	-ECOMM,			/* SCMI_ERR_COMMS */
	-EIO,			/* SCMI_ERR_GENERIC */
	-EREMOTEIO,		/* SCMI_ERR_HARDWARE */
	-EPROTO,		/* SCMI_ERR_PROTOCOL */
};

static inline int scmi_to_linux_errno(int errno)
{
	int err_idx = -errno;

	/*
	 * Note that SCMI_ERR_MAX is -9, not -11: the enumerator following
	 * SCMI_ERR_PROTOCOL = -10 implicitly increments it. Bound the lookup
	 * by the table size instead, so the last two defined errors are
	 * mapped correctly as well.
	 */
	if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap))
		return scmi_linux_errmap[err_idx];
	return -EIO;
}
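
/*
 * For example, a platform status of SCMI_ERR_ACCESS (-3) maps onto -EACCES
 * above, while any status outside the table (including a bogus positive
 * value) falls back to -EIO.
 */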

/**
 * scmi_dump_header_dbg() - Helper to dump a message header.
 *
 * @dev: Device pointer corresponding to the SCMI entity
 * @hdr: pointer to header.
 */
static inline void scmi_dump_header_dbg(struct device *dev,
					struct scmi_msg_hdr *hdr)
{
	dev_dbg(dev, "Message ID: %x Sequence ID: %x Protocol: %x\n",
		hdr->id, hdr->seq, hdr->protocol_id);
}

/**
 * scmi_xfer_get() - Allocate one message
 *
 * @handle: Pointer to SCMI entity handle
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 *
 * Helper function which is used by various message functions that are
 * exposed to clients of this driver for allocating a message.
 *
 * This function holds a spinlock to maintain integrity of internal data
 * structures; it does not sleep, and fails immediately when no message
 * slot is free.
 *
 * Return: pointer to the allocated &struct scmi_xfer on success, an
 *	ERR_PTR() encoded error otherwise.
 */
static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
				       struct scmi_xfers_info *minfo)
{
	u16 xfer_id;
	struct scmi_xfer *xfer;
	unsigned long flags, bit_pos;
	struct scmi_info *info = handle_to_scmi_info(handle);

	/* Keep the locked section as small as possible */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	bit_pos = find_first_zero_bit(minfo->xfer_alloc_table,
				      info->desc->max_msg);
	if (bit_pos == info->desc->max_msg) {
		spin_unlock_irqrestore(&minfo->xfer_lock, flags);
		return ERR_PTR(-ENOMEM);
	}
	set_bit(bit_pos, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);

	xfer_id = bit_pos;

	xfer = &minfo->xfer_block[xfer_id];
	xfer->hdr.seq = xfer_id;
	reinit_completion(&xfer->done);
	xfer->transfer_id = atomic_inc_return(&transfer_last_id);

	return xfer;
}

/**
 * __scmi_xfer_put() - Release a message
 *
 * @minfo: Pointer to Tx/Rx Message management info based on channel type
 * @xfer: message that was reserved by scmi_xfer_get
 *
 * This holds a spinlock to maintain integrity of internal data structures.
 */
static void
__scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
{
	unsigned long flags;

	/*
	 * Keep the locked section as small as possible
	 * NOTE: we might escape with smp_mb and no lock here..
	 * but just be conservative and symmetric.
	 */
	spin_lock_irqsave(&minfo->xfer_lock, flags);
	clear_bit(xfer->hdr.seq, minfo->xfer_alloc_table);
	spin_unlock_irqrestore(&minfo->xfer_lock, flags);
}

static void scmi_handle_notification(struct scmi_chan_info *cinfo, u32 msg_hdr)
{
	struct scmi_xfer *xfer;
	struct device *dev = cinfo->dev;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct scmi_xfers_info *minfo = &info->rx_minfo;
	ktime_t ts;

	ts = ktime_get_boottime();
	xfer = scmi_xfer_get(cinfo->handle, minfo);
	if (IS_ERR(xfer)) {
		dev_err(dev, "failed to get free message slot (%ld)\n",
			PTR_ERR(xfer));
		info->desc->ops->clear_channel(cinfo);
		return;
	}

	unpack_scmi_header(msg_hdr, &xfer->hdr);
	scmi_dump_header_dbg(dev, &xfer->hdr);
	info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size,
					    xfer);
	scmi_notify(cinfo->handle, xfer->hdr.protocol_id,
		    xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts);

	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
			   xfer->hdr.protocol_id, xfer->hdr.seq,
			   MSG_TYPE_NOTIFICATION);

	__scmi_xfer_put(minfo, xfer);

	info->desc->ops->clear_channel(cinfo);
}

static void scmi_handle_response(struct scmi_chan_info *cinfo,
				 u16 xfer_id, u8 msg_type)
{
	struct scmi_xfer *xfer;
	struct device *dev = cinfo->dev;
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
	struct scmi_xfers_info *minfo = &info->tx_minfo;

	/* Are we even expecting this? */
	if (!test_bit(xfer_id, minfo->xfer_alloc_table)) {
		dev_err(dev, "message for %d is not expected!\n", xfer_id);
		info->desc->ops->clear_channel(cinfo);
		return;
	}

	xfer = &minfo->xfer_block[xfer_id];
	/*
	 * Even if a response was indeed expected on this slot at this point,
	 * a buggy platform could wrongly reply feeding us an unexpected
	 * delayed response we're not prepared to handle: bail-out safely
	 * blaming firmware.
	 */
	if (unlikely(msg_type == MSG_TYPE_DELAYED_RESP && !xfer->async_done)) {
		dev_err(dev,
			"Delayed Response for %d not expected! Buggy F/W ?\n",
			xfer_id);
		info->desc->ops->clear_channel(cinfo);
		/* It was unexpected, so nobody will clear the xfer if not us */
		__scmi_xfer_put(minfo, xfer);
		return;
	}

	scmi_dump_header_dbg(dev, &xfer->hdr);

	info->desc->ops->fetch_response(cinfo, xfer);

	trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
			   xfer->hdr.protocol_id, xfer->hdr.seq,
			   msg_type);

	if (msg_type == MSG_TYPE_DELAYED_RESP) {
		info->desc->ops->clear_channel(cinfo);
		complete(xfer->async_done);
	} else {
		complete(&xfer->done);
	}
}

/**
 * scmi_rx_callback() - callback for receiving messages
 *
 * @cinfo: SCMI channel info
 * @msg_hdr: Message header
 *
 * Processes one received message, routing it to the transfer it belongs
 * to, and signals completion of that transfer.
 *
 * NOTE: This function will be invoked in IRQ context, hence should be
 * as optimal as possible.
 */
void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr)
{
	u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
	u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);

	switch (msg_type) {
	case MSG_TYPE_NOTIFICATION:
		scmi_handle_notification(cinfo, msg_hdr);
		break;
	case MSG_TYPE_COMMAND:
	case MSG_TYPE_DELAYED_RESP:
		scmi_handle_response(cinfo, xfer_id, msg_type);
		break;
	default:
		WARN_ONCE(1, "received unknown msg_type:%d\n", msg_type);
		break;
	}
}
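
/*
 * A minimal sketch of how a transport layer is expected to feed the
 * callback above, assuming a hypothetical interrupt handler and a
 * hypothetical helper that reads the 32-bit message header out of the
 * channel's shared memory (both names are illustrative, not part of
 * this driver):
 *
 *	static irqreturn_t my_transport_irq(int irq, void *data)
 *	{
 *		struct scmi_chan_info *cinfo = data;
 *		u32 msg_hdr = my_shmem_read_header(cinfo->transport_info);
 *
 *		scmi_rx_callback(cinfo, msg_hdr);
 *		return IRQ_HANDLED;
 *	}
 */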

/**
 * scmi_xfer_put() - Release a transmit message
 *
 * @handle: Pointer to SCMI entity handle
 * @xfer: message that was reserved by scmi_xfer_get
 */
void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	__scmi_xfer_put(&info->tx_minfo, xfer);
}

#define SCMI_MAX_POLL_TO_NS	(100 * NSEC_PER_USEC)

static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
				      struct scmi_xfer *xfer, ktime_t stop)
{
	struct scmi_info *info = handle_to_scmi_info(cinfo->handle);

	return info->desc->ops->poll_done(cinfo, xfer) ||
	       ktime_after(ktime_get(), stop);
}

/**
 * scmi_do_xfer() - Do one transfer
 *
 * @handle: Pointer to SCMI entity handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Return: 0 on success; -ETIMEDOUT if no response arrives in time; on a
 *	transmit error or a platform-reported failure, the corresponding
 *	error code.
 */
int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
{
	int ret;
	int timeout;
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct device *dev = info->dev;
	struct scmi_chan_info *cinfo;

	cinfo = idr_find(&info->tx_idr, xfer->hdr.protocol_id);
	if (unlikely(!cinfo))
		return -EINVAL;

	trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
			      xfer->hdr.protocol_id, xfer->hdr.seq,
			      xfer->hdr.poll_completion);

	ret = info->desc->ops->send_message(cinfo, xfer);
	if (ret < 0) {
		dev_dbg(dev, "Failed to send message %d\n", ret);
		return ret;
	}

	if (xfer->hdr.poll_completion) {
		ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS);

		spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));

		if (ktime_before(ktime_get(), stop))
			info->desc->ops->fetch_response(cinfo, xfer);
		else
			ret = -ETIMEDOUT;
	} else {
		/* And we wait for the response. */
		timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
		if (!wait_for_completion_timeout(&xfer->done, timeout)) {
			dev_err(dev, "timed out in resp(caller: %pS)\n",
				(void *)_RET_IP_);
			ret = -ETIMEDOUT;
		}
	}

	if (!ret && xfer->hdr.status)
		ret = scmi_to_linux_errno(xfer->hdr.status);

	if (info->desc->ops->mark_txdone)
		info->desc->ops->mark_txdone(cinfo, ret);

	trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
			    xfer->hdr.protocol_id, xfer->hdr.seq, ret);

	return ret;
}

#define SCMI_MAX_RESPONSE_TIMEOUT	(2 * MSEC_PER_SEC)

/**
 * scmi_do_xfer_with_response() - Do one transfer and wait until the delayed
 *	response is received
 *
 * @handle: Pointer to SCMI entity handle
 * @xfer: Transfer to initiate and wait for response
 *
 * Return: 0 on success; -ETIMEDOUT if no delayed response arrives in time;
 *	on a transmit error, the corresponding error code.
 */
int scmi_do_xfer_with_response(const struct scmi_handle *handle,
			       struct scmi_xfer *xfer)
{
	int ret, timeout = msecs_to_jiffies(SCMI_MAX_RESPONSE_TIMEOUT);
	DECLARE_COMPLETION_ONSTACK(async_response);

	xfer->async_done = &async_response;

	ret = scmi_do_xfer(handle, xfer);
	if (!ret && !wait_for_completion_timeout(xfer->async_done, timeout))
		ret = -ETIMEDOUT;

	xfer->async_done = NULL;
	return ret;
}
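
/*
 * A minimal usage sketch for an asynchronous command, assuming a
 * hypothetical protocol command HYP_CMD_ASYNC taking a single 32-bit
 * domain parameter (the get/put pattern mirrors the synchronous
 * scmi_version_get() further below):
 *
 *	struct scmi_xfer *t;
 *	int ret;
 *
 *	ret = scmi_xfer_get_init(handle, HYP_CMD_ASYNC, prot_id,
 *				 sizeof(__le32), 0, &t);
 *	if (ret)
 *		return ret;
 *
 *	*(__le32 *)t->tx.buf = cpu_to_le32(domain);
 *	ret = scmi_do_xfer_with_response(handle, t);
 *
 *	scmi_xfer_put(handle, t);
 */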

/**
 * scmi_xfer_get_init() - Allocate and initialise one message for transmit
 *
 * @handle: Pointer to SCMI entity handle
 * @msg_id: Message identifier
 * @prot_id: Protocol identifier for the message
 * @tx_size: transmit message size
 * @rx_size: receive message size
 * @p: pointer to the allocated and initialised message
 *
 * This function allocates the message using @scmi_xfer_get and
 * initialises the header.
 *
 * Return: 0 if all went fine with @p pointing to the message, else
 *	corresponding error.
 */
int scmi_xfer_get_init(const struct scmi_handle *handle, u8 msg_id, u8 prot_id,
		       size_t tx_size, size_t rx_size, struct scmi_xfer **p)
{
	int ret;
	struct scmi_xfer *xfer;
	struct scmi_info *info = handle_to_scmi_info(handle);
	struct scmi_xfers_info *minfo = &info->tx_minfo;
	struct device *dev = info->dev;

	/* Ensure we have sane transfer sizes */
	if (rx_size > info->desc->max_msg_size ||
	    tx_size > info->desc->max_msg_size)
		return -ERANGE;

	xfer = scmi_xfer_get(handle, minfo);
	if (IS_ERR(xfer)) {
		ret = PTR_ERR(xfer);
		dev_err(dev, "failed to get free message slot(%d)\n", ret);
		return ret;
	}

	xfer->tx.len = tx_size;
	xfer->rx.len = rx_size ? : info->desc->max_msg_size;
	xfer->hdr.id = msg_id;
	xfer->hdr.protocol_id = prot_id;
	xfer->hdr.poll_completion = false;

	*p = xfer;

	return 0;
}
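
/*
 * scmi_version_get() below is the canonical example of the sequence a
 * protocol implementation follows for a synchronous command:
 * scmi_xfer_get_init(), then scmi_do_xfer(), then scmi_xfer_put().
 */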

/**
 * scmi_version_get() - command to get the revision of the SCMI entity
 *
 * @handle: Pointer to SCMI entity handle
 * @protocol: Protocol identifier for the message
 * @version: Holds returned version of protocol.
 *
 * Updates the SCMI information in the internal data structure.
 *
 * Return: 0 if all went fine, else return appropriate error.
 */
int scmi_version_get(const struct scmi_handle *handle, u8 protocol,
		     u32 *version)
{
	int ret;
	__le32 *rev_info;
	struct scmi_xfer *t;

	ret = scmi_xfer_get_init(handle, PROTOCOL_VERSION, protocol, 0,
				 sizeof(*version), &t);
	if (ret)
		return ret;

	ret = scmi_do_xfer(handle, t);
	if (!ret) {
		rev_info = t->rx.buf;
		*version = le32_to_cpu(*rev_info);
	}

	scmi_xfer_put(handle, t);
	return ret;
}

void scmi_setup_protocol_implemented(const struct scmi_handle *handle,
				     u8 *prot_imp)
{
	struct scmi_info *info = handle_to_scmi_info(handle);

	info->protocols_imp = prot_imp;
}

static bool
scmi_is_protocol_implemented(const struct scmi_handle *handle, u8 prot_id)
{
	int i;
	struct scmi_info *info = handle_to_scmi_info(handle);

	if (!info->protocols_imp)
		return false;

	for (i = 0; i < MAX_PROTOCOLS_IMP; i++)
		if (info->protocols_imp[i] == prot_id)
			return true;
	return false;
}

/**
 * scmi_handle_get() - Get the SCMI handle for a device
 *
 * @dev: pointer to device for which we want SCMI handle
 *
 * NOTE: The function does not track individual clients of the framework;
 * maintaining that is left to the caller of the SCMI protocol library.
 * scmi_handle_put must be balanced with a successful scmi_handle_get.
 *
 * Return: pointer to handle if successful, NULL on error
 */
struct scmi_handle *scmi_handle_get(struct device *dev)
{
	struct list_head *p;
	struct scmi_info *info;
	struct scmi_handle *handle = NULL;

	mutex_lock(&scmi_list_mutex);
	list_for_each(p, &scmi_list) {
		info = list_entry(p, struct scmi_info, node);
		if (dev->parent == info->dev) {
			handle = &info->handle;
			info->users++;
			break;
		}
	}
	mutex_unlock(&scmi_list_mutex);

	return handle;
}

/**
 * scmi_handle_put() - Release the handle acquired by scmi_handle_get
 *
 * @handle: handle acquired by scmi_handle_get
 *
 * NOTE: The function does not track individual clients of the framework;
 * maintaining that is left to the caller of the SCMI protocol library.
 * scmi_handle_put must be balanced with a successful scmi_handle_get.
 *
 * Return: 0 if successfully released, or -EINVAL if a NULL handle was
 *	passed.
 */
int scmi_handle_put(const struct scmi_handle *handle)
{
	struct scmi_info *info;

	if (!handle)
		return -EINVAL;

	info = handle_to_scmi_info(handle);
	mutex_lock(&scmi_list_mutex);
	if (!WARN_ON(!info->users))
		info->users--;
	mutex_unlock(&scmi_list_mutex);

	return 0;
}
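
/*
 * A minimal sketch of balanced handle usage by a client whose device is
 * a child of the SCMI platform device (error handling elided; the exact
 * probe context is illustrative):
 *
 *	struct scmi_handle *handle = scmi_handle_get(&sdev->dev);
 *
 *	if (!handle)
 *		return -EPROBE_DEFER;
 *	...
 *	scmi_handle_put(handle);
 */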

static int __scmi_xfer_info_init(struct scmi_info *sinfo,
				 struct scmi_xfers_info *info)
{
	int i;
	struct scmi_xfer *xfer;
	struct device *dev = sinfo->dev;
	const struct scmi_desc *desc = sinfo->desc;

	/* Pre-allocated messages, no more than what hdr.seq can support */
	if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) {
		dev_err(dev, "Maximum message of %d exceeds supported %ld\n",
			desc->max_msg, MSG_TOKEN_MAX);
		return -EINVAL;
	}

	info->xfer_block = devm_kcalloc(dev, desc->max_msg,
					sizeof(*info->xfer_block), GFP_KERNEL);
	if (!info->xfer_block)
		return -ENOMEM;

	info->xfer_alloc_table = devm_kcalloc(dev, BITS_TO_LONGS(desc->max_msg),
					      sizeof(long), GFP_KERNEL);
	if (!info->xfer_alloc_table)
		return -ENOMEM;

	/* Pre-initialize the buffer pointer to pre-allocated buffers */
	for (i = 0, xfer = info->xfer_block; i < desc->max_msg; i++, xfer++) {
		xfer->rx.buf = devm_kcalloc(dev, sizeof(u8), desc->max_msg_size,
					    GFP_KERNEL);
		if (!xfer->rx.buf)
			return -ENOMEM;

		xfer->tx.buf = xfer->rx.buf;
		init_completion(&xfer->done);
	}

	spin_lock_init(&info->xfer_lock);

	return 0;
}

static int scmi_xfer_info_init(struct scmi_info *sinfo)
{
	int ret = __scmi_xfer_info_init(sinfo, &sinfo->tx_minfo);

	if (!ret && idr_find(&sinfo->rx_idr, SCMI_PROTOCOL_BASE))
		ret = __scmi_xfer_info_init(sinfo, &sinfo->rx_minfo);

	return ret;
}

static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
			   int prot_id, bool tx)
{
	int ret, idx;
	struct scmi_chan_info *cinfo;
	struct idr *idr;

	/* Transmit channel is first entry i.e. index 0 */
	idx = tx ? 0 : 1;
	idr = tx ? &info->tx_idr : &info->rx_idr;

	/* check if already allocated, used for multiple devices per protocol */
	cinfo = idr_find(idr, prot_id);
	if (cinfo)
		return 0;

	if (!info->desc->ops->chan_available(dev, idx)) {
		cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
		if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
			return -EINVAL;
		goto idr_alloc;
	}

	cinfo = devm_kzalloc(info->dev, sizeof(*cinfo), GFP_KERNEL);
	if (!cinfo)
		return -ENOMEM;

	cinfo->dev = dev;

	ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
	if (ret)
		return ret;

idr_alloc:
	ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
	if (ret != prot_id) {
		dev_err(dev, "unable to allocate SCMI idr slot err %d\n", ret);
		return ret;
	}

	cinfo->handle = &info->handle;
	return 0;
}

static inline int
scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
{
	int ret = scmi_chan_setup(info, dev, prot_id, true);

	if (!ret) /* Rx is optional, hence no error check */
		scmi_chan_setup(info, dev, prot_id, false);

	return ret;
}

static inline void
scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
			    int prot_id, const char *name)
{
	struct scmi_device *sdev;

	sdev = scmi_device_create(np, info->dev, prot_id, name);
	if (!sdev) {
		dev_err(info->dev, "failed to create %d protocol device\n",
			prot_id);
		return;
	}

	if (scmi_txrx_setup(info, &sdev->dev, prot_id)) {
		dev_err(&sdev->dev, "failed to setup transport\n");
		scmi_device_destroy(sdev);
		return;
	}

	/* setup handle now as the transport is ready */
	scmi_set_handle(sdev);
}

#define MAX_SCMI_DEV_PER_PROTOCOL	2
struct scmi_prot_devnames {
	int protocol_id;
	char *names[MAX_SCMI_DEV_PER_PROTOCOL];
};

static struct scmi_prot_devnames devnames[] = {
	{ SCMI_PROTOCOL_POWER,  { "genpd" },},
	{ SCMI_PROTOCOL_PERF,   { "cpufreq" },},
	{ SCMI_PROTOCOL_CLOCK,  { "clocks" },},
	{ SCMI_PROTOCOL_SENSOR, { "hwmon" },},
	{ SCMI_PROTOCOL_RESET,  { "reset" },},
};
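
/*
 * Illustrative only: supporting an additional protocol/driver pairing
 * means adding an entry to the table above, e.g. a hypothetical system
 * power protocol bound to a "syspower" driver:
 *
 *	{ SCMI_PROTOCOL_SYSTEM, { "syspower" },},
 *
 * with up to MAX_SCMI_DEV_PER_PROTOCOL names per protocol.
 */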

static inline void
scmi_create_protocol_devices(struct device_node *np, struct scmi_info *info,
			     int prot_id)
{
	int loop, cnt;

	for (loop = 0; loop < ARRAY_SIZE(devnames); loop++) {
		if (devnames[loop].protocol_id != prot_id)
			continue;

		for (cnt = 0; cnt < ARRAY_SIZE(devnames[loop].names); cnt++) {
			const char *name = devnames[loop].names[cnt];

			if (name)
				scmi_create_protocol_device(np, info, prot_id,
							    name);
		}
	}
}

static int scmi_probe(struct platform_device *pdev)
{
	int ret;
	struct scmi_handle *handle;
	const struct scmi_desc *desc;
	struct scmi_info *info;
	struct device *dev = &pdev->dev;
	struct device_node *child, *np = dev->of_node;

	desc = of_device_get_match_data(dev);
	if (!desc)
		return -EINVAL;

	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->dev = dev;
	info->desc = desc;
	INIT_LIST_HEAD(&info->node);

	platform_set_drvdata(pdev, info);
	idr_init(&info->tx_idr);
	idr_init(&info->rx_idr);

	handle = &info->handle;
	handle->dev = info->dev;
	handle->version = &info->version;

	ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
	if (ret)
		return ret;

	ret = scmi_xfer_info_init(info);
	if (ret)
		return ret;

	if (scmi_notification_init(handle))
		dev_err(dev, "SCMI Notifications NOT available.\n");

	ret = scmi_base_protocol_init(handle);
	if (ret) {
		dev_err(dev, "unable to communicate with SCMI(%d)\n", ret);
		return ret;
	}

	mutex_lock(&scmi_list_mutex);
	list_add_tail(&info->node, &scmi_list);
	mutex_unlock(&scmi_list_mutex);

	for_each_available_child_of_node(np, child) {
		u32 prot_id;

		if (of_property_read_u32(child, "reg", &prot_id))
			continue;

		if (!FIELD_FIT(MSG_PROTOCOL_ID_MASK, prot_id))
			dev_err(dev, "Out of range protocol %d\n", prot_id);

		if (!scmi_is_protocol_implemented(handle, prot_id)) {
			dev_err(dev, "SCMI protocol %d not implemented\n",
				prot_id);
			continue;
		}

		scmi_create_protocol_devices(child, info, prot_id);
	}

	return 0;
}
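
/*
 * For reference, the probe above walks the protocol child nodes of an
 * SCMI firmware node shaped roughly like the illustrative devicetree
 * fragment below, where each child's "reg" holds the protocol id
 * (0x14 is the clock protocol):
 *
 *	scmi {
 *		compatible = "arm,scmi";
 *		...
 *		scmi_clk: protocol@14 {
 *			reg = <0x14>;
 *		};
 *	};
 */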

void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id)
{
	idr_remove(idr, id);
}

static int scmi_remove(struct platform_device *pdev)
{
	int ret = 0;
	struct scmi_info *info = platform_get_drvdata(pdev);
	struct idr *idr = &info->tx_idr;

	scmi_notification_exit(&info->handle);

	mutex_lock(&scmi_list_mutex);
	if (info->users)
		ret = -EBUSY;
	else
		list_del(&info->node);
	mutex_unlock(&scmi_list_mutex);

	if (ret)
		return ret;

	/* Safe to free channels since no more users */
	ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
	idr_destroy(&info->tx_idr);

	idr = &info->rx_idr;
	ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
	idr_destroy(&info->rx_idr);

	return ret;
}

static ssize_t protocol_version_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "%u.%u\n", info->version.major_ver,
		       info->version.minor_ver);
}
static DEVICE_ATTR_RO(protocol_version);

static ssize_t firmware_version_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "0x%x\n", info->version.impl_ver);
}
static DEVICE_ATTR_RO(firmware_version);

static ssize_t vendor_id_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", info->version.vendor_id);
}
static DEVICE_ATTR_RO(vendor_id);

static ssize_t sub_vendor_id_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct scmi_info *info = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", info->version.sub_vendor_id);
}
static DEVICE_ATTR_RO(sub_vendor_id);

static struct attribute *versions_attrs[] = {
	&dev_attr_firmware_version.attr,
	&dev_attr_protocol_version.attr,
	&dev_attr_vendor_id.attr,
	&dev_attr_sub_vendor_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(versions);

/* Each compatible listed below must have a descriptor associated with it */
static const struct of_device_id scmi_of_match[] = {
	{ .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
#ifdef CONFIG_HAVE_ARM_SMCCC_DISCOVERY
	{ .compatible = "arm,scmi-smc", .data = &scmi_smc_desc},
#endif
	{ /* Sentinel */ },
};

MODULE_DEVICE_TABLE(of, scmi_of_match);

static struct platform_driver scmi_driver = {
	.driver = {
		.name = "arm-scmi",
		.of_match_table = scmi_of_match,
		.dev_groups = versions_groups,
	},
	.probe = scmi_probe,
	.remove = scmi_remove,
};

module_platform_driver(scmi_driver);

MODULE_ALIAS("platform:arm-scmi");
MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("ARM SCMI protocol driver");
MODULE_LICENSE("GPL v2");