/*
 * Copyright (c) 2016-2017, Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mailbox_client.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/rpmsg.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "rpmsg_internal.h"
#include "qcom_glink_native.h"
/* Maximum length of a G-Link channel name, including the NUL terminator */
#define GLINK_NAME_SIZE		32
/* Protocol version advertised during version negotiation */
#define GLINK_VERSION_1		1

/* Valid range for locally allocated channel ids (lcids) */
#define RPM_GLINK_CID_MIN	1
#define RPM_GLINK_CID_MAX	65536
47 * struct glink_defer_cmd - deferred incoming control message
49 * @msg: message header
50 * data: payload of the message
52 * Copy of a received control message, to be added to @rx_queue and processed
53 * by @rx_work of @qcom_glink.
55 struct glink_defer_cmd
{
56 struct list_head node
;
63 * struct glink_core_rx_intent - RX intent
66 * data: pointer to the data (may be NULL for zero-copy)
67 * id: remote or local intent ID
68 * size: size of the original intent (do not modify)
69 * reuse: To mark if the intent can be reused after first use
70 * in_use: To mark if intent is already in use for the channel
71 * offset: next write offset (initially 0)
73 struct glink_core_rx_intent
{
83 * struct qcom_glink - driver context, relates to one remote subsystem
84 * @dev: reference to the associated struct device
85 * @mbox_client: mailbox client
86 * @mbox_chan: mailbox channel
87 * @rx_pipe: pipe object for receive FIFO
88 * @tx_pipe: pipe object for transmit FIFO
89 * @irq: IRQ for signaling incoming events
90 * @rx_work: worker for handling received control messages
91 * @rx_lock: protects the @rx_queue
92 * @rx_queue: queue of received control messages to be processed in @rx_work
93 * @tx_lock: synchronizes operations on the tx fifo
94 * @idr_lock: synchronizes @lcids and @rcids modifications
95 * @lcids: idr of all channels with a known local channel id
96 * @rcids: idr of all channels with a known remote channel id
101 struct mbox_client mbox_client
;
102 struct mbox_chan
*mbox_chan
;
104 struct qcom_glink_pipe
*rx_pipe
;
105 struct qcom_glink_pipe
*tx_pipe
;
109 struct work_struct rx_work
;
111 struct list_head rx_queue
;
113 struct mutex tx_lock
;
118 unsigned long features
;
131 * struct glink_channel - internal representation of a channel
132 * @rpdev: rpdev reference, only used for primary endpoints
133 * @ept: rpmsg endpoint this channel is associated with
134 * @glink: qcom_glink context handle
135 * @refcount: refcount for the channel object
136 * @recv_lock: guard for @ept.cb
137 * @name: unique channel name/identifier
138 * @lcid: channel id, in local space
139 * @rcid: channel id, in remote space
140 * @intent_lock: lock for protection of @liids
141 * @liids: idr of all local intents
142 * @buf: receive buffer, for gathering fragments
143 * @buf_offset: write offset in @buf
144 * @buf_size: size of current @buf
145 * @open_ack: completed once remote has acked the open-request
146 * @open_req: completed once open-request has been received
148 struct glink_channel
{
149 struct rpmsg_endpoint ept
;
151 struct rpmsg_device
*rpdev
;
152 struct qcom_glink
*glink
;
154 struct kref refcount
;
156 spinlock_t recv_lock
;
162 spinlock_t intent_lock
;
165 struct glink_core_rx_intent
*buf
;
169 struct completion open_ack
;
170 struct completion open_req
;
173 #define to_glink_channel(_ept) container_of(_ept, struct glink_channel, ept)
175 static const struct rpmsg_endpoint_ops glink_endpoint_ops
;
177 #define RPM_CMD_VERSION 0
178 #define RPM_CMD_VERSION_ACK 1
179 #define RPM_CMD_OPEN 2
180 #define RPM_CMD_CLOSE 3
181 #define RPM_CMD_OPEN_ACK 4
182 #define RPM_CMD_INTENT 5
183 #define RPM_CMD_RX_INTENT_REQ 7
184 #define RPM_CMD_RX_INTENT_REQ_ACK 8
185 #define RPM_CMD_TX_DATA 9
186 #define RPM_CMD_CLOSE_ACK 11
187 #define RPM_CMD_TX_DATA_CONT 12
188 #define RPM_CMD_READ_NOTIF 13
190 #define GLINK_FEATURE_INTENTLESS BIT(1)
192 static struct glink_channel
*qcom_glink_alloc_channel(struct qcom_glink
*glink
,
195 struct glink_channel
*channel
;
197 channel
= kzalloc(sizeof(*channel
), GFP_KERNEL
);
199 return ERR_PTR(-ENOMEM
);
201 /* Setup glink internal glink_channel data */
202 spin_lock_init(&channel
->recv_lock
);
203 spin_lock_init(&channel
->intent_lock
);
204 channel
->glink
= glink
;
205 channel
->name
= kstrdup(name
, GFP_KERNEL
);
207 init_completion(&channel
->open_req
);
208 init_completion(&channel
->open_ack
);
210 idr_init(&channel
->liids
);
211 kref_init(&channel
->refcount
);
216 static void qcom_glink_channel_release(struct kref
*ref
)
218 struct glink_channel
*channel
= container_of(ref
, struct glink_channel
,
222 spin_lock_irqsave(&channel
->intent_lock
, flags
);
223 idr_destroy(&channel
->liids
);
224 spin_unlock_irqrestore(&channel
->intent_lock
, flags
);
226 kfree(channel
->name
);
230 static size_t qcom_glink_rx_avail(struct qcom_glink
*glink
)
232 return glink
->rx_pipe
->avail(glink
->rx_pipe
);
235 static void qcom_glink_rx_peak(struct qcom_glink
*glink
,
236 void *data
, unsigned int offset
, size_t count
)
238 glink
->rx_pipe
->peak(glink
->rx_pipe
, data
, offset
, count
);
241 static void qcom_glink_rx_advance(struct qcom_glink
*glink
, size_t count
)
243 glink
->rx_pipe
->advance(glink
->rx_pipe
, count
);
246 static size_t qcom_glink_tx_avail(struct qcom_glink
*glink
)
248 return glink
->tx_pipe
->avail(glink
->tx_pipe
);
251 static void qcom_glink_tx_write(struct qcom_glink
*glink
,
252 const void *hdr
, size_t hlen
,
253 const void *data
, size_t dlen
)
255 glink
->tx_pipe
->write(glink
->tx_pipe
, hdr
, hlen
, data
, dlen
);
258 static int qcom_glink_tx(struct qcom_glink
*glink
,
259 const void *hdr
, size_t hlen
,
260 const void *data
, size_t dlen
, bool wait
)
262 unsigned int tlen
= hlen
+ dlen
;
265 /* Reject packets that are too big */
266 if (tlen
>= glink
->tx_pipe
->length
)
269 ret
= mutex_lock_interruptible(&glink
->tx_lock
);
273 while (qcom_glink_tx_avail(glink
) < tlen
) {
279 usleep_range(10000, 15000);
282 qcom_glink_tx_write(glink
, hdr
, hlen
, data
, dlen
);
284 mbox_send_message(glink
->mbox_chan
, NULL
);
285 mbox_client_txdone(glink
->mbox_chan
, 0);
288 mutex_unlock(&glink
->tx_lock
);
293 static int qcom_glink_send_version(struct qcom_glink
*glink
)
295 struct glink_msg msg
;
297 msg
.cmd
= cpu_to_le16(RPM_CMD_VERSION
);
298 msg
.param1
= cpu_to_le16(GLINK_VERSION_1
);
299 msg
.param2
= cpu_to_le32(glink
->features
);
301 return qcom_glink_tx(glink
, &msg
, sizeof(msg
), NULL
, 0, true);
304 static void qcom_glink_send_version_ack(struct qcom_glink
*glink
)
306 struct glink_msg msg
;
308 msg
.cmd
= cpu_to_le16(RPM_CMD_VERSION_ACK
);
309 msg
.param1
= cpu_to_le16(GLINK_VERSION_1
);
310 msg
.param2
= cpu_to_le32(glink
->features
);
312 qcom_glink_tx(glink
, &msg
, sizeof(msg
), NULL
, 0, true);
315 static void qcom_glink_send_open_ack(struct qcom_glink
*glink
,
316 struct glink_channel
*channel
)
318 struct glink_msg msg
;
320 msg
.cmd
= cpu_to_le16(RPM_CMD_OPEN_ACK
);
321 msg
.param1
= cpu_to_le16(channel
->rcid
);
322 msg
.param2
= cpu_to_le32(0);
324 qcom_glink_tx(glink
, &msg
, sizeof(msg
), NULL
, 0, true);
328 * qcom_glink_send_open_req() - send a RPM_CMD_OPEN request to the remote
329 * @glink: Ptr to the glink edge
330 * @channel: Ptr to the channel that the open req is sent
332 * Allocates a local channel id and sends a RPM_CMD_OPEN message to the remote.
333 * Will return with refcount held, regardless of outcome.
335 * Returns 0 on success, negative errno otherwise.
337 static int qcom_glink_send_open_req(struct qcom_glink
*glink
,
338 struct glink_channel
*channel
)
341 struct glink_msg msg
;
342 u8 name
[GLINK_NAME_SIZE
];
344 int name_len
= strlen(channel
->name
) + 1;
345 int req_len
= ALIGN(sizeof(req
.msg
) + name_len
, 8);
349 kref_get(&channel
->refcount
);
351 spin_lock_irqsave(&glink
->idr_lock
, flags
);
352 ret
= idr_alloc_cyclic(&glink
->lcids
, channel
,
353 RPM_GLINK_CID_MIN
, RPM_GLINK_CID_MAX
,
355 spin_unlock_irqrestore(&glink
->idr_lock
, flags
);
361 req
.msg
.cmd
= cpu_to_le16(RPM_CMD_OPEN
);
362 req
.msg
.param1
= cpu_to_le16(channel
->lcid
);
363 req
.msg
.param2
= cpu_to_le32(name_len
);
364 strcpy(req
.name
, channel
->name
);
366 ret
= qcom_glink_tx(glink
, &req
, req_len
, NULL
, 0, true);
373 spin_lock_irqsave(&glink
->idr_lock
, flags
);
374 idr_remove(&glink
->lcids
, channel
->lcid
);
376 spin_unlock_irqrestore(&glink
->idr_lock
, flags
);
381 static void qcom_glink_send_close_req(struct qcom_glink
*glink
,
382 struct glink_channel
*channel
)
384 struct glink_msg req
;
386 req
.cmd
= cpu_to_le16(RPM_CMD_CLOSE
);
387 req
.param1
= cpu_to_le16(channel
->lcid
);
390 qcom_glink_tx(glink
, &req
, sizeof(req
), NULL
, 0, true);
393 static void qcom_glink_send_close_ack(struct qcom_glink
*glink
,
396 struct glink_msg req
;
398 req
.cmd
= cpu_to_le16(RPM_CMD_CLOSE_ACK
);
399 req
.param1
= cpu_to_le16(rcid
);
402 qcom_glink_tx(glink
, &req
, sizeof(req
), NULL
, 0, true);
406 * qcom_glink_receive_version() - receive version/features from remote system
408 * @glink: pointer to transport interface
409 * @r_version: remote version
410 * @r_features: remote features
412 * This function is called in response to a remote-initiated version/feature
413 * negotiation sequence.
415 static void qcom_glink_receive_version(struct qcom_glink
*glink
,
422 case GLINK_VERSION_1
:
423 glink
->features
&= features
;
426 qcom_glink_send_version_ack(glink
);
432 * qcom_glink_receive_version_ack() - receive negotiation ack from remote system
434 * @glink: pointer to transport interface
435 * @r_version: remote version response
436 * @r_features: remote features response
438 * This function is called in response to a local-initiated version/feature
439 * negotiation sequence and is the counter-offer from the remote side based
440 * upon the initial version and feature set requested.
442 static void qcom_glink_receive_version_ack(struct qcom_glink
*glink
,
448 /* Version negotiation failed */
450 case GLINK_VERSION_1
:
451 if (features
== glink
->features
)
454 glink
->features
&= features
;
457 qcom_glink_send_version(glink
);
463 * qcom_glink_send_intent_req_ack() - convert an rx intent request ack cmd to
464 wire format and transmit
465 * @glink: The transport to transmit on.
466 * @channel: The glink channel
467 * @granted: The request response to encode.
469 * Return: 0 on success or standard Linux error code.
471 static int qcom_glink_send_intent_req_ack(struct qcom_glink
*glink
,
472 struct glink_channel
*channel
,
475 struct glink_msg msg
;
477 msg
.cmd
= cpu_to_le16(RPM_CMD_RX_INTENT_REQ_ACK
);
478 msg
.param1
= cpu_to_le16(channel
->lcid
);
479 msg
.param2
= cpu_to_le32(granted
);
481 qcom_glink_tx(glink
, &msg
, sizeof(msg
), NULL
, 0, true);
487 * qcom_glink_advertise_intent - convert an rx intent cmd to wire format and
489 * @glink: The transport to transmit on.
490 * @channel: The local channel
491 * @size: The intent to pass on to remote.
493 * Return: 0 on success or standard Linux error code.
495 static int qcom_glink_advertise_intent(struct qcom_glink
*glink
,
496 struct glink_channel
*channel
,
497 struct glink_core_rx_intent
*intent
)
508 cmd
.id
= cpu_to_le16(RPM_CMD_INTENT
);
509 cmd
.lcid
= cpu_to_le16(channel
->lcid
);
510 cmd
.count
= cpu_to_le32(1);
511 cmd
.size
= cpu_to_le32(intent
->size
);
512 cmd
.liid
= cpu_to_le32(intent
->id
);
514 qcom_glink_tx(glink
, &cmd
, sizeof(cmd
), NULL
, 0, true);
519 static struct glink_core_rx_intent
*
520 qcom_glink_alloc_intent(struct qcom_glink
*glink
,
521 struct glink_channel
*channel
,
525 struct glink_core_rx_intent
*intent
;
529 intent
= kzalloc(sizeof(*intent
), GFP_KERNEL
);
534 intent
->data
= kzalloc(size
, GFP_KERNEL
);
538 spin_lock_irqsave(&channel
->intent_lock
, flags
);
539 ret
= idr_alloc_cyclic(&channel
->liids
, intent
, 1, -1, GFP_ATOMIC
);
541 spin_unlock_irqrestore(&channel
->intent_lock
, flags
);
544 spin_unlock_irqrestore(&channel
->intent_lock
, flags
);
548 intent
->reuse
= reuseable
;
554 * qcom_glink_handle_intent_req() - Receive a request for rx_intent
556 * if_ptr: Pointer to the transport interface
557 * rcid: Remote channel ID
558 * size: size of the intent
560 * The function searches for the local channel to which the request for
561 * rx_intent has arrived and allocates and notifies the remote back
563 static void qcom_glink_handle_intent_req(struct qcom_glink
*glink
,
564 u32 cid
, size_t size
)
566 struct glink_core_rx_intent
*intent
;
567 struct glink_channel
*channel
;
570 spin_lock_irqsave(&glink
->idr_lock
, flags
);
571 channel
= idr_find(&glink
->rcids
, cid
);
572 spin_unlock_irqrestore(&glink
->idr_lock
, flags
);
575 pr_err("%s channel not found for cid %d\n", __func__
, cid
);
579 intent
= qcom_glink_alloc_intent(glink
, channel
, size
, false);
581 qcom_glink_advertise_intent(glink
, channel
, intent
);
583 qcom_glink_send_intent_req_ack(glink
, channel
, !!intent
);
586 static int qcom_glink_rx_defer(struct qcom_glink
*glink
, size_t extra
)
588 struct glink_defer_cmd
*dcmd
;
590 extra
= ALIGN(extra
, 8);
592 if (qcom_glink_rx_avail(glink
) < sizeof(struct glink_msg
) + extra
) {
593 dev_dbg(glink
->dev
, "Insufficient data in rx fifo");
597 dcmd
= kzalloc(sizeof(*dcmd
) + extra
, GFP_ATOMIC
);
601 INIT_LIST_HEAD(&dcmd
->node
);
603 qcom_glink_rx_peak(glink
, &dcmd
->msg
, 0, sizeof(dcmd
->msg
) + extra
);
605 spin_lock(&glink
->rx_lock
);
606 list_add_tail(&dcmd
->node
, &glink
->rx_queue
);
607 spin_unlock(&glink
->rx_lock
);
609 schedule_work(&glink
->rx_work
);
610 qcom_glink_rx_advance(glink
, sizeof(dcmd
->msg
) + extra
);
615 static int qcom_glink_rx_data(struct qcom_glink
*glink
, size_t avail
)
617 struct glink_core_rx_intent
*intent
;
618 struct glink_channel
*channel
;
620 struct glink_msg msg
;
624 unsigned int chunk_size
;
625 unsigned int left_size
;
631 if (avail
< sizeof(hdr
)) {
632 dev_dbg(glink
->dev
, "Not enough data in fifo\n");
636 qcom_glink_rx_peak(glink
, &hdr
, 0, sizeof(hdr
));
637 chunk_size
= le32_to_cpu(hdr
.chunk_size
);
638 left_size
= le32_to_cpu(hdr
.left_size
);
640 if (avail
< sizeof(hdr
) + chunk_size
) {
641 dev_dbg(glink
->dev
, "Payload not yet in fifo\n");
645 if (WARN(chunk_size
% 4, "Incoming data must be word aligned\n"))
648 rcid
= le16_to_cpu(hdr
.msg
.param1
);
649 spin_lock_irqsave(&glink
->idr_lock
, flags
);
650 channel
= idr_find(&glink
->rcids
, rcid
);
651 spin_unlock_irqrestore(&glink
->idr_lock
, flags
);
653 dev_dbg(glink
->dev
, "Data on non-existing channel\n");
655 /* Drop the message */
659 if (glink
->intentless
) {
660 /* Might have an ongoing, fragmented, message to append */
662 intent
= kzalloc(sizeof(*intent
), GFP_ATOMIC
);
666 intent
->data
= kmalloc(chunk_size
+ left_size
,
673 intent
->id
= 0xbabababa;
674 intent
->size
= chunk_size
+ left_size
;
677 channel
->buf
= intent
;
679 intent
= channel
->buf
;
682 liid
= le32_to_cpu(hdr
.msg
.param2
);
684 spin_lock_irqsave(&channel
->intent_lock
, flags
);
685 intent
= idr_find(&channel
->liids
, liid
);
686 spin_unlock_irqrestore(&channel
->intent_lock
, flags
);
690 "no intent found for channel %s intent %d",
691 channel
->name
, liid
);
696 if (intent
->size
- intent
->offset
< chunk_size
) {
697 dev_err(glink
->dev
, "Insufficient space in intent\n");
699 /* The packet header lied, drop payload */
703 qcom_glink_rx_peak(glink
, intent
->data
+ intent
->offset
,
704 sizeof(hdr
), chunk_size
);
705 intent
->offset
+= chunk_size
;
707 /* Handle message when no fragments remain to be received */
709 spin_lock(&channel
->recv_lock
);
710 if (channel
->ept
.cb
) {
711 channel
->ept
.cb(channel
->ept
.rpdev
,
717 spin_unlock(&channel
->recv_lock
);
724 qcom_glink_rx_advance(glink
, ALIGN(sizeof(hdr
) + chunk_size
, 8));
729 static int qcom_glink_rx_open_ack(struct qcom_glink
*glink
, unsigned int lcid
)
731 struct glink_channel
*channel
;
733 spin_lock(&glink
->idr_lock
);
734 channel
= idr_find(&glink
->lcids
, lcid
);
736 dev_err(glink
->dev
, "Invalid open ack packet\n");
739 spin_unlock(&glink
->idr_lock
);
741 complete(&channel
->open_ack
);
746 static irqreturn_t
qcom_glink_native_intr(int irq
, void *data
)
748 struct qcom_glink
*glink
= data
;
749 struct glink_msg msg
;
757 avail
= qcom_glink_rx_avail(glink
);
758 if (avail
< sizeof(msg
))
761 qcom_glink_rx_peak(glink
, &msg
, 0, sizeof(msg
));
763 cmd
= le16_to_cpu(msg
.cmd
);
764 param1
= le16_to_cpu(msg
.param1
);
765 param2
= le32_to_cpu(msg
.param2
);
768 case RPM_CMD_VERSION
:
769 case RPM_CMD_VERSION_ACK
:
771 case RPM_CMD_CLOSE_ACK
:
772 case RPM_CMD_RX_INTENT_REQ
:
773 ret
= qcom_glink_rx_defer(glink
, 0);
775 case RPM_CMD_OPEN_ACK
:
776 ret
= qcom_glink_rx_open_ack(glink
, param1
);
777 qcom_glink_rx_advance(glink
, ALIGN(sizeof(msg
), 8));
780 ret
= qcom_glink_rx_defer(glink
, param2
);
782 case RPM_CMD_TX_DATA
:
783 case RPM_CMD_TX_DATA_CONT
:
784 ret
= qcom_glink_rx_data(glink
, avail
);
786 case RPM_CMD_READ_NOTIF
:
787 qcom_glink_rx_advance(glink
, ALIGN(sizeof(msg
), 8));
789 mbox_send_message(glink
->mbox_chan
, NULL
);
790 mbox_client_txdone(glink
->mbox_chan
, 0);
795 dev_err(glink
->dev
, "unhandled rx cmd: %d\n", cmd
);
807 /* Locally initiated rpmsg_create_ept */
808 static struct glink_channel
*qcom_glink_create_local(struct qcom_glink
*glink
,
811 struct glink_channel
*channel
;
815 channel
= qcom_glink_alloc_channel(glink
, name
);
817 return ERR_CAST(channel
);
819 ret
= qcom_glink_send_open_req(glink
, channel
);
821 goto release_channel
;
823 ret
= wait_for_completion_timeout(&channel
->open_ack
, 5 * HZ
);
827 ret
= wait_for_completion_timeout(&channel
->open_req
, 5 * HZ
);
831 qcom_glink_send_open_ack(glink
, channel
);
836 /* qcom_glink_send_open_req() did register the channel in lcids*/
837 spin_lock_irqsave(&glink
->idr_lock
, flags
);
838 idr_remove(&glink
->lcids
, channel
->lcid
);
839 spin_unlock_irqrestore(&glink
->idr_lock
, flags
);
842 /* Release qcom_glink_send_open_req() reference */
843 kref_put(&channel
->refcount
, qcom_glink_channel_release
);
844 /* Release qcom_glink_alloc_channel() reference */
845 kref_put(&channel
->refcount
, qcom_glink_channel_release
);
847 return ERR_PTR(-ETIMEDOUT
);
850 /* Remote initiated rpmsg_create_ept */
851 static int qcom_glink_create_remote(struct qcom_glink
*glink
,
852 struct glink_channel
*channel
)
856 qcom_glink_send_open_ack(glink
, channel
);
858 ret
= qcom_glink_send_open_req(glink
, channel
);
862 ret
= wait_for_completion_timeout(&channel
->open_ack
, 5 * HZ
);
872 * Send a close request to "undo" our open-ack. The close-ack will
873 * release the last reference.
875 qcom_glink_send_close_req(glink
, channel
);
877 /* Release qcom_glink_send_open_req() reference */
878 kref_put(&channel
->refcount
, qcom_glink_channel_release
);
883 static struct rpmsg_endpoint
*qcom_glink_create_ept(struct rpmsg_device
*rpdev
,
886 struct rpmsg_channel_info
889 struct glink_channel
*parent
= to_glink_channel(rpdev
->ept
);
890 struct glink_channel
*channel
;
891 struct qcom_glink
*glink
= parent
->glink
;
892 struct rpmsg_endpoint
*ept
;
893 const char *name
= chinfo
.name
;
898 spin_lock_irqsave(&glink
->idr_lock
, flags
);
899 idr_for_each_entry(&glink
->rcids
, channel
, cid
) {
900 if (!strcmp(channel
->name
, name
))
903 spin_unlock_irqrestore(&glink
->idr_lock
, flags
);
906 channel
= qcom_glink_create_local(glink
, name
);
910 ret
= qcom_glink_create_remote(glink
, channel
);
919 ept
->ops
= &glink_endpoint_ops
;
924 static void qcom_glink_destroy_ept(struct rpmsg_endpoint
*ept
)
926 struct glink_channel
*channel
= to_glink_channel(ept
);
927 struct qcom_glink
*glink
= channel
->glink
;
930 spin_lock_irqsave(&channel
->recv_lock
, flags
);
931 channel
->ept
.cb
= NULL
;
932 spin_unlock_irqrestore(&channel
->recv_lock
, flags
);
934 /* Decouple the potential rpdev from the channel */
935 channel
->rpdev
= NULL
;
937 qcom_glink_send_close_req(glink
, channel
);
940 static int __qcom_glink_send(struct glink_channel
*channel
,
941 void *data
, int len
, bool wait
)
943 struct qcom_glink
*glink
= channel
->glink
;
945 struct glink_msg msg
;
950 req
.msg
.cmd
= cpu_to_le16(RPM_CMD_TX_DATA
);
951 req
.msg
.param1
= cpu_to_le16(channel
->lcid
);
952 req
.msg
.param2
= cpu_to_le32(channel
->rcid
);
953 req
.chunk_size
= cpu_to_le32(len
);
954 req
.left_size
= cpu_to_le32(0);
956 return qcom_glink_tx(glink
, &req
, sizeof(req
), data
, len
, wait
);
959 static int qcom_glink_send(struct rpmsg_endpoint
*ept
, void *data
, int len
)
961 struct glink_channel
*channel
= to_glink_channel(ept
);
963 return __qcom_glink_send(channel
, data
, len
, true);
966 static int qcom_glink_trysend(struct rpmsg_endpoint
*ept
, void *data
, int len
)
968 struct glink_channel
*channel
= to_glink_channel(ept
);
970 return __qcom_glink_send(channel
, data
, len
, false);
974 * Finds the device_node for the glink child interested in this channel.
976 static struct device_node
*qcom_glink_match_channel(struct device_node
*node
,
979 struct device_node
*child
;
984 for_each_available_child_of_node(node
, child
) {
985 key
= "qcom,glink-channels";
986 ret
= of_property_read_string(child
, key
, &name
);
990 if (strcmp(name
, channel
) == 0)
997 static const struct rpmsg_device_ops glink_device_ops
= {
998 .create_ept
= qcom_glink_create_ept
,
1001 static const struct rpmsg_endpoint_ops glink_endpoint_ops
= {
1002 .destroy_ept
= qcom_glink_destroy_ept
,
1003 .send
= qcom_glink_send
,
1004 .trysend
= qcom_glink_trysend
,
1007 static void qcom_glink_rpdev_release(struct device
*dev
)
1009 struct rpmsg_device
*rpdev
= to_rpmsg_device(dev
);
1010 struct glink_channel
*channel
= to_glink_channel(rpdev
->ept
);
1012 channel
->rpdev
= NULL
;
1016 static int qcom_glink_rx_open(struct qcom_glink
*glink
, unsigned int rcid
,
1019 struct glink_channel
*channel
;
1020 struct rpmsg_device
*rpdev
;
1021 bool create_device
= false;
1022 struct device_node
*node
;
1025 unsigned long flags
;
1027 spin_lock_irqsave(&glink
->idr_lock
, flags
);
1028 idr_for_each_entry(&glink
->lcids
, channel
, lcid
) {
1029 if (!strcmp(channel
->name
, name
))
1032 spin_unlock_irqrestore(&glink
->idr_lock
, flags
);
1035 channel
= qcom_glink_alloc_channel(glink
, name
);
1036 if (IS_ERR(channel
))
1037 return PTR_ERR(channel
);
1039 /* The opening dance was initiated by the remote */
1040 create_device
= true;
1043 spin_lock_irqsave(&glink
->idr_lock
, flags
);
1044 ret
= idr_alloc(&glink
->rcids
, channel
, rcid
, rcid
+ 1, GFP_ATOMIC
);
1046 dev_err(glink
->dev
, "Unable to insert channel into rcid list\n");
1047 spin_unlock_irqrestore(&glink
->idr_lock
, flags
);
1050 channel
->rcid
= ret
;
1051 spin_unlock_irqrestore(&glink
->idr_lock
, flags
);
1053 complete(&channel
->open_req
);
1055 if (create_device
) {
1056 rpdev
= kzalloc(sizeof(*rpdev
), GFP_KERNEL
);
1062 rpdev
->ept
= &channel
->ept
;
1063 strncpy(rpdev
->id
.name
, name
, RPMSG_NAME_SIZE
);
1064 rpdev
->src
= RPMSG_ADDR_ANY
;
1065 rpdev
->dst
= RPMSG_ADDR_ANY
;
1066 rpdev
->ops
= &glink_device_ops
;
1068 node
= qcom_glink_match_channel(glink
->dev
->of_node
, name
);
1069 rpdev
->dev
.of_node
= node
;
1070 rpdev
->dev
.parent
= glink
->dev
;
1071 rpdev
->dev
.release
= qcom_glink_rpdev_release
;
1073 ret
= rpmsg_register_device(rpdev
);
1077 channel
->rpdev
= rpdev
;
1085 spin_lock_irqsave(&glink
->idr_lock
, flags
);
1086 idr_remove(&glink
->rcids
, channel
->rcid
);
1088 spin_unlock_irqrestore(&glink
->idr_lock
, flags
);
1090 /* Release the reference, iff we took it */
1092 kref_put(&channel
->refcount
, qcom_glink_channel_release
);
1097 static void qcom_glink_rx_close(struct qcom_glink
*glink
, unsigned int rcid
)
1099 struct rpmsg_channel_info chinfo
;
1100 struct glink_channel
*channel
;
1101 unsigned long flags
;
1103 spin_lock_irqsave(&glink
->idr_lock
, flags
);
1104 channel
= idr_find(&glink
->rcids
, rcid
);
1105 spin_unlock_irqrestore(&glink
->idr_lock
, flags
);
1106 if (WARN(!channel
, "close request on unknown channel\n"))
1109 if (channel
->rpdev
) {
1110 strncpy(chinfo
.name
, channel
->name
, sizeof(chinfo
.name
));
1111 chinfo
.src
= RPMSG_ADDR_ANY
;
1112 chinfo
.dst
= RPMSG_ADDR_ANY
;
1114 rpmsg_unregister_device(glink
->dev
, &chinfo
);
1117 qcom_glink_send_close_ack(glink
, channel
->rcid
);
1119 spin_lock_irqsave(&glink
->idr_lock
, flags
);
1120 idr_remove(&glink
->rcids
, channel
->rcid
);
1122 spin_unlock_irqrestore(&glink
->idr_lock
, flags
);
1124 kref_put(&channel
->refcount
, qcom_glink_channel_release
);
1127 static void qcom_glink_rx_close_ack(struct qcom_glink
*glink
, unsigned int lcid
)
1129 struct glink_channel
*channel
;
1130 unsigned long flags
;
1132 spin_lock_irqsave(&glink
->idr_lock
, flags
);
1133 channel
= idr_find(&glink
->lcids
, lcid
);
1134 if (WARN(!channel
, "close ack on unknown channel\n")) {
1135 spin_unlock_irqrestore(&glink
->idr_lock
, flags
);
1139 idr_remove(&glink
->lcids
, channel
->lcid
);
1141 spin_unlock_irqrestore(&glink
->idr_lock
, flags
);
1143 kref_put(&channel
->refcount
, qcom_glink_channel_release
);
1146 static void qcom_glink_work(struct work_struct
*work
)
1148 struct qcom_glink
*glink
= container_of(work
, struct qcom_glink
,
1150 struct glink_defer_cmd
*dcmd
;
1151 struct glink_msg
*msg
;
1152 unsigned long flags
;
1153 unsigned int param1
;
1154 unsigned int param2
;
1158 spin_lock_irqsave(&glink
->rx_lock
, flags
);
1159 if (list_empty(&glink
->rx_queue
)) {
1160 spin_unlock_irqrestore(&glink
->rx_lock
, flags
);
1163 dcmd
= list_first_entry(&glink
->rx_queue
,
1164 struct glink_defer_cmd
, node
);
1165 list_del(&dcmd
->node
);
1166 spin_unlock_irqrestore(&glink
->rx_lock
, flags
);
1169 cmd
= le16_to_cpu(msg
->cmd
);
1170 param1
= le16_to_cpu(msg
->param1
);
1171 param2
= le32_to_cpu(msg
->param2
);
1174 case RPM_CMD_VERSION
:
1175 qcom_glink_receive_version(glink
, param1
, param2
);
1177 case RPM_CMD_VERSION_ACK
:
1178 qcom_glink_receive_version_ack(glink
, param1
, param2
);
1181 qcom_glink_rx_open(glink
, param1
, msg
->data
);
1184 qcom_glink_rx_close(glink
, param1
);
1186 case RPM_CMD_CLOSE_ACK
:
1187 qcom_glink_rx_close_ack(glink
, param1
);
1189 case RPM_CMD_RX_INTENT_REQ
:
1190 qcom_glink_handle_intent_req(glink
, param1
, param2
);
1193 WARN(1, "Unknown defer object %d\n", cmd
);
1201 struct qcom_glink
*qcom_glink_native_probe(struct device
*dev
,
1202 unsigned long features
,
1203 struct qcom_glink_pipe
*rx
,
1204 struct qcom_glink_pipe
*tx
,
1209 struct qcom_glink
*glink
;
1211 glink
= devm_kzalloc(dev
, sizeof(*glink
), GFP_KERNEL
);
1213 return ERR_PTR(-ENOMEM
);
1216 glink
->tx_pipe
= tx
;
1217 glink
->rx_pipe
= rx
;
1219 glink
->features
= features
;
1220 glink
->intentless
= intentless
;
1222 mutex_init(&glink
->tx_lock
);
1223 spin_lock_init(&glink
->rx_lock
);
1224 INIT_LIST_HEAD(&glink
->rx_queue
);
1225 INIT_WORK(&glink
->rx_work
, qcom_glink_work
);
1227 spin_lock_init(&glink
->idr_lock
);
1228 idr_init(&glink
->lcids
);
1229 idr_init(&glink
->rcids
);
1231 glink
->mbox_client
.dev
= dev
;
1232 glink
->mbox_chan
= mbox_request_channel(&glink
->mbox_client
, 0);
1233 if (IS_ERR(glink
->mbox_chan
)) {
1234 if (PTR_ERR(glink
->mbox_chan
) != -EPROBE_DEFER
)
1235 dev_err(dev
, "failed to acquire IPC channel\n");
1236 return ERR_CAST(glink
->mbox_chan
);
1239 irq
= of_irq_get(dev
->of_node
, 0);
1240 ret
= devm_request_irq(dev
, irq
,
1241 qcom_glink_native_intr
,
1242 IRQF_NO_SUSPEND
| IRQF_SHARED
,
1243 "glink-native", glink
);
1245 dev_err(dev
, "failed to request IRQ\n");
1246 return ERR_PTR(ret
);
1251 ret
= qcom_glink_send_version(glink
);
1253 return ERR_PTR(ret
);
/* device_for_each_child() helper: unregister one child rpmsg device */
static int qcom_glink_remove_device(struct device *dev, void *data)
{
	device_unregister(dev);

	return 0;
}
1265 void qcom_glink_native_remove(struct qcom_glink
*glink
)
1267 struct glink_channel
*channel
;
1270 unsigned long flags
;
1272 disable_irq(glink
->irq
);
1273 cancel_work_sync(&glink
->rx_work
);
1275 ret
= device_for_each_child(glink
->dev
, NULL
, qcom_glink_remove_device
);
1277 dev_warn(glink
->dev
, "Can't remove GLINK devices: %d\n", ret
);
1279 spin_lock_irqsave(&glink
->idr_lock
, flags
);
1280 /* Release any defunct local channels, waiting for close-ack */
1281 idr_for_each_entry(&glink
->lcids
, channel
, cid
)
1282 kref_put(&channel
->refcount
, qcom_glink_channel_release
);
1284 idr_destroy(&glink
->lcids
);
1285 idr_destroy(&glink
->rcids
);
1286 spin_unlock_irqrestore(&glink
->idr_lock
, flags
);
1287 mbox_free_channel(glink
->mbox_chan
);
1290 void qcom_glink_native_unregister(struct qcom_glink
*glink
)
1292 device_unregister(glink
->dev
);