/*
 * Copyright (c) 2016-2017, Linaro Ltd
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/rpmsg.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/mailbox_client.h>

#include "rpmsg_internal.h"
#include "qcom_glink_native.h"

#define GLINK_NAME_SIZE 32

#define RPM_GLINK_CID_MIN 1
#define RPM_GLINK_CID_MAX 65536

struct glink_msg {
	__le16 cmd;
	__le16 param1;
	__le32 param2;
	u8 data[];
} __packed;

/**
 * struct glink_defer_cmd - deferred incoming control message
 * @node: list node
 * @msg: message header
 * @data: payload of the message
 *
 * Copy of a received control message, to be added to @rx_queue and processed
 * by @rx_work of @qcom_glink.
 */
struct glink_defer_cmd {
	struct list_head node;

	struct glink_msg msg;
	u8 data[];
};

/**
 * struct qcom_glink - driver context, relates to one remote subsystem
 * @dev: reference to the associated struct device
 * @mbox_client: mailbox client
 * @mbox_chan: mailbox channel
 * @rx_pipe: pipe object for receive FIFO
 * @tx_pipe: pipe object for transmit FIFO
 * @irq: IRQ for signaling incoming events
 * @rx_work: worker for handling received control messages
 * @rx_lock: protects the @rx_queue
 * @rx_queue: queue of received control messages to be processed in @rx_work
 * @tx_lock: synchronizes operations on the tx fifo
 * @idr_lock: synchronizes @lcids and @rcids modifications
 * @lcids: idr of all channels with a known local channel id
 * @rcids: idr of all channels with a known remote channel id
 */
struct qcom_glink {
	struct device *dev;

	struct mbox_client mbox_client;
	struct mbox_chan *mbox_chan;

	struct qcom_glink_pipe *rx_pipe;
	struct qcom_glink_pipe *tx_pipe;

	int irq;

	struct work_struct rx_work;
	spinlock_t rx_lock;
	struct list_head rx_queue;

	struct mutex tx_lock;

	struct mutex idr_lock;
	struct idr lcids;
	struct idr rcids;
};

enum {
	GLINK_STATE_CLOSED,
	GLINK_STATE_OPENING,
	GLINK_STATE_OPEN,
	GLINK_STATE_CLOSING,
};

/**
 * struct glink_channel - internal representation of a channel
 * @rpdev: rpdev reference, only used for primary endpoints
 * @ept: rpmsg endpoint this channel is associated with
 * @glink: qcom_glink context handle
 * @refcount: refcount for the channel object
 * @recv_lock: guard for @ept.cb
 * @name: unique channel name/identifier
 * @lcid: channel id, in local space
 * @rcid: channel id, in remote space
 * @buf: receive buffer, for gathering fragments
 * @buf_offset: write offset in @buf
 * @buf_size: size of current @buf
 * @open_ack: completed once remote has acked the open-request
 * @open_req: completed once open-request has been received
 */
struct glink_channel {
	struct rpmsg_endpoint ept;

	struct rpmsg_device *rpdev;
	struct qcom_glink *glink;

	struct kref refcount;

	spinlock_t recv_lock;

	char *name;
	unsigned int lcid;
	unsigned int rcid;

	void *buf;
	int buf_offset;
	int buf_size;

	struct completion open_ack;
	struct completion open_req;
};

#define to_glink_channel(_ept) container_of(_ept, struct glink_channel, ept)

static const struct rpmsg_endpoint_ops glink_endpoint_ops;

#define RPM_CMD_VERSION 0
#define RPM_CMD_VERSION_ACK 1
#define RPM_CMD_OPEN 2
#define RPM_CMD_CLOSE 3
#define RPM_CMD_OPEN_ACK 4
#define RPM_CMD_TX_DATA 9
#define RPM_CMD_CLOSE_ACK 11
#define RPM_CMD_TX_DATA_CONT 12
#define RPM_CMD_READ_NOTIF 13

#define GLINK_FEATURE_INTENTLESS BIT(1)

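/*
 * Allocate and initialize a glink_channel for the given channel name. The
 * returned channel holds an initial reference; release it with kref_put()
 * against qcom_glink_channel_release().
 */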
static struct glink_channel *qcom_glink_alloc_channel(struct qcom_glink *glink,
						       const char *name)
{
	struct glink_channel *channel;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return ERR_PTR(-ENOMEM);

	/* Set up glink internal glink_channel data */
	spin_lock_init(&channel->recv_lock);
	channel->glink = glink;
	channel->name = kstrdup(name, GFP_KERNEL);

	init_completion(&channel->open_req);
	init_completion(&channel->open_ack);

	kref_init(&channel->refcount);

	return channel;
}

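/* Final kref release for a glink_channel; frees the name and the channel itself */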
static void qcom_glink_channel_release(struct kref *ref)
{
	struct glink_channel *channel = container_of(ref, struct glink_channel,
						     refcount);

	kfree(channel->name);
	kfree(channel);
}

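/*
 * Thin wrappers around the transport-provided qcom_glink_pipe operations,
 * used to query and move data through the rx and tx FIFOs.
 */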
static size_t qcom_glink_rx_avail(struct qcom_glink *glink)
{
	return glink->rx_pipe->avail(glink->rx_pipe);
}

static void qcom_glink_rx_peak(struct qcom_glink *glink,
			       void *data, size_t count)
{
	glink->rx_pipe->peak(glink->rx_pipe, data, count);
}

static void qcom_glink_rx_advance(struct qcom_glink *glink, size_t count)
{
	glink->rx_pipe->advance(glink->rx_pipe, count);
}

static size_t qcom_glink_tx_avail(struct qcom_glink *glink)
{
	return glink->tx_pipe->avail(glink->tx_pipe);
}

static void qcom_glink_tx_write(struct qcom_glink *glink,
				const void *hdr, size_t hlen,
				const void *data, size_t dlen)
{
	glink->tx_pipe->write(glink->tx_pipe, hdr, hlen, data, dlen);
}

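/*
 * Write a command (hdr) and optional payload (data) to the tx FIFO under
 * tx_lock and signal the remote through the mailbox. If @wait is set, sleep
 * until enough FIFO space is available; otherwise fail with -ENOMEM.
 */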
static int qcom_glink_tx(struct qcom_glink *glink,
			 const void *hdr, size_t hlen,
			 const void *data, size_t dlen, bool wait)
{
	unsigned int tlen = hlen + dlen;
	int ret;

	/* Reject packets that are too big */
	if (tlen >= glink->tx_pipe->length)
		return -EINVAL;

	ret = mutex_lock_interruptible(&glink->tx_lock);
	if (ret)
		return ret;

	while (qcom_glink_tx_avail(glink) < tlen) {
		if (!wait) {
			ret = -ENOMEM;
			goto out;
		}

		usleep_range(10000, 15000);
	}

	qcom_glink_tx_write(glink, hdr, hlen, data, dlen);

	mbox_send_message(glink->mbox_chan, NULL);
	mbox_client_txdone(glink->mbox_chan, 0);

out:
	mutex_unlock(&glink->tx_lock);

	return ret;
}

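/*
 * Kick off version negotiation by advertising local version 1 and the
 * "intentless" feature; the remote is expected to reply with a VERSION_ACK
 * (or its own VERSION command).
 */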
static int qcom_glink_send_version(struct qcom_glink *glink)
{
	struct glink_msg msg;

	msg.cmd = cpu_to_le16(RPM_CMD_VERSION);
	msg.param1 = cpu_to_le16(1);
	msg.param2 = cpu_to_le32(GLINK_FEATURE_INTENTLESS);

	return qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
}

static void qcom_glink_send_version_ack(struct qcom_glink *glink)
{
	struct glink_msg msg;

	msg.cmd = cpu_to_le16(RPM_CMD_VERSION_ACK);
	msg.param1 = cpu_to_le16(1);
	msg.param2 = cpu_to_le32(0);

	qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
}

static void qcom_glink_send_open_ack(struct qcom_glink *glink,
				     struct glink_channel *channel)
{
	struct glink_msg msg;

	msg.cmd = cpu_to_le16(RPM_CMD_OPEN_ACK);
	msg.param1 = cpu_to_le16(channel->rcid);
	msg.param2 = cpu_to_le32(0);

	qcom_glink_tx(glink, &msg, sizeof(msg), NULL, 0, true);
}

/**
 * qcom_glink_send_open_req() - send an RPM_CMD_OPEN request to the remote
 * @glink: Ptr to the glink edge
 * @channel: Ptr to the channel for which the open request is sent
 *
 * Allocates a local channel id and sends an RPM_CMD_OPEN message to the
 * remote. Will return with refcount held, regardless of outcome.
 *
 * Returns 0 on success, negative errno otherwise.
 */
static int qcom_glink_send_open_req(struct qcom_glink *glink,
				    struct glink_channel *channel)
{
	struct {
		struct glink_msg msg;
		u8 name[GLINK_NAME_SIZE];
	} __packed req;
	int name_len = strlen(channel->name) + 1;
	int req_len = ALIGN(sizeof(req.msg) + name_len, 8);
	int ret;

	kref_get(&channel->refcount);

	mutex_lock(&glink->idr_lock);
	ret = idr_alloc_cyclic(&glink->lcids, channel,
			       RPM_GLINK_CID_MIN, RPM_GLINK_CID_MAX,
			       GFP_KERNEL);
	mutex_unlock(&glink->idr_lock);
	if (ret < 0)
		return ret;

	channel->lcid = ret;

	req.msg.cmd = cpu_to_le16(RPM_CMD_OPEN);
	req.msg.param1 = cpu_to_le16(channel->lcid);
	req.msg.param2 = cpu_to_le32(name_len);
	strcpy(req.name, channel->name);

	ret = qcom_glink_tx(glink, &req, req_len, NULL, 0, true);
	if (ret)
		goto remove_idr;

	return 0;

remove_idr:
	mutex_lock(&glink->idr_lock);
	idr_remove(&glink->lcids, channel->lcid);
	channel->lcid = 0;
	mutex_unlock(&glink->idr_lock);

	return ret;
}

static void qcom_glink_send_close_req(struct qcom_glink *glink,
				      struct glink_channel *channel)
{
	struct glink_msg req;

	req.cmd = cpu_to_le16(RPM_CMD_CLOSE);
	req.param1 = cpu_to_le16(channel->lcid);
	req.param2 = 0;

	qcom_glink_tx(glink, &req, sizeof(req), NULL, 0, true);
}

static void qcom_glink_send_close_ack(struct qcom_glink *glink,
				      unsigned int rcid)
{
	struct glink_msg req;

	req.cmd = cpu_to_le16(RPM_CMD_CLOSE_ACK);
	req.param1 = cpu_to_le16(rcid);
	req.param2 = 0;

	qcom_glink_tx(glink, &req, sizeof(req), NULL, 0, true);
}

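/*
 * Copy a control message (plus @extra payload bytes) out of the rx FIFO into
 * a glink_defer_cmd, queue it on rx_queue and schedule rx_work so it can be
 * handled outside of interrupt context.
 */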
static int qcom_glink_rx_defer(struct qcom_glink *glink, size_t extra)
{
	struct glink_defer_cmd *dcmd;

	extra = ALIGN(extra, 8);

	if (qcom_glink_rx_avail(glink) < sizeof(struct glink_msg) + extra) {
		dev_dbg(glink->dev, "Insufficient data in rx fifo");
		return -ENXIO;
	}

	dcmd = kzalloc(sizeof(*dcmd) + extra, GFP_ATOMIC);
	if (!dcmd)
		return -ENOMEM;

	INIT_LIST_HEAD(&dcmd->node);

	qcom_glink_rx_peak(glink, &dcmd->msg, sizeof(dcmd->msg) + extra);

	spin_lock(&glink->rx_lock);
	list_add_tail(&dcmd->node, &glink->rx_queue);
	spin_unlock(&glink->rx_lock);

	schedule_work(&glink->rx_work);
	qcom_glink_rx_advance(glink, sizeof(dcmd->msg) + extra);

	return 0;
}

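/*
 * Handle an incoming RPM_CMD_TX_DATA or RPM_CMD_TX_DATA_CONT: allocate (or
 * reuse) the per-channel reassembly buffer, copy the chunk out of the rx FIFO
 * and, once no fragments remain (left_size == 0), hand the complete message
 * to the endpoint's rx callback.
 */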
static int qcom_glink_rx_data(struct qcom_glink *glink, size_t avail)
{
	struct glink_channel *channel;
	struct {
		struct glink_msg msg;
		__le32 chunk_size;
		__le32 left_size;
	} __packed hdr;
	unsigned int chunk_size;
	unsigned int left_size;
	unsigned int rcid;

	if (avail < sizeof(hdr)) {
		dev_dbg(glink->dev, "Not enough data in fifo\n");
		return -EAGAIN;
	}

	qcom_glink_rx_peak(glink, &hdr, sizeof(hdr));
	chunk_size = le32_to_cpu(hdr.chunk_size);
	left_size = le32_to_cpu(hdr.left_size);

	if (avail < sizeof(hdr) + chunk_size) {
		dev_dbg(glink->dev, "Payload not yet in fifo\n");
		return -EAGAIN;
	}

	if (WARN(chunk_size % 4, "Incoming data must be word aligned\n"))
		return -EINVAL;

	rcid = le16_to_cpu(hdr.msg.param1);
	channel = idr_find(&glink->rcids, rcid);
	if (!channel) {
		dev_dbg(glink->dev, "Data on non-existing channel\n");

		/* Drop the message */
		qcom_glink_rx_advance(glink,
				      ALIGN(sizeof(hdr) + chunk_size, 8));
		return 0;
	}

	/* Might have an ongoing, fragmented, message to append */
	if (!channel->buf) {
		channel->buf = kmalloc(chunk_size + left_size, GFP_ATOMIC);
		if (!channel->buf)
			return -ENOMEM;

		channel->buf_size = chunk_size + left_size;
		channel->buf_offset = 0;
	}

	qcom_glink_rx_advance(glink, sizeof(hdr));

	if (channel->buf_size - channel->buf_offset < chunk_size) {
		dev_err(glink->dev, "Insufficient space in input buffer\n");

		/* The packet header lied, drop payload */
		qcom_glink_rx_advance(glink, chunk_size);
		return -ENOMEM;
	}

	qcom_glink_rx_peak(glink, channel->buf + channel->buf_offset,
			   chunk_size);
	channel->buf_offset += chunk_size;

	/* Handle message when no fragments remain to be received */
	if (!left_size) {
		spin_lock(&channel->recv_lock);
		if (channel->ept.cb) {
			channel->ept.cb(channel->ept.rpdev,
					channel->buf,
					channel->buf_offset,
					channel->ept.priv,
					RPMSG_ADDR_ANY);
		}
		spin_unlock(&channel->recv_lock);

		kfree(channel->buf);
		channel->buf = NULL;
		channel->buf_size = 0;
	}

	/* Each message starts at 8 byte aligned address */
	qcom_glink_rx_advance(glink, ALIGN(chunk_size, 8));

	return 0;
}

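/* The remote acked our open request; wake up anyone waiting on open_ack. */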
static int qcom_glink_rx_open_ack(struct qcom_glink *glink, unsigned int lcid)
{
	struct glink_channel *channel;

	channel = idr_find(&glink->lcids, lcid);
	if (!channel) {
		dev_err(glink->dev, "Invalid open ack packet\n");
		return -EINVAL;
	}

	complete(&channel->open_ack);

	return 0;
}

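/*
 * Interrupt handler: drain the rx FIFO one command at a time. Data, open-ack
 * and read-notif commands are handled inline; version, open and close
 * commands are deferred to rx_work for processing in non-atomic context.
 */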
static irqreturn_t qcom_glink_native_intr(int irq, void *data)
{
	struct qcom_glink *glink = data;
	struct glink_msg msg;
	unsigned int param1;
	unsigned int param2;
	unsigned int avail;
	unsigned int cmd;
	int ret;

	for (;;) {
		avail = qcom_glink_rx_avail(glink);
		if (avail < sizeof(msg))
			break;

		qcom_glink_rx_peak(glink, &msg, sizeof(msg));

		cmd = le16_to_cpu(msg.cmd);
		param1 = le16_to_cpu(msg.param1);
		param2 = le32_to_cpu(msg.param2);

		switch (cmd) {
		case RPM_CMD_VERSION:
		case RPM_CMD_VERSION_ACK:
		case RPM_CMD_CLOSE:
		case RPM_CMD_CLOSE_ACK:
			ret = qcom_glink_rx_defer(glink, 0);
			break;
		case RPM_CMD_OPEN_ACK:
			ret = qcom_glink_rx_open_ack(glink, param1);
			qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));
			break;
		case RPM_CMD_OPEN:
			ret = qcom_glink_rx_defer(glink, param2);
			break;
		case RPM_CMD_TX_DATA:
		case RPM_CMD_TX_DATA_CONT:
			ret = qcom_glink_rx_data(glink, avail);
			break;
		case RPM_CMD_READ_NOTIF:
			qcom_glink_rx_advance(glink, ALIGN(sizeof(msg), 8));

			mbox_send_message(glink->mbox_chan, NULL);
			mbox_client_txdone(glink->mbox_chan, 0);

			ret = 0;
			break;
		default:
			dev_err(glink->dev, "unhandled rx cmd: %d\n", cmd);
			ret = -EINVAL;
			break;
		}

		if (ret)
			break;
	}

	return IRQ_HANDLED;
}

/* Locally initiated rpmsg_create_ept */
static struct glink_channel *qcom_glink_create_local(struct qcom_glink *glink,
						     const char *name)
{
	struct glink_channel *channel;
	int ret;

	channel = qcom_glink_alloc_channel(glink, name);
	if (IS_ERR(channel))
		return ERR_CAST(channel);

	ret = qcom_glink_send_open_req(glink, channel);
	if (ret)
		goto release_channel;

	ret = wait_for_completion_timeout(&channel->open_ack, 5 * HZ);
	if (!ret)
		goto err_timeout;

	ret = wait_for_completion_timeout(&channel->open_req, 5 * HZ);
	if (!ret)
		goto err_timeout;

	qcom_glink_send_open_ack(glink, channel);

	return channel;

err_timeout:
	/* qcom_glink_send_open_req() did register the channel in lcids */
	mutex_lock(&glink->idr_lock);
	idr_remove(&glink->lcids, channel->lcid);
	mutex_unlock(&glink->idr_lock);

release_channel:
	/* Release qcom_glink_send_open_req() reference */
	kref_put(&channel->refcount, qcom_glink_channel_release);
	/* Release qcom_glink_alloc_channel() reference */
	kref_put(&channel->refcount, qcom_glink_channel_release);

	return ERR_PTR(-ETIMEDOUT);
}

/* Remote initiated rpmsg_create_ept */
static int qcom_glink_create_remote(struct qcom_glink *glink,
				    struct glink_channel *channel)
{
	int ret;

	qcom_glink_send_open_ack(glink, channel);

	ret = qcom_glink_send_open_req(glink, channel);
	if (ret)
		goto close_link;

	ret = wait_for_completion_timeout(&channel->open_ack, 5 * HZ);
	if (!ret) {
		ret = -ETIMEDOUT;
		goto close_link;
	}

	return 0;

close_link:
	/*
	 * Send a close request to "undo" our open-ack. The close-ack will
	 * release the last reference.
	 */
	qcom_glink_send_close_req(glink, channel);

	/* Release qcom_glink_send_open_req() reference */
	kref_put(&channel->refcount, qcom_glink_channel_release);

	return ret;
}

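/*
 * rpmsg create_ept operation: reuse a remotely initiated channel with a
 * matching name if one exists in rcids, otherwise open a new channel towards
 * the remote, then wire up the endpoint callbacks.
 */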
static struct rpmsg_endpoint *qcom_glink_create_ept(struct rpmsg_device *rpdev,
						    rpmsg_rx_cb_t cb,
						    void *priv,
						    struct rpmsg_channel_info
						    chinfo)
{
	struct glink_channel *parent = to_glink_channel(rpdev->ept);
	struct glink_channel *channel;
	struct qcom_glink *glink = parent->glink;
	struct rpmsg_endpoint *ept;
	const char *name = chinfo.name;
	int cid;
	int ret;

	idr_for_each_entry(&glink->rcids, channel, cid) {
		if (!strcmp(channel->name, name))
			break;
	}

	if (!channel) {
		channel = qcom_glink_create_local(glink, name);
		if (IS_ERR(channel))
			return NULL;
	} else {
		ret = qcom_glink_create_remote(glink, channel);
		if (ret)
			return NULL;
	}

	ept = &channel->ept;
	ept->rpdev = rpdev;
	ept->cb = cb;
	ept->priv = priv;
	ept->ops = &glink_endpoint_ops;

	return ept;
}

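/*
 * rpmsg destroy_ept operation: detach the callback and rpdev from the channel
 * and ask the remote to close it; the final teardown happens when the
 * close-ack arrives.
 */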
static void qcom_glink_destroy_ept(struct rpmsg_endpoint *ept)
{
	struct glink_channel *channel = to_glink_channel(ept);
	struct qcom_glink *glink = channel->glink;
	unsigned long flags;

	spin_lock_irqsave(&channel->recv_lock, flags);
	channel->ept.cb = NULL;
	spin_unlock_irqrestore(&channel->recv_lock, flags);

	/* Decouple the potential rpdev from the channel */
	channel->rpdev = NULL;

	qcom_glink_send_close_req(glink, channel);
}

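/*
 * Transmit @len bytes on @channel as a single RPM_CMD_TX_DATA chunk (no
 * fragmentation on the transmit path, so left_size is always 0).
 */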
static int __qcom_glink_send(struct glink_channel *channel,
			     void *data, int len, bool wait)
{
	struct qcom_glink *glink = channel->glink;
	struct {
		struct glink_msg msg;
		__le32 chunk_size;
		__le32 left_size;
	} __packed req;

	req.msg.cmd = cpu_to_le16(RPM_CMD_TX_DATA);
	req.msg.param1 = cpu_to_le16(channel->lcid);
	req.msg.param2 = cpu_to_le32(channel->rcid);
	req.chunk_size = cpu_to_le32(len);
	req.left_size = cpu_to_le32(0);

	return qcom_glink_tx(glink, &req, sizeof(req), data, len, wait);
}

static int qcom_glink_send(struct rpmsg_endpoint *ept, void *data, int len)
{
	struct glink_channel *channel = to_glink_channel(ept);

	return __qcom_glink_send(channel, data, len, true);
}

static int qcom_glink_trysend(struct rpmsg_endpoint *ept, void *data, int len)
{
	struct glink_channel *channel = to_glink_channel(ept);

	return __qcom_glink_send(channel, data, len, false);
}

/*
 * Finds the device_node for the glink child interested in this channel.
 */
static struct device_node *qcom_glink_match_channel(struct device_node *node,
						    const char *channel)
{
	struct device_node *child;
	const char *name;
	const char *key;
	int ret;

	for_each_available_child_of_node(node, child) {
		key = "qcom,glink-channels";
		ret = of_property_read_string(child, key, &name);
		if (ret)
			continue;

		if (strcmp(name, channel) == 0)
			return child;
	}

	return NULL;
}

static const struct rpmsg_device_ops glink_device_ops = {
	.create_ept = qcom_glink_create_ept,
};

static const struct rpmsg_endpoint_ops glink_endpoint_ops = {
	.destroy_ept = qcom_glink_destroy_ept,
	.send = qcom_glink_send,
	.trysend = qcom_glink_trysend,
};

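/* Device release callback for rpmsg devices created in qcom_glink_rx_open() */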
static void qcom_glink_rpdev_release(struct device *dev)
{
	struct rpmsg_device *rpdev = to_rpmsg_device(dev);
	struct glink_channel *channel = to_glink_channel(rpdev->ept);

	channel->rpdev = NULL;
	kfree(rpdev);
}

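/*
 * Handle an RPM_CMD_OPEN from the remote: look up (or allocate) the channel,
 * record its remote channel id and complete open_req. If the open was
 * initiated by the remote, also register an rpmsg device so a driver can
 * bind to the channel.
 */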
static int qcom_glink_rx_open(struct qcom_glink *glink, unsigned int rcid,
			      char *name)
{
	struct glink_channel *channel;
	struct rpmsg_device *rpdev;
	bool create_device = false;
	struct device_node *node;
	int lcid;
	int ret;

	idr_for_each_entry(&glink->lcids, channel, lcid) {
		if (!strcmp(channel->name, name))
			break;
	}

	if (!channel) {
		channel = qcom_glink_alloc_channel(glink, name);
		if (IS_ERR(channel))
			return PTR_ERR(channel);

		/* The opening dance was initiated by the remote */
		create_device = true;
	}

	mutex_lock(&glink->idr_lock);
	ret = idr_alloc(&glink->rcids, channel, rcid, rcid + 1, GFP_KERNEL);
	if (ret < 0) {
		dev_err(glink->dev, "Unable to insert channel into rcid list\n");
		mutex_unlock(&glink->idr_lock);
		goto free_channel;
	}
	channel->rcid = ret;
	mutex_unlock(&glink->idr_lock);

	complete(&channel->open_req);

	if (create_device) {
		rpdev = kzalloc(sizeof(*rpdev), GFP_KERNEL);
		if (!rpdev) {
			ret = -ENOMEM;
			goto rcid_remove;
		}

		rpdev->ept = &channel->ept;
		strncpy(rpdev->id.name, name, RPMSG_NAME_SIZE);
		rpdev->src = RPMSG_ADDR_ANY;
		rpdev->dst = RPMSG_ADDR_ANY;
		rpdev->ops = &glink_device_ops;

		node = qcom_glink_match_channel(glink->dev->of_node, name);
		rpdev->dev.of_node = node;
		rpdev->dev.parent = glink->dev;
		rpdev->dev.release = qcom_glink_rpdev_release;

		ret = rpmsg_register_device(rpdev);
		if (ret)
			goto free_rpdev;

		channel->rpdev = rpdev;
	}

	return 0;

free_rpdev:
	kfree(rpdev);
rcid_remove:
	mutex_lock(&glink->idr_lock);
	idr_remove(&glink->rcids, channel->rcid);
	channel->rcid = 0;
	mutex_unlock(&glink->idr_lock);
free_channel:
	/* Release the reference, iff we took it */
	if (create_device)
		kref_put(&channel->refcount, qcom_glink_channel_release);

	return ret;
}

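/*
 * Handle a close request from the remote: unregister the associated rpmsg
 * device (if any), acknowledge the close, forget the remote channel id and
 * drop the reference held for it.
 */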
static void qcom_glink_rx_close(struct qcom_glink *glink, unsigned int rcid)
{
	struct rpmsg_channel_info chinfo;
	struct glink_channel *channel;

	channel = idr_find(&glink->rcids, rcid);
	if (WARN(!channel, "close request on unknown channel\n"))
		return;

	if (channel->rpdev) {
		strncpy(chinfo.name, channel->name, sizeof(chinfo.name));
		chinfo.src = RPMSG_ADDR_ANY;
		chinfo.dst = RPMSG_ADDR_ANY;

		rpmsg_unregister_device(glink->dev, &chinfo);
	}

	qcom_glink_send_close_ack(glink, channel->rcid);

	mutex_lock(&glink->idr_lock);
	idr_remove(&glink->rcids, channel->rcid);
	channel->rcid = 0;
	mutex_unlock(&glink->idr_lock);

	kref_put(&channel->refcount, qcom_glink_channel_release);
}

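/*
 * The remote acked our close request: release the local channel id and the
 * reference associated with it.
 */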
static void qcom_glink_rx_close_ack(struct qcom_glink *glink, unsigned int lcid)
{
	struct glink_channel *channel;

	channel = idr_find(&glink->lcids, lcid);
	if (WARN(!channel, "close ack on unknown channel\n"))
		return;

	mutex_lock(&glink->idr_lock);
	idr_remove(&glink->lcids, channel->lcid);
	channel->lcid = 0;
	mutex_unlock(&glink->idr_lock);

	kref_put(&channel->refcount, qcom_glink_channel_release);
}

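/*
 * Work function draining rx_queue: processes the control messages that the
 * interrupt handler deferred (version negotiation, open and close requests).
 */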
static void qcom_glink_work(struct work_struct *work)
{
	struct qcom_glink *glink = container_of(work, struct qcom_glink,
						rx_work);
	struct glink_defer_cmd *dcmd;
	struct glink_msg *msg;
	unsigned long flags;
	unsigned int param1;
	unsigned int param2;
	unsigned int cmd;

	for (;;) {
		spin_lock_irqsave(&glink->rx_lock, flags);
		if (list_empty(&glink->rx_queue)) {
			spin_unlock_irqrestore(&glink->rx_lock, flags);
			break;
		}
		dcmd = list_first_entry(&glink->rx_queue,
					struct glink_defer_cmd, node);
		list_del(&dcmd->node);
		spin_unlock_irqrestore(&glink->rx_lock, flags);

		msg = &dcmd->msg;
		cmd = le16_to_cpu(msg->cmd);
		param1 = le16_to_cpu(msg->param1);
		param2 = le32_to_cpu(msg->param2);

		switch (cmd) {
		case RPM_CMD_VERSION:
			qcom_glink_send_version_ack(glink);
			break;
		case RPM_CMD_VERSION_ACK:
			break;
		case RPM_CMD_OPEN:
			qcom_glink_rx_open(glink, param1, msg->data);
			break;
		case RPM_CMD_CLOSE:
			qcom_glink_rx_close(glink, param1);
			break;
		case RPM_CMD_CLOSE_ACK:
			qcom_glink_rx_close_ack(glink, param1);
			break;
		default:
			WARN(1, "Unknown defer object %d\n", cmd);
			break;
		}

		kfree(dcmd);
	}
}

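/**
 * qcom_glink_native_probe() - set up a glink edge on top of a pair of FIFOs
 * @dev: device of the transport-specific glue driver
 * @rx: pipe operations for the receive FIFO
 * @tx: pipe operations for the transmit FIFO
 *
 * Intended to be called by a transport-specific driver (e.g. the RPM glue)
 * that owns the FIFO memory and implements the qcom_glink_pipe accessors used
 * above (avail/peak/advance/write). As a rough sketch only (the exact
 * wrapping of qcom_glink_pipe is up to the caller), such a driver would do:
 *
 *	glink = qcom_glink_native_probe(&pdev->dev, rx_pipe, tx_pipe);
 *	if (IS_ERR(glink))
 *		return PTR_ERR(glink);
 *
 * Returns a glink context on success, ERR_PTR() otherwise.
 */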
struct qcom_glink *qcom_glink_native_probe(struct device *dev,
					   struct qcom_glink_pipe *rx,
					   struct qcom_glink_pipe *tx)
{
	int irq;
	int ret;
	struct qcom_glink *glink;

	glink = devm_kzalloc(dev, sizeof(*glink), GFP_KERNEL);
	if (!glink)
		return ERR_PTR(-ENOMEM);

	glink->dev = dev;
	glink->tx_pipe = tx;
	glink->rx_pipe = rx;

	mutex_init(&glink->tx_lock);
	spin_lock_init(&glink->rx_lock);
	INIT_LIST_HEAD(&glink->rx_queue);
	INIT_WORK(&glink->rx_work, qcom_glink_work);

	mutex_init(&glink->idr_lock);
	idr_init(&glink->lcids);
	idr_init(&glink->rcids);

	glink->mbox_client.dev = dev;
	glink->mbox_chan = mbox_request_channel(&glink->mbox_client, 0);
	if (IS_ERR(glink->mbox_chan)) {
		if (PTR_ERR(glink->mbox_chan) != -EPROBE_DEFER)
			dev_err(dev, "failed to acquire IPC channel\n");
		return ERR_CAST(glink->mbox_chan);
	}

	irq = of_irq_get(dev->of_node, 0);
	ret = devm_request_irq(dev, irq,
			       qcom_glink_native_intr,
			       IRQF_NO_SUSPEND | IRQF_SHARED,
			       "glink-native", glink);
	if (ret) {
		dev_err(dev, "failed to request IRQ\n");
		return ERR_PTR(ret);
	}

	glink->irq = irq;

	ret = qcom_glink_send_version(glink);
	if (ret)
		return ERR_PTR(ret);

	return glink;
}

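/* device_for_each_child() helper, unregisters one child rpmsg device */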
static int qcom_glink_remove_device(struct device *dev, void *data)
{
	device_unregister(dev);

	return 0;
}

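/*
 * Tear down a glink edge: stop the interrupt and deferred work, unregister
 * all child rpmsg devices and release any channels still waiting for a
 * close-ack from the (now gone) remote.
 */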
void qcom_glink_native_remove(struct qcom_glink *glink)
{
	struct glink_channel *channel;
	int cid;
	int ret;

	disable_irq(glink->irq);
	cancel_work_sync(&glink->rx_work);

	ret = device_for_each_child(glink->dev, NULL, qcom_glink_remove_device);
	if (ret)
		dev_warn(glink->dev, "Can't remove GLINK devices: %d\n", ret);

	/* Release any defunct local channels, waiting for close-ack */
	idr_for_each_entry(&glink->lcids, channel, cid)
		kref_put(&channel->refcount, qcom_glink_channel_release);

	idr_destroy(&glink->lcids);
	idr_destroy(&glink->rcids);
}