// SPDX-License-Identifier: GPL-2.0-only
/*
 * Mailbox: Common code for Mailbox controllers and users
 *
 * Copyright (C) 2013-2014 Linaro Ltd.
 * Author: Jassi Brar <jassisinghbrar@gmail.com>
 */

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/bitops.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox_controller.h>

#include "mailbox.h"

static LIST_HEAD(mbox_cons);
static DEFINE_MUTEX(con_mutex);

static int add_to_rbuf(struct mbox_chan *chan, void *mssg)
{
	int idx;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	/* See if there is any space left */
	if (chan->msg_count == MBOX_TX_QUEUE_LEN) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -ENOBUFS;
	}

	idx = chan->msg_free;
	chan->msg_data[idx] = mssg;
	chan->msg_count++;

	if (idx == MBOX_TX_QUEUE_LEN - 1)
		chan->msg_free = 0;
	else
		chan->msg_free++;

	spin_unlock_irqrestore(&chan->lock, flags);

	return idx;
}

static void msg_submit(struct mbox_chan *chan)
{
	unsigned count, idx;
	unsigned long flags;
	void *data;
	int err = -EBUSY;

	spin_lock_irqsave(&chan->lock, flags);

	if (!chan->msg_count || chan->active_req)
		goto exit;

	/*
	 * Find the oldest queued message: 'msg_free' points one slot past
	 * the newest entry, so step back 'msg_count' slots, wrapping
	 * around the ring buffer if needed.
	 */
	count = chan->msg_count;
	idx = chan->msg_free;
	if (idx >= count)
		idx -= count;
	else
		idx += MBOX_TX_QUEUE_LEN - count;

	data = chan->msg_data[idx];

	if (chan->cl->tx_prepare)
		chan->cl->tx_prepare(chan->cl, data);
	/* Try to submit a message to the MBOX controller */
	err = chan->mbox->ops->send_data(chan, data);
	if (!err) {
		chan->active_req = data;
		chan->msg_count--;
	}
exit:
	spin_unlock_irqrestore(&chan->lock, flags);

	if (!err && (chan->txdone_method & TXDONE_BY_POLL))
		/* kick start the timer immediately to avoid delays */
		hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
}

static void tx_tick(struct mbox_chan *chan, int r)
{
	unsigned long flags;
	void *mssg;

	spin_lock_irqsave(&chan->lock, flags);
	mssg = chan->active_req;
	chan->active_req = NULL;
	spin_unlock_irqrestore(&chan->lock, flags);

	/* Submit next message */
	msg_submit(chan);

	if (!mssg)
		return;

	/* Notify the client */
	if (chan->cl->tx_done)
		chan->cl->tx_done(chan->cl, mssg, r);

	if (r != -ETIME && chan->cl->tx_block)
		complete(&chan->tx_complete);
}

static enum hrtimer_restart txdone_hrtimer(struct hrtimer *hrtimer)
{
	struct mbox_controller *mbox =
		container_of(hrtimer, struct mbox_controller, poll_hrt);
	bool txdone, resched = false;
	int i;

	for (i = 0; i < mbox->num_chans; i++) {
		struct mbox_chan *chan = &mbox->chans[i];

		if (chan->active_req && chan->cl) {
			txdone = chan->mbox->ops->last_tx_done(chan);
			if (txdone)
				tx_tick(chan, 0);
			else
				resched = true;
		}
	}

	if (resched) {
		hrtimer_forward_now(hrtimer, ms_to_ktime(mbox->txpoll_period));
		return HRTIMER_RESTART;
	}
	return HRTIMER_NORESTART;
}

/**
 * mbox_chan_received_data - A way for the controller driver to push data
 *			received from the remote to the upper layer.
 * @chan: Pointer to the mailbox channel on which RX happened.
 * @mssg: Client-specific message cast as void *
 *
 * After startup and before shutdown, any data received on the chan
 * is passed on to the API via the atomic mbox_chan_received_data().
 * The controller should ACK the RX only after this call returns.
 */
void mbox_chan_received_data(struct mbox_chan *chan, void *mssg)
{
	/* No buffering of the received data */
	if (chan->cl->rx_callback)
		chan->cl->rx_callback(chan->cl, mssg);
}
EXPORT_SYMBOL_GPL(mbox_chan_received_data);

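/*
 * A minimal sketch of how a controller driver might feed received data to
 * the core from its RX interrupt handler. The names myctlr_rx_irq() and
 * myctlr_read_msg() are hypothetical, for illustration only:
 *
 *	static irqreturn_t myctlr_rx_irq(int irq, void *data)
 *	{
 *		struct mbox_chan *chan = data;
 *		u32 msg = myctlr_read_msg(chan);
 *
 *		mbox_chan_received_data(chan, &msg);
 *		return IRQ_HANDLED;
 *	}
 */
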
/**
 * mbox_chan_txdone - A way for the controller driver to notify the
 *			framework that the last TX has completed.
 * @chan: Pointer to the mailbox chan on which TX happened.
 * @r: Status of last TX - OK or ERROR
 *
 * A controller that has an IRQ for TX ACK calls this atomic API
 * to tick the TX state machine. It works only if txdone_irq
 * is set by the controller.
 */
void mbox_chan_txdone(struct mbox_chan *chan, int r)
{
	if (unlikely(!(chan->txdone_method & TXDONE_BY_IRQ))) {
		dev_err(chan->mbox->dev,
			"Controller can't run the TX ticker\n");
		return;
	}

	tx_tick(chan, r);
}
EXPORT_SYMBOL_GPL(mbox_chan_txdone);

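/*
 * A sketch of the TXDONE_BY_IRQ flow: a controller with a TX-ACK interrupt
 * sets 'txdone_irq = true' at registration and reports completion from the
 * handler. myctlr_tx_irq() is a hypothetical name:
 *
 *	static irqreturn_t myctlr_tx_irq(int irq, void *data)
 *	{
 *		struct mbox_chan *chan = data;
 *
 *		mbox_chan_txdone(chan, 0);
 *		return IRQ_HANDLED;
 *	}
 */
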
/**
 * mbox_client_txdone - The way for a client to run the TX state machine.
 * @chan: Mailbox channel assigned to this client.
 * @r: Success status of last transmission.
 *
 * The client/protocol has received an 'ACK' packet and notifies the API
 * that the last packet was sent successfully. This works only if the
 * controller can't sense TX-Done.
 */
void mbox_client_txdone(struct mbox_chan *chan, int r)
{
	if (unlikely(!(chan->txdone_method & TXDONE_BY_ACK))) {
		dev_err(chan->mbox->dev, "Client can't run the TX ticker\n");
		return;
	}

	tx_tick(chan, r);
}
EXPORT_SYMBOL_GPL(mbox_client_txdone);

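/*
 * A sketch of the client side of TXDONE_BY_ACK: a client that set
 * 'knows_txdone' recognises a protocol-level ACK in its rx_callback and
 * ticks the state machine itself. The my_dev structure and the
 * my_msg_is_ack() helper are hypothetical:
 *
 *	struct my_dev {
 *		struct mbox_client cl;
 *		struct mbox_chan *chan;
 *	};
 *
 *	static void my_rx_callback(struct mbox_client *cl, void *mssg)
 *	{
 *		struct my_dev *mdev = container_of(cl, struct my_dev, cl);
 *
 *		if (my_msg_is_ack(mssg))
 *			mbox_client_txdone(mdev->chan, 0);
 *	}
 */
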
/**
 * mbox_client_peek_data - A way for the client driver to pull data
 *			received from the remote by the controller.
 * @chan: Mailbox channel assigned to this client.
 *
 * A poke at the controller driver to check for any received data.
 * The data is actually passed on to the client via
 * mbox_chan_received_data().
 * The call can be made from atomic context, so the controller's
 * implementation of peek_data() must not sleep.
 *
 * Return: True, if the controller has, and is going to push after this,
 *	   some data.
 *	   False, if the controller doesn't have any data to be read.
 */
bool mbox_client_peek_data(struct mbox_chan *chan)
{
	if (chan->mbox->ops->peek_data)
		return chan->mbox->ops->peek_data(chan);

	return false;
}
EXPORT_SYMBOL_GPL(mbox_client_peek_data);

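/*
 * A minimal, illustrative poll loop using peek_data; the controller still
 * delivers whatever it finds through mbox_chan_received_data():
 *
 *	while (mbox_client_peek_data(chan))
 *		cpu_relax();
 */
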
/**
 * mbox_send_message - For client to submit a message to be
 *			sent to the remote.
 * @chan: Mailbox channel assigned to this client.
 * @mssg: Client-specific message cast as void *
 *
 * For the client to submit data to the controller destined for a remote
 * processor. If the client has set 'tx_block', the call will return
 * either when the remote receives the data or when 'tx_tout' milliseconds
 * run out.
 * In non-blocking mode, the requests are buffered by the API and a
 * non-negative token is returned for each queued request. If the request
 * is not queued, a negative token is returned. Upon failure or successful
 * TX, the API calls 'tx_done' from atomic context, from which the client
 * could submit yet another request.
 * The pointer to the message should be preserved until it is sent
 * over the chan, i.e., until tx_done() is made.
 * This function could be called from atomic context as it simply
 * queues the data and returns a token against the request.
 *
 * Return: Non-negative integer for successful submission (non-blocking mode)
 *	or transmission over chan (blocking mode).
 *	Negative value denotes failure.
 */
int mbox_send_message(struct mbox_chan *chan, void *mssg)
{
	int t;

	if (!chan || !chan->cl)
		return -EINVAL;

	t = add_to_rbuf(chan, mssg);
	if (t < 0) {
		dev_err(chan->mbox->dev, "Try increasing MBOX_TX_QUEUE_LEN\n");
		return t;
	}

	msg_submit(chan);

	if (chan->cl->tx_block) {
		unsigned long wait;
		int ret;

		if (!chan->cl->tx_tout) /* wait forever */
			wait = msecs_to_jiffies(3600000);
		else
			wait = msecs_to_jiffies(chan->cl->tx_tout);

		ret = wait_for_completion_timeout(&chan->tx_complete, wait);
		if (ret == 0) {
			t = -ETIME;
			tx_tick(chan, t);
		}
	}

	return t;
}
EXPORT_SYMBOL_GPL(mbox_send_message);

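/*
 * A minimal client-side sketch of a blocking send. It assumes 'chan' was
 * obtained via mbox_request_channel() with 'tx_block' set and a 'tx_tout'
 * of, say, 500 ms; 'cmd' stands in for some client-defined message:
 *
 *	u32 cmd = 0xdeadbeef;
 *	int ret;
 *
 *	ret = mbox_send_message(chan, &cmd);
 *	if (ret < 0)
 *		dev_err(dev, "mailbox TX failed: %d\n", ret);
 */
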
/**
 * mbox_flush - flush a mailbox channel
 * @chan: mailbox channel to flush
 * @timeout: time, in milliseconds, to allow the flush operation to succeed
 *
 * Mailbox controllers that need to work in atomic context can implement the
 * ->flush() callback to busy loop until a transmission has been completed.
 * The implementation must call mbox_chan_txdone() upon success. Clients can
 * call the mbox_flush() function at any time after mbox_send_message() to
 * flush the transmission. After the function returns success, the mailbox
 * transmission is guaranteed to have completed.
 *
 * Returns: 0 on success or a negative error code on failure.
 */
int mbox_flush(struct mbox_chan *chan, unsigned long timeout)
{
	int ret;

	if (!chan->mbox->ops->flush)
		return -ENOTSUPP;

	ret = chan->mbox->ops->flush(chan, timeout);
	if (ret < 0)
		tx_tick(chan, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(mbox_flush);

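/*
 * A sketch of the atomic-context pattern mbox_flush() enables: queue a
 * message, then busy-wait (via the controller's ->flush() callback) until
 * it has gone out. The 100 ms timeout is illustrative:
 *
 *	ret = mbox_send_message(chan, &cmd);
 *	if (ret < 0)
 *		return ret;
 *
 *	ret = mbox_flush(chan, 100);
 */
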
/**
 * mbox_request_channel - Request a mailbox channel.
 * @cl: Identity of the client requesting the channel.
 * @index: Index of mailbox specifier in 'mboxes' property.
 *
 * The client specifies its requirements and capabilities while asking for
 * a mailbox channel. It can't be called from atomic context.
 * The channel is exclusively allocated and can't be used by another
 * client before the owner calls mbox_free_channel().
 * After assignment, any packet received on this channel will be
 * handed over to the client via the 'rx_callback'.
 * The framework holds a reference to the client, so the mbox_client
 * structure shouldn't be modified until mbox_free_channel() returns.
 *
 * Return: Pointer to the channel assigned to the client if successful.
 *	ERR_PTR for request failure.
 */
struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
{
	struct device *dev = cl->dev;
	struct mbox_controller *mbox;
	struct of_phandle_args spec;
	struct mbox_chan *chan;
	unsigned long flags;
	int ret;

	if (!dev || !dev->of_node) {
		pr_debug("%s: No owner device node\n", __func__);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&con_mutex);

	if (of_parse_phandle_with_args(dev->of_node, "mboxes",
				       "#mbox-cells", index, &spec)) {
		dev_dbg(dev, "%s: can't parse \"mboxes\" property\n", __func__);
		mutex_unlock(&con_mutex);
		return ERR_PTR(-ENODEV);
	}

	chan = ERR_PTR(-EPROBE_DEFER);
	list_for_each_entry(mbox, &mbox_cons, node)
		if (mbox->dev->of_node == spec.np) {
			chan = mbox->of_xlate(mbox, &spec);
			if (!IS_ERR(chan))
				break;
		}

	of_node_put(spec.np);

	if (IS_ERR(chan)) {
		mutex_unlock(&con_mutex);
		return chan;
	}

	if (chan->cl || !try_module_get(mbox->dev->driver->owner)) {
		dev_dbg(dev, "%s: mailbox not free\n", __func__);
		mutex_unlock(&con_mutex);
		return ERR_PTR(-EBUSY);
	}

	spin_lock_irqsave(&chan->lock, flags);
	chan->msg_free = 0;
	chan->msg_count = 0;
	chan->active_req = NULL;
	chan->cl = cl;
	init_completion(&chan->tx_complete);

	if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
		chan->txdone_method = TXDONE_BY_ACK;

	spin_unlock_irqrestore(&chan->lock, flags);

	if (chan->mbox->ops->startup) {
		ret = chan->mbox->ops->startup(chan);

		if (ret) {
			dev_err(dev, "Unable to startup the chan (%d)\n", ret);
			mbox_free_channel(chan);
			chan = ERR_PTR(ret);
		}
	}

	mutex_unlock(&con_mutex);
	return chan;
}
EXPORT_SYMBOL_GPL(mbox_request_channel);

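/*
 * A minimal sketch of client setup in a driver's probe path. The 'mdev'
 * structure and callback names are hypothetical; a named channel could
 * equally be obtained with mbox_request_channel_byname(&mdev->cl, "tx")
 * when the DT node carries an "mbox-names" property:
 *
 *	mdev->cl.dev = &pdev->dev;
 *	mdev->cl.rx_callback = my_rx_callback;
 *	mdev->cl.tx_block = true;
 *	mdev->cl.tx_tout = 500;
 *
 *	mdev->chan = mbox_request_channel(&mdev->cl, 0);
 *	if (IS_ERR(mdev->chan))
 *		return PTR_ERR(mdev->chan);
 */
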
struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
					      const char *name)
{
	struct device_node *np = cl->dev->of_node;
	struct property *prop;
	const char *mbox_name;
	int index = 0;

	if (!np) {
		dev_err(cl->dev, "%s() currently only supports DT\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	if (!of_get_property(np, "mbox-names", NULL)) {
		dev_err(cl->dev,
			"%s() requires an \"mbox-names\" property\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	of_property_for_each_string(np, "mbox-names", prop, mbox_name) {
		/* Match the full name, not merely a prefix */
		if (!strcmp(name, mbox_name))
			return mbox_request_channel(cl, index);
		index++;
	}

	dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n",
		__func__, name);
	return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(mbox_request_channel_byname);

/**
 * mbox_free_channel - The client relinquishes control of a mailbox
 *			channel by this call.
 * @chan: The mailbox channel to be freed.
 */
void mbox_free_channel(struct mbox_chan *chan)
{
	unsigned long flags;

	if (!chan || !chan->cl)
		return;

	if (chan->mbox->ops->shutdown)
		chan->mbox->ops->shutdown(chan);

	/* The queued TX requests are simply aborted, no callbacks are made */
	spin_lock_irqsave(&chan->lock, flags);
	chan->cl = NULL;
	chan->active_req = NULL;
	if (chan->txdone_method == TXDONE_BY_ACK)
		chan->txdone_method = TXDONE_BY_POLL;

	module_put(chan->mbox->dev->driver->owner);
	spin_unlock_irqrestore(&chan->lock, flags);
}
EXPORT_SYMBOL_GPL(mbox_free_channel);

static struct mbox_chan *
of_mbox_index_xlate(struct mbox_controller *mbox,
		    const struct of_phandle_args *sp)
{
	int ind = sp->args[0];

	if (ind >= mbox->num_chans)
		return ERR_PTR(-EINVAL);

	return &mbox->chans[ind];
}

/**
 * mbox_controller_register - Register the mailbox controller
 * @mbox: Pointer to the mailbox controller.
 *
 * The controller driver registers its communication channels.
 */
int mbox_controller_register(struct mbox_controller *mbox)
{
	int i, txdone;

	/* Sanity check */
	if (!mbox || !mbox->dev || !mbox->ops || !mbox->num_chans)
		return -EINVAL;

	if (mbox->txdone_irq)
		txdone = TXDONE_BY_IRQ;
	else if (mbox->txdone_poll)
		txdone = TXDONE_BY_POLL;
	else /* It has to be ACK then */
		txdone = TXDONE_BY_ACK;

	if (txdone == TXDONE_BY_POLL) {

		if (!mbox->ops->last_tx_done) {
			dev_err(mbox->dev, "last_tx_done method is absent\n");
			return -EINVAL;
		}

		hrtimer_init(&mbox->poll_hrt, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		mbox->poll_hrt.function = txdone_hrtimer;
	}

	for (i = 0; i < mbox->num_chans; i++) {
		struct mbox_chan *chan = &mbox->chans[i];

		chan->cl = NULL;
		chan->mbox = mbox;
		chan->txdone_method = txdone;
		spin_lock_init(&chan->lock);
	}

	if (!mbox->of_xlate)
		mbox->of_xlate = of_mbox_index_xlate;

	mutex_lock(&con_mutex);
	list_add_tail(&mbox->node, &mbox_cons);
	mutex_unlock(&con_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(mbox_controller_register);

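/*
 * A minimal sketch of a polling-mode controller registering itself; the
 * 'priv' structure, ops table and channel count are all illustrative:
 *
 *	priv->mbox.dev = &pdev->dev;
 *	priv->mbox.ops = &myctlr_chan_ops;
 *	priv->mbox.chans = priv->chans;
 *	priv->mbox.num_chans = 4;
 *	priv->mbox.txdone_poll = true;
 *	priv->mbox.txpoll_period = 5;	(in milliseconds)
 *
 *	err = mbox_controller_register(&priv->mbox);
 *
 * With txdone_poll set, myctlr_chan_ops must provide last_tx_done().
 */
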
/**
 * mbox_controller_unregister - Unregister the mailbox controller
 * @mbox: Pointer to the mailbox controller.
 */
void mbox_controller_unregister(struct mbox_controller *mbox)
{
	int i;

	if (!mbox)
		return;

	mutex_lock(&con_mutex);

	list_del(&mbox->node);

	for (i = 0; i < mbox->num_chans; i++)
		mbox_free_channel(&mbox->chans[i]);

	if (mbox->txdone_poll)
		hrtimer_cancel(&mbox->poll_hrt);

	mutex_unlock(&con_mutex);
}
EXPORT_SYMBOL_GPL(mbox_controller_unregister);

static void __devm_mbox_controller_unregister(struct device *dev, void *res)
{
	struct mbox_controller **mbox = res;

	mbox_controller_unregister(*mbox);
}

static int devm_mbox_controller_match(struct device *dev, void *res, void *data)
{
	struct mbox_controller **mbox = res;

	if (WARN_ON(!mbox || !*mbox))
		return 0;

	return *mbox == data;
}

/**
 * devm_mbox_controller_register() - managed mbox_controller_register()
 * @dev: device owning the mailbox controller being registered
 * @mbox: mailbox controller being registered
 *
 * This function adds a device-managed resource that will make sure that the
 * mailbox controller, which is registered using mbox_controller_register()
 * as part of this function, will be unregistered along with the rest of
 * device-managed resources upon driver probe failure or driver removal.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int devm_mbox_controller_register(struct device *dev,
				  struct mbox_controller *mbox)
{
	struct mbox_controller **ptr;
	int err;

	ptr = devres_alloc(__devm_mbox_controller_unregister, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	err = mbox_controller_register(mbox);
	if (err < 0) {
		devres_free(ptr);
		return err;
	}

	devres_add(dev, ptr);
	*ptr = mbox;

	return 0;
}
EXPORT_SYMBOL_GPL(devm_mbox_controller_register);

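/*
 * Typical devm usage from a controller's probe path (a sketch, reusing the
 * hypothetical 'priv' from the earlier example):
 *
 *	err = devm_mbox_controller_register(&pdev->dev, &priv->mbox);
 *	if (err)
 *		return err;
 *
 * No explicit unregister is then needed on remove or probe failure.
 */
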
/**
 * devm_mbox_controller_unregister() - managed mbox_controller_unregister()
 * @dev: device owning the mailbox controller being unregistered
 * @mbox: mailbox controller being unregistered
 *
 * This function unregisters the mailbox controller and removes the device-
 * managed resource that was set up to automatically unregister the mailbox
 * controller on driver probe failure or driver removal. It's typically not
 * necessary to call this function.
 */
void devm_mbox_controller_unregister(struct device *dev, struct mbox_controller *mbox)
{
	WARN_ON(devres_release(dev, __devm_mbox_controller_unregister,
			       devm_mbox_controller_match, mbox));
}
EXPORT_SYMBOL_GPL(devm_mbox_controller_unregister);