]>
Commit | Line | Data |
---|---|---|
1 | // SPDX-License-Identifier: GPL-2.0 | |
2 | /* | |
3 | * Texas Instruments' Message Manager Driver | |
4 | * | |
5 | * Copyright (C) 2015-2017 Texas Instruments Incorporated - http://www.ti.com/ | |
6 | * Nishanth Menon | |
7 | */ | |
8 | ||
9 | #define pr_fmt(fmt) "%s: " fmt, __func__ | |
10 | ||
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mailbox_controller.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/ti-msgmgr.h>
#include <linux/string.h>
22 | ||
/*
 * Queue proxy data register address: each proxy owns a 0x10000 byte region,
 * each queue within it an 0x80 byte sub-region, and data registers are
 * 32 bits (4 bytes) wide.
 */
#define Q_DATA_OFFSET(proxy, queue, reg)	\
		((0x10000 * (proxy)) + (0x80 * (queue)) + ((reg) * 4))
/* Per-queue state register offset within the state/debug region */
#define Q_STATE_OFFSET(queue)			((queue) * 0x4)
/* Field of the state register that holds the pending message count */
#define Q_STATE_ENTRY_COUNT_MASK		(0xFFF000)
27 | ||
/**
 * struct ti_msgmgr_valid_queue_desc - SoC valid queues meant for this processor
 * @queue_id: Queue Number for this path
 * @proxy_id: Proxy ID representing the processor in SoC
 * @is_tx: Is this a transmit path? ('true' for tx, 'false' for rx)
 */
struct ti_msgmgr_valid_queue_desc {
	u8 queue_id;
	u8 proxy_id;
	bool is_tx;
};
39 | ||
/**
 * struct ti_msgmgr_desc - Description of message manager integration
 * @queue_count: Number of Queues the hardware instance provides
 * @max_message_size: Maximum message size in bytes per transfer
 * @max_messages: Number of messages
 * @data_first_reg: First data register index for the proxy data region
 * @data_last_reg: Last data register index for the proxy data region
 * @tx_polled: Do I need to use polled mechanism for tx
 * @tx_poll_timeout_ms: Timeout in ms if polled
 * @valid_queues: List of Valid queues that the processor can access
 * @num_valid_queues: Number of valid queues
 *
 * This structure is used in of match data to describe how integration
 * for a specific compatible SoC is done.
 */
struct ti_msgmgr_desc {
	u8 queue_count;
	u8 max_message_size;
	u8 max_messages;
	u8 data_first_reg;
	u8 data_last_reg;
	bool tx_polled;
	int tx_poll_timeout_ms;
	const struct ti_msgmgr_valid_queue_desc *valid_queues;
	int num_valid_queues;
};
66 | ||
/**
 * struct ti_queue_inst - Description of a queue instance
 * @name: Queue Name ("<dev> tx/rx_<qid>_<pid>")
 * @queue_id: Queue Identifier as mapped on SoC
 * @proxy_id: Proxy Identifier as mapped on SoC
 * @irq: IRQ for Rx Queue (-EINVAL until lazily resolved at startup)
 * @is_tx: 'true' if transmit queue, else, 'false'
 * @queue_buff_start: First register of Data Buffer
 * @queue_buff_end: Last (or confirmation) register of Data buffer
 * @queue_state: Queue status register
 * @chan: Mailbox channel
 * @rx_buff: Receive buffer pointer allocated at channel startup,
 *	     max_message_size bytes (rx queues only)
 */
struct ti_queue_inst {
	char name[30];
	u8 queue_id;
	u8 proxy_id;
	int irq;
	bool is_tx;
	void __iomem *queue_buff_start;
	void __iomem *queue_buff_end;
	void __iomem *queue_state;
	struct mbox_chan *chan;
	u32 *rx_buff;
};
92 | ||
/**
 * struct ti_msgmgr_inst - Description of a Message Manager Instance
 * @dev: device pointer corresponding to the Message Manager instance
 * @desc: Description of the SoC integration
 * @queue_proxy_region: Queue proxy region where queue buffers are located
 * @queue_state_debug_region: Queue status register regions
 * @num_valid_queues: Number of valid queues defined for the processor
 *		Note: other queues are probably reserved for other processors
 *		in the SoC.
 * @qinsts: Array of valid Queue Instances for the Processor
 * @mbox: Mailbox Controller
 * @chans: Array for channels corresponding to the Queue Instances.
 */
struct ti_msgmgr_inst {
	struct device *dev;
	const struct ti_msgmgr_desc *desc;
	void __iomem *queue_proxy_region;
	void __iomem *queue_state_debug_region;
	u8 num_valid_queues;
	struct ti_queue_inst *qinsts;
	struct mbox_controller mbox;
	struct mbox_chan *chans;
};
116 | ||
117 | /** | |
118 | * ti_msgmgr_queue_get_num_messages() - Get the number of pending messages | |
119 | * @qinst: Queue instance for which we check the number of pending messages | |
120 | * | |
121 | * Return: number of messages pending in the queue (0 == no pending messages) | |
122 | */ | |
123 | static inline int ti_msgmgr_queue_get_num_messages(struct ti_queue_inst *qinst) | |
124 | { | |
125 | u32 val; | |
126 | ||
127 | /* | |
128 | * We cannot use relaxed operation here - update may happen | |
129 | * real-time. | |
130 | */ | |
131 | val = readl(qinst->queue_state) & Q_STATE_ENTRY_COUNT_MASK; | |
132 | val >>= __ffs(Q_STATE_ENTRY_COUNT_MASK); | |
133 | ||
134 | return val; | |
135 | } | |
136 | ||
137 | /** | |
138 | * ti_msgmgr_queue_rx_interrupt() - Interrupt handler for receive Queue | |
139 | * @irq: Interrupt number | |
140 | * @p: Channel Pointer | |
141 | * | |
142 | * Return: -EINVAL if there is no instance | |
143 | * IRQ_NONE if the interrupt is not ours. | |
144 | * IRQ_HANDLED if the rx interrupt was successfully handled. | |
145 | */ | |
146 | static irqreturn_t ti_msgmgr_queue_rx_interrupt(int irq, void *p) | |
147 | { | |
148 | struct mbox_chan *chan = p; | |
149 | struct device *dev = chan->mbox->dev; | |
150 | struct ti_msgmgr_inst *inst = dev_get_drvdata(dev); | |
151 | struct ti_queue_inst *qinst = chan->con_priv; | |
152 | const struct ti_msgmgr_desc *desc; | |
153 | int msg_count, num_words; | |
154 | struct ti_msgmgr_message message; | |
155 | void __iomem *data_reg; | |
156 | u32 *word_data; | |
157 | ||
158 | if (WARN_ON(!inst)) { | |
159 | dev_err(dev, "no platform drv data??\n"); | |
160 | return -EINVAL; | |
161 | } | |
162 | ||
163 | /* Do I have an invalid interrupt source? */ | |
164 | if (qinst->is_tx) { | |
165 | dev_err(dev, "Cannot handle rx interrupt on tx channel %s\n", | |
166 | qinst->name); | |
167 | return IRQ_NONE; | |
168 | } | |
169 | ||
170 | /* Do I actually have messages to read? */ | |
171 | msg_count = ti_msgmgr_queue_get_num_messages(qinst); | |
172 | if (!msg_count) { | |
173 | /* Shared IRQ? */ | |
174 | dev_dbg(dev, "Spurious event - 0 pending data!\n"); | |
175 | return IRQ_NONE; | |
176 | } | |
177 | ||
178 | /* | |
179 | * I have no idea about the protocol being used to communicate with the | |
180 | * remote producer - 0 could be valid data, so I wont make a judgement | |
181 | * of how many bytes I should be reading. Let the client figure this | |
182 | * out.. I just read the full message and pass it on.. | |
183 | */ | |
184 | desc = inst->desc; | |
185 | message.len = desc->max_message_size; | |
186 | message.buf = (u8 *)qinst->rx_buff; | |
187 | ||
188 | /* | |
189 | * NOTE about register access involved here: | |
190 | * the hardware block is implemented with 32bit access operations and no | |
191 | * support for data splitting. We don't want the hardware to misbehave | |
192 | * with sub 32bit access - For example: if the last register read is | |
193 | * split into byte wise access, it can result in the queue getting | |
194 | * stuck or indeterminate behavior. An out of order read operation may | |
195 | * result in weird data results as well. | |
196 | * Hence, we do not use memcpy_fromio or __ioread32_copy here, instead | |
197 | * we depend on readl for the purpose. | |
198 | * | |
199 | * Also note that the final register read automatically marks the | |
200 | * queue message as read. | |
201 | */ | |
202 | for (data_reg = qinst->queue_buff_start, word_data = qinst->rx_buff, | |
203 | num_words = (desc->max_message_size / sizeof(u32)); | |
204 | num_words; num_words--, data_reg += sizeof(u32), word_data++) | |
205 | *word_data = readl(data_reg); | |
206 | ||
207 | /* | |
208 | * Last register read automatically clears the IRQ if only 1 message | |
209 | * is pending - so send the data up the stack.. | |
210 | * NOTE: Client is expected to be as optimal as possible, since | |
211 | * we invoke the handler in IRQ context. | |
212 | */ | |
213 | mbox_chan_received_data(chan, (void *)&message); | |
214 | ||
215 | return IRQ_HANDLED; | |
216 | } | |
217 | ||
218 | /** | |
219 | * ti_msgmgr_queue_peek_data() - Peek to see if there are any rx messages. | |
220 | * @chan: Channel Pointer | |
221 | * | |
222 | * Return: 'true' if there is pending rx data, 'false' if there is none. | |
223 | */ | |
224 | static bool ti_msgmgr_queue_peek_data(struct mbox_chan *chan) | |
225 | { | |
226 | struct ti_queue_inst *qinst = chan->con_priv; | |
227 | int msg_count; | |
228 | ||
229 | if (qinst->is_tx) | |
230 | return false; | |
231 | ||
232 | msg_count = ti_msgmgr_queue_get_num_messages(qinst); | |
233 | ||
234 | return msg_count ? true : false; | |
235 | } | |
236 | ||
237 | /** | |
238 | * ti_msgmgr_last_tx_done() - See if all the tx messages are sent | |
239 | * @chan: Channel pointer | |
240 | * | |
241 | * Return: 'true' is no pending tx data, 'false' if there are any. | |
242 | */ | |
243 | static bool ti_msgmgr_last_tx_done(struct mbox_chan *chan) | |
244 | { | |
245 | struct ti_queue_inst *qinst = chan->con_priv; | |
246 | int msg_count; | |
247 | ||
248 | if (!qinst->is_tx) | |
249 | return false; | |
250 | ||
251 | msg_count = ti_msgmgr_queue_get_num_messages(qinst); | |
252 | ||
253 | /* if we have any messages pending.. */ | |
254 | return msg_count ? false : true; | |
255 | } | |
256 | ||
257 | /** | |
258 | * ti_msgmgr_send_data() - Send data | |
259 | * @chan: Channel Pointer | |
260 | * @data: ti_msgmgr_message * Message Pointer | |
261 | * | |
262 | * Return: 0 if all goes good, else appropriate error messages. | |
263 | */ | |
264 | static int ti_msgmgr_send_data(struct mbox_chan *chan, void *data) | |
265 | { | |
266 | struct device *dev = chan->mbox->dev; | |
267 | struct ti_msgmgr_inst *inst = dev_get_drvdata(dev); | |
268 | const struct ti_msgmgr_desc *desc; | |
269 | struct ti_queue_inst *qinst = chan->con_priv; | |
270 | int num_words, trail_bytes; | |
271 | struct ti_msgmgr_message *message = data; | |
272 | void __iomem *data_reg; | |
273 | u32 *word_data; | |
274 | ||
275 | if (WARN_ON(!inst)) { | |
276 | dev_err(dev, "no platform drv data??\n"); | |
277 | return -EINVAL; | |
278 | } | |
279 | desc = inst->desc; | |
280 | ||
281 | if (desc->max_message_size < message->len) { | |
282 | dev_err(dev, "Queue %s message length %zu > max %d\n", | |
283 | qinst->name, message->len, desc->max_message_size); | |
284 | return -EINVAL; | |
285 | } | |
286 | ||
287 | /* NOTE: Constraints similar to rx path exists here as well */ | |
288 | for (data_reg = qinst->queue_buff_start, | |
289 | num_words = message->len / sizeof(u32), | |
290 | word_data = (u32 *)message->buf; | |
291 | num_words; num_words--, data_reg += sizeof(u32), word_data++) | |
292 | writel(*word_data, data_reg); | |
293 | ||
294 | trail_bytes = message->len % sizeof(u32); | |
295 | if (trail_bytes) { | |
296 | u32 data_trail = *word_data; | |
297 | ||
298 | /* Ensure all unused data is 0 */ | |
299 | data_trail &= 0xFFFFFFFF >> (8 * (sizeof(u32) - trail_bytes)); | |
300 | writel(data_trail, data_reg); | |
301 | data_reg++; | |
302 | } | |
303 | /* | |
304 | * 'data_reg' indicates next register to write. If we did not already | |
305 | * write on tx complete reg(last reg), we must do so for transmit | |
306 | */ | |
307 | if (data_reg <= qinst->queue_buff_end) | |
308 | writel(0, qinst->queue_buff_end); | |
309 | ||
310 | return 0; | |
311 | } | |
312 | ||
313 | /** | |
314 | * ti_msgmgr_queue_rx_irq_req() - RX IRQ request | |
315 | * @dev: device pointer | |
316 | * @qinst: Queue instance | |
317 | * @chan: Channel pointer | |
318 | */ | |
319 | static int ti_msgmgr_queue_rx_irq_req(struct device *dev, | |
320 | struct ti_queue_inst *qinst, | |
321 | struct mbox_chan *chan) | |
322 | { | |
323 | int ret = 0; | |
324 | char of_rx_irq_name[7]; | |
325 | struct device_node *np; | |
326 | ||
327 | snprintf(of_rx_irq_name, sizeof(of_rx_irq_name), | |
328 | "rx_%03d", qinst->queue_id); | |
329 | ||
330 | /* Get the IRQ if not found */ | |
331 | if (qinst->irq < 0) { | |
332 | np = of_node_get(dev->of_node); | |
333 | if (!np) | |
334 | return -ENODATA; | |
335 | qinst->irq = of_irq_get_byname(np, of_rx_irq_name); | |
336 | of_node_put(np); | |
337 | ||
338 | if (qinst->irq < 0) { | |
339 | dev_err(dev, | |
340 | "QID %d PID %d:No IRQ[%s]: %d\n", | |
341 | qinst->queue_id, qinst->proxy_id, | |
342 | of_rx_irq_name, qinst->irq); | |
343 | return qinst->irq; | |
344 | } | |
345 | } | |
346 | ||
347 | /* With the expectation that the IRQ might be shared in SoC */ | |
348 | ret = request_irq(qinst->irq, ti_msgmgr_queue_rx_interrupt, | |
349 | IRQF_SHARED, qinst->name, chan); | |
350 | if (ret) { | |
351 | dev_err(dev, "Unable to get IRQ %d on %s(res=%d)\n", | |
352 | qinst->irq, qinst->name, ret); | |
353 | } | |
354 | ||
355 | return ret; | |
356 | } | |
357 | ||
358 | /** | |
359 | * ti_msgmgr_queue_startup() - Startup queue | |
360 | * @chan: Channel pointer | |
361 | * | |
362 | * Return: 0 if all goes good, else return corresponding error message | |
363 | */ | |
364 | static int ti_msgmgr_queue_startup(struct mbox_chan *chan) | |
365 | { | |
366 | struct device *dev = chan->mbox->dev; | |
367 | struct ti_msgmgr_inst *inst = dev_get_drvdata(dev); | |
368 | struct ti_queue_inst *qinst = chan->con_priv; | |
369 | const struct ti_msgmgr_desc *d = inst->desc; | |
370 | int ret; | |
371 | ||
372 | if (!qinst->is_tx) { | |
373 | /* Allocate usage buffer for rx */ | |
374 | qinst->rx_buff = kzalloc(d->max_message_size, GFP_KERNEL); | |
375 | if (!qinst->rx_buff) | |
376 | return -ENOMEM; | |
377 | /* Request IRQ */ | |
378 | ret = ti_msgmgr_queue_rx_irq_req(dev, qinst, chan); | |
379 | if (ret) { | |
380 | kfree(qinst->rx_buff); | |
381 | return ret; | |
382 | } | |
383 | } | |
384 | ||
385 | return 0; | |
386 | } | |
387 | ||
388 | /** | |
389 | * ti_msgmgr_queue_shutdown() - Shutdown the queue | |
390 | * @chan: Channel pointer | |
391 | */ | |
392 | static void ti_msgmgr_queue_shutdown(struct mbox_chan *chan) | |
393 | { | |
394 | struct ti_queue_inst *qinst = chan->con_priv; | |
395 | ||
396 | if (!qinst->is_tx) { | |
397 | free_irq(qinst->irq, chan); | |
398 | kfree(qinst->rx_buff); | |
399 | } | |
400 | } | |
401 | ||
402 | /** | |
403 | * ti_msgmgr_of_xlate() - Translation of phandle to queue | |
404 | * @mbox: Mailbox controller | |
405 | * @p: phandle pointer | |
406 | * | |
407 | * Return: Mailbox channel corresponding to the queue, else return error | |
408 | * pointer. | |
409 | */ | |
410 | static struct mbox_chan *ti_msgmgr_of_xlate(struct mbox_controller *mbox, | |
411 | const struct of_phandle_args *p) | |
412 | { | |
413 | struct ti_msgmgr_inst *inst; | |
414 | int req_qid, req_pid; | |
415 | struct ti_queue_inst *qinst; | |
416 | int i; | |
417 | ||
418 | inst = container_of(mbox, struct ti_msgmgr_inst, mbox); | |
419 | if (WARN_ON(!inst)) | |
420 | return ERR_PTR(-EINVAL); | |
421 | ||
422 | /* #mbox-cells is 2 */ | |
423 | if (p->args_count != 2) { | |
424 | dev_err(inst->dev, "Invalid arguments in dt[%d] instead of 2\n", | |
425 | p->args_count); | |
426 | return ERR_PTR(-EINVAL); | |
427 | } | |
428 | req_qid = p->args[0]; | |
429 | req_pid = p->args[1]; | |
430 | ||
431 | for (qinst = inst->qinsts, i = 0; i < inst->num_valid_queues; | |
432 | i++, qinst++) { | |
433 | if (req_qid == qinst->queue_id && req_pid == qinst->proxy_id) | |
434 | return qinst->chan; | |
435 | } | |
436 | ||
437 | dev_err(inst->dev, "Queue ID %d, Proxy ID %d is wrong on %s\n", | |
438 | req_qid, req_pid, p->np->name); | |
439 | return ERR_PTR(-ENOENT); | |
440 | } | |
441 | ||
/**
 * ti_msgmgr_queue_setup() - Setup data structures for each queue instance
 * @idx: index of the queue
 * @dev: pointer to the message manager device
 * @np: pointer to the of node (unused in this helper)
 * @inst: Message Manager instance pointer
 * @d: Message Manager instance description data
 * @qd: Queue description data
 * @qinst: Queue instance pointer to populate
 * @chan: pointer to mailbox channel
 *
 * Return: 0 if all went well, else return corresponding error
 */
static int ti_msgmgr_queue_setup(int idx, struct device *dev,
				 struct device_node *np,
				 struct ti_msgmgr_inst *inst,
				 const struct ti_msgmgr_desc *d,
				 const struct ti_msgmgr_valid_queue_desc *qd,
				 struct ti_queue_inst *qinst,
				 struct mbox_chan *chan)
{
	qinst->proxy_id = qd->proxy_id;
	qinst->queue_id = qd->queue_id;

	/*
	 * NOTE(review): if queue IDs are 0-based (0..queue_count-1) this
	 * check looks like it should be '>=' — queue_id == queue_count
	 * would pass. Confirm the valid ID range before changing.
	 */
	if (qinst->queue_id > d->queue_count) {
		dev_err(dev, "Queue Data [idx=%d] queuid %d > %d\n",
			idx, qinst->queue_id, d->queue_count);
		return -ERANGE;
	}

	qinst->is_tx = qd->is_tx;
	snprintf(qinst->name, sizeof(qinst->name), "%s %s_%03d_%03d",
		 dev_name(dev), qinst->is_tx ? "tx" : "rx", qinst->queue_id,
		 qinst->proxy_id);

	/* Compute the register addresses for this proxy/queue pair */
	qinst->queue_buff_start = inst->queue_proxy_region +
	    Q_DATA_OFFSET(qinst->proxy_id, qinst->queue_id, d->data_first_reg);
	qinst->queue_buff_end = inst->queue_proxy_region +
	    Q_DATA_OFFSET(qinst->proxy_id, qinst->queue_id, d->data_last_reg);
	qinst->queue_state = inst->queue_state_debug_region +
	    Q_STATE_OFFSET(qinst->queue_id);
	qinst->chan = chan;

	/* Setup an error value for IRQ - Lazy allocation */
	qinst->irq = -EINVAL;

	chan->con_priv = qinst;

	dev_dbg(dev, "[%d] qidx=%d pidx=%d irq=%d q_s=%p q_e = %p\n",
		idx, qinst->queue_id, qinst->proxy_id, qinst->irq,
		qinst->queue_buff_start, qinst->queue_buff_end);
	return 0;
}
495 | ||
/* Queue operations exposed to the mailbox framework */
static const struct mbox_chan_ops ti_msgmgr_chan_ops = {
	.startup = ti_msgmgr_queue_startup,
	.shutdown = ti_msgmgr_queue_shutdown,
	.peek_data = ti_msgmgr_queue_peek_data,
	.last_tx_done = ti_msgmgr_last_tx_done,
	.send_data = ti_msgmgr_send_data,
};
504 | ||
/* Keystone K2G SoC integration details */
/* Queues this processor may use; others belong to other SoC entities */
static const struct ti_msgmgr_valid_queue_desc k2g_valid_queues[] = {
	{.queue_id = 0, .proxy_id = 0, .is_tx = true,},
	{.queue_id = 1, .proxy_id = 0, .is_tx = true,},
	{.queue_id = 2, .proxy_id = 0, .is_tx = true,},
	{.queue_id = 3, .proxy_id = 0, .is_tx = true,},
	{.queue_id = 5, .proxy_id = 2, .is_tx = false,},
	{.queue_id = 56, .proxy_id = 1, .is_tx = true,},
	{.queue_id = 57, .proxy_id = 2, .is_tx = false,},
	{.queue_id = 58, .proxy_id = 3, .is_tx = true,},
	{.queue_id = 59, .proxy_id = 4, .is_tx = true,},
	{.queue_id = 60, .proxy_id = 5, .is_tx = true,},
	{.queue_id = 61, .proxy_id = 6, .is_tx = true,},
};
519 | ||
static const struct ti_msgmgr_desc k2g_desc = {
	.queue_count = 64,
	.max_message_size = 64,
	.max_messages = 128,
	/* Data registers 16..31: 16 regs x 4 bytes == 64-byte message max */
	.data_first_reg = 16,
	.data_last_reg = 31,
	.tx_polled = false,
	.valid_queues = k2g_valid_queues,
	.num_valid_queues = ARRAY_SIZE(k2g_valid_queues),
};
530 | ||
/* Compatible string to SoC integration description mapping */
static const struct of_device_id ti_msgmgr_of_match[] = {
	{.compatible = "ti,k2g-message-manager", .data = &k2g_desc},
	{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, ti_msgmgr_of_match);
536 | ||
/**
 * ti_msgmgr_probe() - Map register regions, set up queues and register
 *		       the mailbox controller
 * @pdev: platform device pointer
 *
 * Return: 0 on success, else corresponding error value.
 */
static int ti_msgmgr_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct of_device_id *of_id;
	struct device_node *np;
	struct resource *res;
	const struct ti_msgmgr_desc *desc;
	struct ti_msgmgr_inst *inst;
	struct ti_queue_inst *qinst;
	struct mbox_controller *mbox;
	struct mbox_chan *chans;
	int queue_count;
	int i;
	int ret = -EINVAL;
	const struct ti_msgmgr_valid_queue_desc *queue_desc;

	if (!dev->of_node) {
		dev_err(dev, "no OF information\n");
		return -EINVAL;
	}
	np = dev->of_node;

	/* Pick up the SoC integration description from the OF match data */
	of_id = of_match_device(ti_msgmgr_of_match, dev);
	if (!of_id) {
		dev_err(dev, "OF data missing\n");
		return -EINVAL;
	}
	desc = of_id->data;

	inst = devm_kzalloc(dev, sizeof(*inst), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;

	inst->dev = dev;
	inst->desc = desc;

	/* Map the two named register regions from the device tree */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "queue_proxy_region");
	inst->queue_proxy_region = devm_ioremap_resource(dev, res);
	if (IS_ERR(inst->queue_proxy_region))
		return PTR_ERR(inst->queue_proxy_region);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					   "queue_state_debug_region");
	inst->queue_state_debug_region = devm_ioremap_resource(dev, res);
	if (IS_ERR(inst->queue_state_debug_region))
		return PTR_ERR(inst->queue_state_debug_region);

	dev_dbg(dev, "proxy region=%p, queue_state=%p\n",
		inst->queue_proxy_region, inst->queue_state_debug_region);

	/* Sanity-check the static valid-queue table against SoC limits */
	queue_count = desc->num_valid_queues;
	if (!queue_count || queue_count > desc->queue_count) {
		dev_crit(dev, "Invalid Number of queues %d. Max %d\n",
			 queue_count, desc->queue_count);
		return -ERANGE;
	}
	inst->num_valid_queues = queue_count;

	qinst = devm_kcalloc(dev, queue_count, sizeof(*qinst), GFP_KERNEL);
	if (!qinst)
		return -ENOMEM;
	inst->qinsts = qinst;

	chans = devm_kcalloc(dev, queue_count, sizeof(*chans), GFP_KERNEL);
	if (!chans)
		return -ENOMEM;
	inst->chans = chans;

	/* One mailbox channel per valid queue */
	for (i = 0, queue_desc = desc->valid_queues;
	     i < queue_count; i++, qinst++, chans++, queue_desc++) {
		ret = ti_msgmgr_queue_setup(i, dev, np, inst,
					    desc, queue_desc, qinst, chans);
		if (ret)
			return ret;
	}

	mbox = &inst->mbox;
	mbox->dev = dev;
	mbox->ops = &ti_msgmgr_chan_ops;
	mbox->chans = inst->chans;
	mbox->num_chans = inst->num_valid_queues;
	mbox->txdone_irq = false;
	mbox->txdone_poll = desc->tx_polled;
	if (desc->tx_polled)
		mbox->txpoll_period = desc->tx_poll_timeout_ms;
	mbox->of_xlate = ti_msgmgr_of_xlate;

	/* drvdata must be set before registering: IRQs may fire right away */
	platform_set_drvdata(pdev, inst);
	ret = mbox_controller_register(mbox);
	if (ret)
		dev_err(dev, "Failed to register mbox_controller(%d)\n", ret);

	return ret;
}
632 | ||
633 | static int ti_msgmgr_remove(struct platform_device *pdev) | |
634 | { | |
635 | struct ti_msgmgr_inst *inst; | |
636 | ||
637 | inst = platform_get_drvdata(pdev); | |
638 | mbox_controller_unregister(&inst->mbox); | |
639 | ||
640 | return 0; | |
641 | } | |
642 | ||
/* Platform driver glue and module metadata */
static struct platform_driver ti_msgmgr_driver = {
	.probe = ti_msgmgr_probe,
	.remove = ti_msgmgr_remove,
	.driver = {
		.name = "ti-msgmgr",
		.of_match_table = of_match_ptr(ti_msgmgr_of_match),
	},
};
module_platform_driver(ti_msgmgr_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI message manager driver");
MODULE_AUTHOR("Nishanth Menon");
MODULE_ALIAS("platform:ti-msgmgr");