/*
 * Copyright 2016 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation (the "GPL").
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 (GPLv2) for more details.
 *
 * You should have received a copy of the GNU General Public License
 * version 2 (GPLv2) along with this source code.
 */

/*
 * Broadcom PDC Mailbox Driver
 * The PDC provides a ring based programming interface to one or more hardware
 * offload engines. For example, the PDC driver works with both SPU-M and SPU2
 * cryptographic offload hardware. In some chips the PDC is referred to as MDE.
 *
 * The PDC driver registers with the Linux mailbox framework as a mailbox
 * controller, once for each PDC instance. Ring 0 for each PDC is registered as
 * a mailbox channel. The PDC driver uses interrupts to determine when data
 * transfers to and from an offload engine are complete. The PDC driver runs a
 * tasklet so that response messages are handled outside of hard interrupt
 * context.
 *
 * The PDC driver allows multiple messages to be pending in the descriptor
 * rings. The tx_msg_start descriptor index indicates where the last message
 * starts. The txin_numd value at this index indicates how many descriptor
 * indexes make up the message. Similar state is kept on the receive side. When
 * an rx interrupt indicates a response is ready, the PDC driver processes numd
 * descriptors from the tx and rx ring, thus processing one response at a time.
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox/brcm-message.h>
#include <linux/scatterlist.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>

#define PDC_SUCCESS  0

#define RING_ENTRY_SIZE   sizeof(struct dma64dd)

/* # entries in PDC dma ring */
#define PDC_RING_ENTRIES  512
/*
 * Minimum number of ring descriptor entries that must be free to tell mailbox
 * framework that it can submit another request
 */
#define PDC_RING_SPACE_MIN  15

#define PDC_RING_SIZE    (PDC_RING_ENTRIES * RING_ENTRY_SIZE)
/* Rings are 8k aligned */
#define RING_ALIGN_ORDER  13
#define RING_ALIGN        BIT(RING_ALIGN_ORDER)
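/*
 * A ring of PDC_RING_ENTRIES 16-byte descriptors is exactly 8 KB, the same as
 * the ring alignment, so the hardware can report the current descriptor as a
 * byte offset within the ring (see CRYPTO_D64_RS0_CD_MASK below).
 */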

#define RX_BUF_ALIGN_ORDER  5
#define RX_BUF_ALIGN        BIT(RX_BUF_ALIGN_ORDER)

/* descriptor bumping macros */
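/*
 * The max_mask arguments are ring size - 1. Ring sizes are a power of two, so
 * masking the head/tail difference handles index wraparound: NTXDACTIVE() and
 * NRXDACTIVE() return the number of descriptors in use even after the
 * indexes wrap past the end of the ring.
 */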
#define XXD(x, max_mask)              ((x) & (max_mask))
#define TXD(x, max_mask)              XXD((x), (max_mask))
#define RXD(x, max_mask)              XXD((x), (max_mask))
#define NEXTTXD(i, max_mask)          TXD((i) + 1, (max_mask))
#define PREVTXD(i, max_mask)          TXD((i) - 1, (max_mask))
#define NEXTRXD(i, max_mask)          RXD((i) + 1, (max_mask))
#define PREVRXD(i, max_mask)          RXD((i) - 1, (max_mask))
#define NTXDACTIVE(h, t, max_mask)    TXD((t) - (h), (max_mask))
#define NRXDACTIVE(h, t, max_mask)    RXD((t) - (h), (max_mask))

/* Length of BCM header at start of SPU msg, in bytes */
#define BCM_HDR_LEN  8

/*
 * PDC driver reserves ringset 0 on each SPU for its own use. The driver does
 * not currently support use of multiple ringsets on a single PDC engine.
 */
#define PDC_RINGSET  0

/*
 * Interrupt mask and status definitions. Enable interrupts for tx and rx on
 * ring 0
 */
#define PDC_RCVINT_0         (16 + PDC_RINGSET)
#define PDC_RCVINTEN_0       BIT(PDC_RCVINT_0)
#define PDC_INTMASK          (PDC_RCVINTEN_0)
#define PDC_LAZY_FRAMECOUNT  1
#define PDC_LAZY_TIMEOUT     10000
#define PDC_LAZY_INT  (PDC_LAZY_TIMEOUT | (PDC_LAZY_FRAMECOUNT << 24))
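/*
 * PDC_LAZY_INT packs the intrcvlazy register fields: timeout in bits 23:0 and
 * outstanding frame count in bits 31:24. A frame count of 1 raises the rx
 * interrupt for every received frame.
 */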
#define PDC_INTMASK_OFFSET   0x24
#define PDC_INTSTATUS_OFFSET 0x20
#define PDC_RCVLAZY0_OFFSET  (0x30 + 4 * PDC_RINGSET)

/*
 * For SPU2, configure MDE_CKSUM_CONTROL to write 17 bytes of metadata
 * before frame
 */
#define PDC_SPU2_RESP_HDR_LEN  17
#define PDC_CKSUM_CTRL         BIT(27)
#define PDC_CKSUM_CTRL_OFFSET  0x400

#define PDC_SPUM_RESP_HDR_LEN  32
/*
 * Sets the following bits for write to transmit control reg:
 * 11    - PtyChkDisable - parity check is disabled
 * 20:18 - BurstLen = 3 -> 2^7 = 128 byte data reads from memory
 */
#define PDC_TX_CTL		0x000C0800

/* Bit in tx control reg to enable tx channel */
#define PDC_TX_ENABLE		0x1

/*
 * Sets the following bits for write to receive control reg:
 * 7:1   - RcvOffset - size in bytes of status region at start of rx frame buf
 * 9     - SepRxHdrDescEn - place start of new frames only in descriptors
 *         that have StartOfFrame set
 * 10    - OflowContinue - on rx FIFO overflow, clear rx fifo, discard all
 *         remaining bytes in current frame, report error
 *         in rx frame status for current frame
 * 11    - PtyChkDisable - parity check is disabled
 * 20:18 - BurstLen = 3 -> 2^7 = 128 byte data reads from memory
 */
#define PDC_RX_CTL		0x000C0E00

/* Bit in rx control reg to enable rx channel */
#define PDC_RX_ENABLE		0x1

#define CRYPTO_D64_RS0_CD_MASK	((PDC_RING_ENTRIES * RING_ENTRY_SIZE) - 1)

/* descriptor flags */
#define D64_CTRL1_EOT	BIT(28)	/* end of descriptor table */
#define D64_CTRL1_IOC	BIT(29)	/* interrupt on complete */
#define D64_CTRL1_EOF	BIT(30)	/* end of frame */
#define D64_CTRL1_SOF	BIT(31)	/* start of frame */

#define RX_STATUS_OVERFLOW	0x00800000
#define RX_STATUS_LEN		0x0000FFFF

#define PDC_TXREGS_OFFSET	0x200
#define PDC_RXREGS_OFFSET	0x220

/* Maximum size buffer the DMA engine can handle */
#define PDC_DMA_BUF_MAX		16384

struct pdc_dma_map {
	void *ctx;          /* opaque context associated with frame */
};

/* dma descriptor */
struct dma64dd {
	u32 ctrl1;      /* misc control bits */
	u32 ctrl2;      /* buffer count and address extension */
	u32 addrlow;    /* memory address of the data buffer, bits 31:0 */
	u32 addrhigh;   /* memory address of the data buffer, bits 63:32 */
};

/* dma registers per channel(xmt or rcv) */
struct dma64_regs {
	u32 control;   /* enable, et al */
	u32 ptr;       /* last descriptor posted to chip */
	u32 addrlow;   /* descriptor ring base address low 32-bits */
	u32 addrhigh;  /* descriptor ring base address bits 63:32 */
	u32 status0;   /* last rx descriptor written by hw */
	u32 status1;   /* driver does not use */
};

/* cpp contortions to concatenate w/arg prescan */
#ifndef PAD
#define _PADLINE(line)  pad ## line
#define _XSTR(line)     _PADLINE(line)
#define PAD             _XSTR(__LINE__)
#endif  /* PAD */

/* dma registers. matches hw layout. */
struct dma64 {
	struct dma64_regs dmaxmt;  /* dma tx */
	u32 PAD[2];
	struct dma64_regs dmarcv;  /* dma rx */
	u32 PAD[2];
};

/* PDC registers */
struct pdc_regs {
	u32 devcontrol;             /* 0x000 */
	u32 devstatus;              /* 0x004 */
	u32 PAD;
	u32 biststatus;             /* 0x00c */
	u32 PAD[4];
	u32 intstatus;              /* 0x020 */
	u32 intmask;                /* 0x024 */
	u32 gptimer;                /* 0x028 */

	u32 PAD;
	u32 intrcvlazy_0;           /* 0x030 */
	u32 intrcvlazy_1;           /* 0x034 */
	u32 intrcvlazy_2;           /* 0x038 */
	u32 intrcvlazy_3;           /* 0x03c */

	u32 PAD[48];
	u32 removed_intrecvlazy;    /* 0x100 */
	u32 flowctlthresh;          /* 0x104 */
	u32 wrrthresh;              /* 0x108 */
	u32 gmac_idle_cnt_thresh;   /* 0x10c */

	u32 PAD[4];
	u32 ifioaccessaddr;         /* 0x120 */
	u32 ifioaccessbyte;         /* 0x124 */
	u32 ifioaccessdata;         /* 0x128 */

	u32 PAD[21];
	u32 phyaccess;              /* 0x180 */
	u32 PAD;
	u32 phycontrol;             /* 0x188 */
	u32 txqctl;                 /* 0x18c */
	u32 rxqctl;                 /* 0x190 */
	u32 gpioselect;             /* 0x194 */
	u32 gpio_output_en;         /* 0x198 */
	u32 PAD;                    /* 0x19c */
	u32 txq_rxq_mem_ctl;        /* 0x1a0 */
	u32 memory_ecc_status;      /* 0x1a4 */
	u32 serdes_ctl;             /* 0x1a8 */
	u32 serdes_status0;         /* 0x1ac */
	u32 serdes_status1;         /* 0x1b0 */
	u32 PAD[11];                /* 0x1b4-1dc */
	u32 clk_ctl_st;             /* 0x1e0 */
	u32 hw_war;                 /* 0x1e4 */
	u32 pwrctl;                 /* 0x1e8 */
	u32 PAD[5];

#define PDC_NUM_DMA_RINGS   4
	struct dma64 dmaregs[PDC_NUM_DMA_RINGS];  /* 0x0200 - 0x2fc */

	/* more registers follow, but we don't use them */
};

/* structure for allocating/freeing DMA rings */
struct pdc_ring_alloc {
	dma_addr_t dmabase;  /* DMA address of start of ring */
	void	  *vbase;    /* base kernel virtual address of ring */
	u32	   size;     /* ring allocation size in bytes */
};

/*
 * context associated with a receive descriptor.
 * @rxp_ctx: opaque context associated with frame that starts at each
 *           rx ring index.
 * @dst_sg:  Scatterlist used to form reply frames beginning at a given ring
 *           index. Retained in order to unmap each sg after reply is processed.
 * @rxin_numd: Number of rx descriptors associated with the message that starts
 *             at a descriptor index. Not set for every index. For example,
 *             if descriptor index i points to a scatterlist with 4 entries,
 *             then the next three descriptor indexes don't have a value set.
 * @resp_hdr: Virtual address of buffer used to catch DMA rx status
 * @resp_hdr_daddr: physical address of DMA rx status buffer
 */
struct pdc_rx_ctx {
	void *rxp_ctx;
	struct scatterlist *dst_sg;
	u32 rxin_numd;
	void *resp_hdr;
	dma_addr_t resp_hdr_daddr;
};

/* PDC state structure */
struct pdc_state {
	/* Index of the PDC whose state is in this structure instance */
	u8 pdc_idx;

	/* Platform device for this PDC instance */
	struct platform_device *pdev;

	/*
	 * Each PDC instance has a mailbox controller. PDC receives request
	 * messages through mailboxes, and sends response messages through the
	 * mailbox framework.
	 */
	struct mbox_controller mbc;

	unsigned int pdc_irq;

	/* tasklet for deferred processing after DMA rx interrupt */
	struct tasklet_struct rx_tasklet;

	/* Number of bytes of receive status prior to each rx frame */
	u32 rx_status_len;
	/* Whether a BCM header is prepended to each frame */
	bool use_bcm_hdr;
	/* Sum of length of BCM header and rx status header */
	u32 pdc_resp_hdr_len;

	/* The base virtual address of DMA hw registers */
	void __iomem *pdc_reg_vbase;

	/* Pool for allocation of DMA rings */
	struct dma_pool *ring_pool;

	/* Pool for allocation of metadata buffers for response messages */
	struct dma_pool *rx_buf_pool;

	/*
	 * The base virtual address of DMA tx/rx descriptor rings. Corresponding
	 * DMA address and size of ring allocation.
	 */
	struct pdc_ring_alloc tx_ring_alloc;
	struct pdc_ring_alloc rx_ring_alloc;

	struct pdc_regs *regs;    /* start of PDC registers */

	struct dma64_regs *txregs_64; /* dma tx engine registers */
	struct dma64_regs *rxregs_64; /* dma rx engine registers */

	/*
	 * Arrays of PDC_RING_ENTRIES descriptors
	 * To use multiple ringsets, this needs to be extended
	 */
	struct dma64dd   *txd_64;  /* tx descriptor ring */
	struct dma64dd   *rxd_64;  /* rx descriptor ring */

	/* descriptor ring sizes */
	u32      ntxd;       /* # tx descriptors */
	u32      nrxd;       /* # rx descriptors */
	u32      nrxpost;    /* # rx buffers to keep posted */
	u32      ntxpost;    /* max number of tx buffers that can be posted */

	/*
	 * Index of next tx descriptor to reclaim. That is, the descriptor
	 * index of the oldest tx buffer for which the host has yet to process
	 * the corresponding response.
	 */
	u32  txin;

	/*
	 * Index of the first transmit descriptor for the sequence of
	 * message fragments currently under construction. Used to build up
	 * the txin_numd count for a message. Updated to txout when the host
	 * starts a new sequence of tx buffers for a new message.
	 */
	u32  tx_msg_start;

	/* Index of next tx descriptor to post. */
	u32  txout;

	/*
	 * Number of tx descriptors associated with the message that starts
	 * at this tx descriptor index.
	 */
	u32      txin_numd[PDC_RING_ENTRIES];

	/*
	 * Index of next rx descriptor to reclaim. This is the index of
	 * the next descriptor whose data has yet to be processed by the host.
	 */
	u32  rxin;

	/*
	 * Index of the first receive descriptor for the sequence of
	 * message fragments currently under construction. Used to build up
	 * the rxin_numd count for a message. Updated to rxout when the host
	 * starts a new sequence of rx buffers for a new message.
	 */
	u32  rx_msg_start;

	/*
	 * Saved value of current hardware rx descriptor index.
	 * The last rx buffer written by the hw is the index previous to
	 * this one.
	 */
	u32  last_rx_curr;

	/* Index of next rx descriptor to post. */
	u32  rxout;

	struct pdc_rx_ctx rx_ctx[PDC_RING_ENTRIES];

	/*
	 * Scatterlists used to form request and reply frames beginning at a
	 * given ring index. Retained in order to unmap each sg after reply
	 * is processed
	 */
	struct scatterlist *src_sg[PDC_RING_ENTRIES];

	struct dentry *debugfs_stats;  /* debug FS stats file for this PDC */

	/* counters */
	u32  pdc_requests;     /* number of request messages submitted */
	u32  pdc_replies;      /* number of reply messages received */
	u32  last_tx_not_done; /* too few tx descriptors to indicate done */
	u32  tx_ring_full;     /* unable to accept msg because tx ring full */
	u32  rx_ring_full;     /* unable to accept msg because rx ring full */
	u32  txnobuf;          /* unable to create tx descriptor */
	u32  rxnobuf;          /* unable to create rx descriptor */
	u32  rx_oflow;         /* count of rx overflows */
};

/* Global variables */

struct pdc_globals {
	/* Actual number of SPUs in hardware, as reported by device tree */
	u32 num_spu;
};

static struct pdc_globals pdcg;

/* top level debug FS directory for PDC driver */
static struct dentry *debugfs_dir;

static ssize_t pdc_debugfs_read(struct file *filp, char __user *ubuf,
				size_t count, loff_t *offp)
{
	struct pdc_state *pdcs;
	char *buf;
	ssize_t ret, out_offset, out_count;

	out_count = 512;

	buf = kmalloc(out_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pdcs = filp->private_data;
	out_offset = 0;
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "SPU %u stats:\n", pdcs->pdc_idx);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "PDC requests....................%u\n",
			       pdcs->pdc_requests);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "PDC responses...................%u\n",
			       pdcs->pdc_replies);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Tx not done.....................%u\n",
			       pdcs->last_tx_not_done);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Tx ring full....................%u\n",
			       pdcs->tx_ring_full);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Rx ring full....................%u\n",
			       pdcs->rx_ring_full);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Tx desc write fail. Ring full...%u\n",
			       pdcs->txnobuf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Rx desc write fail. Ring full...%u\n",
			       pdcs->rxnobuf);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Receive overflow................%u\n",
			       pdcs->rx_oflow);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Num frags in rx ring............%u\n",
			       NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr,
					  pdcs->nrxpost));
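
	/* snprintf() returns the length it would have written, so clamp */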
	if (out_offset > out_count)
		out_offset = out_count;

	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
	kfree(buf);
	return ret;
}

static const struct file_operations pdc_debugfs_stats = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = pdc_debugfs_read,
};

/**
 * pdc_setup_debugfs() - Create the debug FS directories. If the top-level
 * directory has not yet been created, create it now. Create a stats file in
 * this directory for a SPU.
 * @pdcs: PDC state structure
 */
static void pdc_setup_debugfs(struct pdc_state *pdcs)
{
	char spu_stats_name[16];

	if (!debugfs_initialized())
		return;

	snprintf(spu_stats_name, 16, "pdc%d_stats", pdcs->pdc_idx);
	if (!debugfs_dir)
		debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);

	/* S_IRUSR == 0400 */
	pdcs->debugfs_stats = debugfs_create_file(spu_stats_name, 0400,
						  debugfs_dir, pdcs,
						  &pdc_debugfs_stats);
}

static void pdc_free_debugfs(void)
{
	debugfs_remove_recursive(debugfs_dir);
	debugfs_dir = NULL;
}

/**
 * pdc_build_rxd() - Build DMA descriptor to receive SPU result.
 * @pdcs:      PDC state for SPU that will generate result
 * @dma_addr:  DMA address of buffer that descriptor is being built for
 * @buf_len:   Length of the receive buffer, in bytes
 * @flags:     Flags to be stored in descriptor
 */
static inline void
pdc_build_rxd(struct pdc_state *pdcs, dma_addr_t dma_addr,
	      u32 buf_len, u32 flags)
{
	struct device *dev = &pdcs->pdev->dev;
	struct dma64dd *rxd = &pdcs->rxd_64[pdcs->rxout];

	dev_dbg(dev,
		"Writing rx descriptor for PDC %u at index %u with length %u. flags %#x\n",
		pdcs->pdc_idx, pdcs->rxout, buf_len, flags);
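
	/* Descriptor fields are little-endian as seen by the DMA engine */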
	rxd->addrlow = cpu_to_le32(lower_32_bits(dma_addr));
	rxd->addrhigh = cpu_to_le32(upper_32_bits(dma_addr));
	rxd->ctrl1 = cpu_to_le32(flags);
	rxd->ctrl2 = cpu_to_le32(buf_len);

	/* bump ring index and return */
	pdcs->rxout = NEXTRXD(pdcs->rxout, pdcs->nrxpost);
}

/**
 * pdc_build_txd() - Build a DMA descriptor to transmit a SPU request to
 * hardware.
 * @pdcs:      PDC state for the SPU that will process this request
 * @dma_addr:  DMA address of packet to be transmitted
 * @buf_len:   Length of tx buffer, in bytes
 * @flags:     Flags to be stored in descriptor
 */
static inline void
pdc_build_txd(struct pdc_state *pdcs, dma_addr_t dma_addr, u32 buf_len,
	      u32 flags)
{
	struct device *dev = &pdcs->pdev->dev;
	struct dma64dd *txd = &pdcs->txd_64[pdcs->txout];

	dev_dbg(dev,
		"Writing tx descriptor for PDC %u at index %u with length %u, flags %#x\n",
		pdcs->pdc_idx, pdcs->txout, buf_len, flags);

	txd->addrlow = cpu_to_le32(lower_32_bits(dma_addr));
	txd->addrhigh = cpu_to_le32(upper_32_bits(dma_addr));
	txd->ctrl1 = cpu_to_le32(flags);
	txd->ctrl2 = cpu_to_le32(buf_len);

	/* bump ring index and return */
	pdcs->txout = NEXTTXD(pdcs->txout, pdcs->ntxpost);
}

/**
 * pdc_receive_one() - Receive a response message from a given SPU.
 * @pdcs:  PDC state for the SPU to receive from
 *
 * When the return code indicates success, the response message is available in
 * the receive buffers provided prior to submission of the request.
 *
 * Return:  PDC_SUCCESS if one or more receive descriptors was processed
 *          -EAGAIN indicates that no response message is available
 *          -EIO an error occurred
 */
static int
pdc_receive_one(struct pdc_state *pdcs)
{
	struct device *dev = &pdcs->pdev->dev;
	struct mbox_controller *mbc;
	struct mbox_chan *chan;
	struct brcm_message mssg;
	u32 len, rx_status;
	u32 num_frags;
	u8 *resp_hdr;    /* virtual addr of start of resp message DMA header */
	u32 frags_rdy;   /* number of fragments ready to read */
	u32 rx_idx;      /* ring index of start of receive frame */
	dma_addr_t resp_hdr_daddr;
	struct pdc_rx_ctx *rx_ctx;

	mbc = &pdcs->mbc;
	chan = &mbc->chans[0];
	mssg.type = BRCM_MESSAGE_SPU;

	/*
	 * return if a complete response message is not yet ready.
	 * rxin_numd[rxin] is the number of fragments in the next msg
	 * to read.
	 */
	frags_rdy = NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr, pdcs->nrxpost);
	if ((frags_rdy == 0) ||
	    (frags_rdy < pdcs->rx_ctx[pdcs->rxin].rxin_numd))
		/* No response ready */
		return -EAGAIN;

	num_frags = pdcs->txin_numd[pdcs->txin];
	WARN_ON(num_frags == 0);

	dma_unmap_sg(dev, pdcs->src_sg[pdcs->txin],
		     sg_nents(pdcs->src_sg[pdcs->txin]), DMA_TO_DEVICE);

	pdcs->txin = (pdcs->txin + num_frags) & pdcs->ntxpost;

	dev_dbg(dev, "PDC %u reclaimed %d tx descriptors",
		pdcs->pdc_idx, num_frags);

	rx_idx = pdcs->rxin;
	rx_ctx = &pdcs->rx_ctx[rx_idx];
	num_frags = rx_ctx->rxin_numd;
	/* Return opaque context with result */
	mssg.ctx = rx_ctx->rxp_ctx;
	rx_ctx->rxp_ctx = NULL;
	resp_hdr = rx_ctx->resp_hdr;
	resp_hdr_daddr = rx_ctx->resp_hdr_daddr;
	dma_unmap_sg(dev, rx_ctx->dst_sg, sg_nents(rx_ctx->dst_sg),
		     DMA_FROM_DEVICE);

	pdcs->rxin = (pdcs->rxin + num_frags) & pdcs->nrxpost;

	dev_dbg(dev, "PDC %u reclaimed %d rx descriptors",
		pdcs->pdc_idx, num_frags);

	dev_dbg(dev,
		"PDC %u txin %u, txout %u, rxin %u, rxout %u, last_rx_curr %u\n",
		pdcs->pdc_idx, pdcs->txin, pdcs->txout, pdcs->rxin,
		pdcs->rxout, pdcs->last_rx_curr);

	if (pdcs->pdc_resp_hdr_len == PDC_SPUM_RESP_HDR_LEN) {
		/*
		 * For SPU-M, get length of response msg and rx overflow status.
		 */
		rx_status = *((u32 *)resp_hdr);
		len = rx_status & RX_STATUS_LEN;
		dev_dbg(dev,
			"SPU response length %u bytes", len);
		if (unlikely(((rx_status & RX_STATUS_OVERFLOW) || (!len)))) {
			if (rx_status & RX_STATUS_OVERFLOW) {
				dev_err_ratelimited(dev,
						    "crypto receive overflow");
				pdcs->rx_oflow++;
			} else {
				dev_info_ratelimited(dev, "crypto rx len = 0");
			}
			return -EIO;
		}
	}

	dma_pool_free(pdcs->rx_buf_pool, resp_hdr, resp_hdr_daddr);

	mbox_chan_received_data(chan, &mssg);

	pdcs->pdc_replies++;
	return PDC_SUCCESS;
}

/**
 * pdc_receive() - Process as many responses as are available in the rx ring.
 * @pdcs:  PDC state
 *
 * Called within the rx tasklet.
 * Return: 0
 */
static int
pdc_receive(struct pdc_state *pdcs)
{
	int rx_status;

	/* read last_rx_curr from register once */
	pdcs->last_rx_curr =
	    (ioread32(&pdcs->rxregs_64->status0) &
	     CRYPTO_D64_RS0_CD_MASK) / RING_ENTRY_SIZE;

	do {
		/* Could be many frames ready */
		rx_status = pdc_receive_one(pdcs);
	} while (rx_status == PDC_SUCCESS);

	return 0;
}

/**
 * pdc_tx_list_sg_add() - Add the buffers in a scatterlist to the transmit
 * descriptors for a given SPU. The scatterlist buffers contain the data for a
 * SPU request message.
 * @pdcs:  PDC state for the SPU that will process this request
 * @sg:    Scatterlist whose buffers contain part of the SPU request
 *
 * If a scatterlist buffer is larger than PDC_DMA_BUF_MAX, multiple descriptors
 * are written for that buffer, each <= PDC_DMA_BUF_MAX bytes in length.
 *
 * Return: PDC_SUCCESS if successful
 *         < 0 otherwise
 */
static int pdc_tx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg)
{
	u32 flags = 0;
	u32 eot;
	u32 tx_avail;

	/*
	 * Num descriptors needed. Conservatively assume we need a descriptor
	 * for every entry in sg.
	 */
	u32 num_desc;
	u32 desc_w = 0;	/* Number of tx descriptors written */
	u32 bufcnt;	/* Number of bytes of buffer pointed to by descriptor */
	dma_addr_t databufptr;	/* DMA address to put in descriptor */

	num_desc = (u32)sg_nents(sg);

	/* check whether enough tx descriptors are available */
	tx_avail = pdcs->ntxpost - NTXDACTIVE(pdcs->txin, pdcs->txout,
					      pdcs->ntxpost);
	if (unlikely(num_desc > tx_avail)) {
		pdcs->txnobuf++;
		return -ENOSPC;
	}

	/* build tx descriptors */
	if (pdcs->tx_msg_start == pdcs->txout) {
		/* Start of frame */
		pdcs->txin_numd[pdcs->tx_msg_start] = 0;
		pdcs->src_sg[pdcs->txout] = sg;
		flags = D64_CTRL1_SOF;
	}
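
	/*
	 * A descriptor written at the last ring index must carry EOT so the
	 * DMA engine wraps back to the start of the descriptor table.
	 */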
	while (sg) {
		if (unlikely(pdcs->txout == (pdcs->ntxd - 1)))
			eot = D64_CTRL1_EOT;
		else
			eot = 0;

		/*
		 * If sg buffer larger than PDC limit, split across
		 * multiple descriptors
		 */
		bufcnt = sg_dma_len(sg);
		databufptr = sg_dma_address(sg);
		while (bufcnt > PDC_DMA_BUF_MAX) {
			pdc_build_txd(pdcs, databufptr, PDC_DMA_BUF_MAX,
				      flags | eot);
			desc_w++;
			bufcnt -= PDC_DMA_BUF_MAX;
			databufptr += PDC_DMA_BUF_MAX;
			if (unlikely(pdcs->txout == (pdcs->ntxd - 1)))
				eot = D64_CTRL1_EOT;
			else
				eot = 0;
		}
		sg = sg_next(sg);
		if (!sg)
			/* Writing last descriptor for frame */
			flags |= (D64_CTRL1_EOF | D64_CTRL1_IOC);
		pdc_build_txd(pdcs, databufptr, bufcnt, flags | eot);
		desc_w++;
		/* Clear start of frame after first descriptor */
		flags &= ~D64_CTRL1_SOF;
	}
	pdcs->txin_numd[pdcs->tx_msg_start] += desc_w;

	return PDC_SUCCESS;
}

/**
 * pdc_tx_list_final() - Initiate DMA transfer of last frame written to tx
 * ring.
 * @pdcs:  PDC state for SPU to process the request
 *
 * Sets the index of the last descriptor written in both the rx and tx ring.
 *
 * Return: PDC_SUCCESS
 */
static int pdc_tx_list_final(struct pdc_state *pdcs)
{
	/*
	 * write barrier to ensure all register writes are complete
	 * before chip starts to process new request
	 */
	wmb();
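	/* The ptr registers take a byte offset; each descriptor is 16 bytes (idx << 4) */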
	iowrite32(pdcs->rxout << 4, &pdcs->rxregs_64->ptr);
	iowrite32(pdcs->txout << 4, &pdcs->txregs_64->ptr);
	pdcs->pdc_requests++;

	return PDC_SUCCESS;
}

/**
 * pdc_rx_list_init() - Start a new receive descriptor list for a given PDC.
 * @pdcs:    PDC state for SPU handling request
 * @dst_sg:  scatterlist providing rx buffers for response to be returned to
 *	     mailbox client
 * @ctx:     Opaque context for this request
 *
 * Posts a single receive descriptor to hold the metadata that precedes a
 * response. For example, with SPU-M, the metadata is a 32-byte DMA header and
 * an 8-byte BCM header. Moves the msg_start descriptor indexes for both tx and
 * rx to indicate the start of a new message.
 *
 * Return:  PDC_SUCCESS if successful
 *          < 0 if an error (e.g., rx ring is full)
 */
static int pdc_rx_list_init(struct pdc_state *pdcs, struct scatterlist *dst_sg,
			    void *ctx)
{
	u32 flags = 0;
	u32 rx_avail;
	u32 rx_pkt_cnt = 1;	/* Adding a single rx buffer */
	dma_addr_t daddr;
	void *vaddr;
	struct pdc_rx_ctx *rx_ctx;

	rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
					      pdcs->nrxpost);
	if (unlikely(rx_pkt_cnt > rx_avail)) {
		pdcs->rxnobuf++;
		return -ENOSPC;
	}

	/* allocate a buffer for the dma rx status */
	vaddr = dma_pool_zalloc(pdcs->rx_buf_pool, GFP_ATOMIC, &daddr);
	if (unlikely(!vaddr))
		return -ENOMEM;

	/*
	 * Update msg_start indexes for both tx and rx to indicate the start
	 * of a new sequence of descriptor indexes that contain the fragments
	 * of the same message.
	 */
	pdcs->rx_msg_start = pdcs->rxout;
	pdcs->tx_msg_start = pdcs->txout;

	/* This is always the first descriptor in the receive sequence */
	flags = D64_CTRL1_SOF;
	pdcs->rx_ctx[pdcs->rx_msg_start].rxin_numd = 1;

	if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
		flags |= D64_CTRL1_EOT;

	rx_ctx = &pdcs->rx_ctx[pdcs->rxout];
	rx_ctx->rxp_ctx = ctx;
	rx_ctx->dst_sg = dst_sg;
	rx_ctx->resp_hdr = vaddr;
	rx_ctx->resp_hdr_daddr = daddr;
	pdc_build_rxd(pdcs, daddr, pdcs->pdc_resp_hdr_len, flags);
	return PDC_SUCCESS;
}

/**
 * pdc_rx_list_sg_add() - Add the buffers in a scatterlist to the receive
 * descriptors for a given SPU. The caller must have already DMA mapped the
 * scatterlist.
 * @pdcs:  PDC state for the SPU whose receive ring the buffers are added to
 * @sg:    Scatterlist whose buffers are added to the receive ring
 *
 * If a receive buffer in the scatterlist is larger than PDC_DMA_BUF_MAX,
 * multiple receive descriptors are written, each with a buffer <=
 * PDC_DMA_BUF_MAX.
 *
 * Return: PDC_SUCCESS if successful
 *         < 0 otherwise (e.g., receive ring is full)
 */
static int pdc_rx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg)
{
	u32 flags = 0;
	u32 rx_avail;

	/*
	 * Num descriptors needed. Conservatively assume we need a descriptor
	 * for every entry from our starting point in the scatterlist.
	 */
	u32 num_desc;
	u32 desc_w = 0;	/* Number of rx descriptors written */
	u32 bufcnt;	/* Number of bytes of buffer pointed to by descriptor */
	dma_addr_t databufptr;	/* DMA address to put in descriptor */

	num_desc = (u32)sg_nents(sg);

	rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
					      pdcs->nrxpost);
	if (unlikely(num_desc > rx_avail)) {
		pdcs->rxnobuf++;
		return -ENOSPC;
	}

	while (sg) {
		if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
			flags = D64_CTRL1_EOT;
		else
			flags = 0;

		/*
		 * If sg buffer larger than PDC limit, split across
		 * multiple descriptors
		 */
		bufcnt = sg_dma_len(sg);
		databufptr = sg_dma_address(sg);
		while (bufcnt > PDC_DMA_BUF_MAX) {
			pdc_build_rxd(pdcs, databufptr, PDC_DMA_BUF_MAX, flags);
			desc_w++;
			bufcnt -= PDC_DMA_BUF_MAX;
			databufptr += PDC_DMA_BUF_MAX;
			if (unlikely(pdcs->rxout == (pdcs->nrxd - 1)))
				flags = D64_CTRL1_EOT;
			else
				flags = 0;
		}
		pdc_build_rxd(pdcs, databufptr, bufcnt, flags);
		desc_w++;
		sg = sg_next(sg);
	}
	pdcs->rx_ctx[pdcs->rx_msg_start].rxin_numd += desc_w;

	return PDC_SUCCESS;
}

/**
 * pdc_irq_handler() - Interrupt handler called in interrupt context.
 * @irq:   Interrupt number that has fired
 * @data:  device struct for DMA engine that generated the interrupt
 *
 * Disables further PDC interrupts, clears the interrupt status flags in the
 * device, and schedules a tasklet that processes the response messages
 * outside of interrupt context.
 *
 * Return: IRQ_HANDLED if interrupt is ours
 *         IRQ_NONE otherwise
 */
static irqreturn_t pdc_irq_handler(int irq, void *data)
{
	struct device *dev = (struct device *)data;
	struct pdc_state *pdcs = dev_get_drvdata(dev);
	u32 intstatus = ioread32(pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);

	if (unlikely(intstatus == 0))
		return IRQ_NONE;

	/* Disable interrupts until soft handler runs */
	iowrite32(0, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);

	/* Clear interrupt flags in device */
	iowrite32(intstatus, pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);

	/* Schedule the tasklet to handle the responses */
	tasklet_schedule(&pdcs->rx_tasklet);
	return IRQ_HANDLED;
}

/**
 * pdc_tasklet_cb() - Tasklet callback that runs the deferred processing after
 * a DMA receive interrupt. Reenables the receive interrupt.
 * @data: PDC state structure
 */
static void pdc_tasklet_cb(unsigned long data)
{
	struct pdc_state *pdcs = (struct pdc_state *)data;

	pdc_receive(pdcs);

	/* reenable interrupts */
	iowrite32(PDC_INTMASK, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);
}

/**
 * pdc_ring_init() - Allocate DMA rings and initialize constant fields of
 * descriptors in one ringset.
 * @pdcs:     PDC instance state
 * @ringset:  index of ringset being used
 *
 * Return: PDC_SUCCESS if ring initialized
 *         < 0 otherwise
 */
static int pdc_ring_init(struct pdc_state *pdcs, int ringset)
{
	int i;
	int err = PDC_SUCCESS;
	struct dma64 *dma_reg;
	struct device *dev = &pdcs->pdev->dev;
	struct pdc_ring_alloc tx;
	struct pdc_ring_alloc rx;

	/* Allocate tx ring */
	tx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &tx.dmabase);
	if (unlikely(!tx.vbase)) {
		err = -ENOMEM;
		goto done;
	}

	/* Allocate rx ring */
	rx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &rx.dmabase);
	if (unlikely(!rx.vbase)) {
		err = -ENOMEM;
		goto fail_dealloc;
	}

	dev_dbg(dev, " - base DMA addr of tx ring      %pad", &tx.dmabase);
	dev_dbg(dev, " - base virtual addr of tx ring  %p", tx.vbase);
	dev_dbg(dev, " - base DMA addr of rx ring      %pad", &rx.dmabase);
	dev_dbg(dev, " - base virtual addr of rx ring  %p", rx.vbase);

	memcpy(&pdcs->tx_ring_alloc, &tx, sizeof(tx));
	memcpy(&pdcs->rx_ring_alloc, &rx, sizeof(rx));

	pdcs->rxin = 0;
	pdcs->rx_msg_start = 0;
	pdcs->last_rx_curr = 0;
	pdcs->rxout = 0;
	pdcs->txin = 0;
	pdcs->tx_msg_start = 0;
	pdcs->txout = 0;

	/* Set descriptor array base addresses */
	pdcs->txd_64 = (struct dma64dd *)pdcs->tx_ring_alloc.vbase;
	pdcs->rxd_64 = (struct dma64dd *)pdcs->rx_ring_alloc.vbase;

	/* Tell device the base DMA address of each ring */
	dma_reg = &pdcs->regs->dmaregs[ringset];

	/* But first disable DMA and set curptr to 0 for both TX & RX */
	iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);
	iowrite32((PDC_RX_CTL + (pdcs->rx_status_len << 1)),
		  &dma_reg->dmarcv.control);
	iowrite32(0, &dma_reg->dmaxmt.ptr);
	iowrite32(0, &dma_reg->dmarcv.ptr);

	/* Set base DMA addresses */
	iowrite32(lower_32_bits(pdcs->tx_ring_alloc.dmabase),
		  &dma_reg->dmaxmt.addrlow);
	iowrite32(upper_32_bits(pdcs->tx_ring_alloc.dmabase),
		  &dma_reg->dmaxmt.addrhigh);

	iowrite32(lower_32_bits(pdcs->rx_ring_alloc.dmabase),
		  &dma_reg->dmarcv.addrlow);
	iowrite32(upper_32_bits(pdcs->rx_ring_alloc.dmabase),
		  &dma_reg->dmarcv.addrhigh);

	/* Re-enable DMA */
	iowrite32(PDC_TX_CTL | PDC_TX_ENABLE, &dma_reg->dmaxmt.control);
	iowrite32((PDC_RX_CTL | PDC_RX_ENABLE | (pdcs->rx_status_len << 1)),
		  &dma_reg->dmarcv.control);

	/* Initialize descriptors */
	for (i = 0; i < PDC_RING_ENTRIES; i++) {
		/* Every tx descriptor can be used for start of frame. */
		if (i != pdcs->ntxpost) {
			iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOF,
				  &pdcs->txd_64[i].ctrl1);
		} else {
			/* Last descriptor in ringset. Set End of Table. */
			iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOF |
				  D64_CTRL1_EOT, &pdcs->txd_64[i].ctrl1);
		}

		/* Every rx descriptor can be used for start of frame */
		if (i != pdcs->nrxpost) {
			iowrite32(D64_CTRL1_SOF,
				  &pdcs->rxd_64[i].ctrl1);
		} else {
			/* Last descriptor in ringset. Set End of Table. */
			iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOT,
				  &pdcs->rxd_64[i].ctrl1);
		}
	}
	return PDC_SUCCESS;

fail_dealloc:
	dma_pool_free(pdcs->ring_pool, tx.vbase, tx.dmabase);
done:
	return err;
}

static void pdc_ring_free(struct pdc_state *pdcs)
{
	if (pdcs->tx_ring_alloc.vbase) {
		dma_pool_free(pdcs->ring_pool, pdcs->tx_ring_alloc.vbase,
			      pdcs->tx_ring_alloc.dmabase);
		pdcs->tx_ring_alloc.vbase = NULL;
	}

	if (pdcs->rx_ring_alloc.vbase) {
		dma_pool_free(pdcs->ring_pool, pdcs->rx_ring_alloc.vbase,
			      pdcs->rx_ring_alloc.dmabase);
		pdcs->rx_ring_alloc.vbase = NULL;
	}
}

/**
 * pdc_desc_count() - Count the number of DMA descriptors that will be required
 * for a given scatterlist. Account for the max length of a DMA buffer.
 * @sg:  Scatterlist to be DMA'd
 * Return: Number of descriptors required
 */
static u32 pdc_desc_count(struct scatterlist *sg)
{
	u32 cnt = 0;

	while (sg) {
		cnt += ((sg->length / PDC_DMA_BUF_MAX) + 1);
		sg = sg_next(sg);
	}
	return cnt;
}

/**
 * pdc_rings_full() - Check whether the tx ring has room for tx_cnt descriptors
 * and the rx ring has room for rx_cnt descriptors.
 * @pdcs:    PDC state
 * @tx_cnt:  The number of descriptors required in the tx ring
 * @rx_cnt:  The number of descriptors required in the rx ring
 *
 * Return: true if one of the rings does not have enough space
 *         false if sufficient space is available in both rings
 */
static bool pdc_rings_full(struct pdc_state *pdcs, int tx_cnt, int rx_cnt)
{
	u32 rx_avail;
	u32 tx_avail;
	bool full = false;

	/* Check if the tx and rx rings are likely to have enough space */
	rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout,
					      pdcs->nrxpost);
	if (unlikely(rx_cnt > rx_avail)) {
		pdcs->rx_ring_full++;
		full = true;
	}

	if (likely(!full)) {
		tx_avail = pdcs->ntxpost - NTXDACTIVE(pdcs->txin, pdcs->txout,
						      pdcs->ntxpost);
		if (unlikely(tx_cnt > tx_avail)) {
			pdcs->tx_ring_full++;
			full = true;
		}
	}
	return full;
}

/**
 * pdc_last_tx_done() - If both the tx and rx rings have at least
 * PDC_RING_SPACE_MIN descriptors available, then indicate that the mailbox
 * framework can submit another message.
 * @chan:  mailbox channel to check
 * Return: true if PDC can accept another message on this channel
 */
static bool pdc_last_tx_done(struct mbox_chan *chan)
{
	struct pdc_state *pdcs = chan->con_priv;
	bool ret;

	if (unlikely(pdc_rings_full(pdcs, PDC_RING_SPACE_MIN,
				    PDC_RING_SPACE_MIN))) {
		pdcs->last_tx_not_done++;
		ret = false;
	} else {
		ret = true;
	}
	return ret;
}

/**
 * pdc_send_data() - mailbox send_data function
 * @chan:	The mailbox channel on which the data is sent. The channel
 *		corresponds to a DMA ringset.
 * @data:	The mailbox message to be sent. The message must be a
 *		brcm_message structure.
 *
 * This function is registered as the send_data function for the mailbox
 * controller. From the destination scatterlist in the mailbox message, it
 * creates a sequence of receive descriptors in the rx ring. From the source
 * scatterlist, it creates a sequence of transmit descriptors in the tx ring.
 * After creating the descriptors, it writes the rx ptr and tx ptr registers to
 * initiate the DMA transfer.
 *
 * This function does the DMA map and unmap of the src and dst scatterlists in
 * the mailbox message.
 *
 * Return: 0 if successful
 *	   -ENOTSUPP if the mailbox message is a type this driver does not
 *	   support
 *	   < 0 if an error
 */
static int pdc_send_data(struct mbox_chan *chan, void *data)
{
	struct pdc_state *pdcs = chan->con_priv;
	struct device *dev = &pdcs->pdev->dev;
	struct brcm_message *mssg = data;
	int err = PDC_SUCCESS;
	int src_nent;
	int dst_nent;
	int nent;
	u32 tx_desc_req;
	u32 rx_desc_req;

	if (unlikely(mssg->type != BRCM_MESSAGE_SPU))
		return -ENOTSUPP;

	src_nent = sg_nents(mssg->spu.src);
	if (likely(src_nent)) {
		nent = dma_map_sg(dev, mssg->spu.src, src_nent, DMA_TO_DEVICE);
		if (unlikely(nent == 0))
			return -EIO;
	}

	dst_nent = sg_nents(mssg->spu.dst);
	if (likely(dst_nent)) {
		nent = dma_map_sg(dev, mssg->spu.dst, dst_nent,
				  DMA_FROM_DEVICE);
		if (unlikely(nent == 0)) {
			dma_unmap_sg(dev, mssg->spu.src, src_nent,
				     DMA_TO_DEVICE);
			return -EIO;
		}
	}

	/*
	 * Check if the tx and rx rings have enough space. Do this prior to
	 * writing any tx or rx descriptors. Need to ensure that we do not write
	 * a partial set of descriptors, or write just rx descriptors but
	 * corresponding tx descriptors don't fit. Note that we want this check
	 * and the entire sequence of descriptor writes to happen without
	 * another thread getting in. The channel spin lock in the mailbox
	 * framework ensures this.
	 */
	tx_desc_req = pdc_desc_count(mssg->spu.src);
	rx_desc_req = pdc_desc_count(mssg->spu.dst);
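	/* +1 for the rx metadata descriptor posted by pdc_rx_list_init() */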
	if (unlikely(pdc_rings_full(pdcs, tx_desc_req, rx_desc_req + 1)))
		return -ENOSPC;

	/* Create rx descriptors to catch the SPU response */
	err = pdc_rx_list_init(pdcs, mssg->spu.dst, mssg->ctx);
	err |= pdc_rx_list_sg_add(pdcs, mssg->spu.dst);

	/* Create tx descriptors to submit SPU request */
	err |= pdc_tx_list_sg_add(pdcs, mssg->spu.src);
	err |= pdc_tx_list_final(pdcs);	/* initiate transfer */

	if (unlikely(err))
		dev_err(&pdcs->pdev->dev,
			"%s failed with error %d", __func__, err);

	return err;
}

static int pdc_startup(struct mbox_chan *chan)
{
	return pdc_ring_init(chan->con_priv, PDC_RINGSET);
}

static void pdc_shutdown(struct mbox_chan *chan)
{
	struct pdc_state *pdcs = chan->con_priv;

	if (!pdcs)
		return;

	dev_dbg(&pdcs->pdev->dev,
		"Shutdown mailbox channel for PDC %u", pdcs->pdc_idx);
	pdc_ring_free(pdcs);
}

/**
 * pdc_hw_init() - Use the given initialization parameters to initialize the
 * state for one of the PDCs.
 * @pdcs:  state of the PDC
 */
static
void pdc_hw_init(struct pdc_state *pdcs)
{
	struct platform_device *pdev;
	struct device *dev;
	struct dma64 *dma_reg;
	int ringset = PDC_RINGSET;

	pdev = pdcs->pdev;
	dev = &pdev->dev;

	dev_dbg(dev, "PDC %u initial values:", pdcs->pdc_idx);
	dev_dbg(dev, "state structure: %p", pdcs);
	dev_dbg(dev, " - base virtual addr of hw regs %p",
		pdcs->pdc_reg_vbase);

	/* initialize data structures */
	pdcs->regs = (struct pdc_regs *)pdcs->pdc_reg_vbase;
	pdcs->txregs_64 = (struct dma64_regs *)
	    (((u8 *)pdcs->pdc_reg_vbase) +
	     PDC_TXREGS_OFFSET + (sizeof(struct dma64) * ringset));
	pdcs->rxregs_64 = (struct dma64_regs *)
	    (((u8 *)pdcs->pdc_reg_vbase) +
	     PDC_RXREGS_OFFSET + (sizeof(struct dma64) * ringset));

	pdcs->ntxd = PDC_RING_ENTRIES;
	pdcs->nrxd = PDC_RING_ENTRIES;
	pdcs->ntxpost = PDC_RING_ENTRIES - 1;
	pdcs->nrxpost = PDC_RING_ENTRIES - 1;
	iowrite32(0, &pdcs->regs->intmask);

	dma_reg = &pdcs->regs->dmaregs[ringset];

	/* Configure DMA but will enable later in pdc_ring_init() */
	iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);
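
	/* rx_status_len << 1 sets the RcvOffset field (bits 7:1) of the rx control reg */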
	iowrite32(PDC_RX_CTL + (pdcs->rx_status_len << 1),
		  &dma_reg->dmarcv.control);

	/* Reset current index pointers after making sure DMA is disabled */
	iowrite32(0, &dma_reg->dmaxmt.ptr);
	iowrite32(0, &dma_reg->dmarcv.ptr);
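
	/* For SPU2, configure MDE_CKSUM_CONTROL to write metadata before each frame */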
	if (pdcs->pdc_resp_hdr_len == PDC_SPU2_RESP_HDR_LEN)
		iowrite32(PDC_CKSUM_CTRL,
			  pdcs->pdc_reg_vbase + PDC_CKSUM_CTRL_OFFSET);
}

/**
 * pdc_hw_disable() - Disable the tx and rx control in the hw.
 * @pdcs: PDC state structure
 *
 */
static void pdc_hw_disable(struct pdc_state *pdcs)
{
	struct dma64 *dma_reg;

	dma_reg = &pdcs->regs->dmaregs[PDC_RINGSET];
	iowrite32(PDC_TX_CTL, &dma_reg->dmaxmt.control);
	iowrite32(PDC_RX_CTL + (pdcs->rx_status_len << 1),
		  &dma_reg->dmarcv.control);
}

/**
 * pdc_rx_buf_pool_create() - Create a pool of receive buffers used to catch
 * the metadata header returned with each response message.
 * @pdcs: PDC state structure
 *
 * The metadata is not returned to the mailbox client. So the PDC driver
 * manages these buffers.
 *
 * Return: PDC_SUCCESS
 *         -ENOMEM if pool creation fails
 */
static int pdc_rx_buf_pool_create(struct pdc_state *pdcs)
{
	struct platform_device *pdev;
	struct device *dev;

	pdev = pdcs->pdev;
	dev = &pdev->dev;

	pdcs->pdc_resp_hdr_len = pdcs->rx_status_len;
	if (pdcs->use_bcm_hdr)
		pdcs->pdc_resp_hdr_len += BCM_HDR_LEN;

	pdcs->rx_buf_pool = dma_pool_create("pdc rx bufs", dev,
					    pdcs->pdc_resp_hdr_len,
					    RX_BUF_ALIGN, 0);
	if (!pdcs->rx_buf_pool)
		return -ENOMEM;

	return PDC_SUCCESS;
}

/**
 * pdc_interrupts_init() - Initialize the interrupt configuration for a PDC.
 * @pdcs: PDC state
 *
 * Set the interrupt mask to enable the rx done interrupt for ring 0.
 * Set the lazy interrupt frame count to generate an interrupt for just one pkt.
 * Register an IRQ handler; deferred handling of responses is done by a
 * tasklet, outside of interrupt context.
 *
 * Return: PDC_SUCCESS
 *         <0 if the IRQ request fails
 */
static int pdc_interrupts_init(struct pdc_state *pdcs)
{
	struct platform_device *pdev = pdcs->pdev;
	struct device *dev = &pdev->dev;
	struct device_node *dn = pdev->dev.of_node;
	int err;

	/* interrupt configuration */
	iowrite32(PDC_INTMASK, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);
	iowrite32(PDC_LAZY_INT, pdcs->pdc_reg_vbase + PDC_RCVLAZY0_OFFSET);

	/* read irq from device tree */
	pdcs->pdc_irq = irq_of_parse_and_map(dn, 0);
	dev_dbg(dev, "pdc device %s irq %u for pdcs %p",
		dev_name(dev), pdcs->pdc_irq, pdcs);

	err = devm_request_irq(dev, pdcs->pdc_irq, pdc_irq_handler, 0,
			       dev_name(dev), dev);
	if (err) {
		dev_err(dev, "IRQ %u request failed with err %d\n",
			pdcs->pdc_irq, err);
		return err;
	}
	return PDC_SUCCESS;
}

static const struct mbox_chan_ops pdc_mbox_chan_ops = {
	.send_data = pdc_send_data,
	.last_tx_done = pdc_last_tx_done,
	.startup = pdc_startup,
	.shutdown = pdc_shutdown
};
1422 | ||
/**
 * pdc_mb_init() - Initialize the mailbox controller.
 * @pdcs: PDC state
 *
 * Each PDC is a mailbox controller. Each ringset is a mailbox channel. The
 * kernel driver only uses one ringset and thus one mb channel. The mailbox
 * framework determines transmit completion by polling through the
 * last_tx_done op (txdone_poll) rather than from a tx-done interrupt.
 *
 * Return: 0 on success
 *         < 0 if there is an allocation or registration failure
 */
static int pdc_mb_init(struct pdc_state *pdcs)
{
	struct device *dev = &pdcs->pdev->dev;
	struct mbox_controller *mbc;
	int chan_index;
	int err;

	mbc = &pdcs->mbc;
	mbc->dev = dev;
	mbc->ops = &pdc_mbox_chan_ops;
	mbc->num_chans = 1;
	mbc->chans = devm_kcalloc(dev, mbc->num_chans, sizeof(*mbc->chans),
				  GFP_KERNEL);
	if (!mbc->chans)
		return -ENOMEM;

	mbc->txdone_irq = false;
	mbc->txdone_poll = true;
	mbc->txpoll_period = 1;
	for (chan_index = 0; chan_index < mbc->num_chans; chan_index++)
		mbc->chans[chan_index].con_priv = pdcs;

	/* Register mailbox controller */
	err = mbox_controller_register(mbc);
	if (err) {
		dev_crit(dev,
			 "Failed to register PDC mailbox controller. Error %d.",
			 err);
		return err;
	}
	return 0;
}

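/*
 * Illustrative sketch, not part of the driver: how a mailbox client might
 * hand a SPU request to this controller. The client device, scatterlists,
 * and channel index 0 are assumptions for the example; mbox_request_channel(),
 * mbox_send_message(), mbox_free_channel(), and struct brcm_message are the
 * real APIs. A real client would #include <linux/mailbox_client.h>.
 */
static int example_spu_client_send(struct device *client_dev,
				   struct scatterlist *src,
				   struct scatterlist *dst)
{
	struct mbox_client cl = { 0 };
	struct mbox_chan *chan;
	struct brcm_message msg = { 0 };
	int rc;

	cl.dev = client_dev;
	cl.tx_block = true;	/* block in mbox_send_message() until tx done */
	cl.tx_tout = 10000;	/* ms to wait for tx completion */

	chan = mbox_request_channel(&cl, 0);	/* ring 0 is the only channel */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	msg.type = BRCM_MESSAGE_SPU;
	msg.spu.src = src;
	msg.spu.dst = dst;

	rc = mbox_send_message(chan, &msg);
	mbox_free_channel(chan);
	return (rc < 0) ? rc : 0;
}
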
/**
 * pdc_dt_read() - Read application-specific data from device tree.
 * @pdev: Platform device
 * @pdcs: PDC state
 *
 * Reads the number of bytes of receive status that precede each received
 * frame. Reads whether transmit and receive frames should be preceded by an
 * 8-byte BCM header.
 *
 * Return: 0 if successful
 *         < 0 if a required device tree property cannot be read
 */
static int pdc_dt_read(struct platform_device *pdev, struct pdc_state *pdcs)
{
	struct device *dev = &pdev->dev;
	struct device_node *dn = pdev->dev.of_node;
	int err;

	err = of_property_read_u32(dn, "brcm,rx-status-len",
				   &pdcs->rx_status_len);
	if (err < 0) {
		dev_err(dev,
			"%s failed to get DMA receive status length from device tree",
			__func__);
		return err;
	}

	pdcs->use_bcm_hdr = of_property_read_bool(dn, "brcm,use-bcm-hdr");

	return 0;
}

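/*
 * Illustrative device tree node consuming these properties. Addresses, IRQ
 * number, and property values are placeholders; the compatible string and
 * property names match what this driver binds to and reads:
 *
 *	pdc0: iproc-pdc0@612c0000 {
 *		compatible = "brcm,iproc-pdc-mbox";
 *		reg = <0 0x612c0000 0 0x445>;
 *		interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
 *		#mbox-cells = <1>;
 *		brcm,rx-status-len = <32>;
 *		brcm,use-bcm-hdr;
 *	};
 */
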
/**
 * pdc_probe() - Probe function for PDC driver.
 * @pdev: PDC platform device
 *
 * Reserve and map register regions defined in device tree.
 * Allocate and initialize tx and rx DMA rings.
 * Initialize a mailbox controller for each PDC.
 *
 * Return: 0 if successful
 *         < 0 on error
 */
static int pdc_probe(struct platform_device *pdev)
{
	int err = 0;
	struct device *dev = &pdev->dev;
	struct resource *pdc_regs;
	struct pdc_state *pdcs;

	/* PDC state for one SPU */
	pdcs = devm_kzalloc(dev, sizeof(*pdcs), GFP_KERNEL);
	if (!pdcs) {
		err = -ENOMEM;
		goto cleanup;
	}

	pdcs->pdev = pdev;
	platform_set_drvdata(pdev, pdcs);
	pdcs->pdc_idx = pdcg.num_spu;
	pdcg.num_spu++;

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (err) {
		dev_warn(dev, "PDC device cannot perform DMA. Error %d.", err);
		goto cleanup;
	}

	/* Create DMA pool for tx ring */
	pdcs->ring_pool = dma_pool_create("pdc rings", dev, PDC_RING_SIZE,
					  RING_ALIGN, 0);
	if (!pdcs->ring_pool) {
		err = -ENOMEM;
		goto cleanup;
	}

	err = pdc_dt_read(pdev, pdcs);
	if (err)
		goto cleanup_ring_pool;

	pdc_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!pdc_regs) {
		err = -ENODEV;
		goto cleanup_ring_pool;
	}
	dev_dbg(dev, "PDC register region res.start = %pa, res.end = %pa",
		&pdc_regs->start, &pdc_regs->end);

	pdcs->pdc_reg_vbase = devm_ioremap_resource(&pdev->dev, pdc_regs);
	if (IS_ERR(pdcs->pdc_reg_vbase)) {
		err = PTR_ERR(pdcs->pdc_reg_vbase);
		dev_err(&pdev->dev, "Failed to map registers: %d\n", err);
		goto cleanup_ring_pool;
	}

	/* create rx buffer pool after dt read to know how big buffers are */
	err = pdc_rx_buf_pool_create(pdcs);
	if (err)
		goto cleanup_ring_pool;

	pdc_hw_init(pdcs);

	/* Init tasklet for deferred DMA rx processing */
	tasklet_init(&pdcs->rx_tasklet, pdc_tasklet_cb, (unsigned long)pdcs);

	err = pdc_interrupts_init(pdcs);
	if (err)
		goto cleanup_buf_pool;

	/* Initialize mailbox controller */
	err = pdc_mb_init(pdcs);
	if (err)
		goto cleanup_buf_pool;

	pdcs->debugfs_stats = NULL;
	pdc_setup_debugfs(pdcs);

	dev_dbg(dev, "pdc_probe() successful");
	return PDC_SUCCESS;

cleanup_buf_pool:
	tasklet_kill(&pdcs->rx_tasklet);
	dma_pool_destroy(pdcs->rx_buf_pool);

cleanup_ring_pool:
	dma_pool_destroy(pdcs->ring_pool);

cleanup:
	return err;
}

static int pdc_remove(struct platform_device *pdev)
{
	struct pdc_state *pdcs = platform_get_drvdata(pdev);

	pdc_free_debugfs();

	tasklet_kill(&pdcs->rx_tasklet);

	pdc_hw_disable(pdcs);

	mbox_controller_unregister(&pdcs->mbc);

	dma_pool_destroy(pdcs->rx_buf_pool);
	dma_pool_destroy(pdcs->ring_pool);
	return 0;
}

static const struct of_device_id pdc_mbox_of_match[] = {
	{ .compatible = "brcm,iproc-pdc-mbox" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, pdc_mbox_of_match);

static struct platform_driver pdc_mbox_driver = {
	.probe = pdc_probe,
	.remove = pdc_remove,
	.driver = {
		   .name = "brcm-iproc-pdc-mbox",
		   .of_match_table = of_match_ptr(pdc_mbox_of_match),
		   },
};
module_platform_driver(pdc_mbox_driver);

MODULE_AUTHOR("Rob Rice <rob.rice@broadcom.com>");
MODULE_DESCRIPTION("Broadcom PDC mailbox driver");
MODULE_LICENSE("GPL v2");