/* Altera TSE SGDMA and MSGDMA Linux driver
 * Copyright (C) 2014 Altera Corporation. All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/list.h>
#include "altera_utils.h"
#include "altera_tse.h"
#include "altera_sgdmahw.h"
#include "altera_sgdma.h"

static void sgdma_descrip(struct sgdma_descrip *desc,
                          struct sgdma_descrip *ndesc,
                          dma_addr_t ndesc_phys,
                          dma_addr_t raddr,
                          dma_addr_t waddr,
                          u16 length,
                          int generate_eop,
                          int rfixed,
                          int wfixed);

static int sgdma_async_write(struct altera_tse_private *priv,
                             struct sgdma_descrip *desc);

static int sgdma_async_read(struct altera_tse_private *priv);

static dma_addr_t
sgdma_txphysaddr(struct altera_tse_private *priv,
                 struct sgdma_descrip *desc);

static dma_addr_t
sgdma_rxphysaddr(struct altera_tse_private *priv,
                 struct sgdma_descrip *desc);

static int sgdma_txbusy(struct altera_tse_private *priv);

static int sgdma_rxbusy(struct altera_tse_private *priv);

static void
queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer);

static void
queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer);

static struct tse_buffer *
dequeue_tx(struct altera_tse_private *priv);

static struct tse_buffer *
dequeue_rx(struct altera_tse_private *priv);

static struct tse_buffer *
queue_rx_peekhead(struct altera_tse_private *priv);

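/* Set up the control-register values used when (re)starting the DMA engines,
 * initialize the pending tx/rx buffer lists, and map the descriptor memory
 * for streaming DMA. The rx descriptor region is mapped DMA_BIDIRECTIONAL
 * because the SGDMA writes completion status back into the descriptors,
 * which the CPU later reads in sgdma_rx_status().
 */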
int sgdma_initialize(struct altera_tse_private *priv)
{
        priv->txctrlreg = SGDMA_CTRLREG_ILASTD |
                          SGDMA_CTRLREG_INTEN;

        priv->rxctrlreg = SGDMA_CTRLREG_IDESCRIP |
                          SGDMA_CTRLREG_INTEN |
                          SGDMA_CTRLREG_ILASTD;

        priv->sgdmadesclen = sizeof(struct sgdma_descrip);

        INIT_LIST_HEAD(&priv->txlisthd);
        INIT_LIST_HEAD(&priv->rxlisthd);

        priv->rxdescphys = (dma_addr_t) 0;
        priv->txdescphys = (dma_addr_t) 0;

        priv->rxdescphys = dma_map_single(priv->device, priv->rx_dma_desc,
                                          priv->rxdescmem, DMA_BIDIRECTIONAL);

        if (dma_mapping_error(priv->device, priv->rxdescphys)) {
                sgdma_uninitialize(priv);
                netdev_err(priv->dev, "error mapping rx descriptor memory\n");
                return -EINVAL;
        }

        priv->txdescphys = dma_map_single(priv->device, priv->tx_dma_desc,
                                          priv->txdescmem, DMA_TO_DEVICE);

        if (dma_mapping_error(priv->device, priv->txdescphys)) {
                sgdma_uninitialize(priv);
                netdev_err(priv->dev, "error mapping tx descriptor memory\n");
                return -EINVAL;
        }

        /* Initialize descriptor memory to all 0's, sync memory to cache */
        memset(priv->tx_dma_desc, 0, priv->txdescmem);
        memset(priv->rx_dma_desc, 0, priv->rxdescmem);

        dma_sync_single_for_device(priv->device, priv->txdescphys,
                                   priv->txdescmem, DMA_TO_DEVICE);

        dma_sync_single_for_device(priv->device, priv->rxdescphys,
                                   priv->rxdescmem, DMA_TO_DEVICE);

        return 0;
}

void sgdma_uninitialize(struct altera_tse_private *priv)
{
        if (priv->rxdescphys)
                dma_unmap_single(priv->device, priv->rxdescphys,
                                 priv->rxdescmem, DMA_BIDIRECTIONAL);

        if (priv->txdescphys)
                dma_unmap_single(priv->device, priv->txdescphys,
                                 priv->txdescmem, DMA_TO_DEVICE);
}

/* This function resets the SGDMA controller and clears the
 * descriptor memory used for transmits and receives.
 */
void sgdma_reset(struct altera_tse_private *priv)
{
        u32 *ptxdescripmem = (u32 *)priv->tx_dma_desc;
        u32 txdescriplen = priv->txdescmem;
        u32 *prxdescripmem = (u32 *)priv->rx_dma_desc;
        u32 rxdescriplen = priv->rxdescmem;
        struct sgdma_csr *ptxsgdma = (struct sgdma_csr *)priv->tx_dma_csr;
        struct sgdma_csr *prxsgdma = (struct sgdma_csr *)priv->rx_dma_csr;

        /* Initialize descriptor memory to 0 */
        memset(ptxdescripmem, 0, txdescriplen);
        memset(prxdescripmem, 0, rxdescriplen);

        iowrite32(SGDMA_CTRLREG_RESET, &ptxsgdma->control);
        iowrite32(0, &ptxsgdma->control);

        iowrite32(SGDMA_CTRLREG_RESET, &prxsgdma->control);
        iowrite32(0, &prxsgdma->control);
}

/* For SGDMA, interrupts remain enabled after they are initially enabled,
 * so there is no need to provide implementations for the abstract enable
 * and disable hooks.
 */

void sgdma_enable_rxirq(struct altera_tse_private *priv)
{
}

void sgdma_enable_txirq(struct altera_tse_private *priv)
{
}

void sgdma_disable_rxirq(struct altera_tse_private *priv)
{
}

void sgdma_disable_txirq(struct altera_tse_private *priv)
{
}

void sgdma_clear_rxirq(struct altera_tse_private *priv)
{
        struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
        tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
}

void sgdma_clear_txirq(struct altera_tse_private *priv)
{
        struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
        tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
}

/* Transmits a buffer through the SGDMA. Returns the number of buffers
 * transmitted, or 0 if a transmit is not currently possible.
 *
 * tx_lock is held by the caller.
 */
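/* Two descriptors are used per transmit: descbase[0] carries the packet,
 * and descbase[1] acts as the chain terminator that sgdma_descrip()
 * marks as not hardware-owned, so the engine stops after one buffer.
 */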
int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
        struct sgdma_descrip *descbase =
                (struct sgdma_descrip *)priv->tx_dma_desc;

        struct sgdma_descrip *cdesc = &descbase[0];
        struct sgdma_descrip *ndesc = &descbase[1];

        /* wait 'til the tx sgdma is ready for the next transmit request */
        if (sgdma_txbusy(priv))
                return 0;

        sgdma_descrip(cdesc,                    /* current descriptor */
                      ndesc,                    /* next descriptor */
                      sgdma_txphysaddr(priv, ndesc),
                      buffer->dma_addr,         /* address of packet to xmit */
                      0,                        /* write addr 0 for tx dma */
                      buffer->len,              /* length of packet */
                      SGDMA_CONTROL_EOP,        /* generate EOP */
                      0,                        /* read fixed */
                      SGDMA_CONTROL_WR_FIXED);  /* write fixed */

        sgdma_async_write(priv, cdesc);

        /* enqueue the request to the pending transmit queue */
        queue_tx(priv, buffer);

        return 1;
}


/* tx_lock held to protect access to queued tx list
 */
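/* A transmit is considered complete when the tx SGDMA is no longer busy
 * and hardware has released ownership of descriptor 0; the corresponding
 * buffer is then popped from the pending transmit queue.
 */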
u32 sgdma_tx_completions(struct altera_tse_private *priv)
{
        u32 ready = 0;
        struct sgdma_descrip *desc = (struct sgdma_descrip *)priv->tx_dma_desc;

        if (!sgdma_txbusy(priv) &&
            ((desc->control & SGDMA_CONTROL_HW_OWNED) == 0) &&
            (dequeue_tx(priv))) {
                ready = 1;
        }

        return ready;
}

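/* Kick the receive SGDMA; buffers previously queued by sgdma_add_rx_desc()
 * below are consumed by sgdma_async_read() once the engine is idle.
 */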
void sgdma_start_rxdma(struct altera_tse_private *priv)
{
        sgdma_async_read(priv);
}

void sgdma_add_rx_desc(struct altera_tse_private *priv,
                       struct tse_buffer *rxbuffer)
{
        queue_rx(priv, rxbuffer);
}

/* Status is returned in the upper 16 bits,
 * length is returned in the lower 16 bits.
 */
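/* On an end-of-packet indication, the descriptor is synced for the CPU,
 * the packet length and the low six status bits are extracted, and the
 * receive engine is re-armed for the next queued buffer.
 */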
u32 sgdma_rx_status(struct altera_tse_private *priv)
{
        struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
        struct sgdma_descrip *base = (struct sgdma_descrip *)priv->rx_dma_desc;
        struct sgdma_descrip *desc = NULL;
        unsigned int rxstatus = 0;
        unsigned int pktlength = 0;
        unsigned int pktstatus = 0;
        struct tse_buffer *rxbuffer = NULL;

        u32 sts = ioread32(&csr->status);

        desc = &base[0];
        if (sts & SGDMA_STSREG_EOP) {
                dma_sync_single_for_cpu(priv->device,
                                        priv->rxdescphys,
                                        priv->sgdmadesclen,
                                        DMA_FROM_DEVICE);

                pktlength = desc->bytes_xferred;
                pktstatus = desc->status & 0x3f;
                rxstatus = pktstatus;
                rxstatus = rxstatus << 16;
                rxstatus |= (pktlength & 0xffff);

                if (rxstatus) {
                        desc->status = 0;

                        rxbuffer = dequeue_rx(priv);
                        if (rxbuffer == NULL)
                                netdev_info(priv->dev,
                                            "sgdma rx and rx queue empty!\n");

                        /* Clear control */
                        iowrite32(0, &csr->control);
                        /* clear status */
                        iowrite32(0xf, &csr->status);

                        /* kick the rx sgdma after reaping this descriptor */
                        sgdma_async_read(priv);

                } else {
                        /* If the SGDMA indicated an end of packet on receive,
                         * then the rxstatus from the descriptor is expected
                         * to be non-zero - meaning a valid packet with a
                         * non-zero length, or an indicated error. If not,
                         * then all we can do is signal an error and return
                         * no packet received. Most likely there is a system
                         * design error, or an error in the underlying
                         * kernel (cache or cache management problem).
                         */
                        netdev_err(priv->dev,
                                   "SGDMA RX Error Info: %x, %x, %x\n",
                                   sts, desc->status, rxstatus);
                }
        } else if (sts == 0) {
                sgdma_async_read(priv);
        }

        return rxstatus;
}


/* Private functions */
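/* Write one descriptor and chain it to the next: the current descriptor is
 * marked hardware-owned, while the next descriptor has its hardware-owned
 * bit cleared so the engine stops after processing the current one.
 */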
static void sgdma_descrip(struct sgdma_descrip *desc,
                          struct sgdma_descrip *ndesc,
                          dma_addr_t ndesc_phys,
                          dma_addr_t raddr,
                          dma_addr_t waddr,
                          u16 length,
                          int generate_eop,
                          int rfixed,
                          int wfixed)
{
        /* Clear the next descriptor as not owned by hardware */
        u32 ctrl = ndesc->control;
        ctrl &= ~SGDMA_CONTROL_HW_OWNED;
        ndesc->control = ctrl;

        ctrl = SGDMA_CONTROL_HW_OWNED;
        ctrl |= generate_eop;
        ctrl |= rfixed;
        ctrl |= wfixed;

        /* Channel is implicitly zero, initialized to 0 by default */

        desc->raddr = raddr;
        desc->waddr = waddr;
        desc->next = lower_32_bits(ndesc_phys);
        desc->control = ctrl;
        desc->status = 0;
        desc->rburst = 0;
        desc->wburst = 0;
        desc->bytes = length;
        desc->bytes_xferred = 0;
}

/* If the hardware is busy, don't restart the async read.
 * If the status register is 0 - meaning the initial state - restart the
 * async read, probably for the first time when populating a receive buffer.
 * If the status indicates not busy and a completion status is present,
 * restart the async DMA read.
 */
static int sgdma_async_read(struct altera_tse_private *priv)
{
        struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
        struct sgdma_descrip *descbase =
                (struct sgdma_descrip *)priv->rx_dma_desc;

        struct sgdma_descrip *cdesc = &descbase[0];
        struct sgdma_descrip *ndesc = &descbase[1];

        struct tse_buffer *rxbuffer = NULL;

        if (!sgdma_rxbusy(priv)) {
                rxbuffer = queue_rx_peekhead(priv);
                if (rxbuffer == NULL) {
                        netdev_err(priv->dev, "no rx buffers available\n");
                        return 0;
                }

                sgdma_descrip(cdesc,              /* current descriptor */
                              ndesc,              /* next descriptor */
                              sgdma_rxphysaddr(priv, ndesc),
                              0,                  /* read addr 0 for rx dma */
                              rxbuffer->dma_addr, /* write addr for rx dma */
                              0,                  /* read 'til EOP */
                              0,                  /* EOP: NA for rx dma */
                              0,                  /* read fixed: NA for rx dma */
                              0);                 /* SOP: NA for rx DMA */

                dma_sync_single_for_device(priv->device,
                                           priv->rxdescphys,
                                           priv->sgdmadesclen,
                                           DMA_TO_DEVICE);

                iowrite32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
                          &csr->next_descrip);

                iowrite32((priv->rxctrlreg | SGDMA_CTRLREG_START),
                          &csr->control);

                return 1;
        }

        return 0;
}

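/* Hand one tx descriptor to the engine: clear the control and status
 * registers, sync the descriptor to the device, point next_descrip at the
 * descriptor's bus address, and set the START bit. Returns 1 on success,
 * 0 if the engine is still busy.
 */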
static int sgdma_async_write(struct altera_tse_private *priv,
                             struct sgdma_descrip *desc)
{
        struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;

        if (sgdma_txbusy(priv))
                return 0;

        /* clear control and status */
        iowrite32(0, &csr->control);
        iowrite32(0x1f, &csr->status);

        dma_sync_single_for_device(priv->device, priv->txdescphys,
                                   priv->sgdmadesclen, DMA_TO_DEVICE);

        iowrite32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
                  &csr->next_descrip);

        iowrite32((priv->txctrlreg | SGDMA_CTRLREG_START),
                  &csr->control);

        return 1;
}

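/* Translate a CPU pointer into a descriptor region into the equivalent
 * bus address by applying the pointer's byte offset within the mapped
 * region to the region's bus base address.
 */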
static dma_addr_t
sgdma_txphysaddr(struct altera_tse_private *priv,
                 struct sgdma_descrip *desc)
{
        dma_addr_t paddr = priv->txdescmem_busaddr;
        uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc;
        return (dma_addr_t)((uintptr_t)paddr + offs);
}

static dma_addr_t
sgdma_rxphysaddr(struct altera_tse_private *priv,
                 struct sgdma_descrip *desc)
{
        dma_addr_t paddr = priv->rxdescmem_busaddr;
        uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc;
        return (dma_addr_t)((uintptr_t)paddr + offs);
}

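/* Remove the head of 'list' into 'entry', or set 'entry' to NULL if the
 * list is empty. list_peek_head() does the same without removing the entry.
 */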
#define list_remove_head(list, entry, type, member)                     \
        do {                                                            \
                entry = NULL;                                           \
                if (!list_empty(list)) {                                \
                        entry = list_entry((list)->next, type, member); \
                        list_del_init(&entry->member);                  \
                }                                                       \
        } while (0)

#define list_peek_head(list, entry, type, member)                      \
        do {                                                           \
                entry = NULL;                                          \
                if (!list_empty(list)) {                               \
                        entry = list_entry((list)->next, type, member);\
                }                                                      \
        } while (0)

/* adds a tse_buffer to the tail of a tx buffer list.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static void
queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
        list_add_tail(&buffer->lh, &priv->txlisthd);
}


/* adds a tse_buffer to the tail of an rx buffer list.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static void
queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
        list_add_tail(&buffer->lh, &priv->rxlisthd);
}


/* dequeues a tse_buffer from the transmit buffer list, otherwise
 * returns NULL if empty.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static struct tse_buffer *
dequeue_tx(struct altera_tse_private *priv)
{
        struct tse_buffer *buffer = NULL;
        list_remove_head(&priv->txlisthd, buffer, struct tse_buffer, lh);
        return buffer;
}

/* dequeues a tse_buffer from the receive buffer list, otherwise
 * returns NULL if empty.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static struct tse_buffer *
dequeue_rx(struct altera_tse_private *priv)
{
        struct tse_buffer *buffer = NULL;
        list_remove_head(&priv->rxlisthd, buffer, struct tse_buffer, lh);
        return buffer;
}

/* returns the head of the receive buffer list without removing it,
 * otherwise returns NULL if empty.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list while the
 * head is being examined.
 */
static struct tse_buffer *
queue_rx_peekhead(struct altera_tse_private *priv)
{
        struct tse_buffer *buffer = NULL;
        list_peek_head(&priv->rxlisthd, buffer, struct tse_buffer, lh);
        return buffer;
}

/* check and return rx sgdma status without polling
 */
static int sgdma_rxbusy(struct altera_tse_private *priv)
{
        struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
        return ioread32(&csr->status) & SGDMA_STSREG_BUSY;
}

/* waits for the tx sgdma to finish its current operation, returns 0
 * when it transitions to nonbusy, returns 1 if the operation times out
 */
static int sgdma_txbusy(struct altera_tse_private *priv)
{
        int delay = 0;
        struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;

        /* if DMA is busy, wait for current transaction to finish */
        while ((ioread32(&csr->status) & SGDMA_STSREG_BUSY) && (delay++ < 100))
                udelay(1);

        if (ioread32(&csr->status) & SGDMA_STSREG_BUSY) {
                netdev_err(priv->dev, "timeout waiting for tx dma\n");
                return 1;
        }
        return 0;
}