/* 10G controller driver for Samsung SoCs
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

#include "sxgbe_common.h"
#include "sxgbe_dma.h"
#include "sxgbe_reg.h"
#include "sxgbe_desc.h"

/* DMA core initialization */
static int sxgbe_dma_init(void __iomem *ioaddr, int fix_burst, int burst_map)
{
	int retry_count = 10;
	u32 reg_val;

	/* soft-reset the DMA engine and wait for the reset to self-clear */
	writel(SXGBE_DMA_SOFT_RESET, ioaddr + SXGBE_DMA_MODE_REG);
	while (retry_count--) {
		if (!(readl(ioaddr + SXGBE_DMA_MODE_REG) &
		      SXGBE_DMA_SOFT_RESET))
			break;
		mdelay(10);
	}

	if (retry_count < 0)
		return -EBUSY;

	reg_val = readl(ioaddr + SXGBE_DMA_SYSBUS_MODE_REG);
	/* if fix_burst = 0, Set UNDEF = 1 of DMA_Sys_Mode Register.
	 * if fix_burst = 1, Set UNDEF = 0 of DMA_Sys_Mode Register.
	 * burst_map is bitmap for BLEN[4, 8, 16, 32, 64, 128 and 256].
	 * Set burst_map irrespective of fix_burst value.
	 */
	if (!fix_burst)
		reg_val |= SXGBE_DMA_AXI_UNDEF_BURST;
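	/* e.g. a burst_map of 0x7 requests the BLEN4, BLEN8 and BLEN16
	 * burst lengths once shifted into the BLEN field below.
	 */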
	/* write burst len map */
	reg_val |= (burst_map << SXGBE_DMA_BLENMAP_LSHIFT);

	writel(reg_val, ioaddr + SXGBE_DMA_SYSBUS_MODE_REG);

	return 0;
}
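
/* Per-channel DMA setup: burst mode, descriptor ring base addresses,
 * tail pointers, ring sizes and the channel interrupt mask.
 */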
static void sxgbe_dma_channel_init(void __iomem *ioaddr, int cha_num,
				   int fix_burst, int pbl, dma_addr_t dma_tx,
				   dma_addr_t dma_rx, int t_rsize, int r_rsize)
{
	u32 reg_val;
	dma_addr_t dma_addr;

	reg_val = readl(ioaddr + SXGBE_DMA_CHA_CTL_REG(cha_num));
	/* set the pbl */
	if (fix_burst) {
		reg_val |= SXGBE_DMA_PBL_X8MODE;
		writel(reg_val, ioaddr + SXGBE_DMA_CHA_CTL_REG(cha_num));
		/* program the TX pbl */
		reg_val = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
		reg_val |= (pbl << SXGBE_DMA_TXPBL_LSHIFT);
		writel(reg_val, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
		/* program the RX pbl */
		reg_val = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cha_num));
		reg_val |= (pbl << SXGBE_DMA_RXPBL_LSHIFT);
		writel(reg_val, ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cha_num));
	}
	/* program desc registers */
	writel(upper_32_bits(dma_tx),
	       ioaddr + SXGBE_DMA_CHA_TXDESC_HADD_REG(cha_num));
	writel(lower_32_bits(dma_tx),
	       ioaddr + SXGBE_DMA_CHA_TXDESC_LADD_REG(cha_num));

	writel(upper_32_bits(dma_rx),
	       ioaddr + SXGBE_DMA_CHA_RXDESC_HADD_REG(cha_num));
	writel(lower_32_bits(dma_rx),
	       ioaddr + SXGBE_DMA_CHA_RXDESC_LADD_REG(cha_num));

	/* program tail pointers */
	/* assumption: upper 32 bits are constant and
	 * same as TX/RX desc list
	 */
	dma_addr = dma_tx + ((t_rsize - 1) * SXGBE_DESC_SIZE_BYTES);
	writel(lower_32_bits(dma_addr),
	       ioaddr + SXGBE_DMA_CHA_TXDESC_TAILPTR_REG(cha_num));

	/* the RX tail pointer should target the RX tail-pointer register,
	 * not the descriptor-list low-address register programmed above;
	 * assumes sxgbe_reg.h defines the RX counterpart of the TX
	 * tail-pointer register used for the TX ring
	 */
	dma_addr = dma_rx + ((r_rsize - 1) * SXGBE_DESC_SIZE_BYTES);
	writel(lower_32_bits(dma_addr),
	       ioaddr + SXGBE_DMA_CHA_RXDESC_TAILPTR_REG(cha_num));
	/* program the ring sizes */
	writel(t_rsize - 1, ioaddr + SXGBE_DMA_CHA_TXDESC_RINGLEN_REG(cha_num));
	writel(r_rsize - 1, ioaddr + SXGBE_DMA_CHA_RXDESC_RINGLEN_REG(cha_num));

	/* Enable TX/RX interrupts */
	writel(SXGBE_DMA_ENA_INT,
	       ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(cha_num));
}
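
/* Kick the transmit DMA on one channel by setting the start bit
 * in that channel's TX control register.
 */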
static void sxgbe_enable_dma_transmission(void __iomem *ioaddr, int cha_num)
{
	u32 tx_config;

	tx_config = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
	tx_config |= SXGBE_TX_START_DMA;
	writel(tx_config, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
}

static void sxgbe_enable_dma_irq(void __iomem *ioaddr, int dma_cnum)
{
	/* Enable TX/RX interrupts */
	writel(SXGBE_DMA_ENA_INT,
	       ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(dma_cnum));
}

static void sxgbe_disable_dma_irq(void __iomem *ioaddr, int dma_cnum)
{
	/* Disable TX/RX interrupts */
	writel(0, ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(dma_cnum));
}
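
/* Set the TX enable bit on all tchannels transmit channels. */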
static void sxgbe_dma_start_tx(void __iomem *ioaddr, int tchannels)
{
	int cnum;
	u32 tx_ctl_reg;

	for (cnum = 0; cnum < tchannels; cnum++) {
		tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
		tx_ctl_reg |= SXGBE_TX_ENABLE;
		writel(tx_ctl_reg,
		       ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
	}
}
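
/* Enable transmission on a single DMA channel. */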
static void sxgbe_dma_start_tx_queue(void __iomem *ioaddr, int dma_cnum)
{
	u32 tx_ctl_reg;

	tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
	tx_ctl_reg |= SXGBE_TX_ENABLE;
	writel(tx_ctl_reg, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
}
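
/* Disable transmission on a single DMA channel. */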
static void sxgbe_dma_stop_tx_queue(void __iomem *ioaddr, int dma_cnum)
{
	u32 tx_ctl_reg;

	tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
	tx_ctl_reg &= ~(SXGBE_TX_ENABLE);
	writel(tx_ctl_reg, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
}
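
/* Clear the TX enable bit on all tchannels transmit channels. */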
static void sxgbe_dma_stop_tx(void __iomem *ioaddr, int tchannels)
{
	int cnum;
	u32 tx_ctl_reg;

	for (cnum = 0; cnum < tchannels; cnum++) {
		tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
		tx_ctl_reg &= ~(SXGBE_TX_ENABLE);
		writel(tx_ctl_reg, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
	}
}
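
/* Set the RX enable bit on all rchannels receive channels. */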
static void sxgbe_dma_start_rx(void __iomem *ioaddr, int rchannels)
{
	int cnum;
	u32 rx_ctl_reg;

	for (cnum = 0; cnum < rchannels; cnum++) {
		rx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
		rx_ctl_reg |= SXGBE_RX_ENABLE;
		writel(rx_ctl_reg,
		       ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
	}
}
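
/* Clear the RX enable bit on all rchannels receive channels. */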
static void sxgbe_dma_stop_rx(void __iomem *ioaddr, int rchannels)
{
	int cnum;
	u32 rx_ctl_reg;

	for (cnum = 0; cnum < rchannels; cnum++) {
		rx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
		rx_ctl_reg &= ~(SXGBE_RX_ENABLE);
		writel(rx_ctl_reg, ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
	}
}
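
/* Decode and acknowledge the TX interrupt sources for one channel.
 * Returns a bitmask built from handle_tx, tx_bump_tc and tx_hard_error
 * for the caller and clears the serviced bits in the channel status
 * register.
 */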
static int sxgbe_tx_dma_int_status(void __iomem *ioaddr, int channel_no,
				   struct sxgbe_extra_stats *x)
{
	u32 int_status = readl(ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));
	u32 clear_val = 0;
	u32 ret_val = 0;

	/* TX Normal Interrupt Summary */
	if (likely(int_status & SXGBE_DMA_INT_STATUS_NIS)) {
		if (int_status & SXGBE_DMA_INT_STATUS_TI) {
			ret_val |= handle_tx;
			x->tx_normal_irq_n++;
			clear_val |= SXGBE_DMA_INT_STATUS_TI;
		}

		if (int_status & SXGBE_DMA_INT_STATUS_TBU) {
			x->tx_underflow_irq++;
			ret_val |= tx_bump_tc;
			clear_val |= SXGBE_DMA_INT_STATUS_TBU;
		}
	} else if (unlikely(int_status & SXGBE_DMA_INT_STATUS_AIS)) {
		/* TX Abnormal Interrupt Summary */
		if (int_status & SXGBE_DMA_INT_STATUS_TPS) {
			ret_val |= tx_hard_error;
			clear_val |= SXGBE_DMA_INT_STATUS_TPS;
			x->tx_process_stopped_irq++;
		}

		if (int_status & SXGBE_DMA_INT_STATUS_FBE) {
			ret_val |= tx_hard_error;
			x->fatal_bus_error_irq++;

			/* Assumption: FBE bit is the combination of
			 * all the bus access errors and is cleared when
			 * the respective error bits are cleared
			 */

			/* check for actual cause */
			if (int_status & SXGBE_DMA_INT_STATUS_TEB0) {
				x->tx_read_transfer_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_TEB0;
			} else {
				x->tx_write_transfer_err++;
			}

			if (int_status & SXGBE_DMA_INT_STATUS_TEB1) {
				x->tx_desc_access_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_TEB1;
			} else {
				x->tx_buffer_access_err++;
			}

			if (int_status & SXGBE_DMA_INT_STATUS_TEB2) {
				x->tx_data_transfer_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_TEB2;
			}
		}

		/* context descriptor error */
		if (int_status & SXGBE_DMA_INT_STATUS_CTXTERR) {
			x->tx_ctxt_desc_err++;
			clear_val |= SXGBE_DMA_INT_STATUS_CTXTERR;
		}
	}

	/* clear the served bits */
	writel(clear_val, ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));

	return ret_val;
}
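
/* Decode and acknowledge the RX interrupt sources for one channel.
 * Returns a bitmask built from handle_rx, rx_bump_tc and rx_hard_error
 * for the caller and clears the serviced bits in the channel status
 * register.
 */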
static int sxgbe_rx_dma_int_status(void __iomem *ioaddr, int channel_no,
				   struct sxgbe_extra_stats *x)
{
	u32 int_status = readl(ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));
	u32 clear_val = 0;
	u32 ret_val = 0;

	/* RX Normal Interrupt Summary */
	if (likely(int_status & SXGBE_DMA_INT_STATUS_NIS)) {
		if (int_status & SXGBE_DMA_INT_STATUS_RI) {
			ret_val |= handle_rx;
			x->rx_normal_irq_n++;
			clear_val |= SXGBE_DMA_INT_STATUS_RI;
		}
	} else if (unlikely(int_status & SXGBE_DMA_INT_STATUS_AIS)) {
		/* RX Abnormal Interrupt Summary */
		if (int_status & SXGBE_DMA_INT_STATUS_RBU) {
			ret_val |= rx_bump_tc;
			clear_val |= SXGBE_DMA_INT_STATUS_RBU;
			x->rx_underflow_irq++;
		}

		if (int_status & SXGBE_DMA_INT_STATUS_RPS) {
			ret_val |= rx_hard_error;
			clear_val |= SXGBE_DMA_INT_STATUS_RPS;
			x->rx_process_stopped_irq++;
		}

		if (int_status & SXGBE_DMA_INT_STATUS_FBE) {
			ret_val |= rx_hard_error;
			x->fatal_bus_error_irq++;

			/* Assumption: FBE bit is the combination of
			 * all the bus access errors and is cleared when
			 * the respective error bits are cleared
			 */

			/* check for actual cause */
			if (int_status & SXGBE_DMA_INT_STATUS_REB0) {
				x->rx_read_transfer_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_REB0;
			} else {
				x->rx_write_transfer_err++;
			}

			if (int_status & SXGBE_DMA_INT_STATUS_REB1) {
				x->rx_desc_access_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_REB1;
			} else {
				x->rx_buffer_access_err++;
			}

			if (int_status & SXGBE_DMA_INT_STATUS_REB2) {
				x->rx_data_transfer_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_REB2;
			}
		}
	}

	/* clear the served bits */
	writel(clear_val, ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));

	return ret_val;
}

/* Program the HW RX Watchdog */
static void sxgbe_dma_rx_watchdog(void __iomem *ioaddr, u32 riwt)
{
	u32 que_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, que_num) {
		writel(riwt,
		       ioaddr + SXGBE_DMA_CHA_INT_RXWATCHTMR_REG(que_num));
	}
}
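
/* Enable TCP Segmentation Offload (the TSE bit) on the given TX channel. */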
static void sxgbe_enable_tso(void __iomem *ioaddr, u8 chan_num)
{
	u32 ctrl;

	ctrl = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(chan_num));
	ctrl |= SXGBE_DMA_CHA_TXCTL_TSE_ENABLE;
	writel(ctrl, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(chan_num));
}
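
/* DMA callbacks exposed to the rest of the driver via sxgbe_get_dma_ops() */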
static const struct sxgbe_dma_ops sxgbe_dma_ops = {
	.init			= sxgbe_dma_init,
	.cha_init		= sxgbe_dma_channel_init,
	.enable_dma_transmission = sxgbe_enable_dma_transmission,
	.enable_dma_irq		= sxgbe_enable_dma_irq,
	.disable_dma_irq	= sxgbe_disable_dma_irq,
	.start_tx		= sxgbe_dma_start_tx,
	.start_tx_queue		= sxgbe_dma_start_tx_queue,
	.stop_tx		= sxgbe_dma_stop_tx,
	.stop_tx_queue		= sxgbe_dma_stop_tx_queue,
	.start_rx		= sxgbe_dma_start_rx,
	.stop_rx		= sxgbe_dma_stop_rx,
	.tx_dma_int_status	= sxgbe_tx_dma_int_status,
	.rx_dma_int_status	= sxgbe_rx_dma_int_status,
	.rx_watchdog		= sxgbe_dma_rx_watchdog,
	.enable_tso		= sxgbe_enable_tso,
};
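
/* Entry point used by the rest of the driver to obtain the DMA ops. */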
const struct sxgbe_dma_ops *sxgbe_get_dma_ops(void)
{
	return &sxgbe_dma_ops;
}