/*
 * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
 * DWC Ether MAC version 4.xx has been used for developing this code.
 *
 * This contains the functions to handle the dma.
 *
 * Copyright (C) 2015 STMicroelectronics Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * Author: Alexandre Torgue <alexandre.torgue@st.com>
 */
18 #include "dwmac4_dma.h"
20 static void dwmac4_dma_axi(void __iomem
*ioaddr
, struct stmmac_axi
*axi
)
22 u32 value
= readl(ioaddr
+ DMA_SYS_BUS_MODE
);
25 pr_info("dwmac4: Master AXI performs %s burst length\n",
26 (value
& DMA_SYS_BUS_FB
) ? "fixed" : "any");
29 value
|= DMA_AXI_EN_LPI
;
31 value
|= DMA_AXI_LPI_XIT_FRM
;
33 value
&= ~DMA_AXI_WR_OSR_LMT
;
34 value
|= (axi
->axi_wr_osr_lmt
& DMA_AXI_OSR_MAX
) <<
35 DMA_AXI_WR_OSR_LMT_SHIFT
;
37 value
&= ~DMA_AXI_RD_OSR_LMT
;
38 value
|= (axi
->axi_rd_osr_lmt
& DMA_AXI_OSR_MAX
) <<
39 DMA_AXI_RD_OSR_LMT_SHIFT
;
41 /* Depending on the UNDEF bit the Master AXI will perform any burst
42 * length according to the BLEN programmed (by default all BLEN are
45 for (i
= 0; i
< AXI_BLEN
; i
++) {
46 switch (axi
->axi_blen
[i
]) {
48 value
|= DMA_AXI_BLEN256
;
51 value
|= DMA_AXI_BLEN128
;
54 value
|= DMA_AXI_BLEN64
;
57 value
|= DMA_AXI_BLEN32
;
60 value
|= DMA_AXI_BLEN16
;
63 value
|= DMA_AXI_BLEN8
;
66 value
|= DMA_AXI_BLEN4
;
71 writel(value
, ioaddr
+ DMA_SYS_BUS_MODE
);
74 static void dwmac4_dma_init_channel(void __iomem
*ioaddr
,
75 struct stmmac_dma_cfg
*dma_cfg
,
76 u32 dma_tx_phy
, u32 dma_rx_phy
,
80 int txpbl
= dma_cfg
->txpbl
?: dma_cfg
->pbl
;
81 int rxpbl
= dma_cfg
->rxpbl
?: dma_cfg
->pbl
;
83 /* set PBL for each channels. Currently we affect same configuration
86 value
= readl(ioaddr
+ DMA_CHAN_CONTROL(channel
));
87 value
= value
| DMA_BUS_MODE_PBL
;
88 writel(value
, ioaddr
+ DMA_CHAN_CONTROL(channel
));
90 value
= readl(ioaddr
+ DMA_CHAN_TX_CONTROL(channel
));
91 value
= value
| (txpbl
<< DMA_BUS_MODE_PBL_SHIFT
);
92 writel(value
, ioaddr
+ DMA_CHAN_TX_CONTROL(channel
));
94 value
= readl(ioaddr
+ DMA_CHAN_RX_CONTROL(channel
));
95 value
= value
| (rxpbl
<< DMA_BUS_MODE_RPBL_SHIFT
);
96 writel(value
, ioaddr
+ DMA_CHAN_RX_CONTROL(channel
));
98 /* Mask interrupts by writing to CSR7 */
99 writel(DMA_CHAN_INTR_DEFAULT_MASK
, ioaddr
+ DMA_CHAN_INTR_ENA(channel
));
101 writel(dma_tx_phy
, ioaddr
+ DMA_CHAN_TX_BASE_ADDR(channel
));
102 writel(dma_rx_phy
, ioaddr
+ DMA_CHAN_RX_BASE_ADDR(channel
));
105 static void dwmac4_dma_init(void __iomem
*ioaddr
,
106 struct stmmac_dma_cfg
*dma_cfg
,
107 u32 dma_tx
, u32 dma_rx
, int atds
)
109 u32 value
= readl(ioaddr
+ DMA_SYS_BUS_MODE
);
112 /* Set the Fixed burst mode */
113 if (dma_cfg
->fixed_burst
)
114 value
|= DMA_SYS_BUS_FB
;
116 /* Mixed Burst has no effect when fb is set */
117 if (dma_cfg
->mixed_burst
)
118 value
|= DMA_SYS_BUS_MB
;
121 value
|= DMA_SYS_BUS_AAL
;
123 writel(value
, ioaddr
+ DMA_SYS_BUS_MODE
);
125 for (i
= 0; i
< DMA_CHANNEL_NB_MAX
; i
++)
126 dwmac4_dma_init_channel(ioaddr
, dma_cfg
, dma_tx
, dma_rx
, i
);
129 static void _dwmac4_dump_dma_regs(void __iomem
*ioaddr
, u32 channel
)
131 pr_debug(" Channel %d\n", channel
);
132 pr_debug("\tDMA_CHAN_CONTROL, offset: 0x%x, val: 0x%x\n", 0,
133 readl(ioaddr
+ DMA_CHAN_CONTROL(channel
)));
134 pr_debug("\tDMA_CHAN_TX_CONTROL, offset: 0x%x, val: 0x%x\n", 0x4,
135 readl(ioaddr
+ DMA_CHAN_TX_CONTROL(channel
)));
136 pr_debug("\tDMA_CHAN_RX_CONTROL, offset: 0x%x, val: 0x%x\n", 0x8,
137 readl(ioaddr
+ DMA_CHAN_RX_CONTROL(channel
)));
138 pr_debug("\tDMA_CHAN_TX_BASE_ADDR, offset: 0x%x, val: 0x%x\n", 0x14,
139 readl(ioaddr
+ DMA_CHAN_TX_BASE_ADDR(channel
)));
140 pr_debug("\tDMA_CHAN_RX_BASE_ADDR, offset: 0x%x, val: 0x%x\n", 0x1c,
141 readl(ioaddr
+ DMA_CHAN_RX_BASE_ADDR(channel
)));
142 pr_debug("\tDMA_CHAN_TX_END_ADDR, offset: 0x%x, val: 0x%x\n", 0x20,
143 readl(ioaddr
+ DMA_CHAN_TX_END_ADDR(channel
)));
144 pr_debug("\tDMA_CHAN_RX_END_ADDR, offset: 0x%x, val: 0x%x\n", 0x28,
145 readl(ioaddr
+ DMA_CHAN_RX_END_ADDR(channel
)));
146 pr_debug("\tDMA_CHAN_TX_RING_LEN, offset: 0x%x, val: 0x%x\n", 0x2c,
147 readl(ioaddr
+ DMA_CHAN_TX_RING_LEN(channel
)));
148 pr_debug("\tDMA_CHAN_RX_RING_LEN, offset: 0x%x, val: 0x%x\n", 0x30,
149 readl(ioaddr
+ DMA_CHAN_RX_RING_LEN(channel
)));
150 pr_debug("\tDMA_CHAN_INTR_ENA, offset: 0x%x, val: 0x%x\n", 0x34,
151 readl(ioaddr
+ DMA_CHAN_INTR_ENA(channel
)));
152 pr_debug("\tDMA_CHAN_RX_WATCHDOG, offset: 0x%x, val: 0x%x\n", 0x38,
153 readl(ioaddr
+ DMA_CHAN_RX_WATCHDOG(channel
)));
154 pr_debug("\tDMA_CHAN_SLOT_CTRL_STATUS, offset: 0x%x, val: 0x%x\n", 0x3c,
155 readl(ioaddr
+ DMA_CHAN_SLOT_CTRL_STATUS(channel
)));
156 pr_debug("\tDMA_CHAN_CUR_TX_DESC, offset: 0x%x, val: 0x%x\n", 0x44,
157 readl(ioaddr
+ DMA_CHAN_CUR_TX_DESC(channel
)));
158 pr_debug("\tDMA_CHAN_CUR_RX_DESC, offset: 0x%x, val: 0x%x\n", 0x4c,
159 readl(ioaddr
+ DMA_CHAN_CUR_RX_DESC(channel
)));
160 pr_debug("\tDMA_CHAN_CUR_TX_BUF_ADDR, offset: 0x%x, val: 0x%x\n", 0x54,
161 readl(ioaddr
+ DMA_CHAN_CUR_TX_BUF_ADDR(channel
)));
162 pr_debug("\tDMA_CHAN_CUR_RX_BUF_ADDR, offset: 0x%x, val: 0x%x\n", 0x5c,
163 readl(ioaddr
+ DMA_CHAN_CUR_RX_BUF_ADDR(channel
)));
164 pr_debug("\tDMA_CHAN_STATUS, offset: 0x%x, val: 0x%x\n", 0x60,
165 readl(ioaddr
+ DMA_CHAN_STATUS(channel
)));
168 static void dwmac4_dump_dma_regs(void __iomem
*ioaddr
)
172 pr_debug(" GMAC4 DMA registers\n");
174 for (i
= 0; i
< DMA_CHANNEL_NB_MAX
; i
++)
175 _dwmac4_dump_dma_regs(ioaddr
, i
);
178 static void dwmac4_rx_watchdog(void __iomem
*ioaddr
, u32 riwt
)
182 for (i
= 0; i
< DMA_CHANNEL_NB_MAX
; i
++)
183 writel(riwt
, ioaddr
+ DMA_CHAN_RX_WATCHDOG(i
));
186 static void dwmac4_dma_chan_op_mode(void __iomem
*ioaddr
, int txmode
,
187 int rxmode
, u32 channel
)
189 u32 mtl_tx_op
, mtl_rx_op
, mtl_rx_int
;
191 /* Following code only done for channel 0, other channels not yet
194 mtl_tx_op
= readl(ioaddr
+ MTL_CHAN_TX_OP_MODE(channel
));
196 if (txmode
== SF_DMA_MODE
) {
197 pr_debug("GMAC: enable TX store and forward mode\n");
198 /* Transmit COE type 2 cannot be done in cut-through mode. */
199 mtl_tx_op
|= MTL_OP_MODE_TSF
;
201 pr_debug("GMAC: disabling TX SF (threshold %d)\n", txmode
);
202 mtl_tx_op
&= ~MTL_OP_MODE_TSF
;
203 mtl_tx_op
&= MTL_OP_MODE_TTC_MASK
;
204 /* Set the transmit threshold */
206 mtl_tx_op
|= MTL_OP_MODE_TTC_32
;
207 else if (txmode
<= 64)
208 mtl_tx_op
|= MTL_OP_MODE_TTC_64
;
209 else if (txmode
<= 96)
210 mtl_tx_op
|= MTL_OP_MODE_TTC_96
;
211 else if (txmode
<= 128)
212 mtl_tx_op
|= MTL_OP_MODE_TTC_128
;
213 else if (txmode
<= 192)
214 mtl_tx_op
|= MTL_OP_MODE_TTC_192
;
215 else if (txmode
<= 256)
216 mtl_tx_op
|= MTL_OP_MODE_TTC_256
;
217 else if (txmode
<= 384)
218 mtl_tx_op
|= MTL_OP_MODE_TTC_384
;
220 mtl_tx_op
|= MTL_OP_MODE_TTC_512
;
222 /* For an IP with DWC_EQOS_NUM_TXQ == 1, the fields TXQEN and TQS are RO
223 * with reset values: TXQEN on, TQS == DWC_EQOS_TXFIFO_SIZE.
224 * For an IP with DWC_EQOS_NUM_TXQ > 1, the fields TXQEN and TQS are R/W
225 * with reset values: TXQEN off, TQS 256 bytes.
227 * Write the bits in both cases, since it will have no effect when RO.
228 * For DWC_EQOS_NUM_TXQ > 1, the top bits in MTL_OP_MODE_TQS_MASK might
229 * be RO, however, writing the whole TQS field will result in a value
230 * equal to DWC_EQOS_TXFIFO_SIZE, just like for DWC_EQOS_NUM_TXQ == 1.
232 mtl_tx_op
|= MTL_OP_MODE_TXQEN
| MTL_OP_MODE_TQS_MASK
;
233 writel(mtl_tx_op
, ioaddr
+ MTL_CHAN_TX_OP_MODE(channel
));
235 mtl_rx_op
= readl(ioaddr
+ MTL_CHAN_RX_OP_MODE(channel
));
237 if (rxmode
== SF_DMA_MODE
) {
238 pr_debug("GMAC: enable RX store and forward mode\n");
239 mtl_rx_op
|= MTL_OP_MODE_RSF
;
241 pr_debug("GMAC: disable RX SF mode (threshold %d)\n", rxmode
);
242 mtl_rx_op
&= ~MTL_OP_MODE_RSF
;
243 mtl_rx_op
&= MTL_OP_MODE_RTC_MASK
;
245 mtl_rx_op
|= MTL_OP_MODE_RTC_32
;
246 else if (rxmode
<= 64)
247 mtl_rx_op
|= MTL_OP_MODE_RTC_64
;
248 else if (rxmode
<= 96)
249 mtl_rx_op
|= MTL_OP_MODE_RTC_96
;
251 mtl_rx_op
|= MTL_OP_MODE_RTC_128
;
254 writel(mtl_rx_op
, ioaddr
+ MTL_CHAN_RX_OP_MODE(channel
));
256 /* Enable MTL RX overflow */
257 mtl_rx_int
= readl(ioaddr
+ MTL_CHAN_INT_CTRL(channel
));
258 writel(mtl_rx_int
| MTL_RX_OVERFLOW_INT_EN
,
259 ioaddr
+ MTL_CHAN_INT_CTRL(channel
));
262 static void dwmac4_dma_operation_mode(void __iomem
*ioaddr
, int txmode
,
263 int rxmode
, int rxfifosz
)
265 /* Only Channel 0 is actually configured and used */
266 dwmac4_dma_chan_op_mode(ioaddr
, txmode
, rxmode
, 0);
269 static void dwmac4_get_hw_feature(void __iomem
*ioaddr
,
270 struct dma_features
*dma_cap
)
272 u32 hw_cap
= readl(ioaddr
+ GMAC_HW_FEATURE0
);
274 /* MAC HW feature0 */
275 dma_cap
->mbps_10_100
= (hw_cap
& GMAC_HW_FEAT_MIISEL
);
276 dma_cap
->mbps_1000
= (hw_cap
& GMAC_HW_FEAT_GMIISEL
) >> 1;
277 dma_cap
->half_duplex
= (hw_cap
& GMAC_HW_FEAT_HDSEL
) >> 2;
278 dma_cap
->hash_filter
= (hw_cap
& GMAC_HW_FEAT_VLHASH
) >> 4;
279 dma_cap
->multi_addr
= (hw_cap
& GMAC_HW_FEAT_ADDMAC
) >> 18;
280 dma_cap
->pcs
= (hw_cap
& GMAC_HW_FEAT_PCSSEL
) >> 3;
281 dma_cap
->sma_mdio
= (hw_cap
& GMAC_HW_FEAT_SMASEL
) >> 5;
282 dma_cap
->pmt_remote_wake_up
= (hw_cap
& GMAC_HW_FEAT_RWKSEL
) >> 6;
283 dma_cap
->pmt_magic_frame
= (hw_cap
& GMAC_HW_FEAT_MGKSEL
) >> 7;
285 dma_cap
->rmon
= (hw_cap
& GMAC_HW_FEAT_MMCSEL
) >> 8;
287 dma_cap
->atime_stamp
= (hw_cap
& GMAC_HW_FEAT_TSSEL
) >> 12;
288 /* 802.3az - Energy-Efficient Ethernet (EEE) */
289 dma_cap
->eee
= (hw_cap
& GMAC_HW_FEAT_EEESEL
) >> 13;
291 dma_cap
->tx_coe
= (hw_cap
& GMAC_HW_FEAT_TXCOSEL
) >> 14;
292 dma_cap
->rx_coe
= (hw_cap
& GMAC_HW_FEAT_RXCOESEL
) >> 16;
294 /* MAC HW feature1 */
295 hw_cap
= readl(ioaddr
+ GMAC_HW_FEATURE1
);
296 dma_cap
->av
= (hw_cap
& GMAC_HW_FEAT_AVSEL
) >> 20;
297 dma_cap
->tsoen
= (hw_cap
& GMAC_HW_TSOEN
) >> 18;
298 /* MAC HW feature2 */
299 hw_cap
= readl(ioaddr
+ GMAC_HW_FEATURE2
);
300 /* TX and RX number of channels */
301 dma_cap
->number_rx_channel
=
302 ((hw_cap
& GMAC_HW_FEAT_RXCHCNT
) >> 12) + 1;
303 dma_cap
->number_tx_channel
=
304 ((hw_cap
& GMAC_HW_FEAT_TXCHCNT
) >> 18) + 1;
307 dma_cap
->time_stamp
= 0;
310 /* Enable/disable TSO feature and set MSS */
311 static void dwmac4_enable_tso(void __iomem
*ioaddr
, bool en
, u32 chan
)
317 value
= readl(ioaddr
+ DMA_CHAN_TX_CONTROL(chan
));
318 writel(value
| DMA_CONTROL_TSE
,
319 ioaddr
+ DMA_CHAN_TX_CONTROL(chan
));
322 value
= readl(ioaddr
+ DMA_CHAN_TX_CONTROL(chan
));
323 writel(value
& ~DMA_CONTROL_TSE
,
324 ioaddr
+ DMA_CHAN_TX_CONTROL(chan
));
328 const struct stmmac_dma_ops dwmac4_dma_ops
= {
329 .reset
= dwmac4_dma_reset
,
330 .init
= dwmac4_dma_init
,
331 .axi
= dwmac4_dma_axi
,
332 .dump_regs
= dwmac4_dump_dma_regs
,
333 .dma_mode
= dwmac4_dma_operation_mode
,
334 .enable_dma_irq
= dwmac4_enable_dma_irq
,
335 .disable_dma_irq
= dwmac4_disable_dma_irq
,
336 .start_tx
= dwmac4_dma_start_tx
,
337 .stop_tx
= dwmac4_dma_stop_tx
,
338 .start_rx
= dwmac4_dma_start_rx
,
339 .stop_rx
= dwmac4_dma_stop_rx
,
340 .dma_interrupt
= dwmac4_dma_interrupt
,
341 .get_hw_feature
= dwmac4_get_hw_feature
,
342 .rx_watchdog
= dwmac4_rx_watchdog
,
343 .set_rx_ring_len
= dwmac4_set_rx_ring_len
,
344 .set_tx_ring_len
= dwmac4_set_tx_ring_len
,
345 .set_rx_tail_ptr
= dwmac4_set_rx_tail_ptr
,
346 .set_tx_tail_ptr
= dwmac4_set_tx_tail_ptr
,
347 .enable_tso
= dwmac4_enable_tso
,
350 const struct stmmac_dma_ops dwmac410_dma_ops
= {
351 .reset
= dwmac4_dma_reset
,
352 .init
= dwmac4_dma_init
,
353 .axi
= dwmac4_dma_axi
,
354 .dump_regs
= dwmac4_dump_dma_regs
,
355 .dma_mode
= dwmac4_dma_operation_mode
,
356 .enable_dma_irq
= dwmac410_enable_dma_irq
,
357 .disable_dma_irq
= dwmac4_disable_dma_irq
,
358 .start_tx
= dwmac4_dma_start_tx
,
359 .stop_tx
= dwmac4_dma_stop_tx
,
360 .start_rx
= dwmac4_dma_start_rx
,
361 .stop_rx
= dwmac4_dma_stop_rx
,
362 .dma_interrupt
= dwmac4_dma_interrupt
,
363 .get_hw_feature
= dwmac4_get_hw_feature
,
364 .rx_watchdog
= dwmac4_rx_watchdog
,
365 .set_rx_ring_len
= dwmac4_set_rx_ring_len
,
366 .set_tx_ring_len
= dwmac4_set_tx_ring_len
,
367 .set_rx_tail_ptr
= dwmac4_set_rx_tail_ptr
,
368 .set_tx_tail_ptr
= dwmac4_set_tx_tail_ptr
,
369 .enable_tso
= dwmac4_enable_tso
,