]>
Commit | Line | Data |
---|---|---|
47dd7a54 GC |
1 | /******************************************************************************* |
2 | This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers. | |
3 | ST Ethernet IPs are built around a Synopsys IP Core. | |
4 | ||
286a8372 | 5 | Copyright(C) 2007-2011 STMicroelectronics Ltd |
47dd7a54 GC |
6 | |
7 | This program is free software; you can redistribute it and/or modify it | |
8 | under the terms and conditions of the GNU General Public License, | |
9 | version 2, as published by the Free Software Foundation. | |
10 | ||
11 | This program is distributed in the hope it will be useful, but WITHOUT | |
12 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
13 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
14 | more details. | |
15 | ||
47dd7a54 GC |
16 | The full GNU General Public License is included in this distribution in |
17 | the file called "COPYING". | |
18 | ||
19 | Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> | |
20 | ||
21 | Documentation available at: | |
22 | http://www.stlinux.com | |
23 | Support available at: | |
24 | https://bugzilla.stlinux.com/ | |
25 | *******************************************************************************/ | |
26 | ||
6a81c26f | 27 | #include <linux/clk.h> |
47dd7a54 GC |
28 | #include <linux/kernel.h> |
29 | #include <linux/interrupt.h> | |
47dd7a54 GC |
30 | #include <linux/ip.h> |
31 | #include <linux/tcp.h> | |
32 | #include <linux/skbuff.h> | |
33 | #include <linux/ethtool.h> | |
34 | #include <linux/if_ether.h> | |
35 | #include <linux/crc32.h> | |
36 | #include <linux/mii.h> | |
01789349 | 37 | #include <linux/if.h> |
47dd7a54 GC |
38 | #include <linux/if_vlan.h> |
39 | #include <linux/dma-mapping.h> | |
5a0e3ad6 | 40 | #include <linux/slab.h> |
70c71606 | 41 | #include <linux/prefetch.h> |
db88f10a | 42 | #include <linux/pinctrl/consumer.h> |
50fb4f74 | 43 | #ifdef CONFIG_DEBUG_FS |
7ac29055 GC |
44 | #include <linux/debugfs.h> |
45 | #include <linux/seq_file.h> | |
50fb4f74 | 46 | #endif /* CONFIG_DEBUG_FS */ |
891434b1 RK |
47 | #include <linux/net_tstamp.h> |
48 | #include "stmmac_ptp.h" | |
286a8372 | 49 | #include "stmmac.h" |
c5e4ddbd | 50 | #include <linux/reset.h> |
5790cf3c | 51 | #include <linux/of_mdio.h> |
19d857c9 | 52 | #include "dwmac1000.h" |
47dd7a54 | 53 | |
47dd7a54 | 54 | #define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x) |
f748be53 | 55 | #define TSO_MAX_BUFF_SIZE (SZ_16K - 1) |
47dd7a54 GC |
56 | |
57 | /* Module parameters */ | |
32ceabca | 58 | #define TX_TIMEO 5000 |
47dd7a54 GC |
59 | static int watchdog = TX_TIMEO; |
60 | module_param(watchdog, int, S_IRUGO | S_IWUSR); | |
32ceabca | 61 | MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)"); |
47dd7a54 | 62 | |
32ceabca | 63 | static int debug = -1; |
47dd7a54 | 64 | module_param(debug, int, S_IRUGO | S_IWUSR); |
32ceabca | 65 | MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)"); |
47dd7a54 | 66 | |
47d1f71f | 67 | static int phyaddr = -1; |
47dd7a54 GC |
68 | module_param(phyaddr, int, S_IRUGO); |
69 | MODULE_PARM_DESC(phyaddr, "Physical device address"); | |
70 | ||
e3ad57c9 | 71 | #define STMMAC_TX_THRESH (DMA_TX_SIZE / 4) |
120e87f9 | 72 | #define STMMAC_RX_THRESH (DMA_RX_SIZE / 4) |
47dd7a54 GC |
73 | |
74 | static int flow_ctrl = FLOW_OFF; | |
75 | module_param(flow_ctrl, int, S_IRUGO | S_IWUSR); | |
76 | MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]"); | |
77 | ||
78 | static int pause = PAUSE_TIME; | |
79 | module_param(pause, int, S_IRUGO | S_IWUSR); | |
80 | MODULE_PARM_DESC(pause, "Flow Control Pause Time"); | |
81 | ||
82 | #define TC_DEFAULT 64 | |
83 | static int tc = TC_DEFAULT; | |
84 | module_param(tc, int, S_IRUGO | S_IWUSR); | |
85 | MODULE_PARM_DESC(tc, "DMA threshold control value"); | |
86 | ||
d916701c GC |
87 | #define DEFAULT_BUFSIZE 1536 |
88 | static int buf_sz = DEFAULT_BUFSIZE; | |
47dd7a54 GC |
89 | module_param(buf_sz, int, S_IRUGO | S_IWUSR); |
90 | MODULE_PARM_DESC(buf_sz, "DMA buffer size"); | |
91 | ||
22ad3838 GC |
92 | #define STMMAC_RX_COPYBREAK 256 |
93 | ||
47dd7a54 GC |
94 | static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | |
95 | NETIF_MSG_LINK | NETIF_MSG_IFUP | | |
96 | NETIF_MSG_IFDOWN | NETIF_MSG_TIMER); | |
97 | ||
d765955d GC |
98 | #define STMMAC_DEFAULT_LPI_TIMER 1000 |
99 | static int eee_timer = STMMAC_DEFAULT_LPI_TIMER; | |
100 | module_param(eee_timer, int, S_IRUGO | S_IWUSR); | |
101 | MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec"); | |
f5351ef7 | 102 | #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x)) |
d765955d | 103 | |
22d3efe5 PM |
104 | /* By default the driver will use the ring mode to manage tx and rx descriptors, |
105 | * but allow user to force to use the chain instead of the ring | |
4a7d666a GC |
106 | */ |
107 | static unsigned int chain_mode; | |
108 | module_param(chain_mode, int, S_IRUGO); | |
109 | MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode"); | |
110 | ||
47dd7a54 | 111 | static irqreturn_t stmmac_interrupt(int irq, void *dev_id); |
47dd7a54 | 112 | |
50fb4f74 | 113 | #ifdef CONFIG_DEBUG_FS |
bfab27a1 | 114 | static int stmmac_init_fs(struct net_device *dev); |
466c5ac8 | 115 | static void stmmac_exit_fs(struct net_device *dev); |
bfab27a1 GC |
116 | #endif |
117 | ||
9125cdd1 GC |
118 | #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x)) |
119 | ||
47dd7a54 GC |
120 | /** |
121 | * stmmac_verify_args - verify the driver parameters. | |
732fdf0e GC |
122 | * Description: it checks the driver parameters and set a default in case of |
123 | * errors. | |
47dd7a54 GC |
124 | */ |
125 | static void stmmac_verify_args(void) | |
126 | { | |
127 | if (unlikely(watchdog < 0)) | |
128 | watchdog = TX_TIMEO; | |
d916701c GC |
129 | if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB))) |
130 | buf_sz = DEFAULT_BUFSIZE; | |
47dd7a54 GC |
131 | if (unlikely(flow_ctrl > 1)) |
132 | flow_ctrl = FLOW_AUTO; | |
133 | else if (likely(flow_ctrl < 0)) | |
134 | flow_ctrl = FLOW_OFF; | |
135 | if (unlikely((pause < 0) || (pause > 0xffff))) | |
136 | pause = PAUSE_TIME; | |
d765955d GC |
137 | if (eee_timer < 0) |
138 | eee_timer = STMMAC_DEFAULT_LPI_TIMER; | |
47dd7a54 GC |
139 | } |
140 | ||
c22a3f48 JP |
141 | /** |
142 | * stmmac_disable_all_queues - Disable all queues | |
143 | * @priv: driver private structure | |
144 | */ | |
145 | static void stmmac_disable_all_queues(struct stmmac_priv *priv) | |
146 | { | |
147 | u32 rx_queues_cnt = priv->plat->rx_queues_to_use; | |
148 | u32 queue; | |
149 | ||
150 | for (queue = 0; queue < rx_queues_cnt; queue++) { | |
151 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; | |
152 | ||
153 | napi_disable(&rx_q->napi); | |
154 | } | |
155 | } | |
156 | ||
157 | /** | |
158 | * stmmac_enable_all_queues - Enable all queues | |
159 | * @priv: driver private structure | |
160 | */ | |
161 | static void stmmac_enable_all_queues(struct stmmac_priv *priv) | |
162 | { | |
163 | u32 rx_queues_cnt = priv->plat->rx_queues_to_use; | |
164 | u32 queue; | |
165 | ||
166 | for (queue = 0; queue < rx_queues_cnt; queue++) { | |
167 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; | |
168 | ||
169 | napi_enable(&rx_q->napi); | |
170 | } | |
171 | } | |
172 | ||
173 | /** | |
174 | * stmmac_stop_all_queues - Stop all queues | |
175 | * @priv: driver private structure | |
176 | */ | |
177 | static void stmmac_stop_all_queues(struct stmmac_priv *priv) | |
178 | { | |
179 | u32 tx_queues_cnt = priv->plat->tx_queues_to_use; | |
180 | u32 queue; | |
181 | ||
182 | for (queue = 0; queue < tx_queues_cnt; queue++) | |
183 | netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); | |
184 | } | |
185 | ||
186 | /** | |
187 | * stmmac_start_all_queues - Start all queues | |
188 | * @priv: driver private structure | |
189 | */ | |
190 | static void stmmac_start_all_queues(struct stmmac_priv *priv) | |
191 | { | |
192 | u32 tx_queues_cnt = priv->plat->tx_queues_to_use; | |
193 | u32 queue; | |
194 | ||
195 | for (queue = 0; queue < tx_queues_cnt; queue++) | |
196 | netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue)); | |
197 | } | |
198 | ||
32ceabca GC |
199 | /** |
200 | * stmmac_clk_csr_set - dynamically set the MDC clock | |
201 | * @priv: driver private structure | |
202 | * Description: this is to dynamically set the MDC clock according to the csr | |
203 | * clock input. | |
204 | * Note: | |
205 | * If a specific clk_csr value is passed from the platform | |
206 | * this means that the CSR Clock Range selection cannot be | |
207 | * changed at run-time and it is fixed (as reported in the driver | |
208 | * documentation). Viceversa the driver will try to set the MDC | |
209 | * clock dynamically according to the actual clock input. | |
210 | */ | |
cd7201f4 GC |
211 | static void stmmac_clk_csr_set(struct stmmac_priv *priv) |
212 | { | |
cd7201f4 GC |
213 | u32 clk_rate; |
214 | ||
f573c0b9 | 215 | clk_rate = clk_get_rate(priv->plat->stmmac_clk); |
cd7201f4 GC |
216 | |
217 | /* Platform provided default clk_csr would be assumed valid | |
ceb69499 GC |
218 | * for all other cases except for the below mentioned ones. |
219 | * For values higher than the IEEE 802.3 specified frequency | |
220 | * we can not estimate the proper divider as it is not known | |
221 | * the frequency of clk_csr_i. So we do not change the default | |
222 | * divider. | |
223 | */ | |
cd7201f4 GC |
224 | if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) { |
225 | if (clk_rate < CSR_F_35M) | |
226 | priv->clk_csr = STMMAC_CSR_20_35M; | |
227 | else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M)) | |
228 | priv->clk_csr = STMMAC_CSR_35_60M; | |
229 | else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M)) | |
230 | priv->clk_csr = STMMAC_CSR_60_100M; | |
231 | else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M)) | |
232 | priv->clk_csr = STMMAC_CSR_100_150M; | |
233 | else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M)) | |
234 | priv->clk_csr = STMMAC_CSR_150_250M; | |
19d857c9 | 235 | else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M)) |
cd7201f4 | 236 | priv->clk_csr = STMMAC_CSR_250_300M; |
ceb69499 | 237 | } |
9f93ac8d LC |
238 | |
239 | if (priv->plat->has_sun8i) { | |
240 | if (clk_rate > 160000000) | |
241 | priv->clk_csr = 0x03; | |
242 | else if (clk_rate > 80000000) | |
243 | priv->clk_csr = 0x02; | |
244 | else if (clk_rate > 40000000) | |
245 | priv->clk_csr = 0x01; | |
246 | else | |
247 | priv->clk_csr = 0; | |
248 | } | |
cd7201f4 GC |
249 | } |
250 | ||
47dd7a54 GC |
251 | static void print_pkt(unsigned char *buf, int len) |
252 | { | |
424c4f78 AS |
253 | pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf); |
254 | print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len); | |
47dd7a54 | 255 | } |
47dd7a54 | 256 | |
ce736788 | 257 | static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue) |
47dd7a54 | 258 | { |
ce736788 | 259 | struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; |
a6a3e026 | 260 | u32 avail; |
e3ad57c9 | 261 | |
ce736788 JP |
262 | if (tx_q->dirty_tx > tx_q->cur_tx) |
263 | avail = tx_q->dirty_tx - tx_q->cur_tx - 1; | |
e3ad57c9 | 264 | else |
ce736788 | 265 | avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1; |
e3ad57c9 GC |
266 | |
267 | return avail; | |
268 | } | |
269 | ||
54139cf3 JP |
270 | /** |
271 | * stmmac_rx_dirty - Get RX queue dirty | |
272 | * @priv: driver private structure | |
273 | * @queue: RX queue index | |
274 | */ | |
275 | static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue) | |
e3ad57c9 | 276 | { |
54139cf3 | 277 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; |
a6a3e026 | 278 | u32 dirty; |
e3ad57c9 | 279 | |
54139cf3 JP |
280 | if (rx_q->dirty_rx <= rx_q->cur_rx) |
281 | dirty = rx_q->cur_rx - rx_q->dirty_rx; | |
e3ad57c9 | 282 | else |
54139cf3 | 283 | dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx; |
e3ad57c9 GC |
284 | |
285 | return dirty; | |
47dd7a54 GC |
286 | } |
287 | ||
32ceabca | 288 | /** |
732fdf0e | 289 | * stmmac_hw_fix_mac_speed - callback for speed selection |
32ceabca | 290 | * @priv: driver private structure |
8d45e42b | 291 | * Description: on some platforms (e.g. ST), some HW system configuration |
32ceabca | 292 | * registers have to be set according to the link speed negotiated. |
9dfeb4d9 GC |
293 | */ |
294 | static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv) | |
295 | { | |
d6d50c7e PR |
296 | struct net_device *ndev = priv->dev; |
297 | struct phy_device *phydev = ndev->phydev; | |
9dfeb4d9 GC |
298 | |
299 | if (likely(priv->plat->fix_mac_speed)) | |
ceb69499 | 300 | priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed); |
9dfeb4d9 GC |
301 | } |
302 | ||
32ceabca | 303 | /** |
732fdf0e | 304 | * stmmac_enable_eee_mode - check and enter in LPI mode |
32ceabca | 305 | * @priv: driver private structure |
732fdf0e GC |
306 | * Description: this function is to verify and enter in LPI mode in case of |
307 | * EEE. | |
32ceabca | 308 | */ |
d765955d GC |
309 | static void stmmac_enable_eee_mode(struct stmmac_priv *priv) |
310 | { | |
ce736788 JP |
311 | u32 tx_cnt = priv->plat->tx_queues_to_use; |
312 | u32 queue; | |
313 | ||
314 | /* check if all TX queues have the work finished */ | |
315 | for (queue = 0; queue < tx_cnt; queue++) { | |
316 | struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; | |
317 | ||
318 | if (tx_q->dirty_tx != tx_q->cur_tx) | |
319 | return; /* still unfinished work */ | |
320 | } | |
321 | ||
d765955d | 322 | /* Check and enter in LPI mode */ |
ce736788 | 323 | if (!priv->tx_path_in_lpi_mode) |
b4b7b772 | 324 | priv->hw->mac->set_eee_mode(priv->hw, |
325 | priv->plat->en_tx_lpi_clockgating); | |
d765955d GC |
326 | } |
327 | ||
32ceabca | 328 | /** |
732fdf0e | 329 | * stmmac_disable_eee_mode - disable and exit from LPI mode |
32ceabca GC |
330 | * @priv: driver private structure |
331 | * Description: this function is to exit and disable EEE in case of | |
332 | * LPI state is true. This is called by the xmit. | |
333 | */ | |
d765955d GC |
334 | void stmmac_disable_eee_mode(struct stmmac_priv *priv) |
335 | { | |
7ed24bbe | 336 | priv->hw->mac->reset_eee_mode(priv->hw); |
d765955d GC |
337 | del_timer_sync(&priv->eee_ctrl_timer); |
338 | priv->tx_path_in_lpi_mode = false; | |
339 | } | |
340 | ||
341 | /** | |
732fdf0e | 342 | * stmmac_eee_ctrl_timer - EEE TX SW timer. |
d765955d GC |
343 | * @arg : data hook |
344 | * Description: | |
32ceabca | 345 | * if there is no data transfer and if we are not in LPI state, |
d765955d GC |
346 | * then MAC Transmitter can be moved to LPI state. |
347 | */ | |
e99e88a9 | 348 | static void stmmac_eee_ctrl_timer(struct timer_list *t) |
d765955d | 349 | { |
e99e88a9 | 350 | struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer); |
d765955d GC |
351 | |
352 | stmmac_enable_eee_mode(priv); | |
f5351ef7 | 353 | mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer)); |
d765955d GC |
354 | } |
355 | ||
356 | /** | |
732fdf0e | 357 | * stmmac_eee_init - init EEE |
32ceabca | 358 | * @priv: driver private structure |
d765955d | 359 | * Description: |
732fdf0e GC |
360 | * if the GMAC supports the EEE (from the HW cap reg) and the phy device |
361 | * can also manage EEE, this function enable the LPI state and start related | |
362 | * timer. | |
d765955d GC |
363 | */ |
364 | bool stmmac_eee_init(struct stmmac_priv *priv) | |
365 | { | |
d6d50c7e | 366 | struct net_device *ndev = priv->dev; |
4741cf9c | 367 | unsigned long flags; |
d765955d GC |
368 | bool ret = false; |
369 | ||
f5351ef7 GC |
370 | /* Using PCS we cannot dial with the phy registers at this stage |
371 | * so we do not support extra feature like EEE. | |
372 | */ | |
3fe5cadb GC |
373 | if ((priv->hw->pcs == STMMAC_PCS_RGMII) || |
374 | (priv->hw->pcs == STMMAC_PCS_TBI) || | |
375 | (priv->hw->pcs == STMMAC_PCS_RTBI)) | |
f5351ef7 GC |
376 | goto out; |
377 | ||
d765955d GC |
378 | /* MAC core supports the EEE feature. */ |
379 | if (priv->dma_cap.eee) { | |
83bf79b6 GC |
380 | int tx_lpi_timer = priv->tx_lpi_timer; |
381 | ||
d765955d | 382 | /* Check if the PHY supports EEE */ |
d6d50c7e | 383 | if (phy_init_eee(ndev->phydev, 1)) { |
83bf79b6 GC |
384 | /* To manage at run-time if the EEE cannot be supported |
385 | * anymore (for example because the lp caps have been | |
386 | * changed). | |
387 | * In that case the driver disable own timers. | |
388 | */ | |
4741cf9c | 389 | spin_lock_irqsave(&priv->lock, flags); |
83bf79b6 | 390 | if (priv->eee_active) { |
38ddc59d | 391 | netdev_dbg(priv->dev, "disable EEE\n"); |
83bf79b6 | 392 | del_timer_sync(&priv->eee_ctrl_timer); |
7ed24bbe | 393 | priv->hw->mac->set_eee_timer(priv->hw, 0, |
83bf79b6 GC |
394 | tx_lpi_timer); |
395 | } | |
396 | priv->eee_active = 0; | |
4741cf9c | 397 | spin_unlock_irqrestore(&priv->lock, flags); |
d765955d | 398 | goto out; |
83bf79b6 GC |
399 | } |
400 | /* Activate the EEE and start timers */ | |
4741cf9c | 401 | spin_lock_irqsave(&priv->lock, flags); |
f5351ef7 GC |
402 | if (!priv->eee_active) { |
403 | priv->eee_active = 1; | |
e99e88a9 KC |
404 | timer_setup(&priv->eee_ctrl_timer, |
405 | stmmac_eee_ctrl_timer, 0); | |
ccb36da1 VT |
406 | mod_timer(&priv->eee_ctrl_timer, |
407 | STMMAC_LPI_T(eee_timer)); | |
f5351ef7 | 408 | |
7ed24bbe | 409 | priv->hw->mac->set_eee_timer(priv->hw, |
f5351ef7 | 410 | STMMAC_DEFAULT_LIT_LS, |
83bf79b6 | 411 | tx_lpi_timer); |
71965352 GC |
412 | } |
413 | /* Set HW EEE according to the speed */ | |
d6d50c7e | 414 | priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link); |
d765955d | 415 | |
d765955d | 416 | ret = true; |
4741cf9c GC |
417 | spin_unlock_irqrestore(&priv->lock, flags); |
418 | ||
38ddc59d | 419 | netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n"); |
d765955d GC |
420 | } |
421 | out: | |
422 | return ret; | |
423 | } | |
424 | ||
732fdf0e | 425 | /* stmmac_get_tx_hwtstamp - get HW TX timestamps |
32ceabca | 426 | * @priv: driver private structure |
ba1ffd74 | 427 | * @p : descriptor pointer |
891434b1 RK |
428 | * @skb : the socket buffer |
429 | * Description : | |
430 | * This function will read timestamp from the descriptor & pass it to stack. | |
431 | * and also perform some sanity checks. | |
432 | */ | |
433 | static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, | |
ba1ffd74 | 434 | struct dma_desc *p, struct sk_buff *skb) |
891434b1 RK |
435 | { |
436 | struct skb_shared_hwtstamps shhwtstamp; | |
437 | u64 ns; | |
891434b1 RK |
438 | |
439 | if (!priv->hwts_tx_en) | |
440 | return; | |
441 | ||
ceb69499 | 442 | /* exit if skb doesn't support hw tstamp */ |
75e4364f | 443 | if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))) |
891434b1 RK |
444 | return; |
445 | ||
891434b1 | 446 | /* check tx tstamp status */ |
33d4c482 | 447 | if (priv->hw->desc->get_tx_timestamp_status(p)) { |
ba1ffd74 GC |
448 | /* get the valid tstamp */ |
449 | ns = priv->hw->desc->get_timestamp(p, priv->adv_ts); | |
891434b1 | 450 | |
ba1ffd74 GC |
451 | memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); |
452 | shhwtstamp.hwtstamp = ns_to_ktime(ns); | |
891434b1 | 453 | |
33d4c482 | 454 | netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns); |
ba1ffd74 GC |
455 | /* pass tstamp to stack */ |
456 | skb_tstamp_tx(skb, &shhwtstamp); | |
457 | } | |
891434b1 RK |
458 | |
459 | return; | |
460 | } | |
461 | ||
732fdf0e | 462 | /* stmmac_get_rx_hwtstamp - get HW RX timestamps |
32ceabca | 463 | * @priv: driver private structure |
ba1ffd74 GC |
464 | * @p : descriptor pointer |
465 | * @np : next descriptor pointer | |
891434b1 RK |
466 | * @skb : the socket buffer |
467 | * Description : | |
468 | * This function will read received packet's timestamp from the descriptor | |
469 | * and pass it to stack. It also perform some sanity checks. | |
470 | */ | |
ba1ffd74 GC |
471 | static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, |
472 | struct dma_desc *np, struct sk_buff *skb) | |
891434b1 RK |
473 | { |
474 | struct skb_shared_hwtstamps *shhwtstamp = NULL; | |
98870943 | 475 | struct dma_desc *desc = p; |
891434b1 | 476 | u64 ns; |
891434b1 RK |
477 | |
478 | if (!priv->hwts_rx_en) | |
479 | return; | |
98870943 JA |
480 | /* For GMAC4, the valid timestamp is from CTX next desc. */ |
481 | if (priv->plat->has_gmac4) | |
482 | desc = np; | |
891434b1 | 483 | |
ba1ffd74 | 484 | /* Check if timestamp is available */ |
98870943 JA |
485 | if (priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts)) { |
486 | ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts); | |
33d4c482 | 487 | netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); |
ba1ffd74 GC |
488 | shhwtstamp = skb_hwtstamps(skb); |
489 | memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); | |
490 | shhwtstamp->hwtstamp = ns_to_ktime(ns); | |
491 | } else { | |
33d4c482 | 492 | netdev_dbg(priv->dev, "cannot get RX hw timestamp\n"); |
ba1ffd74 | 493 | } |
891434b1 RK |
494 | } |
495 | ||
496 | /** | |
497 | * stmmac_hwtstamp_ioctl - control hardware timestamping. | |
498 | * @dev: device pointer. | |
8d45e42b | 499 | * @ifr: An IOCTL specific structure, that can contain a pointer to |
891434b1 RK |
500 | * a proprietary structure used to pass information to the driver. |
501 | * Description: | |
502 | * This function configures the MAC to enable/disable both outgoing(TX) | |
503 | * and incoming(RX) packets time stamping based on user input. | |
504 | * Return Value: | |
505 | * 0 on success and an appropriate -ve integer on failure. | |
506 | */ | |
507 | static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) | |
508 | { | |
509 | struct stmmac_priv *priv = netdev_priv(dev); | |
510 | struct hwtstamp_config config; | |
0a624155 | 511 | struct timespec64 now; |
891434b1 RK |
512 | u64 temp = 0; |
513 | u32 ptp_v2 = 0; | |
514 | u32 tstamp_all = 0; | |
515 | u32 ptp_over_ipv4_udp = 0; | |
516 | u32 ptp_over_ipv6_udp = 0; | |
517 | u32 ptp_over_ethernet = 0; | |
518 | u32 snap_type_sel = 0; | |
519 | u32 ts_master_en = 0; | |
520 | u32 ts_event_en = 0; | |
521 | u32 value = 0; | |
19d857c9 | 522 | u32 sec_inc; |
891434b1 RK |
523 | |
524 | if (!(priv->dma_cap.time_stamp || priv->adv_ts)) { | |
525 | netdev_alert(priv->dev, "No support for HW time stamping\n"); | |
526 | priv->hwts_tx_en = 0; | |
527 | priv->hwts_rx_en = 0; | |
528 | ||
529 | return -EOPNOTSUPP; | |
530 | } | |
531 | ||
532 | if (copy_from_user(&config, ifr->ifr_data, | |
ceb69499 | 533 | sizeof(struct hwtstamp_config))) |
891434b1 RK |
534 | return -EFAULT; |
535 | ||
38ddc59d LC |
536 | netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n", |
537 | __func__, config.flags, config.tx_type, config.rx_filter); | |
891434b1 RK |
538 | |
539 | /* reserved for future extensions */ | |
540 | if (config.flags) | |
541 | return -EINVAL; | |
542 | ||
5f3da328 BH |
543 | if (config.tx_type != HWTSTAMP_TX_OFF && |
544 | config.tx_type != HWTSTAMP_TX_ON) | |
891434b1 | 545 | return -ERANGE; |
891434b1 RK |
546 | |
547 | if (priv->adv_ts) { | |
548 | switch (config.rx_filter) { | |
891434b1 | 549 | case HWTSTAMP_FILTER_NONE: |
ceb69499 | 550 | /* time stamp no incoming packet at all */ |
891434b1 RK |
551 | config.rx_filter = HWTSTAMP_FILTER_NONE; |
552 | break; | |
553 | ||
891434b1 | 554 | case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: |
ceb69499 | 555 | /* PTP v1, UDP, any kind of event packet */ |
891434b1 RK |
556 | config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; |
557 | /* take time stamp for all event messages */ | |
fd6720ae MM |
558 | if (priv->plat->has_gmac4) |
559 | snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1; | |
560 | else | |
561 | snap_type_sel = PTP_TCR_SNAPTYPSEL_1; | |
891434b1 RK |
562 | |
563 | ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; | |
564 | ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; | |
565 | break; | |
566 | ||
891434b1 | 567 | case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: |
ceb69499 | 568 | /* PTP v1, UDP, Sync packet */ |
891434b1 RK |
569 | config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC; |
570 | /* take time stamp for SYNC messages only */ | |
571 | ts_event_en = PTP_TCR_TSEVNTENA; | |
572 | ||
573 | ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; | |
574 | ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; | |
575 | break; | |
576 | ||
891434b1 | 577 | case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: |
ceb69499 | 578 | /* PTP v1, UDP, Delay_req packet */ |
891434b1 RK |
579 | config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ; |
580 | /* take time stamp for Delay_Req messages only */ | |
581 | ts_master_en = PTP_TCR_TSMSTRENA; | |
582 | ts_event_en = PTP_TCR_TSEVNTENA; | |
583 | ||
584 | ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; | |
585 | ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; | |
586 | break; | |
587 | ||
891434b1 | 588 | case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: |
ceb69499 | 589 | /* PTP v2, UDP, any kind of event packet */ |
891434b1 RK |
590 | config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; |
591 | ptp_v2 = PTP_TCR_TSVER2ENA; | |
592 | /* take time stamp for all event messages */ | |
fd6720ae MM |
593 | if (priv->plat->has_gmac4) |
594 | snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1; | |
595 | else | |
596 | snap_type_sel = PTP_TCR_SNAPTYPSEL_1; | |
891434b1 RK |
597 | |
598 | ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; | |
599 | ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; | |
600 | break; | |
601 | ||
891434b1 | 602 | case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: |
ceb69499 | 603 | /* PTP v2, UDP, Sync packet */ |
891434b1 RK |
604 | config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC; |
605 | ptp_v2 = PTP_TCR_TSVER2ENA; | |
606 | /* take time stamp for SYNC messages only */ | |
607 | ts_event_en = PTP_TCR_TSEVNTENA; | |
608 | ||
609 | ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; | |
610 | ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; | |
611 | break; | |
612 | ||
891434b1 | 613 | case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: |
ceb69499 | 614 | /* PTP v2, UDP, Delay_req packet */ |
891434b1 RK |
615 | config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ; |
616 | ptp_v2 = PTP_TCR_TSVER2ENA; | |
617 | /* take time stamp for Delay_Req messages only */ | |
618 | ts_master_en = PTP_TCR_TSMSTRENA; | |
619 | ts_event_en = PTP_TCR_TSEVNTENA; | |
620 | ||
621 | ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; | |
622 | ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; | |
623 | break; | |
624 | ||
891434b1 | 625 | case HWTSTAMP_FILTER_PTP_V2_EVENT: |
ceb69499 | 626 | /* PTP v2/802.AS1 any layer, any kind of event packet */ |
891434b1 RK |
627 | config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; |
628 | ptp_v2 = PTP_TCR_TSVER2ENA; | |
629 | /* take time stamp for all event messages */ | |
fd6720ae MM |
630 | if (priv->plat->has_gmac4) |
631 | snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1; | |
632 | else | |
633 | snap_type_sel = PTP_TCR_SNAPTYPSEL_1; | |
891434b1 RK |
634 | |
635 | ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; | |
636 | ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; | |
637 | ptp_over_ethernet = PTP_TCR_TSIPENA; | |
638 | break; | |
639 | ||
891434b1 | 640 | case HWTSTAMP_FILTER_PTP_V2_SYNC: |
ceb69499 | 641 | /* PTP v2/802.AS1, any layer, Sync packet */ |
891434b1 RK |
642 | config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC; |
643 | ptp_v2 = PTP_TCR_TSVER2ENA; | |
644 | /* take time stamp for SYNC messages only */ | |
645 | ts_event_en = PTP_TCR_TSEVNTENA; | |
646 | ||
647 | ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; | |
648 | ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; | |
649 | ptp_over_ethernet = PTP_TCR_TSIPENA; | |
650 | break; | |
651 | ||
891434b1 | 652 | case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: |
ceb69499 | 653 | /* PTP v2/802.AS1, any layer, Delay_req packet */ |
891434b1 RK |
654 | config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ; |
655 | ptp_v2 = PTP_TCR_TSVER2ENA; | |
656 | /* take time stamp for Delay_Req messages only */ | |
657 | ts_master_en = PTP_TCR_TSMSTRENA; | |
658 | ts_event_en = PTP_TCR_TSEVNTENA; | |
659 | ||
660 | ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; | |
661 | ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; | |
662 | ptp_over_ethernet = PTP_TCR_TSIPENA; | |
663 | break; | |
664 | ||
e3412575 | 665 | case HWTSTAMP_FILTER_NTP_ALL: |
891434b1 | 666 | case HWTSTAMP_FILTER_ALL: |
ceb69499 | 667 | /* time stamp any incoming packet */ |
891434b1 RK |
668 | config.rx_filter = HWTSTAMP_FILTER_ALL; |
669 | tstamp_all = PTP_TCR_TSENALL; | |
670 | break; | |
671 | ||
672 | default: | |
673 | return -ERANGE; | |
674 | } | |
675 | } else { | |
676 | switch (config.rx_filter) { | |
677 | case HWTSTAMP_FILTER_NONE: | |
678 | config.rx_filter = HWTSTAMP_FILTER_NONE; | |
679 | break; | |
680 | default: | |
681 | /* PTP v1, UDP, any kind of event packet */ | |
682 | config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; | |
683 | break; | |
684 | } | |
685 | } | |
686 | priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1); | |
5f3da328 | 687 | priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON; |
891434b1 RK |
688 | |
689 | if (!priv->hwts_tx_en && !priv->hwts_rx_en) | |
ba1ffd74 | 690 | priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0); |
891434b1 RK |
691 | else { |
692 | value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR | | |
ceb69499 GC |
693 | tstamp_all | ptp_v2 | ptp_over_ethernet | |
694 | ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en | | |
695 | ts_master_en | snap_type_sel); | |
ba1ffd74 | 696 | priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value); |
891434b1 RK |
697 | |
698 | /* program Sub Second Increment reg */ | |
19d857c9 | 699 | sec_inc = priv->hw->ptp->config_sub_second_increment( |
f573c0b9 | 700 | priv->ptpaddr, priv->plat->clk_ptp_rate, |
ba1ffd74 | 701 | priv->plat->has_gmac4); |
19d857c9 | 702 | temp = div_u64(1000000000ULL, sec_inc); |
891434b1 RK |
703 | |
704 | /* calculate default added value: | |
705 | * formula is : | |
706 | * addend = (2^32)/freq_div_ratio; | |
19d857c9 | 707 | * where, freq_div_ratio = 1e9ns/sec_inc |
891434b1 | 708 | */ |
19d857c9 | 709 | temp = (u64)(temp << 32); |
f573c0b9 | 710 | priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate); |
ba1ffd74 | 711 | priv->hw->ptp->config_addend(priv->ptpaddr, |
891434b1 RK |
712 | priv->default_addend); |
713 | ||
714 | /* initialize system time */ | |
0a624155 AB |
715 | ktime_get_real_ts64(&now); |
716 | ||
717 | /* lower 32 bits of tv_sec are safe until y2106 */ | |
ba1ffd74 | 718 | priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec, |
891434b1 RK |
719 | now.tv_nsec); |
720 | } | |
721 | ||
722 | return copy_to_user(ifr->ifr_data, &config, | |
723 | sizeof(struct hwtstamp_config)) ? -EFAULT : 0; | |
724 | } | |
725 | ||
32ceabca | 726 | /** |
732fdf0e | 727 | * stmmac_init_ptp - init PTP |
32ceabca | 728 | * @priv: driver private structure |
732fdf0e | 729 | * Description: this is to verify if the HW supports the PTPv1 or PTPv2. |
32ceabca | 730 | * This is done by looking at the HW cap. register. |
732fdf0e | 731 | * This function also registers the ptp driver. |
32ceabca | 732 | */ |
92ba6888 | 733 | static int stmmac_init_ptp(struct stmmac_priv *priv) |
891434b1 | 734 | { |
92ba6888 RK |
735 | if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) |
736 | return -EOPNOTSUPP; | |
737 | ||
7cd01399 | 738 | priv->adv_ts = 0; |
be9b3174 GC |
739 | /* Check if adv_ts can be enabled for dwmac 4.x core */ |
740 | if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp) | |
741 | priv->adv_ts = 1; | |
742 | /* Dwmac 3.x core with extend_desc can support adv_ts */ | |
743 | else if (priv->extend_desc && priv->dma_cap.atime_stamp) | |
7cd01399 VB |
744 | priv->adv_ts = 1; |
745 | ||
be9b3174 GC |
746 | if (priv->dma_cap.time_stamp) |
747 | netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n"); | |
7cd01399 | 748 | |
be9b3174 GC |
749 | if (priv->adv_ts) |
750 | netdev_info(priv->dev, | |
751 | "IEEE 1588-2008 Advanced Timestamp supported\n"); | |
891434b1 RK |
752 | |
753 | priv->hw->ptp = &stmmac_ptp; | |
754 | priv->hwts_tx_en = 0; | |
755 | priv->hwts_rx_en = 0; | |
92ba6888 | 756 | |
c30a70d3 GC |
757 | stmmac_ptp_register(priv); |
758 | ||
759 | return 0; | |
92ba6888 RK |
760 | } |
761 | ||
762 | static void stmmac_release_ptp(struct stmmac_priv *priv) | |
763 | { | |
f573c0b9 | 764 | if (priv->plat->clk_ptp_ref) |
765 | clk_disable_unprepare(priv->plat->clk_ptp_ref); | |
92ba6888 | 766 | stmmac_ptp_unregister(priv); |
891434b1 RK |
767 | } |
768 | ||
29feff39 JP |
769 | /** |
770 | * stmmac_mac_flow_ctrl - Configure flow control in all queues | |
771 | * @priv: driver private structure | |
772 | * Description: It is used for configuring the flow control in all queues | |
773 | */ | |
774 | static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex) | |
775 | { | |
776 | u32 tx_cnt = priv->plat->tx_queues_to_use; | |
777 | ||
778 | priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl, | |
779 | priv->pause, tx_cnt); | |
780 | } | |
781 | ||
47dd7a54 | 782 | /** |
732fdf0e | 783 | * stmmac_adjust_link - adjusts the link parameters |
47dd7a54 | 784 | * @dev: net device structure |
732fdf0e GC |
785 | * Description: this is the helper called by the physical abstraction layer |
786 | * drivers to communicate the phy link status. According the speed and duplex | |
787 | * this driver can invoke registered glue-logic as well. | |
788 | * It also invoke the eee initialization because it could happen when switch | |
789 | * on different networks (that are eee capable). | |
47dd7a54 GC |
790 | */ |
791 | static void stmmac_adjust_link(struct net_device *dev) | |
792 | { | |
793 | struct stmmac_priv *priv = netdev_priv(dev); | |
d6d50c7e | 794 | struct phy_device *phydev = dev->phydev; |
47dd7a54 | 795 | unsigned long flags; |
99a4cca2 | 796 | bool new_state = false; |
47dd7a54 | 797 | |
662ec2b7 | 798 | if (!phydev) |
47dd7a54 GC |
799 | return; |
800 | ||
47dd7a54 | 801 | spin_lock_irqsave(&priv->lock, flags); |
d765955d | 802 | |
47dd7a54 | 803 | if (phydev->link) { |
ad01b7d4 | 804 | u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG); |
47dd7a54 GC |
805 | |
806 | /* Now we make sure that we can be in full duplex mode. | |
807 | * If not, we operate in half-duplex mode. */ | |
808 | if (phydev->duplex != priv->oldduplex) { | |
99a4cca2 | 809 | new_state = true; |
50cb16d4 | 810 | if (!phydev->duplex) |
db98a0b0 | 811 | ctrl &= ~priv->hw->link.duplex; |
47dd7a54 | 812 | else |
db98a0b0 | 813 | ctrl |= priv->hw->link.duplex; |
47dd7a54 GC |
814 | priv->oldduplex = phydev->duplex; |
815 | } | |
816 | /* Flow Control operation */ | |
817 | if (phydev->pause) | |
29feff39 | 818 | stmmac_mac_flow_ctrl(priv, phydev->duplex); |
47dd7a54 GC |
819 | |
820 | if (phydev->speed != priv->speed) { | |
99a4cca2 | 821 | new_state = true; |
ca84dfb9 | 822 | ctrl &= ~priv->hw->link.speed_mask; |
47dd7a54 | 823 | switch (phydev->speed) { |
afbe17a3 | 824 | case SPEED_1000: |
ca84dfb9 | 825 | ctrl |= priv->hw->link.speed1000; |
47dd7a54 | 826 | break; |
afbe17a3 | 827 | case SPEED_100: |
ca84dfb9 | 828 | ctrl |= priv->hw->link.speed100; |
9beae261 | 829 | break; |
afbe17a3 | 830 | case SPEED_10: |
ca84dfb9 | 831 | ctrl |= priv->hw->link.speed10; |
47dd7a54 GC |
832 | break; |
833 | default: | |
b3e51069 | 834 | netif_warn(priv, link, priv->dev, |
cba920af | 835 | "broken speed: %d\n", phydev->speed); |
688495b1 | 836 | phydev->speed = SPEED_UNKNOWN; |
47dd7a54 GC |
837 | break; |
838 | } | |
5db13556 LC |
839 | if (phydev->speed != SPEED_UNKNOWN) |
840 | stmmac_hw_fix_mac_speed(priv); | |
47dd7a54 GC |
841 | priv->speed = phydev->speed; |
842 | } | |
843 | ||
ad01b7d4 | 844 | writel(ctrl, priv->ioaddr + MAC_CTRL_REG); |
47dd7a54 GC |
845 | |
846 | if (!priv->oldlink) { | |
99a4cca2 | 847 | new_state = true; |
4d869b03 | 848 | priv->oldlink = true; |
47dd7a54 GC |
849 | } |
850 | } else if (priv->oldlink) { | |
99a4cca2 | 851 | new_state = true; |
4d869b03 | 852 | priv->oldlink = false; |
bd00632c LC |
853 | priv->speed = SPEED_UNKNOWN; |
854 | priv->oldduplex = DUPLEX_UNKNOWN; | |
47dd7a54 GC |
855 | } |
856 | ||
857 | if (new_state && netif_msg_link(priv)) | |
858 | phy_print_status(phydev); | |
859 | ||
4741cf9c GC |
860 | spin_unlock_irqrestore(&priv->lock, flags); |
861 | ||
52f95bbf GC |
862 | if (phydev->is_pseudo_fixed_link) |
863 | /* Stop PHY layer to call the hook to adjust the link in case | |
864 | * of a switch is attached to the stmmac driver. | |
865 | */ | |
866 | phydev->irq = PHY_IGNORE_INTERRUPT; | |
867 | else | |
868 | /* At this stage, init the EEE if supported. | |
869 | * Never called in case of fixed_link. | |
870 | */ | |
871 | priv->eee_enabled = stmmac_eee_init(priv); | |
47dd7a54 GC |
872 | } |
873 | ||
32ceabca | 874 | /** |
732fdf0e | 875 | * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported |
32ceabca GC |
876 | * @priv: driver private structure |
877 | * Description: this is to verify if the HW supports the PCS. | |
878 | * Physical Coding Sublayer (PCS) interface that can be used when the MAC is | |
879 | * configured for the TBI, RTBI, or SGMII PHY interface. | |
880 | */ | |
e58bb43f GC |
881 | static void stmmac_check_pcs_mode(struct stmmac_priv *priv) |
882 | { | |
883 | int interface = priv->plat->interface; | |
884 | ||
885 | if (priv->dma_cap.pcs) { | |
0d909dcd BA |
886 | if ((interface == PHY_INTERFACE_MODE_RGMII) || |
887 | (interface == PHY_INTERFACE_MODE_RGMII_ID) || | |
888 | (interface == PHY_INTERFACE_MODE_RGMII_RXID) || | |
889 | (interface == PHY_INTERFACE_MODE_RGMII_TXID)) { | |
38ddc59d | 890 | netdev_dbg(priv->dev, "PCS RGMII support enabled\n"); |
3fe5cadb | 891 | priv->hw->pcs = STMMAC_PCS_RGMII; |
0d909dcd | 892 | } else if (interface == PHY_INTERFACE_MODE_SGMII) { |
38ddc59d | 893 | netdev_dbg(priv->dev, "PCS SGMII support enabled\n"); |
3fe5cadb | 894 | priv->hw->pcs = STMMAC_PCS_SGMII; |
e58bb43f GC |
895 | } |
896 | } | |
897 | } | |
898 | ||
47dd7a54 GC |
899 | /** |
900 | * stmmac_init_phy - PHY initialization | |
901 | * @dev: net device structure | |
902 | * Description: it initializes the driver's PHY state, and attaches the PHY | |
903 | * to the mac driver. | |
904 | * Return value: | |
905 | * 0 on success | |
906 | */ | |
907 | static int stmmac_init_phy(struct net_device *dev) | |
908 | { | |
909 | struct stmmac_priv *priv = netdev_priv(dev); | |
910 | struct phy_device *phydev; | |
d765955d | 911 | char phy_id_fmt[MII_BUS_ID_SIZE + 3]; |
109cdd66 | 912 | char bus_id[MII_BUS_ID_SIZE]; |
79ee1dc3 | 913 | int interface = priv->plat->interface; |
9cbadf09 | 914 | int max_speed = priv->plat->max_speed; |
4d869b03 | 915 | priv->oldlink = false; |
bd00632c LC |
916 | priv->speed = SPEED_UNKNOWN; |
917 | priv->oldduplex = DUPLEX_UNKNOWN; | |
47dd7a54 | 918 | |
5790cf3c MO |
919 | if (priv->plat->phy_node) { |
920 | phydev = of_phy_connect(dev, priv->plat->phy_node, | |
921 | &stmmac_adjust_link, 0, interface); | |
922 | } else { | |
a7657f12 GC |
923 | snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x", |
924 | priv->plat->bus_id); | |
5790cf3c MO |
925 | |
926 | snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id, | |
927 | priv->plat->phy_addr); | |
de9a2165 | 928 | netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__, |
38ddc59d | 929 | phy_id_fmt); |
5790cf3c MO |
930 | |
931 | phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, | |
932 | interface); | |
933 | } | |
47dd7a54 | 934 | |
dfc50fca | 935 | if (IS_ERR_OR_NULL(phydev)) { |
38ddc59d | 936 | netdev_err(priv->dev, "Could not attach to PHY\n"); |
dfc50fca AB |
937 | if (!phydev) |
938 | return -ENODEV; | |
939 | ||
47dd7a54 GC |
940 | return PTR_ERR(phydev); |
941 | } | |
942 | ||
79ee1dc3 | 943 | /* Stop Advertising 1000BASE Capability if interface is not GMII */ |
c5b9b4e4 | 944 | if ((interface == PHY_INTERFACE_MODE_MII) || |
9cbadf09 | 945 | (interface == PHY_INTERFACE_MODE_RMII) || |
a77e4acc | 946 | (max_speed < 1000 && max_speed > 0)) |
c5b9b4e4 SK |
947 | phydev->advertising &= ~(SUPPORTED_1000baseT_Half | |
948 | SUPPORTED_1000baseT_Full); | |
79ee1dc3 | 949 | |
47dd7a54 GC |
950 | /* |
951 | * Broken HW is sometimes missing the pull-up resistor on the | |
952 | * MDIO line, which results in reads to non-existent devices returning | |
953 | * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent | |
954 | * device as well. | |
955 | * Note: phydev->phy_id is the result of reading the UID PHY registers. | |
956 | */ | |
27732381 | 957 | if (!priv->plat->phy_node && phydev->phy_id == 0) { |
47dd7a54 GC |
958 | phy_disconnect(phydev); |
959 | return -ENODEV; | |
960 | } | |
8e99fc5f | 961 | |
c51e424d FF |
962 | /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid |
963 | * subsequent PHY polling, make sure we force a link transition if | |
964 | * we have a UP/DOWN/UP transition | |
965 | */ | |
966 | if (phydev->is_pseudo_fixed_link) | |
967 | phydev->irq = PHY_POLL; | |
968 | ||
b05c76a1 | 969 | phy_attached_info(phydev); |
47dd7a54 GC |
970 | return 0; |
971 | } | |
972 | ||
71fedb01 | 973 | static void stmmac_display_rx_rings(struct stmmac_priv *priv) |
c24602ef | 974 | { |
54139cf3 | 975 | u32 rx_cnt = priv->plat->rx_queues_to_use; |
71fedb01 | 976 | void *head_rx; |
54139cf3 | 977 | u32 queue; |
aff3d9ef | 978 | |
54139cf3 JP |
979 | /* Display RX rings */ |
980 | for (queue = 0; queue < rx_cnt; queue++) { | |
981 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; | |
d0225e7d | 982 | |
54139cf3 JP |
983 | pr_info("\tRX Queue %u rings\n", queue); |
984 | ||
985 | if (priv->extend_desc) | |
986 | head_rx = (void *)rx_q->dma_erx; | |
987 | else | |
988 | head_rx = (void *)rx_q->dma_rx; | |
989 | ||
990 | /* Display RX ring */ | |
991 | priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true); | |
992 | } | |
71fedb01 JP |
993 | } |
994 | ||
995 | static void stmmac_display_tx_rings(struct stmmac_priv *priv) | |
996 | { | |
ce736788 | 997 | u32 tx_cnt = priv->plat->tx_queues_to_use; |
71fedb01 | 998 | void *head_tx; |
ce736788 | 999 | u32 queue; |
71fedb01 | 1000 | |
ce736788 JP |
1001 | /* Display TX rings */ |
1002 | for (queue = 0; queue < tx_cnt; queue++) { | |
1003 | struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; | |
71fedb01 | 1004 | |
ce736788 JP |
1005 | pr_info("\tTX Queue %d rings\n", queue); |
1006 | ||
1007 | if (priv->extend_desc) | |
1008 | head_tx = (void *)tx_q->dma_etx; | |
1009 | else | |
1010 | head_tx = (void *)tx_q->dma_tx; | |
1011 | ||
1012 | priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false); | |
1013 | } | |
c24602ef GC |
1014 | } |
1015 | ||
71fedb01 JP |
1016 | static void stmmac_display_rings(struct stmmac_priv *priv) |
1017 | { | |
1018 | /* Display RX ring */ | |
1019 | stmmac_display_rx_rings(priv); | |
1020 | ||
1021 | /* Display TX ring */ | |
1022 | stmmac_display_tx_rings(priv); | |
1023 | } | |
1024 | ||
286a8372 GC |
1025 | static int stmmac_set_bfsize(int mtu, int bufsize) |
1026 | { | |
1027 | int ret = bufsize; | |
1028 | ||
1029 | if (mtu >= BUF_SIZE_4KiB) | |
1030 | ret = BUF_SIZE_8KiB; | |
1031 | else if (mtu >= BUF_SIZE_2KiB) | |
1032 | ret = BUF_SIZE_4KiB; | |
d916701c | 1033 | else if (mtu > DEFAULT_BUFSIZE) |
286a8372 GC |
1034 | ret = BUF_SIZE_2KiB; |
1035 | else | |
d916701c | 1036 | ret = DEFAULT_BUFSIZE; |
286a8372 GC |
1037 | |
1038 | return ret; | |
1039 | } | |
1040 | ||
32ceabca | 1041 | /** |
71fedb01 | 1042 | * stmmac_clear_rx_descriptors - clear RX descriptors |
32ceabca | 1043 | * @priv: driver private structure |
54139cf3 | 1044 | * @queue: RX queue index |
71fedb01 | 1045 | * Description: this function is called to clear the RX descriptors |
32ceabca GC |
1046 | * in case of both basic and extended descriptors are used. |
1047 | */ | |
54139cf3 | 1048 | static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue) |
c24602ef | 1049 | { |
54139cf3 | 1050 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; |
5bacd778 | 1051 | int i; |
c24602ef | 1052 | |
71fedb01 | 1053 | /* Clear the RX descriptors */ |
e3ad57c9 | 1054 | for (i = 0; i < DMA_RX_SIZE; i++) |
c24602ef | 1055 | if (priv->extend_desc) |
54139cf3 | 1056 | priv->hw->desc->init_rx_desc(&rx_q->dma_erx[i].basic, |
c24602ef | 1057 | priv->use_riwt, priv->mode, |
e3ad57c9 | 1058 | (i == DMA_RX_SIZE - 1)); |
c24602ef | 1059 | else |
54139cf3 | 1060 | priv->hw->desc->init_rx_desc(&rx_q->dma_rx[i], |
c24602ef | 1061 | priv->use_riwt, priv->mode, |
e3ad57c9 | 1062 | (i == DMA_RX_SIZE - 1)); |
71fedb01 JP |
1063 | } |
1064 | ||
1065 | /** | |
1066 | * stmmac_clear_tx_descriptors - clear tx descriptors | |
1067 | * @priv: driver private structure | |
ce736788 | 1068 | * @queue: TX queue index. |
71fedb01 JP |
1069 | * Description: this function is called to clear the TX descriptors |
1070 | * in case of both basic and extended descriptors are used. | |
1071 | */ | |
ce736788 | 1072 | static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue) |
71fedb01 | 1073 | { |
ce736788 | 1074 | struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; |
71fedb01 JP |
1075 | int i; |
1076 | ||
1077 | /* Clear the TX descriptors */ | |
e3ad57c9 | 1078 | for (i = 0; i < DMA_TX_SIZE; i++) |
c24602ef | 1079 | if (priv->extend_desc) |
ce736788 | 1080 | priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic, |
c24602ef | 1081 | priv->mode, |
e3ad57c9 | 1082 | (i == DMA_TX_SIZE - 1)); |
c24602ef | 1083 | else |
ce736788 | 1084 | priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i], |
c24602ef | 1085 | priv->mode, |
e3ad57c9 | 1086 | (i == DMA_TX_SIZE - 1)); |
c24602ef GC |
1087 | } |
1088 | ||
71fedb01 JP |
1089 | /** |
1090 | * stmmac_clear_descriptors - clear descriptors | |
1091 | * @priv: driver private structure | |
1092 | * Description: this function is called to clear the TX and RX descriptors | |
1093 | * in case of both basic and extended descriptors are used. | |
1094 | */ | |
1095 | static void stmmac_clear_descriptors(struct stmmac_priv *priv) | |
1096 | { | |
54139cf3 | 1097 | u32 rx_queue_cnt = priv->plat->rx_queues_to_use; |
ce736788 | 1098 | u32 tx_queue_cnt = priv->plat->tx_queues_to_use; |
54139cf3 JP |
1099 | u32 queue; |
1100 | ||
71fedb01 | 1101 | /* Clear the RX descriptors */ |
54139cf3 JP |
1102 | for (queue = 0; queue < rx_queue_cnt; queue++) |
1103 | stmmac_clear_rx_descriptors(priv, queue); | |
71fedb01 JP |
1104 | |
1105 | /* Clear the TX descriptors */ | |
ce736788 JP |
1106 | for (queue = 0; queue < tx_queue_cnt; queue++) |
1107 | stmmac_clear_tx_descriptors(priv, queue); | |
71fedb01 JP |
1108 | } |
1109 | ||
732fdf0e GC |
1110 | /** |
1111 | * stmmac_init_rx_buffers - init the RX descriptor buffer. | |
1112 | * @priv: driver private structure | |
1113 | * @p: descriptor pointer | |
1114 | * @i: descriptor index | |
54139cf3 JP |
1115 | * @flags: gfp flag |
1116 | * @queue: RX queue index | |
732fdf0e GC |
1117 | * Description: this function is called to allocate a receive buffer, perform |
1118 | * the DMA mapping and init the descriptor. | |
1119 | */ | |
c24602ef | 1120 | static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, |
54139cf3 | 1121 | int i, gfp_t flags, u32 queue) |
c24602ef | 1122 | { |
54139cf3 | 1123 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; |
c24602ef GC |
1124 | struct sk_buff *skb; |
1125 | ||
4ec49a37 | 1126 | skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags); |
56329137 | 1127 | if (!skb) { |
38ddc59d LC |
1128 | netdev_err(priv->dev, |
1129 | "%s: Rx init fails; skb is NULL\n", __func__); | |
56329137 | 1130 | return -ENOMEM; |
c24602ef | 1131 | } |
54139cf3 JP |
1132 | rx_q->rx_skbuff[i] = skb; |
1133 | rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data, | |
c24602ef GC |
1134 | priv->dma_buf_sz, |
1135 | DMA_FROM_DEVICE); | |
54139cf3 | 1136 | if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) { |
38ddc59d | 1137 | netdev_err(priv->dev, "%s: DMA mapping error\n", __func__); |
56329137 BZ |
1138 | dev_kfree_skb_any(skb); |
1139 | return -EINVAL; | |
1140 | } | |
c24602ef | 1141 | |
f748be53 | 1142 | if (priv->synopsys_id >= DWMAC_CORE_4_00) |
54139cf3 | 1143 | p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]); |
f748be53 | 1144 | else |
54139cf3 | 1145 | p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]); |
c24602ef | 1146 | |
29896a67 | 1147 | if ((priv->hw->mode->init_desc3) && |
c24602ef | 1148 | (priv->dma_buf_sz == BUF_SIZE_16KiB)) |
29896a67 | 1149 | priv->hw->mode->init_desc3(p); |
c24602ef GC |
1150 | |
1151 | return 0; | |
1152 | } | |
1153 | ||
71fedb01 JP |
1154 | /** |
1155 | * stmmac_free_rx_buffer - free RX dma buffers | |
1156 | * @priv: private structure | |
54139cf3 | 1157 | * @queue: RX queue index |
71fedb01 JP |
1158 | * @i: buffer index. |
1159 | */ | |
54139cf3 | 1160 | static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i) |
56329137 | 1161 | { |
54139cf3 JP |
1162 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; |
1163 | ||
1164 | if (rx_q->rx_skbuff[i]) { | |
1165 | dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i], | |
56329137 | 1166 | priv->dma_buf_sz, DMA_FROM_DEVICE); |
54139cf3 | 1167 | dev_kfree_skb_any(rx_q->rx_skbuff[i]); |
aff3d9ef | 1168 | } |
54139cf3 | 1169 | rx_q->rx_skbuff[i] = NULL; |
aff3d9ef JP |
1170 | } |
1171 | ||
1172 | /** | |
71fedb01 JP |
1173 | * stmmac_free_tx_buffer - free RX dma buffers |
1174 | * @priv: private structure | |
ce736788 | 1175 | * @queue: RX queue index |
71fedb01 JP |
1176 | * @i: buffer index. |
1177 | */ | |
ce736788 | 1178 | static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i) |
71fedb01 | 1179 | { |
ce736788 JP |
1180 | struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; |
1181 | ||
1182 | if (tx_q->tx_skbuff_dma[i].buf) { | |
1183 | if (tx_q->tx_skbuff_dma[i].map_as_page) | |
71fedb01 | 1184 | dma_unmap_page(priv->device, |
ce736788 JP |
1185 | tx_q->tx_skbuff_dma[i].buf, |
1186 | tx_q->tx_skbuff_dma[i].len, | |
71fedb01 JP |
1187 | DMA_TO_DEVICE); |
1188 | else | |
1189 | dma_unmap_single(priv->device, | |
ce736788 JP |
1190 | tx_q->tx_skbuff_dma[i].buf, |
1191 | tx_q->tx_skbuff_dma[i].len, | |
71fedb01 JP |
1192 | DMA_TO_DEVICE); |
1193 | } | |
1194 | ||
ce736788 JP |
1195 | if (tx_q->tx_skbuff[i]) { |
1196 | dev_kfree_skb_any(tx_q->tx_skbuff[i]); | |
1197 | tx_q->tx_skbuff[i] = NULL; | |
1198 | tx_q->tx_skbuff_dma[i].buf = 0; | |
1199 | tx_q->tx_skbuff_dma[i].map_as_page = false; | |
71fedb01 JP |
1200 | } |
1201 | } | |
1202 | ||
1203 | /** | |
1204 | * init_dma_rx_desc_rings - init the RX descriptor rings | |
47dd7a54 | 1205 | * @dev: net device structure |
732fdf0e | 1206 | * @flags: gfp flag. |
71fedb01 | 1207 | * Description: this function initializes the DMA RX descriptors |
5bacd778 | 1208 | * and allocates the socket buffers. It supports the chained and ring |
286a8372 | 1209 | * modes. |
47dd7a54 | 1210 | */ |
71fedb01 | 1211 | static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags) |
47dd7a54 | 1212 | { |
47dd7a54 | 1213 | struct stmmac_priv *priv = netdev_priv(dev); |
54139cf3 | 1214 | u32 rx_count = priv->plat->rx_queues_to_use; |
4a7d666a | 1215 | unsigned int bfsize = 0; |
56329137 | 1216 | int ret = -ENOMEM; |
1d3028f4 | 1217 | int queue; |
54139cf3 | 1218 | int i; |
47dd7a54 | 1219 | |
29896a67 GC |
1220 | if (priv->hw->mode->set_16kib_bfsize) |
1221 | bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu); | |
286a8372 | 1222 | |
4a7d666a | 1223 | if (bfsize < BUF_SIZE_16KiB) |
286a8372 | 1224 | bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz); |
47dd7a54 | 1225 | |
2618abb7 VB |
1226 | priv->dma_buf_sz = bfsize; |
1227 | ||
54139cf3 | 1228 | /* RX INITIALIZATION */ |
b3e51069 LC |
1229 | netif_dbg(priv, probe, priv->dev, |
1230 | "SKB addresses:\nskb\t\tskb data\tdma data\n"); | |
47dd7a54 | 1231 | |
54139cf3 JP |
1232 | for (queue = 0; queue < rx_count; queue++) { |
1233 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; | |
c24602ef | 1234 | |
54139cf3 JP |
1235 | netif_dbg(priv, probe, priv->dev, |
1236 | "(%s) dma_rx_phy=0x%08x\n", __func__, | |
1237 | (u32)rx_q->dma_rx_phy); | |
f748be53 | 1238 | |
54139cf3 JP |
1239 | for (i = 0; i < DMA_RX_SIZE; i++) { |
1240 | struct dma_desc *p; | |
aff3d9ef | 1241 | |
54139cf3 JP |
1242 | if (priv->extend_desc) |
1243 | p = &((rx_q->dma_erx + i)->basic); | |
1244 | else | |
1245 | p = rx_q->dma_rx + i; | |
1246 | ||
1247 | ret = stmmac_init_rx_buffers(priv, p, i, flags, | |
1248 | queue); | |
1249 | if (ret) | |
1250 | goto err_init_rx_buffers; | |
1251 | ||
1252 | netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n", | |
1253 | rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data, | |
1254 | (unsigned int)rx_q->rx_skbuff_dma[i]); | |
1255 | } | |
1256 | ||
1257 | rx_q->cur_rx = 0; | |
1258 | rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE); | |
1259 | ||
1260 | stmmac_clear_rx_descriptors(priv, queue); | |
1261 | ||
1262 | /* Setup the chained descriptor addresses */ | |
1263 | if (priv->mode == STMMAC_CHAIN_MODE) { | |
1264 | if (priv->extend_desc) | |
1265 | priv->hw->mode->init(rx_q->dma_erx, | |
1266 | rx_q->dma_rx_phy, | |
1267 | DMA_RX_SIZE, 1); | |
1268 | else | |
1269 | priv->hw->mode->init(rx_q->dma_rx, | |
1270 | rx_q->dma_rx_phy, | |
1271 | DMA_RX_SIZE, 0); | |
1272 | } | |
71fedb01 JP |
1273 | } |
1274 | ||
54139cf3 JP |
1275 | buf_sz = bfsize; |
1276 | ||
71fedb01 | 1277 | return 0; |
54139cf3 | 1278 | |
71fedb01 | 1279 | err_init_rx_buffers: |
54139cf3 JP |
1280 | while (queue >= 0) { |
1281 | while (--i >= 0) | |
1282 | stmmac_free_rx_buffer(priv, queue, i); | |
1283 | ||
1284 | if (queue == 0) | |
1285 | break; | |
1286 | ||
1287 | i = DMA_RX_SIZE; | |
1288 | queue--; | |
1289 | } | |
1290 | ||
71fedb01 JP |
1291 | return ret; |
1292 | } | |
1293 | ||
1294 | /** | |
1295 | * init_dma_tx_desc_rings - init the TX descriptor rings | |
1296 | * @dev: net device structure. | |
1297 | * Description: this function initializes the DMA TX descriptors | |
1298 | * and allocates the socket buffers. It supports the chained and ring | |
1299 | * modes. | |
1300 | */ | |
1301 | static int init_dma_tx_desc_rings(struct net_device *dev) | |
1302 | { | |
1303 | struct stmmac_priv *priv = netdev_priv(dev); | |
ce736788 JP |
1304 | u32 tx_queue_cnt = priv->plat->tx_queues_to_use; |
1305 | u32 queue; | |
71fedb01 JP |
1306 | int i; |
1307 | ||
ce736788 JP |
1308 | for (queue = 0; queue < tx_queue_cnt; queue++) { |
1309 | struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; | |
71fedb01 | 1310 | |
ce736788 JP |
1311 | netif_dbg(priv, probe, priv->dev, |
1312 | "(%s) dma_tx_phy=0x%08x\n", __func__, | |
1313 | (u32)tx_q->dma_tx_phy); | |
f748be53 | 1314 | |
ce736788 JP |
1315 | /* Setup the chained descriptor addresses */ |
1316 | if (priv->mode == STMMAC_CHAIN_MODE) { | |
1317 | if (priv->extend_desc) | |
1318 | priv->hw->mode->init(tx_q->dma_etx, | |
1319 | tx_q->dma_tx_phy, | |
1320 | DMA_TX_SIZE, 1); | |
1321 | else | |
1322 | priv->hw->mode->init(tx_q->dma_tx, | |
1323 | tx_q->dma_tx_phy, | |
1324 | DMA_TX_SIZE, 0); | |
1325 | } | |
aff3d9ef | 1326 | |
ce736788 JP |
1327 | for (i = 0; i < DMA_TX_SIZE; i++) { |
1328 | struct dma_desc *p; | |
ce736788 JP |
1329 | if (priv->extend_desc) |
1330 | p = &((tx_q->dma_etx + i)->basic); | |
1331 | else | |
1332 | p = tx_q->dma_tx + i; | |
1333 | ||
1334 | if (priv->synopsys_id >= DWMAC_CORE_4_00) { | |
1335 | p->des0 = 0; | |
1336 | p->des1 = 0; | |
1337 | p->des2 = 0; | |
1338 | p->des3 = 0; | |
1339 | } else { | |
1340 | p->des2 = 0; | |
1341 | } | |
1342 | ||
1343 | tx_q->tx_skbuff_dma[i].buf = 0; | |
1344 | tx_q->tx_skbuff_dma[i].map_as_page = false; | |
1345 | tx_q->tx_skbuff_dma[i].len = 0; | |
1346 | tx_q->tx_skbuff_dma[i].last_segment = false; | |
1347 | tx_q->tx_skbuff[i] = NULL; | |
5bacd778 | 1348 | } |
aff3d9ef | 1349 | |
ce736788 JP |
1350 | tx_q->dirty_tx = 0; |
1351 | tx_q->cur_tx = 0; | |
286a8372 | 1352 | |
c22a3f48 JP |
1353 | netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue)); |
1354 | } | |
aff3d9ef | 1355 | |
71fedb01 JP |
1356 | return 0; |
1357 | } | |
1358 | ||
1359 | /** | |
1360 | * init_dma_desc_rings - init the RX/TX descriptor rings | |
1361 | * @dev: net device structure | |
1362 | * @flags: gfp flag. | |
1363 | * Description: this function initializes the DMA RX/TX descriptors | |
1364 | * and allocates the socket buffers. It supports the chained and ring | |
1365 | * modes. | |
1366 | */ | |
1367 | static int init_dma_desc_rings(struct net_device *dev, gfp_t flags) | |
1368 | { | |
1369 | struct stmmac_priv *priv = netdev_priv(dev); | |
1370 | int ret; | |
1371 | ||
1372 | ret = init_dma_rx_desc_rings(dev, flags); | |
1373 | if (ret) | |
1374 | return ret; | |
1375 | ||
1376 | ret = init_dma_tx_desc_rings(dev); | |
1377 | ||
5bacd778 | 1378 | stmmac_clear_descriptors(priv); |
47dd7a54 | 1379 | |
c24602ef GC |
1380 | if (netif_msg_hw(priv)) |
1381 | stmmac_display_rings(priv); | |
56329137 | 1382 | |
56329137 | 1383 | return ret; |
47dd7a54 GC |
1384 | } |
1385 | ||
71fedb01 JP |
1386 | /** |
1387 | * dma_free_rx_skbufs - free RX dma buffers | |
1388 | * @priv: private structure | |
54139cf3 | 1389 | * @queue: RX queue index |
71fedb01 | 1390 | */ |
54139cf3 | 1391 | static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue) |
47dd7a54 GC |
1392 | { |
1393 | int i; | |
1394 | ||
e3ad57c9 | 1395 | for (i = 0; i < DMA_RX_SIZE; i++) |
54139cf3 | 1396 | stmmac_free_rx_buffer(priv, queue, i); |
47dd7a54 GC |
1397 | } |
1398 | ||
71fedb01 JP |
1399 | /** |
1400 | * dma_free_tx_skbufs - free TX dma buffers | |
1401 | * @priv: private structure | |
ce736788 | 1402 | * @queue: TX queue index |
71fedb01 | 1403 | */ |
ce736788 | 1404 | static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue) |
47dd7a54 GC |
1405 | { |
1406 | int i; | |
1407 | ||
71fedb01 | 1408 | for (i = 0; i < DMA_TX_SIZE; i++) |
ce736788 | 1409 | stmmac_free_tx_buffer(priv, queue, i); |
47dd7a54 GC |
1410 | } |
1411 | ||
54139cf3 JP |
1412 | /** |
1413 | * free_dma_rx_desc_resources - free RX dma desc resources | |
1414 | * @priv: private structure | |
1415 | */ | |
1416 | static void free_dma_rx_desc_resources(struct stmmac_priv *priv) | |
1417 | { | |
1418 | u32 rx_count = priv->plat->rx_queues_to_use; | |
1419 | u32 queue; | |
1420 | ||
1421 | /* Free RX queue resources */ | |
1422 | for (queue = 0; queue < rx_count; queue++) { | |
1423 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; | |
1424 | ||
1425 | /* Release the DMA RX socket buffers */ | |
1426 | dma_free_rx_skbufs(priv, queue); | |
1427 | ||
1428 | /* Free DMA regions of consistent memory previously allocated */ | |
1429 | if (!priv->extend_desc) | |
1430 | dma_free_coherent(priv->device, | |
1431 | DMA_RX_SIZE * sizeof(struct dma_desc), | |
1432 | rx_q->dma_rx, rx_q->dma_rx_phy); | |
1433 | else | |
1434 | dma_free_coherent(priv->device, DMA_RX_SIZE * | |
1435 | sizeof(struct dma_extended_desc), | |
1436 | rx_q->dma_erx, rx_q->dma_rx_phy); | |
1437 | ||
1438 | kfree(rx_q->rx_skbuff_dma); | |
1439 | kfree(rx_q->rx_skbuff); | |
1440 | } | |
1441 | } | |
1442 | ||
ce736788 JP |
1443 | /** |
1444 | * free_dma_tx_desc_resources - free TX dma desc resources | |
1445 | * @priv: private structure | |
1446 | */ | |
1447 | static void free_dma_tx_desc_resources(struct stmmac_priv *priv) | |
1448 | { | |
1449 | u32 tx_count = priv->plat->tx_queues_to_use; | |
62242260 | 1450 | u32 queue; |
ce736788 JP |
1451 | |
1452 | /* Free TX queue resources */ | |
1453 | for (queue = 0; queue < tx_count; queue++) { | |
1454 | struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; | |
1455 | ||
1456 | /* Release the DMA TX socket buffers */ | |
1457 | dma_free_tx_skbufs(priv, queue); | |
1458 | ||
1459 | /* Free DMA regions of consistent memory previously allocated */ | |
1460 | if (!priv->extend_desc) | |
1461 | dma_free_coherent(priv->device, | |
1462 | DMA_TX_SIZE * sizeof(struct dma_desc), | |
1463 | tx_q->dma_tx, tx_q->dma_tx_phy); | |
1464 | else | |
1465 | dma_free_coherent(priv->device, DMA_TX_SIZE * | |
1466 | sizeof(struct dma_extended_desc), | |
1467 | tx_q->dma_etx, tx_q->dma_tx_phy); | |
1468 | ||
1469 | kfree(tx_q->tx_skbuff_dma); | |
1470 | kfree(tx_q->tx_skbuff); | |
1471 | } | |
1472 | } | |
1473 | ||
732fdf0e | 1474 | /** |
71fedb01 | 1475 | * alloc_dma_rx_desc_resources - alloc RX resources. |
732fdf0e GC |
1476 | * @priv: private structure |
1477 | * Description: according to which descriptor can be used (extend or basic) | |
5bacd778 LC |
1478 | * this function allocates the resources for TX and RX paths. In case of |
1479 | * reception, for example, it pre-allocated the RX socket buffer in order to | |
1480 | * allow zero-copy mechanism. | |
732fdf0e | 1481 | */ |
71fedb01 | 1482 | static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv) |
09f8d696 | 1483 | { |
54139cf3 | 1484 | u32 rx_count = priv->plat->rx_queues_to_use; |
09f8d696 | 1485 | int ret = -ENOMEM; |
54139cf3 | 1486 | u32 queue; |
09f8d696 | 1487 | |
54139cf3 JP |
1488 | /* RX queues buffers and DMA */ |
1489 | for (queue = 0; queue < rx_count; queue++) { | |
1490 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; | |
09f8d696 | 1491 | |
54139cf3 JP |
1492 | rx_q->queue_index = queue; |
1493 | rx_q->priv_data = priv; | |
5bacd778 | 1494 | |
54139cf3 JP |
1495 | rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, |
1496 | sizeof(dma_addr_t), | |
71fedb01 | 1497 | GFP_KERNEL); |
54139cf3 | 1498 | if (!rx_q->rx_skbuff_dma) |
63c3aa6b | 1499 | goto err_dma; |
71fedb01 | 1500 | |
54139cf3 JP |
1501 | rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE, |
1502 | sizeof(struct sk_buff *), | |
1503 | GFP_KERNEL); | |
1504 | if (!rx_q->rx_skbuff) | |
71fedb01 | 1505 | goto err_dma; |
54139cf3 JP |
1506 | |
1507 | if (priv->extend_desc) { | |
1508 | rx_q->dma_erx = dma_zalloc_coherent(priv->device, | |
1509 | DMA_RX_SIZE * | |
1510 | sizeof(struct | |
1511 | dma_extended_desc), | |
1512 | &rx_q->dma_rx_phy, | |
1513 | GFP_KERNEL); | |
1514 | if (!rx_q->dma_erx) | |
1515 | goto err_dma; | |
1516 | ||
1517 | } else { | |
1518 | rx_q->dma_rx = dma_zalloc_coherent(priv->device, | |
1519 | DMA_RX_SIZE * | |
1520 | sizeof(struct | |
1521 | dma_desc), | |
1522 | &rx_q->dma_rx_phy, | |
1523 | GFP_KERNEL); | |
1524 | if (!rx_q->dma_rx) | |
1525 | goto err_dma; | |
1526 | } | |
71fedb01 JP |
1527 | } |
1528 | ||
1529 | return 0; | |
1530 | ||
1531 | err_dma: | |
54139cf3 JP |
1532 | free_dma_rx_desc_resources(priv); |
1533 | ||
71fedb01 JP |
1534 | return ret; |
1535 | } | |
1536 | ||
1537 | /** | |
1538 | * alloc_dma_tx_desc_resources - alloc TX resources. | |
1539 | * @priv: private structure | |
1540 | * Description: according to which descriptor can be used (extend or basic) | |
1541 | * this function allocates the resources for TX and RX paths. In case of | |
1542 | * reception, for example, it pre-allocated the RX socket buffer in order to | |
1543 | * allow zero-copy mechanism. | |
1544 | */ | |
1545 | static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv) | |
1546 | { | |
ce736788 | 1547 | u32 tx_count = priv->plat->tx_queues_to_use; |
71fedb01 | 1548 | int ret = -ENOMEM; |
ce736788 | 1549 | u32 queue; |
71fedb01 | 1550 | |
ce736788 JP |
1551 | /* TX queues buffers and DMA */ |
1552 | for (queue = 0; queue < tx_count; queue++) { | |
1553 | struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; | |
5bacd778 | 1554 | |
ce736788 JP |
1555 | tx_q->queue_index = queue; |
1556 | tx_q->priv_data = priv; | |
5bacd778 | 1557 | |
ce736788 JP |
1558 | tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE, |
1559 | sizeof(*tx_q->tx_skbuff_dma), | |
5bacd778 | 1560 | GFP_KERNEL); |
ce736788 | 1561 | if (!tx_q->tx_skbuff_dma) |
62242260 | 1562 | goto err_dma; |
ce736788 JP |
1563 | |
1564 | tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE, | |
1565 | sizeof(struct sk_buff *), | |
1566 | GFP_KERNEL); | |
1567 | if (!tx_q->tx_skbuff) | |
62242260 | 1568 | goto err_dma; |
ce736788 JP |
1569 | |
1570 | if (priv->extend_desc) { | |
1571 | tx_q->dma_etx = dma_zalloc_coherent(priv->device, | |
1572 | DMA_TX_SIZE * | |
1573 | sizeof(struct | |
1574 | dma_extended_desc), | |
1575 | &tx_q->dma_tx_phy, | |
1576 | GFP_KERNEL); | |
1577 | if (!tx_q->dma_etx) | |
62242260 | 1578 | goto err_dma; |
ce736788 JP |
1579 | } else { |
1580 | tx_q->dma_tx = dma_zalloc_coherent(priv->device, | |
1581 | DMA_TX_SIZE * | |
1582 | sizeof(struct | |
1583 | dma_desc), | |
1584 | &tx_q->dma_tx_phy, | |
1585 | GFP_KERNEL); | |
1586 | if (!tx_q->dma_tx) | |
62242260 | 1587 | goto err_dma; |
ce736788 | 1588 | } |
09f8d696 SK |
1589 | } |
1590 | ||
1591 | return 0; | |
1592 | ||
62242260 | 1593 | err_dma: |
ce736788 JP |
1594 | free_dma_tx_desc_resources(priv); |
1595 | ||
09f8d696 SK |
1596 | return ret; |
1597 | } | |
1598 | ||
71fedb01 JP |
1599 | /** |
1600 | * alloc_dma_desc_resources - alloc TX/RX resources. | |
1601 | * @priv: private structure | |
1602 | * Description: according to which descriptor can be used (extend or basic) | |
1603 | * this function allocates the resources for TX and RX paths. In case of | |
1604 | * reception, for example, it pre-allocated the RX socket buffer in order to | |
1605 | * allow zero-copy mechanism. | |
1606 | */ | |
1607 | static int alloc_dma_desc_resources(struct stmmac_priv *priv) | |
1608 | { | |
54139cf3 | 1609 | /* RX Allocation */ |
71fedb01 JP |
1610 | int ret = alloc_dma_rx_desc_resources(priv); |
1611 | ||
1612 | if (ret) | |
1613 | return ret; | |
1614 | ||
1615 | ret = alloc_dma_tx_desc_resources(priv); | |
1616 | ||
1617 | return ret; | |
1618 | } | |
1619 | ||
71fedb01 JP |
1620 | /** |
1621 | * free_dma_desc_resources - free dma desc resources | |
1622 | * @priv: private structure | |
1623 | */ | |
1624 | static void free_dma_desc_resources(struct stmmac_priv *priv) | |
1625 | { | |
1626 | /* Release the DMA RX socket buffers */ | |
1627 | free_dma_rx_desc_resources(priv); | |
1628 | ||
1629 | /* Release the DMA TX socket buffers */ | |
1630 | free_dma_tx_desc_resources(priv); | |
1631 | } | |
1632 | ||
9eb12474 | 1633 | /** |
1634 | * stmmac_mac_enable_rx_queues - Enable MAC rx queues | |
1635 | * @priv: driver private structure | |
1636 | * Description: It is used for enabling the rx queues in the MAC | |
1637 | */ | |
1638 | static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv) | |
1639 | { | |
4f6046f5 JP |
1640 | u32 rx_queues_count = priv->plat->rx_queues_to_use; |
1641 | int queue; | |
1642 | u8 mode; | |
9eb12474 | 1643 | |
4f6046f5 JP |
1644 | for (queue = 0; queue < rx_queues_count; queue++) { |
1645 | mode = priv->plat->rx_queues_cfg[queue].mode_to_use; | |
1646 | priv->hw->mac->rx_queue_enable(priv->hw, mode, queue); | |
1647 | } | |
9eb12474 | 1648 | } |
1649 | ||
ae4f0d46 JP |
1650 | /** |
1651 | * stmmac_start_rx_dma - start RX DMA channel | |
1652 | * @priv: driver private structure | |
1653 | * @chan: RX channel index | |
1654 | * Description: | |
1655 | * This starts a RX DMA channel | |
1656 | */ | |
1657 | static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan) | |
1658 | { | |
1659 | netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan); | |
1660 | priv->hw->dma->start_rx(priv->ioaddr, chan); | |
1661 | } | |
1662 | ||
1663 | /** | |
1664 | * stmmac_start_tx_dma - start TX DMA channel | |
1665 | * @priv: driver private structure | |
1666 | * @chan: TX channel index | |
1667 | * Description: | |
1668 | * This starts a TX DMA channel | |
1669 | */ | |
1670 | static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan) | |
1671 | { | |
1672 | netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan); | |
1673 | priv->hw->dma->start_tx(priv->ioaddr, chan); | |
1674 | } | |
1675 | ||
1676 | /** | |
1677 | * stmmac_stop_rx_dma - stop RX DMA channel | |
1678 | * @priv: driver private structure | |
1679 | * @chan: RX channel index | |
1680 | * Description: | |
1681 | * This stops a RX DMA channel | |
1682 | */ | |
1683 | static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan) | |
1684 | { | |
1685 | netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan); | |
1686 | priv->hw->dma->stop_rx(priv->ioaddr, chan); | |
1687 | } | |
1688 | ||
1689 | /** | |
1690 | * stmmac_stop_tx_dma - stop TX DMA channel | |
1691 | * @priv: driver private structure | |
1692 | * @chan: TX channel index | |
1693 | * Description: | |
1694 | * This stops a TX DMA channel | |
1695 | */ | |
1696 | static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan) | |
1697 | { | |
1698 | netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan); | |
1699 | priv->hw->dma->stop_tx(priv->ioaddr, chan); | |
1700 | } | |
1701 | ||
1702 | /** | |
1703 | * stmmac_start_all_dma - start all RX and TX DMA channels | |
1704 | * @priv: driver private structure | |
1705 | * Description: | |
1706 | * This starts all the RX and TX DMA channels | |
1707 | */ | |
1708 | static void stmmac_start_all_dma(struct stmmac_priv *priv) | |
1709 | { | |
1710 | u32 rx_channels_count = priv->plat->rx_queues_to_use; | |
1711 | u32 tx_channels_count = priv->plat->tx_queues_to_use; | |
1712 | u32 chan = 0; | |
1713 | ||
1714 | for (chan = 0; chan < rx_channels_count; chan++) | |
1715 | stmmac_start_rx_dma(priv, chan); | |
1716 | ||
1717 | for (chan = 0; chan < tx_channels_count; chan++) | |
1718 | stmmac_start_tx_dma(priv, chan); | |
1719 | } | |
1720 | ||
1721 | /** | |
1722 | * stmmac_stop_all_dma - stop all RX and TX DMA channels | |
1723 | * @priv: driver private structure | |
1724 | * Description: | |
1725 | * This stops the RX and TX DMA channels | |
1726 | */ | |
1727 | static void stmmac_stop_all_dma(struct stmmac_priv *priv) | |
1728 | { | |
1729 | u32 rx_channels_count = priv->plat->rx_queues_to_use; | |
1730 | u32 tx_channels_count = priv->plat->tx_queues_to_use; | |
1731 | u32 chan = 0; | |
1732 | ||
1733 | for (chan = 0; chan < rx_channels_count; chan++) | |
1734 | stmmac_stop_rx_dma(priv, chan); | |
1735 | ||
1736 | for (chan = 0; chan < tx_channels_count; chan++) | |
1737 | stmmac_stop_tx_dma(priv, chan); | |
1738 | } | |
1739 | ||
47dd7a54 GC |
1740 | /** |
1741 | * stmmac_dma_operation_mode - HW DMA operation mode | |
32ceabca | 1742 | * @priv: driver private structure |
732fdf0e GC |
1743 | * Description: it is used for configuring the DMA operation mode register in |
1744 | * order to program the tx/rx DMA thresholds or Store-And-Forward mode. | |
47dd7a54 GC |
1745 | */ |
1746 | static void stmmac_dma_operation_mode(struct stmmac_priv *priv) | |
1747 | { | |
6deee222 JP |
1748 | u32 rx_channels_count = priv->plat->rx_queues_to_use; |
1749 | u32 tx_channels_count = priv->plat->tx_queues_to_use; | |
f88203a2 | 1750 | int rxfifosz = priv->plat->rx_fifo_size; |
52a76235 | 1751 | int txfifosz = priv->plat->tx_fifo_size; |
6deee222 JP |
1752 | u32 txmode = 0; |
1753 | u32 rxmode = 0; | |
1754 | u32 chan = 0; | |
a0daae13 | 1755 | u8 qmode = 0; |
f88203a2 | 1756 | |
11fbf811 TR |
1757 | if (rxfifosz == 0) |
1758 | rxfifosz = priv->dma_cap.rx_fifo_size; | |
52a76235 JA |
1759 | if (txfifosz == 0) |
1760 | txfifosz = priv->dma_cap.tx_fifo_size; | |
1761 | ||
1762 | /* Adjust for real per queue fifo size */ | |
1763 | rxfifosz /= rx_channels_count; | |
1764 | txfifosz /= tx_channels_count; | |
11fbf811 | 1765 | |
6deee222 JP |
1766 | if (priv->plat->force_thresh_dma_mode) { |
1767 | txmode = tc; | |
1768 | rxmode = tc; | |
1769 | } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) { | |
61b8013a SK |
1770 | /* |
1771 | * In case of GMAC, SF mode can be enabled | |
1772 | * to perform the TX COE in HW. This depends on: | |
ebbb293f GC |
1773 | * 1) TX COE if actually supported |
1774 | * 2) There is no bugged Jumbo frame support | |
1775 | * that needs to not insert csum in the TDES. | |
1776 | */ | |
6deee222 JP |
1777 | txmode = SF_DMA_MODE; |
1778 | rxmode = SF_DMA_MODE; | |
b2dec116 | 1779 | priv->xstats.threshold = SF_DMA_MODE; |
6deee222 JP |
1780 | } else { |
1781 | txmode = tc; | |
1782 | rxmode = SF_DMA_MODE; | |
1783 | } | |
1784 | ||
1785 | /* configure all channels */ | |
1786 | if (priv->synopsys_id >= DWMAC_CORE_4_00) { | |
a0daae13 JA |
1787 | for (chan = 0; chan < rx_channels_count; chan++) { |
1788 | qmode = priv->plat->rx_queues_cfg[chan].mode_to_use; | |
1789 | ||
6deee222 | 1790 | priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan, |
a0daae13 JA |
1791 | rxfifosz, qmode); |
1792 | } | |
1793 | ||
1794 | for (chan = 0; chan < tx_channels_count; chan++) { | |
1795 | qmode = priv->plat->tx_queues_cfg[chan].mode_to_use; | |
6deee222 | 1796 | |
52a76235 | 1797 | priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan, |
a0daae13 JA |
1798 | txfifosz, qmode); |
1799 | } | |
6deee222 JP |
1800 | } else { |
1801 | priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode, | |
f88203a2 | 1802 | rxfifosz); |
6deee222 | 1803 | } |
47dd7a54 GC |
1804 | } |
1805 | ||
47dd7a54 | 1806 | /** |
732fdf0e | 1807 | * stmmac_tx_clean - to manage the transmission completion |
32ceabca | 1808 | * @priv: driver private structure |
ce736788 | 1809 | * @queue: TX queue index |
732fdf0e | 1810 | * Description: it reclaims the transmit resources after transmission completes. |
47dd7a54 | 1811 | */ |
ce736788 | 1812 | static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue) |
47dd7a54 | 1813 | { |
ce736788 | 1814 | struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; |
38979574 | 1815 | unsigned int bytes_compl = 0, pkts_compl = 0; |
8d5f4b07 | 1816 | unsigned int entry; |
47dd7a54 | 1817 | |
739c8e14 | 1818 | netif_tx_lock(priv->dev); |
a9097a96 | 1819 | |
9125cdd1 GC |
1820 | priv->xstats.tx_clean++; |
1821 | ||
8d5f4b07 | 1822 | entry = tx_q->dirty_tx; |
ce736788 JP |
1823 | while (entry != tx_q->cur_tx) { |
1824 | struct sk_buff *skb = tx_q->tx_skbuff[entry]; | |
c24602ef | 1825 | struct dma_desc *p; |
c363b658 | 1826 | int status; |
c24602ef GC |
1827 | |
1828 | if (priv->extend_desc) | |
ce736788 | 1829 | p = (struct dma_desc *)(tx_q->dma_etx + entry); |
c24602ef | 1830 | else |
ce736788 | 1831 | p = tx_q->dma_tx + entry; |
47dd7a54 | 1832 | |
c363b658 | 1833 | status = priv->hw->desc->tx_status(&priv->dev->stats, |
ceb69499 GC |
1834 | &priv->xstats, p, |
1835 | priv->ioaddr); | |
c363b658 FG |
1836 | /* Check if the descriptor is owned by the DMA */ |
1837 | if (unlikely(status & tx_dma_own)) | |
1838 | break; | |
1839 | ||
1840 | /* Just consider the last segment and ...*/ | |
1841 | if (likely(!(status & tx_not_ls))) { | |
1842 | /* ... verify the status error condition */ | |
1843 | if (unlikely(status & tx_err)) { | |
1844 | priv->dev->stats.tx_errors++; | |
1845 | } else { | |
47dd7a54 GC |
1846 | priv->dev->stats.tx_packets++; |
1847 | priv->xstats.tx_pkt_n++; | |
c363b658 | 1848 | } |
ba1ffd74 | 1849 | stmmac_get_tx_hwtstamp(priv, p, skb); |
47dd7a54 | 1850 | } |
47dd7a54 | 1851 | |
ce736788 JP |
1852 | if (likely(tx_q->tx_skbuff_dma[entry].buf)) { |
1853 | if (tx_q->tx_skbuff_dma[entry].map_as_page) | |
362b37be | 1854 | dma_unmap_page(priv->device, |
ce736788 JP |
1855 | tx_q->tx_skbuff_dma[entry].buf, |
1856 | tx_q->tx_skbuff_dma[entry].len, | |
362b37be GC |
1857 | DMA_TO_DEVICE); |
1858 | else | |
1859 | dma_unmap_single(priv->device, | |
ce736788 JP |
1860 | tx_q->tx_skbuff_dma[entry].buf, |
1861 | tx_q->tx_skbuff_dma[entry].len, | |
362b37be | 1862 | DMA_TO_DEVICE); |
ce736788 JP |
1863 | tx_q->tx_skbuff_dma[entry].buf = 0; |
1864 | tx_q->tx_skbuff_dma[entry].len = 0; | |
1865 | tx_q->tx_skbuff_dma[entry].map_as_page = false; | |
cf32deec | 1866 | } |
f748be53 AT |
1867 | |
1868 | if (priv->hw->mode->clean_desc3) | |
ce736788 | 1869 | priv->hw->mode->clean_desc3(tx_q, p); |
f748be53 | 1870 | |
ce736788 JP |
1871 | tx_q->tx_skbuff_dma[entry].last_segment = false; |
1872 | tx_q->tx_skbuff_dma[entry].is_jumbo = false; | |
47dd7a54 GC |
1873 | |
1874 | if (likely(skb != NULL)) { | |
38979574 BG |
1875 | pkts_compl++; |
1876 | bytes_compl += skb->len; | |
7c565c33 | 1877 | dev_consume_skb_any(skb); |
ce736788 | 1878 | tx_q->tx_skbuff[entry] = NULL; |
47dd7a54 GC |
1879 | } |
1880 | ||
4a7d666a | 1881 | priv->hw->desc->release_tx_desc(p, priv->mode); |
47dd7a54 | 1882 | |
e3ad57c9 | 1883 | entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); |
47dd7a54 | 1884 | } |
ce736788 | 1885 | tx_q->dirty_tx = entry; |
38979574 | 1886 | |
c22a3f48 JP |
1887 | netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue), |
1888 | pkts_compl, bytes_compl); | |
1889 | ||
1890 | if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev, | |
1891 | queue))) && | |
1892 | stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) { | |
38979574 | 1893 | |
739c8e14 LS |
1894 | netif_dbg(priv, tx_done, priv->dev, |
1895 | "%s: restart transmit\n", __func__); | |
c22a3f48 | 1896 | netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue)); |
47dd7a54 | 1897 | } |
d765955d GC |
1898 | |
1899 | if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) { | |
1900 | stmmac_enable_eee_mode(priv); | |
f5351ef7 | 1901 | mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer)); |
d765955d | 1902 | } |
739c8e14 | 1903 | netif_tx_unlock(priv->dev); |
47dd7a54 GC |
1904 | } |
1905 | ||
4f513ecd | 1906 | static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan) |
47dd7a54 | 1907 | { |
4f513ecd | 1908 | priv->hw->dma->enable_dma_irq(priv->ioaddr, chan); |
47dd7a54 GC |
1909 | } |
1910 | ||
4f513ecd | 1911 | static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan) |
47dd7a54 | 1912 | { |
4f513ecd | 1913 | priv->hw->dma->disable_dma_irq(priv->ioaddr, chan); |
47dd7a54 GC |
1914 | } |
1915 | ||
47dd7a54 | 1916 | /** |
732fdf0e | 1917 | * stmmac_tx_err - to manage the tx error |
32ceabca | 1918 | * @priv: driver private structure |
5bacd778 | 1919 | * @chan: channel index |
47dd7a54 | 1920 | * Description: it cleans the descriptors and restarts the transmission |
732fdf0e | 1921 | * in case of transmission errors. |
47dd7a54 | 1922 | */ |
5bacd778 | 1923 | static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan) |
47dd7a54 | 1924 | { |
ce736788 | 1925 | struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; |
c24602ef | 1926 | int i; |
ce736788 | 1927 | |
c22a3f48 | 1928 | netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan)); |
47dd7a54 | 1929 | |
ae4f0d46 | 1930 | stmmac_stop_tx_dma(priv, chan); |
ce736788 | 1931 | dma_free_tx_skbufs(priv, chan); |
e3ad57c9 | 1932 | for (i = 0; i < DMA_TX_SIZE; i++) |
c24602ef | 1933 | if (priv->extend_desc) |
ce736788 | 1934 | priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic, |
c24602ef | 1935 | priv->mode, |
e3ad57c9 | 1936 | (i == DMA_TX_SIZE - 1)); |
c24602ef | 1937 | else |
ce736788 | 1938 | priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i], |
c24602ef | 1939 | priv->mode, |
e3ad57c9 | 1940 | (i == DMA_TX_SIZE - 1)); |
ce736788 JP |
1941 | tx_q->dirty_tx = 0; |
1942 | tx_q->cur_tx = 0; | |
c22a3f48 | 1943 | netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan)); |
ae4f0d46 | 1944 | stmmac_start_tx_dma(priv, chan); |
47dd7a54 GC |
1945 | |
1946 | priv->dev->stats.tx_errors++; | |
c22a3f48 | 1947 | netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan)); |
47dd7a54 GC |
1948 | } |
1949 | ||
6deee222 JP |
1950 | /** |
1951 | * stmmac_set_dma_operation_mode - Set DMA operation mode by channel | |
1952 | * @priv: driver private structure | |
1953 | * @txmode: TX operating mode | |
1954 | * @rxmode: RX operating mode | |
1955 | * @chan: channel index | |
1956 | * Description: it is used for configuring of the DMA operation mode in | |
1957 | * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward | |
1958 | * mode. | |
1959 | */ | |
1960 | static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode, | |
1961 | u32 rxmode, u32 chan) | |
1962 | { | |
a0daae13 JA |
1963 | u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use; |
1964 | u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use; | |
52a76235 JA |
1965 | u32 rx_channels_count = priv->plat->rx_queues_to_use; |
1966 | u32 tx_channels_count = priv->plat->tx_queues_to_use; | |
6deee222 | 1967 | int rxfifosz = priv->plat->rx_fifo_size; |
52a76235 | 1968 | int txfifosz = priv->plat->tx_fifo_size; |
6deee222 JP |
1969 | |
1970 | if (rxfifosz == 0) | |
1971 | rxfifosz = priv->dma_cap.rx_fifo_size; | |
52a76235 JA |
1972 | if (txfifosz == 0) |
1973 | txfifosz = priv->dma_cap.tx_fifo_size; | |
1974 | ||
1975 | /* Adjust for real per queue fifo size */ | |
1976 | rxfifosz /= rx_channels_count; | |
1977 | txfifosz /= tx_channels_count; | |
6deee222 JP |
1978 | |
1979 | if (priv->synopsys_id >= DWMAC_CORE_4_00) { | |
1980 | priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan, | |
a0daae13 | 1981 | rxfifosz, rxqmode); |
52a76235 | 1982 | priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan, |
a0daae13 | 1983 | txfifosz, txqmode); |
6deee222 JP |
1984 | } else { |
1985 | priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode, | |
1986 | rxfifosz); | |
1987 | } | |
1988 | } | |
1989 | ||
32ceabca | 1990 | /** |
732fdf0e | 1991 | * stmmac_dma_interrupt - DMA ISR |
32ceabca GC |
1992 | * @priv: driver private structure |
1993 | * Description: this is the DMA ISR. It is called by the main ISR. | |
732fdf0e GC |
1994 | * It calls the dwmac dma routine and schedule poll method in case of some |
1995 | * work can be done. | |
32ceabca | 1996 | */ |
aec7ff27 GC |
1997 | static void stmmac_dma_interrupt(struct stmmac_priv *priv) |
1998 | { | |
d62a107a | 1999 | u32 tx_channel_count = priv->plat->tx_queues_to_use; |
aec7ff27 | 2000 | int status; |
d62a107a JP |
2001 | u32 chan; |
2002 | ||
2003 | for (chan = 0; chan < tx_channel_count; chan++) { | |
c22a3f48 JP |
2004 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan]; |
2005 | ||
d62a107a JP |
2006 | status = priv->hw->dma->dma_interrupt(priv->ioaddr, |
2007 | &priv->xstats, chan); | |
2008 | if (likely((status & handle_rx)) || (status & handle_tx)) { | |
c22a3f48 | 2009 | if (likely(napi_schedule_prep(&rx_q->napi))) { |
d62a107a | 2010 | stmmac_disable_dma_irq(priv, chan); |
c22a3f48 | 2011 | __napi_schedule(&rx_q->napi); |
d62a107a | 2012 | } |
9125cdd1 | 2013 | } |
6deee222 | 2014 | |
d62a107a JP |
2015 | if (unlikely(status & tx_hard_error_bump_tc)) { |
2016 | /* Try to bump up the dma threshold on this failure */ | |
2017 | if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && | |
2018 | (tc <= 256)) { | |
2019 | tc += 64; | |
2020 | if (priv->plat->force_thresh_dma_mode) | |
2021 | stmmac_set_dma_operation_mode(priv, | |
2022 | tc, | |
2023 | tc, | |
2024 | chan); | |
2025 | else | |
2026 | stmmac_set_dma_operation_mode(priv, | |
2027 | tc, | |
2028 | SF_DMA_MODE, | |
2029 | chan); | |
2030 | priv->xstats.threshold = tc; | |
2031 | } | |
2032 | } else if (unlikely(status == tx_hard_error)) { | |
2033 | stmmac_tx_err(priv, chan); | |
47dd7a54 | 2034 | } |
d62a107a | 2035 | } |
47dd7a54 GC |
2036 | } |
2037 | ||
32ceabca GC |
2038 | /** |
2039 | * stmmac_mmc_setup: setup the Mac Management Counters (MMC) | |
2040 | * @priv: driver private structure | |
2041 | * Description: this masks the MMC irq, in fact, the counters are managed in SW. | |
2042 | */ | |
1c901a46 GC |
2043 | static void stmmac_mmc_setup(struct stmmac_priv *priv) |
2044 | { | |
2045 | unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET | | |
36ff7c1e | 2046 | MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET; |
1c901a46 | 2047 | |
ba1ffd74 GC |
2048 | if (priv->synopsys_id >= DWMAC_CORE_4_00) { |
2049 | priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET; | |
f748be53 | 2050 | priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET; |
ba1ffd74 GC |
2051 | } else { |
2052 | priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET; | |
f748be53 | 2053 | priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET; |
ba1ffd74 | 2054 | } |
36ff7c1e AT |
2055 | |
2056 | dwmac_mmc_intr_all_mask(priv->mmcaddr); | |
4f795b25 GC |
2057 | |
2058 | if (priv->dma_cap.rmon) { | |
36ff7c1e | 2059 | dwmac_mmc_ctrl(priv->mmcaddr, mode); |
4f795b25 GC |
2060 | memset(&priv->mmc, 0, sizeof(struct stmmac_counters)); |
2061 | } else | |
38ddc59d | 2062 | netdev_info(priv->dev, "No MAC Management Counters available\n"); |
1c901a46 GC |
2063 | } |
2064 | ||
19e30c14 | 2065 | /** |
732fdf0e | 2066 | * stmmac_selec_desc_mode - to select among: normal/alternate/extend descriptors |
32ceabca GC |
2067 | * @priv: driver private structure |
2068 | * Description: select the Enhanced/Alternate or Normal descriptors. | |
732fdf0e GC |
2069 | * In case of Enhanced/Alternate, it checks if the extended descriptors are |
2070 | * supported by the HW capability register. | |
ff3dd78c | 2071 | */ |
19e30c14 GC |
2072 | static void stmmac_selec_desc_mode(struct stmmac_priv *priv) |
2073 | { | |
2074 | if (priv->plat->enh_desc) { | |
38ddc59d | 2075 | dev_info(priv->device, "Enhanced/Alternate descriptors\n"); |
c24602ef GC |
2076 | |
2077 | /* GMAC older than 3.50 has no extended descriptors */ | |
2078 | if (priv->synopsys_id >= DWMAC_CORE_3_50) { | |
38ddc59d | 2079 | dev_info(priv->device, "Enabled extended descriptors\n"); |
c24602ef GC |
2080 | priv->extend_desc = 1; |
2081 | } else | |
38ddc59d | 2082 | dev_warn(priv->device, "Extended descriptors not supported\n"); |
c24602ef | 2083 | |
19e30c14 GC |
2084 | priv->hw->desc = &enh_desc_ops; |
2085 | } else { | |
38ddc59d | 2086 | dev_info(priv->device, "Normal descriptors\n"); |
19e30c14 GC |
2087 | priv->hw->desc = &ndesc_ops; |
2088 | } | |
2089 | } | |
2090 | ||
2091 | /** | |
732fdf0e | 2092 | * stmmac_get_hw_features - get MAC capabilities from the HW cap. register. |
32ceabca | 2093 | * @priv: driver private structure |
19e30c14 GC |
2094 | * Description: |
2095 | * new GMAC chip generations have a new register to indicate the | |
2096 | * presence of the optional feature/functions. | |
2097 | * This can be also used to override the value passed through the | |
2098 | * platform and necessary for old MAC10/100 and GMAC chips. | |
e7434821 GC |
2099 | */ |
2100 | static int stmmac_get_hw_features(struct stmmac_priv *priv) | |
2101 | { | |
f10a6a35 | 2102 | u32 ret = 0; |
3c20f72f | 2103 | |
5e6efe88 | 2104 | if (priv->hw->dma->get_hw_feature) { |
f10a6a35 AT |
2105 | priv->hw->dma->get_hw_feature(priv->ioaddr, |
2106 | &priv->dma_cap); | |
2107 | ret = 1; | |
19e30c14 | 2108 | } |
e7434821 | 2109 | |
f10a6a35 | 2110 | return ret; |
e7434821 GC |
2111 | } |
2112 | ||
32ceabca | 2113 | /** |
732fdf0e | 2114 | * stmmac_check_ether_addr - check if the MAC addr is valid |
32ceabca GC |
2115 | * @priv: driver private structure |
2116 | * Description: | |
2117 | * it is to verify if the MAC address is valid, in case of failures it | |
2118 | * generates a random MAC address | |
2119 | */ | |
bfab27a1 GC |
2120 | static void stmmac_check_ether_addr(struct stmmac_priv *priv) |
2121 | { | |
bfab27a1 | 2122 | if (!is_valid_ether_addr(priv->dev->dev_addr)) { |
7ed24bbe | 2123 | priv->hw->mac->get_umac_addr(priv->hw, |
bfab27a1 | 2124 | priv->dev->dev_addr, 0); |
ceb69499 | 2125 | if (!is_valid_ether_addr(priv->dev->dev_addr)) |
f2cedb63 | 2126 | eth_hw_addr_random(priv->dev); |
38ddc59d LC |
2127 | netdev_info(priv->dev, "device MAC address %pM\n", |
2128 | priv->dev->dev_addr); | |
bfab27a1 | 2129 | } |
bfab27a1 GC |
2130 | } |
2131 | ||
32ceabca | 2132 | /** |
732fdf0e | 2133 | * stmmac_init_dma_engine - DMA init. |
32ceabca GC |
2134 | * @priv: driver private structure |
2135 | * Description: | |
2136 | * It inits the DMA invoking the specific MAC/GMAC callback. | |
2137 | * Some DMA parameters can be passed from the platform; | |
2138 | * in case of these are not passed a default is kept for the MAC or GMAC. | |
2139 | */ | |
0f1f88a8 GC |
2140 | static int stmmac_init_dma_engine(struct stmmac_priv *priv) |
2141 | { | |
47f2a9ce JP |
2142 | u32 rx_channels_count = priv->plat->rx_queues_to_use; |
2143 | u32 tx_channels_count = priv->plat->tx_queues_to_use; | |
54139cf3 | 2144 | struct stmmac_rx_queue *rx_q; |
ce736788 | 2145 | struct stmmac_tx_queue *tx_q; |
47f2a9ce JP |
2146 | u32 dummy_dma_rx_phy = 0; |
2147 | u32 dummy_dma_tx_phy = 0; | |
2148 | u32 chan = 0; | |
c24602ef | 2149 | int atds = 0; |
495db273 | 2150 | int ret = 0; |
0f1f88a8 | 2151 | |
a332e2fa NC |
2152 | if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) { |
2153 | dev_err(priv->device, "Invalid DMA configuration\n"); | |
89ab75bf | 2154 | return -EINVAL; |
0f1f88a8 GC |
2155 | } |
2156 | ||
c24602ef GC |
2157 | if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE)) |
2158 | atds = 1; | |
2159 | ||
495db273 GC |
2160 | ret = priv->hw->dma->reset(priv->ioaddr); |
2161 | if (ret) { | |
2162 | dev_err(priv->device, "Failed to reset the dma\n"); | |
2163 | return ret; | |
2164 | } | |
2165 | ||
f748be53 | 2166 | if (priv->synopsys_id >= DWMAC_CORE_4_00) { |
47f2a9ce JP |
2167 | /* DMA Configuration */ |
2168 | priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg, | |
2169 | dummy_dma_tx_phy, dummy_dma_rx_phy, atds); | |
2170 | ||
2171 | /* DMA RX Channel Configuration */ | |
2172 | for (chan = 0; chan < rx_channels_count; chan++) { | |
54139cf3 JP |
2173 | rx_q = &priv->rx_queue[chan]; |
2174 | ||
47f2a9ce JP |
2175 | priv->hw->dma->init_rx_chan(priv->ioaddr, |
2176 | priv->plat->dma_cfg, | |
54139cf3 | 2177 | rx_q->dma_rx_phy, chan); |
47f2a9ce | 2178 | |
54139cf3 | 2179 | rx_q->rx_tail_addr = rx_q->dma_rx_phy + |
47f2a9ce JP |
2180 | (DMA_RX_SIZE * sizeof(struct dma_desc)); |
2181 | priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, | |
54139cf3 | 2182 | rx_q->rx_tail_addr, |
47f2a9ce JP |
2183 | chan); |
2184 | } | |
2185 | ||
2186 | /* DMA TX Channel Configuration */ | |
2187 | for (chan = 0; chan < tx_channels_count; chan++) { | |
ce736788 JP |
2188 | tx_q = &priv->tx_queue[chan]; |
2189 | ||
47f2a9ce | 2190 | priv->hw->dma->init_chan(priv->ioaddr, |
ce736788 JP |
2191 | priv->plat->dma_cfg, |
2192 | chan); | |
47f2a9ce JP |
2193 | |
2194 | priv->hw->dma->init_tx_chan(priv->ioaddr, | |
2195 | priv->plat->dma_cfg, | |
ce736788 | 2196 | tx_q->dma_tx_phy, chan); |
47f2a9ce | 2197 | |
ce736788 | 2198 | tx_q->tx_tail_addr = tx_q->dma_tx_phy + |
47f2a9ce JP |
2199 | (DMA_TX_SIZE * sizeof(struct dma_desc)); |
2200 | priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, | |
ce736788 | 2201 | tx_q->tx_tail_addr, |
47f2a9ce JP |
2202 | chan); |
2203 | } | |
2204 | } else { | |
54139cf3 | 2205 | rx_q = &priv->rx_queue[chan]; |
ce736788 | 2206 | tx_q = &priv->tx_queue[chan]; |
47f2a9ce | 2207 | priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg, |
ce736788 | 2208 | tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds); |
f748be53 AT |
2209 | } |
2210 | ||
2211 | if (priv->plat->axi && priv->hw->dma->axi) | |
afea0365 GC |
2212 | priv->hw->dma->axi(priv->ioaddr, priv->plat->axi); |
2213 | ||
495db273 | 2214 | return ret; |
0f1f88a8 GC |
2215 | } |
2216 | ||
9125cdd1 | 2217 | /** |
732fdf0e | 2218 | * stmmac_tx_timer - mitigation sw timer for tx. |
9125cdd1 GC |
2219 | * @data: data pointer |
2220 | * Description: | |
2221 | * This is the timer handler to directly invoke the stmmac_tx_clean. | |
2222 | */ | |
e99e88a9 | 2223 | static void stmmac_tx_timer(struct timer_list *t) |
9125cdd1 | 2224 | { |
e99e88a9 | 2225 | struct stmmac_priv *priv = from_timer(priv, t, txtimer); |
ce736788 JP |
2226 | u32 tx_queues_count = priv->plat->tx_queues_to_use; |
2227 | u32 queue; | |
9125cdd1 | 2228 | |
ce736788 JP |
2229 | /* let's scan all the tx queues */ |
2230 | for (queue = 0; queue < tx_queues_count; queue++) | |
2231 | stmmac_tx_clean(priv, queue); | |
9125cdd1 GC |
2232 | } |
2233 | ||
2234 | /** | |
732fdf0e | 2235 | * stmmac_init_tx_coalesce - init tx mitigation options. |
32ceabca | 2236 | * @priv: driver private structure |
9125cdd1 GC |
2237 | * Description: |
2238 | * This inits the transmit coalesce parameters: i.e. timer rate, | |
2239 | * timer handler and default threshold used for enabling the | |
2240 | * interrupt on completion bit. | |
2241 | */ | |
2242 | static void stmmac_init_tx_coalesce(struct stmmac_priv *priv) | |
2243 | { | |
2244 | priv->tx_coal_frames = STMMAC_TX_FRAMES; | |
2245 | priv->tx_coal_timer = STMMAC_COAL_TX_TIMER; | |
e99e88a9 | 2246 | timer_setup(&priv->txtimer, stmmac_tx_timer, 0); |
9125cdd1 | 2247 | priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer); |
9125cdd1 GC |
2248 | add_timer(&priv->txtimer); |
2249 | } | |
2250 | ||
4854ab99 JP |
2251 | static void stmmac_set_rings_length(struct stmmac_priv *priv) |
2252 | { | |
2253 | u32 rx_channels_count = priv->plat->rx_queues_to_use; | |
2254 | u32 tx_channels_count = priv->plat->tx_queues_to_use; | |
2255 | u32 chan; | |
2256 | ||
2257 | /* set TX ring length */ | |
2258 | if (priv->hw->dma->set_tx_ring_len) { | |
2259 | for (chan = 0; chan < tx_channels_count; chan++) | |
2260 | priv->hw->dma->set_tx_ring_len(priv->ioaddr, | |
2261 | (DMA_TX_SIZE - 1), chan); | |
2262 | } | |
2263 | ||
2264 | /* set RX ring length */ | |
2265 | if (priv->hw->dma->set_rx_ring_len) { | |
2266 | for (chan = 0; chan < rx_channels_count; chan++) | |
2267 | priv->hw->dma->set_rx_ring_len(priv->ioaddr, | |
2268 | (DMA_RX_SIZE - 1), chan); | |
2269 | } | |
2270 | } | |
2271 | ||
6a3a7193 JP |
2272 | /** |
2273 | * stmmac_set_tx_queue_weight - Set TX queue weight | |
2274 | * @priv: driver private structure | |
2275 | * Description: It is used for setting TX queues weight | |
2276 | */ | |
2277 | static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv) | |
2278 | { | |
2279 | u32 tx_queues_count = priv->plat->tx_queues_to_use; | |
2280 | u32 weight; | |
2281 | u32 queue; | |
2282 | ||
2283 | for (queue = 0; queue < tx_queues_count; queue++) { | |
2284 | weight = priv->plat->tx_queues_cfg[queue].weight; | |
2285 | priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue); | |
2286 | } | |
2287 | } | |
2288 | ||
19d91873 JP |
2289 | /** |
2290 | * stmmac_configure_cbs - Configure CBS in TX queue | |
2291 | * @priv: driver private structure | |
2292 | * Description: It is used for configuring CBS in AVB TX queues | |
2293 | */ | |
2294 | static void stmmac_configure_cbs(struct stmmac_priv *priv) | |
2295 | { | |
2296 | u32 tx_queues_count = priv->plat->tx_queues_to_use; | |
2297 | u32 mode_to_use; | |
2298 | u32 queue; | |
2299 | ||
44781fef JP |
2300 | /* queue 0 is reserved for legacy traffic */ |
2301 | for (queue = 1; queue < tx_queues_count; queue++) { | |
19d91873 JP |
2302 | mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use; |
2303 | if (mode_to_use == MTL_QUEUE_DCB) | |
2304 | continue; | |
2305 | ||
2306 | priv->hw->mac->config_cbs(priv->hw, | |
2307 | priv->plat->tx_queues_cfg[queue].send_slope, | |
2308 | priv->plat->tx_queues_cfg[queue].idle_slope, | |
2309 | priv->plat->tx_queues_cfg[queue].high_credit, | |
2310 | priv->plat->tx_queues_cfg[queue].low_credit, | |
2311 | queue); | |
2312 | } | |
2313 | } | |
2314 | ||
d43042f4 JP |
2315 | /** |
2316 | * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel | |
2317 | * @priv: driver private structure | |
2318 | * Description: It is used for mapping RX queues to RX dma channels | |
2319 | */ | |
2320 | static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv) | |
2321 | { | |
2322 | u32 rx_queues_count = priv->plat->rx_queues_to_use; | |
2323 | u32 queue; | |
2324 | u32 chan; | |
2325 | ||
2326 | for (queue = 0; queue < rx_queues_count; queue++) { | |
2327 | chan = priv->plat->rx_queues_cfg[queue].chan; | |
2328 | priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan); | |
2329 | } | |
2330 | } | |
2331 | ||
a8f5102a JP |
2332 | /** |
2333 | * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority | |
2334 | * @priv: driver private structure | |
2335 | * Description: It is used for configuring the RX Queue Priority | |
2336 | */ | |
2337 | static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv) | |
2338 | { | |
2339 | u32 rx_queues_count = priv->plat->rx_queues_to_use; | |
2340 | u32 queue; | |
2341 | u32 prio; | |
2342 | ||
2343 | for (queue = 0; queue < rx_queues_count; queue++) { | |
2344 | if (!priv->plat->rx_queues_cfg[queue].use_prio) | |
2345 | continue; | |
2346 | ||
2347 | prio = priv->plat->rx_queues_cfg[queue].prio; | |
2348 | priv->hw->mac->rx_queue_prio(priv->hw, prio, queue); | |
2349 | } | |
2350 | } | |
2351 | ||
2352 | /** | |
2353 | * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority | |
2354 | * @priv: driver private structure | |
2355 | * Description: It is used for configuring the TX Queue Priority | |
2356 | */ | |
2357 | static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv) | |
2358 | { | |
2359 | u32 tx_queues_count = priv->plat->tx_queues_to_use; | |
2360 | u32 queue; | |
2361 | u32 prio; | |
2362 | ||
2363 | for (queue = 0; queue < tx_queues_count; queue++) { | |
2364 | if (!priv->plat->tx_queues_cfg[queue].use_prio) | |
2365 | continue; | |
2366 | ||
2367 | prio = priv->plat->tx_queues_cfg[queue].prio; | |
2368 | priv->hw->mac->tx_queue_prio(priv->hw, prio, queue); | |
2369 | } | |
2370 | } | |
2371 | ||
abe80fdc JP |
2372 | /** |
2373 | * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing | |
2374 | * @priv: driver private structure | |
2375 | * Description: It is used for configuring the RX queue routing | |
2376 | */ | |
2377 | static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv) | |
2378 | { | |
2379 | u32 rx_queues_count = priv->plat->rx_queues_to_use; | |
2380 | u32 queue; | |
2381 | u8 packet; | |
2382 | ||
2383 | for (queue = 0; queue < rx_queues_count; queue++) { | |
2384 | /* no specific packet type routing specified for the queue */ | |
2385 | if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0) | |
2386 | continue; | |
2387 | ||
2388 | packet = priv->plat->rx_queues_cfg[queue].pkt_route; | |
2389 | priv->hw->mac->rx_queue_prio(priv->hw, packet, queue); | |
2390 | } | |
2391 | } | |
2392 | ||
d0a9c9f9 JP |
2393 | /** |
2394 | * stmmac_mtl_configuration - Configure MTL | |
2395 | * @priv: driver private structure | |
2396 | * Description: It is used for configurring MTL | |
2397 | */ | |
2398 | static void stmmac_mtl_configuration(struct stmmac_priv *priv) | |
2399 | { | |
2400 | u32 rx_queues_count = priv->plat->rx_queues_to_use; | |
2401 | u32 tx_queues_count = priv->plat->tx_queues_to_use; | |
2402 | ||
6a3a7193 JP |
2403 | if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight) |
2404 | stmmac_set_tx_queue_weight(priv); | |
2405 | ||
d0a9c9f9 JP |
2406 | /* Configure MTL RX algorithms */ |
2407 | if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms) | |
2408 | priv->hw->mac->prog_mtl_rx_algorithms(priv->hw, | |
2409 | priv->plat->rx_sched_algorithm); | |
2410 | ||
2411 | /* Configure MTL TX algorithms */ | |
2412 | if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms) | |
2413 | priv->hw->mac->prog_mtl_tx_algorithms(priv->hw, | |
2414 | priv->plat->tx_sched_algorithm); | |
2415 | ||
19d91873 JP |
2416 | /* Configure CBS in AVB TX queues */ |
2417 | if (tx_queues_count > 1 && priv->hw->mac->config_cbs) | |
2418 | stmmac_configure_cbs(priv); | |
2419 | ||
d43042f4 | 2420 | /* Map RX MTL to DMA channels */ |
03cf65a9 | 2421 | if (priv->hw->mac->map_mtl_to_dma) |
d43042f4 JP |
2422 | stmmac_rx_queue_dma_chan_map(priv); |
2423 | ||
d0a9c9f9 | 2424 | /* Enable MAC RX Queues */ |
f3976874 | 2425 | if (priv->hw->mac->rx_queue_enable) |
d0a9c9f9 | 2426 | stmmac_mac_enable_rx_queues(priv); |
6deee222 | 2427 | |
a8f5102a JP |
2428 | /* Set RX priorities */ |
2429 | if (rx_queues_count > 1 && priv->hw->mac->rx_queue_prio) | |
2430 | stmmac_mac_config_rx_queues_prio(priv); | |
2431 | ||
2432 | /* Set TX priorities */ | |
2433 | if (tx_queues_count > 1 && priv->hw->mac->tx_queue_prio) | |
2434 | stmmac_mac_config_tx_queues_prio(priv); | |
abe80fdc JP |
2435 | |
2436 | /* Set RX routing */ | |
2437 | if (rx_queues_count > 1 && priv->hw->mac->rx_queue_routing) | |
2438 | stmmac_mac_config_rx_queues_routing(priv); | |
d0a9c9f9 JP |
2439 | } |
2440 | ||
523f11b5 | 2441 | /** |
732fdf0e | 2442 | * stmmac_hw_setup - setup mac in a usable state. |
523f11b5 SK |
2443 | * @dev : pointer to the device structure. |
2444 | * Description: | |
732fdf0e GC |
2445 | * this is the main function to setup the HW in a usable state because the |
2446 | * dma engine is reset, the core registers are configured (e.g. AXI, | |
2447 | * Checksum features, timers). The DMA is ready to start receiving and | |
2448 | * transmitting. | |
523f11b5 SK |
2449 | * Return value: |
2450 | * 0 on success and an appropriate (-)ve integer as defined in errno.h | |
2451 | * file on failure. | |
2452 | */ | |
fe131929 | 2453 | static int stmmac_hw_setup(struct net_device *dev, bool init_ptp) |
523f11b5 SK |
2454 | { |
2455 | struct stmmac_priv *priv = netdev_priv(dev); | |
3c55d4d0 | 2456 | u32 rx_cnt = priv->plat->rx_queues_to_use; |
146617b8 JP |
2457 | u32 tx_cnt = priv->plat->tx_queues_to_use; |
2458 | u32 chan; | |
523f11b5 SK |
2459 | int ret; |
2460 | ||
523f11b5 SK |
2461 | /* DMA initialization and SW reset */ |
2462 | ret = stmmac_init_dma_engine(priv); | |
2463 | if (ret < 0) { | |
38ddc59d LC |
2464 | netdev_err(priv->dev, "%s: DMA engine initialization failed\n", |
2465 | __func__); | |
523f11b5 SK |
2466 | return ret; |
2467 | } | |
2468 | ||
2469 | /* Copy the MAC addr into the HW */ | |
7ed24bbe | 2470 | priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0); |
523f11b5 | 2471 | |
02e57b9d GC |
2472 | /* PS and related bits will be programmed according to the speed */ |
2473 | if (priv->hw->pcs) { | |
2474 | int speed = priv->plat->mac_port_sel_speed; | |
2475 | ||
2476 | if ((speed == SPEED_10) || (speed == SPEED_100) || | |
2477 | (speed == SPEED_1000)) { | |
2478 | priv->hw->ps = speed; | |
2479 | } else { | |
2480 | dev_warn(priv->device, "invalid port speed\n"); | |
2481 | priv->hw->ps = 0; | |
2482 | } | |
2483 | } | |
2484 | ||
523f11b5 | 2485 | /* Initialize the MAC Core */ |
7ed24bbe | 2486 | priv->hw->mac->core_init(priv->hw, dev->mtu); |
523f11b5 | 2487 | |
d0a9c9f9 JP |
2488 | /* Initialize MTL*/ |
2489 | if (priv->synopsys_id >= DWMAC_CORE_4_00) | |
2490 | stmmac_mtl_configuration(priv); | |
9eb12474 | 2491 | |
978aded4 GC |
2492 | ret = priv->hw->mac->rx_ipc(priv->hw); |
2493 | if (!ret) { | |
38ddc59d | 2494 | netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n"); |
978aded4 | 2495 | priv->plat->rx_coe = STMMAC_RX_COE_NONE; |
d2afb5bd | 2496 | priv->hw->rx_csum = 0; |
978aded4 GC |
2497 | } |
2498 | ||
523f11b5 | 2499 | /* Enable the MAC Rx/Tx */ |
270c7759 | 2500 | priv->hw->mac->set_mac(priv->ioaddr, true); |
523f11b5 | 2501 | |
b4f0a661 JP |
2502 | /* Set the HW DMA mode and the COE */ |
2503 | stmmac_dma_operation_mode(priv); | |
2504 | ||
523f11b5 SK |
2505 | stmmac_mmc_setup(priv); |
2506 | ||
fe131929 | 2507 | if (init_ptp) { |
0ad2be79 TR |
2508 | ret = clk_prepare_enable(priv->plat->clk_ptp_ref); |
2509 | if (ret < 0) | |
2510 | netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret); | |
2511 | ||
fe131929 | 2512 | ret = stmmac_init_ptp(priv); |
722eef28 HK |
2513 | if (ret == -EOPNOTSUPP) |
2514 | netdev_warn(priv->dev, "PTP not supported by HW\n"); | |
2515 | else if (ret) | |
2516 | netdev_warn(priv->dev, "PTP init failed\n"); | |
fe131929 | 2517 | } |
523f11b5 | 2518 | |
50fb4f74 | 2519 | #ifdef CONFIG_DEBUG_FS |
523f11b5 SK |
2520 | ret = stmmac_init_fs(dev); |
2521 | if (ret < 0) | |
38ddc59d LC |
2522 | netdev_warn(priv->dev, "%s: failed debugFS registration\n", |
2523 | __func__); | |
523f11b5 SK |
2524 | #endif |
2525 | /* Start the ball rolling... */ | |
ae4f0d46 | 2526 | stmmac_start_all_dma(priv); |
523f11b5 | 2527 | |
523f11b5 SK |
2528 | priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS; |
2529 | ||
523f11b5 SK |
2530 | if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) { |
2531 | priv->rx_riwt = MAX_DMA_RIWT; | |
3c55d4d0 | 2532 | priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT, rx_cnt); |
523f11b5 SK |
2533 | } |
2534 | ||
3fe5cadb | 2535 | if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane) |
02e57b9d | 2536 | priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0); |
523f11b5 | 2537 | |
4854ab99 JP |
2538 | /* set TX and RX rings length */ |
2539 | stmmac_set_rings_length(priv); | |
2540 | ||
f748be53 | 2541 | /* Enable TSO */ |
146617b8 JP |
2542 | if (priv->tso) { |
2543 | for (chan = 0; chan < tx_cnt; chan++) | |
2544 | priv->hw->dma->enable_tso(priv->ioaddr, 1, chan); | |
2545 | } | |
f748be53 | 2546 | |
523f11b5 SK |
2547 | return 0; |
2548 | } | |
2549 | ||
c66f6c37 TR |
2550 | static void stmmac_hw_teardown(struct net_device *dev) |
2551 | { | |
2552 | struct stmmac_priv *priv = netdev_priv(dev); | |
2553 | ||
2554 | clk_disable_unprepare(priv->plat->clk_ptp_ref); | |
2555 | } | |
2556 | ||
47dd7a54 GC |
2557 | /** |
2558 | * stmmac_open - open entry point of the driver | |
2559 | * @dev : pointer to the device structure. | |
2560 | * Description: | |
2561 | * This function is the open entry point of the driver. | |
2562 | * Return value: | |
2563 | * 0 on success and an appropriate (-)ve integer as defined in errno.h | |
2564 | * file on failure. | |
2565 | */ | |
2566 | static int stmmac_open(struct net_device *dev) | |
2567 | { | |
2568 | struct stmmac_priv *priv = netdev_priv(dev); | |
47dd7a54 GC |
2569 | int ret; |
2570 | ||
4bfcbd7a FV |
2571 | stmmac_check_ether_addr(priv); |
2572 | ||
3fe5cadb GC |
2573 | if (priv->hw->pcs != STMMAC_PCS_RGMII && |
2574 | priv->hw->pcs != STMMAC_PCS_TBI && | |
2575 | priv->hw->pcs != STMMAC_PCS_RTBI) { | |
e58bb43f GC |
2576 | ret = stmmac_init_phy(dev); |
2577 | if (ret) { | |
38ddc59d LC |
2578 | netdev_err(priv->dev, |
2579 | "%s: Cannot attach to PHY (error: %d)\n", | |
2580 | __func__, ret); | |
89df20d9 | 2581 | return ret; |
e58bb43f | 2582 | } |
f66ffe28 | 2583 | } |
47dd7a54 | 2584 | |
523f11b5 SK |
2585 | /* Extra statistics */ |
2586 | memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats)); | |
2587 | priv->xstats.threshold = tc; | |
2588 | ||
5bacd778 | 2589 | priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); |
22ad3838 | 2590 | priv->rx_copybreak = STMMAC_RX_COPYBREAK; |
45ab4b13 | 2591 | priv->mss = 0; |
56329137 | 2592 | |
5bacd778 LC |
2593 | ret = alloc_dma_desc_resources(priv); |
2594 | if (ret < 0) { | |
2595 | netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n", | |
2596 | __func__); | |
2597 | goto dma_desc_error; | |
2598 | } | |
2599 | ||
2600 | ret = init_dma_desc_rings(dev, GFP_KERNEL); | |
2601 | if (ret < 0) { | |
2602 | netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n", | |
2603 | __func__); | |
2604 | goto init_error; | |
2605 | } | |
2606 | ||
fe131929 | 2607 | ret = stmmac_hw_setup(dev, true); |
56329137 | 2608 | if (ret < 0) { |
38ddc59d | 2609 | netdev_err(priv->dev, "%s: Hw setup failed\n", __func__); |
c9324d18 | 2610 | goto init_error; |
47dd7a54 GC |
2611 | } |
2612 | ||
777da230 GC |
2613 | stmmac_init_tx_coalesce(priv); |
2614 | ||
d6d50c7e PR |
2615 | if (dev->phydev) |
2616 | phy_start(dev->phydev); | |
47dd7a54 | 2617 | |
f66ffe28 GC |
2618 | /* Request the IRQ lines */ |
2619 | ret = request_irq(dev->irq, stmmac_interrupt, | |
ceb69499 | 2620 | IRQF_SHARED, dev->name, dev); |
f66ffe28 | 2621 | if (unlikely(ret < 0)) { |
38ddc59d LC |
2622 | netdev_err(priv->dev, |
2623 | "%s: ERROR: allocating the IRQ %d (error: %d)\n", | |
2624 | __func__, dev->irq, ret); | |
6c1e5abe | 2625 | goto irq_error; |
f66ffe28 GC |
2626 | } |
2627 | ||
7a13f8f5 FV |
2628 | /* Request the Wake IRQ in case of another line is used for WoL */ |
2629 | if (priv->wol_irq != dev->irq) { | |
2630 | ret = request_irq(priv->wol_irq, stmmac_interrupt, | |
2631 | IRQF_SHARED, dev->name, dev); | |
2632 | if (unlikely(ret < 0)) { | |
38ddc59d LC |
2633 | netdev_err(priv->dev, |
2634 | "%s: ERROR: allocating the WoL IRQ %d (%d)\n", | |
2635 | __func__, priv->wol_irq, ret); | |
c9324d18 | 2636 | goto wolirq_error; |
7a13f8f5 FV |
2637 | } |
2638 | } | |
2639 | ||
d765955d | 2640 | /* Request the IRQ lines */ |
d7ec8584 | 2641 | if (priv->lpi_irq > 0) { |
d765955d GC |
2642 | ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED, |
2643 | dev->name, dev); | |
2644 | if (unlikely(ret < 0)) { | |
38ddc59d LC |
2645 | netdev_err(priv->dev, |
2646 | "%s: ERROR: allocating the LPI IRQ %d (%d)\n", | |
2647 | __func__, priv->lpi_irq, ret); | |
c9324d18 | 2648 | goto lpiirq_error; |
d765955d GC |
2649 | } |
2650 | } | |
2651 | ||
c22a3f48 JP |
2652 | stmmac_enable_all_queues(priv); |
2653 | stmmac_start_all_queues(priv); | |
f66ffe28 | 2654 | |
47dd7a54 | 2655 | return 0; |
f66ffe28 | 2656 | |
c9324d18 | 2657 | lpiirq_error: |
d765955d GC |
2658 | if (priv->wol_irq != dev->irq) |
2659 | free_irq(priv->wol_irq, dev); | |
c9324d18 | 2660 | wolirq_error: |
7a13f8f5 | 2661 | free_irq(dev->irq, dev); |
6c1e5abe TR |
2662 | irq_error: |
2663 | if (dev->phydev) | |
2664 | phy_stop(dev->phydev); | |
7a13f8f5 | 2665 | |
6c1e5abe | 2666 | del_timer_sync(&priv->txtimer); |
c66f6c37 | 2667 | stmmac_hw_teardown(dev); |
c9324d18 GC |
2668 | init_error: |
2669 | free_dma_desc_resources(priv); | |
5bacd778 | 2670 | dma_desc_error: |
d6d50c7e PR |
2671 | if (dev->phydev) |
2672 | phy_disconnect(dev->phydev); | |
4bfcbd7a | 2673 | |
f66ffe28 | 2674 | return ret; |
47dd7a54 GC |
2675 | } |
2676 | ||
2677 | /** | |
2678 | * stmmac_release - close entry point of the driver | |
2679 | * @dev : device pointer. | |
2680 | * Description: | |
2681 | * This is the stop entry point of the driver. | |
2682 | */ | |
2683 | static int stmmac_release(struct net_device *dev) | |
2684 | { | |
2685 | struct stmmac_priv *priv = netdev_priv(dev); | |
2686 | ||
d765955d GC |
2687 | if (priv->eee_enabled) |
2688 | del_timer_sync(&priv->eee_ctrl_timer); | |
2689 | ||
47dd7a54 | 2690 | /* Stop and disconnect the PHY */ |
d6d50c7e PR |
2691 | if (dev->phydev) { |
2692 | phy_stop(dev->phydev); | |
2693 | phy_disconnect(dev->phydev); | |
47dd7a54 GC |
2694 | } |
2695 | ||
c22a3f48 | 2696 | stmmac_stop_all_queues(priv); |
47dd7a54 | 2697 | |
c22a3f48 | 2698 | stmmac_disable_all_queues(priv); |
47dd7a54 | 2699 | |
9125cdd1 GC |
2700 | del_timer_sync(&priv->txtimer); |
2701 | ||
47dd7a54 GC |
2702 | /* Free the IRQ lines */ |
2703 | free_irq(dev->irq, dev); | |
7a13f8f5 FV |
2704 | if (priv->wol_irq != dev->irq) |
2705 | free_irq(priv->wol_irq, dev); | |
d7ec8584 | 2706 | if (priv->lpi_irq > 0) |
d765955d | 2707 | free_irq(priv->lpi_irq, dev); |
47dd7a54 GC |
2708 | |
2709 | /* Stop TX/RX DMA and clear the descriptors */ | |
ae4f0d46 | 2710 | stmmac_stop_all_dma(priv); |
47dd7a54 GC |
2711 | |
2712 | /* Release and free the Rx/Tx resources */ | |
2713 | free_dma_desc_resources(priv); | |
2714 | ||
19449bfc | 2715 | /* Disable the MAC Rx/Tx */ |
270c7759 | 2716 | priv->hw->mac->set_mac(priv->ioaddr, false); |
47dd7a54 GC |
2717 | |
2718 | netif_carrier_off(dev); | |
2719 | ||
50fb4f74 | 2720 | #ifdef CONFIG_DEBUG_FS |
466c5ac8 | 2721 | stmmac_exit_fs(dev); |
bfab27a1 | 2722 | #endif |
bfab27a1 | 2723 | |
92ba6888 RK |
2724 | stmmac_release_ptp(priv); |
2725 | ||
47dd7a54 GC |
2726 | return 0; |
2727 | } | |
2728 | ||
f748be53 AT |
2729 | /** |
2730 | * stmmac_tso_allocator - close entry point of the driver | |
2731 | * @priv: driver private structure | |
2732 | * @des: buffer start address | |
2733 | * @total_len: total length to fill in descriptors | |
2734 | * @last_segmant: condition for the last descriptor | |
ce736788 | 2735 | * @queue: TX queue index |
f748be53 AT |
2736 | * Description: |
2737 | * This function fills descriptor and request new descriptors according to | |
2738 | * buffer length to fill | |
2739 | */ | |
2740 | static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des, | |
ce736788 | 2741 | int total_len, bool last_segment, u32 queue) |
f748be53 | 2742 | { |
ce736788 | 2743 | struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; |
f748be53 | 2744 | struct dma_desc *desc; |
5bacd778 | 2745 | u32 buff_size; |
ce736788 | 2746 | int tmp_len; |
f748be53 AT |
2747 | |
2748 | tmp_len = total_len; | |
2749 | ||
2750 | while (tmp_len > 0) { | |
ce736788 JP |
2751 | tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); |
2752 | desc = tx_q->dma_tx + tx_q->cur_tx; | |
f748be53 | 2753 | |
f8be0d78 | 2754 | desc->des0 = cpu_to_le32(des + (total_len - tmp_len)); |
f748be53 AT |
2755 | buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ? |
2756 | TSO_MAX_BUFF_SIZE : tmp_len; | |
2757 | ||
2758 | priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size, | |
2759 | 0, 1, | |
426849e6 | 2760 | (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE), |
f748be53 AT |
2761 | 0, 0); |
2762 | ||
2763 | tmp_len -= TSO_MAX_BUFF_SIZE; | |
2764 | } | |
2765 | } | |
2766 | ||
2767 | /** | |
2768 | * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO) | |
2769 | * @skb : the socket buffer | |
2770 | * @dev : device pointer | |
2771 | * Description: this is the transmit function that is called on TSO frames | |
2772 | * (support available on GMAC4 and newer chips). | |
2773 | * Diagram below show the ring programming in case of TSO frames: | |
2774 | * | |
2775 | * First Descriptor | |
2776 | * -------- | |
2777 | * | DES0 |---> buffer1 = L2/L3/L4 header | |
2778 | * | DES1 |---> TCP Payload (can continue on next descr...) | |
2779 | * | DES2 |---> buffer 1 and 2 len | |
2780 | * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0] | |
2781 | * -------- | |
2782 | * | | |
2783 | * ... | |
2784 | * | | |
2785 | * -------- | |
2786 | * | DES0 | --| Split TCP Payload on Buffers 1 and 2 | |
2787 | * | DES1 | --| | |
2788 | * | DES2 | --> buffer 1 and 2 len | |
2789 | * | DES3 | | |
2790 | * -------- | |
2791 | * | |
2792 | * mss is fixed when enable tso, so w/o programming the TDES3 ctx field. | |
2793 | */ | |
2794 | static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) | |
2795 | { | |
ce736788 | 2796 | struct dma_desc *desc, *first, *mss_desc = NULL; |
f748be53 AT |
2797 | struct stmmac_priv *priv = netdev_priv(dev); |
2798 | int nfrags = skb_shinfo(skb)->nr_frags; | |
ce736788 | 2799 | u32 queue = skb_get_queue_mapping(skb); |
f748be53 | 2800 | unsigned int first_entry, des; |
ce736788 JP |
2801 | struct stmmac_tx_queue *tx_q; |
2802 | int tmp_pay_len = 0; | |
2803 | u32 pay_len, mss; | |
f748be53 AT |
2804 | u8 proto_hdr_len; |
2805 | int i; | |
2806 | ||
ce736788 JP |
2807 | tx_q = &priv->tx_queue[queue]; |
2808 | ||
f748be53 AT |
2809 | /* Compute header lengths */ |
2810 | proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); | |
2811 | ||
2812 | /* Desc availability based on threshold should be enough safe */ | |
ce736788 | 2813 | if (unlikely(stmmac_tx_avail(priv, queue) < |
f748be53 | 2814 | (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) { |
c22a3f48 JP |
2815 | if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) { |
2816 | netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, | |
2817 | queue)); | |
f748be53 | 2818 | /* This is a hard error, log it. */ |
38ddc59d LC |
2819 | netdev_err(priv->dev, |
2820 | "%s: Tx Ring full when queue awake\n", | |
2821 | __func__); | |
f748be53 | 2822 | } |
f748be53 AT |
2823 | return NETDEV_TX_BUSY; |
2824 | } | |
2825 | ||
2826 | pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */ | |
2827 | ||
2828 | mss = skb_shinfo(skb)->gso_size; | |
2829 | ||
2830 | /* set new MSS value if needed */ | |
2831 | if (mss != priv->mss) { | |
ce736788 | 2832 | mss_desc = tx_q->dma_tx + tx_q->cur_tx; |
f748be53 AT |
2833 | priv->hw->desc->set_mss(mss_desc, mss); |
2834 | priv->mss = mss; | |
ce736788 | 2835 | tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); |
f748be53 AT |
2836 | } |
2837 | ||
2838 | if (netif_msg_tx_queued(priv)) { | |
2839 | pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n", | |
2840 | __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss); | |
2841 | pr_info("\tskb->len %d, skb->data_len %d\n", skb->len, | |
2842 | skb->data_len); | |
2843 | } | |
2844 | ||
ce736788 | 2845 | first_entry = tx_q->cur_tx; |
f748be53 | 2846 | |
ce736788 | 2847 | desc = tx_q->dma_tx + first_entry; |
f748be53 AT |
2848 | first = desc; |
2849 | ||
2850 | /* first descriptor: fill Headers on Buf1 */ | |
2851 | des = dma_map_single(priv->device, skb->data, skb_headlen(skb), | |
2852 | DMA_TO_DEVICE); | |
2853 | if (dma_mapping_error(priv->device, des)) | |
2854 | goto dma_map_err; | |
2855 | ||
ce736788 JP |
2856 | tx_q->tx_skbuff_dma[first_entry].buf = des; |
2857 | tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb); | |
f748be53 | 2858 | |
f8be0d78 | 2859 | first->des0 = cpu_to_le32(des); |
f748be53 AT |
2860 | |
2861 | /* Fill start of payload in buff2 of first descriptor */ | |
2862 | if (pay_len) | |
f8be0d78 | 2863 | first->des1 = cpu_to_le32(des + proto_hdr_len); |
f748be53 AT |
2864 | |
2865 | /* If needed take extra descriptors to fill the remaining payload */ | |
2866 | tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE; | |
2867 | ||
ce736788 | 2868 | stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue); |
f748be53 AT |
2869 | |
2870 | /* Prepare fragments */ | |
2871 | for (i = 0; i < nfrags; i++) { | |
2872 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | |
2873 | ||
2874 | des = skb_frag_dma_map(priv->device, frag, 0, | |
2875 | skb_frag_size(frag), | |
2876 | DMA_TO_DEVICE); | |
937071c1 TR |
2877 | if (dma_mapping_error(priv->device, des)) |
2878 | goto dma_map_err; | |
f748be53 AT |
2879 | |
2880 | stmmac_tso_allocator(priv, des, skb_frag_size(frag), | |
ce736788 | 2881 | (i == nfrags - 1), queue); |
f748be53 | 2882 | |
ce736788 JP |
2883 | tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des; |
2884 | tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag); | |
2885 | tx_q->tx_skbuff[tx_q->cur_tx] = NULL; | |
2886 | tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true; | |
f748be53 AT |
2887 | } |
2888 | ||
ce736788 | 2889 | tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true; |
f748be53 | 2890 | |
05cf0d1b NC |
2891 | /* Only the last descriptor gets to point to the skb. */ |
2892 | tx_q->tx_skbuff[tx_q->cur_tx] = skb; | |
2893 | ||
2894 | /* We've used all descriptors we need for this skb, however, | |
2895 | * advance cur_tx so that it references a fresh descriptor. | |
2896 | * ndo_start_xmit will fill this descriptor the next time it's | |
2897 | * called and stmmac_tx_clean may clean up to this descriptor. | |
2898 | */ | |
ce736788 | 2899 | tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); |
f748be53 | 2900 | |
ce736788 | 2901 | if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { |
b3e51069 LC |
2902 | netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", |
2903 | __func__); | |
c22a3f48 | 2904 | netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); |
f748be53 AT |
2905 | } |
2906 | ||
2907 | dev->stats.tx_bytes += skb->len; | |
2908 | priv->xstats.tx_tso_frames++; | |
2909 | priv->xstats.tx_tso_nfrags += nfrags; | |
2910 | ||
2911 | /* Manage tx mitigation */ | |
2912 | priv->tx_count_frames += nfrags + 1; | |
2913 | if (likely(priv->tx_coal_frames > priv->tx_count_frames)) { | |
2914 | mod_timer(&priv->txtimer, | |
2915 | STMMAC_COAL_TIMER(priv->tx_coal_timer)); | |
2916 | } else { | |
2917 | priv->tx_count_frames = 0; | |
2918 | priv->hw->desc->set_tx_ic(desc); | |
2919 | priv->xstats.tx_set_ic_bit++; | |
2920 | } | |
2921 | ||
74abc9b1 | 2922 | skb_tx_timestamp(skb); |
f748be53 AT |
2923 | |
2924 | if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && | |
2925 | priv->hwts_tx_en)) { | |
2926 | /* declare that device is doing timestamping */ | |
2927 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; | |
2928 | priv->hw->desc->enable_tx_timestamp(first); | |
2929 | } | |
2930 | ||
2931 | /* Complete the first descriptor before granting the DMA */ | |
2932 | priv->hw->desc->prepare_tso_tx_desc(first, 1, | |
2933 | proto_hdr_len, | |
2934 | pay_len, | |
ce736788 | 2935 | 1, tx_q->tx_skbuff_dma[first_entry].last_segment, |
f748be53 AT |
2936 | tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len)); |
2937 | ||
2938 | /* If context desc is used to change MSS */ | |
2939 | if (mss_desc) | |
2940 | priv->hw->desc->set_tx_owner(mss_desc); | |
2941 | ||
2942 | /* The own bit must be the latest setting done when prepare the | |
2943 | * descriptor and then barrier is needed to make sure that | |
2944 | * all is coherent before granting the DMA engine. | |
2945 | */ | |
ad688cdb | 2946 | dma_wmb(); |
f748be53 AT |
2947 | |
2948 | if (netif_msg_pktdata(priv)) { | |
2949 | pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n", | |
ce736788 JP |
2950 | __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, |
2951 | tx_q->cur_tx, first, nfrags); | |
f748be53 | 2952 | |
ce736788 | 2953 | priv->hw->desc->display_ring((void *)tx_q->dma_tx, DMA_TX_SIZE, |
f748be53 AT |
2954 | 0); |
2955 | ||
2956 | pr_info(">>> frame to be transmitted: "); | |
2957 | print_pkt(skb->data, skb_headlen(skb)); | |
2958 | } | |
2959 | ||
c22a3f48 | 2960 | netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); |
f748be53 | 2961 | |
ce736788 JP |
2962 | priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr, |
2963 | queue); | |
f748be53 | 2964 | |
f748be53 AT |
2965 | return NETDEV_TX_OK; |
2966 | ||
2967 | dma_map_err: | |
f748be53 AT |
2968 | dev_err(priv->device, "Tx dma map failed\n"); |
2969 | dev_kfree_skb(skb); | |
2970 | priv->dev->stats.tx_dropped++; | |
2971 | return NETDEV_TX_OK; | |
2972 | } | |
2973 | ||
47dd7a54 | 2974 | /** |
732fdf0e | 2975 | * stmmac_xmit - Tx entry point of the driver |
47dd7a54 GC |
2976 | * @skb : the socket buffer |
2977 | * @dev : device pointer | |
32ceabca GC |
2978 | * Description : this is the tx entry point of the driver. |
2979 | * It programs the chain or the ring and supports oversized frames | |
2980 | * and SG feature. | |
47dd7a54 GC |
2981 | */ |
2982 | static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) | |
2983 | { | |
2984 | struct stmmac_priv *priv = netdev_priv(dev); | |
0e80bdc9 | 2985 | unsigned int nopaged_len = skb_headlen(skb); |
4a7d666a | 2986 | int i, csum_insertion = 0, is_jumbo = 0; |
ce736788 | 2987 | u32 queue = skb_get_queue_mapping(skb); |
47dd7a54 | 2988 | int nfrags = skb_shinfo(skb)->nr_frags; |
59423815 CIK |
2989 | int entry; |
2990 | unsigned int first_entry; | |
47dd7a54 | 2991 | struct dma_desc *desc, *first; |
ce736788 | 2992 | struct stmmac_tx_queue *tx_q; |
0e80bdc9 | 2993 | unsigned int enh_desc; |
f748be53 AT |
2994 | unsigned int des; |
2995 | ||
ce736788 JP |
2996 | tx_q = &priv->tx_queue[queue]; |
2997 | ||
f748be53 AT |
2998 | /* Manage oversized TCP frames for GMAC4 device */ |
2999 | if (skb_is_gso(skb) && priv->tso) { | |
9edfa7da | 3000 | if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) |
f748be53 AT |
3001 | return stmmac_tso_xmit(skb, dev); |
3002 | } | |
47dd7a54 | 3003 | |
ce736788 | 3004 | if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) { |
c22a3f48 JP |
3005 | if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) { |
3006 | netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, | |
3007 | queue)); | |
47dd7a54 | 3008 | /* This is a hard error, log it. */ |
38ddc59d LC |
3009 | netdev_err(priv->dev, |
3010 | "%s: Tx Ring full when queue awake\n", | |
3011 | __func__); | |
47dd7a54 GC |
3012 | } |
3013 | return NETDEV_TX_BUSY; | |
3014 | } | |
3015 | ||
d765955d GC |
3016 | if (priv->tx_path_in_lpi_mode) |
3017 | stmmac_disable_eee_mode(priv); | |
3018 | ||
ce736788 | 3019 | entry = tx_q->cur_tx; |
0e80bdc9 | 3020 | first_entry = entry; |
47dd7a54 | 3021 | |
5e982f3b | 3022 | csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL); |
47dd7a54 | 3023 | |
0e80bdc9 | 3024 | if (likely(priv->extend_desc)) |
ce736788 | 3025 | desc = (struct dma_desc *)(tx_q->dma_etx + entry); |
c24602ef | 3026 | else |
ce736788 | 3027 | desc = tx_q->dma_tx + entry; |
c24602ef | 3028 | |
47dd7a54 GC |
3029 | first = desc; |
3030 | ||
0e80bdc9 | 3031 | enh_desc = priv->plat->enh_desc; |
4a7d666a | 3032 | /* To program the descriptors according to the size of the frame */ |
29896a67 GC |
3033 | if (enh_desc) |
3034 | is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc); | |
3035 | ||
f748be53 AT |
3036 | if (unlikely(is_jumbo) && likely(priv->synopsys_id < |
3037 | DWMAC_CORE_4_00)) { | |
ce736788 | 3038 | entry = priv->hw->mode->jumbo_frm(tx_q, skb, csum_insertion); |
362b37be GC |
3039 | if (unlikely(entry < 0)) |
3040 | goto dma_map_err; | |
29896a67 | 3041 | } |
47dd7a54 GC |
3042 | |
3043 | for (i = 0; i < nfrags; i++) { | |
9e903e08 ED |
3044 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
3045 | int len = skb_frag_size(frag); | |
be434d50 | 3046 | bool last_segment = (i == (nfrags - 1)); |
47dd7a54 | 3047 | |
e3ad57c9 GC |
3048 | entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); |
3049 | ||
0e80bdc9 | 3050 | if (likely(priv->extend_desc)) |
ce736788 | 3051 | desc = (struct dma_desc *)(tx_q->dma_etx + entry); |
c24602ef | 3052 | else |
ce736788 | 3053 | desc = tx_q->dma_tx + entry; |
47dd7a54 | 3054 | |
f748be53 AT |
3055 | des = skb_frag_dma_map(priv->device, frag, 0, len, |
3056 | DMA_TO_DEVICE); | |
3057 | if (dma_mapping_error(priv->device, des)) | |
362b37be GC |
3058 | goto dma_map_err; /* should reuse desc w/o issues */ |
3059 | ||
ce736788 | 3060 | tx_q->tx_skbuff[entry] = NULL; |
f748be53 | 3061 | |
ce736788 | 3062 | tx_q->tx_skbuff_dma[entry].buf = des; |
f8be0d78 MW |
3063 | if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) |
3064 | desc->des0 = cpu_to_le32(des); | |
3065 | else | |
3066 | desc->des2 = cpu_to_le32(des); | |
f748be53 | 3067 | |
ce736788 JP |
3068 | tx_q->tx_skbuff_dma[entry].map_as_page = true; |
3069 | tx_q->tx_skbuff_dma[entry].len = len; | |
3070 | tx_q->tx_skbuff_dma[entry].last_segment = last_segment; | |
0e80bdc9 GC |
3071 | |
3072 | /* Prepare the descriptor and set the own bit too */ | |
4a7d666a | 3073 | priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion, |
fe6af0e1 NC |
3074 | priv->mode, 1, last_segment, |
3075 | skb->len); | |
47dd7a54 GC |
3076 | } |
3077 | ||
05cf0d1b NC |
3078 | /* Only the last descriptor gets to point to the skb. */ |
3079 | tx_q->tx_skbuff[entry] = skb; | |
e3ad57c9 | 3080 | |
05cf0d1b NC |
3081 | /* We've used all descriptors we need for this skb, however, |
3082 | * advance cur_tx so that it references a fresh descriptor. | |
3083 | * ndo_start_xmit will fill this descriptor the next time it's | |
3084 | * called and stmmac_tx_clean may clean up to this descriptor. | |
3085 | */ | |
3086 | entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); | |
ce736788 | 3087 | tx_q->cur_tx = entry; |
47dd7a54 | 3088 | |
47dd7a54 | 3089 | if (netif_msg_pktdata(priv)) { |
d0225e7d AT |
3090 | void *tx_head; |
3091 | ||
38ddc59d LC |
3092 | netdev_dbg(priv->dev, |
3093 | "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d", | |
ce736788 | 3094 | __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, |
38ddc59d | 3095 | entry, first, nfrags); |
83d7af64 | 3096 | |
c24602ef | 3097 | if (priv->extend_desc) |
ce736788 | 3098 | tx_head = (void *)tx_q->dma_etx; |
c24602ef | 3099 | else |
ce736788 | 3100 | tx_head = (void *)tx_q->dma_tx; |
d0225e7d AT |
3101 | |
3102 | priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false); | |
c24602ef | 3103 | |
38ddc59d | 3104 | netdev_dbg(priv->dev, ">>> frame to be transmitted: "); |
47dd7a54 GC |
3105 | print_pkt(skb->data, skb->len); |
3106 | } | |
0e80bdc9 | 3107 | |
ce736788 | 3108 | if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { |
b3e51069 LC |
3109 | netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", |
3110 | __func__); | |
c22a3f48 | 3111 | netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); |
47dd7a54 GC |
3112 | } |
3113 | ||
3114 | dev->stats.tx_bytes += skb->len; | |
3115 | ||
0e80bdc9 GC |
3116 | /* According to the coalesce parameter the IC bit for the latest |
3117 | * segment is reset and the timer re-started to clean the tx status. | |
3118 | * This approach takes care about the fragments: desc is the first | |
3119 | * element in case of no SG. | |
3120 | */ | |
3121 | priv->tx_count_frames += nfrags + 1; | |
3122 | if (likely(priv->tx_coal_frames > priv->tx_count_frames)) { | |
3123 | mod_timer(&priv->txtimer, | |
3124 | STMMAC_COAL_TIMER(priv->tx_coal_timer)); | |
3125 | } else { | |
3126 | priv->tx_count_frames = 0; | |
3127 | priv->hw->desc->set_tx_ic(desc); | |
3128 | priv->xstats.tx_set_ic_bit++; | |
891434b1 RK |
3129 | } |
3130 | ||
74abc9b1 | 3131 | skb_tx_timestamp(skb); |
3e82ce12 | 3132 | |
0e80bdc9 GC |
3133 | /* Ready to fill the first descriptor and set the OWN bit w/o any |
3134 | * problems because all the descriptors are actually ready to be | |
3135 | * passed to the DMA engine. | |
3136 | */ | |
3137 | if (likely(!is_jumbo)) { | |
3138 | bool last_segment = (nfrags == 0); | |
3139 | ||
f748be53 AT |
3140 | des = dma_map_single(priv->device, skb->data, |
3141 | nopaged_len, DMA_TO_DEVICE); | |
3142 | if (dma_mapping_error(priv->device, des)) | |
0e80bdc9 GC |
3143 | goto dma_map_err; |
3144 | ||
ce736788 | 3145 | tx_q->tx_skbuff_dma[first_entry].buf = des; |
f8be0d78 MW |
3146 | if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) |
3147 | first->des0 = cpu_to_le32(des); | |
3148 | else | |
3149 | first->des2 = cpu_to_le32(des); | |
f748be53 | 3150 | |
ce736788 JP |
3151 | tx_q->tx_skbuff_dma[first_entry].len = nopaged_len; |
3152 | tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment; | |
0e80bdc9 GC |
3153 | |
3154 | if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && | |
3155 | priv->hwts_tx_en)) { | |
3156 | /* declare that device is doing timestamping */ | |
3157 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; | |
3158 | priv->hw->desc->enable_tx_timestamp(first); | |
3159 | } | |
3160 | ||
3161 | /* Prepare the first descriptor setting the OWN bit too */ | |
3162 | priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len, | |
3163 | csum_insertion, priv->mode, 1, | |
fe6af0e1 | 3164 | last_segment, skb->len); |
0e80bdc9 GC |
3165 | |
3166 | /* The own bit must be the latest setting done when prepare the | |
3167 | * descriptor and then barrier is needed to make sure that | |
3168 | * all is coherent before granting the DMA engine. | |
3169 | */ | |
ad688cdb | 3170 | dma_wmb(); |
0e80bdc9 GC |
3171 | } |
3172 | ||
c22a3f48 | 3173 | netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); |
f748be53 AT |
3174 | |
3175 | if (priv->synopsys_id < DWMAC_CORE_4_00) | |
3176 | priv->hw->dma->enable_dma_transmission(priv->ioaddr); | |
3177 | else | |
ce736788 JP |
3178 | priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr, |
3179 | queue); | |
52f64fae | 3180 | |
362b37be | 3181 | return NETDEV_TX_OK; |
a9097a96 | 3182 | |
362b37be | 3183 | dma_map_err: |
38ddc59d | 3184 | netdev_err(priv->dev, "Tx DMA map failed\n"); |
362b37be GC |
3185 | dev_kfree_skb(skb); |
3186 | priv->dev->stats.tx_dropped++; | |
47dd7a54 GC |
3187 | return NETDEV_TX_OK; |
3188 | } | |
3189 | ||
b9381985 VB |
3190 | static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb) |
3191 | { | |
3192 | struct ethhdr *ehdr; | |
3193 | u16 vlanid; | |
3194 | ||
3195 | if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) == | |
3196 | NETIF_F_HW_VLAN_CTAG_RX && | |
3197 | !__vlan_get_tag(skb, &vlanid)) { | |
3198 | /* pop the vlan tag */ | |
3199 | ehdr = (struct ethhdr *)skb->data; | |
3200 | memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2); | |
3201 | skb_pull(skb, VLAN_HLEN); | |
3202 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid); | |
3203 | } | |
3204 | } | |
3205 | ||
3206 | ||
54139cf3 | 3207 | static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q) |
120e87f9 | 3208 | { |
54139cf3 | 3209 | if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH) |
120e87f9 GC |
3210 | return 0; |
3211 | ||
3212 | return 1; | |
3213 | } | |
3214 | ||
32ceabca | 3215 | /** |
732fdf0e | 3216 | * stmmac_rx_refill - refill used skb preallocated buffers |
32ceabca | 3217 | * @priv: driver private structure |
54139cf3 | 3218 | * @queue: RX queue index |
32ceabca GC |
3219 | * Description : this is to reallocate the skb for the reception process |
3220 | * that is based on zero-copy. | |
3221 | */ | |
54139cf3 | 3222 | static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) |
47dd7a54 | 3223 | { |
54139cf3 JP |
3224 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; |
3225 | int dirty = stmmac_rx_dirty(priv, queue); | |
3226 | unsigned int entry = rx_q->dirty_rx; | |
3227 | ||
47dd7a54 | 3228 | int bfsize = priv->dma_buf_sz; |
47dd7a54 | 3229 | |
e3ad57c9 | 3230 | while (dirty-- > 0) { |
c24602ef GC |
3231 | struct dma_desc *p; |
3232 | ||
3233 | if (priv->extend_desc) | |
54139cf3 | 3234 | p = (struct dma_desc *)(rx_q->dma_erx + entry); |
c24602ef | 3235 | else |
54139cf3 | 3236 | p = rx_q->dma_rx + entry; |
c24602ef | 3237 | |
54139cf3 | 3238 | if (likely(!rx_q->rx_skbuff[entry])) { |
47dd7a54 GC |
3239 | struct sk_buff *skb; |
3240 | ||
acb600de | 3241 | skb = netdev_alloc_skb_ip_align(priv->dev, bfsize); |
120e87f9 GC |
3242 | if (unlikely(!skb)) { |
3243 | /* so for a while no zero-copy! */ | |
54139cf3 | 3244 | rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH; |
120e87f9 GC |
3245 | if (unlikely(net_ratelimit())) |
3246 | dev_err(priv->device, | |
3247 | "fail to alloc skb entry %d\n", | |
3248 | entry); | |
47dd7a54 | 3249 | break; |
120e87f9 | 3250 | } |
47dd7a54 | 3251 | |
54139cf3 JP |
3252 | rx_q->rx_skbuff[entry] = skb; |
3253 | rx_q->rx_skbuff_dma[entry] = | |
47dd7a54 GC |
3254 | dma_map_single(priv->device, skb->data, bfsize, |
3255 | DMA_FROM_DEVICE); | |
362b37be | 3256 | if (dma_mapping_error(priv->device, |
54139cf3 | 3257 | rx_q->rx_skbuff_dma[entry])) { |
38ddc59d | 3258 | netdev_err(priv->dev, "Rx DMA map failed\n"); |
362b37be GC |
3259 | dev_kfree_skb(skb); |
3260 | break; | |
3261 | } | |
286a8372 | 3262 | |
f748be53 | 3263 | if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) { |
54139cf3 | 3264 | p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]); |
f748be53 AT |
3265 | p->des1 = 0; |
3266 | } else { | |
54139cf3 | 3267 | p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]); |
f748be53 AT |
3268 | } |
3269 | if (priv->hw->mode->refill_desc3) | |
54139cf3 | 3270 | priv->hw->mode->refill_desc3(rx_q, p); |
286a8372 | 3271 | |
54139cf3 JP |
3272 | if (rx_q->rx_zeroc_thresh > 0) |
3273 | rx_q->rx_zeroc_thresh--; | |
120e87f9 | 3274 | |
b3e51069 LC |
3275 | netif_dbg(priv, rx_status, priv->dev, |
3276 | "refill entry #%d\n", entry); | |
47dd7a54 | 3277 | } |
ad688cdb | 3278 | dma_wmb(); |
f748be53 AT |
3279 | |
3280 | if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) | |
3281 | priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0); | |
3282 | else | |
3283 | priv->hw->desc->set_rx_owner(p); | |
3284 | ||
ad688cdb | 3285 | dma_wmb(); |
e3ad57c9 GC |
3286 | |
3287 | entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE); | |
47dd7a54 | 3288 | } |
54139cf3 | 3289 | rx_q->dirty_rx = entry; |
47dd7a54 GC |
3290 | } |
3291 | ||
32ceabca | 3292 | /** |
732fdf0e | 3293 | * stmmac_rx - manage the receive process |
32ceabca | 3294 | * @priv: driver private structure |
54139cf3 JP |
3295 | * @limit: napi bugget |
3296 | * @queue: RX queue index. | |
32ceabca GC |
3297 | * Description : this the function called by the napi poll method. |
3298 | * It gets all the frames inside the ring. | |
3299 | */ | |
54139cf3 | 3300 | static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) |
47dd7a54 | 3301 | { |
54139cf3 JP |
3302 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; |
3303 | unsigned int entry = rx_q->cur_rx; | |
3304 | int coe = priv->hw->rx_csum; | |
47dd7a54 GC |
3305 | unsigned int next_entry; |
3306 | unsigned int count = 0; | |
47dd7a54 | 3307 | |
83d7af64 | 3308 | if (netif_msg_rx_status(priv)) { |
d0225e7d AT |
3309 | void *rx_head; |
3310 | ||
38ddc59d | 3311 | netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); |
c24602ef | 3312 | if (priv->extend_desc) |
54139cf3 | 3313 | rx_head = (void *)rx_q->dma_erx; |
c24602ef | 3314 | else |
54139cf3 | 3315 | rx_head = (void *)rx_q->dma_rx; |
d0225e7d AT |
3316 | |
3317 | priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true); | |
47dd7a54 | 3318 | } |
c24602ef | 3319 | while (count < limit) { |
47dd7a54 | 3320 | int status; |
9401bb5c | 3321 | struct dma_desc *p; |
ba1ffd74 | 3322 | struct dma_desc *np; |
47dd7a54 | 3323 | |
c24602ef | 3324 | if (priv->extend_desc) |
54139cf3 | 3325 | p = (struct dma_desc *)(rx_q->dma_erx + entry); |
c24602ef | 3326 | else |
54139cf3 | 3327 | p = rx_q->dma_rx + entry; |
c24602ef | 3328 | |
c1fa3212 FG |
3329 | /* read the status of the incoming frame */ |
3330 | status = priv->hw->desc->rx_status(&priv->dev->stats, | |
3331 | &priv->xstats, p); | |
3332 | /* check if managed by the DMA otherwise go ahead */ | |
3333 | if (unlikely(status & dma_own)) | |
47dd7a54 GC |
3334 | break; |
3335 | ||
3336 | count++; | |
3337 | ||
54139cf3 JP |
3338 | rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE); |
3339 | next_entry = rx_q->cur_rx; | |
e3ad57c9 | 3340 | |
c24602ef | 3341 | if (priv->extend_desc) |
54139cf3 | 3342 | np = (struct dma_desc *)(rx_q->dma_erx + next_entry); |
c24602ef | 3343 | else |
54139cf3 | 3344 | np = rx_q->dma_rx + next_entry; |
ba1ffd74 GC |
3345 | |
3346 | prefetch(np); | |
47dd7a54 | 3347 | |
c24602ef GC |
3348 | if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status)) |
3349 | priv->hw->desc->rx_extended_status(&priv->dev->stats, | |
3350 | &priv->xstats, | |
54139cf3 | 3351 | rx_q->dma_erx + |
c24602ef | 3352 | entry); |
891434b1 | 3353 | if (unlikely(status == discard_frame)) { |
47dd7a54 | 3354 | priv->dev->stats.rx_errors++; |
891434b1 | 3355 | if (priv->hwts_rx_en && !priv->extend_desc) { |
8d45e42b | 3356 | /* DESC2 & DESC3 will be overwritten by device |
891434b1 RK |
3357 | * with timestamp value, hence reinitialize |
3358 | * them in stmmac_rx_refill() function so that | |
3359 | * device can reuse it. | |
3360 | */ | |
9c8080d0 | 3361 | dev_kfree_skb_any(rx_q->rx_skbuff[entry]); |
54139cf3 | 3362 | rx_q->rx_skbuff[entry] = NULL; |
891434b1 | 3363 | dma_unmap_single(priv->device, |
54139cf3 | 3364 | rx_q->rx_skbuff_dma[entry], |
ceb69499 GC |
3365 | priv->dma_buf_sz, |
3366 | DMA_FROM_DEVICE); | |
891434b1 RK |
3367 | } |
3368 | } else { | |
47dd7a54 | 3369 | struct sk_buff *skb; |
3eeb2997 | 3370 | int frame_len; |
f748be53 AT |
3371 | unsigned int des; |
3372 | ||
3373 | if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) | |
f8be0d78 | 3374 | des = le32_to_cpu(p->des0); |
f748be53 | 3375 | else |
f8be0d78 | 3376 | des = le32_to_cpu(p->des2); |
47dd7a54 | 3377 | |
ceb69499 GC |
3378 | frame_len = priv->hw->desc->get_rx_frame_len(p, coe); |
3379 | ||
8d45e42b | 3380 | /* If frame length is greater than skb buffer size |
f748be53 AT |
3381 | * (preallocated during init) then the packet is |
3382 | * ignored | |
3383 | */ | |
e527c4a7 | 3384 | if (frame_len > priv->dma_buf_sz) { |
38ddc59d LC |
3385 | netdev_err(priv->dev, |
3386 | "len %d larger than size (%d)\n", | |
3387 | frame_len, priv->dma_buf_sz); | |
e527c4a7 GC |
3388 | priv->dev->stats.rx_length_errors++; |
3389 | break; | |
3390 | } | |
3391 | ||
3eeb2997 | 3392 | /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 |
ceb69499 GC |
3393 | * Type frames (LLC/LLC-SNAP) |
3394 | */ | |
3eeb2997 GC |
3395 | if (unlikely(status != llc_snap)) |
3396 | frame_len -= ETH_FCS_LEN; | |
47dd7a54 | 3397 | |
83d7af64 | 3398 | if (netif_msg_rx_status(priv)) { |
38ddc59d LC |
3399 | netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n", |
3400 | p, entry, des); | |
83d7af64 | 3401 | if (frame_len > ETH_FRAME_LEN) |
38ddc59d LC |
3402 | netdev_dbg(priv->dev, "frame size %d, COE: %d\n", |
3403 | frame_len, status); | |
83d7af64 | 3404 | } |
22ad3838 | 3405 | |
f748be53 AT |
3406 | /* The zero-copy is always used for all the sizes |
3407 | * in case of GMAC4 because it needs | |
3408 | * to refill the used descriptors, always. | |
3409 | */ | |
3410 | if (unlikely(!priv->plat->has_gmac4 && | |
3411 | ((frame_len < priv->rx_copybreak) || | |
54139cf3 | 3412 | stmmac_rx_threshold_count(rx_q)))) { |
22ad3838 GC |
3413 | skb = netdev_alloc_skb_ip_align(priv->dev, |
3414 | frame_len); | |
3415 | if (unlikely(!skb)) { | |
3416 | if (net_ratelimit()) | |
3417 | dev_warn(priv->device, | |
3418 | "packet dropped\n"); | |
3419 | priv->dev->stats.rx_dropped++; | |
3420 | break; | |
3421 | } | |
3422 | ||
3423 | dma_sync_single_for_cpu(priv->device, | |
54139cf3 | 3424 | rx_q->rx_skbuff_dma |
22ad3838 GC |
3425 | [entry], frame_len, |
3426 | DMA_FROM_DEVICE); | |
3427 | skb_copy_to_linear_data(skb, | |
54139cf3 | 3428 | rx_q-> |
22ad3838 GC |
3429 | rx_skbuff[entry]->data, |
3430 | frame_len); | |
3431 | ||
3432 | skb_put(skb, frame_len); | |
3433 | dma_sync_single_for_device(priv->device, | |
54139cf3 | 3434 | rx_q->rx_skbuff_dma |
22ad3838 GC |
3435 | [entry], frame_len, |
3436 | DMA_FROM_DEVICE); | |
3437 | } else { | |
54139cf3 | 3438 | skb = rx_q->rx_skbuff[entry]; |
22ad3838 | 3439 | if (unlikely(!skb)) { |
38ddc59d LC |
3440 | netdev_err(priv->dev, |
3441 | "%s: Inconsistent Rx chain\n", | |
3442 | priv->dev->name); | |
22ad3838 GC |
3443 | priv->dev->stats.rx_dropped++; |
3444 | break; | |
3445 | } | |
3446 | prefetch(skb->data - NET_IP_ALIGN); | |
54139cf3 JP |
3447 | rx_q->rx_skbuff[entry] = NULL; |
3448 | rx_q->rx_zeroc_thresh++; | |
22ad3838 GC |
3449 | |
3450 | skb_put(skb, frame_len); | |
3451 | dma_unmap_single(priv->device, | |
54139cf3 | 3452 | rx_q->rx_skbuff_dma[entry], |
22ad3838 GC |
3453 | priv->dma_buf_sz, |
3454 | DMA_FROM_DEVICE); | |
47dd7a54 | 3455 | } |
47dd7a54 | 3456 | |
47dd7a54 | 3457 | if (netif_msg_pktdata(priv)) { |
38ddc59d LC |
3458 | netdev_dbg(priv->dev, "frame received (%dbytes)", |
3459 | frame_len); | |
47dd7a54 GC |
3460 | print_pkt(skb->data, frame_len); |
3461 | } | |
83d7af64 | 3462 | |
ba1ffd74 GC |
3463 | stmmac_get_rx_hwtstamp(priv, p, np, skb); |
3464 | ||
b9381985 VB |
3465 | stmmac_rx_vlan(priv->dev, skb); |
3466 | ||
47dd7a54 GC |
3467 | skb->protocol = eth_type_trans(skb, priv->dev); |
3468 | ||
ceb69499 | 3469 | if (unlikely(!coe)) |
bc8acf2c | 3470 | skb_checksum_none_assert(skb); |
62a2ab93 | 3471 | else |
47dd7a54 | 3472 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
62a2ab93 | 3473 | |
c22a3f48 | 3474 | napi_gro_receive(&rx_q->napi, skb); |
47dd7a54 GC |
3475 | |
3476 | priv->dev->stats.rx_packets++; | |
3477 | priv->dev->stats.rx_bytes += frame_len; | |
47dd7a54 GC |
3478 | } |
3479 | entry = next_entry; | |
47dd7a54 GC |
3480 | } |
3481 | ||
54139cf3 | 3482 | stmmac_rx_refill(priv, queue); |
47dd7a54 GC |
3483 | |
3484 | priv->xstats.rx_pkt_n += count; | |
3485 | ||
3486 | return count; | |
3487 | } | |
3488 | ||
3489 | /** | |
3490 | * stmmac_poll - stmmac poll method (NAPI) | |
3491 | * @napi : pointer to the napi structure. | |
3492 | * @budget : maximum number of packets that the current CPU can receive from | |
3493 | * all interfaces. | |
3494 | * Description : | |
9125cdd1 | 3495 | * To look at the incoming frames and clear the tx resources. |
47dd7a54 GC |
3496 | */ |
3497 | static int stmmac_poll(struct napi_struct *napi, int budget) | |
3498 | { | |
c22a3f48 JP |
3499 | struct stmmac_rx_queue *rx_q = |
3500 | container_of(napi, struct stmmac_rx_queue, napi); | |
3501 | struct stmmac_priv *priv = rx_q->priv_data; | |
ce736788 | 3502 | u32 tx_count = priv->plat->tx_queues_to_use; |
c22a3f48 | 3503 | u32 chan = rx_q->queue_index; |
54139cf3 | 3504 | int work_done = 0; |
c22a3f48 | 3505 | u32 queue; |
47dd7a54 | 3506 | |
9125cdd1 | 3507 | priv->xstats.napi_poll++; |
ce736788 JP |
3508 | |
3509 | /* check all the queues */ | |
3510 | for (queue = 0; queue < tx_count; queue++) | |
3511 | stmmac_tx_clean(priv, queue); | |
3512 | ||
c22a3f48 | 3513 | work_done = stmmac_rx(priv, budget, rx_q->queue_index); |
47dd7a54 | 3514 | if (work_done < budget) { |
6ad20165 | 3515 | napi_complete_done(napi, work_done); |
4f513ecd | 3516 | stmmac_enable_dma_irq(priv, chan); |
47dd7a54 GC |
3517 | } |
3518 | return work_done; | |
3519 | } | |
3520 | ||
3521 | /** | |
3522 | * stmmac_tx_timeout | |
3523 | * @dev : Pointer to net device structure | |
3524 | * Description: this function is called when a packet transmission fails to | |
7284a3f1 | 3525 | * complete within a reasonable time. The driver will mark the error in the |
47dd7a54 GC |
3526 | * netdev structure and arrange for the device to be reset to a sane state |
3527 | * in order to transmit a new packet. | |
3528 | */ | |
3529 | static void stmmac_tx_timeout(struct net_device *dev) | |
3530 | { | |
3531 | struct stmmac_priv *priv = netdev_priv(dev); | |
ce736788 JP |
3532 | u32 tx_count = priv->plat->tx_queues_to_use; |
3533 | u32 chan; | |
47dd7a54 GC |
3534 | |
3535 | /* Clear Tx resources and restart transmitting again */ | |
ce736788 JP |
3536 | for (chan = 0; chan < tx_count; chan++) |
3537 | stmmac_tx_err(priv, chan); | |
47dd7a54 GC |
3538 | } |
3539 | ||
47dd7a54 | 3540 | /** |
01789349 | 3541 | * stmmac_set_rx_mode - entry point for multicast addressing |
47dd7a54 GC |
3542 | * @dev : pointer to the device structure |
3543 | * Description: | |
3544 | * This function is a driver entry point which gets called by the kernel | |
3545 | * whenever multicast addresses must be enabled/disabled. | |
3546 | * Return value: | |
3547 | * void. | |
3548 | */ | |
01789349 | 3549 | static void stmmac_set_rx_mode(struct net_device *dev) |
47dd7a54 GC |
3550 | { |
3551 | struct stmmac_priv *priv = netdev_priv(dev); | |
3552 | ||
3b57de95 | 3553 | priv->hw->mac->set_filter(priv->hw, dev); |
47dd7a54 GC |
3554 | } |
3555 | ||
3556 | /** | |
3557 | * stmmac_change_mtu - entry point to change MTU size for the device. | |
3558 | * @dev : device pointer. | |
3559 | * @new_mtu : the new MTU size for the device. | |
3560 | * Description: the Maximum Transfer Unit (MTU) is used by the network layer | |
3561 | * to drive packet transmission. Ethernet has an MTU of 1500 octets | |
3562 | * (ETH_DATA_LEN). This value can be changed with ifconfig. | |
3563 | * Return value: | |
3564 | * 0 on success and an appropriate (-)ve integer as defined in errno.h | |
3565 | * file on failure. | |
3566 | */ | |
3567 | static int stmmac_change_mtu(struct net_device *dev, int new_mtu) | |
3568 | { | |
38ddc59d LC |
3569 | struct stmmac_priv *priv = netdev_priv(dev); |
3570 | ||
47dd7a54 | 3571 | if (netif_running(dev)) { |
38ddc59d | 3572 | netdev_err(priv->dev, "must be stopped to change its MTU\n"); |
47dd7a54 GC |
3573 | return -EBUSY; |
3574 | } | |
3575 | ||
5e982f3b | 3576 | dev->mtu = new_mtu; |
f748be53 | 3577 | |
5e982f3b MM |
3578 | netdev_update_features(dev); |
3579 | ||
3580 | return 0; | |
3581 | } | |
3582 | ||
c8f44aff | 3583 | static netdev_features_t stmmac_fix_features(struct net_device *dev, |
ceb69499 | 3584 | netdev_features_t features) |
5e982f3b MM |
3585 | { |
3586 | struct stmmac_priv *priv = netdev_priv(dev); | |
3587 | ||
38912bdb | 3588 | if (priv->plat->rx_coe == STMMAC_RX_COE_NONE) |
5e982f3b | 3589 | features &= ~NETIF_F_RXCSUM; |
d2afb5bd | 3590 | |
5e982f3b | 3591 | if (!priv->plat->tx_coe) |
a188222b | 3592 | features &= ~NETIF_F_CSUM_MASK; |
5e982f3b | 3593 | |
ebbb293f GC |
3594 | /* Some GMAC devices have a bugged Jumbo frame support that |
3595 | * needs to have the Tx COE disabled for oversized frames | |
3596 | * (due to limited buffer sizes). In this case we disable | |
8d45e42b | 3597 | * the TX csum insertion in the TDES and not use SF. |
ceb69499 | 3598 | */ |
5e982f3b | 3599 | if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN)) |
a188222b | 3600 | features &= ~NETIF_F_CSUM_MASK; |
ebbb293f | 3601 | |
f748be53 AT |
3602 | /* Disable tso if asked by ethtool */ |
3603 | if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) { | |
3604 | if (features & NETIF_F_TSO) | |
3605 | priv->tso = true; | |
3606 | else | |
3607 | priv->tso = false; | |
3608 | } | |
3609 | ||
5e982f3b | 3610 | return features; |
47dd7a54 GC |
3611 | } |
3612 | ||
d2afb5bd GC |
3613 | static int stmmac_set_features(struct net_device *netdev, |
3614 | netdev_features_t features) | |
3615 | { | |
3616 | struct stmmac_priv *priv = netdev_priv(netdev); | |
3617 | ||
3618 | /* Keep the COE Type in case of csum is supporting */ | |
3619 | if (features & NETIF_F_RXCSUM) | |
3620 | priv->hw->rx_csum = priv->plat->rx_coe; | |
3621 | else | |
3622 | priv->hw->rx_csum = 0; | |
3623 | /* No check needed because rx_coe has been set before and it will be | |
3624 | * fixed in case of issue. | |
3625 | */ | |
3626 | priv->hw->mac->rx_ipc(priv->hw); | |
3627 | ||
3628 | return 0; | |
3629 | } | |
3630 | ||
32ceabca GC |
3631 | /** |
3632 | * stmmac_interrupt - main ISR | |
3633 | * @irq: interrupt number. | |
3634 | * @dev_id: to pass the net device pointer. | |
3635 | * Description: this is the main driver interrupt service routine. | |
732fdf0e GC |
3636 | * It can call: |
3637 | * o DMA service routine (to manage incoming frame reception and transmission | |
3638 | * status) | |
3639 | * o Core interrupts to manage: remote wake-up, management counter, LPI | |
3640 | * interrupts. | |
32ceabca | 3641 | */ |
47dd7a54 GC |
3642 | static irqreturn_t stmmac_interrupt(int irq, void *dev_id) |
3643 | { | |
3644 | struct net_device *dev = (struct net_device *)dev_id; | |
3645 | struct stmmac_priv *priv = netdev_priv(dev); | |
7bac4e1e JP |
3646 | u32 rx_cnt = priv->plat->rx_queues_to_use; |
3647 | u32 tx_cnt = priv->plat->tx_queues_to_use; | |
3648 | u32 queues_count; | |
3649 | u32 queue; | |
3650 | ||
3651 | queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt; | |
47dd7a54 | 3652 | |
89f7f2cf SK |
3653 | if (priv->irq_wake) |
3654 | pm_wakeup_event(priv->device, 0); | |
3655 | ||
47dd7a54 | 3656 | if (unlikely(!dev)) { |
38ddc59d | 3657 | netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__); |
47dd7a54 GC |
3658 | return IRQ_NONE; |
3659 | } | |
3660 | ||
d765955d | 3661 | /* To handle GMAC own interrupts */ |
f748be53 | 3662 | if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) { |
7ed24bbe | 3663 | int status = priv->hw->mac->host_irq_status(priv->hw, |
0982a0f6 | 3664 | &priv->xstats); |
8f71a88d | 3665 | |
d765955d | 3666 | if (unlikely(status)) { |
d765955d | 3667 | /* For LPI we need to save the tx status */ |
0982a0f6 | 3668 | if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE) |
d765955d | 3669 | priv->tx_path_in_lpi_mode = true; |
0982a0f6 | 3670 | if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE) |
d765955d | 3671 | priv->tx_path_in_lpi_mode = false; |
7bac4e1e JP |
3672 | } |
3673 | ||
3674 | if (priv->synopsys_id >= DWMAC_CORE_4_00) { | |
3675 | for (queue = 0; queue < queues_count; queue++) { | |
54139cf3 JP |
3676 | struct stmmac_rx_queue *rx_q = |
3677 | &priv->rx_queue[queue]; | |
3678 | ||
7bac4e1e JP |
3679 | status |= |
3680 | priv->hw->mac->host_mtl_irq_status(priv->hw, | |
3681 | queue); | |
3682 | ||
3683 | if (status & CORE_IRQ_MTL_RX_OVERFLOW && | |
3684 | priv->hw->dma->set_rx_tail_ptr) | |
3685 | priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, | |
54139cf3 | 3686 | rx_q->rx_tail_addr, |
7bac4e1e JP |
3687 | queue); |
3688 | } | |
d765955d | 3689 | } |
70523e63 GC |
3690 | |
3691 | /* PCS link status */ | |
3fe5cadb | 3692 | if (priv->hw->pcs) { |
70523e63 GC |
3693 | if (priv->xstats.pcs_link) |
3694 | netif_carrier_on(dev); | |
3695 | else | |
3696 | netif_carrier_off(dev); | |
3697 | } | |
d765955d | 3698 | } |
aec7ff27 | 3699 | |
d765955d | 3700 | /* To handle DMA interrupts */ |
aec7ff27 | 3701 | stmmac_dma_interrupt(priv); |
47dd7a54 GC |
3702 | |
3703 | return IRQ_HANDLED; | |
3704 | } | |
3705 | ||
3706 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
3707 | /* Polling receive - used by NETCONSOLE and other diagnostic tools | |
ceb69499 GC |
3708 | * to allow network I/O with interrupts disabled. |
3709 | */ | |
47dd7a54 GC |
3710 | static void stmmac_poll_controller(struct net_device *dev) |
3711 | { | |
3712 | disable_irq(dev->irq); | |
3713 | stmmac_interrupt(dev->irq, dev); | |
3714 | enable_irq(dev->irq); | |
3715 | } | |
3716 | #endif | |
3717 | ||
3718 | /** | |
3719 | * stmmac_ioctl - Entry point for the Ioctl | |
3720 | * @dev: Device pointer. | |
3721 | * @rq: An IOCTL specefic structure, that can contain a pointer to | |
3722 | * a proprietary structure used to pass information to the driver. | |
3723 | * @cmd: IOCTL command | |
3724 | * Description: | |
32ceabca | 3725 | * Currently it supports the phy_mii_ioctl(...) and HW time stamping. |
47dd7a54 GC |
3726 | */ |
3727 | static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |
3728 | { | |
891434b1 | 3729 | int ret = -EOPNOTSUPP; |
47dd7a54 GC |
3730 | |
3731 | if (!netif_running(dev)) | |
3732 | return -EINVAL; | |
3733 | ||
891434b1 RK |
3734 | switch (cmd) { |
3735 | case SIOCGMIIPHY: | |
3736 | case SIOCGMIIREG: | |
3737 | case SIOCSMIIREG: | |
d6d50c7e | 3738 | if (!dev->phydev) |
891434b1 | 3739 | return -EINVAL; |
d6d50c7e | 3740 | ret = phy_mii_ioctl(dev->phydev, rq, cmd); |
891434b1 RK |
3741 | break; |
3742 | case SIOCSHWTSTAMP: | |
3743 | ret = stmmac_hwtstamp_ioctl(dev, rq); | |
3744 | break; | |
3745 | default: | |
3746 | break; | |
3747 | } | |
28b04113 | 3748 | |
47dd7a54 GC |
3749 | return ret; |
3750 | } | |
3751 | ||
a830405e BV |
3752 | static int stmmac_set_mac_address(struct net_device *ndev, void *addr) |
3753 | { | |
3754 | struct stmmac_priv *priv = netdev_priv(ndev); | |
3755 | int ret = 0; | |
3756 | ||
3757 | ret = eth_mac_addr(ndev, addr); | |
3758 | if (ret) | |
3759 | return ret; | |
3760 | ||
3761 | priv->hw->mac->set_umac_addr(priv->hw, ndev->dev_addr, 0); | |
3762 | ||
3763 | return ret; | |
3764 | } | |
3765 | ||
50fb4f74 | 3766 | #ifdef CONFIG_DEBUG_FS |
7ac29055 | 3767 | static struct dentry *stmmac_fs_dir; |
7ac29055 | 3768 | |
c24602ef | 3769 | static void sysfs_display_ring(void *head, int size, int extend_desc, |
ceb69499 | 3770 | struct seq_file *seq) |
7ac29055 | 3771 | { |
7ac29055 | 3772 | int i; |
ceb69499 GC |
3773 | struct dma_extended_desc *ep = (struct dma_extended_desc *)head; |
3774 | struct dma_desc *p = (struct dma_desc *)head; | |
7ac29055 | 3775 | |
c24602ef | 3776 | for (i = 0; i < size; i++) { |
c24602ef | 3777 | if (extend_desc) { |
c24602ef | 3778 | seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", |
ceb69499 | 3779 | i, (unsigned int)virt_to_phys(ep), |
f8be0d78 MW |
3780 | le32_to_cpu(ep->basic.des0), |
3781 | le32_to_cpu(ep->basic.des1), | |
3782 | le32_to_cpu(ep->basic.des2), | |
3783 | le32_to_cpu(ep->basic.des3)); | |
c24602ef GC |
3784 | ep++; |
3785 | } else { | |
c24602ef | 3786 | seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n", |
66c25f6e | 3787 | i, (unsigned int)virt_to_phys(p), |
f8be0d78 MW |
3788 | le32_to_cpu(p->des0), le32_to_cpu(p->des1), |
3789 | le32_to_cpu(p->des2), le32_to_cpu(p->des3)); | |
c24602ef GC |
3790 | p++; |
3791 | } | |
7ac29055 GC |
3792 | seq_printf(seq, "\n"); |
3793 | } | |
c24602ef | 3794 | } |
7ac29055 | 3795 | |
c24602ef GC |
3796 | static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v) |
3797 | { | |
3798 | struct net_device *dev = seq->private; | |
3799 | struct stmmac_priv *priv = netdev_priv(dev); | |
54139cf3 | 3800 | u32 rx_count = priv->plat->rx_queues_to_use; |
ce736788 | 3801 | u32 tx_count = priv->plat->tx_queues_to_use; |
54139cf3 JP |
3802 | u32 queue; |
3803 | ||
3804 | for (queue = 0; queue < rx_count; queue++) { | |
3805 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; | |
3806 | ||
3807 | seq_printf(seq, "RX Queue %d:\n", queue); | |
3808 | ||
3809 | if (priv->extend_desc) { | |
3810 | seq_printf(seq, "Extended descriptor ring:\n"); | |
3811 | sysfs_display_ring((void *)rx_q->dma_erx, | |
3812 | DMA_RX_SIZE, 1, seq); | |
3813 | } else { | |
3814 | seq_printf(seq, "Descriptor ring:\n"); | |
3815 | sysfs_display_ring((void *)rx_q->dma_rx, | |
3816 | DMA_RX_SIZE, 0, seq); | |
3817 | } | |
3818 | } | |
aff3d9ef | 3819 | |
ce736788 JP |
3820 | for (queue = 0; queue < tx_count; queue++) { |
3821 | struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; | |
3822 | ||
3823 | seq_printf(seq, "TX Queue %d:\n", queue); | |
3824 | ||
3825 | if (priv->extend_desc) { | |
3826 | seq_printf(seq, "Extended descriptor ring:\n"); | |
3827 | sysfs_display_ring((void *)tx_q->dma_etx, | |
3828 | DMA_TX_SIZE, 1, seq); | |
3829 | } else { | |
3830 | seq_printf(seq, "Descriptor ring:\n"); | |
3831 | sysfs_display_ring((void *)tx_q->dma_tx, | |
3832 | DMA_TX_SIZE, 0, seq); | |
3833 | } | |
7ac29055 GC |
3834 | } |
3835 | ||
3836 | return 0; | |
3837 | } | |
3838 | ||
3839 | static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file) | |
3840 | { | |
3841 | return single_open(file, stmmac_sysfs_ring_read, inode->i_private); | |
3842 | } | |
3843 | ||
22d3efe5 PM |
3844 | /* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */ |
3845 | ||
7ac29055 GC |
3846 | static const struct file_operations stmmac_rings_status_fops = { |
3847 | .owner = THIS_MODULE, | |
3848 | .open = stmmac_sysfs_ring_open, | |
3849 | .read = seq_read, | |
3850 | .llseek = seq_lseek, | |
74863948 | 3851 | .release = single_release, |
7ac29055 GC |
3852 | }; |
3853 | ||
e7434821 GC |
3854 | static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v) |
3855 | { | |
3856 | struct net_device *dev = seq->private; | |
3857 | struct stmmac_priv *priv = netdev_priv(dev); | |
3858 | ||
19e30c14 | 3859 | if (!priv->hw_cap_support) { |
e7434821 GC |
3860 | seq_printf(seq, "DMA HW features not supported\n"); |
3861 | return 0; | |
3862 | } | |
3863 | ||
3864 | seq_printf(seq, "==============================\n"); | |
3865 | seq_printf(seq, "\tDMA HW features\n"); | |
3866 | seq_printf(seq, "==============================\n"); | |
3867 | ||
22d3efe5 | 3868 | seq_printf(seq, "\t10/100 Mbps: %s\n", |
e7434821 | 3869 | (priv->dma_cap.mbps_10_100) ? "Y" : "N"); |
22d3efe5 | 3870 | seq_printf(seq, "\t1000 Mbps: %s\n", |
e7434821 | 3871 | (priv->dma_cap.mbps_1000) ? "Y" : "N"); |
22d3efe5 | 3872 | seq_printf(seq, "\tHalf duplex: %s\n", |
e7434821 GC |
3873 | (priv->dma_cap.half_duplex) ? "Y" : "N"); |
3874 | seq_printf(seq, "\tHash Filter: %s\n", | |
3875 | (priv->dma_cap.hash_filter) ? "Y" : "N"); | |
3876 | seq_printf(seq, "\tMultiple MAC address registers: %s\n", | |
3877 | (priv->dma_cap.multi_addr) ? "Y" : "N"); | |
8d45e42b | 3878 | seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n", |
e7434821 GC |
3879 | (priv->dma_cap.pcs) ? "Y" : "N"); |
3880 | seq_printf(seq, "\tSMA (MDIO) Interface: %s\n", | |
3881 | (priv->dma_cap.sma_mdio) ? "Y" : "N"); | |
3882 | seq_printf(seq, "\tPMT Remote wake up: %s\n", | |
3883 | (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N"); | |
3884 | seq_printf(seq, "\tPMT Magic Frame: %s\n", | |
3885 | (priv->dma_cap.pmt_magic_frame) ? "Y" : "N"); | |
3886 | seq_printf(seq, "\tRMON module: %s\n", | |
3887 | (priv->dma_cap.rmon) ? "Y" : "N"); | |
3888 | seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n", | |
3889 | (priv->dma_cap.time_stamp) ? "Y" : "N"); | |
22d3efe5 | 3890 | seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n", |
e7434821 | 3891 | (priv->dma_cap.atime_stamp) ? "Y" : "N"); |
22d3efe5 | 3892 | seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n", |
e7434821 GC |
3893 | (priv->dma_cap.eee) ? "Y" : "N"); |
3894 | seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N"); | |
3895 | seq_printf(seq, "\tChecksum Offload in TX: %s\n", | |
3896 | (priv->dma_cap.tx_coe) ? "Y" : "N"); | |
f748be53 AT |
3897 | if (priv->synopsys_id >= DWMAC_CORE_4_00) { |
3898 | seq_printf(seq, "\tIP Checksum Offload in RX: %s\n", | |
3899 | (priv->dma_cap.rx_coe) ? "Y" : "N"); | |
3900 | } else { | |
3901 | seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n", | |
3902 | (priv->dma_cap.rx_coe_type1) ? "Y" : "N"); | |
3903 | seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n", | |
3904 | (priv->dma_cap.rx_coe_type2) ? "Y" : "N"); | |
3905 | } | |
e7434821 GC |
3906 | seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n", |
3907 | (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N"); | |
3908 | seq_printf(seq, "\tNumber of Additional RX channel: %d\n", | |
3909 | priv->dma_cap.number_rx_channel); | |
3910 | seq_printf(seq, "\tNumber of Additional TX channel: %d\n", | |
3911 | priv->dma_cap.number_tx_channel); | |
3912 | seq_printf(seq, "\tEnhanced descriptors: %s\n", | |
3913 | (priv->dma_cap.enh_desc) ? "Y" : "N"); | |
3914 | ||
3915 | return 0; | |
3916 | } | |
3917 | ||
3918 | static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file) | |
3919 | { | |
3920 | return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private); | |
3921 | } | |
3922 | ||
3923 | static const struct file_operations stmmac_dma_cap_fops = { | |
3924 | .owner = THIS_MODULE, | |
3925 | .open = stmmac_sysfs_dma_cap_open, | |
3926 | .read = seq_read, | |
3927 | .llseek = seq_lseek, | |
74863948 | 3928 | .release = single_release, |
e7434821 GC |
3929 | }; |
3930 | ||
7ac29055 GC |
3931 | static int stmmac_init_fs(struct net_device *dev) |
3932 | { | |
466c5ac8 MO |
3933 | struct stmmac_priv *priv = netdev_priv(dev); |
3934 | ||
3935 | /* Create per netdev entries */ | |
3936 | priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir); | |
7ac29055 | 3937 | |
466c5ac8 | 3938 | if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) { |
38ddc59d | 3939 | netdev_err(priv->dev, "ERROR failed to create debugfs directory\n"); |
7ac29055 GC |
3940 | |
3941 | return -ENOMEM; | |
3942 | } | |
3943 | ||
3944 | /* Entry to report DMA RX/TX rings */ | |
466c5ac8 MO |
3945 | priv->dbgfs_rings_status = |
3946 | debugfs_create_file("descriptors_status", S_IRUGO, | |
3947 | priv->dbgfs_dir, dev, | |
3948 | &stmmac_rings_status_fops); | |
7ac29055 | 3949 | |
466c5ac8 | 3950 | if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) { |
38ddc59d | 3951 | netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n"); |
466c5ac8 | 3952 | debugfs_remove_recursive(priv->dbgfs_dir); |
7ac29055 GC |
3953 | |
3954 | return -ENOMEM; | |
3955 | } | |
3956 | ||
e7434821 | 3957 | /* Entry to report the DMA HW features */ |
466c5ac8 MO |
3958 | priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO, |
3959 | priv->dbgfs_dir, | |
3960 | dev, &stmmac_dma_cap_fops); | |
e7434821 | 3961 | |
466c5ac8 | 3962 | if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) { |
38ddc59d | 3963 | netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n"); |
466c5ac8 | 3964 | debugfs_remove_recursive(priv->dbgfs_dir); |
e7434821 GC |
3965 | |
3966 | return -ENOMEM; | |
3967 | } | |
3968 | ||
7ac29055 GC |
3969 | return 0; |
3970 | } | |
3971 | ||
466c5ac8 | 3972 | static void stmmac_exit_fs(struct net_device *dev) |
7ac29055 | 3973 | { |
466c5ac8 MO |
3974 | struct stmmac_priv *priv = netdev_priv(dev); |
3975 | ||
3976 | debugfs_remove_recursive(priv->dbgfs_dir); | |
7ac29055 | 3977 | } |
50fb4f74 | 3978 | #endif /* CONFIG_DEBUG_FS */ |
7ac29055 | 3979 | |
47dd7a54 GC |
3980 | static const struct net_device_ops stmmac_netdev_ops = { |
3981 | .ndo_open = stmmac_open, | |
3982 | .ndo_start_xmit = stmmac_xmit, | |
3983 | .ndo_stop = stmmac_release, | |
3984 | .ndo_change_mtu = stmmac_change_mtu, | |
5e982f3b | 3985 | .ndo_fix_features = stmmac_fix_features, |
d2afb5bd | 3986 | .ndo_set_features = stmmac_set_features, |
01789349 | 3987 | .ndo_set_rx_mode = stmmac_set_rx_mode, |
47dd7a54 GC |
3988 | .ndo_tx_timeout = stmmac_tx_timeout, |
3989 | .ndo_do_ioctl = stmmac_ioctl, | |
47dd7a54 GC |
3990 | #ifdef CONFIG_NET_POLL_CONTROLLER |
3991 | .ndo_poll_controller = stmmac_poll_controller, | |
3992 | #endif | |
a830405e | 3993 | .ndo_set_mac_address = stmmac_set_mac_address, |
47dd7a54 GC |
3994 | }; |
3995 | ||
cf3f047b GC |
3996 | /** |
3997 | * stmmac_hw_init - Init the MAC device | |
32ceabca | 3998 | * @priv: driver private structure |
732fdf0e GC |
3999 | * Description: this function is to configure the MAC device according to |
4000 | * some platform parameters or the HW capability register. It prepares the | |
4001 | * driver to use either ring or chain modes and to setup either enhanced or | |
4002 | * normal descriptors. | |
cf3f047b GC |
4003 | */ |
4004 | static int stmmac_hw_init(struct stmmac_priv *priv) | |
4005 | { | |
cf3f047b GC |
4006 | struct mac_device_info *mac; |
4007 | ||
4008 | /* Identify the MAC HW device */ | |
ec33d71d LC |
4009 | if (priv->plat->setup) { |
4010 | mac = priv->plat->setup(priv); | |
4011 | } else if (priv->plat->has_gmac) { | |
03f2eecd | 4012 | priv->dev->priv_flags |= IFF_UNICAST_FLT; |
3b57de95 VB |
4013 | mac = dwmac1000_setup(priv->ioaddr, |
4014 | priv->plat->multicast_filter_bins, | |
c623d149 AT |
4015 | priv->plat->unicast_filter_entries, |
4016 | &priv->synopsys_id); | |
f748be53 AT |
4017 | } else if (priv->plat->has_gmac4) { |
4018 | priv->dev->priv_flags |= IFF_UNICAST_FLT; | |
4019 | mac = dwmac4_setup(priv->ioaddr, | |
4020 | priv->plat->multicast_filter_bins, | |
4021 | priv->plat->unicast_filter_entries, | |
4022 | &priv->synopsys_id); | |
03f2eecd | 4023 | } else { |
c623d149 | 4024 | mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id); |
03f2eecd | 4025 | } |
cf3f047b GC |
4026 | if (!mac) |
4027 | return -ENOMEM; | |
4028 | ||
4029 | priv->hw = mac; | |
4030 | ||
9f93ac8d LC |
4031 | /* dwmac-sun8i only work in chain mode */ |
4032 | if (priv->plat->has_sun8i) | |
4033 | chain_mode = 1; | |
4034 | ||
4a7d666a | 4035 | /* To use the chained or ring mode */ |
f748be53 AT |
4036 | if (priv->synopsys_id >= DWMAC_CORE_4_00) { |
4037 | priv->hw->mode = &dwmac4_ring_mode_ops; | |
4a7d666a | 4038 | } else { |
f748be53 AT |
4039 | if (chain_mode) { |
4040 | priv->hw->mode = &chain_mode_ops; | |
38ddc59d | 4041 | dev_info(priv->device, "Chain mode enabled\n"); |
f748be53 AT |
4042 | priv->mode = STMMAC_CHAIN_MODE; |
4043 | } else { | |
4044 | priv->hw->mode = &ring_mode_ops; | |
38ddc59d | 4045 | dev_info(priv->device, "Ring mode enabled\n"); |
f748be53 AT |
4046 | priv->mode = STMMAC_RING_MODE; |
4047 | } | |
4a7d666a GC |
4048 | } |
4049 | ||
cf3f047b GC |
4050 | /* Get the HW capability (new GMAC newer than 3.50a) */ |
4051 | priv->hw_cap_support = stmmac_get_hw_features(priv); | |
4052 | if (priv->hw_cap_support) { | |
38ddc59d | 4053 | dev_info(priv->device, "DMA HW capability register supported\n"); |
cf3f047b GC |
4054 | |
4055 | /* We can override some gmac/dma configuration fields: e.g. | |
4056 | * enh_desc, tx_coe (e.g. that are passed through the | |
4057 | * platform) with the values from the HW capability | |
4058 | * register (if supported). | |
4059 | */ | |
4060 | priv->plat->enh_desc = priv->dma_cap.enh_desc; | |
cf3f047b | 4061 | priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up; |
3fe5cadb | 4062 | priv->hw->pmt = priv->plat->pmt; |
38912bdb | 4063 | |
a8df35d4 EG |
4064 | /* TXCOE doesn't work in thresh DMA mode */ |
4065 | if (priv->plat->force_thresh_dma_mode) | |
4066 | priv->plat->tx_coe = 0; | |
4067 | else | |
4068 | priv->plat->tx_coe = priv->dma_cap.tx_coe; | |
4069 | ||
f748be53 AT |
4070 | /* In case of GMAC4 rx_coe is from HW cap register. */ |
4071 | priv->plat->rx_coe = priv->dma_cap.rx_coe; | |
38912bdb DS |
4072 | |
4073 | if (priv->dma_cap.rx_coe_type2) | |
4074 | priv->plat->rx_coe = STMMAC_RX_COE_TYPE2; | |
4075 | else if (priv->dma_cap.rx_coe_type1) | |
4076 | priv->plat->rx_coe = STMMAC_RX_COE_TYPE1; | |
4077 | ||
38ddc59d LC |
4078 | } else { |
4079 | dev_info(priv->device, "No HW DMA feature register supported\n"); | |
4080 | } | |
cf3f047b | 4081 | |
f748be53 AT |
4082 | /* To use alternate (extended), normal or GMAC4 descriptor structures */ |
4083 | if (priv->synopsys_id >= DWMAC_CORE_4_00) | |
4084 | priv->hw->desc = &dwmac4_desc_ops; | |
4085 | else | |
4086 | stmmac_selec_desc_mode(priv); | |
61369d02 | 4087 | |
d2afb5bd GC |
4088 | if (priv->plat->rx_coe) { |
4089 | priv->hw->rx_csum = priv->plat->rx_coe; | |
38ddc59d | 4090 | dev_info(priv->device, "RX Checksum Offload Engine supported\n"); |
f748be53 | 4091 | if (priv->synopsys_id < DWMAC_CORE_4_00) |
38ddc59d | 4092 | dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum); |
d2afb5bd | 4093 | } |
cf3f047b | 4094 | if (priv->plat->tx_coe) |
38ddc59d | 4095 | dev_info(priv->device, "TX Checksum insertion supported\n"); |
cf3f047b GC |
4096 | |
4097 | if (priv->plat->pmt) { | |
38ddc59d | 4098 | dev_info(priv->device, "Wake-Up On Lan supported\n"); |
cf3f047b GC |
4099 | device_set_wakeup_capable(priv->device, 1); |
4100 | } | |
4101 | ||
f748be53 | 4102 | if (priv->dma_cap.tsoen) |
38ddc59d | 4103 | dev_info(priv->device, "TSO supported\n"); |
f748be53 | 4104 | |
c24602ef | 4105 | return 0; |
cf3f047b GC |
4106 | } |
4107 | ||
47dd7a54 | 4108 | /** |
bfab27a1 GC |
4109 | * stmmac_dvr_probe |
4110 | * @device: device pointer | |
ff3dd78c | 4111 | * @plat_dat: platform data pointer |
e56788cf | 4112 | * @res: stmmac resource pointer |
bfab27a1 GC |
4113 | * Description: this is the main probe function used to |
4114 | * call the alloc_etherdev, allocate the priv structure. | |
9afec6ef | 4115 | * Return: |
15ffac73 | 4116 | * returns 0 on success, otherwise errno. |
47dd7a54 | 4117 | */ |
15ffac73 JE |
4118 | int stmmac_dvr_probe(struct device *device, |
4119 | struct plat_stmmacenet_data *plat_dat, | |
4120 | struct stmmac_resources *res) | |
47dd7a54 | 4121 | { |
bfab27a1 GC |
4122 | struct net_device *ndev = NULL; |
4123 | struct stmmac_priv *priv; | |
c22a3f48 JP |
4124 | int ret = 0; |
4125 | u32 queue; | |
47dd7a54 | 4126 | |
c22a3f48 JP |
4127 | ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv), |
4128 | MTL_MAX_TX_QUEUES, | |
4129 | MTL_MAX_RX_QUEUES); | |
41de8d4c | 4130 | if (!ndev) |
15ffac73 | 4131 | return -ENOMEM; |
bfab27a1 GC |
4132 | |
4133 | SET_NETDEV_DEV(ndev, device); | |
4134 | ||
4135 | priv = netdev_priv(ndev); | |
4136 | priv->device = device; | |
4137 | priv->dev = ndev; | |
47dd7a54 | 4138 | |
bfab27a1 | 4139 | stmmac_set_ethtool_ops(ndev); |
cf3f047b GC |
4140 | priv->pause = pause; |
4141 | priv->plat = plat_dat; | |
e56788cf JE |
4142 | priv->ioaddr = res->addr; |
4143 | priv->dev->base_addr = (unsigned long)res->addr; | |
4144 | ||
4145 | priv->dev->irq = res->irq; | |
4146 | priv->wol_irq = res->wol_irq; | |
4147 | priv->lpi_irq = res->lpi_irq; | |
4148 | ||
4149 | if (res->mac) | |
4150 | memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN); | |
cf3f047b | 4151 | |
a7a62685 | 4152 | dev_set_drvdata(device, priv->dev); |
803f8fc4 | 4153 | |
cf3f047b GC |
4154 | /* Verify driver arguments */ |
4155 | stmmac_verify_args(); | |
bfab27a1 | 4156 | |
cf3f047b | 4157 | /* Override with kernel parameters if supplied XXX CRS XXX |
ceb69499 GC |
4158 | * this needs to have multiple instances |
4159 | */ | |
cf3f047b GC |
4160 | if ((phyaddr >= 0) && (phyaddr <= 31)) |
4161 | priv->plat->phy_addr = phyaddr; | |
4162 | ||
90f522a2 EP |
4163 | if (priv->plat->stmmac_rst) { |
4164 | ret = reset_control_assert(priv->plat->stmmac_rst); | |
f573c0b9 | 4165 | reset_control_deassert(priv->plat->stmmac_rst); |
90f522a2 EP |
4166 | /* Some reset controllers have only reset callback instead of |
4167 | * assert + deassert callbacks pair. | |
4168 | */ | |
4169 | if (ret == -ENOTSUPP) | |
4170 | reset_control_reset(priv->plat->stmmac_rst); | |
4171 | } | |
c5e4ddbd | 4172 | |
cf3f047b | 4173 | /* Init MAC and get the capabilities */ |
c24602ef GC |
4174 | ret = stmmac_hw_init(priv); |
4175 | if (ret) | |
62866e98 | 4176 | goto error_hw_init; |
cf3f047b | 4177 | |
c22a3f48 | 4178 | /* Configure real RX and TX queues */ |
c02b7a91 JP |
4179 | netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use); |
4180 | netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use); | |
c22a3f48 | 4181 | |
cf3f047b | 4182 | ndev->netdev_ops = &stmmac_netdev_ops; |
bfab27a1 | 4183 | |
cf3f047b GC |
4184 | ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | |
4185 | NETIF_F_RXCSUM; | |
f748be53 AT |
4186 | |
4187 | if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) { | |
9edfa7da | 4188 | ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; |
f748be53 | 4189 | priv->tso = true; |
38ddc59d | 4190 | dev_info(priv->device, "TSO feature enabled\n"); |
f748be53 | 4191 | } |
bfab27a1 GC |
4192 | ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; |
4193 | ndev->watchdog_timeo = msecs_to_jiffies(watchdog); | |
47dd7a54 GC |
4194 | #ifdef STMMAC_VLAN_TAG_USED |
4195 | /* Both mac100 and gmac support receive VLAN tag detection */ | |
f646968f | 4196 | ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; |
47dd7a54 GC |
4197 | #endif |
4198 | priv->msg_enable = netif_msg_init(debug, default_msg_level); | |
4199 | ||
44770e11 JW |
4200 | /* MTU range: 46 - hw-specific max */ |
4201 | ndev->min_mtu = ETH_ZLEN - ETH_HLEN; | |
4202 | if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00)) | |
4203 | ndev->max_mtu = JUMBO_LEN; | |
4204 | else | |
4205 | ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN); | |
a2cd64f3 KHL |
4206 | /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu |
4207 | * as well as plat->maxmtu < ndev->min_mtu which is a invalid range. | |
4208 | */ | |
4209 | if ((priv->plat->maxmtu < ndev->max_mtu) && | |
4210 | (priv->plat->maxmtu >= ndev->min_mtu)) | |
44770e11 | 4211 | ndev->max_mtu = priv->plat->maxmtu; |
a2cd64f3 | 4212 | else if (priv->plat->maxmtu < ndev->min_mtu) |
b618ab45 HK |
4213 | dev_warn(priv->device, |
4214 | "%s: warning: maxmtu having invalid value (%d)\n", | |
4215 | __func__, priv->plat->maxmtu); | |
44770e11 | 4216 | |
47dd7a54 GC |
4217 | if (flow_ctrl) |
4218 | priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ | |
4219 | ||
62a2ab93 GC |
4220 | /* Rx Watchdog is available in the COREs newer than the 3.40. |
4221 | * In some case, for example on bugged HW this feature | |
4222 | * has to be disable and this can be done by passing the | |
4223 | * riwt_off field from the platform. | |
4224 | */ | |
4225 | if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) { | |
4226 | priv->use_riwt = 1; | |
b618ab45 HK |
4227 | dev_info(priv->device, |
4228 | "Enable RX Mitigation via HW Watchdog Timer\n"); | |
62a2ab93 GC |
4229 | } |
4230 | ||
c22a3f48 JP |
4231 | for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) { |
4232 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; | |
4233 | ||
4234 | netif_napi_add(ndev, &rx_q->napi, stmmac_poll, | |
4235 | (8 * priv->plat->rx_queues_to_use)); | |
4236 | } | |
47dd7a54 | 4237 | |
f8e96161 VL |
4238 | spin_lock_init(&priv->lock); |
4239 | ||
cd7201f4 GC |
4240 | /* If a specific clk_csr value is passed from the platform |
4241 | * this means that the CSR Clock Range selection cannot be | |
4242 | * changed at run-time and it is fixed. Viceversa the driver'll try to | |
4243 | * set the MDC clock dynamically according to the csr actual | |
4244 | * clock input. | |
4245 | */ | |
4246 | if (!priv->plat->clk_csr) | |
4247 | stmmac_clk_csr_set(priv); | |
4248 | else | |
4249 | priv->clk_csr = priv->plat->clk_csr; | |
4250 | ||
e58bb43f GC |
4251 | stmmac_check_pcs_mode(priv); |
4252 | ||
3fe5cadb GC |
4253 | if (priv->hw->pcs != STMMAC_PCS_RGMII && |
4254 | priv->hw->pcs != STMMAC_PCS_TBI && | |
4255 | priv->hw->pcs != STMMAC_PCS_RTBI) { | |
e58bb43f GC |
4256 | /* MDIO bus Registration */ |
4257 | ret = stmmac_mdio_register(ndev); | |
4258 | if (ret < 0) { | |
b618ab45 HK |
4259 | dev_err(priv->device, |
4260 | "%s: MDIO bus (id: %d) registration failed", | |
4261 | __func__, priv->plat->bus_id); | |
e58bb43f GC |
4262 | goto error_mdio_register; |
4263 | } | |
4bfcbd7a FV |
4264 | } |
4265 | ||
57016590 | 4266 | ret = register_netdev(ndev); |
b2eb09af | 4267 | if (ret) { |
b618ab45 HK |
4268 | dev_err(priv->device, "%s: ERROR %i registering the device\n", |
4269 | __func__, ret); | |
b2eb09af FF |
4270 | goto error_netdev_register; |
4271 | } | |
57016590 FF |
4272 | |
4273 | return ret; | |
47dd7a54 | 4274 | |
6a81c26f | 4275 | error_netdev_register: |
b2eb09af FF |
4276 | if (priv->hw->pcs != STMMAC_PCS_RGMII && |
4277 | priv->hw->pcs != STMMAC_PCS_TBI && | |
4278 | priv->hw->pcs != STMMAC_PCS_RTBI) | |
4279 | stmmac_mdio_unregister(ndev); | |
6a81c26f | 4280 | error_mdio_register: |
c22a3f48 JP |
4281 | for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) { |
4282 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; | |
4283 | ||
4284 | netif_napi_del(&rx_q->napi); | |
4285 | } | |
62866e98 | 4286 | error_hw_init: |
34a52f36 | 4287 | free_netdev(ndev); |
47dd7a54 | 4288 | |
15ffac73 | 4289 | return ret; |
47dd7a54 | 4290 | } |
b2e2f0c7 | 4291 | EXPORT_SYMBOL_GPL(stmmac_dvr_probe); |
47dd7a54 GC |
4292 | |
4293 | /** | |
4294 | * stmmac_dvr_remove | |
f4e7bd81 | 4295 | * @dev: device pointer |
47dd7a54 | 4296 | * Description: this function resets the TX/RX processes, disables the MAC RX/TX |
bfab27a1 | 4297 | * changes the link status, releases the DMA descriptor rings. |
47dd7a54 | 4298 | */ |
f4e7bd81 | 4299 | int stmmac_dvr_remove(struct device *dev) |
47dd7a54 | 4300 | { |
f4e7bd81 | 4301 | struct net_device *ndev = dev_get_drvdata(dev); |
aec7ff27 | 4302 | struct stmmac_priv *priv = netdev_priv(ndev); |
47dd7a54 | 4303 | |
38ddc59d | 4304 | netdev_info(priv->dev, "%s: removing driver", __func__); |
47dd7a54 | 4305 | |
ae4f0d46 | 4306 | stmmac_stop_all_dma(priv); |
47dd7a54 | 4307 | |
270c7759 | 4308 | priv->hw->mac->set_mac(priv->ioaddr, false); |
47dd7a54 | 4309 | netif_carrier_off(ndev); |
47dd7a54 | 4310 | unregister_netdev(ndev); |
f573c0b9 | 4311 | if (priv->plat->stmmac_rst) |
4312 | reset_control_assert(priv->plat->stmmac_rst); | |
4313 | clk_disable_unprepare(priv->plat->pclk); | |
4314 | clk_disable_unprepare(priv->plat->stmmac_clk); | |
3fe5cadb GC |
4315 | if (priv->hw->pcs != STMMAC_PCS_RGMII && |
4316 | priv->hw->pcs != STMMAC_PCS_TBI && | |
4317 | priv->hw->pcs != STMMAC_PCS_RTBI) | |
e743471f | 4318 | stmmac_mdio_unregister(ndev); |
47dd7a54 GC |
4319 | free_netdev(ndev); |
4320 | ||
4321 | return 0; | |
4322 | } | |
b2e2f0c7 | 4323 | EXPORT_SYMBOL_GPL(stmmac_dvr_remove); |
47dd7a54 | 4324 | |
732fdf0e GC |
4325 | /** |
4326 | * stmmac_suspend - suspend callback | |
f4e7bd81 | 4327 | * @dev: device pointer |
732fdf0e GC |
4328 | * Description: this is the function to suspend the device and it is called |
4329 | * by the platform driver to stop the network queue, release the resources, | |
4330 | * program the PMT register (for WoL), clean and release driver resources. | |
4331 | */ | |
f4e7bd81 | 4332 | int stmmac_suspend(struct device *dev) |
47dd7a54 | 4333 | { |
f4e7bd81 | 4334 | struct net_device *ndev = dev_get_drvdata(dev); |
874bd42d | 4335 | struct stmmac_priv *priv = netdev_priv(ndev); |
f8c5a875 | 4336 | unsigned long flags; |
47dd7a54 | 4337 | |
874bd42d | 4338 | if (!ndev || !netif_running(ndev)) |
47dd7a54 GC |
4339 | return 0; |
4340 | ||
d6d50c7e PR |
4341 | if (ndev->phydev) |
4342 | phy_stop(ndev->phydev); | |
102463b1 | 4343 | |
f8c5a875 | 4344 | spin_lock_irqsave(&priv->lock, flags); |
47dd7a54 | 4345 | |
874bd42d | 4346 | netif_device_detach(ndev); |
c22a3f48 | 4347 | stmmac_stop_all_queues(priv); |
47dd7a54 | 4348 | |
c22a3f48 | 4349 | stmmac_disable_all_queues(priv); |
874bd42d GC |
4350 | |
4351 | /* Stop TX/RX DMA */ | |
ae4f0d46 | 4352 | stmmac_stop_all_dma(priv); |
c24602ef | 4353 | |
874bd42d | 4354 | /* Enable Power down mode by programming the PMT regs */ |
89f7f2cf | 4355 | if (device_may_wakeup(priv->device)) { |
7ed24bbe | 4356 | priv->hw->mac->pmt(priv->hw, priv->wolopts); |
89f7f2cf SK |
4357 | priv->irq_wake = 1; |
4358 | } else { | |
270c7759 | 4359 | priv->hw->mac->set_mac(priv->ioaddr, false); |
db88f10a | 4360 | pinctrl_pm_select_sleep_state(priv->device); |
ba1377ff | 4361 | /* Disable clock in case of PWM is off */ |
f573c0b9 | 4362 | clk_disable(priv->plat->pclk); |
4363 | clk_disable(priv->plat->stmmac_clk); | |
ba1377ff | 4364 | } |
f8c5a875 | 4365 | spin_unlock_irqrestore(&priv->lock, flags); |
2d871aa0 | 4366 | |
4d869b03 | 4367 | priv->oldlink = false; |
bd00632c LC |
4368 | priv->speed = SPEED_UNKNOWN; |
4369 | priv->oldduplex = DUPLEX_UNKNOWN; | |
47dd7a54 GC |
4370 | return 0; |
4371 | } | |
b2e2f0c7 | 4372 | EXPORT_SYMBOL_GPL(stmmac_suspend); |
47dd7a54 | 4373 | |
54139cf3 JP |
4374 | /** |
4375 | * stmmac_reset_queues_param - reset queue parameters | |
4376 | * @dev: device pointer | |
4377 | */ | |
4378 | static void stmmac_reset_queues_param(struct stmmac_priv *priv) | |
4379 | { | |
4380 | u32 rx_cnt = priv->plat->rx_queues_to_use; | |
ce736788 | 4381 | u32 tx_cnt = priv->plat->tx_queues_to_use; |
54139cf3 JP |
4382 | u32 queue; |
4383 | ||
4384 | for (queue = 0; queue < rx_cnt; queue++) { | |
4385 | struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; | |
4386 | ||
4387 | rx_q->cur_rx = 0; | |
4388 | rx_q->dirty_rx = 0; | |
4389 | } | |
4390 | ||
ce736788 JP |
4391 | for (queue = 0; queue < tx_cnt; queue++) { |
4392 | struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; | |
4393 | ||
4394 | tx_q->cur_tx = 0; | |
4395 | tx_q->dirty_tx = 0; | |
4396 | } | |
54139cf3 JP |
4397 | } |
4398 | ||
732fdf0e GC |
4399 | /** |
4400 | * stmmac_resume - resume callback | |
f4e7bd81 | 4401 | * @dev: device pointer |
732fdf0e GC |
4402 | * Description: when resume this function is invoked to setup the DMA and CORE |
4403 | * in a usable state. | |
4404 | */ | |
f4e7bd81 | 4405 | int stmmac_resume(struct device *dev) |
47dd7a54 | 4406 | { |
f4e7bd81 | 4407 | struct net_device *ndev = dev_get_drvdata(dev); |
874bd42d | 4408 | struct stmmac_priv *priv = netdev_priv(ndev); |
f8c5a875 | 4409 | unsigned long flags; |
47dd7a54 | 4410 | |
874bd42d | 4411 | if (!netif_running(ndev)) |
47dd7a54 GC |
4412 | return 0; |
4413 | ||
47dd7a54 GC |
4414 | /* Power Down bit, into the PM register, is cleared |
4415 | * automatically as soon as a magic packet or a Wake-up frame | |
4416 | * is received. Anyway, it's better to manually clear | |
4417 | * this bit because it can generate problems while resuming | |
ceb69499 GC |
4418 | * from another devices (e.g. serial console). |
4419 | */ | |
623997fb | 4420 | if (device_may_wakeup(priv->device)) { |
f55d84b0 | 4421 | spin_lock_irqsave(&priv->lock, flags); |
7ed24bbe | 4422 | priv->hw->mac->pmt(priv->hw, 0); |
f55d84b0 | 4423 | spin_unlock_irqrestore(&priv->lock, flags); |
89f7f2cf | 4424 | priv->irq_wake = 0; |
623997fb | 4425 | } else { |
db88f10a | 4426 | pinctrl_pm_select_default_state(priv->device); |
8d45e42b | 4427 | /* enable the clk previously disabled */ |
f573c0b9 | 4428 | clk_enable(priv->plat->stmmac_clk); |
4429 | clk_enable(priv->plat->pclk); | |
623997fb SK |
4430 | /* reset the phy so that it's ready */ |
4431 | if (priv->mii) | |
4432 | stmmac_mdio_reset(priv->mii); | |
4433 | } | |
47dd7a54 | 4434 | |
874bd42d | 4435 | netif_device_attach(ndev); |
47dd7a54 | 4436 | |
f55d84b0 VP |
4437 | spin_lock_irqsave(&priv->lock, flags); |
4438 | ||
54139cf3 JP |
4439 | stmmac_reset_queues_param(priv); |
4440 | ||
f748be53 AT |
4441 | /* reset private mss value to force mss context settings at |
4442 | * next tso xmit (only used for gmac4). | |
4443 | */ | |
4444 | priv->mss = 0; | |
4445 | ||
ae79a639 GC |
4446 | stmmac_clear_descriptors(priv); |
4447 | ||
fe131929 | 4448 | stmmac_hw_setup(ndev, false); |
777da230 | 4449 | stmmac_init_tx_coalesce(priv); |
ac316c78 | 4450 | stmmac_set_rx_mode(ndev); |
47dd7a54 | 4451 | |
c22a3f48 | 4452 | stmmac_enable_all_queues(priv); |
47dd7a54 | 4453 | |
c22a3f48 | 4454 | stmmac_start_all_queues(priv); |
47dd7a54 | 4455 | |
f8c5a875 | 4456 | spin_unlock_irqrestore(&priv->lock, flags); |
102463b1 | 4457 | |
d6d50c7e PR |
4458 | if (ndev->phydev) |
4459 | phy_start(ndev->phydev); | |
102463b1 | 4460 | |
47dd7a54 GC |
4461 | return 0; |
4462 | } | |
b2e2f0c7 | 4463 | EXPORT_SYMBOL_GPL(stmmac_resume); |
ba27ec66 | 4464 | |
47dd7a54 GC |
4465 | #ifndef MODULE |
4466 | static int __init stmmac_cmdline_opt(char *str) | |
4467 | { | |
4468 | char *opt; | |
4469 | ||
4470 | if (!str || !*str) | |
4471 | return -EINVAL; | |
4472 | while ((opt = strsep(&str, ",")) != NULL) { | |
f3240e28 | 4473 | if (!strncmp(opt, "debug:", 6)) { |
ea2ab871 | 4474 | if (kstrtoint(opt + 6, 0, &debug)) |
f3240e28 GC |
4475 | goto err; |
4476 | } else if (!strncmp(opt, "phyaddr:", 8)) { | |
ea2ab871 | 4477 | if (kstrtoint(opt + 8, 0, &phyaddr)) |
f3240e28 | 4478 | goto err; |
f3240e28 | 4479 | } else if (!strncmp(opt, "buf_sz:", 7)) { |
ea2ab871 | 4480 | if (kstrtoint(opt + 7, 0, &buf_sz)) |
f3240e28 GC |
4481 | goto err; |
4482 | } else if (!strncmp(opt, "tc:", 3)) { | |
ea2ab871 | 4483 | if (kstrtoint(opt + 3, 0, &tc)) |
f3240e28 GC |
4484 | goto err; |
4485 | } else if (!strncmp(opt, "watchdog:", 9)) { | |
ea2ab871 | 4486 | if (kstrtoint(opt + 9, 0, &watchdog)) |
f3240e28 GC |
4487 | goto err; |
4488 | } else if (!strncmp(opt, "flow_ctrl:", 10)) { | |
ea2ab871 | 4489 | if (kstrtoint(opt + 10, 0, &flow_ctrl)) |
f3240e28 GC |
4490 | goto err; |
4491 | } else if (!strncmp(opt, "pause:", 6)) { | |
ea2ab871 | 4492 | if (kstrtoint(opt + 6, 0, &pause)) |
f3240e28 | 4493 | goto err; |
506f669c | 4494 | } else if (!strncmp(opt, "eee_timer:", 10)) { |
d765955d GC |
4495 | if (kstrtoint(opt + 10, 0, &eee_timer)) |
4496 | goto err; | |
4a7d666a GC |
4497 | } else if (!strncmp(opt, "chain_mode:", 11)) { |
4498 | if (kstrtoint(opt + 11, 0, &chain_mode)) | |
4499 | goto err; | |
f3240e28 | 4500 | } |
47dd7a54 GC |
4501 | } |
4502 | return 0; | |
f3240e28 GC |
4503 | |
4504 | err: | |
4505 | pr_err("%s: ERROR broken module parameter conversion", __func__); | |
4506 | return -EINVAL; | |
47dd7a54 GC |
4507 | } |
4508 | ||
4509 | __setup("stmmaceth=", stmmac_cmdline_opt); | |
ceb69499 | 4510 | #endif /* MODULE */ |
6fc0d0f2 | 4511 | |
466c5ac8 MO |
4512 | static int __init stmmac_init(void) |
4513 | { | |
4514 | #ifdef CONFIG_DEBUG_FS | |
4515 | /* Create debugfs main directory if it doesn't exist yet */ | |
4516 | if (!stmmac_fs_dir) { | |
4517 | stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL); | |
4518 | ||
4519 | if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) { | |
4520 | pr_err("ERROR %s, debugfs create directory failed\n", | |
4521 | STMMAC_RESOURCE_NAME); | |
4522 | ||
4523 | return -ENOMEM; | |
4524 | } | |
4525 | } | |
4526 | #endif | |
4527 | ||
4528 | return 0; | |
4529 | } | |
4530 | ||
4531 | static void __exit stmmac_exit(void) | |
4532 | { | |
4533 | #ifdef CONFIG_DEBUG_FS | |
4534 | debugfs_remove_recursive(stmmac_fs_dir); | |
4535 | #endif | |
4536 | } | |
4537 | ||
4538 | module_init(stmmac_init) | |
4539 | module_exit(stmmac_exit) | |
4540 | ||
6fc0d0f2 GC |
4541 | MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver"); |
4542 | MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); | |
4543 | MODULE_LICENSE("GPL"); |