/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

	Copyright(C) 2007-2011 STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
	http://www.stlinux.com
  Support available at:
	https://bugzilla.stlinux.com/
*******************************************************************************/

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include <net/pkt_cls.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"
#include "dwxgmac2.h"
#include "hwif.h"

#define	STMMAC_ALIGN(x)		__ALIGN_KERNEL(x, SMP_CACHE_BYTES)
#define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)

/* Module parameters */
#define TX_TIMEO	5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, 0644);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");

#define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
#define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)

static int flow_ctrl = FLOW_OFF;
module_param(flow_ctrl, int, 0644);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, 0644);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, 0644);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define	DEFAULT_BUFSIZE	1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, 0644);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define	STMMAC_RX_COPYBREAK	256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, 0644);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))

/* By default the driver will use the ring mode to manage tx and rx descriptors,
 * but allow user to force to use the chain instead of the ring
 */
static unsigned int chain_mode;
module_param(chain_mode, int, 0444);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");

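/* Illustrative note (not part of the original source): the parameters above
 * can be tuned at load time, e.g. "modprobe stmmac buf_sz=4096 eee_timer=2000"
 * or "stmmac.eee_timer=2000" on the kernel command line when the driver is
 * built in; current values appear under /sys/module/stmmac/parameters/.
 */
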
static irqreturn_t stmmac_interrupt(int irq, void *dev_id);

#ifdef CONFIG_DEBUG_FS
static int stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))

/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case of
 * errors.
 */
static void stmmac_verify_args(void)
{
	if (unlikely(watchdog < 0))
		watchdog = TX_TIMEO;
	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
		buf_sz = DEFAULT_BUFSIZE;
	if (unlikely(flow_ctrl > 1))
		flow_ctrl = FLOW_AUTO;
	else if (likely(flow_ctrl < 0))
		flow_ctrl = FLOW_OFF;
	if (unlikely((pause < 0) || (pause > 0xffff)))
		pause = PAUSE_TIME;
	if (eee_timer < 0)
		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}

/**
 * stmmac_disable_all_queues - Disable all queues
 * @priv: driver private structure
 */
static void stmmac_disable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (queue < rx_queues_cnt)
			napi_disable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_disable(&ch->tx_napi);
	}
}

/**
 * stmmac_enable_all_queues - Enable all queues
 * @priv: driver private structure
 */
static void stmmac_enable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (queue < rx_queues_cnt)
			napi_enable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_enable(&ch->tx_napi);
	}
}

/**
 * stmmac_stop_all_queues - Stop all queues
 * @priv: driver private structure
 */
static void stmmac_stop_all_queues(struct stmmac_priv *priv)
{
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < tx_queues_cnt; queue++)
		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
}

/**
 * stmmac_start_all_queues - Start all queues
 * @priv: driver private structure
 */
static void stmmac_start_all_queues(struct stmmac_priv *priv)
{
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < tx_queues_cnt; queue++)
		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
}

static void stmmac_service_event_schedule(struct stmmac_priv *priv)
{
	if (!test_bit(STMMAC_DOWN, &priv->state) &&
	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
		queue_work(priv->wq, &priv->service_task);
}

static void stmmac_global_err(struct stmmac_priv *priv)
{
	netif_carrier_off(priv->dev);
	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
	stmmac_service_event_schedule(priv);
}

/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *	If a specific clk_csr value is passed from the platform
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and it is fixed (as reported in the driver
 *	documentation). Otherwise, the driver will try to set the MDC
 *	clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
	u32 clk_rate;

	clk_rate = clk_get_rate(priv->plat->stmmac_clk);

	/* Platform provided default clk_csr would be assumed valid
	 * for all other cases except for the below mentioned ones.
	 * For values higher than the IEEE 802.3 specified frequency
	 * we cannot estimate the proper divider as the frequency of
	 * clk_csr_i is not known. So we do not change the default
	 * divider.
	 */
	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
		if (clk_rate < CSR_F_35M)
			priv->clk_csr = STMMAC_CSR_20_35M;
		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
			priv->clk_csr = STMMAC_CSR_35_60M;
		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
			priv->clk_csr = STMMAC_CSR_60_100M;
		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
			priv->clk_csr = STMMAC_CSR_100_150M;
		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
			priv->clk_csr = STMMAC_CSR_150_250M;
		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
			priv->clk_csr = STMMAC_CSR_250_300M;
	}

	if (priv->plat->has_sun8i) {
		if (clk_rate > 160000000)
			priv->clk_csr = 0x03;
		else if (clk_rate > 80000000)
			priv->clk_csr = 0x02;
		else if (clk_rate > 40000000)
			priv->clk_csr = 0x01;
		else
			priv->clk_csr = 0;
	}

	if (priv->plat->has_xgmac) {
		if (clk_rate > 400000000)
			priv->clk_csr = 0x5;
		else if (clk_rate > 350000000)
			priv->clk_csr = 0x4;
		else if (clk_rate > 300000000)
			priv->clk_csr = 0x3;
		else if (clk_rate > 250000000)
			priv->clk_csr = 0x2;
		else if (clk_rate > 150000000)
			priv->clk_csr = 0x1;
		else
			priv->clk_csr = 0x0;
	}
}
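/* Illustrative note: in the non-fixed case handled above, a 75 MHz csr clock
 * falls in the 60-100 MHz band and selects STMMAC_CSR_60_100M; the has_sun8i
 * and has_xgmac branches then override that value on those platforms.
 */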

static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}

static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	u32 avail;

	if (tx_q->dirty_tx > tx_q->cur_tx)
		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
	else
		avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;

	return avail;
}
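/* Illustrative note: the ring keeps one slot unused, so with cur_tx = 500 and
 * dirty_tx = 10 the wrap branch above yields
 * avail = DMA_TX_SIZE - 500 + 10 - 1 free descriptors.
 */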

/**
 * stmmac_rx_dirty - Get RX queue dirty
 * @priv: driver private structure
 * @queue: RX queue index
 */
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	u32 dirty;

	if (rx_q->dirty_rx <= rx_q->cur_rx)
		dirty = rx_q->cur_rx - rx_q->dirty_rx;
	else
		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;

	return dirty;
}

/**
 * stmmac_hw_fix_mac_speed - callback for speed selection
 * @priv: driver private structure
 * Description: on some platforms (e.g. ST), some HW system configuration
 * registers have to be set according to the link speed negotiated.
 */
static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
{
	struct net_device *ndev = priv->dev;
	struct phy_device *phydev = ndev->phydev;

	if (likely(priv->plat->fix_mac_speed))
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
}

/**
 * stmmac_enable_eee_mode - check and enter in LPI mode
 * @priv: driver private structure
 * Description: this function checks whether it is possible to enter the LPI
 * mode and, in case of EEE, enters it.
 */
static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* check if all TX queues have the work finished */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		if (tx_q->dirty_tx != tx_q->cur_tx)
			return; /* still unfinished work */
	}

	/* Check and enter in LPI mode */
	if (!priv->tx_path_in_lpi_mode)
		stmmac_set_eee_mode(priv, priv->hw,
				priv->plat->en_tx_lpi_clockgating);
}

/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function disables EEE and exits the LPI state when it is
 * active. It is called from the xmit path.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	stmmac_reset_eee_mode(priv, priv->hw);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @t: timer_list pointer (data hook)
 * Description:
 *  if there is no data transfer and if we are not in LPI state,
 *  then the MAC Transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);

	stmmac_enable_eee_mode(priv);
	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
}

/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts the
 *  related timer.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	struct net_device *ndev = priv->dev;
	int interface = priv->plat->interface;
	bool ret = false;

	if ((interface != PHY_INTERFACE_MODE_MII) &&
	    (interface != PHY_INTERFACE_MODE_GMII) &&
	    !phy_interface_mode_is_rgmii(interface))
		goto out;

	/* Using PCS we cannot deal with the phy registers at this stage
	 * so we do not support extra features like EEE.
	 */
	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
	    (priv->hw->pcs == STMMAC_PCS_RTBI))
		goto out;

	/* MAC core supports the EEE feature. */
	if (priv->dma_cap.eee) {
		int tx_lpi_timer = priv->tx_lpi_timer;

		/* Check if the PHY supports EEE */
		if (phy_init_eee(ndev->phydev, 1)) {
			/* Handle the case where EEE cannot be supported
			 * anymore at run time (for example because the lp
			 * caps have changed).
			 * In that case the driver disables its own timers.
			 */
			mutex_lock(&priv->lock);
			if (priv->eee_active) {
				netdev_dbg(priv->dev, "disable EEE\n");
				del_timer_sync(&priv->eee_ctrl_timer);
				stmmac_set_eee_timer(priv, priv->hw, 0,
						tx_lpi_timer);
			}
			priv->eee_active = 0;
			mutex_unlock(&priv->lock);
			goto out;
		}
		/* Activate the EEE and start timers */
		mutex_lock(&priv->lock);
		if (!priv->eee_active) {
			priv->eee_active = 1;
			timer_setup(&priv->eee_ctrl_timer,
				    stmmac_eee_ctrl_timer, 0);
			mod_timer(&priv->eee_ctrl_timer,
				  STMMAC_LPI_T(eee_timer));

			stmmac_set_eee_timer(priv, priv->hw,
					STMMAC_DEFAULT_LIT_LS, tx_lpi_timer);
		}
		/* Set HW EEE according to the speed */
		stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link);

		ret = true;
		mutex_unlock(&priv->lock);

		netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
	}
out:
	return ret;
}

/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the timestamp from the descriptor, performs some
 * sanity checks and then passes it to the stack.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
				   struct dma_desc *p, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	u64 ns = 0;

	if (!priv->hwts_tx_en)
		return;

	/* exit if skb doesn't support hw tstamp */
	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
		return;

	/* check tx tstamp status */
	if (stmmac_get_tx_timestamp_status(priv, p)) {
		/* get the valid tstamp */
		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);

		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp.hwtstamp = ns_to_ktime(ns);

		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
		/* pass tstamp to stack */
		skb_tstamp_tx(skb, &shhwtstamp);
	}

	return;
}

/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the received packet's timestamp from the descriptor,
 * performs some sanity checks and then passes it to the stack.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
				   struct dma_desc *np, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
	struct dma_desc *desc = p;
	u64 ns = 0;

	if (!priv->hwts_rx_en)
		return;
	/* For GMAC4, the valid timestamp is from CTX next desc. */
	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
		desc = np;

	/* Check if timestamp is available */
	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
		shhwtstamp = skb_hwtstamps(skb);
		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp->hwtstamp = ns_to_ktime(ns);
	} else {
		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
	}
}

/**
 * stmmac_hwtstamp_set - control hardware timestamping.
 * @dev: device pointer.
 * @ifr: An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * Description:
 * This function configures the MAC to enable/disable both outgoing (TX)
 * and incoming (RX) packet time stamping based on user input.
 * Return Value:
 * 0 on success and an appropriate -ve integer on failure.
 */
static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	struct timespec64 now;
	u64 temp = 0;
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;
	u32 sec_inc = 0;
	u32 value = 0;
	bool xmac;

	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;

	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(config)))
		return -EFAULT;

	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config.flags, config.tx_type, config.rx_filter);

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	if (priv->adv_ts) {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* 'xmac' hardware can support Sync, Pdelay_Req and
			 * Pdelay_resp by setting bit14 and bits17/16 to 01
			 * This leaves Delay_Req timestamps out.
			 * Enable all events *and* general purpose message
			 * timestamping
			 */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.AS1 any layer, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.AS1, any layer, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.AS1, any layer, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_NTP_ALL:
		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
		stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0);
	else {
		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
			 tstamp_all | ptp_v2 | ptp_over_ethernet |
			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
			 ts_master_en | snap_type_sel);
		stmmac_config_hw_tstamping(priv, priv->ptpaddr, value);

		/* program Sub Second Increment reg */
		stmmac_config_sub_second_increment(priv,
				priv->ptpaddr, priv->plat->clk_ptp_rate,
				xmac, &sec_inc);
		temp = div_u64(1000000000ULL, sec_inc);

		/* Store sub second increment and flags for later use */
		priv->sub_second_inc = sec_inc;
		priv->systime_flags = value;

		/* calculate default added value:
		 * formula is :
		 * addend = (2^32)/freq_div_ratio;
		 * where, freq_div_ratio = 1e9ns/sec_inc
		 */
		temp = (u64)(temp << 32);
		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
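		/* Illustrative note: with fine correction the timestamping
		 * unit adds 'addend' to a 32-bit accumulator on every
		 * clk_ptp_ref cycle and advances the sub-second register by
		 * sec_inc on each overflow, so the value programmed below is
		 * roughly 2^32 * (1e9 / sec_inc) / clk_ptp_rate.
		 */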
		stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);

		/* initialize system time */
		ktime_get_real_ts64(&now);

		/* lower 32 bits of tv_sec are safe until y2106 */
		stmmac_init_systime(priv, priv->ptpaddr,
				(u32)now.tv_sec, now.tv_nsec);
	}

	memcpy(&priv->tstamp_config, &config, sizeof(config));

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}

/**
 * stmmac_hwtstamp_get - read hardware timestamping.
 * @dev: device pointer.
 * @ifr: An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * Description:
 * This function obtains the current hardware timestamping settings
 * as requested.
 */
static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config *config = &priv->tstamp_config;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	return copy_to_user(ifr->ifr_data, config,
			    sizeof(*config)) ? -EFAULT : 0;
}
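/* Illustrative note: user space reaches stmmac_hwtstamp_set/get through the
 * SIOCSHWTSTAMP/SIOCGHWTSTAMP ioctls with a struct hwtstamp_config, e.g. via
 * tools such as linuxptp's hwstamp_ctl or ptp4l using hardware timestamping.
 */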

/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
 * This is done by looking at the HW cap. register.
 * This function also registers the ptp driver.
 */
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	priv->adv_ts = 0;
	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
	if (xmac && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;
	/* Dwmac 3.x core with extend_desc can support adv_ts */
	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;

	if (priv->dma_cap.time_stamp)
		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");

	if (priv->adv_ts)
		netdev_info(priv->dev,
			    "IEEE 1588-2008 Advanced Timestamp supported\n");

	priv->hwts_tx_en = 0;
	priv->hwts_rx_en = 0;

	stmmac_ptp_register(priv);

	return 0;
}

static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	if (priv->plat->clk_ptp_ref)
		clk_disable_unprepare(priv->plat->clk_ptp_ref);
	stmmac_ptp_unregister(priv);
}

/**
 * stmmac_mac_flow_ctrl - Configure flow control in all queues
 * @priv: driver private structure
 * Description: It is used for configuring the flow control in all queues
 */
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;

	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
			priv->pause, tx_cnt);
}

/**
 * stmmac_adjust_link - adjusts the link parameters
 * @dev: net device structure
 * Description: this is the helper called by the physical abstraction layer
 * drivers to communicate the phy link status. According to the speed and
 * duplex, this driver can invoke registered glue-logic as well.
 * It also invokes the EEE initialization because it could happen when
 * switching between different networks (that are EEE capable).
 */
static void stmmac_adjust_link(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	bool new_state = false;

	if (!phydev)
		return;

	mutex_lock(&priv->lock);

	if (phydev->link) {
		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode.
		 */
		if (phydev->duplex != priv->oldduplex) {
			new_state = true;
			if (!phydev->duplex)
				ctrl &= ~priv->hw->link.duplex;
			else
				ctrl |= priv->hw->link.duplex;
			priv->oldduplex = phydev->duplex;
		}
		/* Flow Control operation */
		if (phydev->pause)
			stmmac_mac_flow_ctrl(priv, phydev->duplex);

		if (phydev->speed != priv->speed) {
			new_state = true;
			ctrl &= ~priv->hw->link.speed_mask;
			switch (phydev->speed) {
			case SPEED_1000:
				ctrl |= priv->hw->link.speed1000;
				break;
			case SPEED_100:
				ctrl |= priv->hw->link.speed100;
				break;
			case SPEED_10:
				ctrl |= priv->hw->link.speed10;
				break;
			default:
				netif_warn(priv, link, priv->dev,
					   "broken speed: %d\n", phydev->speed);
				phydev->speed = SPEED_UNKNOWN;
				break;
			}
			if (phydev->speed != SPEED_UNKNOWN)
				stmmac_hw_fix_mac_speed(priv);
			priv->speed = phydev->speed;
		}

		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

		if (!priv->oldlink) {
			new_state = true;
			priv->oldlink = true;
		}
	} else if (priv->oldlink) {
		new_state = true;
		priv->oldlink = false;
		priv->speed = SPEED_UNKNOWN;
		priv->oldduplex = DUPLEX_UNKNOWN;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	mutex_unlock(&priv->lock);

	if (phydev->is_pseudo_fixed_link)
		/* Stop PHY layer to call the hook to adjust the link in case
		 * a switch is attached to the stmmac driver.
		 */
		phydev->irq = PHY_IGNORE_INTERRUPT;
	else
		/* At this stage, init the EEE if supported.
		 * Never called in case of fixed_link.
		 */
		priv->eee_enabled = stmmac_eee_init(priv);
}

/**
 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PCS.
 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
 * configured for the TBI, RTBI, or SGMII PHY interface.
 */
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
	int interface = priv->plat->interface;

	if (priv->dma_cap.pcs) {
		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_RGMII;
		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_SGMII;
		}
	}
}

/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 * Return value:
 * 0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	struct phy_device *phydev;
	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
	char bus_id[MII_BUS_ID_SIZE];
	int interface = priv->plat->interface;
	int max_speed = priv->plat->max_speed;

	priv->oldlink = false;
	priv->speed = SPEED_UNKNOWN;
	priv->oldduplex = DUPLEX_UNKNOWN;

	if (priv->plat->phy_node) {
		phydev = of_phy_connect(dev, priv->plat->phy_node,
					&stmmac_adjust_link, 0, interface);
	} else {
		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
			 priv->plat->bus_id);

		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
			 priv->plat->phy_addr);
		netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
			   phy_id_fmt);

		phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
				     interface);
	}

	if (IS_ERR_OR_NULL(phydev)) {
		netdev_err(priv->dev, "Could not attach to PHY\n");
		if (!phydev)
			return -ENODEV;

		return PTR_ERR(phydev);
	}

	/* Stop Advertising 1000BASE Capability if interface is not GMII */
	if ((interface == PHY_INTERFACE_MODE_MII) ||
	    (interface == PHY_INTERFACE_MODE_RMII) ||
	    (max_speed < 1000 && max_speed > 0))
		phy_set_max_speed(phydev, SPEED_100);

	/*
	 * Half-duplex mode is not supported with multiqueue:
	 * half-duplex can only work with a single queue.
	 */
	if (tx_cnt > 1) {
		phy_remove_link_mode(phydev,
				     ETHTOOL_LINK_MODE_10baseT_Half_BIT);
		phy_remove_link_mode(phydev,
				     ETHTOOL_LINK_MODE_100baseT_Half_BIT);
		phy_remove_link_mode(phydev,
				     ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
	}

	/*
	 * Broken HW is sometimes missing the pull-up resistor on the
	 * MDIO line, which results in reads to non-existent devices returning
	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
	 * device as well.
	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
	 */
	if (!priv->plat->phy_node && phydev->phy_id == 0) {
		phy_disconnect(phydev);
		return -ENODEV;
	}

	/* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
	 * subsequent PHY polling, make sure we force a link transition if
	 * we have a UP/DOWN/UP transition
	 */
	if (phydev->is_pseudo_fixed_link)
		phydev->irq = PHY_POLL;

	phy_attached_info(phydev);
	return 0;
}

static void stmmac_display_rx_rings(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	void *head_rx;
	u32 queue;

	/* Display RX rings */
	for (queue = 0; queue < rx_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		pr_info("\tRX Queue %u rings\n", queue);

		if (priv->extend_desc)
			head_rx = (void *)rx_q->dma_erx;
		else
			head_rx = (void *)rx_q->dma_rx;

		/* Display RX ring */
		stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
	}
}

static void stmmac_display_tx_rings(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	void *head_tx;
	u32 queue;

	/* Display TX rings */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		pr_info("\tTX Queue %d rings\n", queue);

		if (priv->extend_desc)
			head_tx = (void *)tx_q->dma_etx;
		else
			head_tx = (void *)tx_q->dma_tx;

		stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
	}
}

static void stmmac_display_rings(struct stmmac_priv *priv)
{
	/* Display RX ring */
	stmmac_display_rx_rings(priv);

	/* Display TX ring */
	stmmac_display_tx_rings(priv);
}

static int stmmac_set_bfsize(int mtu, int bufsize)
{
	int ret = bufsize;

	if (mtu >= BUF_SIZE_4KiB)
		ret = BUF_SIZE_8KiB;
	else if (mtu >= BUF_SIZE_2KiB)
		ret = BUF_SIZE_4KiB;
	else if (mtu > DEFAULT_BUFSIZE)
		ret = BUF_SIZE_2KiB;
	else
		ret = DEFAULT_BUFSIZE;

	return ret;
}
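/* Illustrative note: e.g. an MTU of 3000 bytes (>= BUF_SIZE_2KiB, below
 * BUF_SIZE_4KiB) maps to BUF_SIZE_4KiB buffers, while the default 1500-byte
 * MTU keeps DEFAULT_BUFSIZE.
 */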
1121
32ceabca 1122/**
71fedb01 1123 * stmmac_clear_rx_descriptors - clear RX descriptors
32ceabca 1124 * @priv: driver private structure
54139cf3 1125 * @queue: RX queue index
71fedb01 1126 * Description: this function is called to clear the RX descriptors
32ceabca
GC
1127 * in case of both basic and extended descriptors are used.
1128 */
54139cf3 1129static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
c24602ef 1130{
54139cf3 1131 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
5bacd778 1132 int i;
c24602ef 1133
71fedb01 1134 /* Clear the RX descriptors */
e3ad57c9 1135 for (i = 0; i < DMA_RX_SIZE; i++)
c24602ef 1136 if (priv->extend_desc)
42de047d
JA
1137 stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1138 priv->use_riwt, priv->mode,
1139 (i == DMA_RX_SIZE - 1));
c24602ef 1140 else
42de047d
JA
1141 stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1142 priv->use_riwt, priv->mode,
1143 (i == DMA_RX_SIZE - 1));
71fedb01
JP
1144}
1145
1146/**
1147 * stmmac_clear_tx_descriptors - clear tx descriptors
1148 * @priv: driver private structure
ce736788 1149 * @queue: TX queue index.
71fedb01
JP
1150 * Description: this function is called to clear the TX descriptors
1151 * in case of both basic and extended descriptors are used.
1152 */
ce736788 1153static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
71fedb01 1154{
ce736788 1155 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
71fedb01
JP
1156 int i;
1157
1158 /* Clear the TX descriptors */
e3ad57c9 1159 for (i = 0; i < DMA_TX_SIZE; i++)
c24602ef 1160 if (priv->extend_desc)
42de047d
JA
1161 stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1162 priv->mode, (i == DMA_TX_SIZE - 1));
c24602ef 1163 else
42de047d
JA
1164 stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1165 priv->mode, (i == DMA_TX_SIZE - 1));
c24602ef
GC
1166}
1167
71fedb01
JP
1168/**
1169 * stmmac_clear_descriptors - clear descriptors
1170 * @priv: driver private structure
1171 * Description: this function is called to clear the TX and RX descriptors
1172 * in case of both basic and extended descriptors are used.
1173 */
1174static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1175{
54139cf3 1176 u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
ce736788 1177 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
54139cf3
JP
1178 u32 queue;
1179
71fedb01 1180 /* Clear the RX descriptors */
54139cf3
JP
1181 for (queue = 0; queue < rx_queue_cnt; queue++)
1182 stmmac_clear_rx_descriptors(priv, queue);
71fedb01
JP
1183
1184 /* Clear the TX descriptors */
ce736788
JP
1185 for (queue = 0; queue < tx_queue_cnt; queue++)
1186 stmmac_clear_tx_descriptors(priv, queue);
71fedb01
JP
1187}
1188
732fdf0e
GC
1189/**
1190 * stmmac_init_rx_buffers - init the RX descriptor buffer.
1191 * @priv: driver private structure
1192 * @p: descriptor pointer
1193 * @i: descriptor index
54139cf3
JP
1194 * @flags: gfp flag
1195 * @queue: RX queue index
732fdf0e
GC
1196 * Description: this function is called to allocate a receive buffer, perform
1197 * the DMA mapping and init the descriptor.
1198 */
c24602ef 1199static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
54139cf3 1200 int i, gfp_t flags, u32 queue)
c24602ef 1201{
54139cf3 1202 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
c24602ef
GC
1203 struct sk_buff *skb;
1204
4ec49a37 1205 skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
56329137 1206 if (!skb) {
38ddc59d
LC
1207 netdev_err(priv->dev,
1208 "%s: Rx init fails; skb is NULL\n", __func__);
56329137 1209 return -ENOMEM;
c24602ef 1210 }
54139cf3
JP
1211 rx_q->rx_skbuff[i] = skb;
1212 rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
c24602ef
GC
1213 priv->dma_buf_sz,
1214 DMA_FROM_DEVICE);
54139cf3 1215 if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
38ddc59d 1216 netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
56329137
BZ
1217 dev_kfree_skb_any(skb);
1218 return -EINVAL;
1219 }
c24602ef 1220
6844171d 1221 stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]);
c24602ef 1222
2c520b1c
JA
1223 if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1224 stmmac_init_desc3(priv, p);
c24602ef
GC
1225
1226 return 0;
1227}
1228
71fedb01
JP
1229/**
1230 * stmmac_free_rx_buffer - free RX dma buffers
1231 * @priv: private structure
54139cf3 1232 * @queue: RX queue index
71fedb01
JP
1233 * @i: buffer index.
1234 */
54139cf3 1235static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
56329137 1236{
54139cf3
JP
1237 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1238
1239 if (rx_q->rx_skbuff[i]) {
1240 dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
56329137 1241 priv->dma_buf_sz, DMA_FROM_DEVICE);
54139cf3 1242 dev_kfree_skb_any(rx_q->rx_skbuff[i]);
aff3d9ef 1243 }
54139cf3 1244 rx_q->rx_skbuff[i] = NULL;
aff3d9ef
JP
1245}
1246
1247/**
71fedb01
JP
1248 * stmmac_free_tx_buffer - free RX dma buffers
1249 * @priv: private structure
ce736788 1250 * @queue: RX queue index
71fedb01
JP
1251 * @i: buffer index.
1252 */
ce736788 1253static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
71fedb01 1254{
ce736788
JP
1255 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1256
1257 if (tx_q->tx_skbuff_dma[i].buf) {
1258 if (tx_q->tx_skbuff_dma[i].map_as_page)
71fedb01 1259 dma_unmap_page(priv->device,
ce736788
JP
1260 tx_q->tx_skbuff_dma[i].buf,
1261 tx_q->tx_skbuff_dma[i].len,
71fedb01
JP
1262 DMA_TO_DEVICE);
1263 else
1264 dma_unmap_single(priv->device,
ce736788
JP
1265 tx_q->tx_skbuff_dma[i].buf,
1266 tx_q->tx_skbuff_dma[i].len,
71fedb01
JP
1267 DMA_TO_DEVICE);
1268 }
1269
ce736788
JP
1270 if (tx_q->tx_skbuff[i]) {
1271 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1272 tx_q->tx_skbuff[i] = NULL;
1273 tx_q->tx_skbuff_dma[i].buf = 0;
1274 tx_q->tx_skbuff_dma[i].map_as_page = false;
71fedb01
JP
1275 }
1276}
1277
1278/**
1279 * init_dma_rx_desc_rings - init the RX descriptor rings
47dd7a54 1280 * @dev: net device structure
732fdf0e 1281 * @flags: gfp flag.
71fedb01 1282 * Description: this function initializes the DMA RX descriptors
5bacd778 1283 * and allocates the socket buffers. It supports the chained and ring
286a8372 1284 * modes.
47dd7a54 1285 */
71fedb01 1286static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
47dd7a54 1287{
47dd7a54 1288 struct stmmac_priv *priv = netdev_priv(dev);
54139cf3 1289 u32 rx_count = priv->plat->rx_queues_to_use;
56329137 1290 int ret = -ENOMEM;
2c520b1c 1291 int bfsize = 0;
1d3028f4 1292 int queue;
54139cf3 1293 int i;
47dd7a54 1294
2c520b1c
JA
1295 bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
1296 if (bfsize < 0)
1297 bfsize = 0;
286a8372 1298
4a7d666a 1299 if (bfsize < BUF_SIZE_16KiB)
286a8372 1300 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
47dd7a54 1301
2618abb7
VB
1302 priv->dma_buf_sz = bfsize;
1303
54139cf3 1304 /* RX INITIALIZATION */
b3e51069
LC
1305 netif_dbg(priv, probe, priv->dev,
1306 "SKB addresses:\nskb\t\tskb data\tdma data\n");
47dd7a54 1307
54139cf3
JP
1308 for (queue = 0; queue < rx_count; queue++) {
1309 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
c24602ef 1310
54139cf3
JP
1311 netif_dbg(priv, probe, priv->dev,
1312 "(%s) dma_rx_phy=0x%08x\n", __func__,
1313 (u32)rx_q->dma_rx_phy);
f748be53 1314
54139cf3
JP
1315 for (i = 0; i < DMA_RX_SIZE; i++) {
1316 struct dma_desc *p;
aff3d9ef 1317
54139cf3
JP
1318 if (priv->extend_desc)
1319 p = &((rx_q->dma_erx + i)->basic);
1320 else
1321 p = rx_q->dma_rx + i;
1322
1323 ret = stmmac_init_rx_buffers(priv, p, i, flags,
1324 queue);
1325 if (ret)
1326 goto err_init_rx_buffers;
1327
1328 netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1329 rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
1330 (unsigned int)rx_q->rx_skbuff_dma[i]);
1331 }
1332
1333 rx_q->cur_rx = 0;
1334 rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1335
1336 stmmac_clear_rx_descriptors(priv, queue);
1337
1338 /* Setup the chained descriptor addresses */
1339 if (priv->mode == STMMAC_CHAIN_MODE) {
1340 if (priv->extend_desc)
2c520b1c
JA
1341 stmmac_mode_init(priv, rx_q->dma_erx,
1342 rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
54139cf3 1343 else
2c520b1c
JA
1344 stmmac_mode_init(priv, rx_q->dma_rx,
1345 rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
54139cf3 1346 }
71fedb01
JP
1347 }
1348
54139cf3
JP
1349 buf_sz = bfsize;
1350
71fedb01 1351 return 0;
54139cf3 1352
71fedb01 1353err_init_rx_buffers:
54139cf3
JP
1354 while (queue >= 0) {
1355 while (--i >= 0)
1356 stmmac_free_rx_buffer(priv, queue, i);
1357
1358 if (queue == 0)
1359 break;
1360
1361 i = DMA_RX_SIZE;
1362 queue--;
1363 }
1364
71fedb01
JP
1365 return ret;
1366}
1367
1368/**
1369 * init_dma_tx_desc_rings - init the TX descriptor rings
1370 * @dev: net device structure.
1371 * Description: this function initializes the DMA TX descriptors
1372 * and allocates the socket buffers. It supports the chained and ring
1373 * modes.
1374 */
1375static int init_dma_tx_desc_rings(struct net_device *dev)
1376{
1377 struct stmmac_priv *priv = netdev_priv(dev);
ce736788
JP
1378 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1379 u32 queue;
71fedb01
JP
1380 int i;
1381
ce736788
JP
1382 for (queue = 0; queue < tx_queue_cnt; queue++) {
1383 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
71fedb01 1384
ce736788
JP
1385 netif_dbg(priv, probe, priv->dev,
1386 "(%s) dma_tx_phy=0x%08x\n", __func__,
1387 (u32)tx_q->dma_tx_phy);
f748be53 1388
ce736788
JP
1389 /* Setup the chained descriptor addresses */
1390 if (priv->mode == STMMAC_CHAIN_MODE) {
1391 if (priv->extend_desc)
2c520b1c
JA
1392 stmmac_mode_init(priv, tx_q->dma_etx,
1393 tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
ce736788 1394 else
2c520b1c
JA
1395 stmmac_mode_init(priv, tx_q->dma_tx,
1396 tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
ce736788 1397 }
aff3d9ef 1398
ce736788
JP
1399 for (i = 0; i < DMA_TX_SIZE; i++) {
1400 struct dma_desc *p;
ce736788
JP
1401 if (priv->extend_desc)
1402 p = &((tx_q->dma_etx + i)->basic);
1403 else
1404 p = tx_q->dma_tx + i;
1405
44c67f85 1406 stmmac_clear_desc(priv, p);
ce736788
JP
1407
1408 tx_q->tx_skbuff_dma[i].buf = 0;
1409 tx_q->tx_skbuff_dma[i].map_as_page = false;
1410 tx_q->tx_skbuff_dma[i].len = 0;
1411 tx_q->tx_skbuff_dma[i].last_segment = false;
1412 tx_q->tx_skbuff[i] = NULL;
5bacd778 1413 }
aff3d9ef 1414
ce736788
JP
1415 tx_q->dirty_tx = 0;
1416 tx_q->cur_tx = 0;
8d212a9e 1417 tx_q->mss = 0;
286a8372 1418
c22a3f48
JP
1419 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1420 }
aff3d9ef 1421
71fedb01
JP
1422 return 0;
1423}
1424
1425/**
1426 * init_dma_desc_rings - init the RX/TX descriptor rings
1427 * @dev: net device structure
1428 * @flags: gfp flag.
1429 * Description: this function initializes the DMA RX/TX descriptors
1430 * and allocates the socket buffers. It supports the chained and ring
1431 * modes.
1432 */
1433static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1434{
1435 struct stmmac_priv *priv = netdev_priv(dev);
1436 int ret;
1437
1438 ret = init_dma_rx_desc_rings(dev, flags);
1439 if (ret)
1440 return ret;
1441
1442 ret = init_dma_tx_desc_rings(dev);
1443
5bacd778 1444 stmmac_clear_descriptors(priv);
47dd7a54 1445
c24602ef
GC
1446 if (netif_msg_hw(priv))
1447 stmmac_display_rings(priv);
56329137 1448
56329137 1449 return ret;
47dd7a54
GC
1450}
1451
71fedb01
JP
1452/**
1453 * dma_free_rx_skbufs - free RX dma buffers
1454 * @priv: private structure
54139cf3 1455 * @queue: RX queue index
71fedb01 1456 */
54139cf3 1457static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
47dd7a54
GC
1458{
1459 int i;
1460
e3ad57c9 1461 for (i = 0; i < DMA_RX_SIZE; i++)
54139cf3 1462 stmmac_free_rx_buffer(priv, queue, i);
47dd7a54
GC
1463}
1464
71fedb01
JP
1465/**
1466 * dma_free_tx_skbufs - free TX dma buffers
1467 * @priv: private structure
ce736788 1468 * @queue: TX queue index
71fedb01 1469 */
ce736788 1470static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
47dd7a54
GC
1471{
1472 int i;
1473
71fedb01 1474 for (i = 0; i < DMA_TX_SIZE; i++)
ce736788 1475 stmmac_free_tx_buffer(priv, queue, i);
47dd7a54
GC
1476}
1477
54139cf3
JP
1478/**
1479 * free_dma_rx_desc_resources - free RX dma desc resources
1480 * @priv: private structure
1481 */
1482static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1483{
1484 u32 rx_count = priv->plat->rx_queues_to_use;
1485 u32 queue;
1486
1487 /* Free RX queue resources */
1488 for (queue = 0; queue < rx_count; queue++) {
1489 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1490
1491 /* Release the DMA RX socket buffers */
1492 dma_free_rx_skbufs(priv, queue);
1493
1494 /* Free DMA regions of consistent memory previously allocated */
1495 if (!priv->extend_desc)
1496 dma_free_coherent(priv->device,
1497 DMA_RX_SIZE * sizeof(struct dma_desc),
1498 rx_q->dma_rx, rx_q->dma_rx_phy);
1499 else
1500 dma_free_coherent(priv->device, DMA_RX_SIZE *
1501 sizeof(struct dma_extended_desc),
1502 rx_q->dma_erx, rx_q->dma_rx_phy);
1503
1504 kfree(rx_q->rx_skbuff_dma);
1505 kfree(rx_q->rx_skbuff);
1506 }
1507}
1508
ce736788
JP
1509/**
1510 * free_dma_tx_desc_resources - free TX dma desc resources
1511 * @priv: private structure
1512 */
1513static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1514{
1515 u32 tx_count = priv->plat->tx_queues_to_use;
62242260 1516 u32 queue;
ce736788
JP
1517
1518 /* Free TX queue resources */
1519 for (queue = 0; queue < tx_count; queue++) {
1520 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1521
1522 /* Release the DMA TX socket buffers */
1523 dma_free_tx_skbufs(priv, queue);
1524
1525 /* Free DMA regions of consistent memory previously allocated */
1526 if (!priv->extend_desc)
1527 dma_free_coherent(priv->device,
1528 DMA_TX_SIZE * sizeof(struct dma_desc),
1529 tx_q->dma_tx, tx_q->dma_tx_phy);
1530 else
1531 dma_free_coherent(priv->device, DMA_TX_SIZE *
1532 sizeof(struct dma_extended_desc),
1533 tx_q->dma_etx, tx_q->dma_tx_phy);
1534
1535 kfree(tx_q->tx_skbuff_dma);
1536 kfree(tx_q->tx_skbuff);
1537 }
1538}
1539
732fdf0e 1540/**
71fedb01 1541 * alloc_dma_rx_desc_resources - alloc RX resources.
732fdf0e
GC
1542 * @priv: private structure
1543 * Description: according to which descriptor can be used (extend or basic)
5bacd778
LC
1544 * this function allocates the resources for TX and RX paths. In case of
1545 * reception, for example, it pre-allocated the RX socket buffer in order to
1546 * allow zero-copy mechanism.
732fdf0e 1547 */
71fedb01 1548static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
09f8d696 1549{
54139cf3 1550 u32 rx_count = priv->plat->rx_queues_to_use;
09f8d696 1551 int ret = -ENOMEM;
54139cf3 1552 u32 queue;
09f8d696 1553
54139cf3
JP
1554 /* RX queues buffers and DMA */
1555 for (queue = 0; queue < rx_count; queue++) {
1556 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
09f8d696 1557
54139cf3
JP
1558 rx_q->queue_index = queue;
1559 rx_q->priv_data = priv;
5bacd778 1560
54139cf3
JP
1561 rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1562 sizeof(dma_addr_t),
71fedb01 1563 GFP_KERNEL);
54139cf3 1564 if (!rx_q->rx_skbuff_dma)
63c3aa6b 1565 goto err_dma;
71fedb01 1566
54139cf3
JP
1567 rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1568 sizeof(struct sk_buff *),
1569 GFP_KERNEL);
1570 if (!rx_q->rx_skbuff)
71fedb01 1571 goto err_dma;
54139cf3
JP
1572
1573 if (priv->extend_desc) {
750afb08
LC
1574 rx_q->dma_erx = dma_alloc_coherent(priv->device,
1575 DMA_RX_SIZE * sizeof(struct dma_extended_desc),
1576 &rx_q->dma_rx_phy,
1577 GFP_KERNEL);
54139cf3
JP
1578 if (!rx_q->dma_erx)
1579 goto err_dma;
1580
1581 } else {
750afb08
LC
1582 rx_q->dma_rx = dma_alloc_coherent(priv->device,
1583 DMA_RX_SIZE * sizeof(struct dma_desc),
1584 &rx_q->dma_rx_phy,
1585 GFP_KERNEL);
54139cf3
JP
1586 if (!rx_q->dma_rx)
1587 goto err_dma;
1588 }
71fedb01
JP
1589 }
1590
1591 return 0;
1592
1593err_dma:
54139cf3
JP
1594 free_dma_rx_desc_resources(priv);
1595
71fedb01
JP
1596 return ret;
1597}
1598
1599/**
1600 * alloc_dma_tx_desc_resources - alloc TX resources.
1601 * @priv: private structure
1602 * Description: according to which descriptor can be used (extend or basic)
1603 * this function allocates the resources for TX and RX paths. In case of
1604 * reception, for example, it pre-allocated the RX socket buffer in order to
1605 * allow zero-copy mechanism.
1606 */
1607static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1608{
ce736788 1609 u32 tx_count = priv->plat->tx_queues_to_use;
71fedb01 1610 int ret = -ENOMEM;
ce736788 1611 u32 queue;
71fedb01 1612
ce736788
JP
1613 /* TX queues buffers and DMA */
1614 for (queue = 0; queue < tx_count; queue++) {
1615 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
5bacd778 1616
ce736788
JP
1617 tx_q->queue_index = queue;
1618 tx_q->priv_data = priv;
5bacd778 1619
ce736788
JP
1620 tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1621 sizeof(*tx_q->tx_skbuff_dma),
5bacd778 1622 GFP_KERNEL);
ce736788 1623 if (!tx_q->tx_skbuff_dma)
62242260 1624 goto err_dma;
ce736788
JP
1625
1626 tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1627 sizeof(struct sk_buff *),
1628 GFP_KERNEL);
1629 if (!tx_q->tx_skbuff)
62242260 1630 goto err_dma;
ce736788
JP
1631
1632 if (priv->extend_desc) {
750afb08
LC
1633 tx_q->dma_etx = dma_alloc_coherent(priv->device,
1634 DMA_TX_SIZE * sizeof(struct dma_extended_desc),
1635 &tx_q->dma_tx_phy,
1636 GFP_KERNEL);
ce736788 1637 if (!tx_q->dma_etx)
62242260 1638 goto err_dma;
ce736788 1639 } else {
750afb08
LC
1640 tx_q->dma_tx = dma_alloc_coherent(priv->device,
1641 DMA_TX_SIZE * sizeof(struct dma_desc),
1642 &tx_q->dma_tx_phy,
1643 GFP_KERNEL);
ce736788 1644 if (!tx_q->dma_tx)
62242260 1645 goto err_dma;
ce736788 1646 }
09f8d696
SK
1647 }
1648
1649 return 0;
1650
62242260 1651err_dma:
ce736788
JP
1652 free_dma_tx_desc_resources(priv);
1653
09f8d696
SK
1654 return ret;
1655}
1656
71fedb01
JP
1657/**
1658 * alloc_dma_desc_resources - alloc TX/RX resources.
1659 * @priv: private structure
1660 * Description: according to which descriptor can be used (extend or basic)
1661 * this function allocates the resources for TX and RX paths. In case of
 1662 * reception, for example, it pre-allocates the RX socket buffers in order to
1663 * allow zero-copy mechanism.
1664 */
1665static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1666{
54139cf3 1667 /* RX Allocation */
71fedb01
JP
1668 int ret = alloc_dma_rx_desc_resources(priv);
1669
1670 if (ret)
1671 return ret;
1672
1673 ret = alloc_dma_tx_desc_resources(priv);
1674
1675 return ret;
1676}
1677
71fedb01
JP
1678/**
1679 * free_dma_desc_resources - free dma desc resources
1680 * @priv: private structure
1681 */
1682static void free_dma_desc_resources(struct stmmac_priv *priv)
1683{
1684 /* Release the DMA RX socket buffers */
1685 free_dma_rx_desc_resources(priv);
1686
1687 /* Release the DMA TX socket buffers */
1688 free_dma_tx_desc_resources(priv);
1689}
1690
9eb12474 1691/**
1692 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
1693 * @priv: driver private structure
1694 * Description: It is used for enabling the rx queues in the MAC
1695 */
1696static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1697{
4f6046f5
JP
1698 u32 rx_queues_count = priv->plat->rx_queues_to_use;
1699 int queue;
1700 u8 mode;
9eb12474 1701
4f6046f5
JP
1702 for (queue = 0; queue < rx_queues_count; queue++) {
1703 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
c10d4c82 1704 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
4f6046f5 1705 }
9eb12474 1706}
1707
ae4f0d46
JP
1708/**
1709 * stmmac_start_rx_dma - start RX DMA channel
1710 * @priv: driver private structure
1711 * @chan: RX channel index
1712 * Description:
1713 * This starts a RX DMA channel
1714 */
1715static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1716{
1717 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
a4e887fa 1718 stmmac_start_rx(priv, priv->ioaddr, chan);
ae4f0d46
JP
1719}
1720
1721/**
1722 * stmmac_start_tx_dma - start TX DMA channel
1723 * @priv: driver private structure
1724 * @chan: TX channel index
1725 * Description:
1726 * This starts a TX DMA channel
1727 */
1728static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1729{
1730 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
a4e887fa 1731 stmmac_start_tx(priv, priv->ioaddr, chan);
ae4f0d46
JP
1732}
1733
1734/**
1735 * stmmac_stop_rx_dma - stop RX DMA channel
1736 * @priv: driver private structure
1737 * @chan: RX channel index
1738 * Description:
1739 * This stops a RX DMA channel
1740 */
1741static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1742{
1743 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
a4e887fa 1744 stmmac_stop_rx(priv, priv->ioaddr, chan);
ae4f0d46
JP
1745}
1746
1747/**
1748 * stmmac_stop_tx_dma - stop TX DMA channel
1749 * @priv: driver private structure
1750 * @chan: TX channel index
1751 * Description:
1752 * This stops a TX DMA channel
1753 */
1754static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1755{
1756 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
a4e887fa 1757 stmmac_stop_tx(priv, priv->ioaddr, chan);
ae4f0d46
JP
1758}
1759
1760/**
1761 * stmmac_start_all_dma - start all RX and TX DMA channels
1762 * @priv: driver private structure
1763 * Description:
1764 * This starts all the RX and TX DMA channels
1765 */
1766static void stmmac_start_all_dma(struct stmmac_priv *priv)
1767{
1768 u32 rx_channels_count = priv->plat->rx_queues_to_use;
1769 u32 tx_channels_count = priv->plat->tx_queues_to_use;
1770 u32 chan = 0;
1771
1772 for (chan = 0; chan < rx_channels_count; chan++)
1773 stmmac_start_rx_dma(priv, chan);
1774
1775 for (chan = 0; chan < tx_channels_count; chan++)
1776 stmmac_start_tx_dma(priv, chan);
1777}
1778
1779/**
1780 * stmmac_stop_all_dma - stop all RX and TX DMA channels
1781 * @priv: driver private structure
1782 * Description:
1783 * This stops the RX and TX DMA channels
1784 */
1785static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1786{
1787 u32 rx_channels_count = priv->plat->rx_queues_to_use;
1788 u32 tx_channels_count = priv->plat->tx_queues_to_use;
1789 u32 chan = 0;
1790
1791 for (chan = 0; chan < rx_channels_count; chan++)
1792 stmmac_stop_rx_dma(priv, chan);
1793
1794 for (chan = 0; chan < tx_channels_count; chan++)
1795 stmmac_stop_tx_dma(priv, chan);
1796}
1797
47dd7a54
GC
1798/**
1799 * stmmac_dma_operation_mode - HW DMA operation mode
32ceabca 1800 * @priv: driver private structure
732fdf0e
GC
1801 * Description: it is used for configuring the DMA operation mode register in
1802 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
47dd7a54
GC
1803 */
1804static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1805{
6deee222
JP
1806 u32 rx_channels_count = priv->plat->rx_queues_to_use;
1807 u32 tx_channels_count = priv->plat->tx_queues_to_use;
f88203a2 1808 int rxfifosz = priv->plat->rx_fifo_size;
52a76235 1809 int txfifosz = priv->plat->tx_fifo_size;
6deee222
JP
1810 u32 txmode = 0;
1811 u32 rxmode = 0;
1812 u32 chan = 0;
a0daae13 1813 u8 qmode = 0;
f88203a2 1814
11fbf811
TR
1815 if (rxfifosz == 0)
1816 rxfifosz = priv->dma_cap.rx_fifo_size;
52a76235
JA
1817 if (txfifosz == 0)
1818 txfifosz = priv->dma_cap.tx_fifo_size;
1819
1820 /* Adjust for real per queue fifo size */
1821 rxfifosz /= rx_channels_count;
1822 txfifosz /= tx_channels_count;
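	/* e.g. an 8 KiB RX FIFO shared by four RX queues leaves 2 KiB per queue */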
11fbf811 1823
6deee222
JP
1824 if (priv->plat->force_thresh_dma_mode) {
1825 txmode = tc;
1826 rxmode = tc;
1827 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
61b8013a
SK
1828 /*
1829 * In case of GMAC, SF mode can be enabled
1830 * to perform the TX COE in HW. This depends on:
ebbb293f
GC
 1831 * 1) TX COE is actually supported
1832 * 2) There is no bugged Jumbo frame support
1833 * that needs to not insert csum in the TDES.
1834 */
6deee222
JP
1835 txmode = SF_DMA_MODE;
1836 rxmode = SF_DMA_MODE;
b2dec116 1837 priv->xstats.threshold = SF_DMA_MODE;
6deee222
JP
1838 } else {
1839 txmode = tc;
1840 rxmode = SF_DMA_MODE;
1841 }
1842
1843 /* configure all channels */
ab0204e3
JA
1844 for (chan = 0; chan < rx_channels_count; chan++) {
1845 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
a0daae13 1846
ab0204e3
JA
1847 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
1848 rxfifosz, qmode);
4205c88e
JA
1849 stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
1850 chan);
ab0204e3 1851 }
a0daae13 1852
ab0204e3
JA
1853 for (chan = 0; chan < tx_channels_count; chan++) {
1854 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
6deee222 1855
ab0204e3
JA
1856 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
1857 txfifosz, qmode);
6deee222 1858 }
47dd7a54
GC
1859}
1860
47dd7a54 1861/**
732fdf0e 1862 * stmmac_tx_clean - to manage the transmission completion
32ceabca 1863 * @priv: driver private structure
 * @budget: napi budget limiting the packets handled in one call
ce736788 1864 * @queue: TX queue index
732fdf0e 1865 * Description: it reclaims the transmit resources after transmission completes.
47dd7a54 1866 */
8fce3331 1867static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
47dd7a54 1868{
ce736788 1869 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
38979574 1870 unsigned int bytes_compl = 0, pkts_compl = 0;
8fce3331 1871 unsigned int entry, count = 0;
47dd7a54 1872
8fce3331 1873 __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
a9097a96 1874
9125cdd1
GC
1875 priv->xstats.tx_clean++;
1876
8d5f4b07 1877 entry = tx_q->dirty_tx;
8fce3331 1878 while ((entry != tx_q->cur_tx) && (count < budget)) {
ce736788 1879 struct sk_buff *skb = tx_q->tx_skbuff[entry];
c24602ef 1880 struct dma_desc *p;
c363b658 1881 int status;
c24602ef
GC
1882
1883 if (priv->extend_desc)
ce736788 1884 p = (struct dma_desc *)(tx_q->dma_etx + entry);
c24602ef 1885 else
ce736788 1886 p = tx_q->dma_tx + entry;
47dd7a54 1887
42de047d
JA
1888 status = stmmac_tx_status(priv, &priv->dev->stats,
1889 &priv->xstats, p, priv->ioaddr);
c363b658
FG
1890 /* Check if the descriptor is owned by the DMA */
1891 if (unlikely(status & tx_dma_own))
1892 break;
1893
8fce3331
JA
1894 count++;
1895
a6b25da5
NC
1896 /* Make sure descriptor fields are read after reading
1897 * the own bit.
1898 */
1899 dma_rmb();
1900
c363b658
FG
1901 /* Just consider the last segment and ...*/
1902 if (likely(!(status & tx_not_ls))) {
1903 /* ... verify the status error condition */
1904 if (unlikely(status & tx_err)) {
1905 priv->dev->stats.tx_errors++;
1906 } else {
47dd7a54
GC
1907 priv->dev->stats.tx_packets++;
1908 priv->xstats.tx_pkt_n++;
c363b658 1909 }
ba1ffd74 1910 stmmac_get_tx_hwtstamp(priv, p, skb);
47dd7a54 1911 }
47dd7a54 1912
ce736788
JP
1913 if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1914 if (tx_q->tx_skbuff_dma[entry].map_as_page)
362b37be 1915 dma_unmap_page(priv->device,
ce736788
JP
1916 tx_q->tx_skbuff_dma[entry].buf,
1917 tx_q->tx_skbuff_dma[entry].len,
362b37be
GC
1918 DMA_TO_DEVICE);
1919 else
1920 dma_unmap_single(priv->device,
ce736788
JP
1921 tx_q->tx_skbuff_dma[entry].buf,
1922 tx_q->tx_skbuff_dma[entry].len,
362b37be 1923 DMA_TO_DEVICE);
ce736788
JP
1924 tx_q->tx_skbuff_dma[entry].buf = 0;
1925 tx_q->tx_skbuff_dma[entry].len = 0;
1926 tx_q->tx_skbuff_dma[entry].map_as_page = false;
cf32deec 1927 }
f748be53 1928
2c520b1c 1929 stmmac_clean_desc3(priv, tx_q, p);
f748be53 1930
ce736788
JP
1931 tx_q->tx_skbuff_dma[entry].last_segment = false;
1932 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
47dd7a54
GC
1933
1934 if (likely(skb != NULL)) {
38979574
BG
1935 pkts_compl++;
1936 bytes_compl += skb->len;
7c565c33 1937 dev_consume_skb_any(skb);
ce736788 1938 tx_q->tx_skbuff[entry] = NULL;
47dd7a54
GC
1939 }
1940
42de047d 1941 stmmac_release_tx_desc(priv, p, priv->mode);
47dd7a54 1942
e3ad57c9 1943 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
47dd7a54 1944 }
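	/* entry now indexes the first descriptor that could not be reclaimed,
	 * either because it is still owned by the DMA or because the NAPI
	 * budget ran out.
	 */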
ce736788 1945 tx_q->dirty_tx = entry;
38979574 1946
c22a3f48
JP
1947 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1948 pkts_compl, bytes_compl);
1949
1950 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1951 queue))) &&
1952 stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
38979574 1953
739c8e14
LS
1954 netif_dbg(priv, tx_done, priv->dev,
1955 "%s: restart transmit\n", __func__);
c22a3f48 1956 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
47dd7a54 1957 }
d765955d
GC
1958
1959 if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1960 stmmac_enable_eee_mode(priv);
f5351ef7 1961 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
d765955d 1962 }
8fce3331 1963
4ccb4585
JA
1964 /* We still have pending packets, let's call for a new scheduling */
1965 if (tx_q->dirty_tx != tx_q->cur_tx)
1966 mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10));
1967
8fce3331
JA
1968 __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
1969
1970 return count;
47dd7a54
GC
1971}
1972
47dd7a54 1973/**
732fdf0e 1974 * stmmac_tx_err - to manage the tx error
32ceabca 1975 * @priv: driver private structure
5bacd778 1976 * @chan: channel index
47dd7a54 1977 * Description: it cleans the descriptors and restarts the transmission
732fdf0e 1978 * in case of transmission errors.
47dd7a54 1979 */
5bacd778 1980static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
47dd7a54 1981{
ce736788 1982 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
c24602ef 1983 int i;
ce736788 1984
c22a3f48 1985 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
47dd7a54 1986
ae4f0d46 1987 stmmac_stop_tx_dma(priv, chan);
ce736788 1988 dma_free_tx_skbufs(priv, chan);
e3ad57c9 1989 for (i = 0; i < DMA_TX_SIZE; i++)
c24602ef 1990 if (priv->extend_desc)
42de047d
JA
1991 stmmac_init_tx_desc(priv, &tx_q->dma_etx[i].basic,
1992 priv->mode, (i == DMA_TX_SIZE - 1));
c24602ef 1993 else
42de047d
JA
1994 stmmac_init_tx_desc(priv, &tx_q->dma_tx[i],
1995 priv->mode, (i == DMA_TX_SIZE - 1));
ce736788
JP
1996 tx_q->dirty_tx = 0;
1997 tx_q->cur_tx = 0;
8d212a9e 1998 tx_q->mss = 0;
c22a3f48 1999 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
ae4f0d46 2000 stmmac_start_tx_dma(priv, chan);
47dd7a54
GC
2001
2002 priv->dev->stats.tx_errors++;
c22a3f48 2003 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
47dd7a54
GC
2004}
2005
6deee222
JP
2006/**
2007 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2008 * @priv: driver private structure
2009 * @txmode: TX operating mode
2010 * @rxmode: RX operating mode
2011 * @chan: channel index
 2012 * Description: it is used for configuring the DMA operation mode at
2013 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2014 * mode.
2015 */
2016static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2017 u32 rxmode, u32 chan)
2018{
a0daae13
JA
2019 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2020 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
52a76235
JA
2021 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2022 u32 tx_channels_count = priv->plat->tx_queues_to_use;
6deee222 2023 int rxfifosz = priv->plat->rx_fifo_size;
52a76235 2024 int txfifosz = priv->plat->tx_fifo_size;
6deee222
JP
2025
2026 if (rxfifosz == 0)
2027 rxfifosz = priv->dma_cap.rx_fifo_size;
52a76235
JA
2028 if (txfifosz == 0)
2029 txfifosz = priv->dma_cap.tx_fifo_size;
2030
2031 /* Adjust for real per queue fifo size */
2032 rxfifosz /= rx_channels_count;
2033 txfifosz /= tx_channels_count;
6deee222 2034
ab0204e3
JA
2035 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2036 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
6deee222
JP
2037}
2038
8bf993a5
JA
2039static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2040{
63a550fc 2041 int ret;
8bf993a5 2042
c10d4c82
JA
2043 ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2044 priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2045 if (ret && (ret != -EINVAL)) {
8bf993a5 2046 stmmac_global_err(priv);
c10d4c82
JA
2047 return true;
2048 }
2049
2050 return false;
8bf993a5
JA
2051}
2052
8fce3331
JA
2053static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
2054{
2055 int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2056 &priv->xstats, chan);
2057 struct stmmac_channel *ch = &priv->channel[chan];
8fce3331 2058
4ccb4585
JA
2059 if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2060 stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
2061 napi_schedule_irqoff(&ch->rx_napi);
8fce3331
JA
2062 }
2063
4ccb4585 2064 if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
8fce3331 2065 stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
4ccb4585 2066 napi_schedule_irqoff(&ch->tx_napi);
8fce3331
JA
2067 }
2068
2069 return status;
2070}
2071
32ceabca 2072/**
732fdf0e 2073 * stmmac_dma_interrupt - DMA ISR
32ceabca
GC
2074 * @priv: driver private structure
2075 * Description: this is the DMA ISR. It is called by the main ISR.
732fdf0e
GC
 2076 * It calls the dwmac dma routine and schedules the poll method when
 2077 * there is work that can be done.
32ceabca 2078 */
aec7ff27
GC
2079static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2080{
d62a107a 2081 u32 tx_channel_count = priv->plat->tx_queues_to_use;
5a6a0445
NC
2082 u32 rx_channel_count = priv->plat->rx_queues_to_use;
2083 u32 channels_to_check = tx_channel_count > rx_channel_count ?
2084 tx_channel_count : rx_channel_count;
d62a107a 2085 u32 chan;
8ac60ffb
KC
2086 int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2087
2088 /* Make sure we never check beyond our status buffer. */
2089 if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2090 channels_to_check = ARRAY_SIZE(status);
5a6a0445 2091
5a6a0445 2092 for (chan = 0; chan < channels_to_check; chan++)
8fce3331 2093 status[chan] = stmmac_napi_check(priv, chan);
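	/* When a channel reports tx_hard_error_bump_tc, the loop below bumps
	 * the DMA threshold (tc) in 64-byte steps while it is still <= 256
	 * and reprograms the channel operation mode accordingly.
	 */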
6deee222 2094
5a6a0445
NC
2095 for (chan = 0; chan < tx_channel_count; chan++) {
2096 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
d62a107a
JP
2097 /* Try to bump up the dma threshold on this failure */
2098 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2099 (tc <= 256)) {
2100 tc += 64;
2101 if (priv->plat->force_thresh_dma_mode)
2102 stmmac_set_dma_operation_mode(priv,
2103 tc,
2104 tc,
2105 chan);
2106 else
2107 stmmac_set_dma_operation_mode(priv,
2108 tc,
2109 SF_DMA_MODE,
2110 chan);
2111 priv->xstats.threshold = tc;
2112 }
5a6a0445 2113 } else if (unlikely(status[chan] == tx_hard_error)) {
d62a107a 2114 stmmac_tx_err(priv, chan);
47dd7a54 2115 }
d62a107a 2116 }
47dd7a54
GC
2117}
2118
32ceabca
GC
2119/**
2120 * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2121 * @priv: driver private structure
 2122 * Description: this masks the MMC irq, since the counters are managed in SW.
2123 */
1c901a46
GC
2124static void stmmac_mmc_setup(struct stmmac_priv *priv)
2125{
2126 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
36ff7c1e 2127 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
1c901a46 2128
36ff7c1e 2129 dwmac_mmc_intr_all_mask(priv->mmcaddr);
4f795b25
GC
2130
2131 if (priv->dma_cap.rmon) {
36ff7c1e 2132 dwmac_mmc_ctrl(priv->mmcaddr, mode);
4f795b25
GC
2133 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2134 } else
38ddc59d 2135 netdev_info(priv->dev, "No MAC Management Counters available\n");
1c901a46
GC
2136}
2137
19e30c14 2138/**
732fdf0e 2139 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
32ceabca 2140 * @priv: driver private structure
19e30c14
GC
2141 * Description:
2142 * new GMAC chip generations have a new register to indicate the
2143 * presence of the optional feature/functions.
 2144 * This can also be used to override the value passed through the
 2145 * platform, which remains necessary for old MAC10/100 and GMAC chips.
e7434821
GC
2146 */
2147static int stmmac_get_hw_features(struct stmmac_priv *priv)
2148{
a4e887fa 2149 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
e7434821
GC
2150}
2151
32ceabca 2152/**
732fdf0e 2153 * stmmac_check_ether_addr - check if the MAC addr is valid
32ceabca
GC
2154 * @priv: driver private structure
2155 * Description:
 2156 * it verifies that the MAC address is valid; in case it is not, it
 2157 * generates a random MAC address
2158 */
bfab27a1
GC
2159static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2160{
bfab27a1 2161 if (!is_valid_ether_addr(priv->dev->dev_addr)) {
c10d4c82 2162 stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
ceb69499 2163 if (!is_valid_ether_addr(priv->dev->dev_addr))
f2cedb63 2164 eth_hw_addr_random(priv->dev);
38ddc59d
LC
2165 netdev_info(priv->dev, "device MAC address %pM\n",
2166 priv->dev->dev_addr);
bfab27a1 2167 }
bfab27a1
GC
2168}
2169
32ceabca 2170/**
732fdf0e 2171 * stmmac_init_dma_engine - DMA init.
32ceabca
GC
2172 * @priv: driver private structure
2173 * Description:
2174 * It inits the DMA invoking the specific MAC/GMAC callback.
2175 * Some DMA parameters can be passed from the platform;
 2176 * if these are not passed, a default is kept for the MAC or GMAC.
2177 */
0f1f88a8
GC
2178static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2179{
47f2a9ce
JP
2180 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2181 u32 tx_channels_count = priv->plat->tx_queues_to_use;
24aaed0c 2182 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
54139cf3 2183 struct stmmac_rx_queue *rx_q;
ce736788 2184 struct stmmac_tx_queue *tx_q;
47f2a9ce 2185 u32 chan = 0;
c24602ef 2186 int atds = 0;
495db273 2187 int ret = 0;
0f1f88a8 2188
a332e2fa
NC
2189 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2190 dev_err(priv->device, "Invalid DMA configuration\n");
89ab75bf 2191 return -EINVAL;
0f1f88a8
GC
2192 }
2193
c24602ef
GC
2194 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2195 atds = 1;
2196
a4e887fa 2197 ret = stmmac_reset(priv, priv->ioaddr);
495db273
GC
2198 if (ret) {
2199 dev_err(priv->device, "Failed to reset the dma\n");
2200 return ret;
2201 }
2202
7d9e6c5a
JA
2203 /* DMA Configuration */
2204 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2205
2206 if (priv->plat->axi)
2207 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2208
24aaed0c
JA
2209 /* DMA RX Channel Configuration */
2210 for (chan = 0; chan < rx_channels_count; chan++) {
2211 rx_q = &priv->rx_queue[chan];
47f2a9ce 2212
24aaed0c
JA
2213 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2214 rx_q->dma_rx_phy, chan);
54139cf3 2215
24aaed0c
JA
2216 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2217 (DMA_RX_SIZE * sizeof(struct dma_desc));
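		/* the RX tail pointer initially points one past the last
		 * descriptor, so the whole ring is usable by the DMA
		 */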
2218 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2219 rx_q->rx_tail_addr, chan);
2220 }
47f2a9ce 2221
24aaed0c
JA
2222 /* DMA TX Channel Configuration */
2223 for (chan = 0; chan < tx_channels_count; chan++) {
2224 tx_q = &priv->tx_queue[chan];
47f2a9ce 2225
24aaed0c
JA
2226 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2227 tx_q->dma_tx_phy, chan);
ce736788 2228
0431100b 2229 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
24aaed0c
JA
2230 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2231 tx_q->tx_tail_addr, chan);
2232 }
47f2a9ce 2233
24aaed0c
JA
2234 /* DMA CSR Channel configuration */
2235 for (chan = 0; chan < dma_csr_ch; chan++)
2236 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
47f2a9ce 2237
495db273 2238 return ret;
0f1f88a8
GC
2239}
2240
8fce3331
JA
2241static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2242{
2243 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2244
2245 mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
2246}
2247
9125cdd1 2248/**
732fdf0e 2249 * stmmac_tx_timer - mitigation sw timer for tx.
9125cdd1
GC
 2250 * @t: timer_list pointer (embedded in the TX queue structure)
2251 * Description:
2252 * This is the timer handler to directly invoke the stmmac_tx_clean.
2253 */
e99e88a9 2254static void stmmac_tx_timer(struct timer_list *t)
9125cdd1 2255{
8fce3331
JA
2256 struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer);
2257 struct stmmac_priv *priv = tx_q->priv_data;
2258 struct stmmac_channel *ch;
2259
2260 ch = &priv->channel[tx_q->queue_index];
9125cdd1 2261
4ccb4585
JA
2262 /*
2263 * If NAPI is already running we can miss some events. Let's rearm
2264 * the timer and try again.
2265 */
2266 if (likely(napi_schedule_prep(&ch->tx_napi)))
2267 __napi_schedule(&ch->tx_napi);
2268 else
2269 mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(10));
9125cdd1
GC
2270}
2271
2272/**
732fdf0e 2273 * stmmac_init_tx_coalesce - init tx mitigation options.
32ceabca 2274 * @priv: driver private structure
9125cdd1
GC
2275 * Description:
2276 * This inits the transmit coalesce parameters: i.e. timer rate,
2277 * timer handler and default threshold used for enabling the
2278 * interrupt on completion bit.
2279 */
2280static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2281{
8fce3331
JA
2282 u32 tx_channel_count = priv->plat->tx_queues_to_use;
2283 u32 chan;
2284
9125cdd1
GC
2285 priv->tx_coal_frames = STMMAC_TX_FRAMES;
2286 priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
8fce3331
JA
2287
2288 for (chan = 0; chan < tx_channel_count; chan++) {
2289 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
2290
2291 timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0);
2292 }
9125cdd1
GC
2293}
2294
4854ab99
JP
2295static void stmmac_set_rings_length(struct stmmac_priv *priv)
2296{
2297 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2298 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2299 u32 chan;
2300
2301 /* set TX ring length */
a4e887fa
JA
2302 for (chan = 0; chan < tx_channels_count; chan++)
2303 stmmac_set_tx_ring_len(priv, priv->ioaddr,
2304 (DMA_TX_SIZE - 1), chan);
4854ab99
JP
2305
2306 /* set RX ring length */
a4e887fa
JA
2307 for (chan = 0; chan < rx_channels_count; chan++)
2308 stmmac_set_rx_ring_len(priv, priv->ioaddr,
2309 (DMA_RX_SIZE - 1), chan);
4854ab99
JP
2310}
2311
6a3a7193
JP
2312/**
2313 * stmmac_set_tx_queue_weight - Set TX queue weight
2314 * @priv: driver private structure
2315 * Description: It is used for setting TX queues weight
2316 */
2317static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2318{
2319 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2320 u32 weight;
2321 u32 queue;
2322
2323 for (queue = 0; queue < tx_queues_count; queue++) {
2324 weight = priv->plat->tx_queues_cfg[queue].weight;
c10d4c82 2325 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
6a3a7193
JP
2326 }
2327}
2328
19d91873
JP
2329/**
2330 * stmmac_configure_cbs - Configure CBS in TX queue
2331 * @priv: driver private structure
2332 * Description: It is used for configuring CBS in AVB TX queues
2333 */
2334static void stmmac_configure_cbs(struct stmmac_priv *priv)
2335{
2336 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2337 u32 mode_to_use;
2338 u32 queue;
2339
44781fef
JP
2340 /* queue 0 is reserved for legacy traffic */
2341 for (queue = 1; queue < tx_queues_count; queue++) {
19d91873
JP
2342 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2343 if (mode_to_use == MTL_QUEUE_DCB)
2344 continue;
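		/* CBS is only programmed for AVB queues; DCB queues keep their defaults */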
2345
c10d4c82 2346 stmmac_config_cbs(priv, priv->hw,
19d91873
JP
2347 priv->plat->tx_queues_cfg[queue].send_slope,
2348 priv->plat->tx_queues_cfg[queue].idle_slope,
2349 priv->plat->tx_queues_cfg[queue].high_credit,
2350 priv->plat->tx_queues_cfg[queue].low_credit,
2351 queue);
2352 }
2353}
2354
d43042f4
JP
2355/**
2356 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2357 * @priv: driver private structure
2358 * Description: It is used for mapping RX queues to RX dma channels
2359 */
2360static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2361{
2362 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2363 u32 queue;
2364 u32 chan;
2365
2366 for (queue = 0; queue < rx_queues_count; queue++) {
2367 chan = priv->plat->rx_queues_cfg[queue].chan;
c10d4c82 2368 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
d43042f4
JP
2369 }
2370}
2371
a8f5102a
JP
2372/**
2373 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2374 * @priv: driver private structure
2375 * Description: It is used for configuring the RX Queue Priority
2376 */
2377static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2378{
2379 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2380 u32 queue;
2381 u32 prio;
2382
2383 for (queue = 0; queue < rx_queues_count; queue++) {
2384 if (!priv->plat->rx_queues_cfg[queue].use_prio)
2385 continue;
2386
2387 prio = priv->plat->rx_queues_cfg[queue].prio;
c10d4c82 2388 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
a8f5102a
JP
2389 }
2390}
2391
2392/**
2393 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2394 * @priv: driver private structure
2395 * Description: It is used for configuring the TX Queue Priority
2396 */
2397static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2398{
2399 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2400 u32 queue;
2401 u32 prio;
2402
2403 for (queue = 0; queue < tx_queues_count; queue++) {
2404 if (!priv->plat->tx_queues_cfg[queue].use_prio)
2405 continue;
2406
2407 prio = priv->plat->tx_queues_cfg[queue].prio;
c10d4c82 2408 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
a8f5102a
JP
2409 }
2410}
2411
abe80fdc
JP
2412/**
2413 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2414 * @priv: driver private structure
2415 * Description: It is used for configuring the RX queue routing
2416 */
2417static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2418{
2419 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2420 u32 queue;
2421 u8 packet;
2422
2423 for (queue = 0; queue < rx_queues_count; queue++) {
2424 /* no specific packet type routing specified for the queue */
2425 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2426 continue;
2427
2428 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
c10d4c82 2429 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
abe80fdc
JP
2430 }
2431}
2432
d0a9c9f9
JP
2433/**
2434 * stmmac_mtl_configuration - Configure MTL
2435 * @priv: driver private structure
 2436 * Description: It is used for configuring MTL
2437 */
2438static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2439{
2440 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2441 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2442
c10d4c82 2443 if (tx_queues_count > 1)
6a3a7193
JP
2444 stmmac_set_tx_queue_weight(priv);
2445
d0a9c9f9 2446 /* Configure MTL RX algorithms */
c10d4c82
JA
2447 if (rx_queues_count > 1)
2448 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
2449 priv->plat->rx_sched_algorithm);
d0a9c9f9
JP
2450
2451 /* Configure MTL TX algorithms */
c10d4c82
JA
2452 if (tx_queues_count > 1)
2453 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
2454 priv->plat->tx_sched_algorithm);
d0a9c9f9 2455
19d91873 2456 /* Configure CBS in AVB TX queues */
c10d4c82 2457 if (tx_queues_count > 1)
19d91873
JP
2458 stmmac_configure_cbs(priv);
2459
d43042f4 2460 /* Map RX MTL to DMA channels */
c10d4c82 2461 stmmac_rx_queue_dma_chan_map(priv);
d43042f4 2462
d0a9c9f9 2463 /* Enable MAC RX Queues */
c10d4c82 2464 stmmac_mac_enable_rx_queues(priv);
6deee222 2465
a8f5102a 2466 /* Set RX priorities */
c10d4c82 2467 if (rx_queues_count > 1)
a8f5102a
JP
2468 stmmac_mac_config_rx_queues_prio(priv);
2469
2470 /* Set TX priorities */
c10d4c82 2471 if (tx_queues_count > 1)
a8f5102a 2472 stmmac_mac_config_tx_queues_prio(priv);
abe80fdc
JP
2473
2474 /* Set RX routing */
c10d4c82 2475 if (rx_queues_count > 1)
abe80fdc 2476 stmmac_mac_config_rx_queues_routing(priv);
d0a9c9f9
JP
2477}
2478
8bf993a5
JA
2479static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
2480{
c10d4c82 2481 if (priv->dma_cap.asp) {
8bf993a5 2482 netdev_info(priv->dev, "Enabling Safety Features\n");
c10d4c82 2483 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
8bf993a5
JA
2484 } else {
2485 netdev_info(priv->dev, "No Safety Features support found\n");
2486 }
2487}
2488
523f11b5 2489/**
732fdf0e 2490 * stmmac_hw_setup - setup mac in a usable state.
523f11b5
SK
 2491 * @dev : pointer to the device structure.
 * @init_ptp: initialize the PTP clock if set
2492 * Description:
732fdf0e
GC
 2493 * this is the main function to setup the HW in a usable state: the
2494 * dma engine is reset, the core registers are configured (e.g. AXI,
2495 * Checksum features, timers). The DMA is ready to start receiving and
2496 * transmitting.
523f11b5
SK
2497 * Return value:
2498 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2499 * file on failure.
2500 */
fe131929 2501static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
523f11b5
SK
2502{
2503 struct stmmac_priv *priv = netdev_priv(dev);
3c55d4d0 2504 u32 rx_cnt = priv->plat->rx_queues_to_use;
146617b8
JP
2505 u32 tx_cnt = priv->plat->tx_queues_to_use;
2506 u32 chan;
523f11b5
SK
2507 int ret;
2508
523f11b5
SK
2509 /* DMA initialization and SW reset */
2510 ret = stmmac_init_dma_engine(priv);
2511 if (ret < 0) {
38ddc59d
LC
2512 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2513 __func__);
523f11b5
SK
2514 return ret;
2515 }
2516
2517 /* Copy the MAC addr into the HW */
c10d4c82 2518 stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
523f11b5 2519
02e57b9d
GC
2520 /* PS and related bits will be programmed according to the speed */
2521 if (priv->hw->pcs) {
2522 int speed = priv->plat->mac_port_sel_speed;
2523
2524 if ((speed == SPEED_10) || (speed == SPEED_100) ||
2525 (speed == SPEED_1000)) {
2526 priv->hw->ps = speed;
2527 } else {
2528 dev_warn(priv->device, "invalid port speed\n");
2529 priv->hw->ps = 0;
2530 }
2531 }
2532
523f11b5 2533 /* Initialize the MAC Core */
c10d4c82 2534 stmmac_core_init(priv, priv->hw, dev);
523f11b5 2535
d0a9c9f9 2536 /* Initialize MTL*/
63a550fc 2537 stmmac_mtl_configuration(priv);
9eb12474 2538
8bf993a5 2539 /* Initialize Safety Features */
63a550fc 2540 stmmac_safety_feat_configuration(priv);
8bf993a5 2541
c10d4c82 2542 ret = stmmac_rx_ipc(priv, priv->hw);
978aded4 2543 if (!ret) {
38ddc59d 2544 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
978aded4 2545 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
d2afb5bd 2546 priv->hw->rx_csum = 0;
978aded4
GC
2547 }
2548
523f11b5 2549 /* Enable the MAC Rx/Tx */
c10d4c82 2550 stmmac_mac_set(priv, priv->ioaddr, true);
523f11b5 2551
b4f0a661
JP
2552 /* Set the HW DMA mode and the COE */
2553 stmmac_dma_operation_mode(priv);
2554
523f11b5
SK
2555 stmmac_mmc_setup(priv);
2556
fe131929 2557 if (init_ptp) {
0ad2be79
TR
2558 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2559 if (ret < 0)
2560 netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2561
fe131929 2562 ret = stmmac_init_ptp(priv);
722eef28
HK
2563 if (ret == -EOPNOTSUPP)
2564 netdev_warn(priv->dev, "PTP not supported by HW\n");
2565 else if (ret)
2566 netdev_warn(priv->dev, "PTP init failed\n");
fe131929 2567 }
523f11b5 2568
523f11b5
SK
2569 priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2570
a4e887fa
JA
2571 if (priv->use_riwt) {
2572 ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2573 if (!ret)
2574 priv->rx_riwt = MAX_DMA_RIWT;
523f11b5
SK
2575 }
2576
c10d4c82
JA
2577 if (priv->hw->pcs)
2578 stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
523f11b5 2579
4854ab99
JP
2580 /* set TX and RX rings length */
2581 stmmac_set_rings_length(priv);
2582
f748be53 2583 /* Enable TSO */
146617b8
JP
2584 if (priv->tso) {
2585 for (chan = 0; chan < tx_cnt; chan++)
a4e887fa 2586 stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
146617b8 2587 }
f748be53 2588
7d9e6c5a
JA
2589 /* Start the ball rolling... */
2590 stmmac_start_all_dma(priv);
2591
523f11b5
SK
2592 return 0;
2593}
2594
c66f6c37
TR
2595static void stmmac_hw_teardown(struct net_device *dev)
2596{
2597 struct stmmac_priv *priv = netdev_priv(dev);
2598
2599 clk_disable_unprepare(priv->plat->clk_ptp_ref);
2600}
2601
47dd7a54
GC
2602/**
2603 * stmmac_open - open entry point of the driver
2604 * @dev : pointer to the device structure.
2605 * Description:
2606 * This function is the open entry point of the driver.
2607 * Return value:
2608 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2609 * file on failure.
2610 */
2611static int stmmac_open(struct net_device *dev)
2612{
2613 struct stmmac_priv *priv = netdev_priv(dev);
8fce3331 2614 u32 chan;
47dd7a54
GC
2615 int ret;
2616
4bfcbd7a
FV
2617 stmmac_check_ether_addr(priv);
2618
3fe5cadb
GC
2619 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2620 priv->hw->pcs != STMMAC_PCS_TBI &&
2621 priv->hw->pcs != STMMAC_PCS_RTBI) {
e58bb43f
GC
2622 ret = stmmac_init_phy(dev);
2623 if (ret) {
38ddc59d
LC
2624 netdev_err(priv->dev,
2625 "%s: Cannot attach to PHY (error: %d)\n",
2626 __func__, ret);
89df20d9 2627 return ret;
e58bb43f 2628 }
f66ffe28 2629 }
47dd7a54 2630
523f11b5
SK
2631 /* Extra statistics */
2632 memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2633 priv->xstats.threshold = tc;
2634
5bacd778 2635 priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
22ad3838 2636 priv->rx_copybreak = STMMAC_RX_COPYBREAK;
56329137 2637
5bacd778
LC
2638 ret = alloc_dma_desc_resources(priv);
2639 if (ret < 0) {
2640 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2641 __func__);
2642 goto dma_desc_error;
2643 }
2644
2645 ret = init_dma_desc_rings(dev, GFP_KERNEL);
2646 if (ret < 0) {
2647 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2648 __func__);
2649 goto init_error;
2650 }
2651
fe131929 2652 ret = stmmac_hw_setup(dev, true);
56329137 2653 if (ret < 0) {
38ddc59d 2654 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
c9324d18 2655 goto init_error;
47dd7a54
GC
2656 }
2657
777da230
GC
2658 stmmac_init_tx_coalesce(priv);
2659
d6d50c7e
PR
2660 if (dev->phydev)
2661 phy_start(dev->phydev);
47dd7a54 2662
f66ffe28
GC
2663 /* Request the IRQ lines */
2664 ret = request_irq(dev->irq, stmmac_interrupt,
ceb69499 2665 IRQF_SHARED, dev->name, dev);
f66ffe28 2666 if (unlikely(ret < 0)) {
38ddc59d
LC
2667 netdev_err(priv->dev,
2668 "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2669 __func__, dev->irq, ret);
6c1e5abe 2670 goto irq_error;
f66ffe28
GC
2671 }
2672
7a13f8f5
FV
2673 /* Request the Wake IRQ in case of another line is used for WoL */
2674 if (priv->wol_irq != dev->irq) {
2675 ret = request_irq(priv->wol_irq, stmmac_interrupt,
2676 IRQF_SHARED, dev->name, dev);
2677 if (unlikely(ret < 0)) {
38ddc59d
LC
2678 netdev_err(priv->dev,
2679 "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2680 __func__, priv->wol_irq, ret);
c9324d18 2681 goto wolirq_error;
7a13f8f5
FV
2682 }
2683 }
2684
d765955d 2685 /* Request the IRQ lines */
d7ec8584 2686 if (priv->lpi_irq > 0) {
d765955d
GC
2687 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2688 dev->name, dev);
2689 if (unlikely(ret < 0)) {
38ddc59d
LC
2690 netdev_err(priv->dev,
2691 "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2692 __func__, priv->lpi_irq, ret);
c9324d18 2693 goto lpiirq_error;
d765955d
GC
2694 }
2695 }
2696
c22a3f48
JP
2697 stmmac_enable_all_queues(priv);
2698 stmmac_start_all_queues(priv);
f66ffe28 2699
47dd7a54 2700 return 0;
f66ffe28 2701
c9324d18 2702lpiirq_error:
d765955d
GC
2703 if (priv->wol_irq != dev->irq)
2704 free_irq(priv->wol_irq, dev);
c9324d18 2705wolirq_error:
7a13f8f5 2706 free_irq(dev->irq, dev);
6c1e5abe
TR
2707irq_error:
2708 if (dev->phydev)
2709 phy_stop(dev->phydev);
7a13f8f5 2710
8fce3331
JA
2711 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2712 del_timer_sync(&priv->tx_queue[chan].txtimer);
2713
c66f6c37 2714 stmmac_hw_teardown(dev);
c9324d18
GC
2715init_error:
2716 free_dma_desc_resources(priv);
5bacd778 2717dma_desc_error:
d6d50c7e
PR
2718 if (dev->phydev)
2719 phy_disconnect(dev->phydev);
4bfcbd7a 2720
f66ffe28 2721 return ret;
47dd7a54
GC
2722}
2723
2724/**
2725 * stmmac_release - close entry point of the driver
2726 * @dev : device pointer.
2727 * Description:
2728 * This is the stop entry point of the driver.
2729 */
2730static int stmmac_release(struct net_device *dev)
2731{
2732 struct stmmac_priv *priv = netdev_priv(dev);
8fce3331 2733 u32 chan;
47dd7a54 2734
d765955d
GC
2735 if (priv->eee_enabled)
2736 del_timer_sync(&priv->eee_ctrl_timer);
2737
47dd7a54 2738 /* Stop and disconnect the PHY */
d6d50c7e
PR
2739 if (dev->phydev) {
2740 phy_stop(dev->phydev);
2741 phy_disconnect(dev->phydev);
47dd7a54
GC
2742 }
2743
c22a3f48 2744 stmmac_stop_all_queues(priv);
47dd7a54 2745
c22a3f48 2746 stmmac_disable_all_queues(priv);
47dd7a54 2747
8fce3331
JA
2748 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
2749 del_timer_sync(&priv->tx_queue[chan].txtimer);
9125cdd1 2750
47dd7a54
GC
2751 /* Free the IRQ lines */
2752 free_irq(dev->irq, dev);
7a13f8f5
FV
2753 if (priv->wol_irq != dev->irq)
2754 free_irq(priv->wol_irq, dev);
d7ec8584 2755 if (priv->lpi_irq > 0)
d765955d 2756 free_irq(priv->lpi_irq, dev);
47dd7a54
GC
2757
2758 /* Stop TX/RX DMA and clear the descriptors */
ae4f0d46 2759 stmmac_stop_all_dma(priv);
47dd7a54
GC
2760
2761 /* Release and free the Rx/Tx resources */
2762 free_dma_desc_resources(priv);
2763
19449bfc 2764 /* Disable the MAC Rx/Tx */
c10d4c82 2765 stmmac_mac_set(priv, priv->ioaddr, false);
47dd7a54
GC
2766
2767 netif_carrier_off(dev);
2768
92ba6888
RK
2769 stmmac_release_ptp(priv);
2770
47dd7a54
GC
2771 return 0;
2772}
2773
f748be53
AT
2774/**
 2775 * stmmac_tso_allocator - fill TSO descriptors for a payload chunk
2776 * @priv: driver private structure
2777 * @des: buffer start address
2778 * @total_len: total length to fill in descriptors
 2779 * @last_segment: condition for the last descriptor
ce736788 2780 * @queue: TX queue index
f748be53
AT
2781 * Description:
 2782 * This function fills descriptors and requests new descriptors according
 2783 * to the buffer length to fill
2784 */
2785static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
ce736788 2786 int total_len, bool last_segment, u32 queue)
f748be53 2787{
ce736788 2788 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
f748be53 2789 struct dma_desc *desc;
5bacd778 2790 u32 buff_size;
ce736788 2791 int tmp_len;
f748be53
AT
2792
2793 tmp_len = total_len;
2794
2795 while (tmp_len > 0) {
ce736788 2796 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
b4c9784c 2797 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
ce736788 2798 desc = tx_q->dma_tx + tx_q->cur_tx;
f748be53 2799
f8be0d78 2800 desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
f748be53
AT
2801 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2802 TSO_MAX_BUFF_SIZE : tmp_len;
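		/* e.g. a 40960-byte payload chunk is split by this loop into
		 * descriptors of 16383 + 16383 + 8194 bytes, since
		 * TSO_MAX_BUFF_SIZE is 16K - 1
		 */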
2803
42de047d
JA
2804 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
2805 0, 1,
2806 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2807 0, 0);
f748be53
AT
2808
2809 tmp_len -= TSO_MAX_BUFF_SIZE;
2810 }
2811}
2812
2813/**
2814 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2815 * @skb : the socket buffer
2816 * @dev : device pointer
2817 * Description: this is the transmit function that is called on TSO frames
2818 * (support available on GMAC4 and newer chips).
 2819 * The diagram below shows the ring programming in case of TSO frames:
2820 *
2821 * First Descriptor
2822 * --------
2823 * | DES0 |---> buffer1 = L2/L3/L4 header
2824 * | DES1 |---> TCP Payload (can continue on next descr...)
2825 * | DES2 |---> buffer 1 and 2 len
2826 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2827 * --------
2828 * |
2829 * ...
2830 * |
2831 * --------
2832 * | DES0 | --| Split TCP Payload on Buffers 1 and 2
2833 * | DES1 | --|
2834 * | DES2 | --> buffer 1 and 2 len
2835 * | DES3 |
2836 * --------
2837 *
 2838 * mss is fixed while tso is enabled, so the TDES3 ctx field is only programmed when the mss changes.
2839 */
2840static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2841{
ce736788 2842 struct dma_desc *desc, *first, *mss_desc = NULL;
f748be53
AT
2843 struct stmmac_priv *priv = netdev_priv(dev);
2844 int nfrags = skb_shinfo(skb)->nr_frags;
ce736788 2845 u32 queue = skb_get_queue_mapping(skb);
f748be53 2846 unsigned int first_entry, des;
ce736788
JP
2847 struct stmmac_tx_queue *tx_q;
2848 int tmp_pay_len = 0;
2849 u32 pay_len, mss;
f748be53
AT
2850 u8 proto_hdr_len;
2851 int i;
2852
ce736788
JP
2853 tx_q = &priv->tx_queue[queue];
2854
f748be53
AT
2855 /* Compute header lengths */
2856 proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2857
 2858 /* Desc availability check based on the threshold should be safe enough */
ce736788 2859 if (unlikely(stmmac_tx_avail(priv, queue) <
f748be53 2860 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
c22a3f48
JP
2861 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2862 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2863 queue));
f748be53 2864 /* This is a hard error, log it. */
38ddc59d
LC
2865 netdev_err(priv->dev,
2866 "%s: Tx Ring full when queue awake\n",
2867 __func__);
f748be53 2868 }
f748be53
AT
2869 return NETDEV_TX_BUSY;
2870 }
2871
2872 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2873
2874 mss = skb_shinfo(skb)->gso_size;
2875
2876 /* set new MSS value if needed */
8d212a9e 2877 if (mss != tx_q->mss) {
ce736788 2878 mss_desc = tx_q->dma_tx + tx_q->cur_tx;
42de047d 2879 stmmac_set_mss(priv, mss_desc, mss);
8d212a9e 2880 tx_q->mss = mss;
ce736788 2881 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
b4c9784c 2882 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
f748be53
AT
2883 }
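	/* mss_desc is only set when the MSS changes; its own bit is granted
	 * last, after the first descriptor has been fully written (see the
	 * dma_wmb() before stmmac_set_tx_owner() further down)
	 */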
2884
2885 if (netif_msg_tx_queued(priv)) {
2886 pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2887 __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2888 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2889 skb->data_len);
2890 }
2891
ce736788 2892 first_entry = tx_q->cur_tx;
b4c9784c 2893 WARN_ON(tx_q->tx_skbuff[first_entry]);
f748be53 2894
ce736788 2895 desc = tx_q->dma_tx + first_entry;
f748be53
AT
2896 first = desc;
2897
2898 /* first descriptor: fill Headers on Buf1 */
2899 des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2900 DMA_TO_DEVICE);
2901 if (dma_mapping_error(priv->device, des))
2902 goto dma_map_err;
2903
ce736788
JP
2904 tx_q->tx_skbuff_dma[first_entry].buf = des;
2905 tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
f748be53 2906
f8be0d78 2907 first->des0 = cpu_to_le32(des);
f748be53
AT
2908
2909 /* Fill start of payload in buff2 of first descriptor */
2910 if (pay_len)
f8be0d78 2911 first->des1 = cpu_to_le32(des + proto_hdr_len);
f748be53
AT
2912
2913 /* If needed take extra descriptors to fill the remaining payload */
2914 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2915
ce736788 2916 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
f748be53
AT
2917
2918 /* Prepare fragments */
2919 for (i = 0; i < nfrags; i++) {
2920 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2921
2922 des = skb_frag_dma_map(priv->device, frag, 0,
2923 skb_frag_size(frag),
2924 DMA_TO_DEVICE);
937071c1
TR
2925 if (dma_mapping_error(priv->device, des))
2926 goto dma_map_err;
f748be53
AT
2927
2928 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
ce736788 2929 (i == nfrags - 1), queue);
f748be53 2930
ce736788
JP
2931 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2932 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
ce736788 2933 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
f748be53
AT
2934 }
2935
ce736788 2936 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
f748be53 2937
05cf0d1b
NC
2938 /* Only the last descriptor gets to point to the skb. */
2939 tx_q->tx_skbuff[tx_q->cur_tx] = skb;
2940
2941 /* We've used all descriptors we need for this skb, however,
2942 * advance cur_tx so that it references a fresh descriptor.
2943 * ndo_start_xmit will fill this descriptor the next time it's
2944 * called and stmmac_tx_clean may clean up to this descriptor.
2945 */
ce736788 2946 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
f748be53 2947
ce736788 2948 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
b3e51069
LC
2949 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2950 __func__);
c22a3f48 2951 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
f748be53
AT
2952 }
2953
2954 dev->stats.tx_bytes += skb->len;
2955 priv->xstats.tx_tso_frames++;
2956 priv->xstats.tx_tso_nfrags += nfrags;
2957
2958 /* Manage tx mitigation */
8fce3331
JA
2959 tx_q->tx_count_frames += nfrags + 1;
2960 if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
42de047d 2961 stmmac_set_tx_ic(priv, desc);
f748be53 2962 priv->xstats.tx_set_ic_bit++;
8fce3331
JA
2963 tx_q->tx_count_frames = 0;
2964 } else {
2965 stmmac_tx_timer_arm(priv, queue);
f748be53
AT
2966 }
2967
74abc9b1 2968 skb_tx_timestamp(skb);
f748be53
AT
2969
2970 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2971 priv->hwts_tx_en)) {
2972 /* declare that device is doing timestamping */
2973 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
42de047d 2974 stmmac_enable_tx_timestamp(priv, first);
f748be53
AT
2975 }
2976
2977 /* Complete the first descriptor before granting the DMA */
42de047d 2978 stmmac_prepare_tso_tx_desc(priv, first, 1,
f748be53
AT
2979 proto_hdr_len,
2980 pay_len,
ce736788 2981 1, tx_q->tx_skbuff_dma[first_entry].last_segment,
f748be53
AT
2982 tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2983
2984 /* If context desc is used to change MSS */
15d2ee42
NC
2985 if (mss_desc) {
2986 /* Make sure that first descriptor has been completely
2987 * written, including its own bit. This is because MSS is
2988 * actually before first descriptor, so we need to make
2989 * sure that MSS's own bit is the last thing written.
2990 */
2991 dma_wmb();
42de047d 2992 stmmac_set_tx_owner(priv, mss_desc);
15d2ee42 2993 }
f748be53
AT
2994
2995 /* The own bit must be the latest setting done when prepare the
2996 * descriptor and then barrier is needed to make sure that
2997 * all is coherent before granting the DMA engine.
2998 */
95eb930a 2999 wmb();
f748be53
AT
3000
3001 if (netif_msg_pktdata(priv)) {
3002 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
ce736788
JP
3003 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3004 tx_q->cur_tx, first, nfrags);
f748be53 3005
42de047d 3006 stmmac_display_ring(priv, (void *)tx_q->dma_tx, DMA_TX_SIZE, 0);
f748be53
AT
3007
3008 pr_info(">>> frame to be transmitted: ");
3009 print_pkt(skb->data, skb_headlen(skb));
3010 }
3011
c22a3f48 3012 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
f748be53 3013
0431100b 3014 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
a4e887fa 3015 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
f748be53 3016
f748be53
AT
3017 return NETDEV_TX_OK;
3018
3019dma_map_err:
f748be53
AT
3020 dev_err(priv->device, "Tx dma map failed\n");
3021 dev_kfree_skb(skb);
3022 priv->dev->stats.tx_dropped++;
3023 return NETDEV_TX_OK;
3024}
3025
47dd7a54 3026/**
732fdf0e 3027 * stmmac_xmit - Tx entry point of the driver
47dd7a54
GC
3028 * @skb : the socket buffer
3029 * @dev : device pointer
32ceabca
GC
3030 * Description : this is the tx entry point of the driver.
3031 * It programs the chain or the ring and supports oversized frames
3032 * and SG feature.
47dd7a54
GC
3033 */
3034static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3035{
3036 struct stmmac_priv *priv = netdev_priv(dev);
0e80bdc9 3037 unsigned int nopaged_len = skb_headlen(skb);
4a7d666a 3038 int i, csum_insertion = 0, is_jumbo = 0;
ce736788 3039 u32 queue = skb_get_queue_mapping(skb);
47dd7a54 3040 int nfrags = skb_shinfo(skb)->nr_frags;
59423815
CIK
3041 int entry;
3042 unsigned int first_entry;
47dd7a54 3043 struct dma_desc *desc, *first;
ce736788 3044 struct stmmac_tx_queue *tx_q;
0e80bdc9 3045 unsigned int enh_desc;
f748be53
AT
3046 unsigned int des;
3047
ce736788
JP
3048 tx_q = &priv->tx_queue[queue];
3049
e2cd682d
JA
3050 if (priv->tx_path_in_lpi_mode)
3051 stmmac_disable_eee_mode(priv);
3052
f748be53
AT
3053 /* Manage oversized TCP frames for GMAC4 device */
3054 if (skb_is_gso(skb) && priv->tso) {
c5acdbee
JA
3055 if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
3056 /*
3057 * There is no way to determine the number of TSO
3058 * capable Queues. Let's use always the Queue 0
3059 * because if TSO is supported then at least this
3060 * one will be capable.
3061 */
3062 skb_set_queue_mapping(skb, 0);
3063
f748be53 3064 return stmmac_tso_xmit(skb, dev);
c5acdbee 3065 }
f748be53 3066 }
47dd7a54 3067
ce736788 3068 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
c22a3f48
JP
3069 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3070 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3071 queue));
47dd7a54 3072 /* This is a hard error, log it. */
38ddc59d
LC
3073 netdev_err(priv->dev,
3074 "%s: Tx Ring full when queue awake\n",
3075 __func__);
47dd7a54
GC
3076 }
3077 return NETDEV_TX_BUSY;
3078 }
3079
ce736788 3080 entry = tx_q->cur_tx;
0e80bdc9 3081 first_entry = entry;
b4c9784c 3082 WARN_ON(tx_q->tx_skbuff[first_entry]);
47dd7a54 3083
5e982f3b 3084 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
47dd7a54 3085
0e80bdc9 3086 if (likely(priv->extend_desc))
ce736788 3087 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
c24602ef 3088 else
ce736788 3089 desc = tx_q->dma_tx + entry;
c24602ef 3090
47dd7a54
GC
3091 first = desc;
3092
0e80bdc9 3093 enh_desc = priv->plat->enh_desc;
4a7d666a 3094 /* To program the descriptors according to the size of the frame */
29896a67 3095 if (enh_desc)
2c520b1c 3096 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
29896a67 3097
63a550fc 3098 if (unlikely(is_jumbo)) {
2c520b1c 3099 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
63a550fc 3100 if (unlikely(entry < 0) && (entry != -EINVAL))
362b37be 3101 goto dma_map_err;
29896a67 3102 }
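	/* for jumbo frames the mode-specific helper has already consumed the
	 * extra descriptors needed for the oversized head; entry points at
	 * the last descriptor it used
	 */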
47dd7a54
GC
3103
3104 for (i = 0; i < nfrags; i++) {
9e903e08
ED
3105 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3106 int len = skb_frag_size(frag);
be434d50 3107 bool last_segment = (i == (nfrags - 1));
47dd7a54 3108
e3ad57c9 3109 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
b4c9784c 3110 WARN_ON(tx_q->tx_skbuff[entry]);
e3ad57c9 3111
0e80bdc9 3112 if (likely(priv->extend_desc))
ce736788 3113 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
c24602ef 3114 else
ce736788 3115 desc = tx_q->dma_tx + entry;
47dd7a54 3116
f748be53
AT
3117 des = skb_frag_dma_map(priv->device, frag, 0, len,
3118 DMA_TO_DEVICE);
3119 if (dma_mapping_error(priv->device, des))
362b37be
GC
3120 goto dma_map_err; /* should reuse desc w/o issues */
3121
ce736788 3122 tx_q->tx_skbuff_dma[entry].buf = des;
6844171d
JA
3123
3124 stmmac_set_desc_addr(priv, desc, des);
f748be53 3125
ce736788
JP
3126 tx_q->tx_skbuff_dma[entry].map_as_page = true;
3127 tx_q->tx_skbuff_dma[entry].len = len;
3128 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
0e80bdc9
GC
3129
3130 /* Prepare the descriptor and set the own bit too */
42de047d
JA
3131 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
3132 priv->mode, 1, last_segment, skb->len);
47dd7a54
GC
3133 }
3134
05cf0d1b
NC
3135 /* Only the last descriptor gets to point to the skb. */
3136 tx_q->tx_skbuff[entry] = skb;
e3ad57c9 3137
05cf0d1b
NC
3138 /* We've used all descriptors we need for this skb, however,
3139 * advance cur_tx so that it references a fresh descriptor.
3140 * ndo_start_xmit will fill this descriptor the next time it's
3141 * called and stmmac_tx_clean may clean up to this descriptor.
3142 */
3143 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
ce736788 3144 tx_q->cur_tx = entry;
47dd7a54 3145
47dd7a54 3146 if (netif_msg_pktdata(priv)) {
d0225e7d
AT
3147 void *tx_head;
3148
38ddc59d
LC
3149 netdev_dbg(priv->dev,
3150 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
ce736788 3151 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
38ddc59d 3152 entry, first, nfrags);
83d7af64 3153
c24602ef 3154 if (priv->extend_desc)
ce736788 3155 tx_head = (void *)tx_q->dma_etx;
c24602ef 3156 else
ce736788 3157 tx_head = (void *)tx_q->dma_tx;
d0225e7d 3158
42de047d 3159 stmmac_display_ring(priv, tx_head, DMA_TX_SIZE, false);
c24602ef 3160
38ddc59d 3161 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
47dd7a54
GC
3162 print_pkt(skb->data, skb->len);
3163 }
0e80bdc9 3164
ce736788 3165 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
b3e51069
LC
3166 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3167 __func__);
c22a3f48 3168 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
47dd7a54
GC
3169 }
3170
3171 dev->stats.tx_bytes += skb->len;
3172
0e80bdc9
GC
3173 /* According to the coalesce parameter the IC bit for the latest
3174 * segment is reset and the timer re-started to clean the tx status.
 3175 * This approach takes care of the fragments: desc is the first
3176 * element in case of no SG.
3177 */
8fce3331
JA
3178 tx_q->tx_count_frames += nfrags + 1;
3179 if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
42de047d 3180 stmmac_set_tx_ic(priv, desc);
0e80bdc9 3181 priv->xstats.tx_set_ic_bit++;
8fce3331
JA
3182 tx_q->tx_count_frames = 0;
3183 } else {
3184 stmmac_tx_timer_arm(priv, queue);
891434b1
RK
3185 }
3186
74abc9b1 3187 skb_tx_timestamp(skb);
3e82ce12 3188
0e80bdc9
GC
3189 /* Ready to fill the first descriptor and set the OWN bit w/o any
3190 * problems because all the descriptors are actually ready to be
3191 * passed to the DMA engine.
3192 */
3193 if (likely(!is_jumbo)) {
3194 bool last_segment = (nfrags == 0);
3195
f748be53
AT
3196 des = dma_map_single(priv->device, skb->data,
3197 nopaged_len, DMA_TO_DEVICE);
3198 if (dma_mapping_error(priv->device, des))
0e80bdc9
GC
3199 goto dma_map_err;
3200
ce736788 3201 tx_q->tx_skbuff_dma[first_entry].buf = des;
6844171d
JA
3202
3203 stmmac_set_desc_addr(priv, first, des);
f748be53 3204
ce736788
JP
3205 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3206 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
0e80bdc9
GC
3207
3208 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3209 priv->hwts_tx_en)) {
3210 /* declare that device is doing timestamping */
3211 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
42de047d 3212 stmmac_enable_tx_timestamp(priv, first);
0e80bdc9
GC
3213 }
3214
3215 /* Prepare the first descriptor setting the OWN bit too */
42de047d
JA
3216 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3217 csum_insertion, priv->mode, 1, last_segment,
3218 skb->len);
80acbed9
AK
3219 } else {
3220 stmmac_set_tx_owner(priv, first);
0e80bdc9
GC
3221 }
3222
80acbed9
AK
 3223	 /* The own bit must be the latest setting done when preparing the
 3224	 * descriptor, and then a barrier is needed to make sure that
3225 * all is coherent before granting the DMA engine.
3226 */
3227 wmb();
3228
c22a3f48 3229 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
f748be53 3230
f1565c60 3231 stmmac_enable_dma_transmission(priv, priv->ioaddr);
8fce3331 3232
0431100b 3233 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
f1565c60 3234 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
52f64fae 3235
362b37be 3236 return NETDEV_TX_OK;
a9097a96 3237
362b37be 3238dma_map_err:
38ddc59d 3239 netdev_err(priv->dev, "Tx DMA map failed\n");
362b37be
GC
3240 dev_kfree_skb(skb);
3241 priv->dev->stats.tx_dropped++;
47dd7a54
GC
3242 return NETDEV_TX_OK;
3243}
3244
b9381985
VB
3245static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3246{
ab188e8f
EN
3247 struct vlan_ethhdr *veth;
3248 __be16 vlan_proto;
b9381985
VB
3249 u16 vlanid;
3250
ab188e8f
EN
3251 veth = (struct vlan_ethhdr *)skb->data;
3252 vlan_proto = veth->h_vlan_proto;
3253
3254 if ((vlan_proto == htons(ETH_P_8021Q) &&
3255 dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
3256 (vlan_proto == htons(ETH_P_8021AD) &&
3257 dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
b9381985 3258 /* pop the vlan tag */
ab188e8f
EN
3259 vlanid = ntohs(veth->h_vlan_TCI);
3260 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
b9381985 3261 skb_pull(skb, VLAN_HLEN);
ab188e8f 3262 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
b9381985
VB
3263 }
3264}
3265
3266
54139cf3 3267static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
120e87f9 3268{
54139cf3 3269 if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
120e87f9
GC
3270 return 0;
3271
3272 return 1;
3273}
3274
32ceabca 3275/**
732fdf0e 3276 * stmmac_rx_refill - refill used skb preallocated buffers
32ceabca 3277 * @priv: driver private structure
54139cf3 3278 * @queue: RX queue index
32ceabca
GC
 3279 * Description: this reallocates the skbs for the reception process
3280 * that is based on zero-copy.
3281 */
54139cf3 3282static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
47dd7a54 3283{
54139cf3
JP
3284 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3285 int dirty = stmmac_rx_dirty(priv, queue);
3286 unsigned int entry = rx_q->dirty_rx;
3287
47dd7a54 3288 int bfsize = priv->dma_buf_sz;
47dd7a54 3289
e3ad57c9 3290 while (dirty-- > 0) {
c24602ef
GC
3291 struct dma_desc *p;
3292
3293 if (priv->extend_desc)
54139cf3 3294 p = (struct dma_desc *)(rx_q->dma_erx + entry);
c24602ef 3295 else
54139cf3 3296 p = rx_q->dma_rx + entry;
c24602ef 3297
54139cf3 3298 if (likely(!rx_q->rx_skbuff[entry])) {
47dd7a54
GC
3299 struct sk_buff *skb;
3300
acb600de 3301 skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
120e87f9
GC
3302 if (unlikely(!skb)) {
3303 /* so for a while no zero-copy! */
54139cf3 3304 rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
120e87f9
GC
3305 if (unlikely(net_ratelimit()))
3306 dev_err(priv->device,
3307 "fail to alloc skb entry %d\n",
3308 entry);
47dd7a54 3309 break;
120e87f9 3310 }
47dd7a54 3311
54139cf3
JP
3312 rx_q->rx_skbuff[entry] = skb;
3313 rx_q->rx_skbuff_dma[entry] =
47dd7a54
GC
3314 dma_map_single(priv->device, skb->data, bfsize,
3315 DMA_FROM_DEVICE);
362b37be 3316 if (dma_mapping_error(priv->device,
54139cf3 3317 rx_q->rx_skbuff_dma[entry])) {
38ddc59d 3318 netdev_err(priv->dev, "Rx DMA map failed\n");
362b37be
GC
3319 dev_kfree_skb(skb);
3320 break;
3321 }
286a8372 3322
6844171d 3323 stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]);
2c520b1c 3324 stmmac_refill_desc3(priv, rx_q, p);
286a8372 3325
54139cf3
JP
3326 if (rx_q->rx_zeroc_thresh > 0)
3327 rx_q->rx_zeroc_thresh--;
120e87f9 3328
b3e51069
LC
3329 netif_dbg(priv, rx_status, priv->dev,
3330 "refill entry #%d\n", entry);
47dd7a54 3331 }
ad688cdb 3332 dma_wmb();
f748be53 3333
357951cd 3334 stmmac_set_rx_owner(priv, p, priv->use_riwt);
f748be53 3335
ad688cdb 3336 dma_wmb();
e3ad57c9
GC
3337
3338 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
47dd7a54 3339 }
54139cf3 3340 rx_q->dirty_rx = entry;
47dd7a54
GC
3341}
3342
32ceabca 3343/**
732fdf0e 3344 * stmmac_rx - manage the receive process
32ceabca 3345 * @priv: driver private structure
54139cf3
JP
 3346 * @limit: napi budget
3347 * @queue: RX queue index.
32ceabca
GC
 3348 * Description: this is the function called by the napi poll method.
3349 * It gets all the frames inside the ring.
3350 */
54139cf3 3351static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
47dd7a54 3352{
54139cf3 3353 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
8fce3331 3354 struct stmmac_channel *ch = &priv->channel[queue];
54139cf3
JP
3355 unsigned int entry = rx_q->cur_rx;
3356 int coe = priv->hw->rx_csum;
47dd7a54
GC
3357 unsigned int next_entry;
3358 unsigned int count = 0;
7d9e6c5a
JA
3359 bool xmac;
3360
3361 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
47dd7a54 3362
83d7af64 3363 if (netif_msg_rx_status(priv)) {
d0225e7d
AT
3364 void *rx_head;
3365
38ddc59d 3366 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
c24602ef 3367 if (priv->extend_desc)
54139cf3 3368 rx_head = (void *)rx_q->dma_erx;
c24602ef 3369 else
54139cf3 3370 rx_head = (void *)rx_q->dma_rx;
d0225e7d 3371
42de047d 3372 stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
47dd7a54 3373 }
c24602ef 3374 while (count < limit) {
47dd7a54 3375 int status;
9401bb5c 3376 struct dma_desc *p;
ba1ffd74 3377 struct dma_desc *np;
47dd7a54 3378
c24602ef 3379 if (priv->extend_desc)
54139cf3 3380 p = (struct dma_desc *)(rx_q->dma_erx + entry);
c24602ef 3381 else
54139cf3 3382 p = rx_q->dma_rx + entry;
c24602ef 3383
c1fa3212 3384 /* read the status of the incoming frame */
42de047d
JA
3385 status = stmmac_rx_status(priv, &priv->dev->stats,
3386 &priv->xstats, p);
c1fa3212
FG
3387 /* check if managed by the DMA otherwise go ahead */
3388 if (unlikely(status & dma_own))
47dd7a54
GC
3389 break;
3390
3391 count++;
3392
54139cf3
JP
3393 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3394 next_entry = rx_q->cur_rx;
e3ad57c9 3395
c24602ef 3396 if (priv->extend_desc)
54139cf3 3397 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
c24602ef 3398 else
54139cf3 3399 np = rx_q->dma_rx + next_entry;
ba1ffd74
GC
3400
3401 prefetch(np);
47dd7a54 3402
42de047d
JA
3403 if (priv->extend_desc)
3404 stmmac_rx_extended_status(priv, &priv->dev->stats,
3405 &priv->xstats, rx_q->dma_erx + entry);
891434b1 3406 if (unlikely(status == discard_frame)) {
47dd7a54 3407 priv->dev->stats.rx_errors++;
891434b1 3408 if (priv->hwts_rx_en && !priv->extend_desc) {
8d45e42b 3409 /* DESC2 & DESC3 will be overwritten by device
891434b1
RK
3410 * with timestamp value, hence reinitialize
3411 * them in stmmac_rx_refill() function so that
3412 * device can reuse it.
3413 */
9c8080d0 3414 dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
54139cf3 3415 rx_q->rx_skbuff[entry] = NULL;
891434b1 3416 dma_unmap_single(priv->device,
54139cf3 3417 rx_q->rx_skbuff_dma[entry],
ceb69499
GC
3418 priv->dma_buf_sz,
3419 DMA_FROM_DEVICE);
891434b1
RK
3420 }
3421 } else {
47dd7a54 3422 struct sk_buff *skb;
3eeb2997 3423 int frame_len;
f748be53
AT
3424 unsigned int des;
3425
d2df9ea0 3426 stmmac_get_desc_addr(priv, p, &des);
42de047d 3427 frame_len = stmmac_get_rx_frame_len(priv, p, coe);
ceb69499 3428
8d45e42b 3429 /* If frame length is greater than skb buffer size
f748be53
AT
3430 * (preallocated during init) then the packet is
3431 * ignored
3432 */
e527c4a7 3433 if (frame_len > priv->dma_buf_sz) {
38ddc59d
LC
3434 netdev_err(priv->dev,
3435 "len %d larger than size (%d)\n",
3436 frame_len, priv->dma_buf_sz);
e527c4a7
GC
3437 priv->dev->stats.rx_length_errors++;
3438 break;
3439 }
3440
3eeb2997 3441 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
ceb69499 3442 * Type frames (LLC/LLC-SNAP)
565020aa
JA
3443 *
3444 * llc_snap is never checked in GMAC >= 4, so this ACS
3445 * feature is always disabled and packets need to be
3446 * stripped manually.
ceb69499 3447 */
565020aa
JA
3448 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
3449 unlikely(status != llc_snap))
3eeb2997 3450 frame_len -= ETH_FCS_LEN;
47dd7a54 3451
83d7af64 3452 if (netif_msg_rx_status(priv)) {
38ddc59d
LC
3453 netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3454 p, entry, des);
1ca7992c
FF
3455 netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3456 frame_len, status);
83d7af64 3457 }
22ad3838 3458
f748be53
AT
 3459			 /* Zero-copy is always used for all sizes
 3460			 * in case of GMAC4 because the used
 3461			 * descriptors always need to be refilled.
3462 */
7d9e6c5a 3463 if (unlikely(!xmac &&
f748be53 3464 ((frame_len < priv->rx_copybreak) ||
54139cf3 3465 stmmac_rx_threshold_count(rx_q)))) {
22ad3838
GC
3466 skb = netdev_alloc_skb_ip_align(priv->dev,
3467 frame_len);
3468 if (unlikely(!skb)) {
3469 if (net_ratelimit())
3470 dev_warn(priv->device,
3471 "packet dropped\n");
3472 priv->dev->stats.rx_dropped++;
3473 break;
3474 }
3475
3476 dma_sync_single_for_cpu(priv->device,
54139cf3 3477 rx_q->rx_skbuff_dma
22ad3838
GC
3478 [entry], frame_len,
3479 DMA_FROM_DEVICE);
3480 skb_copy_to_linear_data(skb,
54139cf3 3481 rx_q->
22ad3838
GC
3482 rx_skbuff[entry]->data,
3483 frame_len);
3484
3485 skb_put(skb, frame_len);
3486 dma_sync_single_for_device(priv->device,
54139cf3 3487 rx_q->rx_skbuff_dma
22ad3838
GC
3488 [entry], frame_len,
3489 DMA_FROM_DEVICE);
3490 } else {
54139cf3 3491 skb = rx_q->rx_skbuff[entry];
22ad3838 3492 if (unlikely(!skb)) {
38ddc59d
LC
3493 netdev_err(priv->dev,
3494 "%s: Inconsistent Rx chain\n",
3495 priv->dev->name);
22ad3838
GC
3496 priv->dev->stats.rx_dropped++;
3497 break;
3498 }
3499 prefetch(skb->data - NET_IP_ALIGN);
54139cf3
JP
3500 rx_q->rx_skbuff[entry] = NULL;
3501 rx_q->rx_zeroc_thresh++;
22ad3838
GC
3502
3503 skb_put(skb, frame_len);
3504 dma_unmap_single(priv->device,
54139cf3 3505 rx_q->rx_skbuff_dma[entry],
22ad3838
GC
3506 priv->dma_buf_sz,
3507 DMA_FROM_DEVICE);
47dd7a54 3508 }
47dd7a54 3509
47dd7a54 3510 if (netif_msg_pktdata(priv)) {
38ddc59d
LC
3511 netdev_dbg(priv->dev, "frame received (%dbytes)",
3512 frame_len);
47dd7a54
GC
3513 print_pkt(skb->data, frame_len);
3514 }
83d7af64 3515
ba1ffd74
GC
3516 stmmac_get_rx_hwtstamp(priv, p, np, skb);
3517
b9381985
VB
3518 stmmac_rx_vlan(priv->dev, skb);
3519
47dd7a54
GC
3520 skb->protocol = eth_type_trans(skb, priv->dev);
3521
ceb69499 3522 if (unlikely(!coe))
bc8acf2c 3523 skb_checksum_none_assert(skb);
62a2ab93 3524 else
47dd7a54 3525 skb->ip_summed = CHECKSUM_UNNECESSARY;
62a2ab93 3526
4ccb4585 3527 napi_gro_receive(&ch->rx_napi, skb);
47dd7a54
GC
3528
3529 priv->dev->stats.rx_packets++;
3530 priv->dev->stats.rx_bytes += frame_len;
47dd7a54
GC
3531 }
3532 entry = next_entry;
47dd7a54
GC
3533 }
3534
54139cf3 3535 stmmac_rx_refill(priv, queue);
47dd7a54
GC
3536
3537 priv->xstats.rx_pkt_n += count;
3538
3539 return count;
3540}
3541
4ccb4585 3542static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
47dd7a54 3543{
8fce3331 3544 struct stmmac_channel *ch =
4ccb4585 3545 container_of(napi, struct stmmac_channel, rx_napi);
8fce3331 3546 struct stmmac_priv *priv = ch->priv_data;
8fce3331 3547 u32 chan = ch->index;
4ccb4585 3548 int work_done;
47dd7a54 3549
9125cdd1 3550 priv->xstats.napi_poll++;
ce736788 3551
4ccb4585
JA
3552 work_done = stmmac_rx(priv, budget, chan);
3553 if (work_done < budget && napi_complete_done(napi, work_done))
3554 stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
3555 return work_done;
3556}
ce736788 3557
4ccb4585
JA
3558static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
3559{
3560 struct stmmac_channel *ch =
3561 container_of(napi, struct stmmac_channel, tx_napi);
3562 struct stmmac_priv *priv = ch->priv_data;
3563 struct stmmac_tx_queue *tx_q;
3564 u32 chan = ch->index;
3565 int work_done;
8fce3331 3566
4ccb4585
JA
3567 priv->xstats.napi_poll++;
3568
3569 work_done = stmmac_tx_clean(priv, DMA_TX_SIZE, chan);
3570 work_done = min(work_done, budget);
8fce3331 3571
4ccb4585 3572 if (work_done < budget && napi_complete_done(napi, work_done))
8fce3331 3573 stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
4ccb4585
JA
3574
3575 /* Force transmission restart */
3576 tx_q = &priv->tx_queue[chan];
3577 if (tx_q->cur_tx != tx_q->dirty_tx) {
3578 stmmac_enable_dma_transmission(priv, priv->ioaddr);
3579 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr,
3580 chan);
fa0be0a4 3581 }
8fce3331 3582
47dd7a54
GC
3583 return work_done;
3584}
3585
3586/**
3587 * stmmac_tx_timeout
3588 * @dev : Pointer to net device structure
3589 * Description: this function is called when a packet transmission fails to
7284a3f1 3590 * complete within a reasonable time. The driver will mark the error in the
47dd7a54
GC
3591 * netdev structure and arrange for the device to be reset to a sane state
3592 * in order to transmit a new packet.
3593 */
3594static void stmmac_tx_timeout(struct net_device *dev)
3595{
3596 struct stmmac_priv *priv = netdev_priv(dev);
3597
34877a15 3598 stmmac_global_err(priv);
47dd7a54
GC
3599}
3600
47dd7a54 3601/**
01789349 3602 * stmmac_set_rx_mode - entry point for multicast addressing
47dd7a54
GC
3603 * @dev : pointer to the device structure
3604 * Description:
3605 * This function is a driver entry point which gets called by the kernel
3606 * whenever multicast addresses must be enabled/disabled.
3607 * Return value:
3608 * void.
3609 */
01789349 3610static void stmmac_set_rx_mode(struct net_device *dev)
47dd7a54
GC
3611{
3612 struct stmmac_priv *priv = netdev_priv(dev);
3613
c10d4c82 3614 stmmac_set_filter(priv, priv->hw, dev);
47dd7a54
GC
3615}
3616
3617/**
3618 * stmmac_change_mtu - entry point to change MTU size for the device.
3619 * @dev : device pointer.
3620 * @new_mtu : the new MTU size for the device.
 3621 * Description: the Maximum Transmission Unit (MTU) is used by the network layer
3622 * to drive packet transmission. Ethernet has an MTU of 1500 octets
3623 * (ETH_DATA_LEN). This value can be changed with ifconfig.
3624 * Return value:
3625 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3626 * file on failure.
3627 */
3628static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3629{
38ddc59d
LC
3630 struct stmmac_priv *priv = netdev_priv(dev);
3631
47dd7a54 3632 if (netif_running(dev)) {
38ddc59d 3633 netdev_err(priv->dev, "must be stopped to change its MTU\n");
47dd7a54
GC
3634 return -EBUSY;
3635 }
3636
5e982f3b 3637 dev->mtu = new_mtu;
f748be53 3638
5e982f3b
MM
3639 netdev_update_features(dev);
3640
3641 return 0;
3642}
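A hedged illustration of the netif_running() check above (interface name and MTU value are arbitrary, and the new value must still fit the min_mtu/max_mtu range set at probe time): the MTU can only be changed while the interface is down, e.g.

	ip link set eth0 down
	ip link set eth0 mtu 2000
	ip link set eth0 up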
3643
c8f44aff 3644static netdev_features_t stmmac_fix_features(struct net_device *dev,
ceb69499 3645 netdev_features_t features)
5e982f3b
MM
3646{
3647 struct stmmac_priv *priv = netdev_priv(dev);
3648
38912bdb 3649 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5e982f3b 3650 features &= ~NETIF_F_RXCSUM;
d2afb5bd 3651
5e982f3b 3652 if (!priv->plat->tx_coe)
a188222b 3653 features &= ~NETIF_F_CSUM_MASK;
5e982f3b 3654
ebbb293f
GC
 3655	 /* Some GMAC devices have buggy Jumbo frame support that
3656 * needs to have the Tx COE disabled for oversized frames
3657 * (due to limited buffer sizes). In this case we disable
8d45e42b 3658	 * the TX csum insertion in the TDES and do not use SF.
ceb69499 3659 */
5e982f3b 3660 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
a188222b 3661 features &= ~NETIF_F_CSUM_MASK;
ebbb293f 3662
f748be53
AT
3663 /* Disable tso if asked by ethtool */
3664 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3665 if (features & NETIF_F_TSO)
3666 priv->tso = true;
3667 else
3668 priv->tso = false;
3669 }
3670
5e982f3b 3671 return features;
47dd7a54
GC
3672}
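A hedged illustration (interface name is arbitrary): the TSO branch above is what reacts when an administrator toggles the feature from user space, e.g.

	ethtool -K eth0 tso off

The request passes through this ndo_fix_features callback before the stack applies it, so priv->tso tracks the resulting feature set.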
3673
d2afb5bd
GC
3674static int stmmac_set_features(struct net_device *netdev,
3675 netdev_features_t features)
3676{
3677 struct stmmac_priv *priv = netdev_priv(netdev);
3678
3679 /* Keep the COE Type in case of csum is supporting */
3680 if (features & NETIF_F_RXCSUM)
3681 priv->hw->rx_csum = priv->plat->rx_coe;
3682 else
3683 priv->hw->rx_csum = 0;
3684 /* No check needed because rx_coe has been set before and it will be
3685 * fixed in case of issue.
3686 */
c10d4c82 3687 stmmac_rx_ipc(priv, priv->hw);
d2afb5bd
GC
3688
3689 return 0;
3690}
3691
32ceabca
GC
3692/**
3693 * stmmac_interrupt - main ISR
3694 * @irq: interrupt number.
3695 * @dev_id: to pass the net device pointer.
3696 * Description: this is the main driver interrupt service routine.
732fdf0e
GC
3697 * It can call:
3698 * o DMA service routine (to manage incoming frame reception and transmission
3699 * status)
3700 * o Core interrupts to manage: remote wake-up, management counter, LPI
3701 * interrupts.
32ceabca 3702 */
47dd7a54
GC
3703static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3704{
3705 struct net_device *dev = (struct net_device *)dev_id;
3706 struct stmmac_priv *priv = netdev_priv(dev);
7bac4e1e
JP
3707 u32 rx_cnt = priv->plat->rx_queues_to_use;
3708 u32 tx_cnt = priv->plat->tx_queues_to_use;
3709 u32 queues_count;
3710 u32 queue;
7d9e6c5a 3711 bool xmac;
7bac4e1e 3712
7d9e6c5a 3713 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
7bac4e1e 3714 queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
47dd7a54 3715
89f7f2cf
SK
3716 if (priv->irq_wake)
3717 pm_wakeup_event(priv->device, 0);
3718
47dd7a54 3719 if (unlikely(!dev)) {
38ddc59d 3720 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
47dd7a54
GC
3721 return IRQ_NONE;
3722 }
3723
34877a15
JA
3724 /* Check if adapter is up */
3725 if (test_bit(STMMAC_DOWN, &priv->state))
3726 return IRQ_HANDLED;
8bf993a5
JA
3727 /* Check if a fatal error happened */
3728 if (stmmac_safety_feat_interrupt(priv))
3729 return IRQ_HANDLED;
34877a15 3730
d765955d 3731 /* To handle GMAC own interrupts */
7d9e6c5a 3732 if ((priv->plat->has_gmac) || xmac) {
c10d4c82 3733 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
61fac60a 3734 int mtl_status;
8f71a88d 3735
d765955d 3736 if (unlikely(status)) {
d765955d 3737 /* For LPI we need to save the tx status */
0982a0f6 3738 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
d765955d 3739 priv->tx_path_in_lpi_mode = true;
0982a0f6 3740 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
d765955d 3741 priv->tx_path_in_lpi_mode = false;
7bac4e1e
JP
3742 }
3743
61fac60a
JA
3744 for (queue = 0; queue < queues_count; queue++) {
3745 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
54139cf3 3746
61fac60a
JA
3747 mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw,
3748 queue);
3749 if (mtl_status != -EINVAL)
3750 status |= mtl_status;
7bac4e1e 3751
61fac60a
JA
3752 if (status & CORE_IRQ_MTL_RX_OVERFLOW)
3753 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3754 rx_q->rx_tail_addr,
3755 queue);
d765955d 3756 }
70523e63
GC
3757
3758 /* PCS link status */
3fe5cadb 3759 if (priv->hw->pcs) {
70523e63
GC
3760 if (priv->xstats.pcs_link)
3761 netif_carrier_on(dev);
3762 else
3763 netif_carrier_off(dev);
3764 }
d765955d 3765 }
aec7ff27 3766
d765955d 3767 /* To handle DMA interrupts */
aec7ff27 3768 stmmac_dma_interrupt(priv);
47dd7a54
GC
3769
3770 return IRQ_HANDLED;
3771}
3772
3773#ifdef CONFIG_NET_POLL_CONTROLLER
3774/* Polling receive - used by NETCONSOLE and other diagnostic tools
ceb69499
GC
3775 * to allow network I/O with interrupts disabled.
3776 */
47dd7a54
GC
3777static void stmmac_poll_controller(struct net_device *dev)
3778{
3779 disable_irq(dev->irq);
3780 stmmac_interrupt(dev->irq, dev);
3781 enable_irq(dev->irq);
3782}
3783#endif
3784
3785/**
3786 * stmmac_ioctl - Entry point for the Ioctl
3787 * @dev: Device pointer.
 3788 * @rq: An IOCTL-specific structure that can contain a pointer to
3789 * a proprietary structure used to pass information to the driver.
3790 * @cmd: IOCTL command
3791 * Description:
32ceabca 3792 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
47dd7a54
GC
3793 */
3794static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3795{
891434b1 3796 int ret = -EOPNOTSUPP;
47dd7a54
GC
3797
3798 if (!netif_running(dev))
3799 return -EINVAL;
3800
891434b1
RK
3801 switch (cmd) {
3802 case SIOCGMIIPHY:
3803 case SIOCGMIIREG:
3804 case SIOCSMIIREG:
d6d50c7e 3805 if (!dev->phydev)
891434b1 3806 return -EINVAL;
d6d50c7e 3807 ret = phy_mii_ioctl(dev->phydev, rq, cmd);
891434b1
RK
3808 break;
3809 case SIOCSHWTSTAMP:
d6228b7c
AP
3810 ret = stmmac_hwtstamp_set(dev, rq);
3811 break;
3812 case SIOCGHWTSTAMP:
3813 ret = stmmac_hwtstamp_get(dev, rq);
891434b1
RK
3814 break;
3815 default:
3816 break;
3817 }
28b04113 3818
47dd7a54
GC
3819 return ret;
3820}
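A minimal user-space sketch of the SIOCSHWTSTAMP path handled above; the interface name, the chosen RX filter and the surrounding socket handling are assumptions, and error checking is omitted:

	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>
	#include <net/if.h>
	#include <string.h>
	#include <sys/ioctl.h>

	static int enable_hw_tstamp(int fd)	/* fd: any socket on the stack */
	{
		struct hwtstamp_config cfg = {
			.tx_type   = HWTSTAMP_TX_ON,
			.rx_filter = HWTSTAMP_FILTER_ALL,
		};
		struct ifreq ifr;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&cfg;

		return ioctl(fd, SIOCSHWTSTAMP, &ifr);
	}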
3821
4dbbe8dd
JA
3822static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
3823 void *cb_priv)
3824{
3825 struct stmmac_priv *priv = cb_priv;
3826 int ret = -EOPNOTSUPP;
3827
3828 stmmac_disable_all_queues(priv);
3829
3830 switch (type) {
3831 case TC_SETUP_CLSU32:
3832 if (tc_cls_can_offload_and_chain0(priv->dev, type_data))
3833 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
3834 break;
3835 default:
3836 break;
3837 }
3838
3839 stmmac_enable_all_queues(priv);
3840 return ret;
3841}
3842
3843static int stmmac_setup_tc_block(struct stmmac_priv *priv,
3844 struct tc_block_offload *f)
3845{
3846 if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
3847 return -EOPNOTSUPP;
3848
3849 switch (f->command) {
3850 case TC_BLOCK_BIND:
3851 return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb,
60513bd8 3852 priv, priv, f->extack);
4dbbe8dd
JA
3853 case TC_BLOCK_UNBIND:
3854 tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv);
3855 return 0;
3856 default:
3857 return -EOPNOTSUPP;
3858 }
3859}
3860
3861static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
3862 void *type_data)
3863{
3864 struct stmmac_priv *priv = netdev_priv(ndev);
3865
3866 switch (type) {
3867 case TC_SETUP_BLOCK:
3868 return stmmac_setup_tc_block(priv, type_data);
1f705bc6
JA
3869 case TC_SETUP_QDISC_CBS:
3870 return stmmac_tc_setup_cbs(priv, priv, type_data);
4dbbe8dd
JA
3871 default:
3872 return -EOPNOTSUPP;
3873 }
3874}
3875
a830405e
BV
3876static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
3877{
3878 struct stmmac_priv *priv = netdev_priv(ndev);
3879 int ret = 0;
3880
3881 ret = eth_mac_addr(ndev, addr);
3882 if (ret)
3883 return ret;
3884
c10d4c82 3885 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
a830405e
BV
3886
3887 return ret;
3888}
3889
50fb4f74 3890#ifdef CONFIG_DEBUG_FS
7ac29055 3891static struct dentry *stmmac_fs_dir;
7ac29055 3892
c24602ef 3893static void sysfs_display_ring(void *head, int size, int extend_desc,
ceb69499 3894 struct seq_file *seq)
7ac29055 3895{
7ac29055 3896 int i;
ceb69499
GC
3897 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3898 struct dma_desc *p = (struct dma_desc *)head;
7ac29055 3899
c24602ef 3900 for (i = 0; i < size; i++) {
c24602ef 3901 if (extend_desc) {
c24602ef 3902 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
ceb69499 3903 i, (unsigned int)virt_to_phys(ep),
f8be0d78
MW
3904 le32_to_cpu(ep->basic.des0),
3905 le32_to_cpu(ep->basic.des1),
3906 le32_to_cpu(ep->basic.des2),
3907 le32_to_cpu(ep->basic.des3));
c24602ef
GC
3908 ep++;
3909 } else {
c24602ef 3910 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
66c25f6e 3911 i, (unsigned int)virt_to_phys(p),
f8be0d78
MW
3912 le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3913 le32_to_cpu(p->des2), le32_to_cpu(p->des3));
c24602ef
GC
3914 p++;
3915 }
7ac29055
GC
3916 seq_printf(seq, "\n");
3917 }
c24602ef 3918}
7ac29055 3919
fb0d9c63 3920static int stmmac_rings_status_show(struct seq_file *seq, void *v)
c24602ef
GC
3921{
3922 struct net_device *dev = seq->private;
3923 struct stmmac_priv *priv = netdev_priv(dev);
54139cf3 3924 u32 rx_count = priv->plat->rx_queues_to_use;
ce736788 3925 u32 tx_count = priv->plat->tx_queues_to_use;
54139cf3
JP
3926 u32 queue;
3927
5f2b8b62
TR
3928 if ((dev->flags & IFF_UP) == 0)
3929 return 0;
3930
54139cf3
JP
3931 for (queue = 0; queue < rx_count; queue++) {
3932 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3933
3934 seq_printf(seq, "RX Queue %d:\n", queue);
3935
3936 if (priv->extend_desc) {
3937 seq_printf(seq, "Extended descriptor ring:\n");
3938 sysfs_display_ring((void *)rx_q->dma_erx,
3939 DMA_RX_SIZE, 1, seq);
3940 } else {
3941 seq_printf(seq, "Descriptor ring:\n");
3942 sysfs_display_ring((void *)rx_q->dma_rx,
3943 DMA_RX_SIZE, 0, seq);
3944 }
3945 }
aff3d9ef 3946
ce736788
JP
3947 for (queue = 0; queue < tx_count; queue++) {
3948 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3949
3950 seq_printf(seq, "TX Queue %d:\n", queue);
3951
3952 if (priv->extend_desc) {
3953 seq_printf(seq, "Extended descriptor ring:\n");
3954 sysfs_display_ring((void *)tx_q->dma_etx,
3955 DMA_TX_SIZE, 1, seq);
3956 } else {
3957 seq_printf(seq, "Descriptor ring:\n");
3958 sysfs_display_ring((void *)tx_q->dma_tx,
3959 DMA_TX_SIZE, 0, seq);
3960 }
7ac29055
GC
3961 }
3962
3963 return 0;
3964}
fb0d9c63 3965DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
7ac29055 3966
fb0d9c63 3967static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
e7434821
GC
3968{
3969 struct net_device *dev = seq->private;
3970 struct stmmac_priv *priv = netdev_priv(dev);
3971
19e30c14 3972 if (!priv->hw_cap_support) {
e7434821
GC
3973 seq_printf(seq, "DMA HW features not supported\n");
3974 return 0;
3975 }
3976
3977 seq_printf(seq, "==============================\n");
3978 seq_printf(seq, "\tDMA HW features\n");
3979 seq_printf(seq, "==============================\n");
3980
22d3efe5 3981 seq_printf(seq, "\t10/100 Mbps: %s\n",
e7434821 3982 (priv->dma_cap.mbps_10_100) ? "Y" : "N");
22d3efe5 3983 seq_printf(seq, "\t1000 Mbps: %s\n",
e7434821 3984 (priv->dma_cap.mbps_1000) ? "Y" : "N");
22d3efe5 3985 seq_printf(seq, "\tHalf duplex: %s\n",
e7434821
GC
3986 (priv->dma_cap.half_duplex) ? "Y" : "N");
3987 seq_printf(seq, "\tHash Filter: %s\n",
3988 (priv->dma_cap.hash_filter) ? "Y" : "N");
3989 seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3990 (priv->dma_cap.multi_addr) ? "Y" : "N");
8d45e42b 3991 seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
e7434821
GC
3992 (priv->dma_cap.pcs) ? "Y" : "N");
3993 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3994 (priv->dma_cap.sma_mdio) ? "Y" : "N");
3995 seq_printf(seq, "\tPMT Remote wake up: %s\n",
3996 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3997 seq_printf(seq, "\tPMT Magic Frame: %s\n",
3998 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3999 seq_printf(seq, "\tRMON module: %s\n",
4000 (priv->dma_cap.rmon) ? "Y" : "N");
4001 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
4002 (priv->dma_cap.time_stamp) ? "Y" : "N");
22d3efe5 4003 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
e7434821 4004 (priv->dma_cap.atime_stamp) ? "Y" : "N");
22d3efe5 4005 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
e7434821
GC
4006 (priv->dma_cap.eee) ? "Y" : "N");
4007 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
4008 seq_printf(seq, "\tChecksum Offload in TX: %s\n",
4009 (priv->dma_cap.tx_coe) ? "Y" : "N");
f748be53
AT
4010 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
4011 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
4012 (priv->dma_cap.rx_coe) ? "Y" : "N");
4013 } else {
4014 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
4015 (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
4016 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
4017 (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
4018 }
e7434821
GC
4019 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
4020 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
4021 seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
4022 priv->dma_cap.number_rx_channel);
4023 seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
4024 priv->dma_cap.number_tx_channel);
4025 seq_printf(seq, "\tEnhanced descriptors: %s\n",
4026 (priv->dma_cap.enh_desc) ? "Y" : "N");
4027
4028 return 0;
4029}
fb0d9c63 4030DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
e7434821 4031
7ac29055
GC
4032static int stmmac_init_fs(struct net_device *dev)
4033{
466c5ac8
MO
4034 struct stmmac_priv *priv = netdev_priv(dev);
4035
4036 /* Create per netdev entries */
4037 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
7ac29055 4038
466c5ac8 4039 if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
38ddc59d 4040 netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
7ac29055
GC
4041
4042 return -ENOMEM;
4043 }
4044
4045 /* Entry to report DMA RX/TX rings */
466c5ac8 4046 priv->dbgfs_rings_status =
d3757ba4 4047 debugfs_create_file("descriptors_status", 0444,
466c5ac8
MO
4048 priv->dbgfs_dir, dev,
4049 &stmmac_rings_status_fops);
7ac29055 4050
466c5ac8 4051 if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
38ddc59d 4052 netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
466c5ac8 4053 debugfs_remove_recursive(priv->dbgfs_dir);
7ac29055
GC
4054
4055 return -ENOMEM;
4056 }
4057
e7434821 4058 /* Entry to report the DMA HW features */
d3757ba4
JP
4059 priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444,
4060 priv->dbgfs_dir,
4061 dev, &stmmac_dma_cap_fops);
e7434821 4062
466c5ac8 4063 if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
38ddc59d 4064 netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
466c5ac8 4065 debugfs_remove_recursive(priv->dbgfs_dir);
e7434821
GC
4066
4067 return -ENOMEM;
4068 }
4069
7ac29055
GC
4070 return 0;
4071}
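Assuming debugfs is mounted at its usual location and STMMAC_RESOURCE_NAME expands to "stmmaceth" (both are assumptions here; the interface name is arbitrary), the two files created above are typically read as:

	cat /sys/kernel/debug/stmmaceth/eth0/descriptors_status
	cat /sys/kernel/debug/stmmaceth/eth0/dma_cap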
4072
466c5ac8 4073static void stmmac_exit_fs(struct net_device *dev)
7ac29055 4074{
466c5ac8
MO
4075 struct stmmac_priv *priv = netdev_priv(dev);
4076
4077 debugfs_remove_recursive(priv->dbgfs_dir);
7ac29055 4078}
50fb4f74 4079#endif /* CONFIG_DEBUG_FS */
7ac29055 4080
47dd7a54
GC
4081static const struct net_device_ops stmmac_netdev_ops = {
4082 .ndo_open = stmmac_open,
4083 .ndo_start_xmit = stmmac_xmit,
4084 .ndo_stop = stmmac_release,
4085 .ndo_change_mtu = stmmac_change_mtu,
5e982f3b 4086 .ndo_fix_features = stmmac_fix_features,
d2afb5bd 4087 .ndo_set_features = stmmac_set_features,
01789349 4088 .ndo_set_rx_mode = stmmac_set_rx_mode,
47dd7a54
GC
4089 .ndo_tx_timeout = stmmac_tx_timeout,
4090 .ndo_do_ioctl = stmmac_ioctl,
4dbbe8dd 4091 .ndo_setup_tc = stmmac_setup_tc,
47dd7a54
GC
4092#ifdef CONFIG_NET_POLL_CONTROLLER
4093 .ndo_poll_controller = stmmac_poll_controller,
4094#endif
a830405e 4095 .ndo_set_mac_address = stmmac_set_mac_address,
47dd7a54
GC
4096};
4097
34877a15
JA
4098static void stmmac_reset_subtask(struct stmmac_priv *priv)
4099{
4100 if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
4101 return;
4102 if (test_bit(STMMAC_DOWN, &priv->state))
4103 return;
4104
4105 netdev_err(priv->dev, "Reset adapter.\n");
4106
4107 rtnl_lock();
4108 netif_trans_update(priv->dev);
4109 while (test_and_set_bit(STMMAC_RESETING, &priv->state))
4110 usleep_range(1000, 2000);
4111
4112 set_bit(STMMAC_DOWN, &priv->state);
4113 dev_close(priv->dev);
00f54e68 4114 dev_open(priv->dev, NULL);
34877a15
JA
4115 clear_bit(STMMAC_DOWN, &priv->state);
4116 clear_bit(STMMAC_RESETING, &priv->state);
4117 rtnl_unlock();
4118}
4119
4120static void stmmac_service_task(struct work_struct *work)
4121{
4122 struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
4123 service_task);
4124
4125 stmmac_reset_subtask(priv);
4126 clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
4127}
4128
cf3f047b
GC
4129/**
4130 * stmmac_hw_init - Init the MAC device
32ceabca 4131 * @priv: driver private structure
732fdf0e
GC
 4132 * Description: this function configures the MAC device according to
4133 * some platform parameters or the HW capability register. It prepares the
 4134 * driver to use either ring or chain mode and to set up either enhanced or
4135 * normal descriptors.
cf3f047b
GC
4136 */
4137static int stmmac_hw_init(struct stmmac_priv *priv)
4138{
5f0456b4 4139 int ret;
cf3f047b 4140
9f93ac8d
LC
 4141	 /* dwmac-sun8i only works in chain mode */
4142 if (priv->plat->has_sun8i)
4143 chain_mode = 1;
5f0456b4 4144 priv->chain_mode = chain_mode;
9f93ac8d 4145
5f0456b4
JA
4146 /* Initialize HW Interface */
4147 ret = stmmac_hwif_init(priv);
4148 if (ret)
4149 return ret;
4a7d666a 4150
cf3f047b
GC
4151 /* Get the HW capability (new GMAC newer than 3.50a) */
4152 priv->hw_cap_support = stmmac_get_hw_features(priv);
4153 if (priv->hw_cap_support) {
38ddc59d 4154 dev_info(priv->device, "DMA HW capability register supported\n");
cf3f047b
GC
4155
4156 /* We can override some gmac/dma configuration fields: e.g.
4157 * enh_desc, tx_coe (e.g. that are passed through the
4158 * platform) with the values from the HW capability
4159 * register (if supported).
4160 */
4161 priv->plat->enh_desc = priv->dma_cap.enh_desc;
cf3f047b 4162 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
3fe5cadb 4163 priv->hw->pmt = priv->plat->pmt;
38912bdb 4164
a8df35d4
EG
4165 /* TXCOE doesn't work in thresh DMA mode */
4166 if (priv->plat->force_thresh_dma_mode)
4167 priv->plat->tx_coe = 0;
4168 else
4169 priv->plat->tx_coe = priv->dma_cap.tx_coe;
4170
f748be53
AT
4171 /* In case of GMAC4 rx_coe is from HW cap register. */
4172 priv->plat->rx_coe = priv->dma_cap.rx_coe;
38912bdb
DS
4173
4174 if (priv->dma_cap.rx_coe_type2)
4175 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4176 else if (priv->dma_cap.rx_coe_type1)
4177 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4178
38ddc59d
LC
4179 } else {
4180 dev_info(priv->device, "No HW DMA feature register supported\n");
4181 }
cf3f047b 4182
d2afb5bd
GC
4183 if (priv->plat->rx_coe) {
4184 priv->hw->rx_csum = priv->plat->rx_coe;
38ddc59d 4185 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
f748be53 4186 if (priv->synopsys_id < DWMAC_CORE_4_00)
38ddc59d 4187 dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
d2afb5bd 4188 }
cf3f047b 4189 if (priv->plat->tx_coe)
38ddc59d 4190 dev_info(priv->device, "TX Checksum insertion supported\n");
cf3f047b
GC
4191
4192 if (priv->plat->pmt) {
38ddc59d 4193 dev_info(priv->device, "Wake-Up On Lan supported\n");
cf3f047b
GC
4194 device_set_wakeup_capable(priv->device, 1);
4195 }
4196
f748be53 4197 if (priv->dma_cap.tsoen)
38ddc59d 4198 dev_info(priv->device, "TSO supported\n");
f748be53 4199
7cfde0af
JA
4200 /* Run HW quirks, if any */
4201 if (priv->hwif_quirks) {
4202 ret = priv->hwif_quirks(priv);
4203 if (ret)
4204 return ret;
4205 }
4206
3b509466
JA
 4207	 /* Rx Watchdog is available in the COREs newer than 3.40.
 4208	 * In some cases, for example on buggy HW, this feature
 4209	 * has to be disabled and this can be done by passing the
4210 * riwt_off field from the platform.
4211 */
4212 if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
4213 (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
4214 priv->use_riwt = 1;
4215 dev_info(priv->device,
4216 "Enable RX Mitigation via HW Watchdog Timer\n");
4217 }
4218
c24602ef 4219 return 0;
cf3f047b
GC
4220}
4221
47dd7a54 4222/**
bfab27a1
GC
4223 * stmmac_dvr_probe
4224 * @device: device pointer
ff3dd78c 4225 * @plat_dat: platform data pointer
e56788cf 4226 * @res: stmmac resource pointer
bfab27a1
GC
4227 * Description: this is the main probe function used to
 4228 * call alloc_etherdev and allocate the priv structure.
9afec6ef 4229 * Return:
15ffac73 4230 * returns 0 on success, otherwise errno.
47dd7a54 4231 */
15ffac73
JE
4232int stmmac_dvr_probe(struct device *device,
4233 struct plat_stmmacenet_data *plat_dat,
4234 struct stmmac_resources *res)
47dd7a54 4235{
bfab27a1
GC
4236 struct net_device *ndev = NULL;
4237 struct stmmac_priv *priv;
8fce3331 4238 u32 queue, maxq;
c22a3f48 4239 int ret = 0;
47dd7a54 4240
c22a3f48
JP
4241 ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4242 MTL_MAX_TX_QUEUES,
4243 MTL_MAX_RX_QUEUES);
41de8d4c 4244 if (!ndev)
15ffac73 4245 return -ENOMEM;
bfab27a1
GC
4246
4247 SET_NETDEV_DEV(ndev, device);
4248
4249 priv = netdev_priv(ndev);
4250 priv->device = device;
4251 priv->dev = ndev;
47dd7a54 4252
bfab27a1 4253 stmmac_set_ethtool_ops(ndev);
cf3f047b
GC
4254 priv->pause = pause;
4255 priv->plat = plat_dat;
e56788cf
JE
4256 priv->ioaddr = res->addr;
4257 priv->dev->base_addr = (unsigned long)res->addr;
4258
4259 priv->dev->irq = res->irq;
4260 priv->wol_irq = res->wol_irq;
4261 priv->lpi_irq = res->lpi_irq;
4262
4263 if (res->mac)
4264 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
cf3f047b 4265
a7a62685 4266 dev_set_drvdata(device, priv->dev);
803f8fc4 4267
cf3f047b
GC
4268 /* Verify driver arguments */
4269 stmmac_verify_args();
bfab27a1 4270
34877a15
JA
4271 /* Allocate workqueue */
4272 priv->wq = create_singlethread_workqueue("stmmac_wq");
4273 if (!priv->wq) {
4274 dev_err(priv->device, "failed to create workqueue\n");
b26322d2 4275 ret = -ENOMEM;
34877a15
JA
4276 goto error_wq;
4277 }
4278
4279 INIT_WORK(&priv->service_task, stmmac_service_task);
4280
cf3f047b 4281 /* Override with kernel parameters if supplied XXX CRS XXX
ceb69499
GC
4282 * this needs to have multiple instances
4283 */
cf3f047b
GC
4284 if ((phyaddr >= 0) && (phyaddr <= 31))
4285 priv->plat->phy_addr = phyaddr;
4286
90f522a2
EP
4287 if (priv->plat->stmmac_rst) {
4288 ret = reset_control_assert(priv->plat->stmmac_rst);
f573c0b9 4289 reset_control_deassert(priv->plat->stmmac_rst);
90f522a2
EP
 4290		/* Some reset controllers have only a reset callback instead of
 4291		 * an assert + deassert callback pair.
4292 */
4293 if (ret == -ENOTSUPP)
4294 reset_control_reset(priv->plat->stmmac_rst);
4295 }
c5e4ddbd 4296
cf3f047b 4297 /* Init MAC and get the capabilities */
c24602ef
GC
4298 ret = stmmac_hw_init(priv);
4299 if (ret)
62866e98 4300 goto error_hw_init;
cf3f047b 4301
c22a3f48 4302 /* Configure real RX and TX queues */
c02b7a91
JP
4303 netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4304 netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
c22a3f48 4305
cf3f047b 4306 ndev->netdev_ops = &stmmac_netdev_ops;
bfab27a1 4307
cf3f047b
GC
4308 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4309 NETIF_F_RXCSUM;
f748be53 4310
4dbbe8dd
JA
4311 ret = stmmac_tc_init(priv, priv);
4312 if (!ret) {
4313 ndev->hw_features |= NETIF_F_HW_TC;
4314 }
4315
f748be53 4316 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
9edfa7da 4317 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
f748be53 4318 priv->tso = true;
38ddc59d 4319 dev_info(priv->device, "TSO feature enabled\n");
f748be53 4320 }
bfab27a1
GC
4321 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4322 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
47dd7a54
GC
4323#ifdef STMMAC_VLAN_TAG_USED
4324 /* Both mac100 and gmac support receive VLAN tag detection */
ab188e8f 4325 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
47dd7a54
GC
4326#endif
4327 priv->msg_enable = netif_msg_init(debug, default_msg_level);
4328
44770e11
JW
4329 /* MTU range: 46 - hw-specific max */
4330 ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4331 if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4332 ndev->max_mtu = JUMBO_LEN;
7d9e6c5a
JA
4333 else if (priv->plat->has_xgmac)
4334 ndev->max_mtu = XGMAC_JUMBO_LEN;
44770e11
JW
4335 else
4336 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
a2cd64f3
KHL
4337 /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
 4338	 * as well as plat->maxmtu < ndev->min_mtu, which is an invalid range.
4339 */
4340 if ((priv->plat->maxmtu < ndev->max_mtu) &&
4341 (priv->plat->maxmtu >= ndev->min_mtu))
44770e11 4342 ndev->max_mtu = priv->plat->maxmtu;
a2cd64f3 4343 else if (priv->plat->maxmtu < ndev->min_mtu)
b618ab45
HK
4344 dev_warn(priv->device,
4345 "%s: warning: maxmtu having invalid value (%d)\n",
4346 __func__, priv->plat->maxmtu);
44770e11 4347
47dd7a54
GC
4348 if (flow_ctrl)
4349 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
4350
8fce3331
JA
4351 /* Setup channels NAPI */
4352 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
c22a3f48 4353
8fce3331
JA
4354 for (queue = 0; queue < maxq; queue++) {
4355 struct stmmac_channel *ch = &priv->channel[queue];
4356
4357 ch->priv_data = priv;
4358 ch->index = queue;
4359
4ccb4585
JA
4360 if (queue < priv->plat->rx_queues_to_use) {
4361 netif_napi_add(ndev, &ch->rx_napi, stmmac_napi_poll_rx,
4362 NAPI_POLL_WEIGHT);
4363 }
4364 if (queue < priv->plat->tx_queues_to_use) {
4365 netif_napi_add(ndev, &ch->tx_napi, stmmac_napi_poll_tx,
4366 NAPI_POLL_WEIGHT);
4367 }
c22a3f48 4368 }
47dd7a54 4369
29555fa3 4370 mutex_init(&priv->lock);
f8e96161 4371
cd7201f4
GC
4372 /* If a specific clk_csr value is passed from the platform
4373 * this means that the CSR Clock Range selection cannot be
 4374	 * changed at run-time and it is fixed. Otherwise the driver will try to
4375 * set the MDC clock dynamically according to the csr actual
4376 * clock input.
4377 */
4378 if (!priv->plat->clk_csr)
4379 stmmac_clk_csr_set(priv);
4380 else
4381 priv->clk_csr = priv->plat->clk_csr;
4382
e58bb43f
GC
4383 stmmac_check_pcs_mode(priv);
4384
3fe5cadb
GC
4385 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4386 priv->hw->pcs != STMMAC_PCS_TBI &&
4387 priv->hw->pcs != STMMAC_PCS_RTBI) {
e58bb43f
GC
4388 /* MDIO bus Registration */
4389 ret = stmmac_mdio_register(ndev);
4390 if (ret < 0) {
b618ab45
HK
4391 dev_err(priv->device,
4392 "%s: MDIO bus (id: %d) registration failed",
4393 __func__, priv->plat->bus_id);
e58bb43f
GC
4394 goto error_mdio_register;
4395 }
4bfcbd7a
FV
4396 }
4397
57016590 4398 ret = register_netdev(ndev);
b2eb09af 4399 if (ret) {
b618ab45
HK
4400 dev_err(priv->device, "%s: ERROR %i registering the device\n",
4401 __func__, ret);
b2eb09af
FF
4402 goto error_netdev_register;
4403 }
57016590 4404
5f2b8b62
TR
4405#ifdef CONFIG_DEBUG_FS
4406 ret = stmmac_init_fs(ndev);
4407 if (ret < 0)
4408 netdev_warn(priv->dev, "%s: failed debugFS registration\n",
4409 __func__);
4410#endif
4411
57016590 4412 return ret;
47dd7a54 4413
6a81c26f 4414error_netdev_register:
b2eb09af
FF
4415 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4416 priv->hw->pcs != STMMAC_PCS_TBI &&
4417 priv->hw->pcs != STMMAC_PCS_RTBI)
4418 stmmac_mdio_unregister(ndev);
6a81c26f 4419error_mdio_register:
8fce3331
JA
4420 for (queue = 0; queue < maxq; queue++) {
4421 struct stmmac_channel *ch = &priv->channel[queue];
c22a3f48 4422
4ccb4585
JA
4423 if (queue < priv->plat->rx_queues_to_use)
4424 netif_napi_del(&ch->rx_napi);
4425 if (queue < priv->plat->tx_queues_to_use)
4426 netif_napi_del(&ch->tx_napi);
c22a3f48 4427 }
62866e98 4428error_hw_init:
34877a15
JA
4429 destroy_workqueue(priv->wq);
4430error_wq:
34a52f36 4431 free_netdev(ndev);
47dd7a54 4432
15ffac73 4433 return ret;
47dd7a54 4434}
b2e2f0c7 4435EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
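A minimal sketch of how a bus glue driver is expected to hand over to this core probe; the base, pdev and plat_dat variables are assumptions (the real glue code lives in the dwmac-* platform/PCI drivers):

	struct stmmac_resources res;

	memset(&res, 0, sizeof(res));
	res.addr = base;			/* ioremapped MAC register window */
	res.irq = platform_get_irq(pdev, 0);	/* main MAC/DMA interrupt */

	return stmmac_dvr_probe(&pdev->dev, plat_dat, &res);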
47dd7a54
GC
4436
4437/**
4438 * stmmac_dvr_remove
f4e7bd81 4439 * @dev: device pointer
47dd7a54 4440 * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
bfab27a1 4441 * changes the link status, and releases the DMA descriptor rings.
47dd7a54 4442 */
f4e7bd81 4443int stmmac_dvr_remove(struct device *dev)
47dd7a54 4444{
f4e7bd81 4445 struct net_device *ndev = dev_get_drvdata(dev);
aec7ff27 4446 struct stmmac_priv *priv = netdev_priv(ndev);
47dd7a54 4447
38ddc59d 4448 netdev_info(priv->dev, "%s: removing driver", __func__);
47dd7a54 4449
5f2b8b62
TR
4450#ifdef CONFIG_DEBUG_FS
4451 stmmac_exit_fs(ndev);
4452#endif
ae4f0d46 4453 stmmac_stop_all_dma(priv);
47dd7a54 4454
c10d4c82 4455 stmmac_mac_set(priv, priv->ioaddr, false);
47dd7a54 4456 netif_carrier_off(ndev);
47dd7a54 4457 unregister_netdev(ndev);
f573c0b9 4458 if (priv->plat->stmmac_rst)
4459 reset_control_assert(priv->plat->stmmac_rst);
4460 clk_disable_unprepare(priv->plat->pclk);
4461 clk_disable_unprepare(priv->plat->stmmac_clk);
3fe5cadb
GC
4462 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4463 priv->hw->pcs != STMMAC_PCS_TBI &&
4464 priv->hw->pcs != STMMAC_PCS_RTBI)
e743471f 4465 stmmac_mdio_unregister(ndev);
34877a15 4466 destroy_workqueue(priv->wq);
29555fa3 4467 mutex_destroy(&priv->lock);
47dd7a54
GC
4468 free_netdev(ndev);
4469
4470 return 0;
4471}
b2e2f0c7 4472EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
47dd7a54 4473
732fdf0e
GC
4474/**
4475 * stmmac_suspend - suspend callback
f4e7bd81 4476 * @dev: device pointer
732fdf0e
GC
4477 * Description: this is the function to suspend the device and it is called
 4478 * by the platform driver to stop the network queues, release the resources,
 4479 * program the PMT register (for WoL) and clean up the driver resources.
4480 */
f4e7bd81 4481int stmmac_suspend(struct device *dev)
47dd7a54 4482{
f4e7bd81 4483 struct net_device *ndev = dev_get_drvdata(dev);
874bd42d 4484 struct stmmac_priv *priv = netdev_priv(ndev);
47dd7a54 4485
874bd42d 4486 if (!ndev || !netif_running(ndev))
47dd7a54
GC
4487 return 0;
4488
d6d50c7e
PR
4489 if (ndev->phydev)
4490 phy_stop(ndev->phydev);
102463b1 4491
29555fa3 4492 mutex_lock(&priv->lock);
47dd7a54 4493
874bd42d 4494 netif_device_detach(ndev);
c22a3f48 4495 stmmac_stop_all_queues(priv);
47dd7a54 4496
c22a3f48 4497 stmmac_disable_all_queues(priv);
874bd42d
GC
4498
4499 /* Stop TX/RX DMA */
ae4f0d46 4500 stmmac_stop_all_dma(priv);
c24602ef 4501
874bd42d 4502 /* Enable Power down mode by programming the PMT regs */
89f7f2cf 4503 if (device_may_wakeup(priv->device)) {
c10d4c82 4504 stmmac_pmt(priv, priv->hw, priv->wolopts);
89f7f2cf
SK
4505 priv->irq_wake = 1;
4506 } else {
c10d4c82 4507 stmmac_mac_set(priv, priv->ioaddr, false);
db88f10a 4508 pinctrl_pm_select_sleep_state(priv->device);
ba1377ff 4509 /* Disable clock in case of PWM is off */
f573c0b9 4510 clk_disable(priv->plat->pclk);
4511 clk_disable(priv->plat->stmmac_clk);
ba1377ff 4512 }
29555fa3 4513 mutex_unlock(&priv->lock);
2d871aa0 4514
4d869b03 4515 priv->oldlink = false;
bd00632c
LC
4516 priv->speed = SPEED_UNKNOWN;
4517 priv->oldduplex = DUPLEX_UNKNOWN;
47dd7a54
GC
4518 return 0;
4519}
b2e2f0c7 4520EXPORT_SYMBOL_GPL(stmmac_suspend);
47dd7a54 4521
54139cf3
JP
4522/**
4523 * stmmac_reset_queues_param - reset queue parameters
 4524 * @priv: driver private structure
4525 */
4526static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4527{
4528 u32 rx_cnt = priv->plat->rx_queues_to_use;
ce736788 4529 u32 tx_cnt = priv->plat->tx_queues_to_use;
54139cf3
JP
4530 u32 queue;
4531
4532 for (queue = 0; queue < rx_cnt; queue++) {
4533 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4534
4535 rx_q->cur_rx = 0;
4536 rx_q->dirty_rx = 0;
4537 }
4538
ce736788
JP
4539 for (queue = 0; queue < tx_cnt; queue++) {
4540 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4541
4542 tx_q->cur_tx = 0;
4543 tx_q->dirty_tx = 0;
8d212a9e 4544 tx_q->mss = 0;
ce736788 4545 }
54139cf3
JP
4546}
4547
732fdf0e
GC
4548/**
4549 * stmmac_resume - resume callback
f4e7bd81 4550 * @dev: device pointer
732fdf0e
GC
 4551 * Description: when resuming, this function is invoked to set up the DMA and CORE
4552 * in a usable state.
4553 */
f4e7bd81 4554int stmmac_resume(struct device *dev)
47dd7a54 4555{
f4e7bd81 4556 struct net_device *ndev = dev_get_drvdata(dev);
874bd42d 4557 struct stmmac_priv *priv = netdev_priv(ndev);
47dd7a54 4558
874bd42d 4559 if (!netif_running(ndev))
47dd7a54
GC
4560 return 0;
4561
47dd7a54
GC
 4562	/* Power Down bit, in the PM register, is cleared
4563 * automatically as soon as a magic packet or a Wake-up frame
4564 * is received. Anyway, it's better to manually clear
4565 * this bit because it can generate problems while resuming
ceb69499
GC
 4566	 * from other devices (e.g. serial console).
4567 */
623997fb 4568 if (device_may_wakeup(priv->device)) {
29555fa3 4569 mutex_lock(&priv->lock);
c10d4c82 4570 stmmac_pmt(priv, priv->hw, 0);
29555fa3 4571 mutex_unlock(&priv->lock);
89f7f2cf 4572 priv->irq_wake = 0;
623997fb 4573 } else {
db88f10a 4574 pinctrl_pm_select_default_state(priv->device);
8d45e42b 4575 /* enable the clk previously disabled */
f573c0b9 4576 clk_enable(priv->plat->stmmac_clk);
4577 clk_enable(priv->plat->pclk);
623997fb
SK
4578 /* reset the phy so that it's ready */
4579 if (priv->mii)
4580 stmmac_mdio_reset(priv->mii);
4581 }
47dd7a54 4582
874bd42d 4583 netif_device_attach(ndev);
47dd7a54 4584
29555fa3 4585 mutex_lock(&priv->lock);
f55d84b0 4586
54139cf3
JP
4587 stmmac_reset_queues_param(priv);
4588
ae79a639
GC
4589 stmmac_clear_descriptors(priv);
4590
fe131929 4591 stmmac_hw_setup(ndev, false);
777da230 4592 stmmac_init_tx_coalesce(priv);
ac316c78 4593 stmmac_set_rx_mode(ndev);
47dd7a54 4594
c22a3f48 4595 stmmac_enable_all_queues(priv);
47dd7a54 4596
c22a3f48 4597 stmmac_start_all_queues(priv);
47dd7a54 4598
29555fa3 4599 mutex_unlock(&priv->lock);
102463b1 4600
d6d50c7e
PR
4601 if (ndev->phydev)
4602 phy_start(ndev->phydev);
102463b1 4603
47dd7a54
GC
4604 return 0;
4605}
b2e2f0c7 4606EXPORT_SYMBOL_GPL(stmmac_resume);
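stmmac_suspend() and stmmac_resume() are exported so that the bus glue can wire them into its dev_pm_ops; a minimal sketch (the ops name is an assumption):

	static SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops, stmmac_suspend, stmmac_resume);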
ba27ec66 4607
47dd7a54
GC
4608#ifndef MODULE
4609static int __init stmmac_cmdline_opt(char *str)
4610{
4611 char *opt;
4612
4613 if (!str || !*str)
4614 return -EINVAL;
4615 while ((opt = strsep(&str, ",")) != NULL) {
f3240e28 4616 if (!strncmp(opt, "debug:", 6)) {
ea2ab871 4617 if (kstrtoint(opt + 6, 0, &debug))
f3240e28
GC
4618 goto err;
4619 } else if (!strncmp(opt, "phyaddr:", 8)) {
ea2ab871 4620 if (kstrtoint(opt + 8, 0, &phyaddr))
f3240e28 4621 goto err;
f3240e28 4622 } else if (!strncmp(opt, "buf_sz:", 7)) {
ea2ab871 4623 if (kstrtoint(opt + 7, 0, &buf_sz))
f3240e28
GC
4624 goto err;
4625 } else if (!strncmp(opt, "tc:", 3)) {
ea2ab871 4626 if (kstrtoint(opt + 3, 0, &tc))
f3240e28
GC
4627 goto err;
4628 } else if (!strncmp(opt, "watchdog:", 9)) {
ea2ab871 4629 if (kstrtoint(opt + 9, 0, &watchdog))
f3240e28
GC
4630 goto err;
4631 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
ea2ab871 4632 if (kstrtoint(opt + 10, 0, &flow_ctrl))
f3240e28
GC
4633 goto err;
4634 } else if (!strncmp(opt, "pause:", 6)) {
ea2ab871 4635 if (kstrtoint(opt + 6, 0, &pause))
f3240e28 4636 goto err;
506f669c 4637 } else if (!strncmp(opt, "eee_timer:", 10)) {
d765955d
GC
4638 if (kstrtoint(opt + 10, 0, &eee_timer))
4639 goto err;
4a7d666a
GC
4640 } else if (!strncmp(opt, "chain_mode:", 11)) {
4641 if (kstrtoint(opt + 11, 0, &chain_mode))
4642 goto err;
f3240e28 4643 }
47dd7a54
GC
4644 }
4645 return 0;
f3240e28
GC
4646
4647err:
4648 pr_err("%s: ERROR broken module parameter conversion", __func__);
4649 return -EINVAL;
47dd7a54
GC
4650}
4651
4652__setup("stmmaceth=", stmmac_cmdline_opt);
ceb69499 4653#endif /* MODULE */
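Since stmmac_cmdline_opt() is only built in the non-modular case, here is a hedged illustration of the boot argument it parses; the option names come from the strncmp() checks above, while the values are arbitrary:

	stmmaceth=debug:16,phyaddr:1,watchdog:4000,eee_timer:1000,chain_mode:1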
6fc0d0f2 4654
466c5ac8
MO
4655static int __init stmmac_init(void)
4656{
4657#ifdef CONFIG_DEBUG_FS
4658 /* Create debugfs main directory if it doesn't exist yet */
4659 if (!stmmac_fs_dir) {
4660 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4661
4662 if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4663 pr_err("ERROR %s, debugfs create directory failed\n",
4664 STMMAC_RESOURCE_NAME);
4665
4666 return -ENOMEM;
4667 }
4668 }
4669#endif
4670
4671 return 0;
4672}
4673
4674static void __exit stmmac_exit(void)
4675{
4676#ifdef CONFIG_DEBUG_FS
4677 debugfs_remove_recursive(stmmac_fs_dir);
4678#endif
4679}
4680
4681module_init(stmmac_init)
4682module_exit(stmmac_exit)
4683
6fc0d0f2
GC
4684MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4685MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4686MODULE_LICENSE("GPL");