/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

	Copyright(C) 2007-2011 STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
	http://www.stlinux.com
  Support available at:
	https://bugzilla.stlinux.com/
*******************************************************************************/

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"

#define STMMAC_ALIGN(x)		L1_CACHE_ALIGN(x)
#define TSO_MAX_BUFF_SIZE	(SZ_16K - 1)

/* Module parameters */
#define TX_TIMEO	5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, S_IRUGO);
MODULE_PARM_DESC(phyaddr, "Physical device address");

#define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
#define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)

static int flow_ctrl = FLOW_OFF;
module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define	DEFAULT_BUFSIZE	1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define	STMMAC_RX_COPYBREAK	256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))

/* By default the driver will use the ring mode to manage tx and rx
 * descriptors, but allow the user to force the chain mode instead.
 */
static unsigned int chain_mode;
module_param(chain_mode, int, S_IRUGO);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");

static irqreturn_t stmmac_interrupt(int irq, void *dev_id);

#ifdef CONFIG_DEBUG_FS
static int stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))

/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case
 * of errors.
 */
static void stmmac_verify_args(void)
{
	if (unlikely(watchdog < 0))
		watchdog = TX_TIMEO;
	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
		buf_sz = DEFAULT_BUFSIZE;
	if (unlikely(flow_ctrl > 1))
		flow_ctrl = FLOW_AUTO;
	else if (likely(flow_ctrl < 0))
		flow_ctrl = FLOW_OFF;
	if (unlikely((pause < 0) || (pause > 0xffff)))
		pause = PAUSE_TIME;
	if (eee_timer < 0)
		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}

/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *	If a specific clk_csr value is passed from the platform
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and it is fixed (as reported in the driver
 *	documentation). Otherwise the driver will try to set the MDC
 *	clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
	u32 clk_rate;

	clk_rate = clk_get_rate(priv->plat->stmmac_clk);

	/* Platform provided default clk_csr would be assumed valid
	 * for all other cases except for the below mentioned ones.
	 * For values higher than the IEEE 802.3 specified frequency
	 * we cannot estimate the proper divider as the frequency of
	 * clk_csr_i is not known. So we do not change the default
	 * divider.
	 */
	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
		if (clk_rate < CSR_F_35M)
			priv->clk_csr = STMMAC_CSR_20_35M;
		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
			priv->clk_csr = STMMAC_CSR_35_60M;
		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
			priv->clk_csr = STMMAC_CSR_60_100M;
		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
			priv->clk_csr = STMMAC_CSR_100_150M;
		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
			priv->clk_csr = STMMAC_CSR_150_250M;
		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
			priv->clk_csr = STMMAC_CSR_250_300M;
	}
}

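/* Worked example (numbers assumed for illustration, not taken from any
 * particular platform): if clk_csr_i runs at 75 MHz and the platform does
 * not force a fixed clk_csr value, the code above picks STMMAC_CSR_60_100M,
 * i.e. the CSR divider suited to a 60-100 MHz input, which keeps the
 * resulting MDC frequency within the IEEE 802.3 limit of 2.5 MHz.
 */
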
static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}

static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
{
	u32 avail;

	if (priv->dirty_tx > priv->cur_tx)
		avail = priv->dirty_tx - priv->cur_tx - 1;
	else
		avail = DMA_TX_SIZE - priv->cur_tx + priv->dirty_tx - 1;

	return avail;
}

static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv)
{
	u32 dirty;

	if (priv->dirty_rx <= priv->cur_rx)
		dirty = priv->cur_rx - priv->dirty_rx;
	else
		dirty = DMA_RX_SIZE - priv->dirty_rx + priv->cur_rx;

	return dirty;
}

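/* Ring-occupancy illustration (assumed values): with a 256-entry TX ring,
 * cur_tx = 250 and dirty_tx = 10, the producer has wrapped, so
 * stmmac_tx_avail() returns 256 - 250 + 10 - 1 = 15 free descriptors.
 * The "- 1" keeps one slot unused so that a completely full ring can be
 * told apart from an empty one (both would otherwise have cur == dirty).
 */
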
/**
 * stmmac_hw_fix_mac_speed - callback for speed selection
 * @priv: driver private structure
 * Description: on some platforms (e.g. ST), some HW system configuration
 * registers have to be set according to the link speed negotiated.
 */
static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
{
	struct net_device *ndev = priv->dev;
	struct phy_device *phydev = ndev->phydev;

	if (likely(priv->plat->fix_mac_speed))
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
}

/**
 * stmmac_enable_eee_mode - check and enter LPI mode
 * @priv: driver private structure
 * Description: this function checks and enters LPI mode in case EEE is
 * supported.
 */
static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
	/* Check and enter in LPI mode */
	if ((priv->dirty_tx == priv->cur_tx) &&
	    (priv->tx_path_in_lpi_mode == false))
		priv->hw->mac->set_eee_mode(priv->hw,
					    priv->plat->en_tx_lpi_clockgating);
}

/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function exits and disables EEE when the LPI state is
 * true. It is called by the xmit path.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	priv->hw->mac->reset_eee_mode(priv->hw);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @arg : data hook
 * Description:
 *  if there is no data transfer and if we are not in LPI state,
 *  then the MAC Transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(unsigned long arg)
{
	struct stmmac_priv *priv = (struct stmmac_priv *)arg;

	stmmac_enable_eee_mode(priv);
	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
}

/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts the
 *  related timer.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	struct net_device *ndev = priv->dev;
	unsigned long flags;
	bool ret = false;

	/* Using PCS we cannot deal with the PHY registers at this stage,
	 * so we do not support extra features like EEE.
	 */
	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
	    (priv->hw->pcs == STMMAC_PCS_RTBI))
		goto out;

	/* MAC core supports the EEE feature. */
	if (priv->dma_cap.eee) {
		int tx_lpi_timer = priv->tx_lpi_timer;

		/* Check if the PHY supports EEE */
		if (phy_init_eee(ndev->phydev, 1)) {
			/* Handle the case where, at run-time, EEE can no
			 * longer be supported (for example because the link
			 * partner caps have changed); in that case the
			 * driver disables its own timers.
			 */
			spin_lock_irqsave(&priv->lock, flags);
			if (priv->eee_active) {
				netdev_dbg(priv->dev, "disable EEE\n");
				del_timer_sync(&priv->eee_ctrl_timer);
				priv->hw->mac->set_eee_timer(priv->hw, 0,
							     tx_lpi_timer);
			}
			priv->eee_active = 0;
			spin_unlock_irqrestore(&priv->lock, flags);
			goto out;
		}
		/* Activate the EEE and start timers */
		spin_lock_irqsave(&priv->lock, flags);
		if (!priv->eee_active) {
			priv->eee_active = 1;
			setup_timer(&priv->eee_ctrl_timer,
				    stmmac_eee_ctrl_timer,
				    (unsigned long)priv);
			mod_timer(&priv->eee_ctrl_timer,
				  STMMAC_LPI_T(eee_timer));

			priv->hw->mac->set_eee_timer(priv->hw,
						     STMMAC_DEFAULT_LIT_LS,
						     tx_lpi_timer);
		}
		/* Set HW EEE according to the speed */
		priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);

		ret = true;
		spin_unlock_irqrestore(&priv->lock, flags);

		netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
	}
out:
	return ret;
}

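/* Timing sketch (using the module default eee_timer = 1000 ms): the control
 * timer re-arms itself every second; each expiry calls
 * stmmac_enable_eee_mode(), which asks the MAC to enter LPI only if the TX
 * ring has drained (dirty_tx == cur_tx). The next transmit then calls
 * stmmac_disable_eee_mode() and the cycle starts again.
 */
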
/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function will read the timestamp from the descriptor, perform some
 * sanity checks and pass it to the stack.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
				   struct dma_desc *p, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	u64 ns;

	if (!priv->hwts_tx_en)
		return;

	/* exit if skb doesn't support hw tstamp */
	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
		return;

	/* check tx tstamp status */
	if (!priv->hw->desc->get_tx_timestamp_status(p)) {
		/* get the valid tstamp */
		ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);

		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp.hwtstamp = ns_to_ktime(ns);

		netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns);
		/* pass tstamp to stack */
		skb_tstamp_tx(skb, &shhwtstamp);
	}
}

/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function will read the received packet's timestamp from the
 * descriptor and pass it to the stack. It also performs some sanity checks.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
				   struct dma_desc *np, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
	u64 ns;

	if (!priv->hwts_rx_en)
		return;

	/* Check if timestamp is available */
	if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
		/* For GMAC4, the valid timestamp is from CTX next desc. */
		if (priv->plat->has_gmac4)
			ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
		else
			ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);

		netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns);
		shhwtstamp = skb_hwtstamps(skb);
		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp->hwtstamp = ns_to_ktime(ns);
	} else {
		netdev_err(priv->dev, "cannot get RX hw timestamp\n");
	}
}

/**
 * stmmac_hwtstamp_ioctl - control hardware timestamping.
 * @dev: device pointer.
 * @ifr: An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * Description:
 * This function configures the MAC to enable/disable both outgoing(TX)
 * and incoming(RX) packets time stamping based on user input.
 * Return Value:
 * 0 on success and an appropriate -ve integer on failure.
 */
static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	struct timespec64 now;
	u64 temp = 0;
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;
	u32 value = 0;
	u32 sec_inc;

	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(struct hwtstamp_config)))
		return -EFAULT;

	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config.flags, config.tx_type, config.rx_filter);

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	if (priv->adv_ts) {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.AS1 any layer, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.AS1, any layer, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.AS1, any layer, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
		priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
	else {
		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
			 tstamp_all | ptp_v2 | ptp_over_ethernet |
			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
			 ts_master_en | snap_type_sel);
		priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);

		/* program Sub Second Increment reg */
		sec_inc = priv->hw->ptp->config_sub_second_increment(
			priv->ptpaddr, priv->plat->clk_ptp_rate,
			priv->plat->has_gmac4);
		temp = div_u64(1000000000ULL, sec_inc);

		/* calculate default added value:
		 * formula is :
		 * addend = (2^32)/freq_div_ratio;
		 * where, freq_div_ratio = 1e9ns/sec_inc
		 */
		temp = (u64)(temp << 32);
		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
		priv->hw->ptp->config_addend(priv->ptpaddr,
					     priv->default_addend);

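		/* Worked example (assumed figures, only for illustration):
		 * with sec_inc = 20ns the counter must advance at
		 * freq_div_ratio = 1e9 / 20 = 50MHz. For a 62.5MHz
		 * clk_ptp_rate this gives
		 * addend = 2^32 * 50e6 / 62.5e6 = 0xCCCCCCCC, i.e. the
		 * 32-bit accumulator overflows (and the timestamp counter
		 * steps by sec_inc) on 80% of the PTP clock cycles.
		 */
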
		/* initialize system time */
		ktime_get_real_ts64(&now);

		/* lower 32 bits of tv_sec are safe until y2106 */
		priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
					    now.tv_nsec);
	}

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
}

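/* Usage sketch from the userspace side (a hedged illustration; "eth0" and
 * sock_fd are assumed names): the handler above is reached through the
 * standard SIOCSHWTSTAMP ioctl, e.g.:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr;
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * On return, cfg.rx_filter reports the filter the driver actually enabled,
 * which may be broader than the one requested.
 */
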
/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
 * This is done by looking at the HW cap. register.
 * This function also registers the ptp driver.
 */
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	priv->adv_ts = 0;
	/* Check if adv_ts can be enabled for dwmac 4.x core */
	if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;
	/* Dwmac 3.x core with extend_desc can support adv_ts */
	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;

	if (priv->dma_cap.time_stamp)
		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");

	if (priv->adv_ts)
		netdev_info(priv->dev,
			    "IEEE 1588-2008 Advanced Timestamp supported\n");

	priv->hw->ptp = &stmmac_ptp;
	priv->hwts_tx_en = 0;
	priv->hwts_rx_en = 0;

	stmmac_ptp_register(priv);

	return 0;
}

static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	if (priv->plat->clk_ptp_ref)
		clk_disable_unprepare(priv->plat->clk_ptp_ref);
	stmmac_ptp_unregister(priv);
}

/**
 * stmmac_mac_flow_ctrl - Configure flow control in all queues
 * @priv: driver private structure
 * @duplex: duplex mode negotiated on the link
 * Description: It is used for configuring the flow control in all queues
 */
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;

	priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl,
				 priv->pause, tx_cnt);
}

/**
 * stmmac_adjust_link - adjusts the link parameters
 * @dev: net device structure
 * Description: this is the helper called by the physical abstraction layer
 * drivers to communicate the phy link status. According to the speed and
 * duplex this driver can invoke registered glue-logic as well.
 * It also invokes the eee initialization because it could happen when
 * switching between networks (that are eee capable).
 */
static void stmmac_adjust_link(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned long flags;
	int new_state = 0;

	if (!phydev)
		return;

	spin_lock_irqsave(&priv->lock, flags);

	if (phydev->link) {
		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				ctrl &= ~priv->hw->link.duplex;
			else
				ctrl |= priv->hw->link.duplex;
			priv->oldduplex = phydev->duplex;
		}
		/* Flow Control operation */
		if (phydev->pause)
			stmmac_mac_flow_ctrl(priv, phydev->duplex);

		if (phydev->speed != priv->speed) {
			new_state = 1;
			switch (phydev->speed) {
			case 1000:
				if (priv->plat->has_gmac ||
				    priv->plat->has_gmac4)
					ctrl &= ~priv->hw->link.port;
				break;
			case 100:
				if (priv->plat->has_gmac ||
				    priv->plat->has_gmac4) {
					ctrl |= priv->hw->link.port;
					ctrl |= priv->hw->link.speed;
				} else {
					ctrl &= ~priv->hw->link.port;
				}
				break;
			case 10:
				if (priv->plat->has_gmac ||
				    priv->plat->has_gmac4) {
					ctrl |= priv->hw->link.port;
					ctrl &= ~(priv->hw->link.speed);
				} else {
					ctrl &= ~priv->hw->link.port;
				}
				break;
			default:
				netif_warn(priv, link, priv->dev,
					   "broken speed: %d\n", phydev->speed);
				phydev->speed = SPEED_UNKNOWN;
				break;
			}
			if (phydev->speed != SPEED_UNKNOWN)
				stmmac_hw_fix_mac_speed(priv);
			priv->speed = phydev->speed;
		}

		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->speed = SPEED_UNKNOWN;
		priv->oldduplex = DUPLEX_UNKNOWN;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (phydev->is_pseudo_fixed_link)
		/* Stop PHY layer to call the hook to adjust the link in case
		 * of a switch is attached to the stmmac driver.
		 */
		phydev->irq = PHY_IGNORE_INTERRUPT;
	else
		/* At this stage, init the EEE if supported.
		 * Never called in case of fixed_link.
		 */
		priv->eee_enabled = stmmac_eee_init(priv);
}

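/* Informative note (bit names per the Synopsys GMAC databook; hedged here):
 * on GMAC cores priv->hw->link.port and priv->hw->link.speed map to the
 * MAC_CTRL_REG "PS" (port select) and "FES" (fast Ethernet speed) bits, so
 * the switch above programs 1000 Mb/s as PS = 0, 100 Mb/s as PS = 1/FES = 1
 * and 10 Mb/s as PS = 1/FES = 0.
 */
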
/**
 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PCS.
 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
 * configured for the TBI, RTBI, or SGMII PHY interface.
 */
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
	int interface = priv->plat->interface;

	if (priv->dma_cap.pcs) {
		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_RGMII;
		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_SGMII;
		}
	}
}

/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 * Return value:
 * 0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev;
	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
	char bus_id[MII_BUS_ID_SIZE];
	int interface = priv->plat->interface;
	int max_speed = priv->plat->max_speed;

	priv->oldlink = 0;
	priv->speed = SPEED_UNKNOWN;
	priv->oldduplex = DUPLEX_UNKNOWN;

	if (priv->plat->phy_node) {
		phydev = of_phy_connect(dev, priv->plat->phy_node,
					&stmmac_adjust_link, 0, interface);
	} else {
		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
			 priv->plat->bus_id);

		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
			 priv->plat->phy_addr);
		netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
			   phy_id_fmt);

		phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
				     interface);
	}

	if (IS_ERR_OR_NULL(phydev)) {
		netdev_err(priv->dev, "Could not attach to PHY\n");
		if (!phydev)
			return -ENODEV;

		return PTR_ERR(phydev);
	}

	/* Stop Advertising 1000BASE Capability if interface is not GMII */
	if ((interface == PHY_INTERFACE_MODE_MII) ||
	    (interface == PHY_INTERFACE_MODE_RMII) ||
	    (max_speed < 1000 && max_speed > 0))
		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
					 SUPPORTED_1000baseT_Full);

	/*
	 * Broken HW is sometimes missing the pull-up resistor on the
	 * MDIO line, which results in reads to non-existent devices returning
	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
	 * device as well.
	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
	 */
	if (!priv->plat->phy_node && phydev->phy_id == 0) {
		phy_disconnect(phydev);
		return -ENODEV;
	}

	/* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
	 * subsequent PHY polling, make sure we force a link transition if
	 * we have a UP/DOWN/UP transition
	 */
	if (phydev->is_pseudo_fixed_link)
		phydev->irq = PHY_POLL;

	phy_attached_info(phydev);
	return 0;
}

static void stmmac_display_rings(struct stmmac_priv *priv)
{
	void *head_rx, *head_tx;

	if (priv->extend_desc) {
		head_rx = (void *)priv->dma_erx;
		head_tx = (void *)priv->dma_etx;
	} else {
		head_rx = (void *)priv->dma_rx;
		head_tx = (void *)priv->dma_tx;
	}

	/* Display Rx ring */
	priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
	/* Display Tx ring */
	priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
}

static int stmmac_set_bfsize(int mtu, int bufsize)
{
	int ret = bufsize;

	if (mtu >= BUF_SIZE_4KiB)
		ret = BUF_SIZE_8KiB;
	else if (mtu >= BUF_SIZE_2KiB)
		ret = BUF_SIZE_4KiB;
	else if (mtu > DEFAULT_BUFSIZE)
		ret = BUF_SIZE_2KiB;
	else
		ret = DEFAULT_BUFSIZE;

	return ret;
}

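/* Illustration (assumed MTUs): a default 1500-byte MTU fits the
 * DEFAULT_BUFSIZE (1536-byte) buffers, a 3000-byte MTU selects
 * BUF_SIZE_4KiB and a 5000-byte MTU selects BUF_SIZE_8KiB; the buffer is
 * always rounded up to the next supported size so a whole frame fits in a
 * single RX buffer.
 */
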
/**
 * stmmac_clear_descriptors - clear descriptors
 * @priv: driver private structure
 * Description: this function is called to clear the tx and rx descriptors,
 * whether basic or extended descriptors are in use.
 */
static void stmmac_clear_descriptors(struct stmmac_priv *priv)
{
	int i;

	/* Clear the Rx/Tx descriptors */
	for (i = 0; i < DMA_RX_SIZE; i++)
		if (priv->extend_desc)
			priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic,
						     priv->use_riwt, priv->mode,
						     (i == DMA_RX_SIZE - 1));
		else
			priv->hw->desc->init_rx_desc(&priv->dma_rx[i],
						     priv->use_riwt, priv->mode,
						     (i == DMA_RX_SIZE - 1));
	for (i = 0; i < DMA_TX_SIZE; i++)
		if (priv->extend_desc)
			priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
						     priv->mode,
						     (i == DMA_TX_SIZE - 1));
		else
			priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
						     priv->mode,
						     (i == DMA_TX_SIZE - 1));
}

/**
 * stmmac_init_rx_buffers - init the RX descriptor buffer.
 * @priv: driver private structure
 * @p: descriptor pointer
 * @i: descriptor index
 * @flags: gfp flag.
 * Description: this function is called to allocate a receive buffer, perform
 * the DMA mapping and init the descriptor.
 */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
				  int i, gfp_t flags)
{
	struct sk_buff *skb;

	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
	if (!skb) {
		netdev_err(priv->dev,
			   "%s: Rx init fails; skb is NULL\n", __func__);
		return -ENOMEM;
	}
	priv->rx_skbuff[i] = skb;
	priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
						priv->dma_buf_sz,
						DMA_FROM_DEVICE);
	if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) {
		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	if (priv->synopsys_id >= DWMAC_CORE_4_00)
		p->des0 = cpu_to_le32(priv->rx_skbuff_dma[i]);
	else
		p->des2 = cpu_to_le32(priv->rx_skbuff_dma[i]);

	if ((priv->hw->mode->init_desc3) &&
	    (priv->dma_buf_sz == BUF_SIZE_16KiB))
		priv->hw->mode->init_desc3(p);

	return 0;
}

static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
{
	if (priv->rx_skbuff[i]) {
		dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
				 priv->dma_buf_sz, DMA_FROM_DEVICE);
		dev_kfree_skb_any(priv->rx_skbuff[i]);
	}
	priv->rx_skbuff[i] = NULL;
}

/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
{
	int i;
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned int bfsize = 0;
	int ret = -ENOMEM;

	if (priv->hw->mode->set_16kib_bfsize)
		bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);

	if (bfsize < BUF_SIZE_16KiB)
		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);

	priv->dma_buf_sz = bfsize;

	netif_dbg(priv, probe, priv->dev,
		  "(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n",
		  __func__, (u32)priv->dma_rx_phy, (u32)priv->dma_tx_phy);

	/* RX INITIALIZATION */
	netif_dbg(priv, probe, priv->dev,
		  "SKB addresses:\nskb\t\tskb data\tdma data\n");

	for (i = 0; i < DMA_RX_SIZE; i++) {
		struct dma_desc *p;
		if (priv->extend_desc)
			p = &((priv->dma_erx + i)->basic);
		else
			p = priv->dma_rx + i;

		ret = stmmac_init_rx_buffers(priv, p, i, flags);
		if (ret)
			goto err_init_rx_buffers;

		netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
			  priv->rx_skbuff[i], priv->rx_skbuff[i]->data,
			  (unsigned int)priv->rx_skbuff_dma[i]);
	}
	priv->cur_rx = 0;
	priv->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
	buf_sz = bfsize;

	/* Setup the chained descriptor addresses */
	if (priv->mode == STMMAC_CHAIN_MODE) {
		if (priv->extend_desc) {
			priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy,
					     DMA_RX_SIZE, 1);
			priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy,
					     DMA_TX_SIZE, 1);
		} else {
			priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy,
					     DMA_RX_SIZE, 0);
			priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy,
					     DMA_TX_SIZE, 0);
		}
	}

	/* TX INITIALIZATION */
	for (i = 0; i < DMA_TX_SIZE; i++) {
		struct dma_desc *p;
		if (priv->extend_desc)
			p = &((priv->dma_etx + i)->basic);
		else
			p = priv->dma_tx + i;

		if (priv->synopsys_id >= DWMAC_CORE_4_00) {
			p->des0 = 0;
			p->des1 = 0;
			p->des2 = 0;
			p->des3 = 0;
		} else {
			p->des2 = 0;
		}

		priv->tx_skbuff_dma[i].buf = 0;
		priv->tx_skbuff_dma[i].map_as_page = false;
		priv->tx_skbuff_dma[i].len = 0;
		priv->tx_skbuff_dma[i].last_segment = false;
		priv->tx_skbuff[i] = NULL;
	}

	priv->dirty_tx = 0;
	priv->cur_tx = 0;
	netdev_reset_queue(priv->dev);

	stmmac_clear_descriptors(priv);

	if (netif_msg_hw(priv))
		stmmac_display_rings(priv);

	return 0;
err_init_rx_buffers:
	while (--i >= 0)
		stmmac_free_rx_buffers(priv, i);
	return ret;
}

static void dma_free_rx_skbufs(struct stmmac_priv *priv)
{
	int i;

	for (i = 0; i < DMA_RX_SIZE; i++)
		stmmac_free_rx_buffers(priv, i);
}

static void dma_free_tx_skbufs(struct stmmac_priv *priv)
{
	int i;

	for (i = 0; i < DMA_TX_SIZE; i++) {
		if (priv->tx_skbuff_dma[i].buf) {
			if (priv->tx_skbuff_dma[i].map_as_page)
				dma_unmap_page(priv->device,
					       priv->tx_skbuff_dma[i].buf,
					       priv->tx_skbuff_dma[i].len,
					       DMA_TO_DEVICE);
			else
				dma_unmap_single(priv->device,
						 priv->tx_skbuff_dma[i].buf,
						 priv->tx_skbuff_dma[i].len,
						 DMA_TO_DEVICE);
		}

		if (priv->tx_skbuff[i]) {
			dev_kfree_skb_any(priv->tx_skbuff[i]);
			priv->tx_skbuff[i] = NULL;
			priv->tx_skbuff_dma[i].buf = 0;
			priv->tx_skbuff_dma[i].map_as_page = false;
		}
	}
}

/**
 * alloc_dma_desc_resources - alloc TX/RX resources.
 * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocates the RX socket buffers in order to
 * allow zero-copy mechanism.
 */
static int alloc_dma_desc_resources(struct stmmac_priv *priv)
{
	int ret = -ENOMEM;

	priv->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, sizeof(dma_addr_t),
					    GFP_KERNEL);
	if (!priv->rx_skbuff_dma)
		return -ENOMEM;

	priv->rx_skbuff = kmalloc_array(DMA_RX_SIZE, sizeof(struct sk_buff *),
					GFP_KERNEL);
	if (!priv->rx_skbuff)
		goto err_rx_skbuff;

	priv->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
					    sizeof(*priv->tx_skbuff_dma),
					    GFP_KERNEL);
	if (!priv->tx_skbuff_dma)
		goto err_tx_skbuff_dma;

	priv->tx_skbuff = kmalloc_array(DMA_TX_SIZE, sizeof(struct sk_buff *),
					GFP_KERNEL);
	if (!priv->tx_skbuff)
		goto err_tx_skbuff;

	if (priv->extend_desc) {
		priv->dma_erx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
						    sizeof(struct
							   dma_extended_desc),
						    &priv->dma_rx_phy,
						    GFP_KERNEL);
		if (!priv->dma_erx)
			goto err_dma;

		priv->dma_etx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
						    sizeof(struct
							   dma_extended_desc),
						    &priv->dma_tx_phy,
						    GFP_KERNEL);
		if (!priv->dma_etx) {
			dma_free_coherent(priv->device, DMA_RX_SIZE *
					  sizeof(struct dma_extended_desc),
					  priv->dma_erx, priv->dma_rx_phy);
			goto err_dma;
		}
	} else {
		priv->dma_rx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
						   sizeof(struct dma_desc),
						   &priv->dma_rx_phy,
						   GFP_KERNEL);
		if (!priv->dma_rx)
			goto err_dma;

		priv->dma_tx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
						   sizeof(struct dma_desc),
						   &priv->dma_tx_phy,
						   GFP_KERNEL);
		if (!priv->dma_tx) {
			dma_free_coherent(priv->device, DMA_RX_SIZE *
					  sizeof(struct dma_desc),
					  priv->dma_rx, priv->dma_rx_phy);
			goto err_dma;
		}
	}

	return 0;

err_dma:
	kfree(priv->tx_skbuff);
err_tx_skbuff:
	kfree(priv->tx_skbuff_dma);
err_tx_skbuff_dma:
	kfree(priv->rx_skbuff);
err_rx_skbuff:
	kfree(priv->rx_skbuff_dma);
	return ret;
}

static void free_dma_desc_resources(struct stmmac_priv *priv)
{
	/* Release the DMA TX/RX socket buffers */
	dma_free_rx_skbufs(priv);
	dma_free_tx_skbufs(priv);

	/* Free DMA regions of consistent memory previously allocated */
	if (!priv->extend_desc) {
		dma_free_coherent(priv->device,
				  DMA_TX_SIZE * sizeof(struct dma_desc),
				  priv->dma_tx, priv->dma_tx_phy);
		dma_free_coherent(priv->device,
				  DMA_RX_SIZE * sizeof(struct dma_desc),
				  priv->dma_rx, priv->dma_rx_phy);
	} else {
		dma_free_coherent(priv->device, DMA_TX_SIZE *
				  sizeof(struct dma_extended_desc),
				  priv->dma_etx, priv->dma_tx_phy);
		dma_free_coherent(priv->device, DMA_RX_SIZE *
				  sizeof(struct dma_extended_desc),
				  priv->dma_erx, priv->dma_rx_phy);
	}
	kfree(priv->rx_skbuff_dma);
	kfree(priv->rx_skbuff);
	kfree(priv->tx_skbuff_dma);
	kfree(priv->tx_skbuff);
}

/**
 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
 * @priv: driver private structure
 * Description: It is used for enabling the rx queues in the MAC
 */
static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	int queue;
	u8 mode;

	for (queue = 0; queue < rx_queues_count; queue++) {
		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
		priv->hw->mac->rx_queue_enable(priv->hw, mode, queue);
	}
}

/**
 * stmmac_dma_operation_mode - HW DMA operation mode
 * @priv: driver private structure
 * Description: it is used for configuring the DMA operation mode register in
 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
 */
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
	int rxfifosz = priv->plat->rx_fifo_size;

	if (rxfifosz == 0)
		rxfifosz = priv->dma_cap.rx_fifo_size;

	if (priv->plat->force_thresh_dma_mode)
		priv->hw->dma->dma_mode(priv->ioaddr, tc, tc, rxfifosz);
	else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
		/*
		 * In case of GMAC, SF mode can be enabled
		 * to perform the TX COE in HW. This depends on:
		 * 1) TX COE if actually supported
		 * 2) There is no bugged Jumbo frame support
		 *    that needs to not insert csum in the TDES.
		 */
		priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE,
					rxfifosz);
		priv->xstats.threshold = SF_DMA_MODE;
	} else
		priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE,
					rxfifosz);
}

/**
 * stmmac_tx_clean - to manage the transmission completion
 * @priv: driver private structure
 * Description: it reclaims the transmit resources after transmission completes.
 */
static void stmmac_tx_clean(struct stmmac_priv *priv)
{
	unsigned int bytes_compl = 0, pkts_compl = 0;
	unsigned int entry = priv->dirty_tx;

	netif_tx_lock(priv->dev);

	priv->xstats.tx_clean++;

	while (entry != priv->cur_tx) {
		struct sk_buff *skb = priv->tx_skbuff[entry];
		struct dma_desc *p;
		int status;

		if (priv->extend_desc)
			p = (struct dma_desc *)(priv->dma_etx + entry);
		else
			p = priv->dma_tx + entry;

		status = priv->hw->desc->tx_status(&priv->dev->stats,
						   &priv->xstats, p,
						   priv->ioaddr);
		/* Check if the descriptor is owned by the DMA */
		if (unlikely(status & tx_dma_own))
			break;

		/* Just consider the last segment and ...*/
		if (likely(!(status & tx_not_ls))) {
			/* ... verify the status error condition */
			if (unlikely(status & tx_err)) {
				priv->dev->stats.tx_errors++;
			} else {
				priv->dev->stats.tx_packets++;
				priv->xstats.tx_pkt_n++;
			}
			stmmac_get_tx_hwtstamp(priv, p, skb);
		}

		if (likely(priv->tx_skbuff_dma[entry].buf)) {
			if (priv->tx_skbuff_dma[entry].map_as_page)
				dma_unmap_page(priv->device,
					       priv->tx_skbuff_dma[entry].buf,
					       priv->tx_skbuff_dma[entry].len,
					       DMA_TO_DEVICE);
			else
				dma_unmap_single(priv->device,
						 priv->tx_skbuff_dma[entry].buf,
						 priv->tx_skbuff_dma[entry].len,
						 DMA_TO_DEVICE);
			priv->tx_skbuff_dma[entry].buf = 0;
			priv->tx_skbuff_dma[entry].len = 0;
			priv->tx_skbuff_dma[entry].map_as_page = false;
		}

		if (priv->hw->mode->clean_desc3)
			priv->hw->mode->clean_desc3(priv, p);

		priv->tx_skbuff_dma[entry].last_segment = false;
		priv->tx_skbuff_dma[entry].is_jumbo = false;

		if (likely(skb != NULL)) {
			pkts_compl++;
			bytes_compl += skb->len;
			dev_consume_skb_any(skb);
			priv->tx_skbuff[entry] = NULL;
		}

		priv->hw->desc->release_tx_desc(p, priv->mode);

		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
	}
	priv->dirty_tx = entry;

	netdev_completed_queue(priv->dev, pkts_compl, bytes_compl);

	if (unlikely(netif_queue_stopped(priv->dev) &&
		     stmmac_tx_avail(priv) > STMMAC_TX_THRESH)) {
		netif_dbg(priv, tx_done, priv->dev,
			  "%s: restart transmit\n", __func__);
		netif_wake_queue(priv->dev);
	}

	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
		stmmac_enable_eee_mode(priv);
		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
	}
	netif_tx_unlock(priv->dev);
}

static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv)
{
	priv->hw->dma->enable_dma_irq(priv->ioaddr);
}

static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv)
{
	priv->hw->dma->disable_dma_irq(priv->ioaddr);
}

/**
 * stmmac_tx_err - to manage the tx error
 * @priv: driver private structure
 * Description: it cleans the descriptors and restarts the transmission
 * in case of transmission errors.
 */
static void stmmac_tx_err(struct stmmac_priv *priv)
{
	int i;

	netif_stop_queue(priv->dev);

	priv->hw->dma->stop_tx(priv->ioaddr);
	dma_free_tx_skbufs(priv);
	for (i = 0; i < DMA_TX_SIZE; i++)
		if (priv->extend_desc)
			priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
						     priv->mode,
						     (i == DMA_TX_SIZE - 1));
		else
			priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
						     priv->mode,
						     (i == DMA_TX_SIZE - 1));
	priv->dirty_tx = 0;
	priv->cur_tx = 0;
	netdev_reset_queue(priv->dev);
	priv->hw->dma->start_tx(priv->ioaddr);

	priv->dev->stats.tx_errors++;
	netif_wake_queue(priv->dev);
}

/**
 * stmmac_dma_interrupt - DMA ISR
 * @priv: driver private structure
 * Description: this is the DMA ISR. It is called by the main ISR.
 * It calls the dwmac dma routine and schedules the poll method in case
 * some work can be done.
 */
static void stmmac_dma_interrupt(struct stmmac_priv *priv)
{
	int status;
	int rxfifosz = priv->plat->rx_fifo_size;

	if (rxfifosz == 0)
		rxfifosz = priv->dma_cap.rx_fifo_size;

	status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
	if (likely((status & handle_rx)) || (status & handle_tx)) {
		if (likely(napi_schedule_prep(&priv->napi))) {
			stmmac_disable_dma_irq(priv);
			__napi_schedule(&priv->napi);
		}
	}
	if (unlikely(status & tx_hard_error_bump_tc)) {
		/* Try to bump up the dma threshold on this failure */
		if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
		    (tc <= 256)) {
			tc += 64;
			if (priv->plat->force_thresh_dma_mode)
				priv->hw->dma->dma_mode(priv->ioaddr, tc, tc,
							rxfifosz);
			else
				priv->hw->dma->dma_mode(priv->ioaddr, tc,
							SF_DMA_MODE, rxfifosz);
			priv->xstats.threshold = tc;
		}
	} else if (unlikely(status == tx_hard_error))
		stmmac_tx_err(priv);
}

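/* Threshold-bumping illustration: tc starts at TC_DEFAULT (64 bytes); each
 * interrupt flagged tx_hard_error_bump_tc grows it in 64-byte steps
 * (64 -> 128 -> 192 -> 256 -> 320) until tc exceeds 256, after which no
 * further bump is attempted. A plain tx_hard_error instead triggers the
 * stmmac_tx_err() recovery path.
 */
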
/**
 * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
 * @priv: driver private structure
 * Description: this masks the MMC irq, in fact, the counters are managed in SW.
 */
static void stmmac_mmc_setup(struct stmmac_priv *priv)
{
	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;

	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
		priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
		priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
	} else {
		priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
		priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
	}

	dwmac_mmc_intr_all_mask(priv->mmcaddr);

	if (priv->dma_cap.rmon) {
		dwmac_mmc_ctrl(priv->mmcaddr, mode);
		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
	} else
		netdev_info(priv->dev, "No MAC Management Counters available\n");
}

/**
 * stmmac_selec_desc_mode - to select among: normal/alternate/extend descriptors
 * @priv: driver private structure
 * Description: select the Enhanced/Alternate or Normal descriptors.
 * In case of Enhanced/Alternate, it checks if the extended descriptors are
 * supported by the HW capability register.
 */
static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
{
	if (priv->plat->enh_desc) {
		dev_info(priv->device, "Enhanced/Alternate descriptors\n");

		/* GMAC older than 3.50 has no extended descriptors */
		if (priv->synopsys_id >= DWMAC_CORE_3_50) {
			dev_info(priv->device, "Enabled extended descriptors\n");
			priv->extend_desc = 1;
		} else
			dev_warn(priv->device, "Extended descriptors not supported\n");

		priv->hw->desc = &enh_desc_ops;
	} else {
		dev_info(priv->device, "Normal descriptors\n");
		priv->hw->desc = &ndesc_ops;
	}
}

/**
 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
 * @priv: driver private structure
 * Description:
 *  new GMAC chip generations have a new register to indicate the
 *  presence of the optional feature/functions.
 *  This can also be used to override the value passed through the
 *  platform and necessary for old MAC10/100 and GMAC chips.
 */
static int stmmac_get_hw_features(struct stmmac_priv *priv)
{
	u32 ret = 0;

	if (priv->hw->dma->get_hw_feature) {
		priv->hw->dma->get_hw_feature(priv->ioaddr,
					      &priv->dma_cap);
		ret = 1;
	}

	return ret;
}

32ceabca 1560/**
732fdf0e 1561 * stmmac_check_ether_addr - check if the MAC addr is valid
32ceabca
GC
1562 * @priv: driver private structure
1563 * Description:
1564 * it is to verify if the MAC address is valid, in case of failures it
1565 * generates a random MAC address
1566 */
bfab27a1
GC
1567static void stmmac_check_ether_addr(struct stmmac_priv *priv)
1568{
bfab27a1 1569 if (!is_valid_ether_addr(priv->dev->dev_addr)) {
7ed24bbe 1570 priv->hw->mac->get_umac_addr(priv->hw,
bfab27a1 1571 priv->dev->dev_addr, 0);
ceb69499 1572 if (!is_valid_ether_addr(priv->dev->dev_addr))
f2cedb63 1573 eth_hw_addr_random(priv->dev);
38ddc59d
LC
1574 netdev_info(priv->dev, "device MAC address %pM\n",
1575 priv->dev->dev_addr);
bfab27a1 1576 }
bfab27a1
GC
1577}

/**
 * stmmac_init_dma_engine - DMA init.
 * @priv: driver private structure
 * Description:
 * It initializes the DMA by invoking the specific MAC/GMAC callback.
 * Some DMA parameters can be passed from the platform;
 * if they are not passed, a default is kept for the MAC or GMAC.
 */
static int stmmac_init_dma_engine(struct stmmac_priv *priv)
{
	int atds = 0;
	int ret = 0;

	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
		dev_err(priv->device, "Invalid DMA configuration\n");
		return -EINVAL;
	}

	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
		atds = 1;

	ret = priv->hw->dma->reset(priv->ioaddr);
	if (ret) {
		dev_err(priv->device, "Failed to reset the dma\n");
		return ret;
	}

	priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
			    priv->dma_tx_phy, priv->dma_rx_phy, atds);

	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
		priv->rx_tail_addr = priv->dma_rx_phy +
				     (DMA_RX_SIZE * sizeof(struct dma_desc));
		priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, priv->rx_tail_addr,
					       STMMAC_CHAN0);

		priv->tx_tail_addr = priv->dma_tx_phy +
				     (DMA_TX_SIZE * sizeof(struct dma_desc));
		priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
					       STMMAC_CHAN0);
	}

	if (priv->plat->axi && priv->hw->dma->axi)
		priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);

	return ret;
}

/**
 * stmmac_tx_timer - mitigation sw timer for tx.
 * @data: data pointer
 * Description:
 * This is the timer handler that directly invokes stmmac_tx_clean.
 */
static void stmmac_tx_timer(unsigned long data)
{
	struct stmmac_priv *priv = (struct stmmac_priv *)data;

	stmmac_tx_clean(priv);
}

/**
 * stmmac_init_tx_coalesce - init tx mitigation options.
 * @priv: driver private structure
 * Description:
 * This initializes the transmit coalescing parameters, i.e. timer rate,
 * timer handler and the default threshold used for setting the
 * interrupt-on-completion bit.
 */
static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
{
	priv->tx_coal_frames = STMMAC_TX_FRAMES;
	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
	init_timer(&priv->txtimer);
	priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
	priv->txtimer.data = (unsigned long)priv;
	priv->txtimer.function = stmmac_tx_timer;
	add_timer(&priv->txtimer);
}

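/*
 * Note: with these defaults a TX completion interrupt is raised either
 * after tx_coal_frames frames have been queued or when txtimer fires,
 * whichever happens first; see the mitigation logic in stmmac_xmit()
 * and stmmac_tso_xmit() below.
 */
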
/**
 * stmmac_set_tx_queue_weight - Set TX queue weight
 * @priv: driver private structure
 * Description: It is used for setting the weight of each TX queue
 */
static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
{
	u32 tx_queues_count = priv->plat->tx_queues_to_use;
	u32 weight;
	u32 queue;

	for (queue = 0; queue < tx_queues_count; queue++) {
		weight = priv->plat->tx_queues_cfg[queue].weight;
		priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue);
	}
}

/**
 * stmmac_configure_cbs - Configure CBS in TX queue
 * @priv: driver private structure
 * Description: It is used for configuring CBS in AVB TX queues
 */
static void stmmac_configure_cbs(struct stmmac_priv *priv)
{
	u32 tx_queues_count = priv->plat->tx_queues_to_use;
	u32 mode_to_use;
	u32 queue;

	for (queue = 0; queue < tx_queues_count; queue++) {
		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
		if (mode_to_use == MTL_QUEUE_DCB)
			continue;

		priv->hw->mac->config_cbs(priv->hw,
				priv->plat->tx_queues_cfg[queue].send_slope,
				priv->plat->tx_queues_cfg[queue].idle_slope,
				priv->plat->tx_queues_cfg[queue].high_credit,
				priv->plat->tx_queues_cfg[queue].low_credit,
				queue);
	}
}

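/*
 * Note: queues configured as MTL_QUEUE_DCB are skipped above, so CBS
 * (credit-based shaping) is only programmed on the AVB queues; the
 * slope/credit parameters come straight from the platform data.
 */
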
/**
 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
 * @priv: driver private structure
 * Description: It is used for mapping RX queues to RX dma channels
 */
static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 queue;
	u32 chan;

	for (queue = 0; queue < rx_queues_count; queue++) {
		chan = priv->plat->rx_queues_cfg[queue].chan;
		priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan);
	}
}

/**
 * stmmac_mtl_configuration - Configure MTL
 * @priv: driver private structure
 * Description: It is used for configuring the MTL
 */
static void stmmac_mtl_configuration(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 tx_queues_count = priv->plat->tx_queues_to_use;

	if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight)
		stmmac_set_tx_queue_weight(priv);

	/* Configure MTL RX algorithms */
	if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms)
		priv->hw->mac->prog_mtl_rx_algorithms(priv->hw,
						priv->plat->rx_sched_algorithm);

	/* Configure MTL TX algorithms */
	if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms)
		priv->hw->mac->prog_mtl_tx_algorithms(priv->hw,
						priv->plat->tx_sched_algorithm);

	/* Configure CBS in AVB TX queues */
	if (tx_queues_count > 1 && priv->hw->mac->config_cbs)
		stmmac_configure_cbs(priv);

	/* Map RX MTL to DMA channels */
	if (rx_queues_count > 1 && priv->hw->mac->map_mtl_to_dma)
		stmmac_rx_queue_dma_chan_map(priv);

	/* Enable MAC RX Queues */
	if (rx_queues_count > 1 && priv->hw->mac->rx_queue_enable)
		stmmac_mac_enable_rx_queues(priv);
}

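/*
 * Note: every MTL helper above is guarded both by the queue count and
 * by the presence of the corresponding callback, so single-queue setups
 * skip this block even on GMAC4 cores where it is invoked.
 */
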
/**
 * stmmac_hw_setup - setup mac in a usable state.
 * @dev : pointer to the device structure.
 * Description:
 * this is the main function to set up the HW in a usable state: the DMA
 * engine is reset, the core registers are configured (e.g. AXI,
 * checksum features, timers) and the DMA is made ready to start
 * receiving and transmitting.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */
static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;

	/* DMA initialization and SW reset */
	ret = stmmac_init_dma_engine(priv);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
			   __func__);
		return ret;
	}

	/* Copy the MAC addr into the HW */
	priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);

	/* PS and related bits will be programmed according to the speed */
	if (priv->hw->pcs) {
		int speed = priv->plat->mac_port_sel_speed;

		if ((speed == SPEED_10) || (speed == SPEED_100) ||
		    (speed == SPEED_1000)) {
			priv->hw->ps = speed;
		} else {
			dev_warn(priv->device, "invalid port speed\n");
			priv->hw->ps = 0;
		}
	}

	/* Initialize the MAC Core */
	priv->hw->mac->core_init(priv->hw, dev->mtu);

	/* Initialize MTL */
	if (priv->synopsys_id >= DWMAC_CORE_4_00)
		stmmac_mtl_configuration(priv);

	ret = priv->hw->mac->rx_ipc(priv->hw);
	if (!ret) {
		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
		priv->hw->rx_csum = 0;
	}

	/* Enable the MAC Rx/Tx */
	if (priv->synopsys_id >= DWMAC_CORE_4_00)
		stmmac_dwmac4_set_mac(priv->ioaddr, true);
	else
		stmmac_set_mac(priv->ioaddr, true);

	/* Set the HW DMA mode and the COE */
	stmmac_dma_operation_mode(priv);

	stmmac_mmc_setup(priv);

	if (init_ptp) {
		ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
		if (ret < 0)
			netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);

		ret = stmmac_init_ptp(priv);
		if (ret == -EOPNOTSUPP)
			netdev_warn(priv->dev, "PTP not supported by HW\n");
		else if (ret)
			netdev_warn(priv->dev, "PTP init failed\n");
	}

#ifdef CONFIG_DEBUG_FS
	ret = stmmac_init_fs(dev);
	if (ret < 0)
		netdev_warn(priv->dev, "%s: failed debugFS registration\n",
			    __func__);
#endif
	/* Start the ball rolling... */
	netdev_dbg(priv->dev, "DMA RX/TX processes started...\n");
	priv->hw->dma->start_tx(priv->ioaddr);
	priv->hw->dma->start_rx(priv->ioaddr);

	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;

	if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
		priv->rx_riwt = MAX_DMA_RIWT;
		priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
	}

	if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
		priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);

	/* set TX ring length */
	if (priv->hw->dma->set_tx_ring_len)
		priv->hw->dma->set_tx_ring_len(priv->ioaddr,
					       (DMA_TX_SIZE - 1));
	/* set RX ring length */
	if (priv->hw->dma->set_rx_ring_len)
		priv->hw->dma->set_rx_ring_len(priv->ioaddr,
					       (DMA_RX_SIZE - 1));
	/* Enable TSO */
	if (priv->tso)
		priv->hw->dma->enable_tso(priv->ioaddr, 1, STMMAC_CHAN0);

	return 0;
}

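/*
 * Note: the bring-up order in stmmac_hw_setup() is deliberate: the DMA
 * is reset and initialized before the core registers are touched, and
 * the RX/TX DMA processes are started only once the MAC, MTL, COE and
 * PTP blocks have been configured.
 */
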
static void stmmac_hw_teardown(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	clk_disable_unprepare(priv->plat->clk_ptp_ref);
}

/**
 * stmmac_open - open entry point of the driver
 * @dev : pointer to the device structure.
 * Description:
 * This function is the open entry point of the driver.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */
static int stmmac_open(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;

	stmmac_check_ether_addr(priv);

	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
	    priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI) {
		ret = stmmac_init_phy(dev);
		if (ret) {
			netdev_err(priv->dev,
				   "%s: Cannot attach to PHY (error: %d)\n",
				   __func__, ret);
			return ret;
		}
	}

	/* Extra statistics */
	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
	priv->xstats.threshold = tc;

	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
	priv->rx_copybreak = STMMAC_RX_COPYBREAK;

	ret = alloc_dma_desc_resources(priv);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
			   __func__);
		goto dma_desc_error;
	}

	ret = init_dma_desc_rings(dev, GFP_KERNEL);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
			   __func__);
		goto init_error;
	}

	ret = stmmac_hw_setup(dev, true);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
		goto init_error;
	}

	stmmac_init_tx_coalesce(priv);

	if (dev->phydev)
		phy_start(dev->phydev);

	/* Request the IRQ lines */
	ret = request_irq(dev->irq, stmmac_interrupt,
			  IRQF_SHARED, dev->name, dev);
	if (unlikely(ret < 0)) {
		netdev_err(priv->dev,
			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
			   __func__, dev->irq, ret);
		goto irq_error;
	}

	/* Request the Wake IRQ in case another line is used for WoL */
	if (priv->wol_irq != dev->irq) {
		ret = request_irq(priv->wol_irq, stmmac_interrupt,
				  IRQF_SHARED, dev->name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
				   __func__, priv->wol_irq, ret);
			goto wolirq_error;
		}
	}

	/* Request the LPI IRQ in case a separate line is used for it */
	if (priv->lpi_irq > 0) {
		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
				  dev->name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
				   __func__, priv->lpi_irq, ret);
			goto lpiirq_error;
		}
	}

	napi_enable(&priv->napi);
	netif_start_queue(dev);

	return 0;

lpiirq_error:
	if (priv->wol_irq != dev->irq)
		free_irq(priv->wol_irq, dev);
wolirq_error:
	free_irq(dev->irq, dev);
irq_error:
	if (dev->phydev)
		phy_stop(dev->phydev);

	del_timer_sync(&priv->txtimer);
	stmmac_hw_teardown(dev);
init_error:
	free_dma_desc_resources(priv);
dma_desc_error:
	if (dev->phydev)
		phy_disconnect(dev->phydev);

	return ret;
}

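/*
 * Note: the error labels in stmmac_open() unwind in reverse order of
 * acquisition (LPI IRQ -> WoL IRQ -> main IRQ -> timer/HW -> rings),
 * so each failure path releases only what was actually set up.
 */
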
/**
 * stmmac_release - close entry point of the driver
 * @dev : device pointer.
 * Description:
 * This is the stop entry point of the driver.
 */
static int stmmac_release(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	if (priv->eee_enabled)
		del_timer_sync(&priv->eee_ctrl_timer);

	/* Stop and disconnect the PHY */
	if (dev->phydev) {
		phy_stop(dev->phydev);
		phy_disconnect(dev->phydev);
	}

	netif_stop_queue(dev);

	napi_disable(&priv->napi);

	del_timer_sync(&priv->txtimer);

	/* Free the IRQ lines */
	free_irq(dev->irq, dev);
	if (priv->wol_irq != dev->irq)
		free_irq(priv->wol_irq, dev);
	if (priv->lpi_irq > 0)
		free_irq(priv->lpi_irq, dev);

	/* Stop TX/RX DMA and clear the descriptors */
	priv->hw->dma->stop_tx(priv->ioaddr);
	priv->hw->dma->stop_rx(priv->ioaddr);

	/* Release and free the Rx/Tx resources */
	free_dma_desc_resources(priv);

	/* Disable the MAC Rx/Tx */
	stmmac_set_mac(priv->ioaddr, false);

	netif_carrier_off(dev);

#ifdef CONFIG_DEBUG_FS
	stmmac_exit_fs(dev);
#endif

	stmmac_release_ptp(priv);

	return 0;
}

/**
 * stmmac_tso_allocator - fill the TX descriptors for a TSO payload
 * @priv: driver private structure
 * @des: buffer start address
 * @total_len: total length to fill in descriptors
 * @last_segment: condition for the last descriptor
 * Description:
 * This function fills the descriptors and requests new descriptors
 * according to the buffer length to fill.
 */
static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
				 int total_len, bool last_segment)
{
	struct dma_desc *desc;
	int tmp_len;
	u32 buff_size;

	tmp_len = total_len;

	while (tmp_len > 0) {
		priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
		desc = priv->dma_tx + priv->cur_tx;

		desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
			    TSO_MAX_BUFF_SIZE : tmp_len;

		priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
			0, 1,
			(last_segment) && (buff_size < TSO_MAX_BUFF_SIZE),
			0, 0);

		tmp_len -= TSO_MAX_BUFF_SIZE;
	}
}

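/*
 * For example, with TSO_MAX_BUFF_SIZE = SZ_16K - 1 = 16383 bytes, a
 * 40000-byte payload is spread by stmmac_tso_allocator() over three
 * descriptors holding 16383 + 16383 + 7234 bytes, each pointing at the
 * proper offset into the same DMA-mapped buffer.
 */
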
/**
 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
 * @skb : the socket buffer
 * @dev : device pointer
 * Description: this is the transmit function that is called on TSO frames
 * (support available on GMAC4 and newer chips).
 * Diagram below shows the ring programming in case of TSO frames:
 *
 * First Descriptor
 *  --------
 * | DES0 |---> buffer1 = L2/L3/L4 header
 * | DES1 |---> TCP Payload (can continue on next descr...)
 * | DES2 |---> buffer 1 and 2 len
 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
 *  --------
 *	|
 *     ...
 *	|
 *  --------
 * | DES0 | --| Split TCP Payload on Buffers 1 and 2
 * | DES1 | --|
 * | DES2 | --> buffer 1 and 2 len
 * | DES3 |
 *  --------
 *
 * The MSS is fixed while TSO is enabled, so the TDES3 context field only
 * needs reprogramming when the MSS changes.
 */
static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u32 pay_len, mss;
	int tmp_pay_len = 0;
	struct stmmac_priv *priv = netdev_priv(dev);
	int nfrags = skb_shinfo(skb)->nr_frags;
	unsigned int first_entry, des;
	struct dma_desc *desc, *first, *mss_desc = NULL;
	u8 proto_hdr_len;
	int i;

	/* Compute header lengths */
	proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

	/* Descriptor availability based on the threshold should be safe enough */
	if (unlikely(stmmac_tx_avail(priv) <
		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);
			/* This is a hard error, log it. */
			netdev_err(priv->dev,
				   "%s: Tx Ring full when queue awake\n",
				   __func__);
		}
		return NETDEV_TX_BUSY;
	}

	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */

	mss = skb_shinfo(skb)->gso_size;

	/* set new MSS value if needed */
	if (mss != priv->mss) {
		mss_desc = priv->dma_tx + priv->cur_tx;
		priv->hw->desc->set_mss(mss_desc, mss);
		priv->mss = mss;
		priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
	}

	if (netif_msg_tx_queued(priv)) {
		pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
			__func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
			skb->data_len);
	}

	first_entry = priv->cur_tx;

	desc = priv->dma_tx + first_entry;
	first = desc;

	/* first descriptor: fill Headers on Buf1 */
	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
			     DMA_TO_DEVICE);
	if (dma_mapping_error(priv->device, des))
		goto dma_map_err;

	priv->tx_skbuff_dma[first_entry].buf = des;
	priv->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
	priv->tx_skbuff[first_entry] = skb;

	first->des0 = cpu_to_le32(des);

	/* Fill start of payload in buff2 of first descriptor */
	if (pay_len)
		first->des1 = cpu_to_le32(des + proto_hdr_len);

	/* If needed take extra descriptors to fill the remaining payload */
	tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;

	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0));

	/* Prepare fragments */
	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		des = skb_frag_dma_map(priv->device, frag, 0,
				       skb_frag_size(frag),
				       DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err;

		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
				     (i == nfrags - 1));

		priv->tx_skbuff_dma[priv->cur_tx].buf = des;
		priv->tx_skbuff_dma[priv->cur_tx].len = skb_frag_size(frag);
		priv->tx_skbuff[priv->cur_tx] = NULL;
		priv->tx_skbuff_dma[priv->cur_tx].map_as_page = true;
	}

	priv->tx_skbuff_dma[priv->cur_tx].last_segment = true;

	priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);

	if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
			  __func__);
		netif_stop_queue(dev);
	}

	dev->stats.tx_bytes += skb->len;
	priv->xstats.tx_tso_frames++;
	priv->xstats.tx_tso_nfrags += nfrags;

	/* Manage tx mitigation */
	priv->tx_count_frames += nfrags + 1;
	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
		mod_timer(&priv->txtimer,
			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
	} else {
		priv->tx_count_frames = 0;
		priv->hw->desc->set_tx_ic(desc);
		priv->xstats.tx_set_ic_bit++;
	}

	if (!priv->hwts_tx_en)
		skb_tx_timestamp(skb);

	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		     priv->hwts_tx_en)) {
		/* declare that device is doing timestamping */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		priv->hw->desc->enable_tx_timestamp(first);
	}

	/* Complete the first descriptor before granting the DMA */
	priv->hw->desc->prepare_tso_tx_desc(first, 1,
			proto_hdr_len,
			pay_len,
			1, priv->tx_skbuff_dma[first_entry].last_segment,
			tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));

	/* If context desc is used to change MSS */
	if (mss_desc)
		priv->hw->desc->set_tx_owner(mss_desc);

	/* The own bit must be the latest setting done when preparing the
	 * descriptor; a barrier is then needed to make sure that all is
	 * coherent before granting the DMA engine.
	 */
	dma_wmb();

	if (netif_msg_pktdata(priv)) {
		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
			__func__, priv->cur_tx, priv->dirty_tx, first_entry,
			priv->cur_tx, first, nfrags);

		priv->hw->desc->display_ring((void *)priv->dma_tx, DMA_TX_SIZE,
					     0);

		pr_info(">>> frame to be transmitted: ");
		print_pkt(skb->data, skb_headlen(skb));
	}

	netdev_sent_queue(dev, skb->len);

	priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
				       STMMAC_CHAN0);

	return NETDEV_TX_OK;

dma_map_err:
	dev_err(priv->device, "Tx dma map failed\n");
	dev_kfree_skb(skb);
	priv->dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

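/*
 * Note: in stmmac_tso_xmit() the first descriptor carries the L2/L3/L4
 * headers in buffer 1 and up to TSO_MAX_BUFF_SIZE payload bytes in
 * buffer 2; stmmac_tso_allocator() then chains extra descriptors for
 * whatever head payload remains (tmp_pay_len) and for each fragment.
 */
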
/**
 * stmmac_xmit - Tx entry point of the driver
 * @skb : the socket buffer
 * @dev : device pointer
 * Description : this is the tx entry point of the driver.
 * It programs the chain or the ring and supports oversized frames
 * and SG feature.
 */
static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned int nopaged_len = skb_headlen(skb);
	int i, csum_insertion = 0, is_jumbo = 0;
	int nfrags = skb_shinfo(skb)->nr_frags;
	unsigned int entry, first_entry;
	struct dma_desc *desc, *first;
	unsigned int enh_desc;
	unsigned int des;

	/* Manage oversized TCP frames for GMAC4 device */
	if (skb_is_gso(skb) && priv->tso) {
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			return stmmac_tso_xmit(skb, dev);
	}

	if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);
			/* This is a hard error, log it. */
			netdev_err(priv->dev,
				   "%s: Tx Ring full when queue awake\n",
				   __func__);
		}
		return NETDEV_TX_BUSY;
	}

	if (priv->tx_path_in_lpi_mode)
		stmmac_disable_eee_mode(priv);

	entry = priv->cur_tx;
	first_entry = entry;

	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);

	if (likely(priv->extend_desc))
		desc = (struct dma_desc *)(priv->dma_etx + entry);
	else
		desc = priv->dma_tx + entry;

	first = desc;

	priv->tx_skbuff[first_entry] = skb;

	enh_desc = priv->plat->enh_desc;
	/* To program the descriptors according to the size of the frame */
	if (enh_desc)
		is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);

	if (unlikely(is_jumbo) && likely(priv->synopsys_id <
					 DWMAC_CORE_4_00)) {
		entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
		if (unlikely(entry < 0))
			goto dma_map_err;
	}

	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int len = skb_frag_size(frag);
		bool last_segment = (i == (nfrags - 1));

		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);

		if (likely(priv->extend_desc))
			desc = (struct dma_desc *)(priv->dma_etx + entry);
		else
			desc = priv->dma_tx + entry;

		des = skb_frag_dma_map(priv->device, frag, 0, len,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err; /* should reuse desc w/o issues */

		priv->tx_skbuff[entry] = NULL;

		priv->tx_skbuff_dma[entry].buf = des;
		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
			desc->des0 = cpu_to_le32(des);
		else
			desc->des2 = cpu_to_le32(des);

		priv->tx_skbuff_dma[entry].map_as_page = true;
		priv->tx_skbuff_dma[entry].len = len;
		priv->tx_skbuff_dma[entry].last_segment = last_segment;

		/* Prepare the descriptor and set the own bit too */
		priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
						priv->mode, 1, last_segment);
	}

	entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);

	priv->cur_tx = entry;

	if (netif_msg_pktdata(priv)) {
		void *tx_head;

		netdev_dbg(priv->dev,
			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
			   __func__, priv->cur_tx, priv->dirty_tx, first_entry,
			   entry, first, nfrags);

		if (priv->extend_desc)
			tx_head = (void *)priv->dma_etx;
		else
			tx_head = (void *)priv->dma_tx;

		priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);

		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
		print_pkt(skb->data, skb->len);
	}

	if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
			  __func__);
		netif_stop_queue(dev);
	}

	dev->stats.tx_bytes += skb->len;

	/* According to the coalesce parameter the IC bit for the latest
	 * segment is reset and the timer re-started to clean the tx status.
	 * This approach takes care of the fragments: desc is the first
	 * element in case of no SG.
	 */
	priv->tx_count_frames += nfrags + 1;
	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
		mod_timer(&priv->txtimer,
			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
	} else {
		priv->tx_count_frames = 0;
		priv->hw->desc->set_tx_ic(desc);
		priv->xstats.tx_set_ic_bit++;
	}

	if (!priv->hwts_tx_en)
		skb_tx_timestamp(skb);

	/* Ready to fill the first descriptor and set the OWN bit w/o any
	 * problems because all the descriptors are actually ready to be
	 * passed to the DMA engine.
	 */
	if (likely(!is_jumbo)) {
		bool last_segment = (nfrags == 0);

		des = dma_map_single(priv->device, skb->data,
				     nopaged_len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err;

		priv->tx_skbuff_dma[first_entry].buf = des;
		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
			first->des0 = cpu_to_le32(des);
		else
			first->des2 = cpu_to_le32(des);

		priv->tx_skbuff_dma[first_entry].len = nopaged_len;
		priv->tx_skbuff_dma[first_entry].last_segment = last_segment;

		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
			     priv->hwts_tx_en)) {
			/* declare that device is doing timestamping */
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			priv->hw->desc->enable_tx_timestamp(first);
		}

		/* Prepare the first descriptor setting the OWN bit too */
		priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
						csum_insertion, priv->mode, 1,
						last_segment);

		/* The own bit must be the latest setting done when preparing
		 * the descriptor; a barrier is then needed to make sure that
		 * all is coherent before granting the DMA engine.
		 */
		dma_wmb();
	}

	netdev_sent_queue(dev, skb->len);

	if (priv->synopsys_id < DWMAC_CORE_4_00)
		priv->hw->dma->enable_dma_transmission(priv->ioaddr);
	else
		priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
					       STMMAC_CHAN0);

	return NETDEV_TX_OK;

dma_map_err:
	netdev_err(priv->dev, "Tx DMA map failed\n");
	dev_kfree_skb(skb);
	priv->dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
{
	struct ethhdr *ehdr;
	u16 vlanid;

	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
	    NETIF_F_HW_VLAN_CTAG_RX &&
	    !__vlan_get_tag(skb, &vlanid)) {
		/* pop the vlan tag */
		ehdr = (struct ethhdr *)skb->data;
		memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
		skb_pull(skb, VLAN_HLEN);
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
	}
}

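/*
 * Note: stmmac_rx_vlan() pops the tag in software: the two MAC
 * addresses (2 * ETH_ALEN bytes) are shifted up by VLAN_HLEN and the
 * tag is handed to the stack via __vlan_hwaccel_put_tag().
 */
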
static inline int stmmac_rx_threshold_count(struct stmmac_priv *priv)
{
	if (priv->rx_zeroc_thresh < STMMAC_RX_THRESH)
		return 0;

	return 1;
}

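/*
 * Note: rx_zeroc_thresh is bumped to STMMAC_RX_THRESH in
 * stmmac_rx_refill() whenever an skb allocation fails; while it is at
 * or above that threshold, stmmac_rx() copies frames instead of
 * consuming the preallocated (zero-copy) buffers.
 */
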
/**
 * stmmac_rx_refill - refill used skb preallocated buffers
 * @priv: driver private structure
 * Description : reallocate the skbs for the zero-copy based reception
 * process.
 */
static inline void stmmac_rx_refill(struct stmmac_priv *priv)
{
	int bfsize = priv->dma_buf_sz;
	unsigned int entry = priv->dirty_rx;
	int dirty = stmmac_rx_dirty(priv);

	while (dirty-- > 0) {
		struct dma_desc *p;

		if (priv->extend_desc)
			p = (struct dma_desc *)(priv->dma_erx + entry);
		else
			p = priv->dma_rx + entry;

		if (likely(priv->rx_skbuff[entry] == NULL)) {
			struct sk_buff *skb;

			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
			if (unlikely(!skb)) {
				/* so for a while no zero-copy! */
				priv->rx_zeroc_thresh = STMMAC_RX_THRESH;
				if (unlikely(net_ratelimit()))
					dev_err(priv->device,
						"fail to alloc skb entry %d\n",
						entry);
				break;
			}

			priv->rx_skbuff[entry] = skb;
			priv->rx_skbuff_dma[entry] =
			    dma_map_single(priv->device, skb->data, bfsize,
					   DMA_FROM_DEVICE);
			if (dma_mapping_error(priv->device,
					      priv->rx_skbuff_dma[entry])) {
				netdev_err(priv->dev, "Rx DMA map failed\n");
				dev_kfree_skb(skb);
				break;
			}

			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
				p->des0 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
				p->des1 = 0;
			} else {
				p->des2 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
			}
			if (priv->hw->mode->refill_desc3)
				priv->hw->mode->refill_desc3(priv, p);

			if (priv->rx_zeroc_thresh > 0)
				priv->rx_zeroc_thresh--;

			netif_dbg(priv, rx_status, priv->dev,
				  "refill entry #%d\n", entry);
		}
		dma_wmb();

		if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
			priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
		else
			priv->hw->desc->set_rx_owner(p);

		dma_wmb();

		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
	}
	priv->dirty_rx = entry;
}

/**
 * stmmac_rx - manage the receive process
 * @priv: driver private structure
 * @limit: NAPI budget.
 * Description : this is the function called by the napi poll method.
 * It gets all the frames inside the ring.
 */
static int stmmac_rx(struct stmmac_priv *priv, int limit)
{
	unsigned int entry = priv->cur_rx;
	unsigned int next_entry;
	unsigned int count = 0;
	int coe = priv->hw->rx_csum;

	if (netif_msg_rx_status(priv)) {
		void *rx_head;

		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
		if (priv->extend_desc)
			rx_head = (void *)priv->dma_erx;
		else
			rx_head = (void *)priv->dma_rx;

		priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
	}
	while (count < limit) {
		int status;
		struct dma_desc *p;
		struct dma_desc *np;

		if (priv->extend_desc)
			p = (struct dma_desc *)(priv->dma_erx + entry);
		else
			p = priv->dma_rx + entry;

		/* read the status of the incoming frame */
		status = priv->hw->desc->rx_status(&priv->dev->stats,
						   &priv->xstats, p);
		/* check if managed by the DMA otherwise go ahead */
		if (unlikely(status & dma_own))
			break;

		count++;

		priv->cur_rx = STMMAC_GET_ENTRY(priv->cur_rx, DMA_RX_SIZE);
		next_entry = priv->cur_rx;

		if (priv->extend_desc)
			np = (struct dma_desc *)(priv->dma_erx + next_entry);
		else
			np = priv->dma_rx + next_entry;

		prefetch(np);

		if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
			priv->hw->desc->rx_extended_status(&priv->dev->stats,
							   &priv->xstats,
							   priv->dma_erx +
							   entry);
		if (unlikely(status == discard_frame)) {
			priv->dev->stats.rx_errors++;
			if (priv->hwts_rx_en && !priv->extend_desc) {
				/* DESC2 & DESC3 will be overwritten by device
				 * with timestamp value, hence reinitialize
				 * them in stmmac_rx_refill() function so that
				 * device can reuse it.
				 */
				priv->rx_skbuff[entry] = NULL;
				dma_unmap_single(priv->device,
						 priv->rx_skbuff_dma[entry],
						 priv->dma_buf_sz,
						 DMA_FROM_DEVICE);
			}
		} else {
			struct sk_buff *skb;
			int frame_len;
			unsigned int des;

			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
				des = le32_to_cpu(p->des0);
			else
				des = le32_to_cpu(p->des2);

			frame_len = priv->hw->desc->get_rx_frame_len(p, coe);

			/* If frame length is greater than skb buffer size
			 * (preallocated during init) then the packet is
			 * ignored
			 */
			if (frame_len > priv->dma_buf_sz) {
				netdev_err(priv->dev,
					   "len %d larger than size (%d)\n",
					   frame_len, priv->dma_buf_sz);
				priv->dev->stats.rx_length_errors++;
				break;
			}

			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
			 * Type frames (LLC/LLC-SNAP)
			 */
			if (unlikely(status != llc_snap))
				frame_len -= ETH_FCS_LEN;

			if (netif_msg_rx_status(priv)) {
				netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
					   p, entry, des);
				if (frame_len > ETH_FRAME_LEN)
					netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
						   frame_len, status);
			}

			/* The zero-copy is always used for all the sizes
			 * in case of GMAC4 because it needs
			 * to refill the used descriptors, always.
			 */
			if (unlikely(!priv->plat->has_gmac4 &&
				     ((frame_len < priv->rx_copybreak) ||
				     stmmac_rx_threshold_count(priv)))) {
				skb = netdev_alloc_skb_ip_align(priv->dev,
								frame_len);
				if (unlikely(!skb)) {
					if (net_ratelimit())
						dev_warn(priv->device,
							 "packet dropped\n");
					priv->dev->stats.rx_dropped++;
					break;
				}

				dma_sync_single_for_cpu(priv->device,
							priv->rx_skbuff_dma[entry],
							frame_len,
							DMA_FROM_DEVICE);
				skb_copy_to_linear_data(skb,
							priv->rx_skbuff[entry]->data,
							frame_len);

				skb_put(skb, frame_len);
				dma_sync_single_for_device(priv->device,
							   priv->rx_skbuff_dma[entry],
							   frame_len,
							   DMA_FROM_DEVICE);
			} else {
				skb = priv->rx_skbuff[entry];
				if (unlikely(!skb)) {
					netdev_err(priv->dev,
						   "%s: Inconsistent Rx chain\n",
						   priv->dev->name);
					priv->dev->stats.rx_dropped++;
					break;
				}
				prefetch(skb->data - NET_IP_ALIGN);
				priv->rx_skbuff[entry] = NULL;
				priv->rx_zeroc_thresh++;

				skb_put(skb, frame_len);
				dma_unmap_single(priv->device,
						 priv->rx_skbuff_dma[entry],
						 priv->dma_buf_sz,
						 DMA_FROM_DEVICE);
			}

			if (netif_msg_pktdata(priv)) {
				netdev_dbg(priv->dev, "frame received (%dbytes)",
					   frame_len);
				print_pkt(skb->data, frame_len);
			}

			stmmac_get_rx_hwtstamp(priv, p, np, skb);

			stmmac_rx_vlan(priv->dev, skb);

			skb->protocol = eth_type_trans(skb, priv->dev);

			if (unlikely(!coe))
				skb_checksum_none_assert(skb);
			else
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			napi_gro_receive(&priv->napi, skb);

			priv->dev->stats.rx_packets++;
			priv->dev->stats.rx_bytes += frame_len;
		}
		entry = next_entry;
	}

	stmmac_rx_refill(priv);

	priv->xstats.rx_pkt_n += count;

	return count;
}

/**
 * stmmac_poll - stmmac poll method (NAPI)
 * @napi : pointer to the napi structure.
 * @budget : maximum number of packets that the current CPU can receive from
 *	     all interfaces.
 * Description :
 * To look at the incoming frames and clear the tx resources.
 */
static int stmmac_poll(struct napi_struct *napi, int budget)
{
	struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
	int work_done = 0;

	priv->xstats.napi_poll++;
	stmmac_tx_clean(priv);

	work_done = stmmac_rx(priv, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		stmmac_enable_dma_irq(priv);
	}
	return work_done;
}

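/*
 * Note: the DMA interrupt is re-enabled only when fewer than @budget
 * frames were processed, i.e. when NAPI polling is complete; otherwise
 * the kernel keeps calling stmmac_poll() with the interrupt still
 * masked.
 */
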
/**
 * stmmac_tx_timeout
 * @dev : Pointer to net device structure
 * Description: this function is called when a packet transmission fails to
 *   complete within a reasonable time. The driver will mark the error in the
 *   netdev structure and arrange for the device to be reset to a sane state
 *   in order to transmit a new packet.
 */
static void stmmac_tx_timeout(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Clear Tx resources and restart transmitting again */
	stmmac_tx_err(priv);
}

/**
 * stmmac_set_rx_mode - entry point for multicast addressing
 * @dev : pointer to the device structure
 * Description:
 * This function is a driver entry point which gets called by the kernel
 * whenever multicast addresses must be enabled/disabled.
 * Return value:
 * void.
 */
static void stmmac_set_rx_mode(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	priv->hw->mac->set_filter(priv->hw, dev);
}

/**
 * stmmac_change_mtu - entry point to change MTU size for the device.
 * @dev : device pointer.
 * @new_mtu : the new MTU size for the device.
 * Description: the Maximum Transfer Unit (MTU) is used by the network layer
 * to drive packet transmission. Ethernet has an MTU of 1500 octets
 * (ETH_DATA_LEN). This value can be changed with ifconfig.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */
static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	if (netif_running(dev)) {
		netdev_err(priv->dev, "must be stopped to change its MTU\n");
		return -EBUSY;
	}

	dev->mtu = new_mtu;

	netdev_update_features(dev);

	return 0;
}

static netdev_features_t stmmac_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
		features &= ~NETIF_F_RXCSUM;

	if (!priv->plat->tx_coe)
		features &= ~NETIF_F_CSUM_MASK;

	/* Some GMAC devices have a bugged Jumbo frame support that
	 * needs to have the Tx COE disabled for oversized frames
	 * (due to limited buffer sizes). In this case we disable
	 * the TX csum insertion in the TDES and not use SF.
	 */
	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
		features &= ~NETIF_F_CSUM_MASK;

	/* Disable tso if asked by ethtool */
	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
		if (features & NETIF_F_TSO)
			priv->tso = true;
		else
			priv->tso = false;
	}

	return features;
}

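/*
 * Note: ndo_fix_features runs before ndo_set_features when the core
 * calls netdev_update_features(), so the checks above can veto RX/TX
 * checksum offload and TSO before stmmac_set_features() programs the
 * hardware.
 */
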
static int stmmac_set_features(struct net_device *netdev,
			       netdev_features_t features)
{
	struct stmmac_priv *priv = netdev_priv(netdev);

	/* Keep the COE Type in case checksumming is supported */
	if (features & NETIF_F_RXCSUM)
		priv->hw->rx_csum = priv->plat->rx_coe;
	else
		priv->hw->rx_csum = 0;
	/* No check needed because rx_coe has been set before and it will be
	 * fixed in case of issue.
	 */
	priv->hw->mac->rx_ipc(priv->hw);

	return 0;
}

/**
 * stmmac_interrupt - main ISR
 * @irq: interrupt number.
 * @dev_id: to pass the net device pointer.
 * Description: this is the main driver interrupt service routine.
 * It can call:
 * o DMA service routine (to manage incoming frame reception and transmission
 *   status)
 * o Core interrupts to manage: remote wake-up, management counter, LPI
 *   interrupts.
 */
static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct stmmac_priv *priv = netdev_priv(dev);

	if (priv->irq_wake)
		pm_wakeup_event(priv->device, 0);

	if (unlikely(!dev)) {
		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
		return IRQ_NONE;
	}

	/* To handle GMAC own interrupts */
	if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
		int status = priv->hw->mac->host_irq_status(priv->hw,
							    &priv->xstats);

		if (priv->synopsys_id >= DWMAC_CORE_4_00)
			status |= priv->hw->mac->host_mtl_irq_status(priv->hw,
								     STMMAC_CHAN0);

		if (unlikely(status)) {
			/* For LPI we need to save the tx status */
			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
				priv->tx_path_in_lpi_mode = true;
			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
				priv->tx_path_in_lpi_mode = false;
			if (status & CORE_IRQ_MTL_RX_OVERFLOW &&
			    priv->hw->dma->set_rx_tail_ptr)
				priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
							       priv->rx_tail_addr,
							       STMMAC_CHAN0);
		}

		/* PCS link status */
		if (priv->hw->pcs) {
			if (priv->xstats.pcs_link)
				netif_carrier_on(dev);
			else
				netif_carrier_off(dev);
		}
	}

	/* To handle DMA interrupts */
	stmmac_dma_interrupt(priv);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by NETCONSOLE and other diagnostic tools
 * to allow network I/O with interrupts disabled.
 */
static void stmmac_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	stmmac_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

/**
 * stmmac_ioctl - Entry point for the Ioctl
 * @dev: Device pointer.
 * @rq: An IOCTL specific structure, that can contain a pointer to
 *   a proprietary structure used to pass information to the driver.
 * @cmd: IOCTL command
 * Description:
 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
 */
static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	int ret = -EOPNOTSUPP;

	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (!dev->phydev)
			return -EINVAL;
		ret = phy_mii_ioctl(dev->phydev, rq, cmd);
		break;
	case SIOCSHWTSTAMP:
		ret = stmmac_hwtstamp_ioctl(dev, rq);
		break;
	default:
		break;
	}

	return ret;
}

#ifdef CONFIG_DEBUG_FS
static struct dentry *stmmac_fs_dir;

static void sysfs_display_ring(void *head, int size, int extend_desc,
			       struct seq_file *seq)
{
	int i;
	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
	struct dma_desc *p = (struct dma_desc *)head;

	for (i = 0; i < size; i++) {
		if (extend_desc) {
			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
				   i, (unsigned int)virt_to_phys(ep),
				   le32_to_cpu(ep->basic.des0),
				   le32_to_cpu(ep->basic.des1),
				   le32_to_cpu(ep->basic.des2),
				   le32_to_cpu(ep->basic.des3));
			ep++;
		} else {
			/* use the plain descriptor pointer here: 'p' is the
			 * cursor walked in this branch, 'ep' is only advanced
			 * in the extended-descriptor branch above
			 */
			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
				   i, (unsigned int)virt_to_phys(p),
				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
			p++;
		}
		seq_printf(seq, "\n");
	}
}

static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);

	if (priv->extend_desc) {
		seq_printf(seq, "Extended RX descriptor ring:\n");
		sysfs_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1, seq);
		seq_printf(seq, "Extended TX descriptor ring:\n");
		sysfs_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1, seq);
	} else {
		seq_printf(seq, "RX descriptor ring:\n");
		sysfs_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0, seq);
		seq_printf(seq, "TX descriptor ring:\n");
		sysfs_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0, seq);
	}

	return 0;
}

static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
{
	return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
}

/* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */

static const struct file_operations stmmac_rings_status_fops = {
	.owner = THIS_MODULE,
	.open = stmmac_sysfs_ring_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

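/* Example (assuming debugfs is mounted at /sys/kernel/debug):
 *   cat /sys/kernel/debug/stmmaceth/eth0/descriptors_status
 * dumps the RX/TX descriptor rings via stmmac_sysfs_ring_read() above.
 */
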
static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);

	if (!priv->hw_cap_support) {
		seq_printf(seq, "DMA HW features not supported\n");
		return 0;
	}

	seq_printf(seq, "==============================\n");
	seq_printf(seq, "\tDMA HW features\n");
	seq_printf(seq, "==============================\n");

	seq_printf(seq, "\t10/100 Mbps: %s\n",
		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
	seq_printf(seq, "\t1000 Mbps: %s\n",
		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
	seq_printf(seq, "\tHalf duplex: %s\n",
		   (priv->dma_cap.half_duplex) ? "Y" : "N");
	seq_printf(seq, "\tHash Filter: %s\n",
		   (priv->dma_cap.hash_filter) ? "Y" : "N");
	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
		   (priv->dma_cap.multi_addr) ? "Y" : "N");
	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
		   (priv->dma_cap.pcs) ? "Y" : "N");
	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
	seq_printf(seq, "\tPMT Remote wake up: %s\n",
		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
	seq_printf(seq, "\tPMT Magic Frame: %s\n",
		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
	seq_printf(seq, "\tRMON module: %s\n",
		   (priv->dma_cap.rmon) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
		   (priv->dma_cap.time_stamp) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
		   (priv->dma_cap.eee) ? "Y" : "N");
	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
		   (priv->dma_cap.tx_coe) ? "Y" : "N");
	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
			   (priv->dma_cap.rx_coe) ? "Y" : "N");
	} else {
		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
	}
	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
		   priv->dma_cap.number_rx_channel);
	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
		   priv->dma_cap.number_tx_channel);
	seq_printf(seq, "\tEnhanced descriptors: %s\n",
		   (priv->dma_cap.enh_desc) ? "Y" : "N");

	return 0;
}

static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
{
	return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
}

static const struct file_operations stmmac_dma_cap_fops = {
	.owner = THIS_MODULE,
	.open = stmmac_sysfs_dma_cap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int stmmac_init_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Create per netdev entries */
	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);

	if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
		netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");

		return -ENOMEM;
	}

	/* Entry to report DMA RX/TX rings */
	priv->dbgfs_rings_status =
		debugfs_create_file("descriptors_status", S_IRUGO,
				    priv->dbgfs_dir, dev,
				    &stmmac_rings_status_fops);

	if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
		netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
		debugfs_remove_recursive(priv->dbgfs_dir);

		return -ENOMEM;
	}

	/* Entry to report the DMA HW features */
	priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO,
						  priv->dbgfs_dir,
						  dev, &stmmac_dma_cap_fops);

	if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
		netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
		debugfs_remove_recursive(priv->dbgfs_dir);

		return -ENOMEM;
	}

	return 0;
}

static void stmmac_exit_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	debugfs_remove_recursive(priv->dbgfs_dir);
}
#endif /* CONFIG_DEBUG_FS */

static const struct net_device_ops stmmac_netdev_ops = {
	.ndo_open = stmmac_open,
	.ndo_start_xmit = stmmac_xmit,
	.ndo_stop = stmmac_release,
	.ndo_change_mtu = stmmac_change_mtu,
	.ndo_fix_features = stmmac_fix_features,
	.ndo_set_features = stmmac_set_features,
	.ndo_set_rx_mode = stmmac_set_rx_mode,
	.ndo_tx_timeout = stmmac_tx_timeout,
	.ndo_do_ioctl = stmmac_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = stmmac_poll_controller,
#endif
	.ndo_set_mac_address = eth_mac_addr,
};

/**
 * stmmac_hw_init - Init the MAC device
 * @priv: driver private structure
 * Description: this function is to configure the MAC device according to
 * some platform parameters or the HW capability register. It prepares the
 * driver to use either ring or chain modes and to setup either enhanced or
 * normal descriptors.
 */
static int stmmac_hw_init(struct stmmac_priv *priv)
{
	struct mac_device_info *mac;

	/* Identify the MAC HW device */
	if (priv->plat->has_gmac) {
		priv->dev->priv_flags |= IFF_UNICAST_FLT;
		mac = dwmac1000_setup(priv->ioaddr,
				      priv->plat->multicast_filter_bins,
				      priv->plat->unicast_filter_entries,
				      &priv->synopsys_id);
	} else if (priv->plat->has_gmac4) {
		priv->dev->priv_flags |= IFF_UNICAST_FLT;
		mac = dwmac4_setup(priv->ioaddr,
				   priv->plat->multicast_filter_bins,
				   priv->plat->unicast_filter_entries,
				   &priv->synopsys_id);
	} else {
		mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
	}
	if (!mac)
		return -ENOMEM;

	priv->hw = mac;

	/* To use the chained or ring mode */
	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
		priv->hw->mode = &dwmac4_ring_mode_ops;
	} else {
		if (chain_mode) {
			priv->hw->mode = &chain_mode_ops;
			dev_info(priv->device, "Chain mode enabled\n");
			priv->mode = STMMAC_CHAIN_MODE;
		} else {
			priv->hw->mode = &ring_mode_ops;
			dev_info(priv->device, "Ring mode enabled\n");
			priv->mode = STMMAC_RING_MODE;
		}
	}

	/* Get the HW capability (GMAC newer than 3.50a) */
	priv->hw_cap_support = stmmac_get_hw_features(priv);
	if (priv->hw_cap_support) {
		dev_info(priv->device, "DMA HW capability register supported\n");

		/* We can override some gmac/dma configuration fields: e.g.
		 * enh_desc, tx_coe (e.g. that are passed through the
		 * platform) with the values from the HW capability
		 * register (if supported).
		 */
		priv->plat->enh_desc = priv->dma_cap.enh_desc;
		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
		priv->hw->pmt = priv->plat->pmt;

		/* TXCOE doesn't work in thresh DMA mode */
		if (priv->plat->force_thresh_dma_mode)
			priv->plat->tx_coe = 0;
		else
			priv->plat->tx_coe = priv->dma_cap.tx_coe;

		/* In case of GMAC4 rx_coe is from HW cap register. */
		priv->plat->rx_coe = priv->dma_cap.rx_coe;

		if (priv->dma_cap.rx_coe_type2)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
		else if (priv->dma_cap.rx_coe_type1)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;

	} else {
		dev_info(priv->device, "No HW DMA feature register supported\n");
	}

	/* To use alternate (extended), normal or GMAC4 descriptor structures */
	if (priv->synopsys_id >= DWMAC_CORE_4_00)
		priv->hw->desc = &dwmac4_desc_ops;
	else
		stmmac_selec_desc_mode(priv);

	if (priv->plat->rx_coe) {
		priv->hw->rx_csum = priv->plat->rx_coe;
		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
		if (priv->synopsys_id < DWMAC_CORE_4_00)
			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
	}
	if (priv->plat->tx_coe)
		dev_info(priv->device, "TX Checksum insertion supported\n");

	if (priv->plat->pmt) {
		dev_info(priv->device, "Wake-Up On Lan supported\n");
cf3f047b
GC
3318 device_set_wakeup_capable(priv->device, 1);
3319 }
3320
f748be53 3321 if (priv->dma_cap.tsoen)
38ddc59d 3322 dev_info(priv->device, "TSO supported\n");
f748be53 3323
c24602ef 3324 return 0;
cf3f047b
GC
3325}
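/*
 * A minimal sketch (hypothetical values; real ones normally come from DT
 * or PCI glue) of the plat_stmmacenet_data fields consumed above:
 *
 *	struct plat_stmmacenet_data plat = {
 *		.has_gmac = 1,			<- selects dwmac1000_setup()
 *		.multicast_filter_bins = 64,
 *		.unicast_filter_entries = 1,
 *		.force_thresh_dma_mode = 0,	<- keeps TX COE usable
 *	};
 */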

/**
 * stmmac_dvr_probe
 * @device: device pointer
 * @plat_dat: platform data pointer
 * @res: stmmac resource pointer
 * Description: this is the main probe function; it calls alloc_etherdev,
 * allocates the priv structure, initializes the HW and registers the
 * network device.
 * Return:
 * returns 0 on success, otherwise errno.
 */
int stmmac_dvr_probe(struct device *device,
		     struct plat_stmmacenet_data *plat_dat,
		     struct stmmac_resources *res)
{
	int ret = 0;
	struct net_device *ndev = NULL;
	struct stmmac_priv *priv;

	ndev = alloc_etherdev(sizeof(struct stmmac_priv));
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, device);

	priv = netdev_priv(ndev);
	priv->device = device;
	priv->dev = ndev;

	stmmac_set_ethtool_ops(ndev);
	priv->pause = pause;
	priv->plat = plat_dat;
	priv->ioaddr = res->addr;
	priv->dev->base_addr = (unsigned long)res->addr;

	priv->dev->irq = res->irq;
	priv->wol_irq = res->wol_irq;
	priv->lpi_irq = res->lpi_irq;

	if (res->mac)
		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);

	dev_set_drvdata(device, priv->dev);

	/* Verify driver arguments */
	stmmac_verify_args();

	/* Override with kernel parameters if supplied XXX CRS XXX
	 * this needs to have multiple instances
	 */
	if ((phyaddr >= 0) && (phyaddr <= 31))
		priv->plat->phy_addr = phyaddr;

	if (priv->plat->stmmac_rst)
		reset_control_deassert(priv->plat->stmmac_rst);

	/* Init MAC and get the capabilities */
	ret = stmmac_hw_init(priv);
	if (ret)
		goto error_hw_init;

	ndev->netdev_ops = &stmmac_netdev_ops;

	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_RXCSUM;

	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
		ndev->hw_features |= NETIF_F_TSO;
		priv->tso = true;
		dev_info(priv->device, "TSO feature enabled\n");
	}
	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
	/* Both mac100 and gmac support receive VLAN tag detection */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
#endif
	priv->msg_enable = netif_msg_init(debug, default_msg_level);

	/* MTU range: 46 - hw-specific max */
	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
	if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
		ndev->max_mtu = JUMBO_LEN;
	else
		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
	 * as well as plat->maxmtu < ndev->min_mtu, which is an invalid range.
	 */
	if ((priv->plat->maxmtu < ndev->max_mtu) &&
	    (priv->plat->maxmtu >= ndev->min_mtu))
		ndev->max_mtu = priv->plat->maxmtu;
	else if (priv->plat->maxmtu < ndev->min_mtu)
		dev_warn(priv->device,
			 "%s: warning: maxmtu having invalid value (%d)\n",
			 __func__, priv->plat->maxmtu);

	if (flow_ctrl)
		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */

	/* Rx Watchdog is available in the COREs newer than the 3.40.
	 * In some cases, for example on buggy HW, this feature
	 * has to be disabled and this can be done by passing the
	 * riwt_off field from the platform.
	 */
	if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
		priv->use_riwt = 1;
		dev_info(priv->device,
			 "Enable RX Mitigation via HW Watchdog Timer\n");
	}

	netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);

	spin_lock_init(&priv->lock);

	/* If a specific clk_csr value is passed from the platform
	 * this means that the CSR Clock Range selection cannot be
	 * changed at run-time and it is fixed. Otherwise, the driver
	 * will try to set the MDC clock dynamically according to the
	 * actual csr clock input.
	 */
	if (!priv->plat->clk_csr)
		stmmac_clk_csr_set(priv);
	else
		priv->clk_csr = priv->plat->clk_csr;

	stmmac_check_pcs_mode(priv);

	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
	    priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI) {
		/* MDIO bus Registration */
		ret = stmmac_mdio_register(ndev);
		if (ret < 0) {
			dev_err(priv->device,
				"%s: MDIO bus (id: %d) registration failed",
				__func__, priv->plat->bus_id);
			goto error_mdio_register;
		}
	}

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(priv->device, "%s: ERROR %i registering the device\n",
			__func__, ret);
		goto error_netdev_register;
	}

	return ret;

error_netdev_register:
	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
	    priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
error_mdio_register:
	netif_napi_del(&priv->napi);
error_hw_init:
	free_netdev(ndev);

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
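/*
 * A minimal sketch of how bus glue calls into this entry point
 * (hypothetical glue driver; the real callers live in stmmac_platform.c,
 * stmmac_pci.c and the dwmac-* glue files):
 *
 *	static int foo_dwmac_probe(struct platform_device *pdev)
 *	{
 *		struct plat_stmmacenet_data *plat;
 *		struct stmmac_resources res;
 *
 *		... fill plat and res (ioaddr, irq, MAC address) ...
 *		return stmmac_dvr_probe(&pdev->dev, plat, &res);
 *	}
 *
 * The matching .remove would then call stmmac_dvr_remove(&pdev->dev).
 */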

/**
 * stmmac_dvr_remove
 * @dev: device pointer
 * Description: this function resets the TX/RX processes, disables the MAC
 * RX/TX, changes the link status and releases the DMA descriptor rings.
 */
int stmmac_dvr_remove(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	netdev_info(priv->dev, "%s: removing driver", __func__);

	priv->hw->dma->stop_rx(priv->ioaddr);
	priv->hw->dma->stop_tx(priv->ioaddr);

	stmmac_set_mac(priv->ioaddr, false);
	netif_carrier_off(ndev);
	unregister_netdev(ndev);
	if (priv->plat->stmmac_rst)
		reset_control_assert(priv->plat->stmmac_rst);
	clk_disable_unprepare(priv->plat->pclk);
	clk_disable_unprepare(priv->plat->stmmac_clk);
	if (priv->hw->pcs != STMMAC_PCS_RGMII &&
	    priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
	free_netdev(ndev);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_remove);

/**
 * stmmac_suspend - suspend callback
 * @dev: device pointer
 * Description: this function suspends the device; it is called by the
 * platform driver to stop the network queue, release the resources,
 * program the PMT register (for WoL), and clean and release driver
 * resources.
 */
int stmmac_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	unsigned long flags;

	if (!ndev || !netif_running(ndev))
		return 0;

	if (ndev->phydev)
		phy_stop(ndev->phydev);

	spin_lock_irqsave(&priv->lock, flags);

	netif_device_detach(ndev);
	netif_stop_queue(ndev);

	napi_disable(&priv->napi);

	/* Stop TX/RX DMA */
	priv->hw->dma->stop_tx(priv->ioaddr);
	priv->hw->dma->stop_rx(priv->ioaddr);

	/* Enable Power down mode by programming the PMT regs */
	if (device_may_wakeup(priv->device)) {
		priv->hw->mac->pmt(priv->hw, priv->wolopts);
		priv->irq_wake = 1;
	} else {
		stmmac_set_mac(priv->ioaddr, false);
		pinctrl_pm_select_sleep_state(priv->device);
		/* Disable clocks when PMT wake-up is off */
		clk_disable(priv->plat->pclk);
		clk_disable(priv->plat->stmmac_clk);
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	priv->oldlink = 0;
	priv->speed = SPEED_UNKNOWN;
	priv->oldduplex = DUPLEX_UNKNOWN;
	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_suspend);

/**
 * stmmac_resume - resume callback
 * @dev: device pointer
 * Description: on resume, this function is invoked to set up the DMA and
 * CORE in a usable state.
 */
int stmmac_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	unsigned long flags;

	if (!netif_running(ndev))
		return 0;

	/* The Power Down bit in the PM register is cleared
	 * automatically as soon as a magic packet or a Wake-up frame
	 * is received. Anyway, it's better to manually clear
	 * this bit because it can generate problems when resuming
	 * from another device (e.g. serial console).
	 */
	if (device_may_wakeup(priv->device)) {
		spin_lock_irqsave(&priv->lock, flags);
		priv->hw->mac->pmt(priv->hw, 0);
		spin_unlock_irqrestore(&priv->lock, flags);
		priv->irq_wake = 0;
	} else {
		pinctrl_pm_select_default_state(priv->device);
		/* enable the clocks previously disabled */
		clk_enable(priv->plat->stmmac_clk);
		clk_enable(priv->plat->pclk);
		/* reset the phy so that it's ready */
		if (priv->mii)
			stmmac_mdio_reset(priv->mii);
	}

	netif_device_attach(ndev);

	spin_lock_irqsave(&priv->lock, flags);

	priv->cur_rx = 0;
	priv->dirty_rx = 0;
	priv->dirty_tx = 0;
	priv->cur_tx = 0;
	/* reset private mss value to force mss context settings at
	 * next tso xmit (only used for gmac4).
	 */
	priv->mss = 0;

	stmmac_clear_descriptors(priv);

	stmmac_hw_setup(ndev, false);
	stmmac_init_tx_coalesce(priv);
	stmmac_set_rx_mode(ndev);

	napi_enable(&priv->napi);

	netif_start_queue(ndev);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (ndev->phydev)
		phy_start(ndev->phydev);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_resume);
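/*
 * A minimal sketch of how bus glue typically hooks these callbacks into
 * the PM core (hypothetical names; the real wiring is done in the
 * platform/PCI glue code):
 *
 *	static SIMPLE_DEV_PM_OPS(foo_dwmac_pm_ops,
 *				 stmmac_suspend, stmmac_resume);
 *
 * foo_dwmac_pm_ops would then be assigned to the glue driver's .pm field.
 */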

#ifndef MODULE
static int __init stmmac_cmdline_opt(char *str)
{
	char *opt;

	if (!str || !*str)
		return -EINVAL;
	while ((opt = strsep(&str, ",")) != NULL) {
		if (!strncmp(opt, "debug:", 6)) {
			if (kstrtoint(opt + 6, 0, &debug))
				goto err;
		} else if (!strncmp(opt, "phyaddr:", 8)) {
			if (kstrtoint(opt + 8, 0, &phyaddr))
				goto err;
		} else if (!strncmp(opt, "buf_sz:", 7)) {
			if (kstrtoint(opt + 7, 0, &buf_sz))
				goto err;
		} else if (!strncmp(opt, "tc:", 3)) {
			if (kstrtoint(opt + 3, 0, &tc))
				goto err;
		} else if (!strncmp(opt, "watchdog:", 9)) {
			if (kstrtoint(opt + 9, 0, &watchdog))
				goto err;
		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
			if (kstrtoint(opt + 10, 0, &flow_ctrl))
				goto err;
		} else if (!strncmp(opt, "pause:", 6)) {
			if (kstrtoint(opt + 6, 0, &pause))
				goto err;
		} else if (!strncmp(opt, "eee_timer:", 10)) {
			if (kstrtoint(opt + 10, 0, &eee_timer))
				goto err;
		} else if (!strncmp(opt, "chain_mode:", 11)) {
			if (kstrtoint(opt + 11, 0, &chain_mode))
				goto err;
		}
	}
	return 0;

err:
	pr_err("%s: ERROR broken module parameter conversion", __func__);
	return -EINVAL;
}

__setup("stmmaceth=", stmmac_cmdline_opt);
#endif /* MODULE */
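/*
 * Example (hypothetical values): when the driver is built in, the options
 * parsed above can be passed as a comma-separated list on the kernel
 * command line, e.g.:
 *
 *	stmmaceth=debug:16,phyaddr:1,watchdog:8000,chain_mode:1
 */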

static int __init stmmac_init(void)
{
#ifdef CONFIG_DEBUG_FS
	/* Create debugfs main directory if it doesn't exist yet */
	if (!stmmac_fs_dir) {
		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);

		if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
			pr_err("ERROR %s, debugfs create directory failed\n",
			       STMMAC_RESOURCE_NAME);

			return -ENOMEM;
		}
	}
#endif

	return 0;
}

static void __exit stmmac_exit(void)
{
#ifdef CONFIG_DEBUG_FS
	debugfs_remove_recursive(stmmac_fs_dir);
#endif
}

module_init(stmmac_init)
module_exit(stmmac_exit)

MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");