/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

	Copyright(C) 2007-2011 STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
	http://www.stlinux.com
  Support available at:
	https://bugzilla.stlinux.com/
*******************************************************************************/

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"

#define STMMAC_ALIGN(x)		L1_CACHE_ALIGN(x)
#define TSO_MAX_BUFF_SIZE	(SZ_16K - 1)

/* Module parameters */
#define TX_TIMEO	5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, S_IRUGO);
MODULE_PARM_DESC(phyaddr, "Physical device address");

#define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
#define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)

static int flow_ctrl = FLOW_OFF;
module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define	DEFAULT_BUFSIZE	1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define	STMMAC_RX_COPYBREAK	256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))

/* By default the driver will use the ring mode to manage tx and rx
 * descriptors, but allow the user to force the use of the chain instead.
 */
static unsigned int chain_mode;
module_param(chain_mode, int, S_IRUGO);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");

static irqreturn_t stmmac_interrupt(int irq, void *dev_id);

#ifdef CONFIG_DEBUG_FS
static int stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

#define	STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))

/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case of
 * errors.
 */
static void stmmac_verify_args(void)
{
	if (unlikely(watchdog < 0))
		watchdog = TX_TIMEO;
	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
		buf_sz = DEFAULT_BUFSIZE;
	if (unlikely(flow_ctrl > 1))
		flow_ctrl = FLOW_AUTO;
	else if (likely(flow_ctrl < 0))
		flow_ctrl = FLOW_OFF;
	if (unlikely((pause < 0) || (pause > 0xffff)))
		pause = PAUSE_TIME;
	if (eee_timer < 0)
		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}

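/* Usage note (added for illustration, not in the original source): these
 * checks silently repair bogus module parameters rather than failing the
 * probe, e.g. loading the driver with buf_sz=100000 exceeds BUF_SIZE_16KiB
 * and is quietly reset to DEFAULT_BUFSIZE (1536 bytes).
 */
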
/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *	If a specific clk_csr value is passed from the platform
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and it is fixed (as reported in the driver
 *	documentation). Vice versa the driver will try to set the MDC
 *	clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
	u32 clk_rate;

	clk_rate = clk_get_rate(priv->plat->stmmac_clk);

	/* Platform provided default clk_csr would be assumed valid
	 * for all other cases except for the below mentioned ones.
	 * For values higher than the IEEE 802.3 specified frequency
	 * we cannot estimate the proper divider as the frequency of
	 * clk_csr_i is not known. So we do not change the default
	 * divider.
	 */
	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
		if (clk_rate < CSR_F_35M)
			priv->clk_csr = STMMAC_CSR_20_35M;
		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
			priv->clk_csr = STMMAC_CSR_35_60M;
		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
			priv->clk_csr = STMMAC_CSR_60_100M;
		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
			priv->clk_csr = STMMAC_CSR_100_150M;
		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
			priv->clk_csr = STMMAC_CSR_150_250M;
		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
			priv->clk_csr = STMMAC_CSR_250_300M;
	}
}

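/* Illustrative example (added, not in the original source): with a CSR
 * input clock of 75 MHz, the ladder above selects STMMAC_CSR_60_100M,
 * i.e. the divider for the 60-100 MHz range, keeping the resulting MDC
 * clock within the 2.5 MHz limit specified by IEEE 802.3.
 */
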
static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}

static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
{
	u32 avail;

	if (priv->dirty_tx > priv->cur_tx)
		avail = priv->dirty_tx - priv->cur_tx - 1;
	else
		avail = DMA_TX_SIZE - priv->cur_tx + priv->dirty_tx - 1;

	return avail;
}

static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv)
{
	u32 dirty;

	if (priv->dirty_rx <= priv->cur_rx)
		dirty = priv->cur_rx - priv->dirty_rx;
	else
		dirty = DMA_RX_SIZE - priv->dirty_rx + priv->cur_rx;

	return dirty;
}

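/* Worked example (illustrative numbers, not from the original source):
 * with DMA_TX_SIZE = 512, cur_tx = 510 and dirty_tx = 5, the wrap branch
 * of stmmac_tx_avail() gives 512 - 510 + 5 - 1 = 6 free descriptors; the
 * "- 1" keeps one slot unused so a full ring can be told apart from an
 * empty one.
 */
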
/**
 * stmmac_hw_fix_mac_speed - callback for speed selection
 * @priv: driver private structure
 * Description: on some platforms (e.g. ST), some HW system configuration
 * registers have to be set according to the link speed negotiated.
 */
static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
{
	struct net_device *ndev = priv->dev;
	struct phy_device *phydev = ndev->phydev;

	if (likely(priv->plat->fix_mac_speed))
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
}

/**
 * stmmac_enable_eee_mode - check and enter in LPI mode
 * @priv: driver private structure
 * Description: this function checks and enters LPI mode in case of EEE.
 */
static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
	/* Check and enter in LPI mode */
	if ((priv->dirty_tx == priv->cur_tx) &&
	    (priv->tx_path_in_lpi_mode == false))
		priv->hw->mac->set_eee_mode(priv->hw,
					    priv->plat->en_tx_lpi_clockgating);
}

/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function exits and disables EEE if the LPI state is
 * active. It is called by the xmit path.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	priv->hw->mac->reset_eee_mode(priv->hw);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @arg : data hook
 * Description:
 *  if there is no data transfer and if we are not in LPI state,
 *  then the MAC Transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(unsigned long arg)
{
	struct stmmac_priv *priv = (struct stmmac_priv *)arg;

	stmmac_enable_eee_mode(priv);
	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
}

/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts
 *  the related timer.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	struct net_device *ndev = priv->dev;
	unsigned long flags;
	bool ret = false;

	/* Using PCS we cannot deal with the phy registers at this stage
	 * so we do not support extra features like EEE.
	 */
	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
	    (priv->hw->pcs == STMMAC_PCS_RTBI))
		goto out;

	/* MAC core supports the EEE feature. */
	if (priv->dma_cap.eee) {
		int tx_lpi_timer = priv->tx_lpi_timer;

		/* Check if the PHY supports EEE */
		if (phy_init_eee(ndev->phydev, 1)) {
			/* To manage at run-time if the EEE cannot be supported
			 * anymore (for example because the lp caps have been
			 * changed).
			 * In that case the driver disables its own timers.
			 */
			spin_lock_irqsave(&priv->lock, flags);
			if (priv->eee_active) {
				netdev_dbg(priv->dev, "disable EEE\n");
				del_timer_sync(&priv->eee_ctrl_timer);
				priv->hw->mac->set_eee_timer(priv->hw, 0,
							     tx_lpi_timer);
			}
			priv->eee_active = 0;
			spin_unlock_irqrestore(&priv->lock, flags);
			goto out;
		}
		/* Activate the EEE and start timers */
		spin_lock_irqsave(&priv->lock, flags);
		if (!priv->eee_active) {
			priv->eee_active = 1;
			setup_timer(&priv->eee_ctrl_timer,
				    stmmac_eee_ctrl_timer,
				    (unsigned long)priv);
			mod_timer(&priv->eee_ctrl_timer,
				  STMMAC_LPI_T(eee_timer));

			priv->hw->mac->set_eee_timer(priv->hw,
						     STMMAC_DEFAULT_LIT_LS,
						     tx_lpi_timer);
		}
		/* Set HW EEE according to the speed */
		priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);

		ret = true;
		spin_unlock_irqrestore(&priv->lock, flags);

		netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
	}
out:
	return ret;
}

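/* Note (added for illustration): the phy_init_eee() failure branch above
 * also covers renegotiation at run-time, e.g. if the link partner stops
 * advertising EEE, the next call into stmmac_eee_init() tears down the
 * LPI timer and clears eee_active instead of leaving a stale timer armed.
 */
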
/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function will read the timestamp from the descriptor and pass it
 * to the stack. It also performs some sanity checks.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
				   struct dma_desc *p, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	u64 ns;

	if (!priv->hwts_tx_en)
		return;

	/* exit if skb doesn't support hw tstamp */
	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
		return;

	/* check tx tstamp status */
	if (!priv->hw->desc->get_tx_timestamp_status(p)) {
		/* get the valid tstamp */
		ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);

		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp.hwtstamp = ns_to_ktime(ns);

		netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns);
		/* pass tstamp to stack */
		skb_tstamp_tx(skb, &shhwtstamp);
	}

	return;
}

/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function will read the received packet's timestamp from the
 * descriptor and pass it to the stack. It also performs some sanity checks.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
				   struct dma_desc *np, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
	u64 ns;

	if (!priv->hwts_rx_en)
		return;

	/* Check if timestamp is available */
	if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
		/* For GMAC4, the valid timestamp is from CTX next desc. */
		if (priv->plat->has_gmac4)
			ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
		else
			ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);

		netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns);
		shhwtstamp = skb_hwtstamps(skb);
		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp->hwtstamp = ns_to_ktime(ns);
	} else {
		netdev_err(priv->dev, "cannot get RX hw timestamp\n");
	}
}

/**
 * stmmac_hwtstamp_ioctl - control hardware timestamping.
 * @dev: device pointer.
 * @ifr: An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * Description:
 * This function configures the MAC to enable/disable both outgoing(TX)
 * and incoming(RX) packets time stamping based on user input.
 * Return Value:
 * 0 on success and an appropriate -ve integer on failure.
 */
static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	struct timespec64 now;
	u64 temp = 0;
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;
	u32 value = 0;
	u32 sec_inc;

	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(struct hwtstamp_config)))
		return -EFAULT;

	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config.flags, config.tx_type, config.rx_filter);

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	if (priv->adv_ts) {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.AS1 any layer, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.AS1, any layer, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.AS1, any layer, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
		priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
	else {
		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
			 tstamp_all | ptp_v2 | ptp_over_ethernet |
			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
			 ts_master_en | snap_type_sel);
		priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);

		/* program Sub Second Increment reg */
		sec_inc = priv->hw->ptp->config_sub_second_increment(
			priv->ptpaddr, priv->plat->clk_ptp_rate,
			priv->plat->has_gmac4);
		temp = div_u64(1000000000ULL, sec_inc);

		/* calculate default added value:
		 * formula is :
		 * addend = (2^32)/freq_div_ratio;
		 * where, freq_div_ratio = 1e9ns/sec_inc
		 */
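		/* Worked example (illustrative numbers only, added comment):
		 * if sec_inc is 20ns then temp = 1e9 / 20 = 50000000. With a
		 * 100MHz clk_ptp_rate the two lines below compute
		 * addend = (50000000 << 32) / 100000000 = 2^31, so the 32-bit
		 * accumulator overflows (and the sub-second counter advances
		 * by sec_inc) once every two PTP clock cycles.
		 */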
		temp = (u64)(temp << 32);
		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
		priv->hw->ptp->config_addend(priv->ptpaddr,
					     priv->default_addend);

		/* initialize system time */
		ktime_get_real_ts64(&now);

		/* lower 32 bits of tv_sec are safe until y2106 */
		priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
					    now.tv_nsec);
	}

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
}

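/* Illustrative userspace counterpart of the hook above (assumed typical
 * usage, not part of this file): the request arrives through the
 * SIOCSHWTSTAMP ioctl with ifr_data pointing at a struct hwtstamp_config:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr;
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 */
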
/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
 * This is done by looking at the HW cap. register.
 * This function also registers the ptp driver.
 */
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	priv->adv_ts = 0;
	/* Check if adv_ts can be enabled for dwmac 4.x core */
	if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;
	/* Dwmac 3.x core with extend_desc can support adv_ts */
	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;

	if (priv->dma_cap.time_stamp)
		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");

	if (priv->adv_ts)
		netdev_info(priv->dev,
			    "IEEE 1588-2008 Advanced Timestamp supported\n");

	priv->hw->ptp = &stmmac_ptp;
	priv->hwts_tx_en = 0;
	priv->hwts_rx_en = 0;

	stmmac_ptp_register(priv);

	return 0;
}

static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	if (priv->plat->clk_ptp_ref)
		clk_disable_unprepare(priv->plat->clk_ptp_ref);
	stmmac_ptp_unregister(priv);
}

/**
 * stmmac_adjust_link - adjusts the link parameters
 * @dev: net device structure
 * Description: this is the helper called by the physical abstraction layer
 * drivers to communicate the phy link status. According to the speed and
 * duplex this driver can invoke registered glue-logic as well.
 * It also invokes the eee initialization because it could happen when
 * switching on different networks (that are eee capable).
 */
static void stmmac_adjust_link(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned long flags;
	int new_state = 0;
	unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;

	if (!phydev)
		return;

	spin_lock_irqsave(&priv->lock, flags);

	if (phydev->link) {
		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				ctrl &= ~priv->hw->link.duplex;
			else
				ctrl |= priv->hw->link.duplex;
			priv->oldduplex = phydev->duplex;
		}
		/* Flow Control operation */
		if (phydev->pause)
			priv->hw->mac->flow_ctrl(priv->hw, phydev->duplex,
						 fc, pause_time);

		if (phydev->speed != priv->speed) {
			new_state = 1;
			switch (phydev->speed) {
			case 1000:
				if (priv->plat->has_gmac ||
				    priv->plat->has_gmac4)
					ctrl &= ~priv->hw->link.port;
				break;
			case 100:
				if (priv->plat->has_gmac ||
				    priv->plat->has_gmac4) {
					ctrl |= priv->hw->link.port;
					ctrl |= priv->hw->link.speed;
				} else {
					ctrl &= ~priv->hw->link.port;
				}
				break;
			case 10:
				if (priv->plat->has_gmac ||
				    priv->plat->has_gmac4) {
					ctrl |= priv->hw->link.port;
					ctrl &= ~(priv->hw->link.speed);
				} else {
					ctrl &= ~priv->hw->link.port;
				}
				break;
			default:
				netif_warn(priv, link, priv->dev,
					   "broken speed: %d\n", phydev->speed);
				phydev->speed = SPEED_UNKNOWN;
				break;
			}
			if (phydev->speed != SPEED_UNKNOWN)
				stmmac_hw_fix_mac_speed(priv);
			priv->speed = phydev->speed;
		}

		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->speed = SPEED_UNKNOWN;
		priv->oldduplex = DUPLEX_UNKNOWN;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (phydev->is_pseudo_fixed_link)
		/* Stop the PHY layer from calling the hook to adjust the
		 * link in case a switch is attached to the stmmac driver.
		 */
		phydev->irq = PHY_IGNORE_INTERRUPT;
	else
		/* At this stage, init the EEE if supported.
		 * Never called in case of fixed_link.
		 */
		priv->eee_enabled = stmmac_eee_init(priv);
}

/**
 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PCS, the Physical
 * Coding Sublayer interface that can be used when the MAC is configured
 * for the TBI, RTBI, or SGMII PHY interface.
 */
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
	int interface = priv->plat->interface;

	if (priv->dma_cap.pcs) {
		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_RGMII;
		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_SGMII;
		}
	}
}

/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 * Return value:
 * 0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev;
	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
	char bus_id[MII_BUS_ID_SIZE];
	int interface = priv->plat->interface;
	int max_speed = priv->plat->max_speed;
	priv->oldlink = 0;
	priv->speed = SPEED_UNKNOWN;
	priv->oldduplex = DUPLEX_UNKNOWN;

	if (priv->plat->phy_node) {
		phydev = of_phy_connect(dev, priv->plat->phy_node,
					&stmmac_adjust_link, 0, interface);
	} else {
		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
			 priv->plat->bus_id);

		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
			 priv->plat->phy_addr);
		netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
			   phy_id_fmt);

		phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
				     interface);
	}

	if (IS_ERR_OR_NULL(phydev)) {
		netdev_err(priv->dev, "Could not attach to PHY\n");
		if (!phydev)
			return -ENODEV;

		return PTR_ERR(phydev);
	}

	/* Stop Advertising 1000BASE Capability if interface is not GMII */
	if ((interface == PHY_INTERFACE_MODE_MII) ||
	    (interface == PHY_INTERFACE_MODE_RMII) ||
	    (max_speed < 1000 && max_speed > 0))
		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
					 SUPPORTED_1000baseT_Full);

	/*
	 * Broken HW is sometimes missing the pull-up resistor on the
	 * MDIO line, which results in reads to non-existent devices returning
	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
	 * device as well.
	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
	 */
	if (!priv->plat->phy_node && phydev->phy_id == 0) {
		phy_disconnect(phydev);
		return -ENODEV;
	}

	/* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
	 * subsequent PHY polling, make sure we force a link transition if
	 * we have a UP/DOWN/UP transition
	 */
	if (phydev->is_pseudo_fixed_link)
		phydev->irq = PHY_POLL;

	phy_attached_info(phydev);
	return 0;
}

static void stmmac_display_rings(struct stmmac_priv *priv)
{
	void *head_rx, *head_tx;

	if (priv->extend_desc) {
		head_rx = (void *)priv->dma_erx;
		head_tx = (void *)priv->dma_etx;
	} else {
		head_rx = (void *)priv->dma_rx;
		head_tx = (void *)priv->dma_tx;
	}

	/* Display Rx ring */
	priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
	/* Display Tx ring */
	priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
}

static int stmmac_set_bfsize(int mtu, int bufsize)
{
	int ret = bufsize;

	if (mtu >= BUF_SIZE_4KiB)
		ret = BUF_SIZE_8KiB;
	else if (mtu >= BUF_SIZE_2KiB)
		ret = BUF_SIZE_4KiB;
	else if (mtu > DEFAULT_BUFSIZE)
		ret = BUF_SIZE_2KiB;
	else
		ret = DEFAULT_BUFSIZE;

	return ret;
}

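/* Illustrative mapping for the helper above (assuming the usual 2KiB /
 * 4KiB / 8KiB BUF_SIZE_* constants and DEFAULT_BUFSIZE = 1536): an MTU
 * of 1500 keeps the 1536-byte default, 1600 selects 2KiB buffers, 3000
 * selects 4KiB, and anything from 4KiB upwards selects 8KiB.
 */
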
/**
 * stmmac_clear_descriptors - clear descriptors
 * @priv: driver private structure
 * Description: this function is called to clear the tx and rx descriptors
 * in case both basic and extended descriptors are used.
 */
static void stmmac_clear_descriptors(struct stmmac_priv *priv)
{
	int i;

	/* Clear the Rx/Tx descriptors */
	for (i = 0; i < DMA_RX_SIZE; i++)
		if (priv->extend_desc)
			priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic,
						     priv->use_riwt, priv->mode,
						     (i == DMA_RX_SIZE - 1));
		else
			priv->hw->desc->init_rx_desc(&priv->dma_rx[i],
						     priv->use_riwt, priv->mode,
						     (i == DMA_RX_SIZE - 1));
	for (i = 0; i < DMA_TX_SIZE; i++)
		if (priv->extend_desc)
			priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
						     priv->mode,
						     (i == DMA_TX_SIZE - 1));
		else
			priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
						     priv->mode,
						     (i == DMA_TX_SIZE - 1));
}

/**
 * stmmac_init_rx_buffers - init the RX descriptor buffer.
 * @priv: driver private structure
 * @p: descriptor pointer
 * @i: descriptor index
 * @flags: gfp flag.
 * Description: this function is called to allocate a receive buffer, perform
 * the DMA mapping and init the descriptor.
 */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
				  int i, gfp_t flags)
{
	struct sk_buff *skb;

	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
	if (!skb) {
		netdev_err(priv->dev,
			   "%s: Rx init fails; skb is NULL\n", __func__);
		return -ENOMEM;
	}
	priv->rx_skbuff[i] = skb;
	priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
						priv->dma_buf_sz,
						DMA_FROM_DEVICE);
	if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) {
		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	if (priv->synopsys_id >= DWMAC_CORE_4_00)
		p->des0 = cpu_to_le32(priv->rx_skbuff_dma[i]);
	else
		p->des2 = cpu_to_le32(priv->rx_skbuff_dma[i]);

	if ((priv->hw->mode->init_desc3) &&
	    (priv->dma_buf_sz == BUF_SIZE_16KiB))
		priv->hw->mode->init_desc3(p);

	return 0;
}

static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
{
	if (priv->rx_skbuff[i]) {
		dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
				 priv->dma_buf_sz, DMA_FROM_DEVICE);
		dev_kfree_skb_any(priv->rx_skbuff[i]);
	}
	priv->rx_skbuff[i] = NULL;
}

/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
{
	int i;
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned int bfsize = 0;
	int ret = -ENOMEM;

	if (priv->hw->mode->set_16kib_bfsize)
		bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);

	if (bfsize < BUF_SIZE_16KiB)
		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);

	priv->dma_buf_sz = bfsize;

	netif_dbg(priv, probe, priv->dev,
		  "(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n",
		  __func__, (u32)priv->dma_rx_phy, (u32)priv->dma_tx_phy);

	/* RX INITIALIZATION */
	netif_dbg(priv, probe, priv->dev,
		  "SKB addresses:\nskb\t\tskb data\tdma data\n");

	for (i = 0; i < DMA_RX_SIZE; i++) {
		struct dma_desc *p;
		if (priv->extend_desc)
			p = &((priv->dma_erx + i)->basic);
		else
			p = priv->dma_rx + i;

		ret = stmmac_init_rx_buffers(priv, p, i, flags);
		if (ret)
			goto err_init_rx_buffers;

		netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
			  priv->rx_skbuff[i], priv->rx_skbuff[i]->data,
			  (unsigned int)priv->rx_skbuff_dma[i]);
	}
	priv->cur_rx = 0;
	priv->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
	buf_sz = bfsize;

	/* Setup the chained descriptor addresses */
	if (priv->mode == STMMAC_CHAIN_MODE) {
		if (priv->extend_desc) {
			priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy,
					     DMA_RX_SIZE, 1);
			priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy,
					     DMA_TX_SIZE, 1);
		} else {
			priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy,
					     DMA_RX_SIZE, 0);
			priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy,
					     DMA_TX_SIZE, 0);
		}
	}

	/* TX INITIALIZATION */
	for (i = 0; i < DMA_TX_SIZE; i++) {
		struct dma_desc *p;
		if (priv->extend_desc)
			p = &((priv->dma_etx + i)->basic);
		else
			p = priv->dma_tx + i;

		if (priv->synopsys_id >= DWMAC_CORE_4_00) {
			p->des0 = 0;
			p->des1 = 0;
			p->des2 = 0;
			p->des3 = 0;
		} else {
			p->des2 = 0;
		}

		priv->tx_skbuff_dma[i].buf = 0;
		priv->tx_skbuff_dma[i].map_as_page = false;
		priv->tx_skbuff_dma[i].len = 0;
		priv->tx_skbuff_dma[i].last_segment = false;
		priv->tx_skbuff[i] = NULL;
	}

	priv->dirty_tx = 0;
	priv->cur_tx = 0;
	netdev_reset_queue(priv->dev);

	stmmac_clear_descriptors(priv);

	if (netif_msg_hw(priv))
		stmmac_display_rings(priv);

	return 0;
err_init_rx_buffers:
	while (--i >= 0)
		stmmac_free_rx_buffers(priv, i);
	return ret;
}

static void dma_free_rx_skbufs(struct stmmac_priv *priv)
{
	int i;

	for (i = 0; i < DMA_RX_SIZE; i++)
		stmmac_free_rx_buffers(priv, i);
}

static void dma_free_tx_skbufs(struct stmmac_priv *priv)
{
	int i;

	for (i = 0; i < DMA_TX_SIZE; i++) {
		if (priv->tx_skbuff_dma[i].buf) {
			if (priv->tx_skbuff_dma[i].map_as_page)
				dma_unmap_page(priv->device,
					       priv->tx_skbuff_dma[i].buf,
					       priv->tx_skbuff_dma[i].len,
					       DMA_TO_DEVICE);
			else
				dma_unmap_single(priv->device,
						 priv->tx_skbuff_dma[i].buf,
						 priv->tx_skbuff_dma[i].len,
						 DMA_TO_DEVICE);
		}

		if (priv->tx_skbuff[i]) {
			dev_kfree_skb_any(priv->tx_skbuff[i]);
			priv->tx_skbuff[i] = NULL;
			priv->tx_skbuff_dma[i].buf = 0;
			priv->tx_skbuff_dma[i].map_as_page = false;
		}
	}
}

/**
 * alloc_dma_desc_resources - alloc TX/RX resources.
 * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocates the RX socket buffer in order to
 * allow zero-copy mechanism.
 */
static int alloc_dma_desc_resources(struct stmmac_priv *priv)
{
	int ret = -ENOMEM;

	priv->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, sizeof(dma_addr_t),
					    GFP_KERNEL);
	if (!priv->rx_skbuff_dma)
		return -ENOMEM;

	priv->rx_skbuff = kmalloc_array(DMA_RX_SIZE, sizeof(struct sk_buff *),
					GFP_KERNEL);
	if (!priv->rx_skbuff)
		goto err_rx_skbuff;

	priv->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
					    sizeof(*priv->tx_skbuff_dma),
					    GFP_KERNEL);
	if (!priv->tx_skbuff_dma)
		goto err_tx_skbuff_dma;

	priv->tx_skbuff = kmalloc_array(DMA_TX_SIZE, sizeof(struct sk_buff *),
					GFP_KERNEL);
	if (!priv->tx_skbuff)
		goto err_tx_skbuff;

	if (priv->extend_desc) {
		priv->dma_erx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
						    sizeof(struct
							   dma_extended_desc),
						    &priv->dma_rx_phy,
						    GFP_KERNEL);
		if (!priv->dma_erx)
			goto err_dma;

		priv->dma_etx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
						    sizeof(struct
							   dma_extended_desc),
						    &priv->dma_tx_phy,
						    GFP_KERNEL);
		if (!priv->dma_etx) {
			dma_free_coherent(priv->device, DMA_RX_SIZE *
					  sizeof(struct dma_extended_desc),
					  priv->dma_erx, priv->dma_rx_phy);
			goto err_dma;
		}
	} else {
		priv->dma_rx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
						   sizeof(struct dma_desc),
						   &priv->dma_rx_phy,
						   GFP_KERNEL);
		if (!priv->dma_rx)
			goto err_dma;

		priv->dma_tx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
						   sizeof(struct dma_desc),
						   &priv->dma_tx_phy,
						   GFP_KERNEL);
		if (!priv->dma_tx) {
			dma_free_coherent(priv->device, DMA_RX_SIZE *
					  sizeof(struct dma_desc),
					  priv->dma_rx, priv->dma_rx_phy);
			goto err_dma;
		}
	}

	return 0;

err_dma:
	kfree(priv->tx_skbuff);
err_tx_skbuff:
	kfree(priv->tx_skbuff_dma);
err_tx_skbuff_dma:
	kfree(priv->rx_skbuff);
err_rx_skbuff:
	kfree(priv->rx_skbuff_dma);
	return ret;
}

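/* Sizing sketch (added, illustrative; actual ring sizes come from
 * DMA_RX_SIZE/DMA_TX_SIZE): assuming 16-byte basic and 32-byte extended
 * descriptors, a 512-entry ring of extended descriptors occupies
 * 512 * 32 = 16KiB of coherent memory per direction, on top of the four
 * kmalloc'ed bookkeeping arrays allocated above.
 */
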
static void free_dma_desc_resources(struct stmmac_priv *priv)
{
	/* Release the DMA TX/RX socket buffers */
	dma_free_rx_skbufs(priv);
	dma_free_tx_skbufs(priv);

	/* Free DMA regions of consistent memory previously allocated */
	if (!priv->extend_desc) {
		dma_free_coherent(priv->device,
				  DMA_TX_SIZE * sizeof(struct dma_desc),
				  priv->dma_tx, priv->dma_tx_phy);
		dma_free_coherent(priv->device,
				  DMA_RX_SIZE * sizeof(struct dma_desc),
				  priv->dma_rx, priv->dma_rx_phy);
	} else {
		dma_free_coherent(priv->device, DMA_TX_SIZE *
				  sizeof(struct dma_extended_desc),
				  priv->dma_etx, priv->dma_tx_phy);
		dma_free_coherent(priv->device, DMA_RX_SIZE *
				  sizeof(struct dma_extended_desc),
				  priv->dma_erx, priv->dma_rx_phy);
	}
	kfree(priv->rx_skbuff_dma);
	kfree(priv->rx_skbuff);
	kfree(priv->tx_skbuff_dma);
	kfree(priv->tx_skbuff);
}

/**
 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
 * @priv: driver private structure
 * Description: It is used for enabling the rx queues in the MAC
 */
static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
{
	int rx_count = priv->dma_cap.number_rx_queues;
	int queue = 0;

	/* If GMAC does not have multiple queues, then this is not necessary */
	if (rx_count == 1)
		return;

	/* If the core is synthesized with multiple rx queues / multiple
	 * dma channels, then rx queues will be disabled by default.
	 * For now only rx queue 0 is enabled.
	 */
	priv->hw->mac->rx_queue_enable(priv->hw, queue);
}

/**
 * stmmac_dma_operation_mode - HW DMA operation mode
 * @priv: driver private structure
 * Description: it is used for configuring the DMA operation mode register in
 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
 */
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
	int rxfifosz = priv->plat->rx_fifo_size;

	if (priv->plat->force_thresh_dma_mode)
		priv->hw->dma->dma_mode(priv->ioaddr, tc, tc, rxfifosz);
	else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
		/*
		 * In case of GMAC, SF mode can be enabled
		 * to perform the TX COE in HW. This depends on:
		 * 1) TX COE if actually supported
		 * 2) There is no bugged Jumbo frame support
		 *    that needs to not insert csum in the TDES.
		 */
		priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE,
					rxfifosz);
		priv->xstats.threshold = SF_DMA_MODE;
	} else
		priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE,
					rxfifosz);
}

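/* Behaviour note (added for illustration): with the default tc = 64 and
 * neither force flag set, the final else arms TX in threshold mode (the
 * DMA starts draining a frame once 64 bytes are queued in the FIFO)
 * while RX stays in store-and-forward; platforms with TX checksum
 * offload instead force SF_DMA_MODE on both paths so the checksum can
 * be inserted over complete frames.
 */
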
/**
 * stmmac_tx_clean - to manage the transmission completion
 * @priv: driver private structure
 * Description: it reclaims the transmit resources after transmission completes.
 */
static void stmmac_tx_clean(struct stmmac_priv *priv)
{
	unsigned int bytes_compl = 0, pkts_compl = 0;
	unsigned int entry = priv->dirty_tx;

	netif_tx_lock(priv->dev);

	priv->xstats.tx_clean++;

	while (entry != priv->cur_tx) {
		struct sk_buff *skb = priv->tx_skbuff[entry];
		struct dma_desc *p;
		int status;

		if (priv->extend_desc)
			p = (struct dma_desc *)(priv->dma_etx + entry);
		else
			p = priv->dma_tx + entry;

		status = priv->hw->desc->tx_status(&priv->dev->stats,
						   &priv->xstats, p,
						   priv->ioaddr);
		/* Check if the descriptor is owned by the DMA */
		if (unlikely(status & tx_dma_own))
			break;

		/* Just consider the last segment and ...*/
		if (likely(!(status & tx_not_ls))) {
			/* ... verify the status error condition */
			if (unlikely(status & tx_err)) {
				priv->dev->stats.tx_errors++;
			} else {
				priv->dev->stats.tx_packets++;
				priv->xstats.tx_pkt_n++;
			}
			stmmac_get_tx_hwtstamp(priv, p, skb);
		}

		if (likely(priv->tx_skbuff_dma[entry].buf)) {
			if (priv->tx_skbuff_dma[entry].map_as_page)
				dma_unmap_page(priv->device,
					       priv->tx_skbuff_dma[entry].buf,
					       priv->tx_skbuff_dma[entry].len,
					       DMA_TO_DEVICE);
			else
				dma_unmap_single(priv->device,
						 priv->tx_skbuff_dma[entry].buf,
						 priv->tx_skbuff_dma[entry].len,
						 DMA_TO_DEVICE);
			priv->tx_skbuff_dma[entry].buf = 0;
			priv->tx_skbuff_dma[entry].len = 0;
			priv->tx_skbuff_dma[entry].map_as_page = false;
		}

		if (priv->hw->mode->clean_desc3)
			priv->hw->mode->clean_desc3(priv, p);

		priv->tx_skbuff_dma[entry].last_segment = false;
		priv->tx_skbuff_dma[entry].is_jumbo = false;

		if (likely(skb != NULL)) {
			pkts_compl++;
			bytes_compl += skb->len;
			dev_consume_skb_any(skb);
			priv->tx_skbuff[entry] = NULL;
		}

		priv->hw->desc->release_tx_desc(p, priv->mode);

		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
	}
	priv->dirty_tx = entry;

	netdev_completed_queue(priv->dev, pkts_compl, bytes_compl);

	if (unlikely(netif_queue_stopped(priv->dev) &&
		     stmmac_tx_avail(priv) > STMMAC_TX_THRESH)) {
		netif_dbg(priv, tx_done, priv->dev,
			  "%s: restart transmit\n", __func__);
		netif_wake_queue(priv->dev);
	}

	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
		stmmac_enable_eee_mode(priv);
		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
	}
	netif_tx_unlock(priv->dev);
}

static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv)
{
	priv->hw->dma->enable_dma_irq(priv->ioaddr);
}

static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv)
{
	priv->hw->dma->disable_dma_irq(priv->ioaddr);
}

/**
 * stmmac_tx_err - to manage the tx error
 * @priv: driver private structure
 * Description: it cleans the descriptors and restarts the transmission
 * in case of transmission errors.
 */
static void stmmac_tx_err(struct stmmac_priv *priv)
{
	int i;
	netif_stop_queue(priv->dev);

	priv->hw->dma->stop_tx(priv->ioaddr);
	dma_free_tx_skbufs(priv);
	for (i = 0; i < DMA_TX_SIZE; i++)
		if (priv->extend_desc)
			priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
						     priv->mode,
						     (i == DMA_TX_SIZE - 1));
		else
			priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
						     priv->mode,
						     (i == DMA_TX_SIZE - 1));
	priv->dirty_tx = 0;
	priv->cur_tx = 0;
	netdev_reset_queue(priv->dev);
	priv->hw->dma->start_tx(priv->ioaddr);

	priv->dev->stats.tx_errors++;
	netif_wake_queue(priv->dev);
}

/**
 * stmmac_dma_interrupt - DMA ISR
 * @priv: driver private structure
 * Description: this is the DMA ISR. It is called by the main ISR.
 * It calls the dwmac dma routine and schedules the poll method in case
 * some work can be done.
 */
static void stmmac_dma_interrupt(struct stmmac_priv *priv)
{
	int status;
	int rxfifosz = priv->plat->rx_fifo_size;

	status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
	if (likely((status & handle_rx)) || (status & handle_tx)) {
		if (likely(napi_schedule_prep(&priv->napi))) {
			stmmac_disable_dma_irq(priv);
			__napi_schedule(&priv->napi);
		}
	}
	if (unlikely(status & tx_hard_error_bump_tc)) {
		/* Try to bump up the dma threshold on this failure */
		if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
		    (tc <= 256)) {
			tc += 64;
			if (priv->plat->force_thresh_dma_mode)
				priv->hw->dma->dma_mode(priv->ioaddr, tc, tc,
							rxfifosz);
			else
				priv->hw->dma->dma_mode(priv->ioaddr, tc,
							SF_DMA_MODE, rxfifosz);
			priv->xstats.threshold = tc;
		}
	} else if (unlikely(status == tx_hard_error))
		stmmac_tx_err(priv);
}

/**
 * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
 * @priv: driver private structure
 * Description: this masks the MMC irq, in fact, the counters are managed in SW.
 */
static void stmmac_mmc_setup(struct stmmac_priv *priv)
{
	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;

	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
		priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
		priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
	} else {
		priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
		priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
	}

	dwmac_mmc_intr_all_mask(priv->mmcaddr);

	if (priv->dma_cap.rmon) {
		dwmac_mmc_ctrl(priv->mmcaddr, mode);
		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
	} else
		netdev_info(priv->dev, "No MAC Management Counters available\n");
}

/**
 * stmmac_selec_desc_mode - to select among: normal/alternate/extend descriptors
 * @priv: driver private structure
 * Description: select the Enhanced/Alternate or Normal descriptors.
 * In case of Enhanced/Alternate, it checks if the extended descriptors are
 * supported by the HW capability register.
 */
static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
{
	if (priv->plat->enh_desc) {
		dev_info(priv->device, "Enhanced/Alternate descriptors\n");

		/* GMAC older than 3.50 has no extended descriptors */
		if (priv->synopsys_id >= DWMAC_CORE_3_50) {
			dev_info(priv->device, "Enabled extended descriptors\n");
			priv->extend_desc = 1;
		} else
			dev_warn(priv->device, "Extended descriptors not supported\n");

		priv->hw->desc = &enh_desc_ops;
	} else {
		dev_info(priv->device, "Normal descriptors\n");
		priv->hw->desc = &ndesc_ops;
	}
}

/**
 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
 * @priv: driver private structure
 * Description:
 *  new GMAC chip generations have a new register to indicate the
 *  presence of the optional feature/functions.
 *  This can be also used to override the value passed through the
 *  platform and necessary for old MAC10/100 and GMAC chips.
 */
static int stmmac_get_hw_features(struct stmmac_priv *priv)
{
	u32 ret = 0;

	if (priv->hw->dma->get_hw_feature) {
		priv->hw->dma->get_hw_feature(priv->ioaddr,
					      &priv->dma_cap);
		ret = 1;
	}

	return ret;
}

/**
 * stmmac_check_ether_addr - check if the MAC addr is valid
 * @priv: driver private structure
 * Description:
 * it is to verify if the MAC address is valid, in case of failure it
 * generates a random MAC address
 */
static void stmmac_check_ether_addr(struct stmmac_priv *priv)
{
	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
		priv->hw->mac->get_umac_addr(priv->hw,
					     priv->dev->dev_addr, 0);
		if (!is_valid_ether_addr(priv->dev->dev_addr))
			eth_hw_addr_random(priv->dev);
		netdev_info(priv->dev, "device MAC address %pM\n",
			    priv->dev->dev_addr);
	}
}

32ceabca 1567/**
732fdf0e 1568 * stmmac_init_dma_engine - DMA init.
32ceabca
GC
1569 * @priv: driver private structure
1570 * Description:
1571 * It inits the DMA invoking the specific MAC/GMAC callback.
1572 * Some DMA parameters can be passed from the platform;
1573 * if they are not passed, a default is kept for the MAC or GMAC.
1574 */
0f1f88a8
GC
1575static int stmmac_init_dma_engine(struct stmmac_priv *priv)
1576{
c24602ef 1577 int atds = 0;
495db273 1578 int ret = 0;
0f1f88a8 1579
a332e2fa
NC
1580 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
1581 dev_err(priv->device, "Invalid DMA configuration\n");
89ab75bf 1582 return -EINVAL;
0f1f88a8
GC
1583 }
1584
c24602ef
GC
1585 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
1586 atds = 1;
1587
495db273
GC
1588 ret = priv->hw->dma->reset(priv->ioaddr);
1589 if (ret) {
1590 dev_err(priv->device, "Failed to reset the dma\n");
1591 return ret;
1592 }
1593
50ca903a 1594 priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
89ab75bf 1595 priv->dma_tx_phy, priv->dma_rx_phy, atds);
afea0365 1596
f748be53
AT
1597 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1598 priv->rx_tail_addr = priv->dma_rx_phy +
1599 (DMA_RX_SIZE * sizeof(struct dma_desc));
1600 priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, priv->rx_tail_addr,
1601 STMMAC_CHAN0);
1602
1603 priv->tx_tail_addr = priv->dma_tx_phy +
1604 (DMA_TX_SIZE * sizeof(struct dma_desc));
1605 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
1606 STMMAC_CHAN0);
1607 }
1608
1609 if (priv->plat->axi && priv->hw->dma->axi)
afea0365
GC
1610 priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
1611
495db273 1612 return ret;
0f1f88a8
GC
1613}
1614
9125cdd1 1615/**
732fdf0e 1616 * stmmac_tx_timer - mitigation sw timer for tx.
9125cdd1
GC
1617 * @data: data pointer
1618 * Description:
1619 * This is the timer handler to directly invoke the stmmac_tx_clean.
1620 */
1621static void stmmac_tx_timer(unsigned long data)
1622{
1623 struct stmmac_priv *priv = (struct stmmac_priv *)data;
1624
1625 stmmac_tx_clean(priv);
1626}
1627
1628/**
732fdf0e 1629 * stmmac_init_tx_coalesce - init tx mitigation options.
32ceabca 1630 * @priv: driver private structure
9125cdd1
GC
1631 * Description:
1632 * This inits the transmit coalesce parameters: i.e. timer rate,
1633 * timer handler and default threshold used for enabling the
1634 * interrupt on completion bit.
1635 */
1636static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
1637{
1638 priv->tx_coal_frames = STMMAC_TX_FRAMES;
1639 priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
1640 init_timer(&priv->txtimer);
1641 priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
1642 priv->txtimer.data = (unsigned long)priv;
1643 priv->txtimer.function = stmmac_tx_timer;
1644 add_timer(&priv->txtimer);
1645}
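
/* Hedged sketch (not part of this driver): on kernels that provide the
 * timer_setup()/from_timer() API, the same coalesce timer could be set up
 * without the (unsigned long) data cast, assuming txtimer is converted to
 * the timer_list callback style:
 *
 *	static void stmmac_tx_timer_cb(struct timer_list *t)
 *	{
 *		struct stmmac_priv *priv = from_timer(priv, t, txtimer);
 *
 *		stmmac_tx_clean(priv);
 *	}
 *
 *	timer_setup(&priv->txtimer, stmmac_tx_timer_cb, 0);
 *	mod_timer(&priv->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
 */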
1646
523f11b5 1647/**
732fdf0e 1648 * stmmac_hw_setup - setup mac in a usable state.
523f11b5
SK
1649 * @dev : pointer to the device structure.
1650 * Description:
732fdf0e
GC
1651 * this is the main function to set up the HW in a usable state: the
1652 * dma engine is reset, the core registers are configured (e.g. AXI,
1653 * Checksum features, timers). The DMA is ready to start receiving and
1654 * transmitting.
523f11b5
SK
1655 * Return value:
1656 * 0 on success and an appropriate (-)ve integer as defined in errno.h
1657 * file on failure.
1658 */
fe131929 1659static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
523f11b5
SK
1660{
1661 struct stmmac_priv *priv = netdev_priv(dev);
1662 int ret;
1663
523f11b5
SK
1664 /* DMA initialization and SW reset */
1665 ret = stmmac_init_dma_engine(priv);
1666 if (ret < 0) {
38ddc59d
LC
1667 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
1668 __func__);
523f11b5
SK
1669 return ret;
1670 }
1671
1672 /* Copy the MAC addr into the HW */
7ed24bbe 1673 priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);
523f11b5 1674
02e57b9d
GC
1675 /* PS and related bits will be programmed according to the speed */
1676 if (priv->hw->pcs) {
1677 int speed = priv->plat->mac_port_sel_speed;
1678
1679 if ((speed == SPEED_10) || (speed == SPEED_100) ||
1680 (speed == SPEED_1000)) {
1681 priv->hw->ps = speed;
1682 } else {
1683 dev_warn(priv->device, "invalid port speed\n");
1684 priv->hw->ps = 0;
1685 }
1686 }
1687
523f11b5 1688 /* Initialize the MAC Core */
7ed24bbe 1689 priv->hw->mac->core_init(priv->hw, dev->mtu);
523f11b5 1690
9eb12474 1691 /* Initialize MAC RX Queues */
1692 if (priv->hw->mac->rx_queue_enable)
1693 stmmac_mac_enable_rx_queues(priv);
1694
978aded4
GC
1695 ret = priv->hw->mac->rx_ipc(priv->hw);
1696 if (!ret) {
38ddc59d 1697 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
978aded4 1698 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
d2afb5bd 1699 priv->hw->rx_csum = 0;
978aded4
GC
1700 }
1701
523f11b5 1702 /* Enable the MAC Rx/Tx */
f748be53
AT
1703 if (priv->synopsys_id >= DWMAC_CORE_4_00)
1704 stmmac_dwmac4_set_mac(priv->ioaddr, true);
1705 else
1706 stmmac_set_mac(priv->ioaddr, true);
523f11b5
SK
1707
1708 /* Set the HW DMA mode and the COE */
1709 stmmac_dma_operation_mode(priv);
1710
1711 stmmac_mmc_setup(priv);
1712
fe131929
HC
1713 if (init_ptp) {
1714 ret = stmmac_init_ptp(priv);
722eef28
HK
1715 if (ret == -EOPNOTSUPP)
1716 netdev_warn(priv->dev, "PTP not supported by HW\n");
1717 else if (ret)
1718 netdev_warn(priv->dev, "PTP init failed\n");
fe131929 1719 }
523f11b5 1720
50fb4f74 1721#ifdef CONFIG_DEBUG_FS
523f11b5
SK
1722 ret = stmmac_init_fs(dev);
1723 if (ret < 0)
38ddc59d
LC
1724 netdev_warn(priv->dev, "%s: failed debugFS registration\n",
1725 __func__);
523f11b5
SK
1726#endif
1727 /* Start the ball rolling... */
38ddc59d 1728 netdev_dbg(priv->dev, "DMA RX/TX processes started...\n");
523f11b5
SK
1729 priv->hw->dma->start_tx(priv->ioaddr);
1730 priv->hw->dma->start_rx(priv->ioaddr);
1731
523f11b5
SK
1732 priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
1733
523f11b5
SK
1734 if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
1735 priv->rx_riwt = MAX_DMA_RIWT;
1736 priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
1737 }
1738
3fe5cadb 1739 if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
02e57b9d 1740 priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
523f11b5 1741
f748be53
AT
1742 /* set TX ring length */
1743 if (priv->hw->dma->set_tx_ring_len)
1744 priv->hw->dma->set_tx_ring_len(priv->ioaddr,
1745 (DMA_TX_SIZE - 1));
1746 /* set RX ring length */
1747 if (priv->hw->dma->set_rx_ring_len)
1748 priv->hw->dma->set_rx_ring_len(priv->ioaddr,
1749 (DMA_RX_SIZE - 1));
1750 /* Enable TSO */
1751 if (priv->tso)
1752 priv->hw->dma->enable_tso(priv->ioaddr, 1, STMMAC_CHAN0);
1753
523f11b5
SK
1754 return 0;
1755}
1756
47dd7a54
GC
1757/**
1758 * stmmac_open - open entry point of the driver
1759 * @dev : pointer to the device structure.
1760 * Description:
1761 * This function is the open entry point of the driver.
1762 * Return value:
1763 * 0 on success and an appropriate (-)ve integer as defined in errno.h
1764 * file on failure.
1765 */
1766static int stmmac_open(struct net_device *dev)
1767{
1768 struct stmmac_priv *priv = netdev_priv(dev);
47dd7a54
GC
1769 int ret;
1770
4bfcbd7a
FV
1771 stmmac_check_ether_addr(priv);
1772
3fe5cadb
GC
1773 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
1774 priv->hw->pcs != STMMAC_PCS_TBI &&
1775 priv->hw->pcs != STMMAC_PCS_RTBI) {
e58bb43f
GC
1776 ret = stmmac_init_phy(dev);
1777 if (ret) {
38ddc59d
LC
1778 netdev_err(priv->dev,
1779 "%s: Cannot attach to PHY (error: %d)\n",
1780 __func__, ret);
89df20d9 1781 return ret;
e58bb43f 1782 }
f66ffe28 1783 }
47dd7a54 1784
523f11b5
SK
1785 /* Extra statistics */
1786 memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
1787 priv->xstats.threshold = tc;
1788
47dd7a54 1789 priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
22ad3838 1790 priv->rx_copybreak = STMMAC_RX_COPYBREAK;
56329137 1791
7262b7b2 1792 ret = alloc_dma_desc_resources(priv);
09f8d696 1793 if (ret < 0) {
38ddc59d
LC
1794 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
1795 __func__);
09f8d696
SK
1796 goto dma_desc_error;
1797 }
1798
777da230
GC
1799 ret = init_dma_desc_rings(dev, GFP_KERNEL);
1800 if (ret < 0) {
38ddc59d
LC
1801 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
1802 __func__);
777da230
GC
1803 goto init_error;
1804 }
1805
fe131929 1806 ret = stmmac_hw_setup(dev, true);
56329137 1807 if (ret < 0) {
38ddc59d 1808 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
c9324d18 1809 goto init_error;
47dd7a54
GC
1810 }
1811
777da230
GC
1812 stmmac_init_tx_coalesce(priv);
1813
d6d50c7e
PR
1814 if (dev->phydev)
1815 phy_start(dev->phydev);
47dd7a54 1816
f66ffe28
GC
1817 /* Request the IRQ lines */
1818 ret = request_irq(dev->irq, stmmac_interrupt,
ceb69499 1819 IRQF_SHARED, dev->name, dev);
f66ffe28 1820 if (unlikely(ret < 0)) {
38ddc59d
LC
1821 netdev_err(priv->dev,
1822 "%s: ERROR: allocating the IRQ %d (error: %d)\n",
1823 __func__, dev->irq, ret);
6c1e5abe 1824 goto irq_error;
f66ffe28
GC
1825 }
1826
7a13f8f5
FV
1827 /* Request the Wake IRQ in case a separate line is used for WoL */
1828 if (priv->wol_irq != dev->irq) {
1829 ret = request_irq(priv->wol_irq, stmmac_interrupt,
1830 IRQF_SHARED, dev->name, dev);
1831 if (unlikely(ret < 0)) {
38ddc59d
LC
1832 netdev_err(priv->dev,
1833 "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
1834 __func__, priv->wol_irq, ret);
c9324d18 1835 goto wolirq_error;
7a13f8f5
FV
1836 }
1837 }
1838
d765955d 1839 /* Request the LPI IRQ in case a separate line is used */
d7ec8584 1840 if (priv->lpi_irq > 0) {
d765955d
GC
1841 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
1842 dev->name, dev);
1843 if (unlikely(ret < 0)) {
38ddc59d
LC
1844 netdev_err(priv->dev,
1845 "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
1846 __func__, priv->lpi_irq, ret);
c9324d18 1847 goto lpiirq_error;
d765955d
GC
1848 }
1849 }
1850
47dd7a54 1851 napi_enable(&priv->napi);
47dd7a54 1852 netif_start_queue(dev);
f66ffe28 1853
47dd7a54 1854 return 0;
f66ffe28 1855
c9324d18 1856lpiirq_error:
d765955d
GC
1857 if (priv->wol_irq != dev->irq)
1858 free_irq(priv->wol_irq, dev);
c9324d18 1859wolirq_error:
7a13f8f5 1860 free_irq(dev->irq, dev);
6c1e5abe
TR
1861irq_error:
1862 if (dev->phydev)
1863 phy_stop(dev->phydev);
7a13f8f5 1864
6c1e5abe 1865 del_timer_sync(&priv->txtimer);
c9324d18
GC
1866init_error:
1867 free_dma_desc_resources(priv);
56329137 1868dma_desc_error:
d6d50c7e
PR
1869 if (dev->phydev)
1870 phy_disconnect(dev->phydev);
4bfcbd7a 1871
f66ffe28 1872 return ret;
47dd7a54
GC
1873}
1874
1875/**
1876 * stmmac_release - close entry point of the driver
1877 * @dev : device pointer.
1878 * Description:
1879 * This is the stop entry point of the driver.
1880 */
1881static int stmmac_release(struct net_device *dev)
1882{
1883 struct stmmac_priv *priv = netdev_priv(dev);
1884
d765955d
GC
1885 if (priv->eee_enabled)
1886 del_timer_sync(&priv->eee_ctrl_timer);
1887
47dd7a54 1888 /* Stop and disconnect the PHY */
d6d50c7e
PR
1889 if (dev->phydev) {
1890 phy_stop(dev->phydev);
1891 phy_disconnect(dev->phydev);
47dd7a54
GC
1892 }
1893
1894 netif_stop_queue(dev);
1895
47dd7a54 1896 napi_disable(&priv->napi);
47dd7a54 1897
9125cdd1
GC
1898 del_timer_sync(&priv->txtimer);
1899
47dd7a54
GC
1900 /* Free the IRQ lines */
1901 free_irq(dev->irq, dev);
7a13f8f5
FV
1902 if (priv->wol_irq != dev->irq)
1903 free_irq(priv->wol_irq, dev);
d7ec8584 1904 if (priv->lpi_irq > 0)
d765955d 1905 free_irq(priv->lpi_irq, dev);
47dd7a54
GC
1906
1907 /* Stop TX/RX DMA and clear the descriptors */
ad01b7d4
GC
1908 priv->hw->dma->stop_tx(priv->ioaddr);
1909 priv->hw->dma->stop_rx(priv->ioaddr);
47dd7a54
GC
1910
1911 /* Release and free the Rx/Tx resources */
1912 free_dma_desc_resources(priv);
1913
19449bfc 1914 /* Disable the MAC Rx/Tx */
bfab27a1 1915 stmmac_set_mac(priv->ioaddr, false);
47dd7a54
GC
1916
1917 netif_carrier_off(dev);
1918
50fb4f74 1919#ifdef CONFIG_DEBUG_FS
466c5ac8 1920 stmmac_exit_fs(dev);
bfab27a1 1921#endif
bfab27a1 1922
92ba6888
RK
1923 stmmac_release_ptp(priv);
1924
47dd7a54
GC
1925 return 0;
1926}
1927
f748be53
AT
1928/**
1929 * stmmac_tso_allocator - fill TSO payload descriptors
1930 * @priv: driver private structure
1931 * @des: buffer start address
1932 * @total_len: total length to fill in descriptors
1933 * @last_segment: condition for the last descriptor
1934 * Description:
1935 * This function fills descriptors and requests new ones according to the
1936 * buffer length to fill; see the worked example after this function.
1937 */
1938static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
1939 int total_len, bool last_segment)
1940{
1941 struct dma_desc *desc;
1942 int tmp_len;
1943 u32 buff_size;
1944
1945 tmp_len = total_len;
1946
1947 while (tmp_len > 0) {
1948 priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
1949 desc = priv->dma_tx + priv->cur_tx;
1950
f8be0d78 1951 desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
f748be53
AT
1952 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
1953 TSO_MAX_BUFF_SIZE : tmp_len;
1954
1955 priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
1956 0, 1,
1957 (last_segment) && (buff_size < TSO_MAX_BUFF_SIZE),
1958 0, 0);
1959
1960 tmp_len -= TSO_MAX_BUFF_SIZE;
1961 }
1962}
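
/* Worked example (illustrative, not part of the driver): with
 * TSO_MAX_BUFF_SIZE = SZ_16K - 1 = 16383, a total_len of 40000 bytes is
 * consumed by the loop above as 16383 + 16383 + 7234 across three
 * descriptors; only the last chunk satisfies buff_size < TSO_MAX_BUFF_SIZE
 * and can therefore carry the last-segment flag.
 */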
1963
1964/**
1965 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
1966 * @skb : the socket buffer
1967 * @dev : device pointer
1968 * Description: this is the transmit function that is called on TSO frames
1969 * (support available on GMAC4 and newer chips).
1970 * The diagram below shows the ring programming in the case of TSO frames:
1971 *
1972 * First Descriptor
1973 * --------
1974 * | DES0 |---> buffer1 = L2/L3/L4 header
1975 * | DES1 |---> TCP Payload (can continue on next descr...)
1976 * | DES2 |---> buffer 1 and 2 len
1977 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
1978 * --------
1979 * |
1980 * ...
1981 * |
1982 * --------
1983 * | DES0 | --| Split TCP Payload on Buffers 1 and 2
1984 * | DES1 | --|
1985 * | DES2 | --> buffer 1 and 2 len
1986 * | DES3 |
1987 * --------
1988 *
1989 * The MSS is fixed while TSO is enabled, so the TDES3 context field is only programmed when the MSS value changes.
1990 */
1991static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
1992{
1993 u32 pay_len, mss;
1994 int tmp_pay_len = 0;
1995 struct stmmac_priv *priv = netdev_priv(dev);
1996 int nfrags = skb_shinfo(skb)->nr_frags;
1997 unsigned int first_entry, des;
1998 struct dma_desc *desc, *first, *mss_desc = NULL;
1999 u8 proto_hdr_len;
2000 int i;
2001
f748be53
AT
2002 /* Compute header lengths */
2003 proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2004
2005 /* Desc availability based on the threshold should be safe enough */
2006 if (unlikely(stmmac_tx_avail(priv) <
2007 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2008 if (!netif_queue_stopped(dev)) {
2009 netif_stop_queue(dev);
2010 /* This is a hard error, log it. */
38ddc59d
LC
2011 netdev_err(priv->dev,
2012 "%s: Tx Ring full when queue awake\n",
2013 __func__);
f748be53 2014 }
f748be53
AT
2015 return NETDEV_TX_BUSY;
2016 }
2017
2018 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2019
2020 mss = skb_shinfo(skb)->gso_size;
2021
2022 /* set new MSS value if needed */
2023 if (mss != priv->mss) {
2024 mss_desc = priv->dma_tx + priv->cur_tx;
2025 priv->hw->desc->set_mss(mss_desc, mss);
2026 priv->mss = mss;
2027 priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
2028 }
2029
2030 if (netif_msg_tx_queued(priv)) {
2031 pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2032 __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2033 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2034 skb->data_len);
2035 }
2036
2037 first_entry = priv->cur_tx;
2038
2039 desc = priv->dma_tx + first_entry;
2040 first = desc;
2041
2042 /* first descriptor: fill Headers on Buf1 */
2043 des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2044 DMA_TO_DEVICE);
2045 if (dma_mapping_error(priv->device, des))
2046 goto dma_map_err;
2047
2048 priv->tx_skbuff_dma[first_entry].buf = des;
2049 priv->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2050 priv->tx_skbuff[first_entry] = skb;
2051
f8be0d78 2052 first->des0 = cpu_to_le32(des);
f748be53
AT
2053
2054 /* Fill start of payload in buff2 of first descriptor */
2055 if (pay_len)
f8be0d78 2056 first->des1 = cpu_to_le32(des + proto_hdr_len);
f748be53
AT
2057
2058 /* If needed take extra descriptors to fill the remaining payload */
2059 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2060
2061 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0));
2062
2063 /* Prepare fragments */
2064 for (i = 0; i < nfrags; i++) {
2065 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2066
2067 des = skb_frag_dma_map(priv->device, frag, 0,
2068 skb_frag_size(frag),
2069 DMA_TO_DEVICE);
2070
2071 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2072 (i == nfrags - 1));
2073
2074 priv->tx_skbuff_dma[priv->cur_tx].buf = des;
2075 priv->tx_skbuff_dma[priv->cur_tx].len = skb_frag_size(frag);
2076 priv->tx_skbuff[priv->cur_tx] = NULL;
2077 priv->tx_skbuff_dma[priv->cur_tx].map_as_page = true;
2078 }
2079
2080 priv->tx_skbuff_dma[priv->cur_tx].last_segment = true;
2081
2082 priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
2083
2084 if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
b3e51069
LC
2085 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2086 __func__);
f748be53
AT
2087 netif_stop_queue(dev);
2088 }
2089
2090 dev->stats.tx_bytes += skb->len;
2091 priv->xstats.tx_tso_frames++;
2092 priv->xstats.tx_tso_nfrags += nfrags;
2093
2094 /* Manage tx mitigation */
2095 priv->tx_count_frames += nfrags + 1;
2096 if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2097 mod_timer(&priv->txtimer,
2098 STMMAC_COAL_TIMER(priv->tx_coal_timer));
2099 } else {
2100 priv->tx_count_frames = 0;
2101 priv->hw->desc->set_tx_ic(desc);
2102 priv->xstats.tx_set_ic_bit++;
2103 }
2104
2105 if (!priv->hwts_tx_en)
2106 skb_tx_timestamp(skb);
2107
2108 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2109 priv->hwts_tx_en)) {
2110 /* declare that device is doing timestamping */
2111 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2112 priv->hw->desc->enable_tx_timestamp(first);
2113 }
2114
2115 /* Complete the first descriptor before granting the DMA */
2116 priv->hw->desc->prepare_tso_tx_desc(first, 1,
2117 proto_hdr_len,
2118 pay_len,
2119 1, priv->tx_skbuff_dma[first_entry].last_segment,
2120 tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2121
2122 /* If context desc is used to change MSS */
2123 if (mss_desc)
2124 priv->hw->desc->set_tx_owner(mss_desc);
2125
2126 /* The own bit must be the last setting done when preparing the
2127 * descriptor and then barrier is needed to make sure that
2128 * all is coherent before granting the DMA engine.
2129 */
ad688cdb 2130 dma_wmb();
f748be53
AT
2131
2132 if (netif_msg_pktdata(priv)) {
2133 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2134 __func__, priv->cur_tx, priv->dirty_tx, first_entry,
2135 priv->cur_tx, first, nfrags);
2136
2137 priv->hw->desc->display_ring((void *)priv->dma_tx, DMA_TX_SIZE,
2138 0);
2139
2140 pr_info(">>> frame to be transmitted: ");
2141 print_pkt(skb->data, skb_headlen(skb));
2142 }
2143
2144 netdev_sent_queue(dev, skb->len);
2145
2146 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
2147 STMMAC_CHAN0);
2148
f748be53
AT
2149 return NETDEV_TX_OK;
2150
2151dma_map_err:
f748be53
AT
2152 dev_err(priv->device, "Tx dma map failed\n");
2153 dev_kfree_skb(skb);
2154 priv->dev->stats.tx_dropped++;
2155 return NETDEV_TX_OK;
2156}
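
/* Hedged sketch (hypothetical helper, not part of the driver): the
 * descriptor-availability test at the top of stmmac_tso_xmit() can be read
 * as "one descriptor per TSO_MAX_BUFF_SIZE chunk of payload, plus one":
 *
 *	static inline u32 stmmac_tso_desc_estimate(const struct sk_buff *skb,
 *						   u8 proto_hdr_len)
 *	{
 *		return (skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1;
 *	}
 *
 * After the frame is queued, the queue is stopped again whenever fewer
 * than MAX_SKB_FRAGS + 1 descriptors remain.
 */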
2157
47dd7a54 2158/**
732fdf0e 2159 * stmmac_xmit - Tx entry point of the driver
47dd7a54
GC
2160 * @skb : the socket buffer
2161 * @dev : device pointer
32ceabca
GC
2162 * Description : this is the tx entry point of the driver.
2163 * It programs the chain or the ring and supports oversized frames
2164 * and SG feature.
47dd7a54
GC
2165 */
2166static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2167{
2168 struct stmmac_priv *priv = netdev_priv(dev);
0e80bdc9 2169 unsigned int nopaged_len = skb_headlen(skb);
4a7d666a 2170 int i, csum_insertion = 0, is_jumbo = 0;
47dd7a54 2171 int nfrags = skb_shinfo(skb)->nr_frags;
0e80bdc9 2172 unsigned int entry, first_entry;
47dd7a54 2173 struct dma_desc *desc, *first;
0e80bdc9 2174 unsigned int enh_desc;
f748be53
AT
2175 unsigned int des;
2176
2177 /* Manage oversized TCP frames for GMAC4 device */
2178 if (skb_is_gso(skb) && priv->tso) {
2179 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2180 return stmmac_tso_xmit(skb, dev);
2181 }
47dd7a54
GC
2182
2183 if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
2184 if (!netif_queue_stopped(dev)) {
2185 netif_stop_queue(dev);
2186 /* This is a hard error, log it. */
38ddc59d
LC
2187 netdev_err(priv->dev,
2188 "%s: Tx Ring full when queue awake\n",
2189 __func__);
47dd7a54
GC
2190 }
2191 return NETDEV_TX_BUSY;
2192 }
2193
d765955d
GC
2194 if (priv->tx_path_in_lpi_mode)
2195 stmmac_disable_eee_mode(priv);
2196
e3ad57c9 2197 entry = priv->cur_tx;
0e80bdc9 2198 first_entry = entry;
47dd7a54 2199
5e982f3b 2200 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
47dd7a54 2201
0e80bdc9 2202 if (likely(priv->extend_desc))
ceb69499 2203 desc = (struct dma_desc *)(priv->dma_etx + entry);
c24602ef
GC
2204 else
2205 desc = priv->dma_tx + entry;
2206
47dd7a54
GC
2207 first = desc;
2208
0e80bdc9
GC
2209 priv->tx_skbuff[first_entry] = skb;
2210
2211 enh_desc = priv->plat->enh_desc;
4a7d666a 2212 /* To program the descriptors according to the size of the frame */
29896a67
GC
2213 if (enh_desc)
2214 is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
2215
f748be53
AT
2216 if (unlikely(is_jumbo) && likely(priv->synopsys_id <
2217 DWMAC_CORE_4_00)) {
29896a67 2218 entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
362b37be
GC
2219 if (unlikely(entry < 0))
2220 goto dma_map_err;
29896a67 2221 }
47dd7a54
GC
2222
2223 for (i = 0; i < nfrags; i++) {
9e903e08
ED
2224 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2225 int len = skb_frag_size(frag);
be434d50 2226 bool last_segment = (i == (nfrags - 1));
47dd7a54 2227
e3ad57c9
GC
2228 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
2229
0e80bdc9 2230 if (likely(priv->extend_desc))
ceb69499 2231 desc = (struct dma_desc *)(priv->dma_etx + entry);
c24602ef
GC
2232 else
2233 desc = priv->dma_tx + entry;
47dd7a54 2234
f748be53
AT
2235 des = skb_frag_dma_map(priv->device, frag, 0, len,
2236 DMA_TO_DEVICE);
2237 if (dma_mapping_error(priv->device, des))
362b37be
GC
2238 goto dma_map_err; /* should reuse desc w/o issues */
2239
0e80bdc9 2240 priv->tx_skbuff[entry] = NULL;
f748be53 2241
f8be0d78
MW
2242 priv->tx_skbuff_dma[entry].buf = des;
2243 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2244 desc->des0 = cpu_to_le32(des);
2245 else
2246 desc->des2 = cpu_to_le32(des);
f748be53 2247
362b37be 2248 priv->tx_skbuff_dma[entry].map_as_page = true;
553e2ab3 2249 priv->tx_skbuff_dma[entry].len = len;
0e80bdc9
GC
2250 priv->tx_skbuff_dma[entry].last_segment = last_segment;
2251
2252 /* Prepare the descriptor and set the own bit too */
4a7d666a 2253 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
be434d50 2254 priv->mode, 1, last_segment);
47dd7a54
GC
2255 }
2256
e3ad57c9
GC
2257 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
2258
2259 priv->cur_tx = entry;
47dd7a54 2260
47dd7a54 2261 if (netif_msg_pktdata(priv)) {
d0225e7d
AT
2262 void *tx_head;
2263
38ddc59d
LC
2264 netdev_dbg(priv->dev,
2265 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
2266 __func__, priv->cur_tx, priv->dirty_tx, first_entry,
2267 entry, first, nfrags);
83d7af64 2268
c24602ef 2269 if (priv->extend_desc)
d0225e7d 2270 tx_head = (void *)priv->dma_etx;
c24602ef 2271 else
d0225e7d
AT
2272 tx_head = (void *)priv->dma_tx;
2273
2274 priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
c24602ef 2275
38ddc59d 2276 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
47dd7a54
GC
2277 print_pkt(skb->data, skb->len);
2278 }
0e80bdc9 2279
47dd7a54 2280 if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
b3e51069
LC
2281 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2282 __func__);
47dd7a54
GC
2283 netif_stop_queue(dev);
2284 }
2285
2286 dev->stats.tx_bytes += skb->len;
2287
0e80bdc9
GC
2288 /* According to the coalesce parameter the IC bit for the latest
2289 * segment is reset and the timer re-started to clean the tx status.
2290 * This approach takes care of the fragments: desc is the first
2291 * element in case of no SG.
2292 */
2293 priv->tx_count_frames += nfrags + 1;
2294 if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2295 mod_timer(&priv->txtimer,
2296 STMMAC_COAL_TIMER(priv->tx_coal_timer));
2297 } else {
2298 priv->tx_count_frames = 0;
2299 priv->hw->desc->set_tx_ic(desc);
2300 priv->xstats.tx_set_ic_bit++;
891434b1
RK
2301 }
2302
2303 if (!priv->hwts_tx_en)
2304 skb_tx_timestamp(skb);
3e82ce12 2305
0e80bdc9
GC
2306 /* Ready to fill the first descriptor and set the OWN bit w/o any
2307 * problems because all the descriptors are actually ready to be
2308 * passed to the DMA engine.
2309 */
2310 if (likely(!is_jumbo)) {
2311 bool last_segment = (nfrags == 0);
2312
f748be53
AT
2313 des = dma_map_single(priv->device, skb->data,
2314 nopaged_len, DMA_TO_DEVICE);
2315 if (dma_mapping_error(priv->device, des))
0e80bdc9
GC
2316 goto dma_map_err;
2317
f8be0d78
MW
2318 priv->tx_skbuff_dma[first_entry].buf = des;
2319 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2320 first->des0 = cpu_to_le32(des);
2321 else
2322 first->des2 = cpu_to_le32(des);
f748be53 2323
0e80bdc9
GC
2324 priv->tx_skbuff_dma[first_entry].len = nopaged_len;
2325 priv->tx_skbuff_dma[first_entry].last_segment = last_segment;
2326
2327 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2328 priv->hwts_tx_en)) {
2329 /* declare that device is doing timestamping */
2330 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2331 priv->hw->desc->enable_tx_timestamp(first);
2332 }
2333
2334 /* Prepare the first descriptor setting the OWN bit too */
2335 priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
2336 csum_insertion, priv->mode, 1,
2337 last_segment);
2338
2339 /* The own bit must be the last setting done when preparing the
2340 * descriptor and then barrier is needed to make sure that
2341 * all is coherent before granting the DMA engine.
2342 */
ad688cdb 2343 dma_wmb();
0e80bdc9
GC
2344 }
2345
38979574 2346 netdev_sent_queue(dev, skb->len);
f748be53
AT
2347
2348 if (priv->synopsys_id < DWMAC_CORE_4_00)
2349 priv->hw->dma->enable_dma_transmission(priv->ioaddr);
2350 else
2351 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
2352 STMMAC_CHAN0);
52f64fae 2353
362b37be 2354 return NETDEV_TX_OK;
a9097a96 2355
362b37be 2356dma_map_err:
38ddc59d 2357 netdev_err(priv->dev, "Tx DMA map failed\n");
362b37be
GC
2358 dev_kfree_skb(skb);
2359 priv->dev->stats.tx_dropped++;
47dd7a54
GC
2360 return NETDEV_TX_OK;
2361}
2362
b9381985
VB
2363static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
2364{
2365 struct ethhdr *ehdr;
2366 u16 vlanid;
2367
2368 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
2369 NETIF_F_HW_VLAN_CTAG_RX &&
2370 !__vlan_get_tag(skb, &vlanid)) {
2371 /* pop the vlan tag */
2372 ehdr = (struct ethhdr *)skb->data;
2373 memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
2374 skb_pull(skb, VLAN_HLEN);
2375 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
2376 }
2377}
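
/* Worked example (illustrative): for a tagged frame laid out as
 * [dst(6) | src(6) | TPID 0x8100 + TCI (4) | type(2) | payload], the
 * memmove() above shifts the twelve address bytes forward by VLAN_HLEN (4)
 * and skb_pull() drops the tag, leaving [dst | src | type | payload] with
 * the extracted VLAN id handed to the stack via __vlan_hwaccel_put_tag().
 */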
2378
2379
120e87f9
GC
2380static inline int stmmac_rx_threshold_count(struct stmmac_priv *priv)
2381{
2382 if (priv->rx_zeroc_thresh < STMMAC_RX_THRESH)
2383 return 0;
2384
2385 return 1;
2386}
2387
32ceabca 2388/**
732fdf0e 2389 * stmmac_rx_refill - refill the used preallocated skb buffers
32ceabca
GC
2390 * @priv: driver private structure
2391 * Description : this reallocates the skbs for the zero-copy based
2392 * reception process.
2393 */
47dd7a54
GC
2394static inline void stmmac_rx_refill(struct stmmac_priv *priv)
2395{
47dd7a54 2396 int bfsize = priv->dma_buf_sz;
e3ad57c9
GC
2397 unsigned int entry = priv->dirty_rx;
2398 int dirty = stmmac_rx_dirty(priv);
47dd7a54 2399
e3ad57c9 2400 while (dirty-- > 0) {
c24602ef
GC
2401 struct dma_desc *p;
2402
2403 if (priv->extend_desc)
ceb69499 2404 p = (struct dma_desc *)(priv->dma_erx + entry);
c24602ef
GC
2405 else
2406 p = priv->dma_rx + entry;
2407
47dd7a54
GC
2408 if (likely(priv->rx_skbuff[entry] == NULL)) {
2409 struct sk_buff *skb;
2410
acb600de 2411 skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
120e87f9
GC
2412 if (unlikely(!skb)) {
2413 /* so for a while no zero-copy! */
2414 priv->rx_zeroc_thresh = STMMAC_RX_THRESH;
2415 if (unlikely(net_ratelimit()))
2416 dev_err(priv->device,
2417 "fail to alloc skb entry %d\n",
2418 entry);
47dd7a54 2419 break;
120e87f9 2420 }
47dd7a54
GC
2421
2422 priv->rx_skbuff[entry] = skb;
2423 priv->rx_skbuff_dma[entry] =
2424 dma_map_single(priv->device, skb->data, bfsize,
2425 DMA_FROM_DEVICE);
362b37be
GC
2426 if (dma_mapping_error(priv->device,
2427 priv->rx_skbuff_dma[entry])) {
38ddc59d 2428 netdev_err(priv->dev, "Rx DMA map failed\n");
362b37be
GC
2429 dev_kfree_skb(skb);
2430 break;
2431 }
286a8372 2432
f748be53 2433 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
f8be0d78 2434 p->des0 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
f748be53
AT
2435 p->des1 = 0;
2436 } else {
f8be0d78 2437 p->des2 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
f748be53
AT
2438 }
2439 if (priv->hw->mode->refill_desc3)
2440 priv->hw->mode->refill_desc3(priv, p);
286a8372 2441
120e87f9
GC
2442 if (priv->rx_zeroc_thresh > 0)
2443 priv->rx_zeroc_thresh--;
2444
b3e51069
LC
2445 netif_dbg(priv, rx_status, priv->dev,
2446 "refill entry #%d\n", entry);
47dd7a54 2447 }
ad688cdb 2448 dma_wmb();
f748be53
AT
2449
2450 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2451 priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
2452 else
2453 priv->hw->desc->set_rx_owner(p);
2454
ad688cdb 2455 dma_wmb();
e3ad57c9
GC
2456
2457 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
47dd7a54 2458 }
e3ad57c9 2459 priv->dirty_rx = entry;
47dd7a54
GC
2460}
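
/* Hedged sketch (hypothetical helper, not part of the driver): assuming
 * cur_rx/dirty_rx are ring indexes advanced with STMMAC_GET_ENTRY(), the
 * refill count returned by stmmac_rx_dirty() can be derived like this:
 *
 *	static inline unsigned int rx_ring_dirty(unsigned int dirty_rx,
 *						 unsigned int cur_rx)
 *	{
 *		if (dirty_rx <= cur_rx)
 *			return cur_rx - dirty_rx;
 *		return DMA_RX_SIZE - dirty_rx + cur_rx;
 *	}
 */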
2461
32ceabca 2462/**
732fdf0e 2463 * stmmac_rx - manage the receive process
32ceabca
GC
2464 * @priv: driver private structure
2465 * @limit: napi budget.
2466 * Description : this is the function called by the napi poll method.
2467 * It gets all the frames inside the ring.
2468 */
47dd7a54
GC
2469static int stmmac_rx(struct stmmac_priv *priv, int limit)
2470{
e3ad57c9 2471 unsigned int entry = priv->cur_rx;
47dd7a54
GC
2472 unsigned int next_entry;
2473 unsigned int count = 0;
d2afb5bd 2474 int coe = priv->hw->rx_csum;
47dd7a54 2475
83d7af64 2476 if (netif_msg_rx_status(priv)) {
d0225e7d
AT
2477 void *rx_head;
2478
38ddc59d 2479 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
c24602ef 2480 if (priv->extend_desc)
d0225e7d 2481 rx_head = (void *)priv->dma_erx;
c24602ef 2482 else
d0225e7d
AT
2483 rx_head = (void *)priv->dma_rx;
2484
2485 priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
47dd7a54 2486 }
c24602ef 2487 while (count < limit) {
47dd7a54 2488 int status;
9401bb5c 2489 struct dma_desc *p;
ba1ffd74 2490 struct dma_desc *np;
47dd7a54 2491
c24602ef 2492 if (priv->extend_desc)
ceb69499 2493 p = (struct dma_desc *)(priv->dma_erx + entry);
c24602ef 2494 else
ceb69499 2495 p = priv->dma_rx + entry;
c24602ef 2496
c1fa3212
FG
2497 /* read the status of the incoming frame */
2498 status = priv->hw->desc->rx_status(&priv->dev->stats,
2499 &priv->xstats, p);
2500 /* check if managed by the DMA otherwise go ahead */
2501 if (unlikely(status & dma_own))
47dd7a54
GC
2502 break;
2503
2504 count++;
2505
e3ad57c9
GC
2506 priv->cur_rx = STMMAC_GET_ENTRY(priv->cur_rx, DMA_RX_SIZE);
2507 next_entry = priv->cur_rx;
2508
c24602ef 2509 if (priv->extend_desc)
ba1ffd74 2510 np = (struct dma_desc *)(priv->dma_erx + next_entry);
c24602ef 2511 else
ba1ffd74
GC
2512 np = priv->dma_rx + next_entry;
2513
2514 prefetch(np);
47dd7a54 2515
c24602ef
GC
2516 if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
2517 priv->hw->desc->rx_extended_status(&priv->dev->stats,
2518 &priv->xstats,
2519 priv->dma_erx +
2520 entry);
891434b1 2521 if (unlikely(status == discard_frame)) {
47dd7a54 2522 priv->dev->stats.rx_errors++;
891434b1 2523 if (priv->hwts_rx_en && !priv->extend_desc) {
8d45e42b 2524 /* DESC2 & DESC3 will be overwritten by the device
891434b1
RK
2525 * with the timestamp value, hence reinitialize
2526 * them in stmmac_rx_refill() so that the
2527 * device can reuse them.
2528 */
2529 priv->rx_skbuff[entry] = NULL;
2530 dma_unmap_single(priv->device,
ceb69499
GC
2531 priv->rx_skbuff_dma[entry],
2532 priv->dma_buf_sz,
2533 DMA_FROM_DEVICE);
891434b1
RK
2534 }
2535 } else {
47dd7a54 2536 struct sk_buff *skb;
3eeb2997 2537 int frame_len;
f748be53
AT
2538 unsigned int des;
2539
2540 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
f8be0d78 2541 des = le32_to_cpu(p->des0);
f748be53 2542 else
f8be0d78 2543 des = le32_to_cpu(p->des2);
47dd7a54 2544
ceb69499
GC
2545 frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
2546
8d45e42b 2547 /* If frame length is greater than skb buffer size
f748be53
AT
2548 * (preallocated during init) then the packet is
2549 * ignored
2550 */
e527c4a7 2551 if (frame_len > priv->dma_buf_sz) {
38ddc59d
LC
2552 netdev_err(priv->dev,
2553 "len %d larger than size (%d)\n",
2554 frame_len, priv->dma_buf_sz);
e527c4a7
GC
2555 priv->dev->stats.rx_length_errors++;
2556 break;
2557 }
2558
3eeb2997 2559 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
ceb69499
GC
2560 * Type frames (LLC/LLC-SNAP)
2561 */
3eeb2997
GC
2562 if (unlikely(status != llc_snap))
2563 frame_len -= ETH_FCS_LEN;
47dd7a54 2564
83d7af64 2565 if (netif_msg_rx_status(priv)) {
38ddc59d
LC
2566 netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
2567 p, entry, des);
83d7af64 2568 if (frame_len > ETH_FRAME_LEN)
38ddc59d
LC
2569 netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
2570 frame_len, status);
83d7af64 2571 }
22ad3838 2572
f748be53
AT
2573 /* The zero-copy path is always used, for all sizes,
2574 * in the case of GMAC4 because it always needs
2575 * to refill the used descriptors.
2576 */
2577 if (unlikely(!priv->plat->has_gmac4 &&
2578 ((frame_len < priv->rx_copybreak) ||
2579 stmmac_rx_threshold_count(priv)))) {
22ad3838
GC
2580 skb = netdev_alloc_skb_ip_align(priv->dev,
2581 frame_len);
2582 if (unlikely(!skb)) {
2583 if (net_ratelimit())
2584 dev_warn(priv->device,
2585 "packet dropped\n");
2586 priv->dev->stats.rx_dropped++;
2587 break;
2588 }
2589
2590 dma_sync_single_for_cpu(priv->device,
2591 priv->rx_skbuff_dma
2592 [entry], frame_len,
2593 DMA_FROM_DEVICE);
2594 skb_copy_to_linear_data(skb,
2595 priv->
2596 rx_skbuff[entry]->data,
2597 frame_len);
2598
2599 skb_put(skb, frame_len);
2600 dma_sync_single_for_device(priv->device,
2601 priv->rx_skbuff_dma
2602 [entry], frame_len,
2603 DMA_FROM_DEVICE);
2604 } else {
2605 skb = priv->rx_skbuff[entry];
2606 if (unlikely(!skb)) {
38ddc59d
LC
2607 netdev_err(priv->dev,
2608 "%s: Inconsistent Rx chain\n",
2609 priv->dev->name);
22ad3838
GC
2610 priv->dev->stats.rx_dropped++;
2611 break;
2612 }
2613 prefetch(skb->data - NET_IP_ALIGN);
2614 priv->rx_skbuff[entry] = NULL;
120e87f9 2615 priv->rx_zeroc_thresh++;
22ad3838
GC
2616
2617 skb_put(skb, frame_len);
2618 dma_unmap_single(priv->device,
2619 priv->rx_skbuff_dma[entry],
2620 priv->dma_buf_sz,
2621 DMA_FROM_DEVICE);
47dd7a54 2622 }
47dd7a54 2623
47dd7a54 2624 if (netif_msg_pktdata(priv)) {
38ddc59d
LC
2625 netdev_dbg(priv->dev, "frame received (%dbytes)",
2626 frame_len);
47dd7a54
GC
2627 print_pkt(skb->data, frame_len);
2628 }
83d7af64 2629
ba1ffd74
GC
2630 stmmac_get_rx_hwtstamp(priv, p, np, skb);
2631
b9381985
VB
2632 stmmac_rx_vlan(priv->dev, skb);
2633
47dd7a54
GC
2634 skb->protocol = eth_type_trans(skb, priv->dev);
2635
ceb69499 2636 if (unlikely(!coe))
bc8acf2c 2637 skb_checksum_none_assert(skb);
62a2ab93 2638 else
47dd7a54 2639 skb->ip_summed = CHECKSUM_UNNECESSARY;
62a2ab93
GC
2640
2641 napi_gro_receive(&priv->napi, skb);
47dd7a54
GC
2642
2643 priv->dev->stats.rx_packets++;
2644 priv->dev->stats.rx_bytes += frame_len;
47dd7a54
GC
2645 }
2646 entry = next_entry;
47dd7a54
GC
2647 }
2648
2649 stmmac_rx_refill(priv);
2650
2651 priv->xstats.rx_pkt_n += count;
2652
2653 return count;
2654}
2655
2656/**
2657 * stmmac_poll - stmmac poll method (NAPI)
2658 * @napi : pointer to the napi structure.
2659 * @budget : maximum number of packets that the current CPU can receive from
2660 * all interfaces.
2661 * Description :
9125cdd1 2662 * To look at the incoming frames and clear the tx resources.
47dd7a54
GC
2663 */
2664static int stmmac_poll(struct napi_struct *napi, int budget)
2665{
2666 struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
2667 int work_done = 0;
2668
9125cdd1
GC
2669 priv->xstats.napi_poll++;
2670 stmmac_tx_clean(priv);
47dd7a54 2671
9125cdd1 2672 work_done = stmmac_rx(priv, budget);
47dd7a54 2673 if (work_done < budget) {
6ad20165 2674 napi_complete_done(napi, work_done);
9125cdd1 2675 stmmac_enable_dma_irq(priv);
47dd7a54
GC
2676 }
2677 return work_done;
2678}
2679
2680/**
2681 * stmmac_tx_timeout
2682 * @dev : Pointer to net device structure
2683 * Description: this function is called when a packet transmission fails to
7284a3f1 2684 * complete within a reasonable time. The driver will mark the error in the
47dd7a54
GC
2685 * netdev structure and arrange for the device to be reset to a sane state
2686 * in order to transmit a new packet.
2687 */
2688static void stmmac_tx_timeout(struct net_device *dev)
2689{
2690 struct stmmac_priv *priv = netdev_priv(dev);
2691
2692 /* Clear Tx resources and restart transmitting again */
2693 stmmac_tx_err(priv);
47dd7a54
GC
2694}
2695
47dd7a54 2696/**
01789349 2697 * stmmac_set_rx_mode - entry point for multicast addressing
47dd7a54
GC
2698 * @dev : pointer to the device structure
2699 * Description:
2700 * This function is a driver entry point which gets called by the kernel
2701 * whenever multicast addresses must be enabled/disabled.
2702 * Return value:
2703 * void.
2704 */
01789349 2705static void stmmac_set_rx_mode(struct net_device *dev)
47dd7a54
GC
2706{
2707 struct stmmac_priv *priv = netdev_priv(dev);
2708
3b57de95 2709 priv->hw->mac->set_filter(priv->hw, dev);
47dd7a54
GC
2710}
2711
2712/**
2713 * stmmac_change_mtu - entry point to change MTU size for the device.
2714 * @dev : device pointer.
2715 * @new_mtu : the new MTU size for the device.
2716 * Description: the Maximum Transfer Unit (MTU) is used by the network layer
2717 * to drive packet transmission. Ethernet has an MTU of 1500 octets
2718 * (ETH_DATA_LEN). This value can be changed with ifconfig.
2719 * Return value:
2720 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2721 * file on failure.
2722 */
2723static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
2724{
38ddc59d
LC
2725 struct stmmac_priv *priv = netdev_priv(dev);
2726
47dd7a54 2727 if (netif_running(dev)) {
38ddc59d 2728 netdev_err(priv->dev, "must be stopped to change its MTU\n");
47dd7a54
GC
2729 return -EBUSY;
2730 }
2731
5e982f3b 2732 dev->mtu = new_mtu;
f748be53 2733
5e982f3b
MM
2734 netdev_update_features(dev);
2735
2736 return 0;
2737}
2738
c8f44aff 2739static netdev_features_t stmmac_fix_features(struct net_device *dev,
ceb69499 2740 netdev_features_t features)
5e982f3b
MM
2741{
2742 struct stmmac_priv *priv = netdev_priv(dev);
2743
38912bdb 2744 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5e982f3b 2745 features &= ~NETIF_F_RXCSUM;
d2afb5bd 2746
5e982f3b 2747 if (!priv->plat->tx_coe)
a188222b 2748 features &= ~NETIF_F_CSUM_MASK;
5e982f3b 2749
ebbb293f
GC
2750 /* Some GMAC devices have bugged Jumbo frame support that
2751 * needs to have the Tx COE disabled for oversized frames
2752 * (due to limited buffer sizes). In this case we disable
8d45e42b 2753 * the TX csum insertion in the TDES and not use SF.
ceb69499 2754 */
5e982f3b 2755 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
a188222b 2756 features &= ~NETIF_F_CSUM_MASK;
ebbb293f 2757
f748be53
AT
2758 /* Disable tso if asked by ethtool */
2759 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
2760 if (features & NETIF_F_TSO)
2761 priv->tso = true;
2762 else
2763 priv->tso = false;
2764 }
2765
5e982f3b 2766 return features;
47dd7a54
GC
2767}
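
/* Worked example (illustrative): on a platform with bugged_jumbo set and
 * dev->mtu = 9000 (> ETH_DATA_LEN), the hook above clears NETIF_F_CSUM_MASK,
 * so the stack computes TX checksums in software, while the RX checksum
 * feature is only dropped when priv->plat->rx_coe is STMMAC_RX_COE_NONE.
 */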
2768
d2afb5bd
GC
2769static int stmmac_set_features(struct net_device *netdev,
2770 netdev_features_t features)
2771{
2772 struct stmmac_priv *priv = netdev_priv(netdev);
2773
2774 /* Keep the COE Type in case of csum is supporting */
2775 if (features & NETIF_F_RXCSUM)
2776 priv->hw->rx_csum = priv->plat->rx_coe;
2777 else
2778 priv->hw->rx_csum = 0;
2779 /* No check needed because rx_coe has been set before and it will be
2780 * fixed in case of issue.
2781 */
2782 priv->hw->mac->rx_ipc(priv->hw);
2783
2784 return 0;
2785}
2786
32ceabca
GC
2787/**
2788 * stmmac_interrupt - main ISR
2789 * @irq: interrupt number.
2790 * @dev_id: to pass the net device pointer.
2791 * Description: this is the main driver interrupt service routine.
732fdf0e
GC
2792 * It can call:
2793 * o DMA service routine (to manage incoming frame reception and transmission
2794 * status)
2795 * o Core interrupts to manage: remote wake-up, management counter, LPI
2796 * interrupts.
32ceabca 2797 */
47dd7a54
GC
2798static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
2799{
2800 struct net_device *dev = (struct net_device *)dev_id;
2801 struct stmmac_priv *priv = netdev_priv(dev);
2802
89f7f2cf
SK
2803 if (priv->irq_wake)
2804 pm_wakeup_event(priv->device, 0);
2805
47dd7a54 2806 if (unlikely(!dev)) {
38ddc59d 2807 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
47dd7a54
GC
2808 return IRQ_NONE;
2809 }
2810
d765955d 2811 /* To handle GMAC own interrupts */
f748be53 2812 if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
7ed24bbe 2813 int status = priv->hw->mac->host_irq_status(priv->hw,
0982a0f6 2814 &priv->xstats);
d765955d 2815 if (unlikely(status)) {
d765955d 2816 /* For LPI we need to save the tx status */
0982a0f6 2817 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
d765955d 2818 priv->tx_path_in_lpi_mode = true;
0982a0f6 2819 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
d765955d 2820 priv->tx_path_in_lpi_mode = false;
a8b7d770 2821 if (status & CORE_IRQ_MTL_RX_OVERFLOW && priv->hw->dma->set_rx_tail_ptr)
f748be53
AT
2822 priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
2823 priv->rx_tail_addr,
2824 STMMAC_CHAN0);
d765955d 2825 }
70523e63
GC
2826
2827 /* PCS link status */
3fe5cadb 2828 if (priv->hw->pcs) {
70523e63
GC
2829 if (priv->xstats.pcs_link)
2830 netif_carrier_on(dev);
2831 else
2832 netif_carrier_off(dev);
2833 }
d765955d 2834 }
aec7ff27 2835
d765955d 2836 /* To handle DMA interrupts */
aec7ff27 2837 stmmac_dma_interrupt(priv);
47dd7a54
GC
2838
2839 return IRQ_HANDLED;
2840}
2841
2842#ifdef CONFIG_NET_POLL_CONTROLLER
2843/* Polling receive - used by NETCONSOLE and other diagnostic tools
ceb69499
GC
2844 * to allow network I/O with interrupts disabled.
2845 */
47dd7a54
GC
2846static void stmmac_poll_controller(struct net_device *dev)
2847{
2848 disable_irq(dev->irq);
2849 stmmac_interrupt(dev->irq, dev);
2850 enable_irq(dev->irq);
2851}
2852#endif
2853
2854/**
2855 * stmmac_ioctl - Entry point for the Ioctl
2856 * @dev: Device pointer.
2857 * @rq: An IOCTL-specific structure that can contain a pointer to
2858 * a proprietary structure used to pass information to the driver.
2859 * @cmd: IOCTL command
2860 * Description:
32ceabca 2861 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
47dd7a54
GC
2862 */
2863static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2864{
891434b1 2865 int ret = -EOPNOTSUPP;
47dd7a54
GC
2866
2867 if (!netif_running(dev))
2868 return -EINVAL;
2869
891434b1
RK
2870 switch (cmd) {
2871 case SIOCGMIIPHY:
2872 case SIOCGMIIREG:
2873 case SIOCSMIIREG:
d6d50c7e 2874 if (!dev->phydev)
891434b1 2875 return -EINVAL;
d6d50c7e 2876 ret = phy_mii_ioctl(dev->phydev, rq, cmd);
891434b1
RK
2877 break;
2878 case SIOCSHWTSTAMP:
2879 ret = stmmac_hwtstamp_ioctl(dev, rq);
2880 break;
2881 default:
2882 break;
2883 }
28b04113 2884
47dd7a54
GC
2885 return ret;
2886}
2887
50fb4f74 2888#ifdef CONFIG_DEBUG_FS
7ac29055 2889static struct dentry *stmmac_fs_dir;
7ac29055 2890
c24602ef 2891static void sysfs_display_ring(void *head, int size, int extend_desc,
ceb69499 2892 struct seq_file *seq)
7ac29055 2893{
7ac29055 2894 int i;
ceb69499
GC
2895 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
2896 struct dma_desc *p = (struct dma_desc *)head;
7ac29055 2897
c24602ef 2898 for (i = 0; i < size; i++) {
c24602ef 2899 if (extend_desc) {
c24602ef 2900 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
ceb69499 2901 i, (unsigned int)virt_to_phys(ep),
f8be0d78
MW
2902 le32_to_cpu(ep->basic.des0),
2903 le32_to_cpu(ep->basic.des1),
2904 le32_to_cpu(ep->basic.des2),
2905 le32_to_cpu(ep->basic.des3));
c24602ef
GC
2906 ep++;
2907 } else {
c24602ef 2908 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
ceb69499 2909 i, (unsigned int)virt_to_phys(ep),
f8be0d78
MW
2910 le32_to_cpu(p->des0), le32_to_cpu(p->des1),
2911 le32_to_cpu(p->des2), le32_to_cpu(p->des3));
c24602ef
GC
2912 p++;
2913 }
7ac29055
GC
2914 seq_printf(seq, "\n");
2915 }
c24602ef 2916}
7ac29055 2917
c24602ef
GC
2918static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
2919{
2920 struct net_device *dev = seq->private;
2921 struct stmmac_priv *priv = netdev_priv(dev);
7ac29055 2922
c24602ef
GC
2923 if (priv->extend_desc) {
2924 seq_printf(seq, "Extended RX descriptor ring:\n");
e3ad57c9 2925 sysfs_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1, seq);
c24602ef 2926 seq_printf(seq, "Extended TX descriptor ring:\n");
e3ad57c9 2927 sysfs_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1, seq);
c24602ef
GC
2928 } else {
2929 seq_printf(seq, "RX descriptor ring:\n");
e3ad57c9 2930 sysfs_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0, seq);
c24602ef 2931 seq_printf(seq, "TX descriptor ring:\n");
e3ad57c9 2932 sysfs_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0, seq);
7ac29055
GC
2933 }
2934
2935 return 0;
2936}
2937
2938static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
2939{
2940 return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
2941}
2942
22d3efe5
PM
2943/* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
2944
7ac29055
GC
2945static const struct file_operations stmmac_rings_status_fops = {
2946 .owner = THIS_MODULE,
2947 .open = stmmac_sysfs_ring_open,
2948 .read = seq_read,
2949 .llseek = seq_lseek,
74863948 2950 .release = single_release,
7ac29055
GC
2951};
2952
e7434821
GC
2953static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
2954{
2955 struct net_device *dev = seq->private;
2956 struct stmmac_priv *priv = netdev_priv(dev);
2957
19e30c14 2958 if (!priv->hw_cap_support) {
e7434821
GC
2959 seq_printf(seq, "DMA HW features not supported\n");
2960 return 0;
2961 }
2962
2963 seq_printf(seq, "==============================\n");
2964 seq_printf(seq, "\tDMA HW features\n");
2965 seq_printf(seq, "==============================\n");
2966
22d3efe5 2967 seq_printf(seq, "\t10/100 Mbps: %s\n",
e7434821 2968 (priv->dma_cap.mbps_10_100) ? "Y" : "N");
22d3efe5 2969 seq_printf(seq, "\t1000 Mbps: %s\n",
e7434821 2970 (priv->dma_cap.mbps_1000) ? "Y" : "N");
22d3efe5 2971 seq_printf(seq, "\tHalf duplex: %s\n",
e7434821
GC
2972 (priv->dma_cap.half_duplex) ? "Y" : "N");
2973 seq_printf(seq, "\tHash Filter: %s\n",
2974 (priv->dma_cap.hash_filter) ? "Y" : "N");
2975 seq_printf(seq, "\tMultiple MAC address registers: %s\n",
2976 (priv->dma_cap.multi_addr) ? "Y" : "N");
8d45e42b 2977 seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
e7434821
GC
2978 (priv->dma_cap.pcs) ? "Y" : "N");
2979 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
2980 (priv->dma_cap.sma_mdio) ? "Y" : "N");
2981 seq_printf(seq, "\tPMT Remote wake up: %s\n",
2982 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
2983 seq_printf(seq, "\tPMT Magic Frame: %s\n",
2984 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
2985 seq_printf(seq, "\tRMON module: %s\n",
2986 (priv->dma_cap.rmon) ? "Y" : "N");
2987 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
2988 (priv->dma_cap.time_stamp) ? "Y" : "N");
22d3efe5 2989 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
e7434821 2990 (priv->dma_cap.atime_stamp) ? "Y" : "N");
22d3efe5 2991 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
e7434821
GC
2992 (priv->dma_cap.eee) ? "Y" : "N");
2993 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
2994 seq_printf(seq, "\tChecksum Offload in TX: %s\n",
2995 (priv->dma_cap.tx_coe) ? "Y" : "N");
f748be53
AT
2996 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2997 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
2998 (priv->dma_cap.rx_coe) ? "Y" : "N");
2999 } else {
3000 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3001 (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
3002 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
3003 (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
3004 }
e7434821
GC
3005 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
3006 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
3007 seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
3008 priv->dma_cap.number_rx_channel);
3009 seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
3010 priv->dma_cap.number_tx_channel);
3011 seq_printf(seq, "\tEnhanced descriptors: %s\n",
3012 (priv->dma_cap.enh_desc) ? "Y" : "N");
3013
3014 return 0;
3015}
3016
3017static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
3018{
3019 return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
3020}
3021
3022static const struct file_operations stmmac_dma_cap_fops = {
3023 .owner = THIS_MODULE,
3024 .open = stmmac_sysfs_dma_cap_open,
3025 .read = seq_read,
3026 .llseek = seq_lseek,
74863948 3027 .release = single_release,
e7434821
GC
3028};
3029
7ac29055
GC
3030static int stmmac_init_fs(struct net_device *dev)
3031{
466c5ac8
MO
3032 struct stmmac_priv *priv = netdev_priv(dev);
3033
3034 /* Create per netdev entries */
3035 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
7ac29055 3036
466c5ac8 3037 if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
38ddc59d 3038 netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
7ac29055
GC
3039
3040 return -ENOMEM;
3041 }
3042
3043 /* Entry to report DMA RX/TX rings */
466c5ac8
MO
3044 priv->dbgfs_rings_status =
3045 debugfs_create_file("descriptors_status", S_IRUGO,
3046 priv->dbgfs_dir, dev,
3047 &stmmac_rings_status_fops);
7ac29055 3048
466c5ac8 3049 if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
38ddc59d 3050 netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
466c5ac8 3051 debugfs_remove_recursive(priv->dbgfs_dir);
7ac29055
GC
3052
3053 return -ENOMEM;
3054 }
3055
e7434821 3056 /* Entry to report the DMA HW features */
466c5ac8
MO
3057 priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO,
3058 priv->dbgfs_dir,
3059 dev, &stmmac_dma_cap_fops);
e7434821 3060
466c5ac8 3061 if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
38ddc59d 3062 netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
466c5ac8 3063 debugfs_remove_recursive(priv->dbgfs_dir);
e7434821
GC
3064
3065 return -ENOMEM;
3066 }
3067
7ac29055
GC
3068 return 0;
3069}
3070
466c5ac8 3071static void stmmac_exit_fs(struct net_device *dev)
7ac29055 3072{
466c5ac8
MO
3073 struct stmmac_priv *priv = netdev_priv(dev);
3074
3075 debugfs_remove_recursive(priv->dbgfs_dir);
7ac29055 3076}
50fb4f74 3077#endif /* CONFIG_DEBUG_FS */
7ac29055 3078
47dd7a54
GC
3079static const struct net_device_ops stmmac_netdev_ops = {
3080 .ndo_open = stmmac_open,
3081 .ndo_start_xmit = stmmac_xmit,
3082 .ndo_stop = stmmac_release,
3083 .ndo_change_mtu = stmmac_change_mtu,
5e982f3b 3084 .ndo_fix_features = stmmac_fix_features,
d2afb5bd 3085 .ndo_set_features = stmmac_set_features,
01789349 3086 .ndo_set_rx_mode = stmmac_set_rx_mode,
47dd7a54
GC
3087 .ndo_tx_timeout = stmmac_tx_timeout,
3088 .ndo_do_ioctl = stmmac_ioctl,
47dd7a54
GC
3089#ifdef CONFIG_NET_POLL_CONTROLLER
3090 .ndo_poll_controller = stmmac_poll_controller,
3091#endif
3092 .ndo_set_mac_address = eth_mac_addr,
3093};
3094
cf3f047b
GC
3095/**
3096 * stmmac_hw_init - Init the MAC device
32ceabca 3097 * @priv: driver private structure
732fdf0e
GC
3098 * Description: this function configures the MAC device according to
3099 * platform parameters or the HW capability register. It prepares the
3100 * driver to use either the ring or the chain mode and to set up either
3101 * enhanced or normal descriptors.
cf3f047b
GC
3102 */
3103static int stmmac_hw_init(struct stmmac_priv *priv)
3104{
cf3f047b
GC
3105 struct mac_device_info *mac;
3106
3107 /* Identify the MAC HW device */
03f2eecd
MKB
3108 if (priv->plat->has_gmac) {
3109 priv->dev->priv_flags |= IFF_UNICAST_FLT;
3b57de95
VB
3110 mac = dwmac1000_setup(priv->ioaddr,
3111 priv->plat->multicast_filter_bins,
c623d149
AT
3112 priv->plat->unicast_filter_entries,
3113 &priv->synopsys_id);
f748be53
AT
3114 } else if (priv->plat->has_gmac4) {
3115 priv->dev->priv_flags |= IFF_UNICAST_FLT;
3116 mac = dwmac4_setup(priv->ioaddr,
3117 priv->plat->multicast_filter_bins,
3118 priv->plat->unicast_filter_entries,
3119 &priv->synopsys_id);
03f2eecd 3120 } else {
c623d149 3121 mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
03f2eecd 3122 }
cf3f047b
GC
3123 if (!mac)
3124 return -ENOMEM;
3125
3126 priv->hw = mac;
3127
4a7d666a 3128 /* To use the chained or ring mode */
f748be53
AT
3129 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3130 priv->hw->mode = &dwmac4_ring_mode_ops;
4a7d666a 3131 } else {
f748be53
AT
3132 if (chain_mode) {
3133 priv->hw->mode = &chain_mode_ops;
38ddc59d 3134 dev_info(priv->device, "Chain mode enabled\n");
f748be53
AT
3135 priv->mode = STMMAC_CHAIN_MODE;
3136 } else {
3137 priv->hw->mode = &ring_mode_ops;
38ddc59d 3138 dev_info(priv->device, "Ring mode enabled\n");
f748be53
AT
3139 priv->mode = STMMAC_RING_MODE;
3140 }
4a7d666a
GC
3141 }
3142
cf3f047b
GC
3143 /* Get the HW capability (new GMAC newer than 3.50a) */
3144 priv->hw_cap_support = stmmac_get_hw_features(priv);
3145 if (priv->hw_cap_support) {
38ddc59d 3146 dev_info(priv->device, "DMA HW capability register supported\n");
cf3f047b
GC
3147
3148 /* We can override some gmac/dma configuration fields (e.g.
3149 * enh_desc, tx_coe) that are passed through the
3150 * platform, with the values from the HW capability
3151 * register (if supported).
3152 */
3153 priv->plat->enh_desc = priv->dma_cap.enh_desc;
cf3f047b 3154 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
3fe5cadb 3155 priv->hw->pmt = priv->plat->pmt;
38912bdb 3156
a8df35d4
EG
3157 /* TXCOE doesn't work in thresh DMA mode */
3158 if (priv->plat->force_thresh_dma_mode)
3159 priv->plat->tx_coe = 0;
3160 else
3161 priv->plat->tx_coe = priv->dma_cap.tx_coe;
3162
f748be53
AT
3163 /* In case of GMAC4 rx_coe is from HW cap register. */
3164 priv->plat->rx_coe = priv->dma_cap.rx_coe;
38912bdb
DS
3165
3166 if (priv->dma_cap.rx_coe_type2)
3167 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
3168 else if (priv->dma_cap.rx_coe_type1)
3169 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
3170
38ddc59d
LC
3171 } else {
3172 dev_info(priv->device, "No HW DMA feature register supported\n");
3173 }
cf3f047b 3174
f748be53
AT
3175 /* To use alternate (extended), normal or GMAC4 descriptor structures */
3176 if (priv->synopsys_id >= DWMAC_CORE_4_00)
3177 priv->hw->desc = &dwmac4_desc_ops;
3178 else
3179 stmmac_selec_desc_mode(priv);
61369d02 3180
d2afb5bd
GC
3181 if (priv->plat->rx_coe) {
3182 priv->hw->rx_csum = priv->plat->rx_coe;
38ddc59d 3183 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
f748be53 3184 if (priv->synopsys_id < DWMAC_CORE_4_00)
38ddc59d 3185 dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
d2afb5bd 3186 }
cf3f047b 3187 if (priv->plat->tx_coe)
38ddc59d 3188 dev_info(priv->device, "TX Checksum insertion supported\n");
cf3f047b
GC
3189
3190 if (priv->plat->pmt) {
38ddc59d 3191 dev_info(priv->device, "Wake-Up On Lan supported\n");
cf3f047b
GC
3192 device_set_wakeup_capable(priv->device, 1);
3193 }
3194
f748be53 3195 if (priv->dma_cap.tsoen)
38ddc59d 3196 dev_info(priv->device, "TSO supported\n");
f748be53 3197
c24602ef 3198 return 0;
cf3f047b
GC
3199}
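
/* Summary (illustrative, derived from the function above): when the HW
 * capability register is present it overrides the platform data for
 * enh_desc, pmt and the RX/TX COE fields; force_thresh_dma_mode is the one
 * platform knob that still wins, by zeroing tx_coe unconditionally.
 */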
3200
47dd7a54 3201/**
bfab27a1
GC
3202 * stmmac_dvr_probe
3203 * @device: device pointer
ff3dd78c 3204 * @plat_dat: platform data pointer
e56788cf 3205 * @res: stmmac resource pointer
bfab27a1
GC
3206 * Description: this is the main probe function used to
3207 * call alloc_etherdev and allocate the private structure.
9afec6ef 3208 * Return:
15ffac73 3209 * returns 0 on success, otherwise errno.
47dd7a54 3210 */
15ffac73
JE
3211int stmmac_dvr_probe(struct device *device,
3212 struct plat_stmmacenet_data *plat_dat,
3213 struct stmmac_resources *res)
47dd7a54
GC
3214{
3215 int ret = 0;
bfab27a1
GC
3216 struct net_device *ndev = NULL;
3217 struct stmmac_priv *priv;
47dd7a54 3218
bfab27a1 3219 ndev = alloc_etherdev(sizeof(struct stmmac_priv));
41de8d4c 3220 if (!ndev)
15ffac73 3221 return -ENOMEM;
bfab27a1
GC
3222
3223 SET_NETDEV_DEV(ndev, device);
3224
3225 priv = netdev_priv(ndev);
3226 priv->device = device;
3227 priv->dev = ndev;
47dd7a54 3228
bfab27a1 3229 stmmac_set_ethtool_ops(ndev);
cf3f047b
GC
3230 priv->pause = pause;
3231 priv->plat = plat_dat;
e56788cf
JE
3232 priv->ioaddr = res->addr;
3233 priv->dev->base_addr = (unsigned long)res->addr;
3234
3235 priv->dev->irq = res->irq;
3236 priv->wol_irq = res->wol_irq;
3237 priv->lpi_irq = res->lpi_irq;
3238
3239 if (res->mac)
3240 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
cf3f047b 3241
a7a62685 3242 dev_set_drvdata(device, priv->dev);
803f8fc4 3243
cf3f047b
GC
3244 /* Verify driver arguments */
3245 stmmac_verify_args();
bfab27a1 3246
cf3f047b 3247 /* Override with kernel parameters if supplied XXX CRS XXX
ceb69499
GC
3248 * this needs to have multiple instances
3249 */
cf3f047b
GC
3250 if ((phyaddr >= 0) && (phyaddr <= 31))
3251 priv->plat->phy_addr = phyaddr;
3252
f573c0b9 3253 if (priv->plat->stmmac_rst)
3254 reset_control_deassert(priv->plat->stmmac_rst);
c5e4ddbd 3255
cf3f047b 3256 /* Init MAC and get the capabilities */
c24602ef
GC
3257 ret = stmmac_hw_init(priv);
3258 if (ret)
62866e98 3259 goto error_hw_init;
cf3f047b
GC
3260
3261 ndev->netdev_ops = &stmmac_netdev_ops;
bfab27a1 3262
cf3f047b
GC
3263 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3264 NETIF_F_RXCSUM;
f748be53
AT
3265
3266 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3267 ndev->hw_features |= NETIF_F_TSO;
3268 priv->tso = true;
38ddc59d 3269 dev_info(priv->device, "TSO feature enabled\n");
f748be53 3270 }
bfab27a1
GC
3271 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
3272 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
47dd7a54
GC
3273#ifdef STMMAC_VLAN_TAG_USED
3274 /* Both mac100 and gmac support receive VLAN tag detection */
f646968f 3275 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
47dd7a54
GC
3276#endif
3277 priv->msg_enable = netif_msg_init(debug, default_msg_level);
3278
44770e11
JW
3279 /* MTU range: 46 - hw-specific max */
3280 ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
3281 if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
3282 ndev->max_mtu = JUMBO_LEN;
3283 else
3284 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
a2cd64f3
KHL
3285 /* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
3286 * or if plat->maxmtu < ndev->min_mtu, which is an invalid range.
3287 */
3288 if ((priv->plat->maxmtu < ndev->max_mtu) &&
3289 (priv->plat->maxmtu >= ndev->min_mtu))
44770e11 3290 ndev->max_mtu = priv->plat->maxmtu;
a2cd64f3 3291 else if (priv->plat->maxmtu < ndev->min_mtu)
b618ab45
HK
3292 dev_warn(priv->device,
3293 "%s: warning: maxmtu having invalid value (%d)\n",
3294 __func__, priv->plat->maxmtu);
44770e11 3295
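/* Worked example (illustrative): with enh_desc set, max_mtu starts at
 * JUMBO_LEN; a platform passing maxmtu = 2000 narrows it to 2000,
 * while a maxmtu below the 46-byte min_mtu is ignored and only
 * triggers the warning above.
 */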
47dd7a54
GC
3296 if (flow_ctrl)
3297 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
3298
62a2ab93
GC
3299 /* Rx Watchdog is available in cores newer than 3.40.
3300 * In some cases, for example on buggy HW, this feature
3301 * has to be disabled; this can be done by passing the
3302 * riwt_off field from the platform.
3303 */
3304 if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
3305 priv->use_riwt = 1;
b618ab45
HK
3306 dev_info(priv->device,
3307 "Enable RX Mitigation via HW Watchdog Timer\n");
62a2ab93
GC
3308 }
3309
bfab27a1 3310 netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
47dd7a54 3311
f8e96161
VL
3312 spin_lock_init(&priv->lock);
3313
cd7201f4
GC
3314 /* If a specific clk_csr value is passed from the platform,
3315 * the CSR Clock Range selection cannot be changed at
3316 * run-time and is fixed. Otherwise, the driver will try to
3317 * set the MDC clock dynamically according to the actual
3318 * CSR clock input.
3319 */
3320 if (!priv->plat->clk_csr)
3321 stmmac_clk_csr_set(priv);
3322 else
3323 priv->clk_csr = priv->plat->clk_csr;
3324
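/* Illustrative example: a glue driver with a fixed CSR clock can pin
 * the MDC divider by setting plat->clk_csr up front (e.g. to
 * STMMAC_CSR_100_150M) instead of relying on stmmac_clk_csr_set().
 */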
e58bb43f
GC
3325 stmmac_check_pcs_mode(priv);
3326
3fe5cadb
GC
3327 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
3328 priv->hw->pcs != STMMAC_PCS_TBI &&
3329 priv->hw->pcs != STMMAC_PCS_RTBI) {
e58bb43f
GC
3330 /* MDIO bus Registration */
3331 ret = stmmac_mdio_register(ndev);
3332 if (ret < 0) {
b618ab45
HK
3333 dev_err(priv->device,
3334 "%s: MDIO bus (id: %d) registration failed",
3335 __func__, priv->plat->bus_id);
e58bb43f
GC
3336 goto error_mdio_register;
3337 }
4bfcbd7a
FV
3338 }
3339
57016590 3340 ret = register_netdev(ndev);
b2eb09af 3341 if (ret) {
b618ab45
HK
3342 dev_err(priv->device, "%s: ERROR %i registering the device\n",
3343 __func__, ret);
b2eb09af
FF
3344 goto error_netdev_register;
3345 }
57016590
FF
3346
3347 return ret;
47dd7a54 3348
6a81c26f 3349error_netdev_register:
b2eb09af
FF
3350 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
3351 priv->hw->pcs != STMMAC_PCS_TBI &&
3352 priv->hw->pcs != STMMAC_PCS_RTBI)
3353 stmmac_mdio_unregister(ndev);
6a81c26f 3354error_mdio_register:
6a81c26f 3355 netif_napi_del(&priv->napi);
62866e98 3356error_hw_init:
34a52f36 3357 free_netdev(ndev);
47dd7a54 3358
15ffac73 3359 return ret;
47dd7a54 3360}
b2e2f0c7 3361EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
47dd7a54
GC
3362
3363/**
3364 * stmmac_dvr_remove
f4e7bd81 3365 * @dev: device pointer
47dd7a54 3366 * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
bfab27a1 3367 * changes the link status and releases the DMA descriptor rings.
47dd7a54 3368 */
f4e7bd81 3369int stmmac_dvr_remove(struct device *dev)
47dd7a54 3370{
f4e7bd81 3371 struct net_device *ndev = dev_get_drvdata(dev);
aec7ff27 3372 struct stmmac_priv *priv = netdev_priv(ndev);
47dd7a54 3373
38ddc59d 3374 netdev_info(priv->dev, "%s: removing driver\n", __func__);
47dd7a54 3375
ad01b7d4
GC
3376 priv->hw->dma->stop_rx(priv->ioaddr);
3377 priv->hw->dma->stop_tx(priv->ioaddr);
47dd7a54 3378
bfab27a1 3379 stmmac_set_mac(priv->ioaddr, false);
47dd7a54 3380 netif_carrier_off(ndev);
47dd7a54 3381 unregister_netdev(ndev);
f573c0b9 3382 if (priv->plat->stmmac_rst)
3383 reset_control_assert(priv->plat->stmmac_rst);
3384 clk_disable_unprepare(priv->plat->pclk);
3385 clk_disable_unprepare(priv->plat->stmmac_clk);
3fe5cadb
GC
3386 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
3387 priv->hw->pcs != STMMAC_PCS_TBI &&
3388 priv->hw->pcs != STMMAC_PCS_RTBI)
e743471f 3389 stmmac_mdio_unregister(ndev);
47dd7a54
GC
3390 free_netdev(ndev);
3391
3392 return 0;
3393}
b2e2f0c7 3394EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
47dd7a54 3395
732fdf0e
GC
3396/**
3397 * stmmac_suspend - suspend callback
f4e7bd81 3398 * @dev: device pointer
732fdf0e
GC
3399 * Description: this is the function to suspend the device; it is called
3400 * by the platform driver to stop the network queue, program the PMT
3401 * register (for WoL), and clean up and release the driver resources.
3402 */
f4e7bd81 3403int stmmac_suspend(struct device *dev)
47dd7a54 3404{
f4e7bd81 3405 struct net_device *ndev = dev_get_drvdata(dev);
874bd42d 3406 struct stmmac_priv *priv = netdev_priv(ndev);
f8c5a875 3407 unsigned long flags;
47dd7a54 3408
874bd42d 3409 if (!ndev || !netif_running(ndev))
47dd7a54
GC
3410 return 0;
3411
d6d50c7e
PR
3412 if (ndev->phydev)
3413 phy_stop(ndev->phydev);
102463b1 3414
f8c5a875 3415 spin_lock_irqsave(&priv->lock, flags);
47dd7a54 3416
874bd42d
GC
3417 netif_device_detach(ndev);
3418 netif_stop_queue(ndev);
47dd7a54 3419
874bd42d
GC
3420 napi_disable(&priv->napi);
3421
3422 /* Stop TX/RX DMA */
3423 priv->hw->dma->stop_tx(priv->ioaddr);
3424 priv->hw->dma->stop_rx(priv->ioaddr);
c24602ef 3425
874bd42d 3426 /* Enable Power down mode by programming the PMT regs */
89f7f2cf 3427 if (device_may_wakeup(priv->device)) {
7ed24bbe 3428 priv->hw->mac->pmt(priv->hw, priv->wolopts);
89f7f2cf
SK
3429 priv->irq_wake = 1;
3430 } else {
bfab27a1 3431 stmmac_set_mac(priv->ioaddr, false);
db88f10a 3432 pinctrl_pm_select_sleep_state(priv->device);
ba1377ff 3433 /* Disable clocks in case PMT (wake-up) is off */
f573c0b9 3434 clk_disable(priv->plat->pclk);
3435 clk_disable(priv->plat->stmmac_clk);
ba1377ff 3436 }
f8c5a875 3437 spin_unlock_irqrestore(&priv->lock, flags);
2d871aa0
VB
3438
3439 priv->oldlink = 0;
bd00632c
LC
3440 priv->speed = SPEED_UNKNOWN;
3441 priv->oldduplex = DUPLEX_UNKNOWN;
47dd7a54
GC
3442 return 0;
3443}
b2e2f0c7 3444EXPORT_SYMBOL_GPL(stmmac_suspend);
47dd7a54 3445
732fdf0e
GC
3446/**
3447 * stmmac_resume - resume callback
f4e7bd81 3448 * @dev: device pointer
732fdf0e
GC
3449 * Description: on resume, this function is invoked to set up the DMA and CORE
3450 * in a usable state.
3451 */
f4e7bd81 3452int stmmac_resume(struct device *dev)
47dd7a54 3453{
f4e7bd81 3454 struct net_device *ndev = dev_get_drvdata(dev);
874bd42d 3455 struct stmmac_priv *priv = netdev_priv(ndev);
f8c5a875 3456 unsigned long flags;
47dd7a54 3457
874bd42d 3458 if (!netif_running(ndev))
47dd7a54
GC
3459 return 0;
3460
47dd7a54
GC
3461 /* The Power Down bit in the PM register is cleared
3462 * automatically as soon as a magic packet or a Wake-up frame
3463 * is received. Even so, it's better to manually clear
3464 * this bit because it can generate problems while resuming
ceb69499
GC
3465 * from other devices (e.g. a serial console).
3466 */
623997fb 3467 if (device_may_wakeup(priv->device)) {
f55d84b0 3468 spin_lock_irqsave(&priv->lock, flags);
7ed24bbe 3469 priv->hw->mac->pmt(priv->hw, 0);
f55d84b0 3470 spin_unlock_irqrestore(&priv->lock, flags);
89f7f2cf 3471 priv->irq_wake = 0;
623997fb 3472 } else {
db88f10a 3473 pinctrl_pm_select_default_state(priv->device);
8d45e42b 3474 /* Enable the clocks previously disabled */
f573c0b9 3475 clk_enable(priv->plat->stmmac_clk);
3476 clk_enable(priv->plat->pclk);
623997fb
SK
3477 /* reset the phy so that it's ready */
3478 if (priv->mii)
3479 stmmac_mdio_reset(priv->mii);
3480 }
47dd7a54 3481
874bd42d 3482 netif_device_attach(ndev);
47dd7a54 3483
f55d84b0
VP
3484 spin_lock_irqsave(&priv->lock, flags);
3485
ae79a639
GC
3486 priv->cur_rx = 0;
3487 priv->dirty_rx = 0;
3488 priv->dirty_tx = 0;
3489 priv->cur_tx = 0;
f748be53
AT
3490 /* reset private mss value to force mss context settings at
3491 * next tso xmit (only used for gmac4).
3492 */
3493 priv->mss = 0;
3494
ae79a639
GC
3495 stmmac_clear_descriptors(priv);
3496
fe131929 3497 stmmac_hw_setup(ndev, false);
777da230 3498 stmmac_init_tx_coalesce(priv);
ac316c78 3499 stmmac_set_rx_mode(ndev);
47dd7a54 3500
47dd7a54
GC
3501 napi_enable(&priv->napi);
3502
874bd42d 3503 netif_start_queue(ndev);
47dd7a54 3504
f8c5a875 3505 spin_unlock_irqrestore(&priv->lock, flags);
102463b1 3506
d6d50c7e
PR
3507 if (ndev->phydev)
3508 phy_start(ndev->phydev);
102463b1 3509
47dd7a54
GC
3510 return 0;
3511}
b2e2f0c7 3512EXPORT_SYMBOL_GPL(stmmac_resume);
ba27ec66 3513
47dd7a54
GC
3514#ifndef MODULE
3515static int __init stmmac_cmdline_opt(char *str)
3516{
3517 char *opt;
3518
3519 if (!str || !*str)
3520 return -EINVAL;
3521 while ((opt = strsep(&str, ",")) != NULL) {
f3240e28 3522 if (!strncmp(opt, "debug:", 6)) {
ea2ab871 3523 if (kstrtoint(opt + 6, 0, &debug))
f3240e28
GC
3524 goto err;
3525 } else if (!strncmp(opt, "phyaddr:", 8)) {
ea2ab871 3526 if (kstrtoint(opt + 8, 0, &phyaddr))
f3240e28 3527 goto err;
f3240e28 3528 } else if (!strncmp(opt, "buf_sz:", 7)) {
ea2ab871 3529 if (kstrtoint(opt + 7, 0, &buf_sz))
f3240e28
GC
3530 goto err;
3531 } else if (!strncmp(opt, "tc:", 3)) {
ea2ab871 3532 if (kstrtoint(opt + 3, 0, &tc))
f3240e28
GC
3533 goto err;
3534 } else if (!strncmp(opt, "watchdog:", 9)) {
ea2ab871 3535 if (kstrtoint(opt + 9, 0, &watchdog))
f3240e28
GC
3536 goto err;
3537 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
ea2ab871 3538 if (kstrtoint(opt + 10, 0, &flow_ctrl))
f3240e28
GC
3539 goto err;
3540 } else if (!strncmp(opt, "pause:", 6)) {
ea2ab871 3541 if (kstrtoint(opt + 6, 0, &pause))
f3240e28 3542 goto err;
506f669c 3543 } else if (!strncmp(opt, "eee_timer:", 10)) {
d765955d
GC
3544 if (kstrtoint(opt + 10, 0, &eee_timer))
3545 goto err;
4a7d666a
GC
3546 } else if (!strncmp(opt, "chain_mode:", 11)) {
3547 if (kstrtoint(opt + 11, 0, &chain_mode))
3548 goto err;
f3240e28 3549 }
47dd7a54
GC
3550 }
3551 return 0;
f3240e28
GC
3552
3553err:
3554 pr_err("%s: ERROR broken module parameter conversion\n", __func__);
3555 return -EINVAL;
47dd7a54
GC
3556}
3557
3558__setup("stmmaceth=", stmmac_cmdline_opt);
ceb69499 3559#endif /* MODULE */
6fc0d0f2 3560
466c5ac8
MO
3561static int __init stmmac_init(void)
3562{
3563#ifdef CONFIG_DEBUG_FS
3564 /* Create debugfs main directory if it doesn't exist yet */
3565 if (!stmmac_fs_dir) {
3566 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
3567
3568 if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
3569 pr_err("ERROR %s, debugfs create directory failed\n",
3570 STMMAC_RESOURCE_NAME);
3571
3572 return -ENOMEM;
3573 }
3574 }
3575#endif
3576
3577 return 0;
3578}
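/* Illustrative note: once debugfs is mounted, the directory created
 * above appears as /sys/kernel/debug/stmmaceth (STMMAC_RESOURCE_NAME);
 * per-device entries such as the descriptor ring and DMA capability
 * dumps are created beneath it.
 */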
3579
3580static void __exit stmmac_exit(void)
3581{
3582#ifdef CONFIG_DEBUG_FS
3583 debugfs_remove_recursive(stmmac_fs_dir);
3584#endif
3585}
3586
3587module_init(stmmac_init)
3588module_exit(stmmac_exit)
3589
6fc0d0f2
GC
3590MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
3591MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
3592MODULE_LICENSE("GPL");