git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
stmmac: rework get_hw_feature function
[mirror_ubuntu-bionic-kernel.git] / drivers / net / ethernet / stmicro / stmmac / stmmac_main.c
47dd7a54
GC
1/*******************************************************************************
2 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3 ST Ethernet IPs are built around a Synopsys IP Core.
4
286a8372 5 Copyright(C) 2007-2011 STMicroelectronics Ltd
47dd7a54
GC
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 You should have received a copy of the GNU General Public License along with
17 this program; if not, write to the Free Software Foundation, Inc.,
18 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19
20 The full GNU General Public License is included in this distribution in
21 the file called "COPYING".
22
23 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
24
25 Documentation available at:
26 http://www.stlinux.com
27 Support available at:
28 https://bugzilla.stlinux.com/
29*******************************************************************************/
30
6a81c26f 31#include <linux/clk.h>
47dd7a54
GC
32#include <linux/kernel.h>
33#include <linux/interrupt.h>
47dd7a54
GC
34#include <linux/ip.h>
35#include <linux/tcp.h>
36#include <linux/skbuff.h>
37#include <linux/ethtool.h>
38#include <linux/if_ether.h>
39#include <linux/crc32.h>
40#include <linux/mii.h>
01789349 41#include <linux/if.h>
47dd7a54
GC
42#include <linux/if_vlan.h>
43#include <linux/dma-mapping.h>
5a0e3ad6 44#include <linux/slab.h>
70c71606 45#include <linux/prefetch.h>
db88f10a 46#include <linux/pinctrl/consumer.h>
50fb4f74 47#ifdef CONFIG_DEBUG_FS
7ac29055
GC
48#include <linux/debugfs.h>
49#include <linux/seq_file.h>
50fb4f74 50#endif /* CONFIG_DEBUG_FS */
891434b1
RK
51#include <linux/net_tstamp.h>
52#include "stmmac_ptp.h"
286a8372 53#include "stmmac.h"
c5e4ddbd 54#include <linux/reset.h>
5790cf3c 55#include <linux/of_mdio.h>
19d857c9 56#include "dwmac1000.h"
47dd7a54 57
47dd7a54 58#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
47dd7a54
GC
59
60/* Module parameters */
32ceabca 61#define TX_TIMEO 5000
47dd7a54
GC
62static int watchdog = TX_TIMEO;
63module_param(watchdog, int, S_IRUGO | S_IWUSR);
32ceabca 64MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
47dd7a54 65
32ceabca 66static int debug = -1;
47dd7a54 67module_param(debug, int, S_IRUGO | S_IWUSR);
32ceabca 68MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
47dd7a54 69
47d1f71f 70static int phyaddr = -1;
47dd7a54
GC
71module_param(phyaddr, int, S_IRUGO);
72MODULE_PARM_DESC(phyaddr, "Physical device address");
73
e3ad57c9 74#define STMMAC_TX_THRESH (DMA_TX_SIZE / 4)
120e87f9 75#define STMMAC_RX_THRESH (DMA_RX_SIZE / 4)
47dd7a54
GC
76
77static int flow_ctrl = FLOW_OFF;
78module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
79MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
80
81static int pause = PAUSE_TIME;
82module_param(pause, int, S_IRUGO | S_IWUSR);
83MODULE_PARM_DESC(pause, "Flow Control Pause Time");
84
85#define TC_DEFAULT 64
86static int tc = TC_DEFAULT;
87module_param(tc, int, S_IRUGO | S_IWUSR);
88MODULE_PARM_DESC(tc, "DMA threshold control value");
89
d916701c
GC
90#define DEFAULT_BUFSIZE 1536
91static int buf_sz = DEFAULT_BUFSIZE;
47dd7a54
GC
92module_param(buf_sz, int, S_IRUGO | S_IWUSR);
93MODULE_PARM_DESC(buf_sz, "DMA buffer size");
94
22ad3838
GC
95#define STMMAC_RX_COPYBREAK 256
96
47dd7a54
GC
97static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
98 NETIF_MSG_LINK | NETIF_MSG_IFUP |
99 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
100
d765955d
GC
101#define STMMAC_DEFAULT_LPI_TIMER 1000
102static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
103module_param(eee_timer, int, S_IRUGO | S_IWUSR);
104MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
f5351ef7 105#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
d765955d 106
4a7d666a
GC
107/* By default the driver will use the ring mode to manage tx and rx descriptors
108 * but this parameter lets the user force the use of the chain mode instead of the ring
109 */
110static unsigned int chain_mode;
111module_param(chain_mode, int, S_IRUGO);
112MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
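/*
 * Illustrative usage of the parameters above (hypothetical values): when
 * the driver is built as a module they can be set at load time, e.g.
 * "modprobe stmmac debug=16 buf_sz=4096 chain_mode=1", and the entries
 * declared with S_IWUSR can later be changed through
 * /sys/module/stmmac/parameters/.
 */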
113
47dd7a54 114static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
47dd7a54 115
50fb4f74 116#ifdef CONFIG_DEBUG_FS
bfab27a1 117static int stmmac_init_fs(struct net_device *dev);
466c5ac8 118static void stmmac_exit_fs(struct net_device *dev);
bfab27a1
GC
119#endif
120
9125cdd1
GC
121#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
122
47dd7a54
GC
123/**
124 * stmmac_verify_args - verify the driver parameters.
732fdf0e
GC
125 * Description: it checks the driver parameters and sets a default in case of
126 * errors.
47dd7a54
GC
127 */
128static void stmmac_verify_args(void)
129{
130 if (unlikely(watchdog < 0))
131 watchdog = TX_TIMEO;
d916701c
GC
132 if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
133 buf_sz = DEFAULT_BUFSIZE;
47dd7a54
GC
134 if (unlikely(flow_ctrl > 1))
135 flow_ctrl = FLOW_AUTO;
136 else if (likely(flow_ctrl < 0))
137 flow_ctrl = FLOW_OFF;
138 if (unlikely((pause < 0) || (pause > 0xffff)))
139 pause = PAUSE_TIME;
d765955d
GC
140 if (eee_timer < 0)
141 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
47dd7a54
GC
142}
143
32ceabca
GC
144/**
145 * stmmac_clk_csr_set - dynamically set the MDC clock
146 * @priv: driver private structure
147 * Description: this is to dynamically set the MDC clock according to the csr
148 * clock input.
149 * Note:
150 * If a specific clk_csr value is passed from the platform
151 * this means that the CSR Clock Range selection cannot be
152 * changed at run-time and it is fixed (as reported in the driver
153 * documentation). Otherwise the driver will try to set the MDC
154 * clock dynamically according to the actual clock input.
155 */
cd7201f4
GC
156static void stmmac_clk_csr_set(struct stmmac_priv *priv)
157{
cd7201f4
GC
158 u32 clk_rate;
159
160 clk_rate = clk_get_rate(priv->stmmac_clk);
161
162 /* The platform-provided default clk_csr is assumed valid
ceb69499
GC
163 * for all other cases except for the ones mentioned below.
164 * For values higher than the IEEE 802.3 specified frequency
165 * we cannot estimate the proper divider because the frequency
166 * of clk_csr_i is not known. So we do not change the default
167 * divider.
168 */
cd7201f4
GC
169 if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
170 if (clk_rate < CSR_F_35M)
171 priv->clk_csr = STMMAC_CSR_20_35M;
172 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
173 priv->clk_csr = STMMAC_CSR_35_60M;
174 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
175 priv->clk_csr = STMMAC_CSR_60_100M;
176 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
177 priv->clk_csr = STMMAC_CSR_100_150M;
178 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
179 priv->clk_csr = STMMAC_CSR_150_250M;
19d857c9 180 else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
cd7201f4 181 priv->clk_csr = STMMAC_CSR_250_300M;
ceb69499 182 }
cd7201f4
GC
183}
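/*
 * Example (illustrative): with a 75 MHz csr clock the 60-100 MHz range is
 * chosen, so priv->clk_csr becomes STMMAC_CSR_60_100M and the MDIO code
 * derives MDC with the divider the core associates with that range
 * (clk_csr_i/42 on GMAC, according to the Synopsys databook).
 */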
184
47dd7a54
GC
185static void print_pkt(unsigned char *buf, int len)
186{
424c4f78
AS
187 pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
188 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
47dd7a54 189}
47dd7a54 190
47dd7a54
GC
191static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
192{
e3ad57c9
GC
193 unsigned avail;
194
195 if (priv->dirty_tx > priv->cur_tx)
196 avail = priv->dirty_tx - priv->cur_tx - 1;
197 else
198 avail = DMA_TX_SIZE - priv->cur_tx + priv->dirty_tx - 1;
199
200 return avail;
201}
202
203static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv)
204{
205 unsigned dirty;
206
207 if (priv->dirty_rx <= priv->cur_rx)
208 dirty = priv->cur_rx - priv->dirty_rx;
209 else
210 dirty = DMA_RX_SIZE - priv->dirty_rx + priv->cur_rx;
211
212 return dirty;
47dd7a54
GC
213}
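/*
 * Worked example for the ring arithmetic in the two helpers above
 * (assuming the usual DMA_TX_SIZE of 512): with cur_tx = 510 and
 * dirty_tx = 2 the indexes have wrapped, so
 * avail = 512 - 510 + 2 - 1 = 3 free slots; the "- 1" keeps one slot
 * unused so that a full ring is not confused with an empty one.
 */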
214
32ceabca 215/**
732fdf0e 216 * stmmac_hw_fix_mac_speed - callback for speed selection
32ceabca
GC
217 * @priv: driver private structure
218 * Description: on some platforms (e.g. ST), some HW system configuration
219 * registers have to be set according to the link speed negotiated.
9dfeb4d9
GC
220 */
221static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
222{
223 struct phy_device *phydev = priv->phydev;
224
225 if (likely(priv->plat->fix_mac_speed))
ceb69499 226 priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
9dfeb4d9
GC
227}
228
32ceabca 229/**
732fdf0e 230 * stmmac_enable_eee_mode - check and enter in LPI mode
32ceabca 231 * @priv: driver private structure
732fdf0e
GC
232 * Description: this function checks the conditions and enters the LPI
233 * mode in case of EEE.
32ceabca 234 */
d765955d
GC
235static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
236{
237 /* Check and enter in LPI mode */
238 if ((priv->dirty_tx == priv->cur_tx) &&
239 (priv->tx_path_in_lpi_mode == false))
7ed24bbe 240 priv->hw->mac->set_eee_mode(priv->hw);
d765955d
GC
241}
242
32ceabca 243/**
732fdf0e 244 * stmmac_disable_eee_mode - disable and exit from LPI mode
32ceabca
GC
245 * @priv: driver private structure
246 * Description: this function exits from and disables EEE when the LPI
247 * state is true. It is called by the xmit path.
248 */
d765955d
GC
249void stmmac_disable_eee_mode(struct stmmac_priv *priv)
250{
7ed24bbe 251 priv->hw->mac->reset_eee_mode(priv->hw);
d765955d
GC
252 del_timer_sync(&priv->eee_ctrl_timer);
253 priv->tx_path_in_lpi_mode = false;
254}
255
256/**
732fdf0e 257 * stmmac_eee_ctrl_timer - EEE TX SW timer.
d765955d
GC
258 * @arg : data hook
259 * Description:
32ceabca 260 * if there is no data transfer and if we are not in LPI state,
d765955d
GC
261 * then the MAC Transmitter can be moved to the LPI state.
262 */
263static void stmmac_eee_ctrl_timer(unsigned long arg)
264{
265 struct stmmac_priv *priv = (struct stmmac_priv *)arg;
266
267 stmmac_enable_eee_mode(priv);
f5351ef7 268 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
d765955d
GC
269}
270
271/**
732fdf0e 272 * stmmac_eee_init - init EEE
32ceabca 273 * @priv: driver private structure
d765955d 274 * Description:
732fdf0e
GC
275 * if the GMAC supports the EEE (from the HW cap reg) and the phy device
276 * can also manage EEE, this function enables the LPI state and starts the
277 * related timer.
d765955d
GC
278 */
279bool stmmac_eee_init(struct stmmac_priv *priv)
280{
4741cf9c 281 unsigned long flags;
d765955d
GC
282 bool ret = false;
283
f5351ef7
GC
284 /* When using PCS we cannot deal with the PHY registers at this stage,
285 * so we do not support extra features like EEE.
286 */
287 if ((priv->pcs == STMMAC_PCS_RGMII) || (priv->pcs == STMMAC_PCS_TBI) ||
288 (priv->pcs == STMMAC_PCS_RTBI))
289 goto out;
290
56b88c25 291 /* Never init EEE in case a switch is attached */
a7657f12 292 if (priv->phydev->is_pseudo_fixed_link)
56b88c25
GC
293 goto out;
294
d765955d
GC
295 /* MAC core supports the EEE feature. */
296 if (priv->dma_cap.eee) {
83bf79b6
GC
297 int tx_lpi_timer = priv->tx_lpi_timer;
298
d765955d 299 /* Check if the PHY supports EEE */
83bf79b6
GC
300 if (phy_init_eee(priv->phydev, 1)) {
301 /* To manage the case where, at run time, the EEE can no longer
302 * be supported (for example because the link partner caps have
303 * changed).
304 * In that case the driver disables its own timers.
305 */
4741cf9c 306 spin_lock_irqsave(&priv->lock, flags);
83bf79b6
GC
307 if (priv->eee_active) {
308 pr_debug("stmmac: disable EEE\n");
309 del_timer_sync(&priv->eee_ctrl_timer);
7ed24bbe 310 priv->hw->mac->set_eee_timer(priv->hw, 0,
83bf79b6
GC
311 tx_lpi_timer);
312 }
313 priv->eee_active = 0;
4741cf9c 314 spin_unlock_irqrestore(&priv->lock, flags);
d765955d 315 goto out;
83bf79b6
GC
316 }
317 /* Activate the EEE and start timers */
4741cf9c 318 spin_lock_irqsave(&priv->lock, flags);
f5351ef7
GC
319 if (!priv->eee_active) {
320 priv->eee_active = 1;
ccb36da1
VT
321 setup_timer(&priv->eee_ctrl_timer,
322 stmmac_eee_ctrl_timer,
323 (unsigned long)priv);
324 mod_timer(&priv->eee_ctrl_timer,
325 STMMAC_LPI_T(eee_timer));
f5351ef7 326
7ed24bbe 327 priv->hw->mac->set_eee_timer(priv->hw,
f5351ef7 328 STMMAC_DEFAULT_LIT_LS,
83bf79b6 329 tx_lpi_timer);
71965352
GC
330 }
331 /* Set HW EEE according to the speed */
332 priv->hw->mac->set_eee_pls(priv->hw, priv->phydev->link);
d765955d 333
d765955d 334 ret = true;
4741cf9c
GC
335 spin_unlock_irqrestore(&priv->lock, flags);
336
337 pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
d765955d
GC
338 }
339out:
340 return ret;
341}
342
732fdf0e 343/* stmmac_get_tx_hwtstamp - get HW TX timestamps
32ceabca 344 * @priv: driver private structure
891434b1
RK
345 * @entry : descriptor index to be used.
346 * @skb : the socket buffer
347 * Description :
348 * This function will read the timestamp from the descriptor, pass it to
349 * the stack and also perform some sanity checks.
350 */
351static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
ceb69499 352 unsigned int entry, struct sk_buff *skb)
891434b1
RK
353{
354 struct skb_shared_hwtstamps shhwtstamp;
355 u64 ns;
356 void *desc = NULL;
357
358 if (!priv->hwts_tx_en)
359 return;
360
ceb69499 361 /* exit if skb doesn't support hw tstamp */
75e4364f 362 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
891434b1
RK
363 return;
364
365 if (priv->adv_ts)
366 desc = (priv->dma_etx + entry);
367 else
368 desc = (priv->dma_tx + entry);
369
370 /* check tx tstamp status */
371 if (!priv->hw->desc->get_tx_timestamp_status((struct dma_desc *)desc))
372 return;
373
374 /* get the valid tstamp */
375 ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
376
377 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
378 shhwtstamp.hwtstamp = ns_to_ktime(ns);
379 /* pass tstamp to stack */
380 skb_tstamp_tx(skb, &shhwtstamp);
381
382 return;
383}
384
732fdf0e 385/* stmmac_get_rx_hwtstamp - get HW RX timestamps
32ceabca 386 * @priv: driver private structure
891434b1
RK
387 * @entry : descriptor index to be used.
388 * @skb : the socket buffer
389 * Description :
390 * This function will read the received packet's timestamp from the
391 * descriptor and pass it to the stack. It also performs some sanity checks.
392 */
393static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv,
ceb69499 394 unsigned int entry, struct sk_buff *skb)
891434b1
RK
395{
396 struct skb_shared_hwtstamps *shhwtstamp = NULL;
397 u64 ns;
398 void *desc = NULL;
399
400 if (!priv->hwts_rx_en)
401 return;
402
403 if (priv->adv_ts)
404 desc = (priv->dma_erx + entry);
405 else
406 desc = (priv->dma_rx + entry);
407
ceb69499 408 /* exit if rx tstamp is not valid */
891434b1
RK
409 if (!priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts))
410 return;
411
412 /* get valid tstamp */
413 ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
414 shhwtstamp = skb_hwtstamps(skb);
415 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
416 shhwtstamp->hwtstamp = ns_to_ktime(ns);
417}
418
419/**
420 * stmmac_hwtstamp_ioctl - control hardware timestamping.
421 * @dev: device pointer.
422 * @ifr: an IOCTL-specific structure that can contain a pointer to
423 * a proprietary structure used to pass information to the driver.
424 * Description:
425 * This function configures the MAC to enable/disable both outgoing (TX)
426 * and incoming (RX) packet timestamping based on user input.
427 * Return Value:
428 * 0 on success and an appropriate -ve integer on failure.
429 */
430static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
431{
432 struct stmmac_priv *priv = netdev_priv(dev);
433 struct hwtstamp_config config;
0a624155 434 struct timespec64 now;
891434b1
RK
435 u64 temp = 0;
436 u32 ptp_v2 = 0;
437 u32 tstamp_all = 0;
438 u32 ptp_over_ipv4_udp = 0;
439 u32 ptp_over_ipv6_udp = 0;
440 u32 ptp_over_ethernet = 0;
441 u32 snap_type_sel = 0;
442 u32 ts_master_en = 0;
443 u32 ts_event_en = 0;
444 u32 value = 0;
19d857c9 445 u32 sec_inc;
891434b1
RK
446
447 if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
448 netdev_alert(priv->dev, "No support for HW time stamping\n");
449 priv->hwts_tx_en = 0;
450 priv->hwts_rx_en = 0;
451
452 return -EOPNOTSUPP;
453 }
454
455 if (copy_from_user(&config, ifr->ifr_data,
ceb69499 456 sizeof(struct hwtstamp_config)))
891434b1
RK
457 return -EFAULT;
458
459 pr_debug("%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
460 __func__, config.flags, config.tx_type, config.rx_filter);
461
462 /* reserved for future extensions */
463 if (config.flags)
464 return -EINVAL;
465
5f3da328
BH
466 if (config.tx_type != HWTSTAMP_TX_OFF &&
467 config.tx_type != HWTSTAMP_TX_ON)
891434b1 468 return -ERANGE;
891434b1
RK
469
470 if (priv->adv_ts) {
471 switch (config.rx_filter) {
891434b1 472 case HWTSTAMP_FILTER_NONE:
ceb69499 473 /* do not time stamp any incoming packet */
891434b1
RK
474 config.rx_filter = HWTSTAMP_FILTER_NONE;
475 break;
476
891434b1 477 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
ceb69499 478 /* PTP v1, UDP, any kind of event packet */
891434b1
RK
479 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
480 /* take time stamp for all event messages */
481 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
482
483 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
484 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
485 break;
486
891434b1 487 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
ceb69499 488 /* PTP v1, UDP, Sync packet */
891434b1
RK
489 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
490 /* take time stamp for SYNC messages only */
491 ts_event_en = PTP_TCR_TSEVNTENA;
492
493 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
494 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
495 break;
496
891434b1 497 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
ceb69499 498 /* PTP v1, UDP, Delay_req packet */
891434b1
RK
499 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
500 /* take time stamp for Delay_Req messages only */
501 ts_master_en = PTP_TCR_TSMSTRENA;
502 ts_event_en = PTP_TCR_TSEVNTENA;
503
504 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
505 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
506 break;
507
891434b1 508 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
ceb69499 509 /* PTP v2, UDP, any kind of event packet */
891434b1
RK
510 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
511 ptp_v2 = PTP_TCR_TSVER2ENA;
512 /* take time stamp for all event messages */
513 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
514
515 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
516 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
517 break;
518
891434b1 519 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
ceb69499 520 /* PTP v2, UDP, Sync packet */
891434b1
RK
521 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
522 ptp_v2 = PTP_TCR_TSVER2ENA;
523 /* take time stamp for SYNC messages only */
524 ts_event_en = PTP_TCR_TSEVNTENA;
525
526 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
527 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
528 break;
529
891434b1 530 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
ceb69499 531 /* PTP v2, UDP, Delay_req packet */
891434b1
RK
532 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
533 ptp_v2 = PTP_TCR_TSVER2ENA;
534 /* take time stamp for Delay_Req messages only */
535 ts_master_en = PTP_TCR_TSMSTRENA;
536 ts_event_en = PTP_TCR_TSEVNTENA;
537
538 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
539 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
540 break;
541
891434b1 542 case HWTSTAMP_FILTER_PTP_V2_EVENT:
ceb69499 543 /* PTP v2/IEEE 802.1AS, any layer, any kind of event packet */
891434b1
RK
544 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
545 ptp_v2 = PTP_TCR_TSVER2ENA;
546 /* take time stamp for all event messages */
547 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
548
549 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
550 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
551 ptp_over_ethernet = PTP_TCR_TSIPENA;
552 break;
553
891434b1 554 case HWTSTAMP_FILTER_PTP_V2_SYNC:
ceb69499 555 /* PTP v2/IEEE 802.1AS, any layer, Sync packet */
891434b1
RK
556 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
557 ptp_v2 = PTP_TCR_TSVER2ENA;
558 /* take time stamp for SYNC messages only */
559 ts_event_en = PTP_TCR_TSEVNTENA;
560
561 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
562 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
563 ptp_over_ethernet = PTP_TCR_TSIPENA;
564 break;
565
891434b1 566 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
ceb69499 567 /* PTP v2/IEEE 802.1AS, any layer, Delay_req packet */
891434b1
RK
568 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
569 ptp_v2 = PTP_TCR_TSVER2ENA;
570 /* take time stamp for Delay_Req messages only */
571 ts_master_en = PTP_TCR_TSMSTRENA;
572 ts_event_en = PTP_TCR_TSEVNTENA;
573
574 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
575 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
576 ptp_over_ethernet = PTP_TCR_TSIPENA;
577 break;
578
891434b1 579 case HWTSTAMP_FILTER_ALL:
ceb69499 580 /* time stamp any incoming packet */
891434b1
RK
581 config.rx_filter = HWTSTAMP_FILTER_ALL;
582 tstamp_all = PTP_TCR_TSENALL;
583 break;
584
585 default:
586 return -ERANGE;
587 }
588 } else {
589 switch (config.rx_filter) {
590 case HWTSTAMP_FILTER_NONE:
591 config.rx_filter = HWTSTAMP_FILTER_NONE;
592 break;
593 default:
594 /* PTP v1, UDP, any kind of event packet */
595 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
596 break;
597 }
598 }
599 priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
5f3da328 600 priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
891434b1
RK
601
602 if (!priv->hwts_tx_en && !priv->hwts_rx_en)
603 priv->hw->ptp->config_hw_tstamping(priv->ioaddr, 0);
604 else {
605 value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
ceb69499
GC
606 tstamp_all | ptp_v2 | ptp_over_ethernet |
607 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
608 ts_master_en | snap_type_sel);
891434b1
RK
609 priv->hw->ptp->config_hw_tstamping(priv->ioaddr, value);
610
611 /* program Sub Second Increment reg */
19d857c9
PR
612 sec_inc = priv->hw->ptp->config_sub_second_increment(
613 priv->ioaddr, priv->clk_ptp_rate);
614 temp = div_u64(1000000000ULL, sec_inc);
891434b1
RK
615
616 /* calculate the default addend value:
617 * the formula is:
618 * addend = (2^32)/freq_div_ratio;
19d857c9 619 * where freq_div_ratio = 1e9 ns / sec_inc
891434b1 620 */
19d857c9 621 temp = (u64)(temp << 32);
5566401f 622 priv->default_addend = div_u64(temp, priv->clk_ptp_rate);
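/*
 * Worked example (illustrative numbers): with clk_ptp_rate = 62.5 MHz
 * and a programmed sec_inc of 20 ns (a 50 MHz nominal accumulator),
 * temp = 1e9 / 20 = 50000000 and the addend becomes
 * (50000000 << 32) / 62500000 = 0.8 * 2^32 ~= 0xCCCCCCCC.
 */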
891434b1
RK
623 priv->hw->ptp->config_addend(priv->ioaddr,
624 priv->default_addend);
625
626 /* initialize system time */
0a624155
AB
627 ktime_get_real_ts64(&now);
628
629 /* lower 32 bits of tv_sec are safe until y2106 */
630 priv->hw->ptp->init_systime(priv->ioaddr, (u32)now.tv_sec,
891434b1
RK
631 now.tv_nsec);
632 }
633
634 return copy_to_user(ifr->ifr_data, &config,
635 sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
636}
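/*
 * Illustrative userspace counterpart (not part of this driver): the
 * function above is reached via the SIOCSHWTSTAMP ioctl, e.g.
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = { 0 };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 */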
637
32ceabca 638/**
732fdf0e 639 * stmmac_init_ptp - init PTP
32ceabca 640 * @priv: driver private structure
732fdf0e 641 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
32ceabca 642 * This is done by looking at the HW cap. register.
732fdf0e 643 * This function also registers the ptp driver.
32ceabca 644 */
92ba6888 645static int stmmac_init_ptp(struct stmmac_priv *priv)
891434b1 646{
92ba6888
RK
647 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
648 return -EOPNOTSUPP;
649
5566401f
GC
650 /* Fall back to the main clock in case no PTP ref clock is passed */
651 priv->clk_ptp_ref = devm_clk_get(priv->device, "clk_ptp_ref");
652 if (IS_ERR(priv->clk_ptp_ref)) {
653 priv->clk_ptp_rate = clk_get_rate(priv->stmmac_clk);
654 priv->clk_ptp_ref = NULL;
655 } else {
656 clk_prepare_enable(priv->clk_ptp_ref);
657 priv->clk_ptp_rate = clk_get_rate(priv->clk_ptp_ref);
658 }
659
7cd01399
VB
660 priv->adv_ts = 0;
661 if (priv->dma_cap.atime_stamp && priv->extend_desc)
662 priv->adv_ts = 1;
663
664 if (netif_msg_hw(priv) && priv->dma_cap.time_stamp)
665 pr_debug("IEEE 1588-2002 Time Stamp supported\n");
666
667 if (netif_msg_hw(priv) && priv->adv_ts)
668 pr_debug("IEEE 1588-2008 Advanced Time Stamp supported\n");
891434b1
RK
669
670 priv->hw->ptp = &stmmac_ptp;
671 priv->hwts_tx_en = 0;
672 priv->hwts_rx_en = 0;
92ba6888
RK
673
674 return stmmac_ptp_register(priv);
675}
676
677static void stmmac_release_ptp(struct stmmac_priv *priv)
678{
5566401f
GC
679 if (priv->clk_ptp_ref)
680 clk_disable_unprepare(priv->clk_ptp_ref);
92ba6888 681 stmmac_ptp_unregister(priv);
891434b1
RK
682}
683
47dd7a54 684/**
732fdf0e 685 * stmmac_adjust_link - adjusts the link parameters
47dd7a54 686 * @dev: net device structure
732fdf0e
GC
687 * Description: this is the helper called by the physical abstraction layer
688 * drivers to communicate the phy link status. According to the speed and
689 * duplex, this driver can invoke registered glue-logic as well.
690 * It also invokes the EEE initialization because the link can come up
691 * on different networks (that may be EEE capable).
47dd7a54
GC
692 */
693static void stmmac_adjust_link(struct net_device *dev)
694{
695 struct stmmac_priv *priv = netdev_priv(dev);
696 struct phy_device *phydev = priv->phydev;
47dd7a54
GC
697 unsigned long flags;
698 int new_state = 0;
699 unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;
700
701 if (phydev == NULL)
702 return;
703
47dd7a54 704 spin_lock_irqsave(&priv->lock, flags);
d765955d 705
47dd7a54 706 if (phydev->link) {
ad01b7d4 707 u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
47dd7a54
GC
708
709 /* Now we make sure that we can be in full duplex mode.
710 * If not, we operate in half-duplex mode. */
711 if (phydev->duplex != priv->oldduplex) {
712 new_state = 1;
713 if (!(phydev->duplex))
db98a0b0 714 ctrl &= ~priv->hw->link.duplex;
47dd7a54 715 else
db98a0b0 716 ctrl |= priv->hw->link.duplex;
47dd7a54
GC
717 priv->oldduplex = phydev->duplex;
718 }
719 /* Flow Control operation */
720 if (phydev->pause)
7ed24bbe 721 priv->hw->mac->flow_ctrl(priv->hw, phydev->duplex,
db98a0b0 722 fc, pause_time);
47dd7a54
GC
723
724 if (phydev->speed != priv->speed) {
725 new_state = 1;
726 switch (phydev->speed) {
727 case 1000:
9dfeb4d9 728 if (likely(priv->plat->has_gmac))
db98a0b0 729 ctrl &= ~priv->hw->link.port;
ceb69499 730 stmmac_hw_fix_mac_speed(priv);
47dd7a54
GC
731 break;
732 case 100:
733 case 10:
9dfeb4d9 734 if (priv->plat->has_gmac) {
db98a0b0 735 ctrl |= priv->hw->link.port;
47dd7a54 736 if (phydev->speed == SPEED_100) {
db98a0b0 737 ctrl |= priv->hw->link.speed;
47dd7a54 738 } else {
db98a0b0 739 ctrl &= ~(priv->hw->link.speed);
47dd7a54
GC
740 }
741 } else {
db98a0b0 742 ctrl &= ~priv->hw->link.port;
47dd7a54 743 }
9dfeb4d9 744 stmmac_hw_fix_mac_speed(priv);
47dd7a54
GC
745 break;
746 default:
747 if (netif_msg_link(priv))
ceb69499
GC
748 pr_warn("%s: Speed (%d) not 10/100\n",
749 dev->name, phydev->speed);
47dd7a54
GC
750 break;
751 }
752
753 priv->speed = phydev->speed;
754 }
755
ad01b7d4 756 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
47dd7a54
GC
757
758 if (!priv->oldlink) {
759 new_state = 1;
760 priv->oldlink = 1;
761 }
762 } else if (priv->oldlink) {
763 new_state = 1;
764 priv->oldlink = 0;
765 priv->speed = 0;
766 priv->oldduplex = -1;
767 }
768
769 if (new_state && netif_msg_link(priv))
770 phy_print_status(phydev);
771
4741cf9c
GC
772 spin_unlock_irqrestore(&priv->lock, flags);
773
f5351ef7
GC
774 /* At this stage, it may be necessary to set up the EEE or adjust some
775 * MAC-related HW registers.
776 */
777 priv->eee_enabled = stmmac_eee_init(priv);
47dd7a54
GC
778}
779
32ceabca 780/**
732fdf0e 781 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
32ceabca
GC
782 * @priv: driver private structure
783 * Description: this is to verify if the HW supports the PCS.
784 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
785 * configured for the TBI, RTBI, or SGMII PHY interface.
786 */
e58bb43f
GC
787static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
788{
789 int interface = priv->plat->interface;
790
791 if (priv->dma_cap.pcs) {
0d909dcd
BA
792 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
793 (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
794 (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
795 (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
e58bb43f
GC
796 pr_debug("STMMAC: PCS RGMII support enable\n");
797 priv->pcs = STMMAC_PCS_RGMII;
0d909dcd 798 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
e58bb43f
GC
799 pr_debug("STMMAC: PCS SGMII support enable\n");
800 priv->pcs = STMMAC_PCS_SGMII;
801 }
802 }
803}
804
47dd7a54
GC
805/**
806 * stmmac_init_phy - PHY initialization
807 * @dev: net device structure
808 * Description: it initializes the driver's PHY state, and attaches the PHY
809 * to the mac driver.
810 * Return value:
811 * 0 on success
812 */
813static int stmmac_init_phy(struct net_device *dev)
814{
815 struct stmmac_priv *priv = netdev_priv(dev);
816 struct phy_device *phydev;
d765955d 817 char phy_id_fmt[MII_BUS_ID_SIZE + 3];
109cdd66 818 char bus_id[MII_BUS_ID_SIZE];
79ee1dc3 819 int interface = priv->plat->interface;
9cbadf09 820 int max_speed = priv->plat->max_speed;
47dd7a54
GC
821 priv->oldlink = 0;
822 priv->speed = 0;
823 priv->oldduplex = -1;
824
5790cf3c
MO
825 if (priv->plat->phy_node) {
826 phydev = of_phy_connect(dev, priv->plat->phy_node,
827 &stmmac_adjust_link, 0, interface);
828 } else {
a7657f12
GC
829 snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
830 priv->plat->bus_id);
5790cf3c
MO
831
832 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
833 priv->plat->phy_addr);
834 pr_debug("stmmac_init_phy: trying to attach to %s\n",
835 phy_id_fmt);
836
837 phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
838 interface);
839 }
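/*
 * Note: PHY_ID_FMT is "%s:%02x", so with a hypothetical bus_id of
 * "stmmac-1" and phy_addr 3 the string built above would be
 * "stmmac-1:03".
 */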
47dd7a54 840
dfc50fca 841 if (IS_ERR_OR_NULL(phydev)) {
47dd7a54 842 pr_err("%s: Could not attach to PHY\n", dev->name);
dfc50fca
AB
843 if (!phydev)
844 return -ENODEV;
845
47dd7a54
GC
846 return PTR_ERR(phydev);
847 }
848
79ee1dc3 849 /* Stop Advertising 1000BASE Capability if interface is not GMII */
c5b9b4e4 850 if ((interface == PHY_INTERFACE_MODE_MII) ||
9cbadf09 851 (interface == PHY_INTERFACE_MODE_RMII) ||
a77e4acc 852 (max_speed < 1000 && max_speed > 0))
c5b9b4e4
SK
853 phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
854 SUPPORTED_1000baseT_Full);
79ee1dc3 855
47dd7a54
GC
856 /*
857 * Broken HW is sometimes missing the pull-up resistor on the
858 * MDIO line, which results in reads to non-existent devices returning
859 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
860 * device as well.
861 * Note: phydev->phy_id is the result of reading the UID PHY registers.
862 */
27732381 863 if (!priv->plat->phy_node && phydev->phy_id == 0) {
47dd7a54
GC
864 phy_disconnect(phydev);
865 return -ENODEV;
866 }
8e99fc5f
GC
867
868 /* If attached to a switch, there is no reason to poll phy handler */
a7657f12
GC
869 if (phydev->is_pseudo_fixed_link)
870 phydev->irq = PHY_IGNORE_INTERRUPT;
8e99fc5f 871
47dd7a54 872 pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)"
36bcfe7d 873 " Link = %d\n", dev->name, phydev->phy_id, phydev->link);
47dd7a54
GC
874
875 priv->phydev = phydev;
876
877 return 0;
878}
879
47dd7a54 880/**
732fdf0e 881 * stmmac_display_ring - display ring
32ceabca 882 * @head: pointer to the head of the ring passed.
47dd7a54 883 * @size: size of the ring.
32ceabca 884 * @extend_desc: to verify if extended descriptors are used.
c24602ef 885 * Description: display the control/status and buffer descriptors.
47dd7a54 886 */
c24602ef 887static void stmmac_display_ring(void *head, int size, int extend_desc)
47dd7a54 888{
47dd7a54 889 int i;
ceb69499
GC
890 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
891 struct dma_desc *p = (struct dma_desc *)head;
c24602ef 892
47dd7a54 893 for (i = 0; i < size; i++) {
c24602ef
GC
894 u64 x;
895 if (extend_desc) {
896 x = *(u64 *) ep;
897 pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
ceb69499
GC
898 i, (unsigned int)virt_to_phys(ep),
899 (unsigned int)x, (unsigned int)(x >> 32),
c24602ef
GC
900 ep->basic.des2, ep->basic.des3);
901 ep++;
902 } else {
903 x = *(u64 *) p;
904 pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
ceb69499
GC
905 i, (unsigned int)virt_to_phys(p),
906 (unsigned int)x, (unsigned int)(x >> 32),
c24602ef
GC
907 p->des2, p->des3);
908 p++;
909 }
47dd7a54
GC
910 pr_info("\n");
911 }
912}
913
c24602ef
GC
914static void stmmac_display_rings(struct stmmac_priv *priv)
915{
c24602ef
GC
916 if (priv->extend_desc) {
917 pr_info("Extended RX descriptor ring:\n");
e3ad57c9 918 stmmac_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1);
c24602ef 919 pr_info("Extended TX descriptor ring:\n");
e3ad57c9 920 stmmac_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1);
c24602ef
GC
921 } else {
922 pr_info("RX descriptor ring:\n");
e3ad57c9 923 stmmac_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0);
c24602ef 924 pr_info("TX descriptor ring:\n");
e3ad57c9 925 stmmac_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0);
c24602ef
GC
926 }
927}
928
286a8372
GC
929static int stmmac_set_bfsize(int mtu, int bufsize)
930{
931 int ret = bufsize;
932
933 if (mtu >= BUF_SIZE_4KiB)
934 ret = BUF_SIZE_8KiB;
935 else if (mtu >= BUF_SIZE_2KiB)
936 ret = BUF_SIZE_4KiB;
d916701c 937 else if (mtu > DEFAULT_BUFSIZE)
286a8372
GC
938 ret = BUF_SIZE_2KiB;
939 else
d916701c 940 ret = DEFAULT_BUFSIZE;
286a8372
GC
941
942 return ret;
943}
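/*
 * Examples of the mapping above: a standard 1500-byte MTU keeps
 * DEFAULT_BUFSIZE (1536), a 3000-byte MTU selects BUF_SIZE_4KiB and a
 * 9000-byte jumbo MTU selects BUF_SIZE_8KiB.
 */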
944
32ceabca 945/**
732fdf0e 946 * stmmac_clear_descriptors - clear descriptors
32ceabca
GC
947 * @priv: driver private structure
948 * Description: this function is called to clear the tx and rx descriptors
949 * whether basic or extended descriptors are used.
950 */
c24602ef
GC
951static void stmmac_clear_descriptors(struct stmmac_priv *priv)
952{
953 int i;
c24602ef
GC
954
955 /* Clear the Rx/Tx descriptors */
e3ad57c9 956 for (i = 0; i < DMA_RX_SIZE; i++)
c24602ef
GC
957 if (priv->extend_desc)
958 priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic,
959 priv->use_riwt, priv->mode,
e3ad57c9 960 (i == DMA_RX_SIZE - 1));
c24602ef
GC
961 else
962 priv->hw->desc->init_rx_desc(&priv->dma_rx[i],
963 priv->use_riwt, priv->mode,
e3ad57c9
GC
964 (i == DMA_RX_SIZE - 1));
965 for (i = 0; i < DMA_TX_SIZE; i++)
c24602ef
GC
966 if (priv->extend_desc)
967 priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
968 priv->mode,
e3ad57c9 969 (i == DMA_TX_SIZE - 1));
c24602ef
GC
970 else
971 priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
972 priv->mode,
e3ad57c9 973 (i == DMA_TX_SIZE - 1));
c24602ef
GC
974}
975
732fdf0e
GC
976/**
977 * stmmac_init_rx_buffers - init the RX descriptor buffer.
978 * @priv: driver private structure
979 * @p: descriptor pointer
980 * @i: descriptor index
981 * @flags: gfp flag.
982 * Description: this function is called to allocate a receive buffer, perform
983 * the DMA mapping and init the descriptor.
984 */
c24602ef 985static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
777da230 986 int i, gfp_t flags)
c24602ef
GC
987{
988 struct sk_buff *skb;
989
4ec49a37 990 skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
56329137 991 if (!skb) {
c24602ef 992 pr_err("%s: Rx init fails; skb is NULL\n", __func__);
56329137 993 return -ENOMEM;
c24602ef 994 }
c24602ef
GC
995 priv->rx_skbuff[i] = skb;
996 priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
997 priv->dma_buf_sz,
998 DMA_FROM_DEVICE);
56329137
BZ
999 if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) {
1000 pr_err("%s: DMA mapping error\n", __func__);
1001 dev_kfree_skb_any(skb);
1002 return -EINVAL;
1003 }
c24602ef
GC
1004
1005 p->des2 = priv->rx_skbuff_dma[i];
1006
29896a67 1007 if ((priv->hw->mode->init_desc3) &&
c24602ef 1008 (priv->dma_buf_sz == BUF_SIZE_16KiB))
29896a67 1009 priv->hw->mode->init_desc3(p);
c24602ef
GC
1010
1011 return 0;
1012}
1013
56329137
BZ
1014static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
1015{
1016 if (priv->rx_skbuff[i]) {
1017 dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
1018 priv->dma_buf_sz, DMA_FROM_DEVICE);
1019 dev_kfree_skb_any(priv->rx_skbuff[i]);
1020 }
1021 priv->rx_skbuff[i] = NULL;
1022}
1023
47dd7a54
GC
1024/**
1025 * init_dma_desc_rings - init the RX/TX descriptor rings
1026 * @dev: net device structure
732fdf0e
GC
1027 * @flags: gfp flag.
1028 * Description: this function initializes the DMA RX/TX descriptors
286a8372
GC
1029 * and allocates the socket buffers. It supports the chained and ring
1030 * modes.
47dd7a54 1031 */
777da230 1032static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
47dd7a54
GC
1033{
1034 int i;
1035 struct stmmac_priv *priv = netdev_priv(dev);
4a7d666a 1036 unsigned int bfsize = 0;
56329137 1037 int ret = -ENOMEM;
47dd7a54 1038
29896a67
GC
1039 if (priv->hw->mode->set_16kib_bfsize)
1040 bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
286a8372 1041
4a7d666a 1042 if (bfsize < BUF_SIZE_16KiB)
286a8372 1043 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
47dd7a54 1044
2618abb7
VB
1045 priv->dma_buf_sz = bfsize;
1046
83d7af64 1047 if (netif_msg_probe(priv)) {
c24602ef
GC
1048 pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__,
1049 (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy);
47dd7a54 1050
83d7af64
GC
1051 /* RX INITIALIZATION */
1052 pr_debug("\tSKB addresses:\nskb\t\tskb data\tdma data\n");
1053 }
e3ad57c9 1054 for (i = 0; i < DMA_RX_SIZE; i++) {
c24602ef
GC
1055 struct dma_desc *p;
1056 if (priv->extend_desc)
1057 p = &((priv->dma_erx + i)->basic);
1058 else
1059 p = priv->dma_rx + i;
47dd7a54 1060
777da230 1061 ret = stmmac_init_rx_buffers(priv, p, i, flags);
56329137
BZ
1062 if (ret)
1063 goto err_init_rx_buffers;
286a8372 1064
83d7af64
GC
1065 if (netif_msg_probe(priv))
1066 pr_debug("[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
1067 priv->rx_skbuff[i]->data,
1068 (unsigned int)priv->rx_skbuff_dma[i]);
47dd7a54
GC
1069 }
1070 priv->cur_rx = 0;
e3ad57c9 1071 priv->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
47dd7a54
GC
1072 buf_sz = bfsize;
1073
c24602ef
GC
1074 /* Setup the chained descriptor addresses */
1075 if (priv->mode == STMMAC_CHAIN_MODE) {
1076 if (priv->extend_desc) {
29896a67 1077 priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy,
e3ad57c9 1078 DMA_RX_SIZE, 1);
29896a67 1079 priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy,
e3ad57c9 1080 DMA_TX_SIZE, 1);
c24602ef 1081 } else {
29896a67 1082 priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy,
e3ad57c9 1083 DMA_RX_SIZE, 0);
29896a67 1084 priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy,
e3ad57c9 1085 DMA_TX_SIZE, 0);
c24602ef
GC
1086 }
1087 }
1088
47dd7a54 1089 /* TX INITIALIZATION */
e3ad57c9 1090 for (i = 0; i < DMA_TX_SIZE; i++) {
c24602ef
GC
1091 struct dma_desc *p;
1092 if (priv->extend_desc)
1093 p = &((priv->dma_etx + i)->basic);
1094 else
1095 p = priv->dma_tx + i;
1096 p->des2 = 0;
362b37be
GC
1097 priv->tx_skbuff_dma[i].buf = 0;
1098 priv->tx_skbuff_dma[i].map_as_page = false;
553e2ab3 1099 priv->tx_skbuff_dma[i].len = 0;
2a6d8e17 1100 priv->tx_skbuff_dma[i].last_segment = false;
47dd7a54 1101 priv->tx_skbuff[i] = NULL;
47dd7a54 1102 }
286a8372 1103
47dd7a54
GC
1104 priv->dirty_tx = 0;
1105 priv->cur_tx = 0;
38979574 1106 netdev_reset_queue(priv->dev);
47dd7a54 1107
c24602ef 1108 stmmac_clear_descriptors(priv);
47dd7a54 1109
c24602ef
GC
1110 if (netif_msg_hw(priv))
1111 stmmac_display_rings(priv);
56329137
BZ
1112
1113 return 0;
1114err_init_rx_buffers:
1115 while (--i >= 0)
1116 stmmac_free_rx_buffers(priv, i);
56329137 1117 return ret;
47dd7a54
GC
1118}
1119
1120static void dma_free_rx_skbufs(struct stmmac_priv *priv)
1121{
1122 int i;
1123
e3ad57c9 1124 for (i = 0; i < DMA_RX_SIZE; i++)
56329137 1125 stmmac_free_rx_buffers(priv, i);
47dd7a54
GC
1126}
1127
1128static void dma_free_tx_skbufs(struct stmmac_priv *priv)
1129{
1130 int i;
1131
e3ad57c9 1132 for (i = 0; i < DMA_TX_SIZE; i++) {
75e4364f 1133 struct dma_desc *p;
1134
1135 if (priv->extend_desc)
1136 p = &((priv->dma_etx + i)->basic);
1137 else
1138 p = priv->dma_tx + i;
1139
362b37be
GC
1140 if (priv->tx_skbuff_dma[i].buf) {
1141 if (priv->tx_skbuff_dma[i].map_as_page)
1142 dma_unmap_page(priv->device,
1143 priv->tx_skbuff_dma[i].buf,
553e2ab3 1144 priv->tx_skbuff_dma[i].len,
362b37be
GC
1145 DMA_TO_DEVICE);
1146 else
1147 dma_unmap_single(priv->device,
1148 priv->tx_skbuff_dma[i].buf,
553e2ab3 1149 priv->tx_skbuff_dma[i].len,
362b37be 1150 DMA_TO_DEVICE);
75e4364f 1151 }
c24602ef 1152
75e4364f 1153 if (priv->tx_skbuff[i] != NULL) {
47dd7a54
GC
1154 dev_kfree_skb_any(priv->tx_skbuff[i]);
1155 priv->tx_skbuff[i] = NULL;
362b37be
GC
1156 priv->tx_skbuff_dma[i].buf = 0;
1157 priv->tx_skbuff_dma[i].map_as_page = false;
47dd7a54
GC
1158 }
1159 }
47dd7a54
GC
1160}
1161
732fdf0e
GC
1162/**
1163 * alloc_dma_desc_resources - alloc TX/RX resources.
1164 * @priv: private structure
1165 * Description: according to which descriptor can be used (extended or basic)
1166 * this function allocates the resources for the TX and RX paths. In case of
1167 * reception, for example, it pre-allocates the RX socket buffers in order to
1168 * allow a zero-copy mechanism.
1169 */
09f8d696
SK
1170static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1171{
09f8d696
SK
1172 int ret = -ENOMEM;
1173
e3ad57c9 1174 priv->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, sizeof(dma_addr_t),
09f8d696
SK
1175 GFP_KERNEL);
1176 if (!priv->rx_skbuff_dma)
1177 return -ENOMEM;
1178
e3ad57c9 1179 priv->rx_skbuff = kmalloc_array(DMA_RX_SIZE, sizeof(struct sk_buff *),
09f8d696
SK
1180 GFP_KERNEL);
1181 if (!priv->rx_skbuff)
1182 goto err_rx_skbuff;
1183
e3ad57c9 1184 priv->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
362b37be 1185 sizeof(*priv->tx_skbuff_dma),
09f8d696
SK
1186 GFP_KERNEL);
1187 if (!priv->tx_skbuff_dma)
1188 goto err_tx_skbuff_dma;
1189
e3ad57c9 1190 priv->tx_skbuff = kmalloc_array(DMA_TX_SIZE, sizeof(struct sk_buff *),
09f8d696
SK
1191 GFP_KERNEL);
1192 if (!priv->tx_skbuff)
1193 goto err_tx_skbuff;
1194
1195 if (priv->extend_desc) {
e3ad57c9 1196 priv->dma_erx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
f1590670
AB
1197 sizeof(struct
1198 dma_extended_desc),
1199 &priv->dma_rx_phy,
1200 GFP_KERNEL);
09f8d696
SK
1201 if (!priv->dma_erx)
1202 goto err_dma;
1203
e3ad57c9 1204 priv->dma_etx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
f1590670
AB
1205 sizeof(struct
1206 dma_extended_desc),
1207 &priv->dma_tx_phy,
1208 GFP_KERNEL);
09f8d696 1209 if (!priv->dma_etx) {
e3ad57c9 1210 dma_free_coherent(priv->device, DMA_RX_SIZE *
f1590670
AB
1211 sizeof(struct dma_extended_desc),
1212 priv->dma_erx, priv->dma_rx_phy);
09f8d696
SK
1213 goto err_dma;
1214 }
1215 } else {
e3ad57c9 1216 priv->dma_rx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
f1590670
AB
1217 sizeof(struct dma_desc),
1218 &priv->dma_rx_phy,
1219 GFP_KERNEL);
09f8d696
SK
1220 if (!priv->dma_rx)
1221 goto err_dma;
1222
e3ad57c9 1223 priv->dma_tx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
f1590670
AB
1224 sizeof(struct dma_desc),
1225 &priv->dma_tx_phy,
1226 GFP_KERNEL);
09f8d696 1227 if (!priv->dma_tx) {
e3ad57c9 1228 dma_free_coherent(priv->device, DMA_RX_SIZE *
f1590670
AB
1229 sizeof(struct dma_desc),
1230 priv->dma_rx, priv->dma_rx_phy);
09f8d696
SK
1231 goto err_dma;
1232 }
1233 }
1234
1235 return 0;
1236
1237err_dma:
1238 kfree(priv->tx_skbuff);
1239err_tx_skbuff:
1240 kfree(priv->tx_skbuff_dma);
1241err_tx_skbuff_dma:
1242 kfree(priv->rx_skbuff);
1243err_rx_skbuff:
1244 kfree(priv->rx_skbuff_dma);
1245 return ret;
1246}
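/*
 * Sizing note (illustrative, assuming DMA_RX_SIZE = DMA_TX_SIZE = 512):
 * a basic dma_desc is 16 bytes and a dma_extended_desc is 32 bytes, so
 * each extended ring costs 512 * 32 = 16 KiB of coherent memory, in
 * addition to the kmalloc'ed arrays tracking the skbs and their DMA
 * addresses.
 */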
1247
47dd7a54
GC
1248static void free_dma_desc_resources(struct stmmac_priv *priv)
1249{
1250 /* Release the DMA TX/RX socket buffers */
1251 dma_free_rx_skbufs(priv);
1252 dma_free_tx_skbufs(priv);
1253
ceb69499 1254 /* Free DMA regions of consistent memory previously allocated */
c24602ef
GC
1255 if (!priv->extend_desc) {
1256 dma_free_coherent(priv->device,
e3ad57c9 1257 DMA_TX_SIZE * sizeof(struct dma_desc),
c24602ef
GC
1258 priv->dma_tx, priv->dma_tx_phy);
1259 dma_free_coherent(priv->device,
e3ad57c9 1260 DMA_RX_SIZE * sizeof(struct dma_desc),
c24602ef
GC
1261 priv->dma_rx, priv->dma_rx_phy);
1262 } else {
e3ad57c9 1263 dma_free_coherent(priv->device, DMA_TX_SIZE *
c24602ef
GC
1264 sizeof(struct dma_extended_desc),
1265 priv->dma_etx, priv->dma_tx_phy);
e3ad57c9 1266 dma_free_coherent(priv->device, DMA_RX_SIZE *
c24602ef
GC
1267 sizeof(struct dma_extended_desc),
1268 priv->dma_erx, priv->dma_rx_phy);
1269 }
47dd7a54
GC
1270 kfree(priv->rx_skbuff_dma);
1271 kfree(priv->rx_skbuff);
cf32deec 1272 kfree(priv->tx_skbuff_dma);
47dd7a54 1273 kfree(priv->tx_skbuff);
47dd7a54
GC
1274}
1275
47dd7a54
GC
1276/**
1277 * stmmac_dma_operation_mode - HW DMA operation mode
32ceabca 1278 * @priv: driver private structure
732fdf0e
GC
1279 * Description: it is used for configuring the DMA operation mode register in
1280 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
47dd7a54
GC
1281 */
1282static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1283{
f88203a2
VB
1284 int rxfifosz = priv->plat->rx_fifo_size;
1285
e2a240c7 1286 if (priv->plat->force_thresh_dma_mode)
f88203a2 1287 priv->hw->dma->dma_mode(priv->ioaddr, tc, tc, rxfifosz);
e2a240c7 1288 else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
61b8013a
SK
1289 /*
1290 * In case of GMAC, SF mode can be enabled
1291 * to perform the TX COE in HW. This depends on:
ebbb293f
GC
1292 * 1) TX COE being actually supported;
1293 * 2) there being no bugged Jumbo frame support
1294 * that requires not inserting the csum into the TDES.
1295 */
f88203a2
VB
1296 priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE,
1297 rxfifosz);
b2dec116 1298 priv->xstats.threshold = SF_DMA_MODE;
ebbb293f 1299 } else
f88203a2
VB
1300 priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE,
1301 rxfifosz);
47dd7a54
GC
1302}
1303
47dd7a54 1304/**
732fdf0e 1305 * stmmac_tx_clean - to manage the transmission completion
32ceabca 1306 * @priv: driver private structure
732fdf0e 1307 * Description: it reclaims the transmit resources after transmission completes.
47dd7a54 1308 */
9125cdd1 1309static void stmmac_tx_clean(struct stmmac_priv *priv)
47dd7a54 1310{
38979574 1311 unsigned int bytes_compl = 0, pkts_compl = 0;
e3ad57c9 1312 unsigned int entry = priv->dirty_tx;
47dd7a54 1313
a9097a96
GC
1314 spin_lock(&priv->tx_lock);
1315
9125cdd1
GC
1316 priv->xstats.tx_clean++;
1317
e3ad57c9 1318 while (entry != priv->cur_tx) {
47dd7a54 1319 struct sk_buff *skb = priv->tx_skbuff[entry];
c24602ef 1320 struct dma_desc *p;
c363b658 1321 int status;
c24602ef
GC
1322
1323 if (priv->extend_desc)
ceb69499 1324 p = (struct dma_desc *)(priv->dma_etx + entry);
c24602ef
GC
1325 else
1326 p = priv->dma_tx + entry;
47dd7a54 1327
c363b658 1328 status = priv->hw->desc->tx_status(&priv->dev->stats,
ceb69499
GC
1329 &priv->xstats, p,
1330 priv->ioaddr);
c363b658
FG
1331 /* Check if the descriptor is owned by the DMA */
1332 if (unlikely(status & tx_dma_own))
1333 break;
1334
1335 /* Just consider the last segment and ...*/
1336 if (likely(!(status & tx_not_ls))) {
1337 /* ... verify the status error condition */
1338 if (unlikely(status & tx_err)) {
1339 priv->dev->stats.tx_errors++;
1340 } else {
47dd7a54
GC
1341 priv->dev->stats.tx_packets++;
1342 priv->xstats.tx_pkt_n++;
c363b658 1343 }
891434b1 1344 stmmac_get_tx_hwtstamp(priv, entry, skb);
47dd7a54 1345 }
47dd7a54 1346
362b37be
GC
1347 if (likely(priv->tx_skbuff_dma[entry].buf)) {
1348 if (priv->tx_skbuff_dma[entry].map_as_page)
1349 dma_unmap_page(priv->device,
1350 priv->tx_skbuff_dma[entry].buf,
553e2ab3 1351 priv->tx_skbuff_dma[entry].len,
362b37be
GC
1352 DMA_TO_DEVICE);
1353 else
1354 dma_unmap_single(priv->device,
1355 priv->tx_skbuff_dma[entry].buf,
553e2ab3 1356 priv->tx_skbuff_dma[entry].len,
362b37be
GC
1357 DMA_TO_DEVICE);
1358 priv->tx_skbuff_dma[entry].buf = 0;
1359 priv->tx_skbuff_dma[entry].map_as_page = false;
cf32deec 1360 }
29896a67 1361 priv->hw->mode->clean_desc3(priv, p);
2a6d8e17 1362 priv->tx_skbuff_dma[entry].last_segment = false;
96951366 1363 priv->tx_skbuff_dma[entry].is_jumbo = false;
47dd7a54
GC
1364
1365 if (likely(skb != NULL)) {
38979574
BG
1366 pkts_compl++;
1367 bytes_compl += skb->len;
7c565c33 1368 dev_consume_skb_any(skb);
47dd7a54
GC
1369 priv->tx_skbuff[entry] = NULL;
1370 }
1371
4a7d666a 1372 priv->hw->desc->release_tx_desc(p, priv->mode);
47dd7a54 1373
e3ad57c9 1374 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
47dd7a54 1375 }
fbc80823 1376 priv->dirty_tx = entry;
38979574
BG
1377
1378 netdev_completed_queue(priv->dev, pkts_compl, bytes_compl);
1379
47dd7a54 1380 if (unlikely(netif_queue_stopped(priv->dev) &&
e3ad57c9 1381 stmmac_tx_avail(priv) > STMMAC_TX_THRESH)) {
47dd7a54
GC
1382 netif_tx_lock(priv->dev);
1383 if (netif_queue_stopped(priv->dev) &&
e3ad57c9 1384 stmmac_tx_avail(priv) > STMMAC_TX_THRESH) {
83d7af64
GC
1385 if (netif_msg_tx_done(priv))
1386 pr_debug("%s: restart transmit\n", __func__);
47dd7a54
GC
1387 netif_wake_queue(priv->dev);
1388 }
1389 netif_tx_unlock(priv->dev);
1390 }
d765955d
GC
1391
1392 if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1393 stmmac_enable_eee_mode(priv);
f5351ef7 1394 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
d765955d 1395 }
a9097a96 1396 spin_unlock(&priv->tx_lock);
47dd7a54
GC
1397}
1398
9125cdd1 1399static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv)
47dd7a54 1400{
7284a3f1 1401 priv->hw->dma->enable_dma_irq(priv->ioaddr);
47dd7a54
GC
1402}
1403
9125cdd1 1404static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv)
47dd7a54 1405{
7284a3f1 1406 priv->hw->dma->disable_dma_irq(priv->ioaddr);
47dd7a54
GC
1407}
1408
47dd7a54 1409/**
732fdf0e 1410 * stmmac_tx_err - to manage the tx error
32ceabca 1411 * @priv: driver private structure
47dd7a54 1412 * Description: it cleans the descriptors and restarts the transmission
732fdf0e 1413 * in case of transmission errors.
47dd7a54
GC
1414 */
1415static void stmmac_tx_err(struct stmmac_priv *priv)
1416{
c24602ef 1417 int i;
47dd7a54
GC
1418 netif_stop_queue(priv->dev);
1419
ad01b7d4 1420 priv->hw->dma->stop_tx(priv->ioaddr);
47dd7a54 1421 dma_free_tx_skbufs(priv);
e3ad57c9 1422 for (i = 0; i < DMA_TX_SIZE; i++)
c24602ef
GC
1423 if (priv->extend_desc)
1424 priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
1425 priv->mode,
e3ad57c9 1426 (i == DMA_TX_SIZE - 1));
c24602ef
GC
1427 else
1428 priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
1429 priv->mode,
e3ad57c9 1430 (i == DMA_TX_SIZE - 1));
47dd7a54
GC
1431 priv->dirty_tx = 0;
1432 priv->cur_tx = 0;
38979574 1433 netdev_reset_queue(priv->dev);
ad01b7d4 1434 priv->hw->dma->start_tx(priv->ioaddr);
47dd7a54
GC
1435
1436 priv->dev->stats.tx_errors++;
1437 netif_wake_queue(priv->dev);
47dd7a54
GC
1438}
1439
32ceabca 1440/**
732fdf0e 1441 * stmmac_dma_interrupt - DMA ISR
32ceabca
GC
1442 * @priv: driver private structure
1443 * Description: this is the DMA ISR. It is called by the main ISR.
732fdf0e
GC
1444 * It calls the dwmac dma routine and schedules the poll method in case
1445 * some work can be done.
32ceabca 1446 */
aec7ff27
GC
1447static void stmmac_dma_interrupt(struct stmmac_priv *priv)
1448{
aec7ff27 1449 int status;
f88203a2 1450 int rxfifosz = priv->plat->rx_fifo_size;
aec7ff27 1451
ad01b7d4 1452 status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
9125cdd1
GC
1453 if (likely((status & handle_rx)) || (status & handle_tx)) {
1454 if (likely(napi_schedule_prep(&priv->napi))) {
1455 stmmac_disable_dma_irq(priv);
1456 __napi_schedule(&priv->napi);
1457 }
1458 }
1459 if (unlikely(status & tx_hard_error_bump_tc)) {
aec7ff27 1460 /* Try to bump up the dma threshold on this failure */
b2dec116
SZ
1461 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
1462 (tc <= 256)) {
aec7ff27 1463 tc += 64;
c405abe2 1464 if (priv->plat->force_thresh_dma_mode)
f88203a2
VB
1465 priv->hw->dma->dma_mode(priv->ioaddr, tc, tc,
1466 rxfifosz);
c405abe2
SZ
1467 else
1468 priv->hw->dma->dma_mode(priv->ioaddr, tc,
f88203a2 1469 SF_DMA_MODE, rxfifosz);
aec7ff27 1470 priv->xstats.threshold = tc;
47dd7a54 1471 }
aec7ff27
GC
1472 } else if (unlikely(status == tx_hard_error))
1473 stmmac_tx_err(priv);
47dd7a54
GC
1474}
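/*
 * Note on the threshold bump above: starting from the default tc = 64,
 * each tx_hard_error_bump_tc event raises the TX threshold in steps of
 * 64 while the current value is still <= 256, before a plain
 * tx_hard_error ends up in stmmac_tx_err().
 */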
1475
32ceabca
GC
1476/**
1477 * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
1478 * @priv: driver private structure
1479 * Description: this masks the MMC irq, in fact, the counters are managed in SW.
1480 */
1c901a46
GC
1481static void stmmac_mmc_setup(struct stmmac_priv *priv)
1482{
1483 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
ceb69499 1484 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
1c901a46 1485
1c901a46 1486 dwmac_mmc_intr_all_mask(priv->ioaddr);
4f795b25
GC
1487
1488 if (priv->dma_cap.rmon) {
1489 dwmac_mmc_ctrl(priv->ioaddr, mode);
1490 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
1491 } else
aae54cff 1492 pr_info(" No MAC Management Counters available\n");
1c901a46
GC
1493}
1494
732fdf0e
GC
1495/**
1496 * stmmac_get_synopsys_id - return the Synopsys ID (SYNID).
1497 * @priv: driver private structure
1498 * Description: this simple function decodes and returns the Synopsys ID
1499 * read from the HW core register.
1500 */
f0b9d786
GC
1501static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
1502{
1503 u32 hwid = priv->hw->synopsys_uid;
1504
ceb69499 1505 /* Check Synopsys Id (not available on old chips) */
f0b9d786
GC
1506 if (likely(hwid)) {
1507 u32 uid = ((hwid & 0x0000ff00) >> 8);
1508 u32 synid = (hwid & 0x000000ff);
1509
cf3f047b 1510 pr_info("stmmac - user ID: 0x%x, Synopsys ID: 0x%x\n",
f0b9d786
GC
1511 uid, synid);
1512
1513 return synid;
1514 }
1515 return 0;
1516}
e7434821 1517
19e30c14 1518/**
732fdf0e 1519 * stmmac_selec_desc_mode - to select among normal/alternate/extended descriptors
32ceabca
GC
1520 * @priv: driver private structure
1521 * Description: select the Enhanced/Alternate or Normal descriptors.
732fdf0e
GC
1522 * In case of Enhanced/Alternate, it checks if the extended descriptors are
1523 * supported by the HW capability register.
ff3dd78c 1524 */
19e30c14
GC
1525static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
1526{
1527 if (priv->plat->enh_desc) {
1528 pr_info(" Enhanced/Alternate descriptors\n");
c24602ef
GC
1529
1530 /* GMAC older than 3.50 has no extended descriptors */
1531 if (priv->synopsys_id >= DWMAC_CORE_3_50) {
1532 pr_info("\tEnabled extended descriptors\n");
1533 priv->extend_desc = 1;
1534 } else
1535 pr_warn("Extended descriptors not supported\n");
1536
19e30c14
GC
1537 priv->hw->desc = &enh_desc_ops;
1538 } else {
1539 pr_info(" Normal descriptors\n");
1540 priv->hw->desc = &ndesc_ops;
1541 }
1542}
1543
1544/**
732fdf0e 1545 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
32ceabca 1546 * @priv: driver private structure
19e30c14
GC
1547 * Description:
1548 * new GMAC chip generations have a new register to indicate the
1549 * presence of the optional features/functions.
1550 * This can also be used to override the values passed through the
1551 * platform code, and it is necessary for the old MAC10/100 and GMAC chips.
e7434821
GC
1552 */
1553static int stmmac_get_hw_features(struct stmmac_priv *priv)
1554{
f10a6a35 1555 u32 ret = 0;
3c20f72f 1556
5e6efe88 1557 if (priv->hw->dma->get_hw_feature) {
f10a6a35
AT
1558 priv->hw->dma->get_hw_feature(priv->ioaddr,
1559 &priv->dma_cap);
1560 ret = 1;
19e30c14 1561 }
e7434821 1562
f10a6a35 1563 return ret;
e7434821
GC
1564}
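/*
 * The capabilities decoded here land in priv->dma_cap and gate the
 * optional features elsewhere in this file: e.g. stmmac_init_ptp()
 * checks dma_cap.time_stamp/atime_stamp and stmmac_eee_init() checks
 * dma_cap.eee.
 */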
1565
32ceabca 1566/**
732fdf0e 1567 * stmmac_check_ether_addr - check if the MAC addr is valid
32ceabca
GC
1568 * @priv: driver private structure
1569 * Description:
1570 * it verifies that the MAC address is valid; in case of failure it
1571 * generates a random MAC address
1572 */
bfab27a1
GC
1573static void stmmac_check_ether_addr(struct stmmac_priv *priv)
1574{
bfab27a1 1575 if (!is_valid_ether_addr(priv->dev->dev_addr)) {
7ed24bbe 1576 priv->hw->mac->get_umac_addr(priv->hw,
bfab27a1 1577 priv->dev->dev_addr, 0);
ceb69499 1578 if (!is_valid_ether_addr(priv->dev->dev_addr))
f2cedb63 1579 eth_hw_addr_random(priv->dev);
c88460b7
HG
1580 pr_info("%s: device MAC address %pM\n", priv->dev->name,
1581 priv->dev->dev_addr);
bfab27a1 1582 }
bfab27a1
GC
1583}
1584
32ceabca 1585/**
732fdf0e 1586 * stmmac_init_dma_engine - DMA init.
32ceabca
GC
1587 * @priv: driver private structure
1588 * Description:
1589 * It inits the DMA invoking the specific MAC/GMAC callback.
1590 * Some DMA parameters can be passed from the platform;
1591 * if these are not passed, a default is kept for the MAC or GMAC.
1592 */
0f1f88a8
GC
1593static int stmmac_init_dma_engine(struct stmmac_priv *priv)
1594{
afea0365 1595 int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, aal = 0;
b9cde0a8 1596 int mixed_burst = 0;
c24602ef 1597 int atds = 0;
495db273 1598 int ret = 0;
0f1f88a8 1599
1600 if (priv->plat->dma_cfg) {
1601 pbl = priv->plat->dma_cfg->pbl;
1602 fixed_burst = priv->plat->dma_cfg->fixed_burst;
b9cde0a8 1603 mixed_burst = priv->plat->dma_cfg->mixed_burst;
afea0365 1604 aal = priv->plat->dma_cfg->aal;
1605 }
1606
1607 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
1608 atds = 1;
1609
1610 ret = priv->hw->dma->reset(priv->ioaddr);
1611 if (ret) {
1612 dev_err(priv->device, "Failed to reset the dma\n");
1613 return ret;
1614 }
1615
1616 priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst,
1617 aal, priv->dma_tx_phy, priv->dma_rx_phy, atds);
1618
1619 if ((priv->synopsys_id >= DWMAC_CORE_3_50) &&
1620 (priv->plat->axi && priv->hw->dma->axi))
1621 priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
1622
495db273 1623 return ret;
1624}
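/* Note: the atds (alternate descriptor size) flag is passed to the DMA
 * init callback only when extended descriptors are used in ring mode,
 * per the check above; chain mode leaves it cleared.
 */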
1625
9125cdd1 1626/**
732fdf0e 1627 * stmmac_tx_timer - mitigation sw timer for tx.
1628 * @data: data pointer
1629 * Description:
1630 * This is the timer handler to directly invoke the stmmac_tx_clean.
1631 */
1632static void stmmac_tx_timer(unsigned long data)
1633{
1634 struct stmmac_priv *priv = (struct stmmac_priv *)data;
1635
1636 stmmac_tx_clean(priv);
1637}
1638
1639/**
732fdf0e 1640 * stmmac_init_tx_coalesce - init tx mitigation options.
32ceabca 1641 * @priv: driver private structure
1642 * Description:
1643 * This inits the transmit coalesce parameters: i.e. timer rate,
1644 * timer handler and default threshold used for enabling the
1645 * interrupt on completion bit.
1646 */
1647static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
1648{
1649 priv->tx_coal_frames = STMMAC_TX_FRAMES;
1650 priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
1651 init_timer(&priv->txtimer);
1652 priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
1653 priv->txtimer.data = (unsigned long)priv;
1654 priv->txtimer.function = stmmac_tx_timer;
1655 add_timer(&priv->txtimer);
1656}
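/* The timer armed here is re-armed from stmmac_xmit() as long as fewer
 * than tx_coal_frames frames are pending; past that threshold the IC bit
 * is set on the descriptor instead (see the coalesce logic in
 * stmmac_xmit() below).
 */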
1657
523f11b5 1658/**
732fdf0e 1659 * stmmac_hw_setup - setup mac in a usable state.
1660 * @dev : pointer to the device structure.
1661 * Description:
1662 * this is the main function to set up the HW in a usable state: the
1663 * dma engine is reset, the core registers are configured (e.g. AXI,
1664 * Checksum features, timers) and the DMA is ready to start receiving
1665 * and transmitting.
1666 * Return value:
1667 * 0 on success and an appropriate (-)ve integer as defined in errno.h
1668 * file on failure.
1669 */
fe131929 1670static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
1671{
1672 struct stmmac_priv *priv = netdev_priv(dev);
1673 int ret;
1674
1675 /* DMA initialization and SW reset */
1676 ret = stmmac_init_dma_engine(priv);
1677 if (ret < 0) {
1678 pr_err("%s: DMA engine initialization failed\n", __func__);
1679 return ret;
1680 }
1681
1682 /* Copy the MAC addr into the HW */
7ed24bbe 1683 priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);
1684
1685 /* If required, perform hw setup of the bus. */
1686 if (priv->plat->bus_setup)
1687 priv->plat->bus_setup(priv->ioaddr);
1688
1689 /* Initialize the MAC Core */
7ed24bbe 1690 priv->hw->mac->core_init(priv->hw, dev->mtu);
523f11b5 1691
1692 ret = priv->hw->mac->rx_ipc(priv->hw);
1693 if (!ret) {
1694 pr_warn(" RX IPC Checksum Offload disabled\n");
1695 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
d2afb5bd 1696 priv->hw->rx_csum = 0;
1697 }
1698
1699 /* Enable the MAC Rx/Tx */
1700 stmmac_set_mac(priv->ioaddr, true);
1701
1702 /* Set the HW DMA mode and the COE */
1703 stmmac_dma_operation_mode(priv);
1704
1705 stmmac_mmc_setup(priv);
1706
1707 if (init_ptp) {
1708 ret = stmmac_init_ptp(priv);
1709 if (ret && ret != -EOPNOTSUPP)
1710 pr_warn("%s: failed PTP initialisation\n", __func__);
1711 }
523f11b5 1712
50fb4f74 1713#ifdef CONFIG_DEBUG_FS
1714 ret = stmmac_init_fs(dev);
1715 if (ret < 0)
1716 pr_warn("%s: failed debugFS registration\n", __func__);
1717#endif
1718 /* Start the ball rolling... */
1719 pr_debug("%s: DMA RX/TX processes started...\n", dev->name);
1720 priv->hw->dma->start_tx(priv->ioaddr);
1721 priv->hw->dma->start_rx(priv->ioaddr);
1722
1723 /* Dump DMA/MAC registers */
1724 if (netif_msg_hw(priv)) {
7ed24bbe 1725 priv->hw->mac->dump_regs(priv->hw);
1726 priv->hw->dma->dump_regs(priv->ioaddr);
1727 }
1728 priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
1729
1730 if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
1731 priv->rx_riwt = MAX_DMA_RIWT;
1732 priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
1733 }
1734
1735 if (priv->pcs && priv->hw->mac->ctrl_ane)
7ed24bbe 1736 priv->hw->mac->ctrl_ane(priv->hw, 0);
1737
1738 return 0;
1739}
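/* Callers: stmmac_open() invokes this with init_ptp = true, while
 * stmmac_resume() passes false (see below), so PTP is only initialized
 * once at open time.
 */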
1740
1741/**
1742 * stmmac_open - open entry point of the driver
1743 * @dev : pointer to the device structure.
1744 * Description:
1745 * This function is the open entry point of the driver.
1746 * Return value:
1747 * 0 on success and an appropriate (-)ve integer as defined in errno.h
1748 * file on failure.
1749 */
1750static int stmmac_open(struct net_device *dev)
1751{
1752 struct stmmac_priv *priv = netdev_priv(dev);
1753 int ret;
1754
1755 stmmac_check_ether_addr(priv);
1756
1757 if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
1758 priv->pcs != STMMAC_PCS_RTBI) {
1759 ret = stmmac_init_phy(dev);
1760 if (ret) {
1761 pr_err("%s: Cannot attach to PHY (error: %d)\n",
1762 __func__, ret);
89df20d9 1763 return ret;
e58bb43f 1764 }
f66ffe28 1765 }
47dd7a54 1766
1767 /* Extra statistics */
1768 memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
1769 priv->xstats.threshold = tc;
1770
47dd7a54 1771 priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
22ad3838 1772 priv->rx_copybreak = STMMAC_RX_COPYBREAK;
56329137 1773
7262b7b2 1774 ret = alloc_dma_desc_resources(priv);
1775 if (ret < 0) {
1776 pr_err("%s: DMA descriptors allocation failed\n", __func__);
1777 goto dma_desc_error;
1778 }
1779
1780 ret = init_dma_desc_rings(dev, GFP_KERNEL);
1781 if (ret < 0) {
1782 pr_err("%s: DMA descriptors initialization failed\n", __func__);
1783 goto init_error;
1784 }
1785
fe131929 1786 ret = stmmac_hw_setup(dev, true);
56329137 1787 if (ret < 0) {
523f11b5 1788 pr_err("%s: Hw setup failed\n", __func__);
c9324d18 1789 goto init_error;
1790 }
1791
1792 stmmac_init_tx_coalesce(priv);
1793
1794 if (priv->phydev)
1795 phy_start(priv->phydev);
47dd7a54 1796
1797 /* Request the IRQ lines */
1798 ret = request_irq(dev->irq, stmmac_interrupt,
ceb69499 1799 IRQF_SHARED, dev->name, dev);
1800 if (unlikely(ret < 0)) {
1801 pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
1802 __func__, dev->irq, ret);
c9324d18 1803 goto init_error;
1804 }
1805
1806 /* Request the Wake IRQ in case another line is used for WoL */
1807 if (priv->wol_irq != dev->irq) {
1808 ret = request_irq(priv->wol_irq, stmmac_interrupt,
1809 IRQF_SHARED, dev->name, dev);
1810 if (unlikely(ret < 0)) {
1811 pr_err("%s: ERROR: allocating the WoL IRQ %d (%d)\n",
1812 __func__, priv->wol_irq, ret);
c9324d18 1813 goto wolirq_error;
1814 }
1815 }
1816
d765955d 1817 /* Request the LPI IRQ in case a separate line is used for it */
d7ec8584 1818 if (priv->lpi_irq > 0) {
1819 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
1820 dev->name, dev);
1821 if (unlikely(ret < 0)) {
1822 pr_err("%s: ERROR: allocating the LPI IRQ %d (%d)\n",
1823 __func__, priv->lpi_irq, ret);
c9324d18 1824 goto lpiirq_error;
1825 }
1826 }
1827
47dd7a54 1828 napi_enable(&priv->napi);
47dd7a54 1829 netif_start_queue(dev);
f66ffe28 1830
47dd7a54 1831 return 0;
f66ffe28 1832
c9324d18 1833lpiirq_error:
1834 if (priv->wol_irq != dev->irq)
1835 free_irq(priv->wol_irq, dev);
c9324d18 1836wolirq_error:
1837 free_irq(dev->irq, dev);
1838
1839init_error:
1840 free_dma_desc_resources(priv);
56329137 1841dma_desc_error:
1842 if (priv->phydev)
1843 phy_disconnect(priv->phydev);
4bfcbd7a 1844
f66ffe28 1845 return ret;
1846}
1847
1848/**
1849 * stmmac_release - close entry point of the driver
1850 * @dev : device pointer.
1851 * Description:
1852 * This is the stop entry point of the driver.
1853 */
1854static int stmmac_release(struct net_device *dev)
1855{
1856 struct stmmac_priv *priv = netdev_priv(dev);
1857
1858 if (priv->eee_enabled)
1859 del_timer_sync(&priv->eee_ctrl_timer);
1860
1861 /* Stop and disconnect the PHY */
1862 if (priv->phydev) {
1863 phy_stop(priv->phydev);
1864 phy_disconnect(priv->phydev);
1865 priv->phydev = NULL;
1866 }
1867
1868 netif_stop_queue(dev);
1869
47dd7a54 1870 napi_disable(&priv->napi);
47dd7a54 1871
1872 del_timer_sync(&priv->txtimer);
1873
1874 /* Free the IRQ lines */
1875 free_irq(dev->irq, dev);
1876 if (priv->wol_irq != dev->irq)
1877 free_irq(priv->wol_irq, dev);
d7ec8584 1878 if (priv->lpi_irq > 0)
d765955d 1879 free_irq(priv->lpi_irq, dev);
1880
1881 /* Stop TX/RX DMA and clear the descriptors */
1882 priv->hw->dma->stop_tx(priv->ioaddr);
1883 priv->hw->dma->stop_rx(priv->ioaddr);
1884
1885 /* Release and free the Rx/Tx resources */
1886 free_dma_desc_resources(priv);
1887
19449bfc 1888 /* Disable the MAC Rx/Tx */
bfab27a1 1889 stmmac_set_mac(priv->ioaddr, false);
1890
1891 netif_carrier_off(dev);
1892
50fb4f74 1893#ifdef CONFIG_DEBUG_FS
466c5ac8 1894 stmmac_exit_fs(dev);
bfab27a1 1895#endif
bfab27a1 1896
1897 stmmac_release_ptp(priv);
1898
1899 return 0;
1900}
1901
47dd7a54 1902/**
732fdf0e 1903 * stmmac_xmit - Tx entry point of the driver
1904 * @skb : the socket buffer
1905 * @dev : device pointer
1906 * Description : this is the tx entry point of the driver.
1907 * It programs the chain or the ring and supports oversized frames
1908 * and the SG feature.
1909 */
1910static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1911{
1912 struct stmmac_priv *priv = netdev_priv(dev);
0e80bdc9 1913 unsigned int nopaged_len = skb_headlen(skb);
4a7d666a 1914 int i, csum_insertion = 0, is_jumbo = 0;
47dd7a54 1915 int nfrags = skb_shinfo(skb)->nr_frags;
0e80bdc9 1916 unsigned int entry, first_entry;
47dd7a54 1917 struct dma_desc *desc, *first;
0e80bdc9 1918 unsigned int enh_desc;
47dd7a54 1919
1920 spin_lock(&priv->tx_lock);
1921
47dd7a54 1922 if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
16ee817e 1923 spin_unlock(&priv->tx_lock);
1924 if (!netif_queue_stopped(dev)) {
1925 netif_stop_queue(dev);
1926 /* This is a hard error, log it. */
ceb69499 1927 pr_err("%s: Tx Ring full when queue awake\n", __func__);
1928 }
1929 return NETDEV_TX_BUSY;
1930 }
1931
1932 if (priv->tx_path_in_lpi_mode)
1933 stmmac_disable_eee_mode(priv);
1934
e3ad57c9 1935 entry = priv->cur_tx;
0e80bdc9 1936 first_entry = entry;
47dd7a54 1937
5e982f3b 1938 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
47dd7a54 1939
0e80bdc9 1940 if (likely(priv->extend_desc))
ceb69499 1941 desc = (struct dma_desc *)(priv->dma_etx + entry);
1942 else
1943 desc = priv->dma_tx + entry;
1944
1945 first = desc;
1946
1947 priv->tx_skbuff[first_entry] = skb;
1948
1949 enh_desc = priv->plat->enh_desc;
4a7d666a 1950 /* To program the descriptors according to the size of the frame */
1951 if (enh_desc)
1952 is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
1953
0e80bdc9 1954 if (unlikely(is_jumbo)) {
29896a67 1955 entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
1956 if (unlikely(entry < 0))
1957 goto dma_map_err;
29896a67 1958 }
1959
1960 for (i = 0; i < nfrags; i++) {
1961 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1962 int len = skb_frag_size(frag);
be434d50 1963 bool last_segment = (i == (nfrags - 1));
47dd7a54 1964
1965 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1966
0e80bdc9 1967 if (likely(priv->extend_desc))
ceb69499 1968 desc = (struct dma_desc *)(priv->dma_etx + entry);
1969 else
1970 desc = priv->dma_tx + entry;
47dd7a54 1971
1972 desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
1973 DMA_TO_DEVICE);
1974 if (dma_mapping_error(priv->device, desc->des2))
1975 goto dma_map_err; /* should reuse desc w/o issues */
1976
0e80bdc9 1977 priv->tx_skbuff[entry] = NULL;
1978 priv->tx_skbuff_dma[entry].buf = desc->des2;
1979 priv->tx_skbuff_dma[entry].map_as_page = true;
553e2ab3 1980 priv->tx_skbuff_dma[entry].len = len;
1981 priv->tx_skbuff_dma[entry].last_segment = last_segment;
1982
1983 /* Prepare the descriptor and set the own bit too */
4a7d666a 1984 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
be434d50 1985 priv->mode, 1, last_segment);
1986 }
1987
1988 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1989
1990 priv->cur_tx = entry;
47dd7a54 1991
47dd7a54 1992 if (netif_msg_pktdata(priv)) {
1993 pr_debug("%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
1994 __func__, priv->cur_tx, priv->dirty_tx, first_entry,
1995 entry, first, nfrags);
83d7af64 1996
c24602ef 1997 if (priv->extend_desc)
1998 stmmac_display_ring((void *)priv->dma_etx,
1999 DMA_TX_SIZE, 1);
c24602ef 2000 else
2001 stmmac_display_ring((void *)priv->dma_tx,
2002 DMA_TX_SIZE, 0);
c24602ef 2003
83d7af64 2004 pr_debug(">>> frame to be transmitted: ");
2005 print_pkt(skb->data, skb->len);
2006 }
0e80bdc9 2007
47dd7a54 2008 if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
2009 if (netif_msg_hw(priv))
2010 pr_debug("%s: stop transmitted packets\n", __func__);
2011 netif_stop_queue(dev);
2012 }
2013
2014 dev->stats.tx_bytes += skb->len;
2015
2016 /* According to the coalesce parameter the IC bit for the latest
2017 * segment is reset and the timer re-started to clean the tx status.
2018 * This approach takes care of the fragments: desc is the first
2019 * element in case of no SG.
2020 */
2021 priv->tx_count_frames += nfrags + 1;
2022 if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2023 mod_timer(&priv->txtimer,
2024 STMMAC_COAL_TIMER(priv->tx_coal_timer));
2025 } else {
2026 priv->tx_count_frames = 0;
2027 priv->hw->desc->set_tx_ic(desc);
2028 priv->xstats.tx_set_ic_bit++;
2029 }
2030
2031 if (!priv->hwts_tx_en)
2032 skb_tx_timestamp(skb);
3e82ce12 2033
2034 /* Ready to fill the first descriptor and set the OWN bit w/o any
2035 * problems because all the descriptors are actually ready to be
2036 * passed to the DMA engine.
2037 */
2038 if (likely(!is_jumbo)) {
2039 bool last_segment = (nfrags == 0);
2040
2041 first->des2 = dma_map_single(priv->device, skb->data,
2042 nopaged_len, DMA_TO_DEVICE);
2043 if (dma_mapping_error(priv->device, first->des2))
2044 goto dma_map_err;
2045
2046 priv->tx_skbuff_dma[first_entry].buf = first->des2;
2047 priv->tx_skbuff_dma[first_entry].len = nopaged_len;
2048 priv->tx_skbuff_dma[first_entry].last_segment = last_segment;
2049
2050 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2051 priv->hwts_tx_en)) {
2052 /* declare that device is doing timestamping */
2053 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2054 priv->hw->desc->enable_tx_timestamp(first);
2055 }
2056
2057 /* Prepare the first descriptor setting the OWN bit too */
2058 priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
2059 csum_insertion, priv->mode, 1,
2060 last_segment);
2061
2062 /* The own bit must be the latest setting done when prepare the
2063 * descriptor and then barrier is needed to make sure that
2064 * all is coherent before granting the DMA engine.
2065 */
2066 smp_wmb();
2067 }
2068
38979574 2069 netdev_sent_queue(dev, skb->len);
2070 priv->hw->dma->enable_dma_transmission(priv->ioaddr);
2071
a9097a96 2072 spin_unlock(&priv->tx_lock);
362b37be 2073 return NETDEV_TX_OK;
a9097a96 2074
362b37be 2075dma_map_err:
758a0ab5 2076 spin_unlock(&priv->tx_lock);
2077 dev_err(priv->device, "Tx dma map failed\n");
2078 dev_kfree_skb(skb);
2079 priv->dev->stats.tx_dropped++;
2080 return NETDEV_TX_OK;
2081}
2082
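/**
 * stmmac_rx_vlan - strip the 802.1Q VLAN tag from a received frame
 * @dev: device the frame was received on
 * @skb: socket buffer holding the frame
 * Description: if CTAG RX offload is enabled in the features, pop the
 * VLAN header out of the frame and record the tag via
 * __vlan_hwaccel_put_tag() so the stack sees an untagged frame.
 */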
2083static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
2084{
2085 struct ethhdr *ehdr;
2086 u16 vlanid;
2087
2088 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
2089 NETIF_F_HW_VLAN_CTAG_RX &&
2090 !__vlan_get_tag(skb, &vlanid)) {
2091 /* pop the vlan tag */
2092 ehdr = (struct ethhdr *)skb->data;
2093 memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
2094 skb_pull(skb, VLAN_HLEN);
2095 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
2096 }
2097}
2098
2099
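/* stmmac_rx_threshold_count - returns 1 once rx_zeroc_thresh has reached
 * STMMAC_RX_THRESH, i.e. while zero-copy refills keep failing and the Rx
 * path should fall back to copying frames (see stmmac_rx() below).
 */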
2100static inline int stmmac_rx_threshold_count(struct stmmac_priv *priv)
2101{
2102 if (priv->rx_zeroc_thresh < STMMAC_RX_THRESH)
2103 return 0;
2104
2105 return 1;
2106}
2107
32ceabca 2108/**
732fdf0e 2109 * stmmac_rx_refill - refill used skb preallocated buffers
2110 * @priv: driver private structure
2111 * Description : this reallocates the skbs for the reception process
2112 * that is based on zero-copy.
2113 */
2114static inline void stmmac_rx_refill(struct stmmac_priv *priv)
2115{
47dd7a54 2116 int bfsize = priv->dma_buf_sz;
2117 unsigned int entry = priv->dirty_rx;
2118 int dirty = stmmac_rx_dirty(priv);
47dd7a54 2119
e3ad57c9 2120 while (dirty-- > 0) {
2121 struct dma_desc *p;
2122
2123 if (priv->extend_desc)
ceb69499 2124 p = (struct dma_desc *)(priv->dma_erx + entry);
2125 else
2126 p = priv->dma_rx + entry;
2127
2128 if (likely(priv->rx_skbuff[entry] == NULL)) {
2129 struct sk_buff *skb;
2130
acb600de 2131 skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
2132 if (unlikely(!skb)) {
2133 /* so for a while no zero-copy! */
2134 priv->rx_zeroc_thresh = STMMAC_RX_THRESH;
2135 if (unlikely(net_ratelimit()))
2136 dev_err(priv->device,
2137 "fail to alloc skb entry %d\n",
2138 entry);
47dd7a54 2139 break;
120e87f9 2140 }
2141
2142 priv->rx_skbuff[entry] = skb;
2143 priv->rx_skbuff_dma[entry] =
2144 dma_map_single(priv->device, skb->data, bfsize,
2145 DMA_FROM_DEVICE);
2146 if (dma_mapping_error(priv->device,
2147 priv->rx_skbuff_dma[entry])) {
2148 dev_err(priv->device, "Rx dma map failed\n");
2149 dev_kfree_skb(skb);
2150 break;
2151 }
c24602ef 2152 p->des2 = priv->rx_skbuff_dma[entry];
286a8372 2153
29896a67 2154 priv->hw->mode->refill_desc3(priv, p);
286a8372 2155
2156 if (priv->rx_zeroc_thresh > 0)
2157 priv->rx_zeroc_thresh--;
2158
2159 if (netif_msg_rx_status(priv))
2160 pr_debug("\trefill entry #%d\n", entry);
47dd7a54 2161 }
120e87f9 2162
eb0dc4bb 2163 wmb();
c24602ef 2164 priv->hw->desc->set_rx_owner(p);
8e839891 2165 wmb();
2166
2167 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
47dd7a54 2168 }
e3ad57c9 2169 priv->dirty_rx = entry;
2170}
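/* The wmb() pair above is deliberate: the first makes the refilled buffer
 * address in des2 visible before the OWN bit hands the descriptor back to
 * the DMA; the second flushes the OWN bit write itself.
 */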
2171
32ceabca 2172/**
732fdf0e 2173 * stmmac_rx - manage the receive process
2174 * @priv: driver private structure
2175 * @limit: napi budget.
2176 * Description : this is the function called by the napi poll method.
2177 * It gets all the frames inside the ring.
2178 */
2179static int stmmac_rx(struct stmmac_priv *priv, int limit)
2180{
e3ad57c9 2181 unsigned int entry = priv->cur_rx;
47dd7a54
GC
2182 unsigned int next_entry;
2183 unsigned int count = 0;
d2afb5bd 2184 int coe = priv->hw->rx_csum;
47dd7a54 2185
2186 if (netif_msg_rx_status(priv)) {
2187 pr_debug("%s: descriptor ring:\n", __func__);
c24602ef 2188 if (priv->extend_desc)
2189 stmmac_display_ring((void *)priv->dma_erx,
2190 DMA_RX_SIZE, 1);
c24602ef 2191 else
2192 stmmac_display_ring((void *)priv->dma_rx,
2193 DMA_RX_SIZE, 0);
47dd7a54 2194 }
c24602ef 2195 while (count < limit) {
47dd7a54 2196 int status;
9401bb5c 2197 struct dma_desc *p;
47dd7a54 2198
c24602ef 2199 if (priv->extend_desc)
ceb69499 2200 p = (struct dma_desc *)(priv->dma_erx + entry);
c24602ef 2201 else
ceb69499 2202 p = priv->dma_rx + entry;
c24602ef 2203
2204 /* read the status of the incoming frame */
2205 status = priv->hw->desc->rx_status(&priv->dev->stats,
2206 &priv->xstats, p);
2207 /* check if managed by the DMA otherwise go ahead */
2208 if (unlikely(status & dma_own))
2209 break;
2210
2211 count++;
2212
2213 priv->cur_rx = STMMAC_GET_ENTRY(priv->cur_rx, DMA_RX_SIZE);
2214 next_entry = priv->cur_rx;
2215
c24602ef 2216 if (priv->extend_desc)
9401bb5c 2217 prefetch(priv->dma_erx + next_entry);
c24602ef 2218 else
9401bb5c 2219 prefetch(priv->dma_rx + next_entry);
47dd7a54 2220
2221 if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
2222 priv->hw->desc->rx_extended_status(&priv->dev->stats,
2223 &priv->xstats,
2224 priv->dma_erx +
2225 entry);
891434b1 2226 if (unlikely(status == discard_frame)) {
47dd7a54 2227 priv->dev->stats.rx_errors++;
2228 if (priv->hwts_rx_en && !priv->extend_desc) {
2229 /* DESC2 & DESC3 will be overwritten by device
2230 * with timestamp value, hence reinitialize
2231 * them in stmmac_rx_refill() function so that
2232 * device can reuse it.
2233 */
2234 priv->rx_skbuff[entry] = NULL;
2235 dma_unmap_single(priv->device,
2236 priv->rx_skbuff_dma[entry],
2237 priv->dma_buf_sz,
2238 DMA_FROM_DEVICE);
2239 }
2240 } else {
47dd7a54 2241 struct sk_buff *skb;
3eeb2997 2242 int frame_len;
47dd7a54 2243
2244 frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
2245
2246 /* check if frame_len fits the preallocated memory */
2247 if (frame_len > priv->dma_buf_sz) {
2248 priv->dev->stats.rx_length_errors++;
2249 break;
2250 }
2251
3eeb2997 2252 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
2253 * Type frames (LLC/LLC-SNAP)
2254 */
2255 if (unlikely(status != llc_snap))
2256 frame_len -= ETH_FCS_LEN;
47dd7a54 2257
83d7af64 2258 if (netif_msg_rx_status(priv)) {
47dd7a54 2259 pr_debug("\tdesc: %p [entry %d] buff=0x%x\n",
ceb69499 2260 p, entry, p->des2);
2261 if (frame_len > ETH_FRAME_LEN)
2262 pr_debug("\tframe size %d, COE: %d\n",
2263 frame_len, status);
2264 }
22ad3838 2265
2266 if (unlikely((frame_len < priv->rx_copybreak) ||
2267 stmmac_rx_threshold_count(priv))) {
2268 skb = netdev_alloc_skb_ip_align(priv->dev,
2269 frame_len);
2270 if (unlikely(!skb)) {
2271 if (net_ratelimit())
2272 dev_warn(priv->device,
2273 "packet dropped\n");
2274 priv->dev->stats.rx_dropped++;
2275 break;
2276 }
2277
2278 dma_sync_single_for_cpu(priv->device,
2279 priv->rx_skbuff_dma
2280 [entry], frame_len,
2281 DMA_FROM_DEVICE);
2282 skb_copy_to_linear_data(skb,
2283 priv->
2284 rx_skbuff[entry]->data,
2285 frame_len);
2286
2287 skb_put(skb, frame_len);
2288 dma_sync_single_for_device(priv->device,
2289 priv->rx_skbuff_dma
2290 [entry], frame_len,
2291 DMA_FROM_DEVICE);
2292 } else {
2293 skb = priv->rx_skbuff[entry];
2294 if (unlikely(!skb)) {
2295 pr_err("%s: Inconsistent Rx chain\n",
2296 priv->dev->name);
2297 priv->dev->stats.rx_dropped++;
2298 break;
2299 }
2300 prefetch(skb->data - NET_IP_ALIGN);
2301 priv->rx_skbuff[entry] = NULL;
120e87f9 2302 priv->rx_zeroc_thresh++;
2303
2304 skb_put(skb, frame_len);
2305 dma_unmap_single(priv->device,
2306 priv->rx_skbuff_dma[entry],
2307 priv->dma_buf_sz,
2308 DMA_FROM_DEVICE);
47dd7a54 2309 }
47dd7a54 2310
2311 stmmac_get_rx_hwtstamp(priv, entry, skb);
2312
47dd7a54 2313 if (netif_msg_pktdata(priv)) {
83d7af64 2314 pr_debug("frame received (%dbytes)", frame_len);
2315 print_pkt(skb->data, frame_len);
2316 }
83d7af64 2317
2318 stmmac_rx_vlan(priv->dev, skb);
2319
2320 skb->protocol = eth_type_trans(skb, priv->dev);
2321
ceb69499 2322 if (unlikely(!coe))
bc8acf2c 2323 skb_checksum_none_assert(skb);
62a2ab93 2324 else
47dd7a54 2325 skb->ip_summed = CHECKSUM_UNNECESSARY;
2326
2327 napi_gro_receive(&priv->napi, skb);
2328
2329 priv->dev->stats.rx_packets++;
2330 priv->dev->stats.rx_bytes += frame_len;
2331 }
2332 entry = next_entry;
2333 }
2334
2335 stmmac_rx_refill(priv);
2336
2337 priv->xstats.rx_pkt_n += count;
2338
2339 return count;
2340}
2341
2342/**
2343 * stmmac_poll - stmmac poll method (NAPI)
2344 * @napi : pointer to the napi structure.
2345 * @budget : maximum number of packets that the current CPU can receive from
2346 * all interfaces.
2347 * Description :
9125cdd1 2348 * To look at the incoming frames and clear the tx resources.
2349 */
2350static int stmmac_poll(struct napi_struct *napi, int budget)
2351{
2352 struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
2353 int work_done = 0;
2354
2355 priv->xstats.napi_poll++;
2356 stmmac_tx_clean(priv);
47dd7a54 2357
9125cdd1 2358 work_done = stmmac_rx(priv, budget);
2359 if (work_done < budget) {
2360 napi_complete(napi);
9125cdd1 2361 stmmac_enable_dma_irq(priv);
2362 }
2363 return work_done;
2364}
2365
2366/**
2367 * stmmac_tx_timeout
2368 * @dev : Pointer to net device structure
2369 * Description: this function is called when a packet transmission fails to
7284a3f1 2370 * complete within a reasonable time. The driver will mark the error in the
2371 * netdev structure and arrange for the device to be reset to a sane state
2372 * in order to transmit a new packet.
2373 */
2374static void stmmac_tx_timeout(struct net_device *dev)
2375{
2376 struct stmmac_priv *priv = netdev_priv(dev);
2377
2378 /* Clear Tx resources and restart transmitting again */
2379 stmmac_tx_err(priv);
2380}
2381
47dd7a54 2382/**
01789349 2383 * stmmac_set_rx_mode - entry point for multicast addressing
2384 * @dev : pointer to the device structure
2385 * Description:
2386 * This function is a driver entry point which gets called by the kernel
2387 * whenever multicast addresses must be enabled/disabled.
2388 * Return value:
2389 * void.
2390 */
01789349 2391static void stmmac_set_rx_mode(struct net_device *dev)
2392{
2393 struct stmmac_priv *priv = netdev_priv(dev);
2394
3b57de95 2395 priv->hw->mac->set_filter(priv->hw, dev);
2396}
2397
2398/**
2399 * stmmac_change_mtu - entry point to change MTU size for the device.
2400 * @dev : device pointer.
2401 * @new_mtu : the new MTU size for the device.
2402 * Description: the Maximum Transmission Unit (MTU) is used by the network layer
2403 * to drive packet transmission. Ethernet has an MTU of 1500 octets
2404 * (ETH_DATA_LEN). This value can be changed with ifconfig.
2405 * Return value:
2406 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2407 * file on failure.
2408 */
2409static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
2410{
2411 struct stmmac_priv *priv = netdev_priv(dev);
2412 int max_mtu;
2413
2414 if (netif_running(dev)) {
2415 pr_err("%s: must be stopped to change its MTU\n", dev->name);
2416 return -EBUSY;
2417 }
2418
48febf7e 2419 if (priv->plat->enh_desc)
2420 max_mtu = JUMBO_LEN;
2421 else
45db81e1 2422 max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
47dd7a54 2423
2424 if (priv->plat->maxmtu < max_mtu)
2425 max_mtu = priv->plat->maxmtu;
2426
2427 if ((new_mtu < 46) || (new_mtu > max_mtu)) {
2428 pr_err("%s: invalid MTU, max MTU is: %d\n", dev->name, max_mtu);
2429 return -EINVAL;
2430 }
2431
2432 dev->mtu = new_mtu;
2433 netdev_update_features(dev);
2434
2435 return 0;
2436}
2437
c8f44aff 2438static netdev_features_t stmmac_fix_features(struct net_device *dev,
ceb69499 2439 netdev_features_t features)
2440{
2441 struct stmmac_priv *priv = netdev_priv(dev);
2442
38912bdb 2443 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5e982f3b 2444 features &= ~NETIF_F_RXCSUM;
d2afb5bd 2445
5e982f3b 2446 if (!priv->plat->tx_coe)
a188222b 2447 features &= ~NETIF_F_CSUM_MASK;
5e982f3b 2448
2449 /* Some GMAC devices have a bugged Jumbo frame support that
2450 * needs to have the Tx COE disabled for oversized frames
2451 * (due to limited buffer sizes). In this case we disable
2452 * the TX csum insertion in the TDES and does not use SF.
2453 */
5e982f3b 2454 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
a188222b 2455 features &= ~NETIF_F_CSUM_MASK;
ebbb293f 2456
5e982f3b 2457 return features;
2458}
2459
2460static int stmmac_set_features(struct net_device *netdev,
2461 netdev_features_t features)
2462{
2463 struct stmmac_priv *priv = netdev_priv(netdev);
2464
2465 /* Keep the COE Type in case csum is supported */
2466 if (features & NETIF_F_RXCSUM)
2467 priv->hw->rx_csum = priv->plat->rx_coe;
2468 else
2469 priv->hw->rx_csum = 0;
2470 /* No check needed because rx_coe has been set before and it will be
2471 * fixed in case of issue.
2472 */
2473 priv->hw->mac->rx_ipc(priv->hw);
2474
2475 return 0;
2476}
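/* Note: toggling NETIF_F_RXCSUM only flips priv->hw->rx_csum between 0 and
 * the COE type chosen at probe time (priv->plat->rx_coe) and re-runs
 * rx_ipc() to reprogram the core accordingly.
 */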
2477
2478/**
2479 * stmmac_interrupt - main ISR
2480 * @irq: interrupt number.
2481 * @dev_id: to pass the net device pointer.
2482 * Description: this is the main driver interrupt service routine.
2483 * It can call:
2484 * o DMA service routine (to manage incoming frame reception and transmission
2485 * status)
2486 * o Core interrupts to manage: remote wake-up, management counter, LPI
2487 * interrupts.
32ceabca 2488 */
2489static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
2490{
2491 struct net_device *dev = (struct net_device *)dev_id;
2492 struct stmmac_priv *priv = netdev_priv(dev);
2493
2494 if (priv->irq_wake)
2495 pm_wakeup_event(priv->device, 0);
2496
2497 if (unlikely(!dev)) {
2498 pr_err("%s: invalid dev pointer\n", __func__);
2499 return IRQ_NONE;
2500 }
2501
2502 /* To handle GMAC own interrupts */
2503 if (priv->plat->has_gmac) {
7ed24bbe 2504 int status = priv->hw->mac->host_irq_status(priv->hw,
0982a0f6 2505 &priv->xstats);
d765955d 2506 if (unlikely(status)) {
d765955d 2507 /* For LPI we need to save the tx status */
0982a0f6 2508 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
d765955d 2509 priv->tx_path_in_lpi_mode = true;
0982a0f6 2510 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
d765955d 2511 priv->tx_path_in_lpi_mode = false;
2512 }
2513 }
aec7ff27 2514
d765955d 2515 /* To handle DMA interrupts */
aec7ff27 2516 stmmac_dma_interrupt(priv);
2517
2518 return IRQ_HANDLED;
2519}
2520
2521#ifdef CONFIG_NET_POLL_CONTROLLER
2522/* Polling receive - used by NETCONSOLE and other diagnostic tools
2523 * to allow network I/O with interrupts disabled.
2524 */
2525static void stmmac_poll_controller(struct net_device *dev)
2526{
2527 disable_irq(dev->irq);
2528 stmmac_interrupt(dev->irq, dev);
2529 enable_irq(dev->irq);
2530}
2531#endif
2532
2533/**
2534 * stmmac_ioctl - Entry point for the Ioctl
2535 * @dev: Device pointer.
2536 * @rq: An IOCTL specific structure that can contain a pointer to
2537 * a proprietary structure used to pass information to the driver.
2538 * @cmd: IOCTL command
2539 * Description:
32ceabca 2540 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
2541 */
2542static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2543{
2544 struct stmmac_priv *priv = netdev_priv(dev);
891434b1 2545 int ret = -EOPNOTSUPP;
2546
2547 if (!netif_running(dev))
2548 return -EINVAL;
2549
2550 switch (cmd) {
2551 case SIOCGMIIPHY:
2552 case SIOCGMIIREG:
2553 case SIOCSMIIREG:
2554 if (!priv->phydev)
2555 return -EINVAL;
2556 ret = phy_mii_ioctl(priv->phydev, rq, cmd);
2557 break;
2558 case SIOCSHWTSTAMP:
2559 ret = stmmac_hwtstamp_ioctl(dev, rq);
2560 break;
2561 default:
2562 break;
2563 }
28b04113 2564
2565 return ret;
2566}
2567
50fb4f74 2568#ifdef CONFIG_DEBUG_FS
7ac29055 2569static struct dentry *stmmac_fs_dir;
7ac29055 2570
c24602ef 2571static void sysfs_display_ring(void *head, int size, int extend_desc,
ceb69499 2572 struct seq_file *seq)
7ac29055 2573{
7ac29055 2574 int i;
2575 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
2576 struct dma_desc *p = (struct dma_desc *)head;
7ac29055 2577
2578 for (i = 0; i < size; i++) {
2579 u64 x;
2580 if (extend_desc) {
2581 x = *(u64 *) ep;
2582 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
2583 i, (unsigned int)virt_to_phys(ep),
2584 (unsigned int)x, (unsigned int)(x >> 32),
2585 ep->basic.des2, ep->basic.des3);
2586 ep++;
2587 } else {
2588 x = *(u64 *) p;
2589 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
2590 i, (unsigned int)virt_to_phys(p),
2591 (unsigned int)x, (unsigned int)(x >> 32),
2592 p->des2, p->des3);
2593 p++;
2594 }
2595 seq_printf(seq, "\n");
2596 }
c24602ef 2597}
7ac29055 2598
2599static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
2600{
2601 struct net_device *dev = seq->private;
2602 struct stmmac_priv *priv = netdev_priv(dev);
7ac29055 2603
2604 if (priv->extend_desc) {
2605 seq_printf(seq, "Extended RX descriptor ring:\n");
e3ad57c9 2606 sysfs_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1, seq);
c24602ef 2607 seq_printf(seq, "Extended TX descriptor ring:\n");
e3ad57c9 2608 sysfs_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1, seq);
2609 } else {
2610 seq_printf(seq, "RX descriptor ring:\n");
e3ad57c9 2611 sysfs_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0, seq);
c24602ef 2612 seq_printf(seq, "TX descriptor ring:\n");
e3ad57c9 2613 sysfs_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0, seq);
2614 }
2615
2616 return 0;
2617}
2618
2619static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
2620{
2621 return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
2622}
2623
2624static const struct file_operations stmmac_rings_status_fops = {
2625 .owner = THIS_MODULE,
2626 .open = stmmac_sysfs_ring_open,
2627 .read = seq_read,
2628 .llseek = seq_lseek,
74863948 2629 .release = single_release,
2630};
2631
2632static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
2633{
2634 struct net_device *dev = seq->private;
2635 struct stmmac_priv *priv = netdev_priv(dev);
2636
19e30c14 2637 if (!priv->hw_cap_support) {
2638 seq_printf(seq, "DMA HW features not supported\n");
2639 return 0;
2640 }
2641
2642 seq_printf(seq, "==============================\n");
2643 seq_printf(seq, "\tDMA HW features\n");
2644 seq_printf(seq, "==============================\n");
2645
2646 seq_printf(seq, "\t10/100 Mbps %s\n",
2647 (priv->dma_cap.mbps_10_100) ? "Y" : "N");
2648 seq_printf(seq, "\t1000 Mbps %s\n",
2649 (priv->dma_cap.mbps_1000) ? "Y" : "N");
2650 seq_printf(seq, "\tHalf duplex %s\n",
2651 (priv->dma_cap.half_duplex) ? "Y" : "N");
2652 seq_printf(seq, "\tHash Filter: %s\n",
2653 (priv->dma_cap.hash_filter) ? "Y" : "N");
2654 seq_printf(seq, "\tMultiple MAC address registers: %s\n",
2655 (priv->dma_cap.multi_addr) ? "Y" : "N");
2656 seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
2657 (priv->dma_cap.pcs) ? "Y" : "N");
2658 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
2659 (priv->dma_cap.sma_mdio) ? "Y" : "N");
2660 seq_printf(seq, "\tPMT Remote wake up: %s\n",
2661 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
2662 seq_printf(seq, "\tPMT Magic Frame: %s\n",
2663 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
2664 seq_printf(seq, "\tRMON module: %s\n",
2665 (priv->dma_cap.rmon) ? "Y" : "N");
2666 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
2667 (priv->dma_cap.time_stamp) ? "Y" : "N");
2668 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
2669 (priv->dma_cap.atime_stamp) ? "Y" : "N");
2670 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE) %s\n",
2671 (priv->dma_cap.eee) ? "Y" : "N");
2672 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
2673 seq_printf(seq, "\tChecksum Offload in TX: %s\n",
2674 (priv->dma_cap.tx_coe) ? "Y" : "N");
2675 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
2676 (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
2677 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
2678 (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
2679 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
2680 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
2681 seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
2682 priv->dma_cap.number_rx_channel);
2683 seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
2684 priv->dma_cap.number_tx_channel);
2685 seq_printf(seq, "\tEnhanced descriptors: %s\n",
2686 (priv->dma_cap.enh_desc) ? "Y" : "N");
2687
2688 return 0;
2689}
2690
2691static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
2692{
2693 return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
2694}
2695
2696static const struct file_operations stmmac_dma_cap_fops = {
2697 .owner = THIS_MODULE,
2698 .open = stmmac_sysfs_dma_cap_open,
2699 .read = seq_read,
2700 .llseek = seq_lseek,
74863948 2701 .release = single_release,
2702};
2703
2704static int stmmac_init_fs(struct net_device *dev)
2705{
2706 struct stmmac_priv *priv = netdev_priv(dev);
2707
2708 /* Create per netdev entries */
2709 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
7ac29055 2710
2711 if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
2712 pr_err("ERROR %s/%s, debugfs create directory failed\n",
2713 STMMAC_RESOURCE_NAME, dev->name);
2714
2715 return -ENOMEM;
2716 }
2717
2718 /* Entry to report DMA RX/TX rings */
2719 priv->dbgfs_rings_status =
2720 debugfs_create_file("descriptors_status", S_IRUGO,
2721 priv->dbgfs_dir, dev,
2722 &stmmac_rings_status_fops);
7ac29055 2723
466c5ac8 2724 if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
7ac29055 2725 pr_info("ERROR creating stmmac ring debugfs file\n");
466c5ac8 2726 debugfs_remove_recursive(priv->dbgfs_dir);
2727
2728 return -ENOMEM;
2729 }
2730
e7434821 2731 /* Entry to report the DMA HW features */
2732 priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO,
2733 priv->dbgfs_dir,
2734 dev, &stmmac_dma_cap_fops);
e7434821 2735
466c5ac8 2736 if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
e7434821 2737 pr_info("ERROR creating stmmac DMA capabilities debugfs file\n");
466c5ac8 2738 debugfs_remove_recursive(priv->dbgfs_dir);
2739
2740 return -ENOMEM;
2741 }
2742
2743 return 0;
2744}
2745
466c5ac8 2746static void stmmac_exit_fs(struct net_device *dev)
7ac29055 2747{
2748 struct stmmac_priv *priv = netdev_priv(dev);
2749
2750 debugfs_remove_recursive(priv->dbgfs_dir);
7ac29055 2751}
50fb4f74 2752#endif /* CONFIG_DEBUG_FS */
7ac29055 2753
2754static const struct net_device_ops stmmac_netdev_ops = {
2755 .ndo_open = stmmac_open,
2756 .ndo_start_xmit = stmmac_xmit,
2757 .ndo_stop = stmmac_release,
2758 .ndo_change_mtu = stmmac_change_mtu,
5e982f3b 2759 .ndo_fix_features = stmmac_fix_features,
d2afb5bd 2760 .ndo_set_features = stmmac_set_features,
01789349 2761 .ndo_set_rx_mode = stmmac_set_rx_mode,
2762 .ndo_tx_timeout = stmmac_tx_timeout,
2763 .ndo_do_ioctl = stmmac_ioctl,
2764#ifdef CONFIG_NET_POLL_CONTROLLER
2765 .ndo_poll_controller = stmmac_poll_controller,
2766#endif
2767 .ndo_set_mac_address = eth_mac_addr,
2768};
2769
2770/**
2771 * stmmac_hw_init - Init the MAC device
32ceabca 2772 * @priv: driver private structure
2773 * Description: this function is to configure the MAC device according to
2774 * some platform parameters or the HW capability register. It prepares the
2775 * driver to use either ring or chain modes and to set up either enhanced or
2776 * normal descriptors.
2777 */
2778static int stmmac_hw_init(struct stmmac_priv *priv)
2779{
2780 struct mac_device_info *mac;
2781
2782 /* Identify the MAC HW device */
2783 if (priv->plat->has_gmac) {
2784 priv->dev->priv_flags |= IFF_UNICAST_FLT;
2785 mac = dwmac1000_setup(priv->ioaddr,
2786 priv->plat->multicast_filter_bins,
2787 priv->plat->unicast_filter_entries);
03f2eecd 2788 } else {
cf3f047b 2789 mac = dwmac100_setup(priv->ioaddr);
03f2eecd 2790 }
2791 if (!mac)
2792 return -ENOMEM;
2793
2794 priv->hw = mac;
2795
cf3f047b 2796 /* Get and dump the chip ID */
cffb13f4 2797 priv->synopsys_id = stmmac_get_synopsys_id(priv);
cf3f047b 2798
4a7d666a 2799 /* To use the chained or ring mode */
ceb69499 2800 if (chain_mode) {
29896a67 2801 priv->hw->mode = &chain_mode_ops;
2802 pr_info(" Chain mode enabled\n");
2803 priv->mode = STMMAC_CHAIN_MODE;
2804 } else {
29896a67 2805 priv->hw->mode = &ring_mode_ops;
2806 pr_info(" Ring mode enabled\n");
2807 priv->mode = STMMAC_RING_MODE;
2808 }
2809
2810 /* Get the HW capability (available on GMAC newer than 3.50a) */
2811 priv->hw_cap_support = stmmac_get_hw_features(priv);
2812 if (priv->hw_cap_support) {
2813 pr_info(" DMA HW capability register supported");
2814
2815 /* We can override some gmac/dma configuration fields: e.g.
2816 * enh_desc, tx_coe (e.g. that are passed through the
2817 * platform) with the values from the HW capability
2818 * register (if supported).
2819 */
2820 priv->plat->enh_desc = priv->dma_cap.enh_desc;
cf3f047b 2821 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
38912bdb 2822
2823 /* TXCOE doesn't work in thresh DMA mode */
2824 if (priv->plat->force_thresh_dma_mode)
2825 priv->plat->tx_coe = 0;
2826 else
2827 priv->plat->tx_coe = priv->dma_cap.tx_coe;
2828
2829 if (priv->dma_cap.rx_coe_type2)
2830 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
2831 else if (priv->dma_cap.rx_coe_type1)
2832 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
2833
2834 } else
2835 pr_info(" No HW DMA feature register supported");
2836
2837 /* To use alternate (extended) or normal descriptor structures */
2838 stmmac_selec_desc_mode(priv);
2839
2840 if (priv->plat->rx_coe) {
2841 priv->hw->rx_csum = priv->plat->rx_coe;
2842 pr_info(" RX Checksum Offload Engine supported (type %d)\n",
2843 priv->plat->rx_coe);
d2afb5bd 2844 }
2845 if (priv->plat->tx_coe)
2846 pr_info(" TX Checksum insertion supported\n");
2847
2848 if (priv->plat->pmt) {
2849 pr_info(" Wake-Up On Lan supported\n");
2850 device_set_wakeup_capable(priv->device, 1);
2851 }
2852
c24602ef 2853 return 0;
2854}
2855
47dd7a54 2856/**
2857 * stmmac_dvr_probe
2858 * @device: device pointer
ff3dd78c 2859 * @plat_dat: platform data pointer
e56788cf 2860 * @res: stmmac resource pointer
2861 * Description: this is the main probe function used to
2862 * call the alloc_etherdev, allocate the priv structure.
9afec6ef 2863 * Return:
15ffac73 2864 * returns 0 on success, otherwise errno.
47dd7a54 2865 */
2866int stmmac_dvr_probe(struct device *device,
2867 struct plat_stmmacenet_data *plat_dat,
2868 struct stmmac_resources *res)
2869{
2870 int ret = 0;
2871 struct net_device *ndev = NULL;
2872 struct stmmac_priv *priv;
47dd7a54 2873
bfab27a1 2874 ndev = alloc_etherdev(sizeof(struct stmmac_priv));
41de8d4c 2875 if (!ndev)
15ffac73 2876 return -ENOMEM;
2877
2878 SET_NETDEV_DEV(ndev, device);
2879
2880 priv = netdev_priv(ndev);
2881 priv->device = device;
2882 priv->dev = ndev;
47dd7a54 2883
bfab27a1 2884 stmmac_set_ethtool_ops(ndev);
2885 priv->pause = pause;
2886 priv->plat = plat_dat;
2887 priv->ioaddr = res->addr;
2888 priv->dev->base_addr = (unsigned long)res->addr;
2889
2890 priv->dev->irq = res->irq;
2891 priv->wol_irq = res->wol_irq;
2892 priv->lpi_irq = res->lpi_irq;
2893
2894 if (res->mac)
2895 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
cf3f047b 2896
a7a62685 2897 dev_set_drvdata(device, priv->dev);
803f8fc4 2898
2899 /* Verify driver arguments */
2900 stmmac_verify_args();
bfab27a1 2901
cf3f047b 2902 /* Override with kernel parameters if supplied XXX CRS XXX
2903 * this needs to have multiple instances
2904 */
2905 if ((phyaddr >= 0) && (phyaddr <= 31))
2906 priv->plat->phy_addr = phyaddr;
2907
2908 priv->stmmac_clk = devm_clk_get(priv->device, STMMAC_RESOURCE_NAME);
2909 if (IS_ERR(priv->stmmac_clk)) {
2910 dev_warn(priv->device, "%s: warning: cannot get CSR clock\n",
2911 __func__);
2912 /* If failed to obtain stmmac_clk and specific clk_csr value
2913 * is NOT passed from the platform, probe fail.
2914 */
2915 if (!priv->plat->clk_csr) {
2916 ret = PTR_ERR(priv->stmmac_clk);
2917 goto error_clk_get;
2918 } else {
2919 priv->stmmac_clk = NULL;
2920 }
2921 }
2922 clk_prepare_enable(priv->stmmac_clk);
2923
2924 priv->pclk = devm_clk_get(priv->device, "pclk");
2925 if (IS_ERR(priv->pclk)) {
2926 if (PTR_ERR(priv->pclk) == -EPROBE_DEFER) {
2927 ret = -EPROBE_DEFER;
2928 goto error_pclk_get;
2929 }
2930 priv->pclk = NULL;
2931 }
2932 clk_prepare_enable(priv->pclk);
2933
2934 priv->stmmac_rst = devm_reset_control_get(priv->device,
2935 STMMAC_RESOURCE_NAME);
2936 if (IS_ERR(priv->stmmac_rst)) {
2937 if (PTR_ERR(priv->stmmac_rst) == -EPROBE_DEFER) {
2938 ret = -EPROBE_DEFER;
2939 goto error_hw_init;
2940 }
2941 dev_info(priv->device, "no reset control found\n");
2942 priv->stmmac_rst = NULL;
2943 }
2944 if (priv->stmmac_rst)
2945 reset_control_deassert(priv->stmmac_rst);
2946
cf3f047b 2947 /* Init MAC and get the capabilities */
2948 ret = stmmac_hw_init(priv);
2949 if (ret)
62866e98 2950 goto error_hw_init;
2951
2952 ndev->netdev_ops = &stmmac_netdev_ops;
bfab27a1 2953
2954 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2955 NETIF_F_RXCSUM;
2956 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
2957 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
2958#ifdef STMMAC_VLAN_TAG_USED
2959 /* Both mac100 and gmac support receive VLAN tag detection */
f646968f 2960 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
2961#endif
2962 priv->msg_enable = netif_msg_init(debug, default_msg_level);
2963
2964 if (flow_ctrl)
2965 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
2966
2967 /* Rx Watchdog is available in the COREs newer than 3.40.
2968 * In some cases, for example on bugged HW, this feature
2969 * has to be disabled and this can be done by passing the
2970 * riwt_off field from the platform.
2971 */
2972 if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
2973 priv->use_riwt = 1;
2974 pr_info(" Enable RX Mitigation via HW Watchdog Timer\n");
2975 }
2976
bfab27a1 2977 netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
47dd7a54 2978
f8e96161 2979 spin_lock_init(&priv->lock);
a9097a96 2980 spin_lock_init(&priv->tx_lock);
f8e96161 2981
bfab27a1 2982 ret = register_netdev(ndev);
47dd7a54 2983 if (ret) {
cf3f047b 2984 pr_err("%s: ERROR %i registering the device\n", __func__, ret);
6a81c26f 2985 goto error_netdev_register;
2986 }
2987
2988 /* If a specific clk_csr value is passed from the platform
2989 * this means that the CSR Clock Range selection cannot be
2990 * changed at run-time and it is fixed. Otherwise the driver will try
2991 * to set the MDC clock dynamically according to the actual csr
2992 * clock input.
2993 */
2994 if (!priv->plat->clk_csr)
2995 stmmac_clk_csr_set(priv);
2996 else
2997 priv->clk_csr = priv->plat->clk_csr;
2998
2999 stmmac_check_pcs_mode(priv);
3000
3001 if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
3002 priv->pcs != STMMAC_PCS_RTBI) {
3003 /* MDIO bus Registration */
3004 ret = stmmac_mdio_register(ndev);
3005 if (ret < 0) {
3006 pr_debug("%s: MDIO bus (id: %d) registration failed",
3007 __func__, priv->plat->bus_id);
3008 goto error_mdio_register;
3009 }
3010 }
3011
15ffac73 3012 return 0;
47dd7a54 3013
6a81c26f 3014error_mdio_register:
34a52f36 3015 unregister_netdev(ndev);
3016error_netdev_register:
3017 netif_napi_del(&priv->napi);
62866e98 3018error_hw_init:
3019 clk_disable_unprepare(priv->pclk);
3020error_pclk_get:
3021 clk_disable_unprepare(priv->stmmac_clk);
3022error_clk_get:
34a52f36 3023 free_netdev(ndev);
47dd7a54 3024
15ffac73 3025 return ret;
47dd7a54 3026}
b2e2f0c7 3027EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
3028
3029/**
3030 * stmmac_dvr_remove
bfab27a1 3031 * @ndev: net device pointer
47dd7a54 3032 * Description: this function resets the TX/RX processes, disables the MAC RX/TX
bfab27a1 3033 * changes the link status, releases the DMA descriptor rings.
47dd7a54 3034 */
bfab27a1 3035int stmmac_dvr_remove(struct net_device *ndev)
47dd7a54 3036{
aec7ff27 3037 struct stmmac_priv *priv = netdev_priv(ndev);
3038
3039 pr_info("%s:\n\tremoving driver", __func__);
3040
3041 priv->hw->dma->stop_rx(priv->ioaddr);
3042 priv->hw->dma->stop_tx(priv->ioaddr);
47dd7a54 3043
bfab27a1 3044 stmmac_set_mac(priv->ioaddr, false);
47dd7a54 3045 netif_carrier_off(ndev);
47dd7a54 3046 unregister_netdev(ndev);
3047 if (priv->stmmac_rst)
3048 reset_control_assert(priv->stmmac_rst);
5f9755d2 3049 clk_disable_unprepare(priv->pclk);
62866e98 3050 clk_disable_unprepare(priv->stmmac_clk);
3051 if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
3052 priv->pcs != STMMAC_PCS_RTBI)
3053 stmmac_mdio_unregister(ndev);
3054 free_netdev(ndev);
3055
3056 return 0;
3057}
b2e2f0c7 3058EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
47dd7a54 3059
3060/**
3061 * stmmac_suspend - suspend callback
3062 * @ndev: net device pointer
3063 * Description: this is the function to suspend the device and it is called
3064 * by the platform driver to stop the network queue, release the resources,
3065 * program the PMT register (for WoL), clean and release driver resources.
3066 */
bfab27a1 3067int stmmac_suspend(struct net_device *ndev)
47dd7a54 3068{
874bd42d 3069 struct stmmac_priv *priv = netdev_priv(ndev);
f8c5a875 3070 unsigned long flags;
47dd7a54 3071
874bd42d 3072 if (!ndev || !netif_running(ndev))
3073 return 0;
3074
3075 if (priv->phydev)
3076 phy_stop(priv->phydev);
3077
f8c5a875 3078 spin_lock_irqsave(&priv->lock, flags);
47dd7a54 3079
3080 netif_device_detach(ndev);
3081 netif_stop_queue(ndev);
47dd7a54 3082
3083 napi_disable(&priv->napi);
3084
3085 /* Stop TX/RX DMA */
3086 priv->hw->dma->stop_tx(priv->ioaddr);
3087 priv->hw->dma->stop_rx(priv->ioaddr);
c24602ef 3088
874bd42d 3089 /* Enable Power down mode by programming the PMT regs */
89f7f2cf 3090 if (device_may_wakeup(priv->device)) {
7ed24bbe 3091 priv->hw->mac->pmt(priv->hw, priv->wolopts);
3092 priv->irq_wake = 1;
3093 } else {
bfab27a1 3094 stmmac_set_mac(priv->ioaddr, false);
db88f10a 3095 pinctrl_pm_select_sleep_state(priv->device);
ba1377ff 3096 /* Disable the clocks in case PMT is off */
5f9755d2 3097 clk_disable(priv->pclk);
777da230 3098 clk_disable(priv->stmmac_clk);
ba1377ff 3099 }
f8c5a875 3100 spin_unlock_irqrestore(&priv->lock, flags);
3101
3102 priv->oldlink = 0;
3103 priv->speed = 0;
3104 priv->oldduplex = -1;
3105 return 0;
3106}
b2e2f0c7 3107EXPORT_SYMBOL_GPL(stmmac_suspend);
47dd7a54 3108
3109/**
3110 * stmmac_resume - resume callback
3111 * @ndev: net device pointer
3112 * Description: when resume this function is invoked to setup the DMA and CORE
3113 * in a usable state.
3114 */
bfab27a1 3115int stmmac_resume(struct net_device *ndev)
47dd7a54 3116{
874bd42d 3117 struct stmmac_priv *priv = netdev_priv(ndev);
f8c5a875 3118 unsigned long flags;
47dd7a54 3119
874bd42d 3120 if (!netif_running(ndev))
3121 return 0;
3122
f8c5a875 3123 spin_lock_irqsave(&priv->lock, flags);
c4433be6 3124
3125 /* The Power Down bit, in the PM register, is cleared
3126 * automatically as soon as a magic packet or a Wake-up frame
3127 * is received. Anyway, it's better to manually clear
3128 * this bit because it can generate problems while resuming
3129 * from other devices (e.g. serial console).
3130 */
623997fb 3131 if (device_may_wakeup(priv->device)) {
7ed24bbe 3132 priv->hw->mac->pmt(priv->hw, 0);
89f7f2cf 3133 priv->irq_wake = 0;
623997fb 3134 } else {
db88f10a 3135 pinctrl_pm_select_default_state(priv->device);
ba1377ff 3136 /* enable the clocks previously disabled */
777da230 3137 clk_enable(priv->stmmac_clk);
5f9755d2 3138 clk_enable(priv->pclk);
3139 /* reset the phy so that it's ready */
3140 if (priv->mii)
3141 stmmac_mdio_reset(priv->mii);
3142 }
47dd7a54 3143
874bd42d 3144 netif_device_attach(ndev);
47dd7a54 3145
3146 priv->cur_rx = 0;
3147 priv->dirty_rx = 0;
3148 priv->dirty_tx = 0;
3149 priv->cur_tx = 0;
3150 stmmac_clear_descriptors(priv);
3151
fe131929 3152 stmmac_hw_setup(ndev, false);
777da230 3153 stmmac_init_tx_coalesce(priv);
ac316c78 3154 stmmac_set_rx_mode(ndev);
47dd7a54 3155
3156 napi_enable(&priv->napi);
3157
874bd42d 3158 netif_start_queue(ndev);
47dd7a54 3159
f8c5a875 3160 spin_unlock_irqrestore(&priv->lock, flags);
3161
3162 if (priv->phydev)
3163 phy_start(priv->phydev);
3164
3165 return 0;
3166}
b2e2f0c7 3167EXPORT_SYMBOL_GPL(stmmac_resume);
ba27ec66 3168
3169#ifndef MODULE
3170static int __init stmmac_cmdline_opt(char *str)
3171{
3172 char *opt;
3173
3174 if (!str || !*str)
3175 return -EINVAL;
3176 while ((opt = strsep(&str, ",")) != NULL) {
f3240e28 3177 if (!strncmp(opt, "debug:", 6)) {
ea2ab871 3178 if (kstrtoint(opt + 6, 0, &debug))
3179 goto err;
3180 } else if (!strncmp(opt, "phyaddr:", 8)) {
ea2ab871 3181 if (kstrtoint(opt + 8, 0, &phyaddr))
f3240e28 3182 goto err;
f3240e28 3183 } else if (!strncmp(opt, "buf_sz:", 7)) {
ea2ab871 3184 if (kstrtoint(opt + 7, 0, &buf_sz))
3185 goto err;
3186 } else if (!strncmp(opt, "tc:", 3)) {
ea2ab871 3187 if (kstrtoint(opt + 3, 0, &tc))
3188 goto err;
3189 } else if (!strncmp(opt, "watchdog:", 9)) {
ea2ab871 3190 if (kstrtoint(opt + 9, 0, &watchdog))
3191 goto err;
3192 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
ea2ab871 3193 if (kstrtoint(opt + 10, 0, &flow_ctrl))
3194 goto err;
3195 } else if (!strncmp(opt, "pause:", 6)) {
ea2ab871 3196 if (kstrtoint(opt + 6, 0, &pause))
f3240e28 3197 goto err;
506f669c 3198 } else if (!strncmp(opt, "eee_timer:", 10)) {
3199 if (kstrtoint(opt + 10, 0, &eee_timer))
3200 goto err;
3201 } else if (!strncmp(opt, "chain_mode:", 11)) {
3202 if (kstrtoint(opt + 11, 0, &chain_mode))
3203 goto err;
f3240e28 3204 }
47dd7a54
GC
3205 }
3206 return 0;
3207
3208err:
3209 pr_err("%s: ERROR broken module parameter conversion", __func__);
3210 return -EINVAL;
3211}
3212
3213__setup("stmmaceth=", stmmac_cmdline_opt);
ceb69499 3214#endif /* MODULE */
6fc0d0f2 3215
3216static int __init stmmac_init(void)
3217{
3218#ifdef CONFIG_DEBUG_FS
3219 /* Create debugfs main directory if it doesn't exist yet */
3220 if (!stmmac_fs_dir) {
3221 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
3222
3223 if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
3224 pr_err("ERROR %s, debugfs create directory failed\n",
3225 STMMAC_RESOURCE_NAME);
3226
3227 return -ENOMEM;
3228 }
3229 }
3230#endif
3231
3232 return 0;
3233}
3234
3235static void __exit stmmac_exit(void)
3236{
3237#ifdef CONFIG_DEBUG_FS
3238 debugfs_remove_recursive(stmmac_fs_dir);
3239#endif
3240}
3241
3242module_init(stmmac_init)
3243module_exit(stmmac_exit)
3244
3245MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
3246MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
3247MODULE_LICENSE("GPL");