/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

  Copyright(C) 2007-2011 STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
	http://www.stlinux.com
  Support available at:
	https://bugzilla.stlinux.com/
*******************************************************************************/

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#ifdef CONFIG_STMMAC_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif
#include <linux/net_tstamp.h>
#include "stmmac_ptp.h"
#include "stmmac.h"

#undef STMMAC_DEBUG
/*#define STMMAC_DEBUG*/
#ifdef STMMAC_DEBUG
#define DBG(nlevel, klevel, fmt, args...) \
		((void)(netif_msg_##nlevel(priv) && \
		printk(KERN_##klevel fmt, ## args)))
#else
#define DBG(nlevel, klevel, fmt, args...) do { } while (0)
#endif

#undef STMMAC_RX_DEBUG
/*#define STMMAC_RX_DEBUG*/
#ifdef STMMAC_RX_DEBUG
#define RX_DBG(fmt, args...) printk(fmt, ## args)
#else
#define RX_DBG(fmt, args...) do { } while (0)
#endif

#undef STMMAC_XMIT_DEBUG
/*#define STMMAC_XMIT_DEBUG*/
#ifdef STMMAC_XMIT_DEBUG
#define TX_DBG(fmt, args...) printk(fmt, ## args)
#else
#define TX_DBG(fmt, args...) do { } while (0)
#endif

#define STMMAC_ALIGN(x)	L1_CACHE_ALIGN(x)
#define JUMBO_LEN	9000

/* Module parameters */
#define TX_TIMEO	5000 /* default 5 seconds */
static int watchdog = TX_TIMEO;
module_param(watchdog, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds");

static int debug = -1;		/* -1: default, 0: no output, 16: all */
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Message Level (0: no output, 16: all)");

int phyaddr = -1;
module_param(phyaddr, int, S_IRUGO);
MODULE_PARM_DESC(phyaddr, "Physical device address");

#define DMA_TX_SIZE 256
static int dma_txsize = DMA_TX_SIZE;
module_param(dma_txsize, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dma_txsize, "Number of descriptors in the TX list");

#define DMA_RX_SIZE 256
static int dma_rxsize = DMA_RX_SIZE;
module_param(dma_rxsize, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dma_rxsize, "Number of descriptors in the RX list");

static int flow_ctrl = FLOW_OFF;
module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define DMA_BUFFER_SIZE	BUF_SIZE_2KiB
static int buf_sz = DMA_BUFFER_SIZE;
module_param(buf_sz, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_TIMER(x) (jiffies + msecs_to_jiffies(x))

/* By default the driver uses the ring mode to manage tx and rx descriptors,
 * but this parameter can be passed to force the driver to use the chain mode
 * instead of the ring.
 */
static unsigned int chain_mode;
module_param(chain_mode, int, S_IRUGO);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");

static irqreturn_t stmmac_interrupt(int irq, void *dev_id);

#ifdef CONFIG_STMMAC_DEBUG_FS
static int stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(void);
#endif

#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))

/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it verifies if some wrong parameter is passed to the driver.
 * Note that wrong parameters are replaced with the default values.
 */
static void stmmac_verify_args(void)
{
	if (unlikely(watchdog < 0))
		watchdog = TX_TIMEO;
	if (unlikely(dma_rxsize < 0))
		dma_rxsize = DMA_RX_SIZE;
	if (unlikely(dma_txsize < 0))
		dma_txsize = DMA_TX_SIZE;
	if (unlikely((buf_sz < DMA_BUFFER_SIZE) || (buf_sz > BUF_SIZE_16KiB)))
		buf_sz = DMA_BUFFER_SIZE;
	if (unlikely(flow_ctrl > 1))
		flow_ctrl = FLOW_AUTO;
	else if (likely(flow_ctrl < 0))
		flow_ctrl = FLOW_OFF;
	if (unlikely((pause < 0) || (pause > 0xffff)))
		pause = PAUSE_TIME;
	if (eee_timer < 0)
		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}

static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
	u32 clk_rate;

	clk_rate = clk_get_rate(priv->stmmac_clk);

	/* The platform-provided default clk_csr is assumed to be valid
	 * in all cases except the ones handled below. */
	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
		if (clk_rate < CSR_F_35M)
			priv->clk_csr = STMMAC_CSR_20_35M;
		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
			priv->clk_csr = STMMAC_CSR_35_60M;
		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
			priv->clk_csr = STMMAC_CSR_60_100M;
		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
			priv->clk_csr = STMMAC_CSR_100_150M;
		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
			priv->clk_csr = STMMAC_CSR_150_250M;
		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
			priv->clk_csr = STMMAC_CSR_250_300M;
	} /* For rates above the highest frequency specified by IEEE 802.3
	   * we cannot estimate the proper divider, since the frequency of
	   * clk_csr_i is not known; so we keep the default divider. */
}
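
/* Worked example for the selection above (illustrative values, not from
 * the databook): with a 75 MHz application clock, clk_rate falls in the
 * [CSR_F_60M, CSR_F_100M) band, so clk_csr becomes STMMAC_CSR_60_100M
 * and the MDC clock is clk_csr_i divided by the divider that band selects.
 */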

#if defined(STMMAC_XMIT_DEBUG) || defined(STMMAC_RX_DEBUG)
static void print_pkt(unsigned char *buf, int len)
{
	int j;
	pr_info("len = %d byte, buf addr: 0x%p", len, buf);
	for (j = 0; j < len; j++) {
		if ((j % 16) == 0)
			pr_info("\n %03x:", j);
		pr_info(" %02x", buf[j]);
	}
	pr_info("\n");
}
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define STMMAC_TX_THRESH(x)	(x->dma_tx_size/4)

static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
{
	return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1;
}
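
/* Example of the free-descriptor arithmetic above (hypothetical values):
 * with dma_tx_size = 256, dirty_tx = 10 and cur_tx = 13, three descriptors
 * are in flight and 10 + 256 - 13 - 1 = 252 are still available. The "- 1"
 * keeps one slot unused, so cur_tx == dirty_tx always means "ring empty"
 * rather than "ring full".
 */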

/* On some ST platforms, some HW system configuration registers have to be
 * set according to the link speed negotiated.
 */
static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
{
	struct phy_device *phydev = priv->phydev;

	if (likely(priv->plat->fix_mac_speed))
		priv->plat->fix_mac_speed(priv->plat->bsp_priv,
					  phydev->speed);
}

static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
	/* Check and enter in LPI mode */
	if ((priv->dirty_tx == priv->cur_tx) &&
	    (priv->tx_path_in_lpi_mode == false))
		priv->hw->mac->set_eee_mode(priv->ioaddr);
}

void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	/* Exit and disable EEE in case we are in LPI state. */
	priv->hw->mac->reset_eee_mode(priv->ioaddr);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer
 * @arg : data hook
 * Description:
 * If there is no data transfer and we are not already in LPI state,
 * then the MAC Transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(unsigned long arg)
{
	struct stmmac_priv *priv = (struct stmmac_priv *)arg;

	stmmac_enable_eee_mode(priv);
	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_TIMER(eee_timer));
}

/**
 * stmmac_eee_init
 * @priv: private device pointer
 * Description:
 * If the EEE support has been enabled while configuring the driver,
 * the GMAC actually supports EEE (from the HW cap reg) and the phy
 * can also manage EEE, then enable the LPI state and start the timer
 * to verify if the tx path can enter the LPI state.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	bool ret = false;

	/* MAC core supports the EEE feature. */
	if (priv->dma_cap.eee) {
		/* Check if the PHY supports EEE */
		if (phy_init_eee(priv->phydev, 1))
			goto out;

		priv->eee_active = 1;
		init_timer(&priv->eee_ctrl_timer);
		priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer;
		priv->eee_ctrl_timer.data = (unsigned long)priv;
		priv->eee_ctrl_timer.expires = STMMAC_LPI_TIMER(eee_timer);
		add_timer(&priv->eee_ctrl_timer);

		priv->hw->mac->set_eee_timer(priv->ioaddr,
					     STMMAC_DEFAULT_LIT_LS_TIMER,
					     priv->tx_lpi_timer);

		pr_info("stmmac: Energy-Efficient Ethernet initialized\n");

		ret = true;
	}
out:
	return ret;
}

static void stmmac_eee_adjust(struct stmmac_priv *priv)
{
	/* When EEE has already been initialised we have to
	 * modify the PLS bit in the LPI ctrl & status reg according
	 * to the PHY link status.
	 */
	if (priv->eee_enabled)
		priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link);
}

/* stmmac_get_tx_hwtstamp:
 * @priv : pointer to private device structure.
 * @entry : descriptor index to be used.
 * @skb : the socket buffer
 * Description :
 * This function reads the timestamp from the descriptor, performs some
 * sanity checks and then passes it to the stack.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
				   unsigned int entry,
				   struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	u64 ns;
	void *desc = NULL;

	if (!priv->hwts_tx_en)
		return;

	/* exit if skb doesn't support hw tstamp */
	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
		return;

	if (priv->adv_ts)
		desc = (priv->dma_etx + entry);
	else
		desc = (priv->dma_tx + entry);

	/* check tx tstamp status */
	if (!priv->hw->desc->get_tx_timestamp_status((struct dma_desc *)desc))
		return;

	/* get the valid tstamp */
	ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);

	memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
	shhwtstamp.hwtstamp = ns_to_ktime(ns);
	/* pass tstamp to stack */
	skb_tstamp_tx(skb, &shhwtstamp);
}

/* stmmac_get_rx_hwtstamp:
 * @priv : pointer to private device structure.
 * @entry : descriptor index to be used.
 * @skb : the socket buffer
 * Description :
 * This function reads the received packet's timestamp from the descriptor
 * and passes it to the stack. It also performs some sanity checks.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv,
				   unsigned int entry,
				   struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
	u64 ns;
	void *desc = NULL;

	if (!priv->hwts_rx_en)
		return;

	if (priv->adv_ts)
		desc = (priv->dma_erx + entry);
	else
		desc = (priv->dma_rx + entry);

	/* exit if rx tstamp is not valid */
	if (!priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts))
		return;

	/* get valid tstamp */
	ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
	shhwtstamp = skb_hwtstamps(skb);
	memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
	shhwtstamp->hwtstamp = ns_to_ktime(ns);
}

/**
 * stmmac_hwtstamp_ioctl - control hardware timestamping.
 * @dev: device pointer.
 * @ifr: An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * Description:
 * This function configures the MAC to enable/disable both outgoing(TX)
 * and incoming(RX) packets time stamping based on user input.
 * Return Value:
 * 0 on success and an appropriate -ve integer on failure.
 */
static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	struct timespec now;
	u64 temp = 0;
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;
	u32 value = 0;

	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(struct hwtstamp_config)))
		return -EFAULT;

	pr_debug("%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		 __func__, config.flags, config.tx_type, config.rx_filter);

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->hwts_tx_en = 0;
		break;
	case HWTSTAMP_TX_ON:
		priv->hwts_tx_en = 1;
		break;
	default:
		return -ERANGE;
	}

	if (priv->adv_ts) {
		switch (config.rx_filter) {
		/* time stamp no incoming packet at all */
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		/* PTP v1, UDP, any kind of event packet */
		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		/* PTP v1, UDP, Sync packet */
		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		/* PTP v1, UDP, Delay_req packet */
		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		/* PTP v2, UDP, any kind of event packet */
		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		/* PTP v2, UDP, Sync packet */
		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		/* PTP v2, UDP, Delay_req packet */
		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		/* PTP v2/802.AS1, any layer, any kind of event packet */
		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		/* PTP v2/802.AS1, any layer, Sync packet */
		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		/* PTP v2/802.AS1, any layer, Delay_req packet */
		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		/* time stamp any incoming packet */
		case HWTSTAMP_FILTER_ALL:
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);

	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
		priv->hw->ptp->config_hw_tstamping(priv->ioaddr, 0);
	else {
		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
			 tstamp_all | ptp_v2 | ptp_over_ethernet |
			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
			 ts_master_en | snap_type_sel);

		priv->hw->ptp->config_hw_tstamping(priv->ioaddr, value);

		/* program Sub Second Increment reg */
		priv->hw->ptp->config_sub_second_increment(priv->ioaddr);

		/* calculate the default addend value:
		 * the formula is:
		 * addend = (2^32)/freq_div_ratio;
		 * where, freq_div_ratio = STMMAC_SYSCLOCK/50MHz
		 * hence, addend = ((2^32) * 50MHz)/STMMAC_SYSCLOCK;
		 * NOTE: STMMAC_SYSCLOCK should be >= 50MHz to
		 * achieve 20ns accuracy.
		 *
		 * 2^x * y == (y << x), hence
		 * 2^32 * 50000000 ==> (50000000 << 32)
		 */
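		/* Worked example, assuming a (hypothetical) STMMAC_SYSCLOCK
		 * of 62.5 MHz:
		 *   addend = (2^32 * 50000000) / 62500000
		 *          = 2^32 * 0.8 ~= 0xcccccccc
		 * i.e. the subsecond counter is advanced 0.8 of a tick per
		 * clock cycle, emulating the required 50 MHz reference.
		 */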
		temp = (u64)(50000000ULL << 32);
		priv->default_addend = div_u64(temp, STMMAC_SYSCLOCK);
		priv->hw->ptp->config_addend(priv->ioaddr,
					     priv->default_addend);

		/* initialize system time */
		getnstimeofday(&now);
		priv->hw->ptp->init_systime(priv->ioaddr, now.tv_sec,
					    now.tv_nsec);
	}

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
}

static int stmmac_init_ptp(struct stmmac_priv *priv)
{
	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	if (netif_msg_hw(priv)) {
		if (priv->dma_cap.time_stamp) {
			pr_debug("IEEE 1588-2002 Time Stamp supported\n");
			priv->adv_ts = 0;
		}
		if (priv->dma_cap.atime_stamp && priv->extend_desc) {
			pr_debug("IEEE 1588-2008 Advanced Time Stamp supported\n");
			priv->adv_ts = 1;
		}
	}

	priv->hw->ptp = &stmmac_ptp;
	priv->hwts_tx_en = 0;
	priv->hwts_rx_en = 0;

	return stmmac_ptp_register(priv);
}

static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	stmmac_ptp_unregister(priv);
}

/**
 * stmmac_adjust_link
 * @dev: net device structure
 * Description: it adjusts the link parameters.
 */
static void stmmac_adjust_link(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;
	unsigned long flags;
	int new_state = 0;
	unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;

	if (phydev == NULL)
		return;

	DBG(probe, DEBUG, "stmmac_adjust_link: called.  address %d link %d\n",
	    phydev->addr, phydev->link);

	spin_lock_irqsave(&priv->lock, flags);

	if (phydev->link) {
		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				ctrl &= ~priv->hw->link.duplex;
			else
				ctrl |= priv->hw->link.duplex;
			priv->oldduplex = phydev->duplex;
		}
		/* Flow Control operation */
		if (phydev->pause)
			priv->hw->mac->flow_ctrl(priv->ioaddr, phydev->duplex,
						 fc, pause_time);

		if (phydev->speed != priv->speed) {
			new_state = 1;
			switch (phydev->speed) {
			case 1000:
				if (likely(priv->plat->has_gmac))
					ctrl &= ~priv->hw->link.port;
				stmmac_hw_fix_mac_speed(priv);
				break;
			case 100:
			case 10:
				if (priv->plat->has_gmac) {
					ctrl |= priv->hw->link.port;
					if (phydev->speed == SPEED_100) {
						ctrl |= priv->hw->link.speed;
					} else {
						ctrl &= ~(priv->hw->link.speed);
					}
				} else {
					ctrl &= ~priv->hw->link.port;
				}
				stmmac_hw_fix_mac_speed(priv);
				break;
			default:
				if (netif_msg_link(priv))
					pr_warning("%s: Speed (%d) is not 10"
						   " or 100!\n",
						   dev->name, phydev->speed);
				break;
			}

			priv->speed = phydev->speed;
		}

		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->speed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	stmmac_eee_adjust(priv);

	spin_unlock_irqrestore(&priv->lock, flags);

	DBG(probe, DEBUG, "stmmac_adjust_link: exiting\n");
}

static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
	int interface = priv->plat->interface;

	if (priv->dma_cap.pcs) {
		if ((interface & PHY_INTERFACE_MODE_RGMII) ||
		    (interface & PHY_INTERFACE_MODE_RGMII_ID) ||
		    (interface & PHY_INTERFACE_MODE_RGMII_RXID) ||
		    (interface & PHY_INTERFACE_MODE_RGMII_TXID)) {
			pr_debug("STMMAC: PCS RGMII support enable\n");
			priv->pcs = STMMAC_PCS_RGMII;
		} else if (interface & PHY_INTERFACE_MODE_SGMII) {
			pr_debug("STMMAC: PCS SGMII support enable\n");
			priv->pcs = STMMAC_PCS_SGMII;
		}
	}
}

/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 *  Return value:
 *  0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev;
	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
	char bus_id[MII_BUS_ID_SIZE];
	int interface = priv->plat->interface;
	priv->oldlink = 0;
	priv->speed = 0;
	priv->oldduplex = -1;

	if (priv->plat->phy_bus_name)
		snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
			 priv->plat->phy_bus_name, priv->plat->bus_id);
	else
		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
			 priv->plat->bus_id);

	snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
		 priv->plat->phy_addr);
	pr_debug("stmmac_init_phy:  trying to attach to %s\n", phy_id_fmt);

	phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, interface);

	if (IS_ERR(phydev)) {
		pr_err("%s: Could not attach to PHY\n", dev->name);
		return PTR_ERR(phydev);
	}

	/* Stop Advertising 1000BASE Capability if interface is not GMII */
	if ((interface == PHY_INTERFACE_MODE_MII) ||
	    (interface == PHY_INTERFACE_MODE_RMII))
		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
					 SUPPORTED_1000baseT_Full);

	/*
	 * Broken HW is sometimes missing the pull-up resistor on the
	 * MDIO line, which results in reads to non-existent devices returning
	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
	 * device as well.
	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
	 */
	if (phydev->phy_id == 0) {
		phy_disconnect(phydev);
		return -ENODEV;
	}
	pr_debug("stmmac_init_phy:  %s: attached to PHY (UID 0x%x)"
		 " Link = %d\n", dev->name, phydev->phy_id, phydev->link);

	priv->phydev = phydev;

	return 0;
}

/**
 * stmmac_display_ring
 * @head: pointer to the ring.
 * @size: size of the ring.
 * @extend_desc: set when extended descriptors are used.
 * Description: display the control/status and buffer descriptors.
 */
static void stmmac_display_ring(void *head, int size, int extend_desc)
{
	int i;
	struct dma_extended_desc *ep = (struct dma_extended_desc *) head;
	struct dma_desc *p = (struct dma_desc *) head;

	for (i = 0; i < size; i++) {
		u64 x;
		if (extend_desc) {
			x = *(u64 *) ep;
			pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
				i, (unsigned int) virt_to_phys(ep),
				(unsigned int) x, (unsigned int) (x >> 32),
				ep->basic.des2, ep->basic.des3);
			ep++;
		} else {
			x = *(u64 *) p;
			pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
				i, (unsigned int) virt_to_phys(p),
				(unsigned int) x, (unsigned int) (x >> 32),
				p->des2, p->des3);
			p++;
		}
		pr_info("\n");
	}
}

static void stmmac_display_rings(struct stmmac_priv *priv)
{
	unsigned int txsize = priv->dma_tx_size;
	unsigned int rxsize = priv->dma_rx_size;

	if (priv->extend_desc) {
		pr_info("Extended RX descriptor ring:\n");
		stmmac_display_ring((void *) priv->dma_erx, rxsize, 1);
		pr_info("Extended TX descriptor ring:\n");
		stmmac_display_ring((void *) priv->dma_etx, txsize, 1);
	} else {
		pr_info("RX descriptor ring:\n");
		stmmac_display_ring((void *)priv->dma_rx, rxsize, 0);
		pr_info("TX descriptor ring:\n");
		stmmac_display_ring((void *)priv->dma_tx, txsize, 0);
	}
}

static int stmmac_set_bfsize(int mtu, int bufsize)
{
	int ret = bufsize;

	if (mtu >= BUF_SIZE_4KiB)
		ret = BUF_SIZE_8KiB;
	else if (mtu >= BUF_SIZE_2KiB)
		ret = BUF_SIZE_4KiB;
	else if (mtu >= DMA_BUFFER_SIZE)
		ret = BUF_SIZE_2KiB;
	else
		ret = DMA_BUFFER_SIZE;

	return ret;
}

static void stmmac_clear_descriptors(struct stmmac_priv *priv)
{
	int i;
	unsigned int txsize = priv->dma_tx_size;
	unsigned int rxsize = priv->dma_rx_size;

	/* Clear the Rx/Tx descriptors */
	for (i = 0; i < rxsize; i++)
		if (priv->extend_desc)
			priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic,
						     priv->use_riwt, priv->mode,
						     (i == rxsize - 1));
		else
			priv->hw->desc->init_rx_desc(&priv->dma_rx[i],
						     priv->use_riwt, priv->mode,
						     (i == rxsize - 1));
	for (i = 0; i < txsize; i++)
		if (priv->extend_desc)
			priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
						     priv->mode,
						     (i == txsize - 1));
		else
			priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
						     priv->mode,
						     (i == txsize - 1));
}

static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
				  int i)
{
	struct sk_buff *skb;

	skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN,
				 GFP_KERNEL);
	if (unlikely(skb == NULL)) {
		pr_err("%s: Rx init fails; skb is NULL\n", __func__);
		return 1;
	}
	skb_reserve(skb, NET_IP_ALIGN);
	priv->rx_skbuff[i] = skb;
	priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
						priv->dma_buf_sz,
						DMA_FROM_DEVICE);

	p->des2 = priv->rx_skbuff_dma[i];

	if ((priv->mode == STMMAC_RING_MODE) &&
	    (priv->dma_buf_sz == BUF_SIZE_16KiB))
		priv->hw->ring->init_desc3(p);

	return 0;
}

/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static void init_dma_desc_rings(struct net_device *dev)
{
	int i;
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned int txsize = priv->dma_tx_size;
	unsigned int rxsize = priv->dma_rx_size;
	unsigned int bfsize = 0;

	/* Set the max buffer size according to the DESC mode
	 * and the MTU. Note that RING mode allows 16KiB bsize. */
	if (priv->mode == STMMAC_RING_MODE)
		bfsize = priv->hw->ring->set_16kib_bfsize(dev->mtu);

	if (bfsize < BUF_SIZE_16KiB)
		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);

	DBG(probe, INFO, "stmmac: txsize %d, rxsize %d, bfsize %d\n",
	    txsize, rxsize, bfsize);

	if (priv->extend_desc) {
		priv->dma_erx = dma_alloc_coherent(priv->device, rxsize *
						   sizeof(struct dma_extended_desc),
						   &priv->dma_rx_phy,
						   GFP_KERNEL);
		priv->dma_etx = dma_alloc_coherent(priv->device, txsize *
						   sizeof(struct dma_extended_desc),
						   &priv->dma_tx_phy,
						   GFP_KERNEL);
		if ((!priv->dma_erx) || (!priv->dma_etx))
			return;
	} else {
		priv->dma_rx = dma_alloc_coherent(priv->device, rxsize *
						  sizeof(struct dma_desc),
						  &priv->dma_rx_phy,
						  GFP_KERNEL);
		priv->dma_tx = dma_alloc_coherent(priv->device, txsize *
						  sizeof(struct dma_desc),
						  &priv->dma_tx_phy,
						  GFP_KERNEL);
		if ((!priv->dma_rx) || (!priv->dma_tx))
			return;
	}

	priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t),
					    GFP_KERNEL);
	priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *),
					GFP_KERNEL);
	priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t),
					    GFP_KERNEL);
	priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *),
					GFP_KERNEL);
	if (netif_msg_drv(priv))
		pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__,
			 (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy);

	/* RX INITIALIZATION */
	DBG(probe, INFO, "stmmac: SKB addresses:\nskb\t\tskb data\tdma data\n");
	for (i = 0; i < rxsize; i++) {
		struct dma_desc *p;
		if (priv->extend_desc)
			p = &((priv->dma_erx + i)->basic);
		else
			p = priv->dma_rx + i;

		if (stmmac_init_rx_buffers(priv, p, i))
			break;

		DBG(probe, INFO, "[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
		    priv->rx_skbuff[i]->data, priv->rx_skbuff_dma[i]);
	}
	priv->cur_rx = 0;
	priv->dirty_rx = (unsigned int)(i - rxsize);
	priv->dma_buf_sz = bfsize;
	buf_sz = bfsize;

	/* Setup the chained descriptor addresses */
	if (priv->mode == STMMAC_CHAIN_MODE) {
		if (priv->extend_desc) {
			priv->hw->chain->init(priv->dma_erx, priv->dma_rx_phy,
					      rxsize, 1);
			priv->hw->chain->init(priv->dma_etx, priv->dma_tx_phy,
					      txsize, 1);
		} else {
			priv->hw->chain->init(priv->dma_rx, priv->dma_rx_phy,
					      rxsize, 0);
			priv->hw->chain->init(priv->dma_tx, priv->dma_tx_phy,
					      txsize, 0);
		}
	}

	/* TX INITIALIZATION */
	for (i = 0; i < txsize; i++) {
		struct dma_desc *p;
		if (priv->extend_desc)
			p = &((priv->dma_etx + i)->basic);
		else
			p = priv->dma_tx + i;
		p->des2 = 0;
		priv->tx_skbuff_dma[i] = 0;
		priv->tx_skbuff[i] = NULL;
	}

	priv->dirty_tx = 0;
	priv->cur_tx = 0;

	stmmac_clear_descriptors(priv);

	if (netif_msg_hw(priv))
		stmmac_display_rings(priv);
}

static void dma_free_rx_skbufs(struct stmmac_priv *priv)
{
	int i;

	for (i = 0; i < priv->dma_rx_size; i++) {
		if (priv->rx_skbuff[i]) {
			dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
					 priv->dma_buf_sz, DMA_FROM_DEVICE);
			dev_kfree_skb_any(priv->rx_skbuff[i]);
		}
		priv->rx_skbuff[i] = NULL;
	}
}

static void dma_free_tx_skbufs(struct stmmac_priv *priv)
{
	int i;

	for (i = 0; i < priv->dma_tx_size; i++) {
		if (priv->tx_skbuff[i] != NULL) {
			struct dma_desc *p;
			if (priv->extend_desc)
				p = &((priv->dma_etx + i)->basic);
			else
				p = priv->dma_tx + i;

			if (priv->tx_skbuff_dma[i])
				dma_unmap_single(priv->device,
						 priv->tx_skbuff_dma[i],
						 priv->hw->desc->get_tx_len(p),
						 DMA_TO_DEVICE);
			dev_kfree_skb_any(priv->tx_skbuff[i]);
			priv->tx_skbuff[i] = NULL;
			priv->tx_skbuff_dma[i] = 0;
		}
	}
}

static void free_dma_desc_resources(struct stmmac_priv *priv)
{
	/* Release the DMA TX/RX socket buffers */
	dma_free_rx_skbufs(priv);
	dma_free_tx_skbufs(priv);

	/* Free the region of consistent memory previously allocated for
	 * the DMA */
	if (!priv->extend_desc) {
		dma_free_coherent(priv->device,
				  priv->dma_tx_size * sizeof(struct dma_desc),
				  priv->dma_tx, priv->dma_tx_phy);
		dma_free_coherent(priv->device,
				  priv->dma_rx_size * sizeof(struct dma_desc),
				  priv->dma_rx, priv->dma_rx_phy);
	} else {
		dma_free_coherent(priv->device, priv->dma_tx_size *
				  sizeof(struct dma_extended_desc),
				  priv->dma_etx, priv->dma_tx_phy);
		dma_free_coherent(priv->device, priv->dma_rx_size *
				  sizeof(struct dma_extended_desc),
				  priv->dma_erx, priv->dma_rx_phy);
	}
	kfree(priv->rx_skbuff_dma);
	kfree(priv->rx_skbuff);
	kfree(priv->tx_skbuff_dma);
	kfree(priv->tx_skbuff);
}

/**
 *  stmmac_dma_operation_mode - HW DMA operation mode
 *  @priv : pointer to the private device structure.
 *  Description: it sets the DMA operation mode: tx/rx DMA thresholds
 *  or Store-And-Forward capability.
 */
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
	if (likely(priv->plat->force_sf_dma_mode ||
		   ((priv->plat->tx_coe) && (!priv->no_csum_insertion)))) {
		/*
		 * In case of GMAC, SF mode can be enabled
		 * to perform the TX COE in HW. This depends on:
		 * 1) TX COE if actually supported
		 * 2) There is no bugged Jumbo frame support
		 *    that needs to not insert csum in the TDES.
		 */
		priv->hw->dma->dma_mode(priv->ioaddr,
					SF_DMA_MODE, SF_DMA_MODE);
		tc = SF_DMA_MODE;
	} else
		priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
}
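
/* Note (informal, based on the code above; the exact FIFO behaviour is
 * described in the Synopsys databook): SF_DMA_MODE selects
 * store-and-forward, i.e. a frame is fully buffered before the MAC
 * starts sending, which is what HW checksum insertion needs; otherwise
 * the threshold value "tc" (64 by default, bumped at runtime by
 * stmmac_dma_interrupt() on underflow errors) is used and transmission
 * starts once that many bytes are queued in the FIFO.
 */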

/**
 * stmmac_tx_clean:
 * @priv: private data pointer
 * Description: it reclaims resources after transmission completes.
 */
static void stmmac_tx_clean(struct stmmac_priv *priv)
{
	unsigned int txsize = priv->dma_tx_size;

	spin_lock(&priv->tx_lock);

	priv->xstats.tx_clean++;

	while (priv->dirty_tx != priv->cur_tx) {
		int last;
		unsigned int entry = priv->dirty_tx % txsize;
		struct sk_buff *skb = priv->tx_skbuff[entry];
		struct dma_desc *p;

		if (priv->extend_desc)
			p = (struct dma_desc *) (priv->dma_etx + entry);
		else
			p = priv->dma_tx + entry;

		/* Check if the descriptor is owned by the DMA. */
		if (priv->hw->desc->get_tx_owner(p))
			break;

		/* Verify tx error by looking at the last segment. */
		last = priv->hw->desc->get_tx_ls(p);
		if (likely(last)) {
			int tx_error =
			    priv->hw->desc->tx_status(&priv->dev->stats,
						      &priv->xstats, p,
						      priv->ioaddr);
			if (likely(tx_error == 0)) {
				priv->dev->stats.tx_packets++;
				priv->xstats.tx_pkt_n++;
			} else
				priv->dev->stats.tx_errors++;

			stmmac_get_tx_hwtstamp(priv, entry, skb);
		}
		TX_DBG("%s: curr %d, dirty %d\n", __func__,
		       priv->cur_tx, priv->dirty_tx);

		if (likely(priv->tx_skbuff_dma[entry])) {
			dma_unmap_single(priv->device,
					 priv->tx_skbuff_dma[entry],
					 priv->hw->desc->get_tx_len(p),
					 DMA_TO_DEVICE);
			priv->tx_skbuff_dma[entry] = 0;
		}
		priv->hw->ring->clean_desc3(priv, p);

		if (likely(skb != NULL)) {
			dev_kfree_skb(skb);
			priv->tx_skbuff[entry] = NULL;
		}

		priv->hw->desc->release_tx_desc(p, priv->mode);

		priv->dirty_tx++;
	}
	if (unlikely(netif_queue_stopped(priv->dev) &&
		     stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) {
		netif_tx_lock(priv->dev);
		if (netif_queue_stopped(priv->dev) &&
		    stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv)) {
			TX_DBG("%s: restart transmit\n", __func__);
			netif_wake_queue(priv->dev);
		}
		netif_tx_unlock(priv->dev);
	}

	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
		stmmac_enable_eee_mode(priv);
		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_TIMER(eee_timer));
	}
	spin_unlock(&priv->tx_lock);
}

static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv)
{
	priv->hw->dma->enable_dma_irq(priv->ioaddr);
}

static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv)
{
	priv->hw->dma->disable_dma_irq(priv->ioaddr);
}

/**
 * stmmac_tx_err:
 * @priv: pointer to the private device structure
 * Description: it cleans the descriptors and restarts the transmission
 * in case of errors.
 */
static void stmmac_tx_err(struct stmmac_priv *priv)
{
	int i;
	int txsize = priv->dma_tx_size;
	netif_stop_queue(priv->dev);

	priv->hw->dma->stop_tx(priv->ioaddr);
	dma_free_tx_skbufs(priv);
	for (i = 0; i < txsize; i++)
		if (priv->extend_desc)
			priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
						     priv->mode,
						     (i == txsize - 1));
		else
			priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
						     priv->mode,
						     (i == txsize - 1));
	priv->dirty_tx = 0;
	priv->cur_tx = 0;
	priv->hw->dma->start_tx(priv->ioaddr);

	priv->dev->stats.tx_errors++;
	netif_wake_queue(priv->dev);
}

static void stmmac_dma_interrupt(struct stmmac_priv *priv)
{
	int status;

	status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
	if (likely((status & handle_rx)) || (status & handle_tx)) {
		if (likely(napi_schedule_prep(&priv->napi))) {
			stmmac_disable_dma_irq(priv);
			__napi_schedule(&priv->napi);
		}
	}
	if (unlikely(status & tx_hard_error_bump_tc)) {
		/* Try to bump up the dma threshold on this failure */
		if (unlikely(tc != SF_DMA_MODE) && (tc <= 256)) {
			tc += 64;
			priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
			priv->xstats.threshold = tc;
		}
	} else if (unlikely(status == tx_hard_error))
		stmmac_tx_err(priv);
}

static void stmmac_mmc_setup(struct stmmac_priv *priv)
{
	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
	    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;

	/* Mask the MMC irq: the counters are managed in SW and the
	 * registers are cleared on each READ. */
	dwmac_mmc_intr_all_mask(priv->ioaddr);

	if (priv->dma_cap.rmon) {
		dwmac_mmc_ctrl(priv->ioaddr, mode);
		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
	} else
		pr_info(" No MAC Management Counters available\n");
}

static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
{
	u32 hwid = priv->hw->synopsys_uid;

	/* Only check a valid Synopsys Id because the old MAC chips
	 * have no HW registers from which to get the ID */
	if (likely(hwid)) {
		u32 uid = ((hwid & 0x0000ff00) >> 8);
		u32 synid = (hwid & 0x000000ff);

		pr_info("stmmac - user ID: 0x%x, Synopsys ID: 0x%x\n",
			uid, synid);

		return synid;
	}
	return 0;
}

/**
 * stmmac_selec_desc_mode
 * @priv : private structure
 * Description: select the Enhanced/Alternate or Normal descriptors
 */
static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
{
	if (priv->plat->enh_desc) {
		pr_info(" Enhanced/Alternate descriptors\n");

		/* GMAC older than 3.50 has no extended descriptors */
		if (priv->synopsys_id >= DWMAC_CORE_3_50) {
			pr_info("\tEnabled extended descriptors\n");
			priv->extend_desc = 1;
		} else
			pr_warn("Extended descriptors not supported\n");

		priv->hw->desc = &enh_desc_ops;
	} else {
		pr_info(" Normal descriptors\n");
		priv->hw->desc = &ndesc_ops;
	}
}

/**
 * stmmac_get_hw_features
 * @priv : private device pointer
 * Description:
 * new GMAC chip generations have a new register to indicate the
 * presence of the optional features/functions.
 * This can also be used to override the value passed through the
 * platform, which is necessary for the old MAC10/100 and GMAC chips.
 */
static int stmmac_get_hw_features(struct stmmac_priv *priv)
{
	u32 hw_cap = 0;

	if (priv->hw->dma->get_hw_feature) {
		hw_cap = priv->hw->dma->get_hw_feature(priv->ioaddr);

		priv->dma_cap.mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL);
		priv->dma_cap.mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1;
		priv->dma_cap.half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2;
		priv->dma_cap.hash_filter = (hw_cap & DMA_HW_FEAT_HASHSEL) >> 4;
		priv->dma_cap.multi_addr =
		    (hw_cap & DMA_HW_FEAT_ADDMACADRSEL) >> 5;
		priv->dma_cap.pcs = (hw_cap & DMA_HW_FEAT_PCSSEL) >> 6;
		priv->dma_cap.sma_mdio = (hw_cap & DMA_HW_FEAT_SMASEL) >> 8;
		priv->dma_cap.pmt_remote_wake_up =
		    (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9;
		priv->dma_cap.pmt_magic_frame =
		    (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10;
		/* MMC */
		priv->dma_cap.rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11;
		/* IEEE 1588-2002 */
		priv->dma_cap.time_stamp =
		    (hw_cap & DMA_HW_FEAT_TSVER1SEL) >> 12;
		/* IEEE 1588-2008 */
		priv->dma_cap.atime_stamp =
		    (hw_cap & DMA_HW_FEAT_TSVER2SEL) >> 13;
		/* 802.3az - Energy-Efficient Ethernet (EEE) */
		priv->dma_cap.eee = (hw_cap & DMA_HW_FEAT_EEESEL) >> 14;
		priv->dma_cap.av = (hw_cap & DMA_HW_FEAT_AVSEL) >> 15;
		/* TX and RX csum */
		priv->dma_cap.tx_coe = (hw_cap & DMA_HW_FEAT_TXCOESEL) >> 16;
		priv->dma_cap.rx_coe_type1 =
		    (hw_cap & DMA_HW_FEAT_RXTYP1COE) >> 17;
		priv->dma_cap.rx_coe_type2 =
		    (hw_cap & DMA_HW_FEAT_RXTYP2COE) >> 18;
		priv->dma_cap.rxfifo_over_2048 =
		    (hw_cap & DMA_HW_FEAT_RXFIFOSIZE) >> 19;
		/* TX and RX number of channels */
		priv->dma_cap.number_rx_channel =
		    (hw_cap & DMA_HW_FEAT_RXCHCNT) >> 20;
		priv->dma_cap.number_tx_channel =
		    (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22;
		/* Alternate (enhanced) DESC mode */
		priv->dma_cap.enh_desc =
		    (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
	}

	return hw_cap;
}
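
/* Decoding example with a hypothetical register value: if hw_cap reads
 * 0x01014a03, the set bits are 0-1 (10/100 and 1000 support), 9 (remote
 * wake-up), 11 (RMON/MMC counters), 14 (EEE), 16 (TX checksum offload)
 * and 24 (enhanced descriptors); each "(hw_cap & MASK) >> shift" line
 * above extracts one of these fields into priv->dma_cap.
 */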

static void stmmac_check_ether_addr(struct stmmac_priv *priv)
{
	/* verify if the MAC address is valid: first try the one stored
	 * in the HW registers, and as a last resort generate a random
	 * MAC address */
	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
		priv->hw->mac->get_umac_addr((void __iomem *)
					     priv->dev->base_addr,
					     priv->dev->dev_addr, 0);
		if (!is_valid_ether_addr(priv->dev->dev_addr))
			eth_hw_addr_random(priv->dev);
	}
	pr_warning("%s: device MAC address %pM\n", priv->dev->name,
		   priv->dev->dev_addr);
}

static int stmmac_init_dma_engine(struct stmmac_priv *priv)
{
	int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_len = 0;
	int mixed_burst = 0;
	int atds = 0;

	/* Some DMA parameters can be passed from the platform;
	 * if they are not passed we keep a default
	 * (good for all the chips) and init the DMA! */
	if (priv->plat->dma_cfg) {
		pbl = priv->plat->dma_cfg->pbl;
		fixed_burst = priv->plat->dma_cfg->fixed_burst;
		mixed_burst = priv->plat->dma_cfg->mixed_burst;
		burst_len = priv->plat->dma_cfg->burst_len;
	}

	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
		atds = 1;

	return priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst,
				   burst_len, priv->dma_tx_phy,
				   priv->dma_rx_phy, atds);
}

/**
 * stmmac_tx_timer:
 * @data: data pointer
 * Description:
 * This is the timer handler to directly invoke the stmmac_tx_clean.
 */
static void stmmac_tx_timer(unsigned long data)
{
	struct stmmac_priv *priv = (struct stmmac_priv *)data;

	stmmac_tx_clean(priv);
}

/**
 * stmmac_init_tx_coalesce:
 * @priv: private data structure
 * Description:
 * This inits the transmit coalesce parameters: i.e. timer rate,
 * timer handler and default threshold used for enabling the
 * interrupt on completion bit.
 */
static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
{
	priv->tx_coal_frames = STMMAC_TX_FRAMES;
	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
	init_timer(&priv->txtimer);
	priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
	priv->txtimer.data = (unsigned long)priv;
	priv->txtimer.function = stmmac_tx_timer;
	add_timer(&priv->txtimer);
}

/**
 *  stmmac_open - open entry point of the driver
 *  @dev : pointer to the device structure.
 *  Description:
 *  This function is the open entry point of the driver.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int stmmac_open(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;

	clk_prepare_enable(priv->stmmac_clk);

	stmmac_check_ether_addr(priv);

	if (!priv->pcs) {
		ret = stmmac_init_phy(dev);
		if (ret) {
			pr_err("%s: Cannot attach to PHY (error: %d)\n",
			       __func__, ret);
			goto open_error;
		}
	}

	/* Create and initialize the TX/RX descriptors chains. */
	priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
	priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
	priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
	init_dma_desc_rings(dev);

	/* DMA initialization and SW reset */
	ret = stmmac_init_dma_engine(priv);
	if (ret < 0) {
		pr_err("%s: DMA initialization failed\n", __func__);
		goto open_error;
	}

	/* Copy the MAC addr into the HW */
	priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);

	/* If required, perform hw setup of the bus. */
	if (priv->plat->bus_setup)
		priv->plat->bus_setup(priv->ioaddr);

	/* Initialize the MAC Core */
	priv->hw->mac->core_init(priv->ioaddr);

	/* Request the IRQ lines */
	ret = request_irq(dev->irq, stmmac_interrupt,
			  IRQF_SHARED, dev->name, dev);
	if (unlikely(ret < 0)) {
		pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
		       __func__, dev->irq, ret);
		goto open_error;
	}

	/* Request the Wake IRQ in case another line is used for WoL */
	if (priv->wol_irq != dev->irq) {
		ret = request_irq(priv->wol_irq, stmmac_interrupt,
				  IRQF_SHARED, dev->name, dev);
		if (unlikely(ret < 0)) {
			pr_err("%s: ERROR: allocating the ext WoL IRQ %d "
			       "(error: %d)\n", __func__, priv->wol_irq, ret);
			goto open_error_wolirq;
		}
	}

	/* Request the LPI IRQ in case a separate line is used for it */
	if (priv->lpi_irq != -ENXIO) {
		ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
				  dev->name, dev);
		if (unlikely(ret < 0)) {
			pr_err("%s: ERROR: allocating the LPI IRQ %d (%d)\n",
			       __func__, priv->lpi_irq, ret);
			goto open_error_lpiirq;
		}
	}

	/* Enable the MAC Rx/Tx */
	stmmac_set_mac(priv->ioaddr, true);

	/* Set the HW DMA mode and the COE */
	stmmac_dma_operation_mode(priv);

	/* Extra statistics */
	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
	priv->xstats.threshold = tc;

	stmmac_mmc_setup(priv);

	ret = stmmac_init_ptp(priv);
	if (ret)
		pr_warn("%s: failed PTP initialisation\n", __func__);

#ifdef CONFIG_STMMAC_DEBUG_FS
	ret = stmmac_init_fs(dev);
	if (ret < 0)
		pr_warning("%s: failed debugFS registration\n", __func__);
#endif
	/* Start the ball rolling... */
	DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
	priv->hw->dma->start_tx(priv->ioaddr);
	priv->hw->dma->start_rx(priv->ioaddr);

	/* Dump DMA/MAC registers */
	if (netif_msg_hw(priv)) {
		priv->hw->mac->dump_regs(priv->ioaddr);
		priv->hw->dma->dump_regs(priv->ioaddr);
	}

	if (priv->phydev)
		phy_start(priv->phydev);

	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS_TIMER;

	/* Using PCS we cannot deal with the phy registers at this stage
	 * so we do not support extra features like EEE.
	 */
	if (!priv->pcs)
		priv->eee_enabled = stmmac_eee_init(priv);

	stmmac_init_tx_coalesce(priv);

	if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
		priv->rx_riwt = MAX_DMA_RIWT;
		priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
	}

	if (priv->pcs && priv->hw->mac->ctrl_ane)
		priv->hw->mac->ctrl_ane(priv->ioaddr, 0);

	napi_enable(&priv->napi);
	netif_start_queue(dev);

	return 0;

open_error_lpiirq:
	if (priv->wol_irq != dev->irq)
		free_irq(priv->wol_irq, dev);

open_error_wolirq:
	free_irq(dev->irq, dev);

open_error:
	if (priv->phydev)
		phy_disconnect(priv->phydev);

	clk_disable_unprepare(priv->stmmac_clk);

	return ret;
}

/**
 *  stmmac_release - close entry point of the driver
 *  @dev : device pointer.
 *  Description:
 *  This is the stop entry point of the driver.
 */
static int stmmac_release(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	if (priv->eee_enabled)
		del_timer_sync(&priv->eee_ctrl_timer);

	/* Stop and disconnect the PHY */
	if (priv->phydev) {
		phy_stop(priv->phydev);
		phy_disconnect(priv->phydev);
		priv->phydev = NULL;
	}

	netif_stop_queue(dev);

	napi_disable(&priv->napi);

	del_timer_sync(&priv->txtimer);

	/* Free the IRQ lines */
	free_irq(dev->irq, dev);
	if (priv->wol_irq != dev->irq)
		free_irq(priv->wol_irq, dev);
	if (priv->lpi_irq != -ENXIO)
		free_irq(priv->lpi_irq, dev);

	/* Stop TX/RX DMA and clear the descriptors */
	priv->hw->dma->stop_tx(priv->ioaddr);
	priv->hw->dma->stop_rx(priv->ioaddr);

	/* Release and free the Rx/Tx resources */
	free_dma_desc_resources(priv);

	/* Disable the MAC Rx/Tx */
	stmmac_set_mac(priv->ioaddr, false);

	netif_carrier_off(dev);

#ifdef CONFIG_STMMAC_DEBUG_FS
	stmmac_exit_fs();
#endif
	clk_disable_unprepare(priv->stmmac_clk);

	stmmac_release_ptp(priv);

	return 0;
}


/**
 * stmmac_xmit:
 * @skb : the socket buffer
 * @dev : device pointer
 * Description : Tx entry point of the driver.
 */
static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned int txsize = priv->dma_tx_size;
	unsigned int entry;
	int i, csum_insertion = 0, is_jumbo = 0;
	int nfrags = skb_shinfo(skb)->nr_frags;
	struct dma_desc *desc, *first;
	unsigned int nopaged_len = skb_headlen(skb);

	if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);
			/* This is a hard error, log it. */
			pr_err("%s: BUG! Tx Ring full when queue awake\n",
			       __func__);
		}
		return NETDEV_TX_BUSY;
	}

	spin_lock(&priv->tx_lock);

	if (priv->tx_path_in_lpi_mode)
		stmmac_disable_eee_mode(priv);

	entry = priv->cur_tx % txsize;

#ifdef STMMAC_XMIT_DEBUG
	if ((skb->len > ETH_FRAME_LEN) || nfrags)
		pr_debug("stmmac xmit: [entry %d]\n"
			 "\tskb addr %p - len: %d - nopaged_len: %d\n"
			 "\tn_frags: %d - ip_summed: %d - %s gso\n"
			 "\ttx_count_frames %d\n", entry,
			 skb, skb->len, nopaged_len, nfrags, skb->ip_summed,
			 !skb_is_gso(skb) ? "isn't" : "is",
			 priv->tx_count_frames);
#endif

	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);

	if (priv->extend_desc)
		desc = (struct dma_desc *)(priv->dma_etx + entry);
	else
		desc = priv->dma_tx + entry;

	first = desc;

#ifdef STMMAC_XMIT_DEBUG
	if ((nfrags > 0) || (skb->len > ETH_FRAME_LEN))
		pr_debug("\tskb len: %d, nopaged_len: %d,\n"
			 "\t\tn_frags: %d, ip_summed: %d\n",
			 skb->len, nopaged_len, nfrags, skb->ip_summed);
#endif
	priv->tx_skbuff[entry] = skb;

	/* To program the descriptors according to the size of the frame */
	if (priv->mode == STMMAC_RING_MODE) {
		is_jumbo = priv->hw->ring->is_jumbo_frm(skb->len,
							priv->plat->enh_desc);
		if (unlikely(is_jumbo))
			entry = priv->hw->ring->jumbo_frm(priv, skb,
							  csum_insertion);
	} else {
		is_jumbo = priv->hw->chain->is_jumbo_frm(skb->len,
							 priv->plat->enh_desc);
		if (unlikely(is_jumbo))
			entry = priv->hw->chain->jumbo_frm(priv, skb,
							   csum_insertion);
	}
	if (likely(!is_jumbo)) {
		desc->des2 = dma_map_single(priv->device, skb->data,
					    nopaged_len, DMA_TO_DEVICE);
		priv->tx_skbuff_dma[entry] = desc->des2;
		priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
						csum_insertion, priv->mode);
	} else
		desc = first;

	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int len = skb_frag_size(frag);

		entry = (++priv->cur_tx) % txsize;
		if (priv->extend_desc)
			desc = (struct dma_desc *)(priv->dma_etx + entry);
		else
			desc = priv->dma_tx + entry;

		TX_DBG("\t[entry %d] segment len: %d\n", entry, len);
		desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
					      DMA_TO_DEVICE);
		priv->tx_skbuff_dma[entry] = desc->des2;
		priv->tx_skbuff[entry] = NULL;
		priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
						priv->mode);
		wmb();
		priv->hw->desc->set_tx_owner(desc);
		wmb();
	}

	/* Finalize the latest segment. */
	priv->hw->desc->close_tx_desc(desc);

	wmb();
	/* According to the coalesce parameter the IC bit for the latest
	 * segment could be reset and the timer re-started to invoke the
	 * stmmac_tx function. This approach takes the fragments into
	 * account.
	 */
	priv->tx_count_frames += nfrags + 1;
	if (priv->tx_coal_frames > priv->tx_count_frames) {
		priv->hw->desc->clear_tx_ic(desc);
		priv->xstats.tx_reset_ic_bit++;
		TX_DBG("\t[entry %d]: tx_count_frames %d\n", entry,
		       priv->tx_count_frames);
		mod_timer(&priv->txtimer,
			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
	} else
		priv->tx_count_frames = 0;

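	/* Illustrative example (numbers are not from this driver): with a
	 * frame threshold of 64, the IC bit stays set on only roughly
	 * every 64th descriptor, so one TX-complete interrupt covers a
	 * batch of frames, and the timer above bounds the completion
	 * latency when traffic stops before the threshold is reached.
	 */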
	/* To avoid a race condition: set the OWN bit on the first
	 * descriptor only after all of the fragment descriptors have
	 * been fully written and made visible to the device.
	 */
	priv->hw->desc->set_tx_owner(first);
	wmb();

	priv->cur_tx++;

#ifdef STMMAC_XMIT_DEBUG
	if (netif_msg_pktdata(priv)) {
		pr_info("stmmac xmit: current=%d, dirty=%d, entry=%d, "
			"first=%p, nfrags=%d\n",
			(priv->cur_tx % txsize), (priv->dirty_tx % txsize),
			entry, first, nfrags);
		if (priv->extend_desc)
			stmmac_display_ring((void *)priv->dma_etx, txsize, 1);
		else
			stmmac_display_ring((void *)priv->dma_tx, txsize, 0);

		pr_info(">>> frame to be transmitted: ");
		print_pkt(skb->data, skb->len);
	}
#endif
	if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
		TX_DBG("%s: stop transmitted packets\n", __func__);
		netif_stop_queue(dev);
	}

	dev->stats.tx_bytes += skb->len;

	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		     priv->hwts_tx_en)) {
		/* declare that device is doing timestamping */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		priv->hw->desc->enable_tx_timestamp(first);
	}

	if (!priv->hwts_tx_en)
		skb_tx_timestamp(skb);

	priv->hw->dma->enable_dma_transmission(priv->ioaddr);

	spin_unlock(&priv->tx_lock);

	return NETDEV_TX_OK;
}

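/**
 * stmmac_rx_refill - refill used skb preallocated buffers
 * @priv: driver private structure
 * Description: re-allocate and DMA-map the RX skbs that have been
 * consumed and give the descriptors back to the hardware by setting
 * the OWN bit.
 */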
static inline void stmmac_rx_refill(struct stmmac_priv *priv)
{
	unsigned int rxsize = priv->dma_rx_size;
	int bfsize = priv->dma_buf_sz;

	for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) {
		unsigned int entry = priv->dirty_rx % rxsize;
		struct dma_desc *p;

		if (priv->extend_desc)
			p = (struct dma_desc *)(priv->dma_erx + entry);
		else
			p = priv->dma_rx + entry;

		if (likely(priv->rx_skbuff[entry] == NULL)) {
			struct sk_buff *skb;

			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);

			if (unlikely(skb == NULL))
				break;

			priv->rx_skbuff[entry] = skb;
			priv->rx_skbuff_dma[entry] =
			    dma_map_single(priv->device, skb->data, bfsize,
					   DMA_FROM_DEVICE);

			p->des2 = priv->rx_skbuff_dma[entry];

			priv->hw->ring->refill_desc3(priv, p);

			RX_DBG(KERN_INFO "\trefill entry #%d\n", entry);
		}
		wmb();
		priv->hw->desc->set_rx_owner(p);
		wmb();
	}
}

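/**
 * stmmac_rx - read the frames from the RX descriptor ring
 * @priv: driver private structure
 * @limit: napi budget
 * Description: walk the RX ring while descriptors are owned by the CPU,
 * push completed frames to the stack via napi_gro_receive() and refill
 * the ring; returns the number of frames handled.
 */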
static int stmmac_rx(struct stmmac_priv *priv, int limit)
{
	unsigned int rxsize = priv->dma_rx_size;
	unsigned int entry = priv->cur_rx % rxsize;
	unsigned int next_entry;
	unsigned int count = 0;

#ifdef STMMAC_RX_DEBUG
	if (netif_msg_hw(priv)) {
		pr_debug(">>> stmmac_rx: descriptor ring:\n");
		if (priv->extend_desc)
			stmmac_display_ring((void *)priv->dma_erx, rxsize, 1);
		else
			stmmac_display_ring((void *)priv->dma_rx, rxsize, 0);
	}
#endif
	while (count < limit) {
		int status;
		struct dma_desc *p, *p_next;

		if (priv->extend_desc)
			p = (struct dma_desc *)(priv->dma_erx + entry);
		else
			p = priv->dma_rx + entry;

		if (priv->hw->desc->get_rx_owner(p))
			break;

		count++;

		next_entry = (++priv->cur_rx) % rxsize;
		if (priv->extend_desc)
			p_next = (struct dma_desc *)(priv->dma_erx +
						     next_entry);
		else
			p_next = priv->dma_rx + next_entry;

		prefetch(p_next);

		/* read the status of the incoming frame */
		status = priv->hw->desc->rx_status(&priv->dev->stats,
						   &priv->xstats, p);
		if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
			priv->hw->desc->rx_extended_status(&priv->dev->stats,
							   &priv->xstats,
							   priv->dma_erx +
							   entry);
		if (unlikely(status == discard_frame)) {
			priv->dev->stats.rx_errors++;
			if (priv->hwts_rx_en && !priv->extend_desc) {
				/* DESC2 & DESC3 will be overwritten by the
				 * device with timestamp values, hence
				 * reinitialize them in stmmac_rx_refill()
				 * so that the device can reuse them.
				 */
				priv->rx_skbuff[entry] = NULL;
				dma_unmap_single(priv->device,
						 priv->rx_skbuff_dma[entry],
						 priv->dma_buf_sz,
						 DMA_FROM_DEVICE);
			}
		} else {
			struct sk_buff *skb;
			int frame_len;

			frame_len = priv->hw->desc->get_rx_frame_len(p,
					priv->plat->rx_coe);
			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
			 * Type frames (LLC/LLC-SNAP)
			 */
			if (unlikely(status != llc_snap))
				frame_len -= ETH_FCS_LEN;
#ifdef STMMAC_RX_DEBUG
			if (frame_len > ETH_FRAME_LEN)
				pr_debug("\tRX frame size %d, COE status: %d\n",
					 frame_len, status);

			if (netif_msg_hw(priv))
				pr_debug("\tdesc: %p [entry %d] buff=0x%x\n",
					 p, entry, p->des2);
#endif
			skb = priv->rx_skbuff[entry];
			if (unlikely(!skb)) {
				pr_err("%s: Inconsistent Rx descriptor chain\n",
				       priv->dev->name);
				priv->dev->stats.rx_dropped++;
				break;
			}
			prefetch(skb->data - NET_IP_ALIGN);
			priv->rx_skbuff[entry] = NULL;

			stmmac_get_rx_hwtstamp(priv, entry, skb);

			skb_put(skb, frame_len);
			dma_unmap_single(priv->device,
					 priv->rx_skbuff_dma[entry],
					 priv->dma_buf_sz, DMA_FROM_DEVICE);
#ifdef STMMAC_RX_DEBUG
			if (netif_msg_pktdata(priv)) {
				pr_info(" frame received (%dbytes)", frame_len);
				print_pkt(skb->data, frame_len);
			}
#endif
			skb->protocol = eth_type_trans(skb, priv->dev);

			if (unlikely(!priv->plat->rx_coe))
				skb_checksum_none_assert(skb);
			else
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			napi_gro_receive(&priv->napi, skb);

			priv->dev->stats.rx_packets++;
			priv->dev->stats.rx_bytes += frame_len;
		}
		entry = next_entry;
	}

	stmmac_rx_refill(priv);

	priv->xstats.rx_pkt_n += count;

	return count;
}

/**
 * stmmac_poll - stmmac poll method (NAPI)
 * @napi : pointer to the napi structure.
 * @budget : maximum number of packets that the current CPU can receive from
 *	     all interfaces.
 * Description :
 * To look at the incoming frames and clear the tx resources.
 */
static int stmmac_poll(struct napi_struct *napi, int budget)
{
	struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
	int work_done = 0;

	priv->xstats.napi_poll++;
	stmmac_tx_clean(priv);

	work_done = stmmac_rx(priv, budget);
	if (work_done < budget) {
		napi_complete(napi);
		stmmac_enable_dma_irq(priv);
	}
	return work_done;
}

/**
 * stmmac_tx_timeout
 * @dev : Pointer to net device structure
 * Description: this function is called when a packet transmission fails to
 * complete within a reasonable time. The driver will mark the error in the
 * netdev structure and arrange for the device to be reset to a sane state
 * in order to transmit a new packet.
 */
static void stmmac_tx_timeout(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Clear Tx resources and restart transmitting again */
	stmmac_tx_err(priv);
}

/* Configuration changes (passed on by ifconfig) */
static int stmmac_config(struct net_device *dev, struct ifmap *map)
{
	if (dev->flags & IFF_UP)	/* can't act on a running interface */
		return -EBUSY;

	/* Don't allow changing the I/O address */
	if (map->base_addr != dev->base_addr) {
		pr_warning("%s: can't change I/O address\n", dev->name);
		return -EOPNOTSUPP;
	}

	/* Don't allow changing the IRQ */
	if (map->irq != dev->irq) {
		pr_warning("%s: can't change IRQ number %d\n",
			   dev->name, dev->irq);
		return -EOPNOTSUPP;
	}

	/* ignore other fields */
	return 0;
}

/**
 * stmmac_set_rx_mode - entry point for multicast addressing
 * @dev : pointer to the device structure
 * Description:
 * This function is a driver entry point which gets called by the kernel
 * whenever multicast addresses must be enabled/disabled.
 * Return value:
 * void.
 */
static void stmmac_set_rx_mode(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	spin_lock(&priv->lock);
	priv->hw->mac->set_filter(dev, priv->synopsys_id);
	spin_unlock(&priv->lock);
}

/**
 * stmmac_change_mtu - entry point to change MTU size for the device.
 * @dev : device pointer.
 * @new_mtu : the new MTU size for the device.
 * Description: the Maximum Transfer Unit (MTU) is used by the network layer
 * to drive packet transmission. Ethernet has an MTU of 1500 octets
 * (ETH_DATA_LEN). This value can be changed with ifconfig.
 * Return value:
 * 0 on success or a negative errno value on failure.
 */
static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int max_mtu;

	if (netif_running(dev)) {
		pr_err("%s: must be stopped to change its MTU\n", dev->name);
		return -EBUSY;
	}

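	/* Pick the MTU ceiling: with enhanced descriptors the HW can take
	 * jumbo frames up to JUMBO_LEN; with normal descriptors the frame
	 * must fit in a single linear buffer, so the ceiling is what
	 * SKB_MAX_HEAD() allows (roughly one page minus skb overhead).
	 */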
	if (priv->plat->enh_desc)
		max_mtu = JUMBO_LEN;
	else
		max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);

	if ((new_mtu < 46) || (new_mtu > max_mtu)) {
		pr_err("%s: invalid MTU, max MTU is: %d\n", dev->name, max_mtu);
		return -EINVAL;
	}

	dev->mtu = new_mtu;
	netdev_update_features(dev);

	return 0;
}

static netdev_features_t stmmac_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
		features &= ~NETIF_F_RXCSUM;
	else if (priv->plat->rx_coe == STMMAC_RX_COE_TYPE1)
		features &= ~NETIF_F_IPV6_CSUM;
	if (!priv->plat->tx_coe)
		features &= ~NETIF_F_ALL_CSUM;

	/* Some GMAC devices have a bugged Jumbo frame support that
	 * needs to have the Tx COE disabled for oversized frames
	 * (due to limited buffer sizes). In this case we disable
	 * the TX csum insertion in the TDES and not use SF.
	 */
	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
		features &= ~NETIF_F_ALL_CSUM;

	return features;
}

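/**
 * stmmac_interrupt - main ISR
 * @irq: interrupt number.
 * @dev_id: to pass the net device pointer.
 * Description: this is the main driver interrupt handler; it checks the
 * GMAC core status (e.g. the LPI transitions used by EEE) and then
 * services the DMA interrupts.
 */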
static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct stmmac_priv *priv = netdev_priv(dev);

	if (unlikely(!dev)) {
		pr_err("%s: invalid dev pointer\n", __func__);
		return IRQ_NONE;
	}

	/* To handle GMAC own interrupts */
	if (priv->plat->has_gmac) {
		int status = priv->hw->mac->host_irq_status((void __iomem *)
							    dev->base_addr,
							    &priv->xstats);
		if (unlikely(status)) {
			/* For LPI we need to save the tx status */
			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
				priv->tx_path_in_lpi_mode = true;
			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
				priv->tx_path_in_lpi_mode = false;
		}
	}

	/* To handle DMA interrupts */
	stmmac_dma_interrupt(priv);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by NETCONSOLE and other diagnostic tools
 * to allow network I/O with interrupts disabled.
 */
static void stmmac_poll_controller(struct net_device *dev)
{
	disable_irq(dev->irq);
	stmmac_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
#endif

/**
 * stmmac_ioctl - Entry point for the Ioctl
 * @dev: Device pointer.
 * @rq: An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * @cmd: IOCTL command
 * Description:
 * Currently it supports just the phy_mii_ioctl(...) and HW time stamping.
 */
static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (!priv->phydev)
			return -EINVAL;
		ret = phy_mii_ioctl(priv->phydev, rq, cmd);
		break;
	case SIOCSHWTSTAMP:
		ret = stmmac_hwtstamp_ioctl(dev, rq);
		break;
	default:
		break;
	}

	return ret;
}

#ifdef CONFIG_STMMAC_DEBUG_FS
static struct dentry *stmmac_fs_dir;
static struct dentry *stmmac_rings_status;
static struct dentry *stmmac_dma_cap;

static void sysfs_display_ring(void *head, int size, int extend_desc,
			       struct seq_file *seq)
{
	int i;
	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
	struct dma_desc *p = (struct dma_desc *)head;

	for (i = 0; i < size; i++) {
		u64 x;
		if (extend_desc) {
			x = *(u64 *)ep;
			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
				   i, (unsigned int)virt_to_phys(ep),
				   (unsigned int)x, (unsigned int)(x >> 32),
				   ep->basic.des2, ep->basic.des3);
			ep++;
		} else {
			x = *(u64 *)p;
			seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
				   i, (unsigned int)virt_to_phys(p),
				   (unsigned int)x, (unsigned int)(x >> 32),
				   p->des2, p->des3);
			p++;
		}
		seq_printf(seq, "\n");
	}
}

static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned int txsize = priv->dma_tx_size;
	unsigned int rxsize = priv->dma_rx_size;

	if (priv->extend_desc) {
		seq_printf(seq, "Extended RX descriptor ring:\n");
		sysfs_display_ring((void *)priv->dma_erx, rxsize, 1, seq);
		seq_printf(seq, "Extended TX descriptor ring:\n");
		sysfs_display_ring((void *)priv->dma_etx, txsize, 1, seq);
	} else {
		seq_printf(seq, "RX descriptor ring:\n");
		sysfs_display_ring((void *)priv->dma_rx, rxsize, 0, seq);
		seq_printf(seq, "TX descriptor ring:\n");
		sysfs_display_ring((void *)priv->dma_tx, txsize, 0, seq);
	}

	return 0;
}

static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
{
	return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
}

static const struct file_operations stmmac_rings_status_fops = {
	.owner = THIS_MODULE,
	.open = stmmac_sysfs_ring_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);

	if (!priv->hw_cap_support) {
		seq_printf(seq, "DMA HW features not supported\n");
		return 0;
	}

	seq_printf(seq, "==============================\n");
	seq_printf(seq, "\tDMA HW features\n");
	seq_printf(seq, "==============================\n");

	seq_printf(seq, "\t10/100 Mbps %s\n",
		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
	seq_printf(seq, "\t1000 Mbps %s\n",
		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
	seq_printf(seq, "\tHalf duplex %s\n",
		   (priv->dma_cap.half_duplex) ? "Y" : "N");
	seq_printf(seq, "\tHash Filter: %s\n",
		   (priv->dma_cap.hash_filter) ? "Y" : "N");
	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
		   (priv->dma_cap.multi_addr) ? "Y" : "N");
	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
		   (priv->dma_cap.pcs) ? "Y" : "N");
	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
	seq_printf(seq, "\tPMT Remote wake up: %s\n",
		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
	seq_printf(seq, "\tPMT Magic Frame: %s\n",
		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
	seq_printf(seq, "\tRMON module: %s\n",
		   (priv->dma_cap.rmon) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
		   (priv->dma_cap.time_stamp) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
		   (priv->dma_cap.eee) ? "Y" : "N");
	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
		   (priv->dma_cap.tx_coe) ? "Y" : "N");
	seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
		   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
	seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
		   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
		   priv->dma_cap.number_rx_channel);
	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
		   priv->dma_cap.number_tx_channel);
	seq_printf(seq, "\tEnhanced descriptors: %s\n",
		   (priv->dma_cap.enh_desc) ? "Y" : "N");

	return 0;
}

static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
{
	return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
}

static const struct file_operations stmmac_dma_cap_fops = {
	.owner = THIS_MODULE,
	.open = stmmac_sysfs_dma_cap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int stmmac_init_fs(struct net_device *dev)
{
	/* Create debugfs entries */
	stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);

	if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
		pr_err("ERROR %s, debugfs create directory failed\n",
		       STMMAC_RESOURCE_NAME);

		return -ENOMEM;
	}

	/* Entry to report DMA RX/TX rings */
	stmmac_rings_status = debugfs_create_file("descriptors_status",
						  S_IRUGO, stmmac_fs_dir, dev,
						  &stmmac_rings_status_fops);

	if (!stmmac_rings_status || IS_ERR(stmmac_rings_status)) {
		pr_info("ERROR creating stmmac ring debugfs file\n");
		debugfs_remove(stmmac_fs_dir);

		return -ENOMEM;
	}

	/* Entry to report the DMA HW features */
	stmmac_dma_cap = debugfs_create_file("dma_cap", S_IRUGO, stmmac_fs_dir,
					     dev, &stmmac_dma_cap_fops);

	if (!stmmac_dma_cap || IS_ERR(stmmac_dma_cap)) {
		pr_info("ERROR creating stmmac dma_cap debugfs file\n");
		debugfs_remove(stmmac_rings_status);
		debugfs_remove(stmmac_fs_dir);

		return -ENOMEM;
	}

	return 0;
}

static void stmmac_exit_fs(void)
{
	debugfs_remove(stmmac_rings_status);
	debugfs_remove(stmmac_dma_cap);
	debugfs_remove(stmmac_fs_dir);
}
#endif /* CONFIG_STMMAC_DEBUG_FS */

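/* With debugfs mounted at the usual location, the entries created by
 * stmmac_init_fs() above show up as
 * /sys/kernel/debug/<STMMAC_RESOURCE_NAME>/descriptors_status and
 * .../dma_cap (assuming the default debugfs mount point).
 */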
static const struct net_device_ops stmmac_netdev_ops = {
	.ndo_open = stmmac_open,
	.ndo_start_xmit = stmmac_xmit,
	.ndo_stop = stmmac_release,
	.ndo_change_mtu = stmmac_change_mtu,
	.ndo_fix_features = stmmac_fix_features,
	.ndo_set_rx_mode = stmmac_set_rx_mode,
	.ndo_tx_timeout = stmmac_tx_timeout,
	.ndo_do_ioctl = stmmac_ioctl,
	.ndo_set_config = stmmac_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = stmmac_poll_controller,
#endif
	.ndo_set_mac_address = eth_mac_addr,
};

/**
 * stmmac_hw_init - Init the MAC device
 * @priv : pointer to the private device structure.
 * Description: this function detects which MAC device
 * (GMAC/MAC10-100) has to be attached, checks the HW capability
 * (if supported) and sets the driver's features (for example
 * to use the ring or chain mode or support the normal/enh
 * descriptor structure).
 */
static int stmmac_hw_init(struct stmmac_priv *priv)
{
	int ret;
	struct mac_device_info *mac;

	/* Identify the MAC HW device */
	if (priv->plat->has_gmac) {
		priv->dev->priv_flags |= IFF_UNICAST_FLT;
		mac = dwmac1000_setup(priv->ioaddr);
	} else {
		mac = dwmac100_setup(priv->ioaddr);
	}
	if (!mac)
		return -ENOMEM;

	priv->hw = mac;

	/* Get and dump the chip ID */
	priv->synopsys_id = stmmac_get_synopsys_id(priv);

	/* To use alternate (extended) or normal descriptor structures */
	stmmac_selec_desc_mode(priv);

	/* To use the chained or ring mode */
	if (chain_mode) {
		priv->hw->chain = &chain_mode_ops;
		pr_info(" Chain mode enabled\n");
		priv->mode = STMMAC_CHAIN_MODE;
	} else {
		priv->hw->ring = &ring_mode_ops;
		pr_info(" Ring mode enabled\n");
		priv->mode = STMMAC_RING_MODE;
	}
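	/* The two modes differ in how a descriptor's second pointer is
	 * used: ring mode can split an oversized frame across the two
	 * buffers of one descriptor, while chain mode links each
	 * descriptor to the next one in memory (see the ring/chain mode
	 * ops for how des3 is handled in each case).
	 */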

	/* Get the HW capability (new GMAC newer than 3.50a) */
	priv->hw_cap_support = stmmac_get_hw_features(priv);
	if (priv->hw_cap_support) {
		pr_info(" DMA HW capability register supported");

		/* We can override some gmac/dma configuration fields: e.g.
		 * enh_desc, tx_coe (e.g. that are passed through the
		 * platform) with the values from the HW capability
		 * register (if supported).
		 */
		priv->plat->enh_desc = priv->dma_cap.enh_desc;
		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;

		priv->plat->tx_coe = priv->dma_cap.tx_coe;

		if (priv->dma_cap.rx_coe_type2)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
		else if (priv->dma_cap.rx_coe_type1)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;

	} else
		pr_info(" No HW DMA feature register supported");

	/* Enable the IPC (Checksum Offload) and check if the feature has been
	 * enabled during the core configuration.
	 */
	ret = priv->hw->mac->rx_ipc(priv->ioaddr);
	if (!ret) {
		pr_warning(" RX IPC Checksum Offload not configured.\n");
		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
	}

	if (priv->plat->rx_coe)
		pr_info(" RX Checksum Offload Engine supported (type %d)\n",
			priv->plat->rx_coe);
	if (priv->plat->tx_coe)
		pr_info(" TX Checksum insertion supported\n");

	if (priv->plat->pmt) {
		pr_info(" Wake-Up On Lan supported\n");
		device_set_wakeup_capable(priv->device, 1);
	}

	return 0;
}

/**
 * stmmac_dvr_probe
 * @device: device pointer
 * @plat_dat: platform data pointer
 * @addr: iobase memory address
 * Description: this is the main probe function used to call
 * alloc_etherdev and allocate the private structure.
 */
struct stmmac_priv *stmmac_dvr_probe(struct device *device,
				     struct plat_stmmacenet_data *plat_dat,
				     void __iomem *addr)
{
	int ret = 0;
	struct net_device *ndev = NULL;
	struct stmmac_priv *priv;

	ndev = alloc_etherdev(sizeof(struct stmmac_priv));
	if (!ndev)
		return NULL;

	SET_NETDEV_DEV(ndev, device);

	priv = netdev_priv(ndev);
	priv->device = device;
	priv->dev = ndev;

	ether_setup(ndev);

	stmmac_set_ethtool_ops(ndev);
	priv->pause = pause;
	priv->plat = plat_dat;
	priv->ioaddr = addr;
	priv->dev->base_addr = (unsigned long)addr;

	/* Verify driver arguments */
	stmmac_verify_args();

	/* Override with kernel parameters if supplied XXX CRS XXX
	 * this needs to have multiple instances
	 */
	if ((phyaddr >= 0) && (phyaddr <= 31))
		priv->plat->phy_addr = phyaddr;

	/* Init MAC and get the capabilities */
	ret = stmmac_hw_init(priv);
	if (ret)
		goto error_free_netdev;

	ndev->netdev_ops = &stmmac_netdev_ops;

	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_RXCSUM;
	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
	/* Both mac100 and gmac support receive VLAN tag detection */
	ndev->features |= NETIF_F_HW_VLAN_RX;
#endif
	priv->msg_enable = netif_msg_init(debug, default_msg_level);

	if (flow_ctrl)
		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */

	/* Rx Watchdog is available in the COREs newer than the 3.40.
	 * In some cases, for example on bugged HW, this feature
	 * has to be disabled and this can be done by passing the
	 * riwt_off field from the platform.
	 */
	if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
		priv->use_riwt = 1;
		pr_info(" Enable RX Mitigation via HW Watchdog Timer\n");
	}

	netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);

	spin_lock_init(&priv->lock);
	spin_lock_init(&priv->tx_lock);

	ret = register_netdev(ndev);
	if (ret) {
		pr_err("%s: ERROR %i registering the device\n", __func__, ret);
		goto error_netdev_register;
	}

	priv->stmmac_clk = clk_get(priv->device, STMMAC_RESOURCE_NAME);
	if (IS_ERR(priv->stmmac_clk)) {
		pr_warning("%s: warning: cannot get CSR clock\n", __func__);
		goto error_clk_get;
	}

	/* If a specific clk_csr value is passed from the platform
	 * this means that the CSR Clock Range selection cannot be
	 * changed at run-time and it is fixed. Vice versa, the driver
	 * will try to set the MDC clock dynamically according to the
	 * actual csr clock input.
	 */
	if (!priv->plat->clk_csr)
		stmmac_clk_csr_set(priv);
	else
		priv->clk_csr = priv->plat->clk_csr;

	stmmac_check_pcs_mode(priv);

	if (!priv->pcs) {
		/* MDIO bus Registration */
		ret = stmmac_mdio_register(ndev);
		if (ret < 0) {
			pr_debug("%s: MDIO bus (id: %d) registration failed",
				 __func__, priv->plat->bus_id);
			goto error_mdio_register;
		}
	}

	return priv;

error_mdio_register:
	clk_put(priv->stmmac_clk);
error_clk_get:
	unregister_netdev(ndev);
error_netdev_register:
	netif_napi_del(&priv->napi);
error_free_netdev:
	free_netdev(ndev);

	return NULL;
}

/**
 * stmmac_dvr_remove
 * @ndev: net device pointer
 * Description: this function resets the TX/RX processes, disables the MAC
 * RX/TX, changes the link status and releases the DMA descriptor rings.
 */
int stmmac_dvr_remove(struct net_device *ndev)
{
	struct stmmac_priv *priv = netdev_priv(ndev);

	pr_info("%s:\n\tremoving driver", __func__);

	priv->hw->dma->stop_rx(priv->ioaddr);
	priv->hw->dma->stop_tx(priv->ioaddr);

	stmmac_set_mac(priv->ioaddr, false);
	if (!priv->pcs)
		stmmac_mdio_unregister(ndev);
	netif_carrier_off(ndev);
	unregister_netdev(ndev);
	free_netdev(ndev);

	return 0;
}

#ifdef CONFIG_PM
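/* Note on the PM strategy below: if the device is wakeup-capable, the
 * MAC is left powered and armed for PMT (magic packet / wake-up frame);
 * otherwise it is fully disabled and its CSR clock is gated off.
 */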
int stmmac_suspend(struct net_device *ndev)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	unsigned long flags;

	if (!ndev || !netif_running(ndev))
		return 0;

	if (priv->phydev)
		phy_stop(priv->phydev);

	spin_lock_irqsave(&priv->lock, flags);

	netif_device_detach(ndev);
	netif_stop_queue(ndev);

	napi_disable(&priv->napi);

	/* Stop TX/RX DMA */
	priv->hw->dma->stop_tx(priv->ioaddr);
	priv->hw->dma->stop_rx(priv->ioaddr);

	stmmac_clear_descriptors(priv);

	/* Enable Power down mode by programming the PMT regs */
	if (device_may_wakeup(priv->device))
		priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
	else {
		stmmac_set_mac(priv->ioaddr, false);
		/* Disable the clock when PMT (wake-up) is not used */
		clk_disable_unprepare(priv->stmmac_clk);
	}
	spin_unlock_irqrestore(&priv->lock, flags);
	return 0;
}

int stmmac_resume(struct net_device *ndev)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	unsigned long flags;

	if (!netif_running(ndev))
		return 0;

	spin_lock_irqsave(&priv->lock, flags);

	/* Power Down bit, into the PM register, is cleared
	 * automatically as soon as a magic packet or a Wake-up frame
	 * is received. Anyway, it's better to manually clear
	 * this bit because it can generate problems while resuming
	 * from other devices (e.g. serial console).
	 */
	if (device_may_wakeup(priv->device))
		priv->hw->mac->pmt(priv->ioaddr, 0);
	else
		/* enable the clk previously disabled */
		clk_prepare_enable(priv->stmmac_clk);

	netif_device_attach(ndev);

	/* Enable the MAC and DMA */
	stmmac_set_mac(priv->ioaddr, true);
	priv->hw->dma->start_tx(priv->ioaddr);
	priv->hw->dma->start_rx(priv->ioaddr);

	napi_enable(&priv->napi);

	netif_start_queue(ndev);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (priv->phydev)
		phy_start(priv->phydev);

	return 0;
}

int stmmac_freeze(struct net_device *ndev)
{
	if (!ndev || !netif_running(ndev))
		return 0;

	return stmmac_release(ndev);
}

int stmmac_restore(struct net_device *ndev)
{
	if (!ndev || !netif_running(ndev))
		return 0;

	return stmmac_open(ndev);
}
#endif /* CONFIG_PM */

/* The driver can be built with either or both of the PCI and platform
 * glue drivers, depending on the configuration selected.
 */
static int __init stmmac_init(void)
{
	int ret;

	ret = stmmac_register_platform();
	if (ret)
		goto err;
	ret = stmmac_register_pci();
	if (ret)
		goto err_pci;
	return 0;
err_pci:
	stmmac_unregister_platform();
err:
	pr_err("stmmac: driver registration failed\n");
	return ret;
}

static void __exit stmmac_exit(void)
{
	stmmac_unregister_platform();
	stmmac_unregister_pci();
}

module_init(stmmac_init);
module_exit(stmmac_exit);

#ifndef MODULE
static int __init stmmac_cmdline_opt(char *str)
{
	char *opt;

	if (!str || !*str)
		return -EINVAL;
	while ((opt = strsep(&str, ",")) != NULL) {
		if (!strncmp(opt, "debug:", 6)) {
			if (kstrtoint(opt + 6, 0, &debug))
				goto err;
		} else if (!strncmp(opt, "phyaddr:", 8)) {
			if (kstrtoint(opt + 8, 0, &phyaddr))
				goto err;
		} else if (!strncmp(opt, "dma_txsize:", 11)) {
			if (kstrtoint(opt + 11, 0, &dma_txsize))
				goto err;
		} else if (!strncmp(opt, "dma_rxsize:", 11)) {
			if (kstrtoint(opt + 11, 0, &dma_rxsize))
				goto err;
		} else if (!strncmp(opt, "buf_sz:", 7)) {
			if (kstrtoint(opt + 7, 0, &buf_sz))
				goto err;
		} else if (!strncmp(opt, "tc:", 3)) {
			if (kstrtoint(opt + 3, 0, &tc))
				goto err;
		} else if (!strncmp(opt, "watchdog:", 9)) {
			if (kstrtoint(opt + 9, 0, &watchdog))
				goto err;
		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
			if (kstrtoint(opt + 10, 0, &flow_ctrl))
				goto err;
		} else if (!strncmp(opt, "pause:", 6)) {
			if (kstrtoint(opt + 6, 0, &pause))
				goto err;
		} else if (!strncmp(opt, "eee_timer:", 10)) {
			if (kstrtoint(opt + 10, 0, &eee_timer))
				goto err;
		} else if (!strncmp(opt, "chain_mode:", 11)) {
			if (kstrtoint(opt + 11, 0, &chain_mode))
				goto err;
		}
	}
	return 0;

err:
	pr_err("%s: ERROR broken module parameter conversion", __func__);
	return -EINVAL;
}

__setup("stmmaceth=", stmmac_cmdline_opt);
#endif
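/* Example (built-in driver only): booting with
 *	stmmaceth=debug:16,phyaddr:1,watchdog:5000,chain_mode:1
 * sets the message level, the PHY address, the TX timeout (in ms) and
 * selects the chain mode; the option names match the strsep() parsing
 * above.
 */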

MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");