drivers/net/ethernet/stmicro/stmmac/stmmac_main.c (mirror_ubuntu-artful-kernel.git, git.proxmox.com mirror) at commit "net: stmmac: configure mtl rx and tx algorithms"
1 /*******************************************************************************
2 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3 ST Ethernet IPs are built around a Synopsys IP Core.
4
5 Copyright(C) 2007-2011 STMicroelectronics Ltd
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 The full GNU General Public License is included in this distribution in
17 the file called "COPYING".
18
19 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
20
21 Documentation available at:
22 http://www.stlinux.com
23 Support available at:
24 https://bugzilla.stlinux.com/
25 *******************************************************************************/
26
27 #include <linux/clk.h>
28 #include <linux/kernel.h>
29 #include <linux/interrupt.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/if_ether.h>
35 #include <linux/crc32.h>
36 #include <linux/mii.h>
37 #include <linux/if.h>
38 #include <linux/if_vlan.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/prefetch.h>
42 #include <linux/pinctrl/consumer.h>
43 #ifdef CONFIG_DEBUG_FS
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46 #endif /* CONFIG_DEBUG_FS */
47 #include <linux/net_tstamp.h>
48 #include "stmmac_ptp.h"
49 #include "stmmac.h"
50 #include <linux/reset.h>
51 #include <linux/of_mdio.h>
52 #include "dwmac1000.h"
53
54 #define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
55 #define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
56
57 /* Module parameters */
58 #define TX_TIMEO 5000
59 static int watchdog = TX_TIMEO;
60 module_param(watchdog, int, S_IRUGO | S_IWUSR);
61 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
62
63 static int debug = -1;
64 module_param(debug, int, S_IRUGO | S_IWUSR);
65 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
66
67 static int phyaddr = -1;
68 module_param(phyaddr, int, S_IRUGO);
69 MODULE_PARM_DESC(phyaddr, "Physical device address");
70
71 #define STMMAC_TX_THRESH (DMA_TX_SIZE / 4)
72 #define STMMAC_RX_THRESH (DMA_RX_SIZE / 4)
73
74 static int flow_ctrl = FLOW_OFF;
75 module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
76 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
77
78 static int pause = PAUSE_TIME;
79 module_param(pause, int, S_IRUGO | S_IWUSR);
80 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
81
82 #define TC_DEFAULT 64
83 static int tc = TC_DEFAULT;
84 module_param(tc, int, S_IRUGO | S_IWUSR);
85 MODULE_PARM_DESC(tc, "DMA threshold control value");
86
87 #define DEFAULT_BUFSIZE 1536
88 static int buf_sz = DEFAULT_BUFSIZE;
89 module_param(buf_sz, int, S_IRUGO | S_IWUSR);
90 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
91
92 #define STMMAC_RX_COPYBREAK 256
93
94 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
95 NETIF_MSG_LINK | NETIF_MSG_IFUP |
96 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
97
98 #define STMMAC_DEFAULT_LPI_TIMER 1000
99 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
100 module_param(eee_timer, int, S_IRUGO | S_IWUSR);
101 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
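/* Expiry (in jiffies) for the EEE/LPI SW timer; x is given in milliseconds */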
102 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
103
104 /* By default the driver will use the ring mode to manage tx and rx descriptors,
105 * but allows the user to force the use of the chain instead of the ring
106 */
107 static unsigned int chain_mode;
108 module_param(chain_mode, int, S_IRUGO);
109 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
110
111 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
112
113 #ifdef CONFIG_DEBUG_FS
114 static int stmmac_init_fs(struct net_device *dev);
115 static void stmmac_exit_fs(struct net_device *dev);
116 #endif
117
118 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
119
120 /**
121 * stmmac_verify_args - verify the driver parameters.
122 * Description: it checks the driver parameters and sets a default in case of
123 * errors.
124 */
125 static void stmmac_verify_args(void)
126 {
127 if (unlikely(watchdog < 0))
128 watchdog = TX_TIMEO;
129 if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
130 buf_sz = DEFAULT_BUFSIZE;
131 if (unlikely(flow_ctrl > 1))
132 flow_ctrl = FLOW_AUTO;
133 else if (likely(flow_ctrl < 0))
134 flow_ctrl = FLOW_OFF;
135 if (unlikely((pause < 0) || (pause > 0xffff)))
136 pause = PAUSE_TIME;
137 if (eee_timer < 0)
138 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
139 }
140
141 /**
142 * stmmac_clk_csr_set - dynamically set the MDC clock
143 * @priv: driver private structure
144 * Description: this is to dynamically set the MDC clock according to the csr
145 * clock input.
146 * Note:
147 * If a specific clk_csr value is passed from the platform
148 * this means that the CSR Clock Range selection cannot be
149 * changed at run-time and it is fixed (as reported in the driver
150 * documentation). Otherwise the driver will try to set the MDC
151 * clock dynamically according to the actual clock input.
152 */
153 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
154 {
155 u32 clk_rate;
156
157 clk_rate = clk_get_rate(priv->plat->stmmac_clk);
158
159 /* Platform provided default clk_csr would be assumed valid
160 * for all other cases except for the below mentioned ones.
161 * For values higher than the IEEE 802.3 specified frequency we
162 * cannot estimate the proper divider because the frequency of
163 * clk_csr_i is not known, so we do not change the default
164 * divider.
165 */
166 if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
167 if (clk_rate < CSR_F_35M)
168 priv->clk_csr = STMMAC_CSR_20_35M;
169 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
170 priv->clk_csr = STMMAC_CSR_35_60M;
171 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
172 priv->clk_csr = STMMAC_CSR_60_100M;
173 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
174 priv->clk_csr = STMMAC_CSR_100_150M;
175 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
176 priv->clk_csr = STMMAC_CSR_150_250M;
177 else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
178 priv->clk_csr = STMMAC_CSR_250_300M;
179 }
180 }
181
182 static void print_pkt(unsigned char *buf, int len)
183 {
184 pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
185 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
186 }
187
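/* Number of free TX descriptors in the ring, i.e. the distance between
 * dirty_tx (next descriptor to be reclaimed) and cur_tx (next descriptor to
 * be used). One slot is kept unused so a full ring can be distinguished from
 * an empty one.
 */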
188 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
189 {
190 u32 avail;
191
192 if (priv->dirty_tx > priv->cur_tx)
193 avail = priv->dirty_tx - priv->cur_tx - 1;
194 else
195 avail = DMA_TX_SIZE - priv->cur_tx + priv->dirty_tx - 1;
196
197 return avail;
198 }
199
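/* Number of RX descriptors already consumed and waiting to be refilled with
 * a fresh buffer, i.e. the distance between dirty_rx and cur_rx.
 */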
200 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv)
201 {
202 u32 dirty;
203
204 if (priv->dirty_rx <= priv->cur_rx)
205 dirty = priv->cur_rx - priv->dirty_rx;
206 else
207 dirty = DMA_RX_SIZE - priv->dirty_rx + priv->cur_rx;
208
209 return dirty;
210 }
211
212 /**
213 * stmmac_hw_fix_mac_speed - callback for speed selection
214 * @priv: driver private structure
215 * Description: on some platforms (e.g. ST), some HW system configuration
216 * registers have to be set according to the link speed negotiated.
217 */
218 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
219 {
220 struct net_device *ndev = priv->dev;
221 struct phy_device *phydev = ndev->phydev;
222
223 if (likely(priv->plat->fix_mac_speed))
224 priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
225 }
226
227 /**
228 * stmmac_enable_eee_mode - check and enter in LPI mode
229 * @priv: driver private structure
230 * Description: this function verifies and enters the LPI mode in case of
231 * EEE.
232 */
233 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
234 {
235 /* Check and enter in LPI mode */
236 if ((priv->dirty_tx == priv->cur_tx) &&
237 (priv->tx_path_in_lpi_mode == false))
238 priv->hw->mac->set_eee_mode(priv->hw,
239 priv->plat->en_tx_lpi_clockgating);
240 }
241
242 /**
243 * stmmac_disable_eee_mode - disable and exit from LPI mode
244 * @priv: driver private structure
245 * Description: this function is to exit and disable EEE in case the
246 * LPI state is true. It is called by the xmit path.
247 */
248 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
249 {
250 priv->hw->mac->reset_eee_mode(priv->hw);
251 del_timer_sync(&priv->eee_ctrl_timer);
252 priv->tx_path_in_lpi_mode = false;
253 }
254
255 /**
256 * stmmac_eee_ctrl_timer - EEE TX SW timer.
257 * @arg : data hook
258 * Description:
259 * if there is no data transfer and if we are not in LPI state,
260 * then MAC Transmitter can be moved to LPI state.
261 */
262 static void stmmac_eee_ctrl_timer(unsigned long arg)
263 {
264 struct stmmac_priv *priv = (struct stmmac_priv *)arg;
265
266 stmmac_enable_eee_mode(priv);
267 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
268 }
269
270 /**
271 * stmmac_eee_init - init EEE
272 * @priv: driver private structure
273 * Description:
274 * if the GMAC supports the EEE (from the HW cap reg) and the phy device
275 * can also manage EEE, this function enables the LPI state and starts the
276 * related timer.
277 */
278 bool stmmac_eee_init(struct stmmac_priv *priv)
279 {
280 struct net_device *ndev = priv->dev;
281 unsigned long flags;
282 bool ret = false;
283
284 /* When using the PCS we cannot deal with the phy registers at this stage,
285 * so we do not support extra features like EEE.
286 */
287 if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
288 (priv->hw->pcs == STMMAC_PCS_TBI) ||
289 (priv->hw->pcs == STMMAC_PCS_RTBI))
290 goto out;
291
292 /* MAC core supports the EEE feature. */
293 if (priv->dma_cap.eee) {
294 int tx_lpi_timer = priv->tx_lpi_timer;
295
296 /* Check if the PHY supports EEE */
297 if (phy_init_eee(ndev->phydev, 1)) {
298 /* To manage at run-time if the EEE cannot be supported
299 * anymore (for example because the lp caps have been
300 * changed).
301 * In that case the driver disables its own timers.
302 */
303 spin_lock_irqsave(&priv->lock, flags);
304 if (priv->eee_active) {
305 netdev_dbg(priv->dev, "disable EEE\n");
306 del_timer_sync(&priv->eee_ctrl_timer);
307 priv->hw->mac->set_eee_timer(priv->hw, 0,
308 tx_lpi_timer);
309 }
310 priv->eee_active = 0;
311 spin_unlock_irqrestore(&priv->lock, flags);
312 goto out;
313 }
314 /* Activate the EEE and start timers */
315 spin_lock_irqsave(&priv->lock, flags);
316 if (!priv->eee_active) {
317 priv->eee_active = 1;
318 setup_timer(&priv->eee_ctrl_timer,
319 stmmac_eee_ctrl_timer,
320 (unsigned long)priv);
321 mod_timer(&priv->eee_ctrl_timer,
322 STMMAC_LPI_T(eee_timer));
323
324 priv->hw->mac->set_eee_timer(priv->hw,
325 STMMAC_DEFAULT_LIT_LS,
326 tx_lpi_timer);
327 }
328 /* Set HW EEE according to the speed */
329 priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);
330
331 ret = true;
332 spin_unlock_irqrestore(&priv->lock, flags);
333
334 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
335 }
336 out:
337 return ret;
338 }
339
340 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
341 * @priv: driver private structure
342 * @p : descriptor pointer
343 * @skb : the socket buffer
344 * Description:
345 * This function will read the timestamp from the descriptor, pass it to the
346 * stack and also perform some sanity checks.
347 */
348 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
349 struct dma_desc *p, struct sk_buff *skb)
350 {
351 struct skb_shared_hwtstamps shhwtstamp;
352 u64 ns;
353
354 if (!priv->hwts_tx_en)
355 return;
356
357 /* exit if skb doesn't support hw tstamp */
358 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
359 return;
360
361 /* check tx tstamp status */
362 if (!priv->hw->desc->get_tx_timestamp_status(p)) {
363 /* get the valid tstamp */
364 ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
365
366 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
367 shhwtstamp.hwtstamp = ns_to_ktime(ns);
368
369 netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns);
370 /* pass tstamp to stack */
371 skb_tstamp_tx(skb, &shhwtstamp);
372 }
373
374 return;
375 }
376
377 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
378 * @priv: driver private structure
379 * @p : descriptor pointer
380 * @np : next descriptor pointer
381 * @skb : the socket buffer
382 * Description:
383 * This function will read the received packet's timestamp from the descriptor
384 * and pass it to the stack. It also performs some sanity checks.
385 */
386 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
387 struct dma_desc *np, struct sk_buff *skb)
388 {
389 struct skb_shared_hwtstamps *shhwtstamp = NULL;
390 u64 ns;
391
392 if (!priv->hwts_rx_en)
393 return;
394
395 /* Check if timestamp is available */
396 if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
397 /* For GMAC4, the valid timestamp is from CTX next desc. */
398 if (priv->plat->has_gmac4)
399 ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
400 else
401 ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
402
403 netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns);
404 shhwtstamp = skb_hwtstamps(skb);
405 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
406 shhwtstamp->hwtstamp = ns_to_ktime(ns);
407 } else {
408 netdev_err(priv->dev, "cannot get RX hw timestamp\n");
409 }
410 }
411
412 /**
413 * stmmac_hwtstamp_ioctl - control hardware timestamping.
414 * @dev: device pointer.
415 * @ifr: An IOCTL specific structure, that can contain a pointer to
416 * a proprietary structure used to pass information to the driver.
417 * Description:
418 * This function configures the MAC to enable/disable both outgoing(TX)
419 * and incoming(RX) packets time stamping based on user input.
420 * Return Value:
421 * 0 on success and an appropriate -ve integer on failure.
422 */
423 static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
424 {
425 struct stmmac_priv *priv = netdev_priv(dev);
426 struct hwtstamp_config config;
427 struct timespec64 now;
428 u64 temp = 0;
429 u32 ptp_v2 = 0;
430 u32 tstamp_all = 0;
431 u32 ptp_over_ipv4_udp = 0;
432 u32 ptp_over_ipv6_udp = 0;
433 u32 ptp_over_ethernet = 0;
434 u32 snap_type_sel = 0;
435 u32 ts_master_en = 0;
436 u32 ts_event_en = 0;
437 u32 value = 0;
438 u32 sec_inc;
439
440 if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
441 netdev_alert(priv->dev, "No support for HW time stamping\n");
442 priv->hwts_tx_en = 0;
443 priv->hwts_rx_en = 0;
444
445 return -EOPNOTSUPP;
446 }
447
448 if (copy_from_user(&config, ifr->ifr_data,
449 sizeof(struct hwtstamp_config)))
450 return -EFAULT;
451
452 netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
453 __func__, config.flags, config.tx_type, config.rx_filter);
454
455 /* reserved for future extensions */
456 if (config.flags)
457 return -EINVAL;
458
459 if (config.tx_type != HWTSTAMP_TX_OFF &&
460 config.tx_type != HWTSTAMP_TX_ON)
461 return -ERANGE;
462
463 if (priv->adv_ts) {
464 switch (config.rx_filter) {
465 case HWTSTAMP_FILTER_NONE:
466 /* time stamp no incoming packet at all */
467 config.rx_filter = HWTSTAMP_FILTER_NONE;
468 break;
469
470 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
471 /* PTP v1, UDP, any kind of event packet */
472 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
473 /* take time stamp for all event messages */
474 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
475
476 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
477 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
478 break;
479
480 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
481 /* PTP v1, UDP, Sync packet */
482 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
483 /* take time stamp for SYNC messages only */
484 ts_event_en = PTP_TCR_TSEVNTENA;
485
486 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
487 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
488 break;
489
490 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
491 /* PTP v1, UDP, Delay_req packet */
492 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
493 /* take time stamp for Delay_Req messages only */
494 ts_master_en = PTP_TCR_TSMSTRENA;
495 ts_event_en = PTP_TCR_TSEVNTENA;
496
497 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
498 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
499 break;
500
501 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
502 /* PTP v2, UDP, any kind of event packet */
503 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
504 ptp_v2 = PTP_TCR_TSVER2ENA;
505 /* take time stamp for all event messages */
506 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
507
508 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
509 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
510 break;
511
512 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
513 /* PTP v2, UDP, Sync packet */
514 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
515 ptp_v2 = PTP_TCR_TSVER2ENA;
516 /* take time stamp for SYNC messages only */
517 ts_event_en = PTP_TCR_TSEVNTENA;
518
519 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
520 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
521 break;
522
523 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
524 /* PTP v2, UDP, Delay_req packet */
525 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
526 ptp_v2 = PTP_TCR_TSVER2ENA;
527 /* take time stamp for Delay_Req messages only */
528 ts_master_en = PTP_TCR_TSMSTRENA;
529 ts_event_en = PTP_TCR_TSEVNTENA;
530
531 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
532 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
533 break;
534
535 case HWTSTAMP_FILTER_PTP_V2_EVENT:
536 /* PTP v2/802.AS1 any layer, any kind of event packet */
537 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
538 ptp_v2 = PTP_TCR_TSVER2ENA;
539 /* take time stamp for all event messages */
540 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
541
542 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
543 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
544 ptp_over_ethernet = PTP_TCR_TSIPENA;
545 break;
546
547 case HWTSTAMP_FILTER_PTP_V2_SYNC:
548 /* PTP v2/802.AS1, any layer, Sync packet */
549 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
550 ptp_v2 = PTP_TCR_TSVER2ENA;
551 /* take time stamp for SYNC messages only */
552 ts_event_en = PTP_TCR_TSEVNTENA;
553
554 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
555 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
556 ptp_over_ethernet = PTP_TCR_TSIPENA;
557 break;
558
559 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
560 /* PTP v2/802.AS1, any layer, Delay_req packet */
561 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
562 ptp_v2 = PTP_TCR_TSVER2ENA;
563 /* take time stamp for Delay_Req messages only */
564 ts_master_en = PTP_TCR_TSMSTRENA;
565 ts_event_en = PTP_TCR_TSEVNTENA;
566
567 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
568 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
569 ptp_over_ethernet = PTP_TCR_TSIPENA;
570 break;
571
572 case HWTSTAMP_FILTER_ALL:
573 /* time stamp any incoming packet */
574 config.rx_filter = HWTSTAMP_FILTER_ALL;
575 tstamp_all = PTP_TCR_TSENALL;
576 break;
577
578 default:
579 return -ERANGE;
580 }
581 } else {
582 switch (config.rx_filter) {
583 case HWTSTAMP_FILTER_NONE:
584 config.rx_filter = HWTSTAMP_FILTER_NONE;
585 break;
586 default:
587 /* PTP v1, UDP, any kind of event packet */
588 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
589 break;
590 }
591 }
592 priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
593 priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
594
595 if (!priv->hwts_tx_en && !priv->hwts_rx_en)
596 priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
597 else {
598 value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
599 tstamp_all | ptp_v2 | ptp_over_ethernet |
600 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
601 ts_master_en | snap_type_sel);
602 priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);
603
604 /* program Sub Second Increment reg */
605 sec_inc = priv->hw->ptp->config_sub_second_increment(
606 priv->ptpaddr, priv->plat->clk_ptp_rate,
607 priv->plat->has_gmac4);
608 temp = div_u64(1000000000ULL, sec_inc);
609
610 /* calculate default added value:
611 * formula is :
612 * addend = (2^32)/freq_div_ratio;
613 * where, freq_div_ratio = 1e9ns/sec_inc
614 */
615 temp = (u64)(temp << 32);
616 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
617 priv->hw->ptp->config_addend(priv->ptpaddr,
618 priv->default_addend);
619
620 /* initialize system time */
621 ktime_get_real_ts64(&now);
622
623 /* lower 32 bits of tv_sec are safe until y2106 */
624 priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
625 now.tv_nsec);
626 }
627
628 return copy_to_user(ifr->ifr_data, &config,
629 sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
630 }
631
632 /**
633 * stmmac_init_ptp - init PTP
634 * @priv: driver private structure
635 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
636 * This is done by looking at the HW cap. register.
637 * This function also registers the ptp driver.
638 */
639 static int stmmac_init_ptp(struct stmmac_priv *priv)
640 {
641 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
642 return -EOPNOTSUPP;
643
644 priv->adv_ts = 0;
645 /* Check if adv_ts can be enabled for dwmac 4.x core */
646 if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
647 priv->adv_ts = 1;
648 /* Dwmac 3.x core with extend_desc can support adv_ts */
649 else if (priv->extend_desc && priv->dma_cap.atime_stamp)
650 priv->adv_ts = 1;
651
652 if (priv->dma_cap.time_stamp)
653 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
654
655 if (priv->adv_ts)
656 netdev_info(priv->dev,
657 "IEEE 1588-2008 Advanced Timestamp supported\n");
658
659 priv->hw->ptp = &stmmac_ptp;
660 priv->hwts_tx_en = 0;
661 priv->hwts_rx_en = 0;
662
663 stmmac_ptp_register(priv);
664
665 return 0;
666 }
667
668 static void stmmac_release_ptp(struct stmmac_priv *priv)
669 {
670 if (priv->plat->clk_ptp_ref)
671 clk_disable_unprepare(priv->plat->clk_ptp_ref);
672 stmmac_ptp_unregister(priv);
673 }
674
675 /**
676 * stmmac_adjust_link - adjusts the link parameters
677 * @dev: net device structure
678 * Description: this is the helper called by the physical abstraction layer
679 * drivers to communicate the phy link status. According to the speed and
680 * duplex this driver can invoke registered glue-logic as well.
681 * It also invokes the eee initialization because it could happen when
682 * switching between different networks (that are eee capable).
683 */
684 static void stmmac_adjust_link(struct net_device *dev)
685 {
686 struct stmmac_priv *priv = netdev_priv(dev);
687 struct phy_device *phydev = dev->phydev;
688 unsigned long flags;
689 int new_state = 0;
690 unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;
691
692 if (!phydev)
693 return;
694
695 spin_lock_irqsave(&priv->lock, flags);
696
697 if (phydev->link) {
698 u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
699
700 /* Now we make sure that we can be in full duplex mode.
701 * If not, we operate in half-duplex mode. */
702 if (phydev->duplex != priv->oldduplex) {
703 new_state = 1;
704 if (!(phydev->duplex))
705 ctrl &= ~priv->hw->link.duplex;
706 else
707 ctrl |= priv->hw->link.duplex;
708 priv->oldduplex = phydev->duplex;
709 }
710 /* Flow Control operation */
711 if (phydev->pause)
712 priv->hw->mac->flow_ctrl(priv->hw, phydev->duplex,
713 fc, pause_time);
714
715 if (phydev->speed != priv->speed) {
716 new_state = 1;
717 switch (phydev->speed) {
718 case 1000:
719 if (priv->plat->has_gmac ||
720 priv->plat->has_gmac4)
721 ctrl &= ~priv->hw->link.port;
722 break;
723 case 100:
724 if (priv->plat->has_gmac ||
725 priv->plat->has_gmac4) {
726 ctrl |= priv->hw->link.port;
727 ctrl |= priv->hw->link.speed;
728 } else {
729 ctrl &= ~priv->hw->link.port;
730 }
731 break;
732 case 10:
733 if (priv->plat->has_gmac ||
734 priv->plat->has_gmac4) {
735 ctrl |= priv->hw->link.port;
736 ctrl &= ~(priv->hw->link.speed);
737 } else {
738 ctrl &= ~priv->hw->link.port;
739 }
740 break;
741 default:
742 netif_warn(priv, link, priv->dev,
743 "broken speed: %d\n", phydev->speed);
744 phydev->speed = SPEED_UNKNOWN;
745 break;
746 }
747 if (phydev->speed != SPEED_UNKNOWN)
748 stmmac_hw_fix_mac_speed(priv);
749 priv->speed = phydev->speed;
750 }
751
752 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
753
754 if (!priv->oldlink) {
755 new_state = 1;
756 priv->oldlink = 1;
757 }
758 } else if (priv->oldlink) {
759 new_state = 1;
760 priv->oldlink = 0;
761 priv->speed = SPEED_UNKNOWN;
762 priv->oldduplex = DUPLEX_UNKNOWN;
763 }
764
765 if (new_state && netif_msg_link(priv))
766 phy_print_status(phydev);
767
768 spin_unlock_irqrestore(&priv->lock, flags);
769
770 if (phydev->is_pseudo_fixed_link)
771 /* Stop the PHY layer from calling the hook to adjust the link in
772 * case a switch is attached to the stmmac driver.
773 */
774 phydev->irq = PHY_IGNORE_INTERRUPT;
775 else
776 /* At this stage, init the EEE if supported.
777 * Never called in case of fixed_link.
778 */
779 priv->eee_enabled = stmmac_eee_init(priv);
780 }
781
782 /**
783 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
784 * @priv: driver private structure
785 * Description: this is to verify if the HW supports the PCS.
786 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
787 * configured for the TBI, RTBI, or SGMII PHY interface.
788 */
789 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
790 {
791 int interface = priv->plat->interface;
792
793 if (priv->dma_cap.pcs) {
794 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
795 (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
796 (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
797 (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
798 netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
799 priv->hw->pcs = STMMAC_PCS_RGMII;
800 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
801 netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
802 priv->hw->pcs = STMMAC_PCS_SGMII;
803 }
804 }
805 }
806
807 /**
808 * stmmac_init_phy - PHY initialization
809 * @dev: net device structure
810 * Description: it initializes the driver's PHY state, and attaches the PHY
811 * to the mac driver.
812 * Return value:
813 * 0 on success
814 */
815 static int stmmac_init_phy(struct net_device *dev)
816 {
817 struct stmmac_priv *priv = netdev_priv(dev);
818 struct phy_device *phydev;
819 char phy_id_fmt[MII_BUS_ID_SIZE + 3];
820 char bus_id[MII_BUS_ID_SIZE];
821 int interface = priv->plat->interface;
822 int max_speed = priv->plat->max_speed;
823 priv->oldlink = 0;
824 priv->speed = SPEED_UNKNOWN;
825 priv->oldduplex = DUPLEX_UNKNOWN;
826
827 if (priv->plat->phy_node) {
828 phydev = of_phy_connect(dev, priv->plat->phy_node,
829 &stmmac_adjust_link, 0, interface);
830 } else {
831 snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
832 priv->plat->bus_id);
833
834 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
835 priv->plat->phy_addr);
836 netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
837 phy_id_fmt);
838
839 phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
840 interface);
841 }
842
843 if (IS_ERR_OR_NULL(phydev)) {
844 netdev_err(priv->dev, "Could not attach to PHY\n");
845 if (!phydev)
846 return -ENODEV;
847
848 return PTR_ERR(phydev);
849 }
850
851 /* Stop Advertising 1000BASE Capability if interface is not GMII */
852 if ((interface == PHY_INTERFACE_MODE_MII) ||
853 (interface == PHY_INTERFACE_MODE_RMII) ||
854 (max_speed < 1000 && max_speed > 0))
855 phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
856 SUPPORTED_1000baseT_Full);
857
858 /*
859 * Broken HW is sometimes missing the pull-up resistor on the
860 * MDIO line, which results in reads to non-existent devices returning
861 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
862 * device as well.
863 * Note: phydev->phy_id is the result of reading the UID PHY registers.
864 */
865 if (!priv->plat->phy_node && phydev->phy_id == 0) {
866 phy_disconnect(phydev);
867 return -ENODEV;
868 }
869
870 /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
871 * subsequent PHY polling, make sure we force a link transition if
872 * we have a UP/DOWN/UP transition
873 */
874 if (phydev->is_pseudo_fixed_link)
875 phydev->irq = PHY_POLL;
876
877 phy_attached_info(phydev);
878 return 0;
879 }
880
881 static void stmmac_display_rings(struct stmmac_priv *priv)
882 {
883 void *head_rx, *head_tx;
884
885 if (priv->extend_desc) {
886 head_rx = (void *)priv->dma_erx;
887 head_tx = (void *)priv->dma_etx;
888 } else {
889 head_rx = (void *)priv->dma_rx;
890 head_tx = (void *)priv->dma_tx;
891 }
892
893 /* Display Rx ring */
894 priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
895 /* Display Tx ring */
896 priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
897 }
898
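/* Map the MTU to one of the supported DMA buffer sizes (default 1536 bytes,
 * then 2KiB/4KiB/8KiB buckets); the 16KiB jumbo case is handled separately by
 * the ring/chain mode set_16kib_bfsize callback.
 */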
899 static int stmmac_set_bfsize(int mtu, int bufsize)
900 {
901 int ret = bufsize;
902
903 if (mtu >= BUF_SIZE_4KiB)
904 ret = BUF_SIZE_8KiB;
905 else if (mtu >= BUF_SIZE_2KiB)
906 ret = BUF_SIZE_4KiB;
907 else if (mtu > DEFAULT_BUFSIZE)
908 ret = BUF_SIZE_2KiB;
909 else
910 ret = DEFAULT_BUFSIZE;
911
912 return ret;
913 }
914
915 /**
916 * stmmac_clear_descriptors - clear descriptors
917 * @priv: driver private structure
918 * Description: this function is called to clear the tx and rx descriptors
919 * whether basic or extended descriptors are in use.
920 */
921 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
922 {
923 int i;
924
925 /* Clear the Rx/Tx descriptors */
926 for (i = 0; i < DMA_RX_SIZE; i++)
927 if (priv->extend_desc)
928 priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic,
929 priv->use_riwt, priv->mode,
930 (i == DMA_RX_SIZE - 1));
931 else
932 priv->hw->desc->init_rx_desc(&priv->dma_rx[i],
933 priv->use_riwt, priv->mode,
934 (i == DMA_RX_SIZE - 1));
935 for (i = 0; i < DMA_TX_SIZE; i++)
936 if (priv->extend_desc)
937 priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
938 priv->mode,
939 (i == DMA_TX_SIZE - 1));
940 else
941 priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
942 priv->mode,
943 (i == DMA_TX_SIZE - 1));
944 }
945
946 /**
947 * stmmac_init_rx_buffers - init the RX descriptor buffer.
948 * @priv: driver private structure
949 * @p: descriptor pointer
950 * @i: descriptor index
951 * @flags: gfp flag.
952 * Description: this function is called to allocate a receive buffer, perform
953 * the DMA mapping and init the descriptor.
954 */
955 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
956 int i, gfp_t flags)
957 {
958 struct sk_buff *skb;
959
960 skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
961 if (!skb) {
962 netdev_err(priv->dev,
963 "%s: Rx init fails; skb is NULL\n", __func__);
964 return -ENOMEM;
965 }
966 priv->rx_skbuff[i] = skb;
967 priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
968 priv->dma_buf_sz,
969 DMA_FROM_DEVICE);
970 if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) {
971 netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
972 dev_kfree_skb_any(skb);
973 return -EINVAL;
974 }
975
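/* DWMAC core 4.00 and newer descriptors carry the buffer address in DES0,
 * while older cores use DES2.
 */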
976 if (priv->synopsys_id >= DWMAC_CORE_4_00)
977 p->des0 = cpu_to_le32(priv->rx_skbuff_dma[i]);
978 else
979 p->des2 = cpu_to_le32(priv->rx_skbuff_dma[i]);
980
981 if ((priv->hw->mode->init_desc3) &&
982 (priv->dma_buf_sz == BUF_SIZE_16KiB))
983 priv->hw->mode->init_desc3(p);
984
985 return 0;
986 }
987
988 static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
989 {
990 if (priv->rx_skbuff[i]) {
991 dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
992 priv->dma_buf_sz, DMA_FROM_DEVICE);
993 dev_kfree_skb_any(priv->rx_skbuff[i]);
994 }
995 priv->rx_skbuff[i] = NULL;
996 }
997
998 /**
999 * init_dma_desc_rings - init the RX/TX descriptor rings
1000 * @dev: net device structure
1001 * @flags: gfp flag.
1002 * Description: this function initializes the DMA RX/TX descriptors
1003 * and allocates the socket buffers. It supports the chained and ring
1004 * modes.
1005 */
1006 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1007 {
1008 int i;
1009 struct stmmac_priv *priv = netdev_priv(dev);
1010 unsigned int bfsize = 0;
1011 int ret = -ENOMEM;
1012
1013 if (priv->hw->mode->set_16kib_bfsize)
1014 bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
1015
1016 if (bfsize < BUF_SIZE_16KiB)
1017 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1018
1019 priv->dma_buf_sz = bfsize;
1020
1021 netif_dbg(priv, probe, priv->dev,
1022 "(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n",
1023 __func__, (u32)priv->dma_rx_phy, (u32)priv->dma_tx_phy);
1024
1025 /* RX INITIALIZATION */
1026 netif_dbg(priv, probe, priv->dev,
1027 "SKB addresses:\nskb\t\tskb data\tdma data\n");
1028
1029 for (i = 0; i < DMA_RX_SIZE; i++) {
1030 struct dma_desc *p;
1031 if (priv->extend_desc)
1032 p = &((priv->dma_erx + i)->basic);
1033 else
1034 p = priv->dma_rx + i;
1035
1036 ret = stmmac_init_rx_buffers(priv, p, i, flags);
1037 if (ret)
1038 goto err_init_rx_buffers;
1039
1040 netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1041 priv->rx_skbuff[i], priv->rx_skbuff[i]->data,
1042 (unsigned int)priv->rx_skbuff_dma[i]);
1043 }
1044 priv->cur_rx = 0;
1045 priv->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1046 buf_sz = bfsize;
1047
1048 /* Setup the chained descriptor addresses */
1049 if (priv->mode == STMMAC_CHAIN_MODE) {
1050 if (priv->extend_desc) {
1051 priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy,
1052 DMA_RX_SIZE, 1);
1053 priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy,
1054 DMA_TX_SIZE, 1);
1055 } else {
1056 priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy,
1057 DMA_RX_SIZE, 0);
1058 priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy,
1059 DMA_TX_SIZE, 0);
1060 }
1061 }
1062
1063 /* TX INITIALIZATION */
1064 for (i = 0; i < DMA_TX_SIZE; i++) {
1065 struct dma_desc *p;
1066 if (priv->extend_desc)
1067 p = &((priv->dma_etx + i)->basic);
1068 else
1069 p = priv->dma_tx + i;
1070
1071 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1072 p->des0 = 0;
1073 p->des1 = 0;
1074 p->des2 = 0;
1075 p->des3 = 0;
1076 } else {
1077 p->des2 = 0;
1078 }
1079
1080 priv->tx_skbuff_dma[i].buf = 0;
1081 priv->tx_skbuff_dma[i].map_as_page = false;
1082 priv->tx_skbuff_dma[i].len = 0;
1083 priv->tx_skbuff_dma[i].last_segment = false;
1084 priv->tx_skbuff[i] = NULL;
1085 }
1086
1087 priv->dirty_tx = 0;
1088 priv->cur_tx = 0;
1089 netdev_reset_queue(priv->dev);
1090
1091 stmmac_clear_descriptors(priv);
1092
1093 if (netif_msg_hw(priv))
1094 stmmac_display_rings(priv);
1095
1096 return 0;
1097 err_init_rx_buffers:
1098 while (--i >= 0)
1099 stmmac_free_rx_buffers(priv, i);
1100 return ret;
1101 }
1102
1103 static void dma_free_rx_skbufs(struct stmmac_priv *priv)
1104 {
1105 int i;
1106
1107 for (i = 0; i < DMA_RX_SIZE; i++)
1108 stmmac_free_rx_buffers(priv, i);
1109 }
1110
1111 static void dma_free_tx_skbufs(struct stmmac_priv *priv)
1112 {
1113 int i;
1114
1115 for (i = 0; i < DMA_TX_SIZE; i++) {
1116 if (priv->tx_skbuff_dma[i].buf) {
1117 if (priv->tx_skbuff_dma[i].map_as_page)
1118 dma_unmap_page(priv->device,
1119 priv->tx_skbuff_dma[i].buf,
1120 priv->tx_skbuff_dma[i].len,
1121 DMA_TO_DEVICE);
1122 else
1123 dma_unmap_single(priv->device,
1124 priv->tx_skbuff_dma[i].buf,
1125 priv->tx_skbuff_dma[i].len,
1126 DMA_TO_DEVICE);
1127 }
1128
1129 if (priv->tx_skbuff[i]) {
1130 dev_kfree_skb_any(priv->tx_skbuff[i]);
1131 priv->tx_skbuff[i] = NULL;
1132 priv->tx_skbuff_dma[i].buf = 0;
1133 priv->tx_skbuff_dma[i].map_as_page = false;
1134 }
1135 }
1136 }
1137
1138 /**
1139 * alloc_dma_desc_resources - alloc TX/RX resources.
1140 * @priv: private structure
1141 * Description: according to which descriptor can be used (extend or basic)
1142 * this function allocates the resources for TX and RX paths. In case of
1143 * reception, for example, it pre-allocates the RX socket buffers in order to
1144 * allow the zero-copy mechanism.
1145 */
1146 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1147 {
1148 int ret = -ENOMEM;
1149
1150 priv->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, sizeof(dma_addr_t),
1151 GFP_KERNEL);
1152 if (!priv->rx_skbuff_dma)
1153 return -ENOMEM;
1154
1155 priv->rx_skbuff = kmalloc_array(DMA_RX_SIZE, sizeof(struct sk_buff *),
1156 GFP_KERNEL);
1157 if (!priv->rx_skbuff)
1158 goto err_rx_skbuff;
1159
1160 priv->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1161 sizeof(*priv->tx_skbuff_dma),
1162 GFP_KERNEL);
1163 if (!priv->tx_skbuff_dma)
1164 goto err_tx_skbuff_dma;
1165
1166 priv->tx_skbuff = kmalloc_array(DMA_TX_SIZE, sizeof(struct sk_buff *),
1167 GFP_KERNEL);
1168 if (!priv->tx_skbuff)
1169 goto err_tx_skbuff;
1170
1171 if (priv->extend_desc) {
1172 priv->dma_erx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
1173 sizeof(struct
1174 dma_extended_desc),
1175 &priv->dma_rx_phy,
1176 GFP_KERNEL);
1177 if (!priv->dma_erx)
1178 goto err_dma;
1179
1180 priv->dma_etx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
1181 sizeof(struct
1182 dma_extended_desc),
1183 &priv->dma_tx_phy,
1184 GFP_KERNEL);
1185 if (!priv->dma_etx) {
1186 dma_free_coherent(priv->device, DMA_RX_SIZE *
1187 sizeof(struct dma_extended_desc),
1188 priv->dma_erx, priv->dma_rx_phy);
1189 goto err_dma;
1190 }
1191 } else {
1192 priv->dma_rx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
1193 sizeof(struct dma_desc),
1194 &priv->dma_rx_phy,
1195 GFP_KERNEL);
1196 if (!priv->dma_rx)
1197 goto err_dma;
1198
1199 priv->dma_tx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
1200 sizeof(struct dma_desc),
1201 &priv->dma_tx_phy,
1202 GFP_KERNEL);
1203 if (!priv->dma_tx) {
1204 dma_free_coherent(priv->device, DMA_RX_SIZE *
1205 sizeof(struct dma_desc),
1206 priv->dma_rx, priv->dma_rx_phy);
1207 goto err_dma;
1208 }
1209 }
1210
1211 return 0;
1212
1213 err_dma:
1214 kfree(priv->tx_skbuff);
1215 err_tx_skbuff:
1216 kfree(priv->tx_skbuff_dma);
1217 err_tx_skbuff_dma:
1218 kfree(priv->rx_skbuff);
1219 err_rx_skbuff:
1220 kfree(priv->rx_skbuff_dma);
1221 return ret;
1222 }
1223
1224 static void free_dma_desc_resources(struct stmmac_priv *priv)
1225 {
1226 /* Release the DMA TX/RX socket buffers */
1227 dma_free_rx_skbufs(priv);
1228 dma_free_tx_skbufs(priv);
1229
1230 /* Free DMA regions of consistent memory previously allocated */
1231 if (!priv->extend_desc) {
1232 dma_free_coherent(priv->device,
1233 DMA_TX_SIZE * sizeof(struct dma_desc),
1234 priv->dma_tx, priv->dma_tx_phy);
1235 dma_free_coherent(priv->device,
1236 DMA_RX_SIZE * sizeof(struct dma_desc),
1237 priv->dma_rx, priv->dma_rx_phy);
1238 } else {
1239 dma_free_coherent(priv->device, DMA_TX_SIZE *
1240 sizeof(struct dma_extended_desc),
1241 priv->dma_etx, priv->dma_tx_phy);
1242 dma_free_coherent(priv->device, DMA_RX_SIZE *
1243 sizeof(struct dma_extended_desc),
1244 priv->dma_erx, priv->dma_rx_phy);
1245 }
1246 kfree(priv->rx_skbuff_dma);
1247 kfree(priv->rx_skbuff);
1248 kfree(priv->tx_skbuff_dma);
1249 kfree(priv->tx_skbuff);
1250 }
1251
1252 /**
1253 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
1254 * @priv: driver private structure
1255 * Description: It is used for enabling the rx queues in the MAC
1256 */
1257 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1258 {
1259 int rx_count = priv->dma_cap.number_rx_queues;
1260 int queue = 0;
1261
1262 /* If GMAC does not have multiple queues, then this is not necessary */
1263 if (rx_count == 1)
1264 return;
1265
1266 /**
1267 * If the core is synthesized with multiple rx queues / multiple
1268 * dma channels, then rx queues will be disabled by default.
1269 * For now only rx queue 0 is enabled.
1270 */
1271 priv->hw->mac->rx_queue_enable(priv->hw, queue);
1272 }
1273
1274 /**
1275 * stmmac_dma_operation_mode - HW DMA operation mode
1276 * @priv: driver private structure
1277 * Description: it is used for configuring the DMA operation mode register in
1278 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1279 */
1280 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1281 {
1282 int rxfifosz = priv->plat->rx_fifo_size;
1283
1284 if (rxfifosz == 0)
1285 rxfifosz = priv->dma_cap.rx_fifo_size;
1286
1287 if (priv->plat->force_thresh_dma_mode)
1288 priv->hw->dma->dma_mode(priv->ioaddr, tc, tc, rxfifosz);
1289 else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1290 /*
1291 * In case of GMAC, SF mode can be enabled
1292 * to perform the TX COE in HW. This depends on:
1293 * 1) TX COE if actually supported
1294 * 2) There is no bugged Jumbo frame support
1295 * that needs to not insert csum in the TDES.
1296 */
1297 priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE,
1298 rxfifosz);
1299 priv->xstats.threshold = SF_DMA_MODE;
1300 } else
1301 priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE,
1302 rxfifosz);
1303 }
1304
1305 /**
1306 * stmmac_tx_clean - to manage the transmission completion
1307 * @priv: driver private structure
1308 * Description: it reclaims the transmit resources after transmission completes.
1309 */
1310 static void stmmac_tx_clean(struct stmmac_priv *priv)
1311 {
1312 unsigned int bytes_compl = 0, pkts_compl = 0;
1313 unsigned int entry = priv->dirty_tx;
1314
1315 netif_tx_lock(priv->dev);
1316
1317 priv->xstats.tx_clean++;
1318
1319 while (entry != priv->cur_tx) {
1320 struct sk_buff *skb = priv->tx_skbuff[entry];
1321 struct dma_desc *p;
1322 int status;
1323
1324 if (priv->extend_desc)
1325 p = (struct dma_desc *)(priv->dma_etx + entry);
1326 else
1327 p = priv->dma_tx + entry;
1328
1329 status = priv->hw->desc->tx_status(&priv->dev->stats,
1330 &priv->xstats, p,
1331 priv->ioaddr);
1332 /* Check if the descriptor is owned by the DMA */
1333 if (unlikely(status & tx_dma_own))
1334 break;
1335
1336 /* Just consider the last segment and ...*/
1337 if (likely(!(status & tx_not_ls))) {
1338 /* ... verify the status error condition */
1339 if (unlikely(status & tx_err)) {
1340 priv->dev->stats.tx_errors++;
1341 } else {
1342 priv->dev->stats.tx_packets++;
1343 priv->xstats.tx_pkt_n++;
1344 }
1345 stmmac_get_tx_hwtstamp(priv, p, skb);
1346 }
1347
1348 if (likely(priv->tx_skbuff_dma[entry].buf)) {
1349 if (priv->tx_skbuff_dma[entry].map_as_page)
1350 dma_unmap_page(priv->device,
1351 priv->tx_skbuff_dma[entry].buf,
1352 priv->tx_skbuff_dma[entry].len,
1353 DMA_TO_DEVICE);
1354 else
1355 dma_unmap_single(priv->device,
1356 priv->tx_skbuff_dma[entry].buf,
1357 priv->tx_skbuff_dma[entry].len,
1358 DMA_TO_DEVICE);
1359 priv->tx_skbuff_dma[entry].buf = 0;
1360 priv->tx_skbuff_dma[entry].len = 0;
1361 priv->tx_skbuff_dma[entry].map_as_page = false;
1362 }
1363
1364 if (priv->hw->mode->clean_desc3)
1365 priv->hw->mode->clean_desc3(priv, p);
1366
1367 priv->tx_skbuff_dma[entry].last_segment = false;
1368 priv->tx_skbuff_dma[entry].is_jumbo = false;
1369
1370 if (likely(skb != NULL)) {
1371 pkts_compl++;
1372 bytes_compl += skb->len;
1373 dev_consume_skb_any(skb);
1374 priv->tx_skbuff[entry] = NULL;
1375 }
1376
1377 priv->hw->desc->release_tx_desc(p, priv->mode);
1378
1379 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1380 }
1381 priv->dirty_tx = entry;
1382
1383 netdev_completed_queue(priv->dev, pkts_compl, bytes_compl);
1384
1385 if (unlikely(netif_queue_stopped(priv->dev) &&
1386 stmmac_tx_avail(priv) > STMMAC_TX_THRESH)) {
1387 netif_dbg(priv, tx_done, priv->dev,
1388 "%s: restart transmit\n", __func__);
1389 netif_wake_queue(priv->dev);
1390 }
1391
1392 if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1393 stmmac_enable_eee_mode(priv);
1394 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1395 }
1396 netif_tx_unlock(priv->dev);
1397 }
1398
1399 static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv)
1400 {
1401 priv->hw->dma->enable_dma_irq(priv->ioaddr);
1402 }
1403
1404 static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv)
1405 {
1406 priv->hw->dma->disable_dma_irq(priv->ioaddr);
1407 }
1408
1409 /**
1410 * stmmac_tx_err - to manage the tx error
1411 * @priv: driver private structure
1412 * Description: it cleans the descriptors and restarts the transmission
1413 * in case of transmission errors.
1414 */
1415 static void stmmac_tx_err(struct stmmac_priv *priv)
1416 {
1417 int i;
1418 netif_stop_queue(priv->dev);
1419
1420 priv->hw->dma->stop_tx(priv->ioaddr);
1421 dma_free_tx_skbufs(priv);
1422 for (i = 0; i < DMA_TX_SIZE; i++)
1423 if (priv->extend_desc)
1424 priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
1425 priv->mode,
1426 (i == DMA_TX_SIZE - 1));
1427 else
1428 priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
1429 priv->mode,
1430 (i == DMA_TX_SIZE - 1));
1431 priv->dirty_tx = 0;
1432 priv->cur_tx = 0;
1433 netdev_reset_queue(priv->dev);
1434 priv->hw->dma->start_tx(priv->ioaddr);
1435
1436 priv->dev->stats.tx_errors++;
1437 netif_wake_queue(priv->dev);
1438 }
1439
1440 /**
1441 * stmmac_dma_interrupt - DMA ISR
1442 * @priv: driver private structure
1443 * Description: this is the DMA ISR. It is called by the main ISR.
1444 * It calls the dwmac dma routine and schedules the poll method in case some
1445 * work can be done.
1446 */
1447 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
1448 {
1449 int status;
1450 int rxfifosz = priv->plat->rx_fifo_size;
1451
1452 status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
1453 if (likely((status & handle_rx)) || (status & handle_tx)) {
1454 if (likely(napi_schedule_prep(&priv->napi))) {
1455 stmmac_disable_dma_irq(priv);
1456 __napi_schedule(&priv->napi);
1457 }
1458 }
1459 if (unlikely(status & tx_hard_error_bump_tc)) {
1460 /* Try to bump up the dma threshold on this failure */
1461 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
1462 (tc <= 256)) {
1463 tc += 64;
1464 if (priv->plat->force_thresh_dma_mode)
1465 priv->hw->dma->dma_mode(priv->ioaddr, tc, tc,
1466 rxfifosz);
1467 else
1468 priv->hw->dma->dma_mode(priv->ioaddr, tc,
1469 SF_DMA_MODE, rxfifosz);
1470 priv->xstats.threshold = tc;
1471 }
1472 } else if (unlikely(status == tx_hard_error))
1473 stmmac_tx_err(priv);
1474 }
1475
1476 /**
1477 * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
1478 * @priv: driver private structure
1479 * Description: this masks the MMC irq, in fact, the counters are managed in SW.
1480 */
1481 static void stmmac_mmc_setup(struct stmmac_priv *priv)
1482 {
1483 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
1484 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
1485
1486 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1487 priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
1488 priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
1489 } else {
1490 priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
1491 priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
1492 }
1493
1494 dwmac_mmc_intr_all_mask(priv->mmcaddr);
1495
1496 if (priv->dma_cap.rmon) {
1497 dwmac_mmc_ctrl(priv->mmcaddr, mode);
1498 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
1499 } else
1500 netdev_info(priv->dev, "No MAC Management Counters available\n");
1501 }
1502
1503 /**
1504 * stmmac_selec_desc_mode - to select among: normal/alternate/extend descriptors
1505 * @priv: driver private structure
1506 * Description: select the Enhanced/Alternate or Normal descriptors.
1507 * In case of Enhanced/Alternate, it checks if the extended descriptors are
1508 * supported by the HW capability register.
1509 */
1510 static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
1511 {
1512 if (priv->plat->enh_desc) {
1513 dev_info(priv->device, "Enhanced/Alternate descriptors\n");
1514
1515 /* GMAC older than 3.50 has no extended descriptors */
1516 if (priv->synopsys_id >= DWMAC_CORE_3_50) {
1517 dev_info(priv->device, "Enabled extended descriptors\n");
1518 priv->extend_desc = 1;
1519 } else
1520 dev_warn(priv->device, "Extended descriptors not supported\n");
1521
1522 priv->hw->desc = &enh_desc_ops;
1523 } else {
1524 dev_info(priv->device, "Normal descriptors\n");
1525 priv->hw->desc = &ndesc_ops;
1526 }
1527 }
1528
1529 /**
1530 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
1531 * @priv: driver private structure
1532 * Description:
1533 * new GMAC chip generations have a new register to indicate the
1534 * presence of the optional features/functions.
1535 * This can also be used to override the value passed through the
1536 * platform, which is necessary for old MAC10/100 and GMAC chips.
1537 */
1538 static int stmmac_get_hw_features(struct stmmac_priv *priv)
1539 {
1540 u32 ret = 0;
1541
1542 if (priv->hw->dma->get_hw_feature) {
1543 priv->hw->dma->get_hw_feature(priv->ioaddr,
1544 &priv->dma_cap);
1545 ret = 1;
1546 }
1547
1548 return ret;
1549 }
1550
1551 /**
1552 * stmmac_check_ether_addr - check if the MAC addr is valid
1553 * @priv: driver private structure
1554 * Description:
1555 * it is to verify if the MAC address is valid; in case of failure it
1556 * generates a random MAC address
1557 */
1558 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
1559 {
1560 if (!is_valid_ether_addr(priv->dev->dev_addr)) {
1561 priv->hw->mac->get_umac_addr(priv->hw,
1562 priv->dev->dev_addr, 0);
1563 if (!is_valid_ether_addr(priv->dev->dev_addr))
1564 eth_hw_addr_random(priv->dev);
1565 netdev_info(priv->dev, "device MAC address %pM\n",
1566 priv->dev->dev_addr);
1567 }
1568 }
1569
1570 /**
1571 * stmmac_init_dma_engine - DMA init.
1572 * @priv: driver private structure
1573 * Description:
1574 * It inits the DMA invoking the specific MAC/GMAC callback.
1575 * Some DMA parameters can be passed from the platform;
1576 * if these are not passed, a default is kept for the MAC or GMAC.
1577 */
1578 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
1579 {
1580 int atds = 0;
1581 int ret = 0;
1582
1583 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
1584 dev_err(priv->device, "Invalid DMA configuration\n");
1585 return -EINVAL;
1586 }
1587
1588 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
1589 atds = 1;
1590
1591 ret = priv->hw->dma->reset(priv->ioaddr);
1592 if (ret) {
1593 dev_err(priv->device, "Failed to reset the dma\n");
1594 return ret;
1595 }
1596
1597 priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
1598 priv->dma_tx_phy, priv->dma_rx_phy, atds);
1599
1600 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1601 priv->rx_tail_addr = priv->dma_rx_phy +
1602 (DMA_RX_SIZE * sizeof(struct dma_desc));
1603 priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, priv->rx_tail_addr,
1604 STMMAC_CHAN0);
1605
1606 priv->tx_tail_addr = priv->dma_tx_phy +
1607 (DMA_TX_SIZE * sizeof(struct dma_desc));
1608 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
1609 STMMAC_CHAN0);
1610 }
1611
1612 if (priv->plat->axi && priv->hw->dma->axi)
1613 priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
1614
1615 return ret;
1616 }
1617
1618 /**
1619 * stmmac_tx_timer - mitigation sw timer for tx.
1620 * @data: data pointer
1621 * Description:
1622 * This is the timer handler to directly invoke the stmmac_tx_clean.
1623 */
1624 static void stmmac_tx_timer(unsigned long data)
1625 {
1626 struct stmmac_priv *priv = (struct stmmac_priv *)data;
1627
1628 stmmac_tx_clean(priv);
1629 }
1630
1631 /**
1632 * stmmac_init_tx_coalesce - init tx mitigation options.
1633 * @priv: driver private structure
1634 * Description:
1635 * This inits the transmit coalesce parameters: i.e. timer rate,
1636 * timer handler and default threshold used for enabling the
1637 * interrupt on completion bit.
1638 */
1639 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
1640 {
1641 priv->tx_coal_frames = STMMAC_TX_FRAMES;
1642 priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
1643 init_timer(&priv->txtimer);
1644 priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
1645 priv->txtimer.data = (unsigned long)priv;
1646 priv->txtimer.function = stmmac_tx_timer;
1647 add_timer(&priv->txtimer);
1648 }
1649
1650 /**
1651 * stmmac_mtl_configuration - Configure MTL
1652 * @priv: driver private structure
1653 * Description: It is used for configuring MTL
1654 */
1655 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
1656 {
1657 u32 rx_queues_count = priv->plat->rx_queues_to_use;
1658 u32 tx_queues_count = priv->plat->tx_queues_to_use;
1659
1660 /* Configure MTL RX algorithms */
1661 if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms)
1662 priv->hw->mac->prog_mtl_rx_algorithms(priv->hw,
1663 priv->plat->rx_sched_algorithm);
1664
1665 /* Configure MTL TX algorithms */
1666 if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms)
1667 priv->hw->mac->prog_mtl_tx_algorithms(priv->hw,
1668 priv->plat->tx_sched_algorithm);
1669
1670 /* Enable MAC RX Queues */
1671 if (rx_queues_count > 1 && priv->hw->mac->rx_queue_enable)
1672 stmmac_mac_enable_rx_queues(priv);
1673 }
1674
1675 /**
1676 * stmmac_hw_setup - setup mac in a usable state.
1677 * @dev : pointer to the device structure.
1678 * Description:
1679 * this is the main function to setup the HW in a usable state: the
1680 * dma engine is reset, the core registers are configured (e.g. AXI,
1681 * Checksum features, timers). The DMA is ready to start receiving and
1682 * transmitting.
1683 * Return value:
1684 * 0 on success and an appropriate (-)ve integer as defined in errno.h
1685 * file on failure.
1686 */
1687 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
1688 {
1689 struct stmmac_priv *priv = netdev_priv(dev);
1690 int ret;
1691
1692 /* DMA initialization and SW reset */
1693 ret = stmmac_init_dma_engine(priv);
1694 if (ret < 0) {
1695 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
1696 __func__);
1697 return ret;
1698 }
1699
1700 /* Copy the MAC addr into the HW */
1701 priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);
1702
1703 /* PS and related bits will be programmed according to the speed */
1704 if (priv->hw->pcs) {
1705 int speed = priv->plat->mac_port_sel_speed;
1706
1707 if ((speed == SPEED_10) || (speed == SPEED_100) ||
1708 (speed == SPEED_1000)) {
1709 priv->hw->ps = speed;
1710 } else {
1711 dev_warn(priv->device, "invalid port speed\n");
1712 priv->hw->ps = 0;
1713 }
1714 }
1715
1716 /* Initialize the MAC Core */
1717 priv->hw->mac->core_init(priv->hw, dev->mtu);
1718
1719 /* Initialize MTL*/
1720 if (priv->synopsys_id >= DWMAC_CORE_4_00)
1721 stmmac_mtl_configuration(priv);
1722
1723 ret = priv->hw->mac->rx_ipc(priv->hw);
1724 if (!ret) {
1725 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
1726 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
1727 priv->hw->rx_csum = 0;
1728 }
1729
1730 /* Enable the MAC Rx/Tx */
1731 if (priv->synopsys_id >= DWMAC_CORE_4_00)
1732 stmmac_dwmac4_set_mac(priv->ioaddr, true);
1733 else
1734 stmmac_set_mac(priv->ioaddr, true);
1735
1736 /* Set the HW DMA mode and the COE */
1737 stmmac_dma_operation_mode(priv);
1738
1739 stmmac_mmc_setup(priv);
1740
1741 if (init_ptp) {
1742 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
1743 if (ret < 0)
1744 netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
1745
1746 ret = stmmac_init_ptp(priv);
1747 if (ret == -EOPNOTSUPP)
1748 netdev_warn(priv->dev, "PTP not supported by HW\n");
1749 else if (ret)
1750 netdev_warn(priv->dev, "PTP init failed\n");
1751 }
1752
1753 #ifdef CONFIG_DEBUG_FS
1754 ret = stmmac_init_fs(dev);
1755 if (ret < 0)
1756 netdev_warn(priv->dev, "%s: failed debugFS registration\n",
1757 __func__);
1758 #endif
1759 /* Start the ball rolling... */
1760 netdev_dbg(priv->dev, "DMA RX/TX processes started...\n");
1761 priv->hw->dma->start_tx(priv->ioaddr);
1762 priv->hw->dma->start_rx(priv->ioaddr);
1763
1764 priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
1765
1766 if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
1767 priv->rx_riwt = MAX_DMA_RIWT;
1768 priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
1769 }
1770
1771 if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
1772 priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
1773
1774 /* set TX ring length */
1775 if (priv->hw->dma->set_tx_ring_len)
1776 priv->hw->dma->set_tx_ring_len(priv->ioaddr,
1777 (DMA_TX_SIZE - 1));
1778 /* set RX ring length */
1779 if (priv->hw->dma->set_rx_ring_len)
1780 priv->hw->dma->set_rx_ring_len(priv->ioaddr,
1781 (DMA_RX_SIZE - 1));
1782 /* Enable TSO */
1783 if (priv->tso)
1784 priv->hw->dma->enable_tso(priv->ioaddr, 1, STMMAC_CHAN0);
1785
1786 return 0;
1787 }
1788
1789 static void stmmac_hw_teardown(struct net_device *dev)
1790 {
1791 struct stmmac_priv *priv = netdev_priv(dev);
1792
1793 clk_disable_unprepare(priv->plat->clk_ptp_ref);
1794 }
1795
1796 /**
1797 * stmmac_open - open entry point of the driver
1798 * @dev : pointer to the device structure.
1799 * Description:
1800 * This function is the open entry point of the driver.
1801 * Return value:
1802 * 0 on success and an appropriate (-)ve integer as defined in errno.h
1803 * file on failure.
1804 */
1805 static int stmmac_open(struct net_device *dev)
1806 {
1807 struct stmmac_priv *priv = netdev_priv(dev);
1808 int ret;
1809
1810 stmmac_check_ether_addr(priv);
1811
1812 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
1813 priv->hw->pcs != STMMAC_PCS_TBI &&
1814 priv->hw->pcs != STMMAC_PCS_RTBI) {
1815 ret = stmmac_init_phy(dev);
1816 if (ret) {
1817 netdev_err(priv->dev,
1818 "%s: Cannot attach to PHY (error: %d)\n",
1819 __func__, ret);
1820 return ret;
1821 }
1822 }
1823
1824 /* Extra statistics */
1825 memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
1826 priv->xstats.threshold = tc;
1827
1828 priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
1829 priv->rx_copybreak = STMMAC_RX_COPYBREAK;
1830
1831 ret = alloc_dma_desc_resources(priv);
1832 if (ret < 0) {
1833 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
1834 __func__);
1835 goto dma_desc_error;
1836 }
1837
1838 ret = init_dma_desc_rings(dev, GFP_KERNEL);
1839 if (ret < 0) {
1840 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
1841 __func__);
1842 goto init_error;
1843 }
1844
1845 ret = stmmac_hw_setup(dev, true);
1846 if (ret < 0) {
1847 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
1848 goto init_error;
1849 }
1850
1851 stmmac_init_tx_coalesce(priv);
1852
1853 if (dev->phydev)
1854 phy_start(dev->phydev);
1855
1856 /* Request the IRQ lines */
1857 ret = request_irq(dev->irq, stmmac_interrupt,
1858 IRQF_SHARED, dev->name, dev);
1859 if (unlikely(ret < 0)) {
1860 netdev_err(priv->dev,
1861 "%s: ERROR: allocating the IRQ %d (error: %d)\n",
1862 __func__, dev->irq, ret);
1863 goto irq_error;
1864 }
1865
1866 /* Request the Wake IRQ in case another line is used for WoL */
1867 if (priv->wol_irq != dev->irq) {
1868 ret = request_irq(priv->wol_irq, stmmac_interrupt,
1869 IRQF_SHARED, dev->name, dev);
1870 if (unlikely(ret < 0)) {
1871 netdev_err(priv->dev,
1872 "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
1873 __func__, priv->wol_irq, ret);
1874 goto wolirq_error;
1875 }
1876 }
1877
1878 /* Request the LPI IRQ in case a dedicated line is used for it */
1879 if (priv->lpi_irq > 0) {
1880 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
1881 dev->name, dev);
1882 if (unlikely(ret < 0)) {
1883 netdev_err(priv->dev,
1884 "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
1885 __func__, priv->lpi_irq, ret);
1886 goto lpiirq_error;
1887 }
1888 }
1889
1890 napi_enable(&priv->napi);
1891 netif_start_queue(dev);
1892
1893 return 0;
1894
1895 lpiirq_error:
1896 if (priv->wol_irq != dev->irq)
1897 free_irq(priv->wol_irq, dev);
1898 wolirq_error:
1899 free_irq(dev->irq, dev);
1900 irq_error:
1901 if (dev->phydev)
1902 phy_stop(dev->phydev);
1903
1904 del_timer_sync(&priv->txtimer);
1905 stmmac_hw_teardown(dev);
1906 init_error:
1907 free_dma_desc_resources(priv);
1908 dma_desc_error:
1909 if (dev->phydev)
1910 phy_disconnect(dev->phydev);
1911
1912 return ret;
1913 }
1914
1915 /**
1916 * stmmac_release - close entry point of the driver
1917 * @dev : device pointer.
1918 * Description:
1919 * This is the stop entry point of the driver.
1920 */
1921 static int stmmac_release(struct net_device *dev)
1922 {
1923 struct stmmac_priv *priv = netdev_priv(dev);
1924
1925 if (priv->eee_enabled)
1926 del_timer_sync(&priv->eee_ctrl_timer);
1927
1928 /* Stop and disconnect the PHY */
1929 if (dev->phydev) {
1930 phy_stop(dev->phydev);
1931 phy_disconnect(dev->phydev);
1932 }
1933
1934 netif_stop_queue(dev);
1935
1936 napi_disable(&priv->napi);
1937
1938 del_timer_sync(&priv->txtimer);
1939
1940 /* Free the IRQ lines */
1941 free_irq(dev->irq, dev);
1942 if (priv->wol_irq != dev->irq)
1943 free_irq(priv->wol_irq, dev);
1944 if (priv->lpi_irq > 0)
1945 free_irq(priv->lpi_irq, dev);
1946
1947 /* Stop TX/RX DMA and clear the descriptors */
1948 priv->hw->dma->stop_tx(priv->ioaddr);
1949 priv->hw->dma->stop_rx(priv->ioaddr);
1950
1951 /* Release and free the Rx/Tx resources */
1952 free_dma_desc_resources(priv);
1953
1954 /* Disable the MAC Rx/Tx */
1955 stmmac_set_mac(priv->ioaddr, false);
1956
1957 netif_carrier_off(dev);
1958
1959 #ifdef CONFIG_DEBUG_FS
1960 stmmac_exit_fs(dev);
1961 #endif
1962
1963 stmmac_release_ptp(priv);
1964
1965 return 0;
1966 }
1967
1968 /**
1969 * stmmac_tso_allocator - fill the TSO descriptors for a buffer
1970 * @priv: driver private structure
1971 * @des: buffer start address
1972 * @total_len: total length to fill in descriptors
1973 * @last_segment: condition for the last descriptor
1974 * Description:
1975 * This function fills the descriptors and requests new descriptors
1976 * according to the buffer length to fill.
1977 */
1978 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
1979 int total_len, bool last_segment)
1980 {
1981 struct dma_desc *desc;
1982 int tmp_len;
1983 u32 buff_size;
1984
1985 tmp_len = total_len;
1986
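/* Split the payload across as many descriptors as needed; each one carries at most TSO_MAX_BUFF_SIZE bytes */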
1987 while (tmp_len > 0) {
1988 priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
1989 desc = priv->dma_tx + priv->cur_tx;
1990
1991 desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
1992 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
1993 TSO_MAX_BUFF_SIZE : tmp_len;
1994
1995 priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
1996 0, 1,
1997 (last_segment) && (buff_size < TSO_MAX_BUFF_SIZE),
1998 0, 0);
1999
2000 tmp_len -= TSO_MAX_BUFF_SIZE;
2001 }
2002 }
2003
2004 /**
2005 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2006 * @skb : the socket buffer
2007 * @dev : device pointer
2008 * Description: this is the transmit function that is called on TSO frames
2009 * (support available on GMAC4 and newer chips).
2010 * The diagram below shows the ring programming in case of TSO frames:
2011 *
2012 * First Descriptor
2013 * --------
2014 * | DES0 |---> buffer1 = L2/L3/L4 header
2015 * | DES1 |---> TCP Payload (can continue on next descr...)
2016 * | DES2 |---> buffer 1 and 2 len
2017 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2018 * --------
2019 * |
2020 * ...
2021 * |
2022 * --------
2023 * | DES0 | --| Split TCP Payload on Buffers 1 and 2
2024 * | DES1 | --|
2025 * | DES2 | --> buffer 1 and 2 len
2026 * | DES3 |
2027 * --------
2028 *
2029 * The MSS is fixed while TSO is enabled, so the TDES3 context field does not need to be reprogrammed.
2030 */
2031 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2032 {
2033 u32 pay_len, mss;
2034 int tmp_pay_len = 0;
2035 struct stmmac_priv *priv = netdev_priv(dev);
2036 int nfrags = skb_shinfo(skb)->nr_frags;
2037 unsigned int first_entry, des;
2038 struct dma_desc *desc, *first, *mss_desc = NULL;
2039 u8 proto_hdr_len;
2040 int i;
2041
2042 /* Compute header lengths */
2043 proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2044
2045 /* Checking descriptor availability against this threshold should be safe enough */
2046 if (unlikely(stmmac_tx_avail(priv) <
2047 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2048 if (!netif_queue_stopped(dev)) {
2049 netif_stop_queue(dev);
2050 /* This is a hard error, log it. */
2051 netdev_err(priv->dev,
2052 "%s: Tx Ring full when queue awake\n",
2053 __func__);
2054 }
2055 return NETDEV_TX_BUSY;
2056 }
2057
2058 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2059
2060 mss = skb_shinfo(skb)->gso_size;
2061
2062 /* set new MSS value if needed */
2063 if (mss != priv->mss) {
2064 mss_desc = priv->dma_tx + priv->cur_tx;
2065 priv->hw->desc->set_mss(mss_desc, mss);
2066 priv->mss = mss;
2067 priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
2068 }
2069
2070 if (netif_msg_tx_queued(priv)) {
2071 pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2072 __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2073 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2074 skb->data_len);
2075 }
2076
2077 first_entry = priv->cur_tx;
2078
2079 desc = priv->dma_tx + first_entry;
2080 first = desc;
2081
2082 /* first descriptor: fill Headers on Buf1 */
2083 des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2084 DMA_TO_DEVICE);
2085 if (dma_mapping_error(priv->device, des))
2086 goto dma_map_err;
2087
2088 priv->tx_skbuff_dma[first_entry].buf = des;
2089 priv->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2090 priv->tx_skbuff[first_entry] = skb;
2091
2092 first->des0 = cpu_to_le32(des);
2093
2094 /* Fill start of payload in buff2 of first descriptor */
2095 if (pay_len)
2096 first->des1 = cpu_to_le32(des + proto_hdr_len);
2097
2098 /* If needed take extra descriptors to fill the remaining payload */
2099 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2100
2101 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0));
2102
2103 /* Prepare fragments */
2104 for (i = 0; i < nfrags; i++) {
2105 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2106
2107 des = skb_frag_dma_map(priv->device, frag, 0,
2108 skb_frag_size(frag),
2109 DMA_TO_DEVICE);
2110 if (dma_mapping_error(priv->device, des))
2111 goto dma_map_err;
2112
2113 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2114 (i == nfrags - 1));
2115
2116 priv->tx_skbuff_dma[priv->cur_tx].buf = des;
2117 priv->tx_skbuff_dma[priv->cur_tx].len = skb_frag_size(frag);
2118 priv->tx_skbuff[priv->cur_tx] = NULL;
2119 priv->tx_skbuff_dma[priv->cur_tx].map_as_page = true;
2120 }
2121
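/* Flag the last used descriptor of this frame, then move cur_tx past it */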
2122 priv->tx_skbuff_dma[priv->cur_tx].last_segment = true;
2123
2124 priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
2125
2126 if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
2127 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2128 __func__);
2129 netif_stop_queue(dev);
2130 }
2131
2132 dev->stats.tx_bytes += skb->len;
2133 priv->xstats.tx_tso_frames++;
2134 priv->xstats.tx_tso_nfrags += nfrags;
2135
2136 /* Manage tx mitigation */
2137 priv->tx_count_frames += nfrags + 1;
2138 if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2139 mod_timer(&priv->txtimer,
2140 STMMAC_COAL_TIMER(priv->tx_coal_timer));
2141 } else {
2142 priv->tx_count_frames = 0;
2143 priv->hw->desc->set_tx_ic(desc);
2144 priv->xstats.tx_set_ic_bit++;
2145 }
2146
2147 if (!priv->hwts_tx_en)
2148 skb_tx_timestamp(skb);
2149
2150 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2151 priv->hwts_tx_en)) {
2152 /* declare that device is doing timestamping */
2153 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2154 priv->hw->desc->enable_tx_timestamp(first);
2155 }
2156
2157 /* Complete the first descriptor before granting the DMA */
2158 priv->hw->desc->prepare_tso_tx_desc(first, 1,
2159 proto_hdr_len,
2160 pay_len,
2161 1, priv->tx_skbuff_dma[first_entry].last_segment,
2162 tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2163
2164 /* If context desc is used to change MSS */
2165 if (mss_desc)
2166 priv->hw->desc->set_tx_owner(mss_desc);
2167
2168 /* The own bit must be the latest setting done when preparing the
2169 * descriptor, and a barrier is needed to make sure that
2170 * all is coherent before granting the DMA engine.
2171 */
2172 dma_wmb();
2173
2174 if (netif_msg_pktdata(priv)) {
2175 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2176 __func__, priv->cur_tx, priv->dirty_tx, first_entry,
2177 priv->cur_tx, first, nfrags);
2178
2179 priv->hw->desc->display_ring((void *)priv->dma_tx, DMA_TX_SIZE,
2180 0);
2181
2182 pr_info(">>> frame to be transmitted: ");
2183 print_pkt(skb->data, skb_headlen(skb));
2184 }
2185
2186 netdev_sent_queue(dev, skb->len);
2187
2188 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
2189 STMMAC_CHAN0);
2190
2191 return NETDEV_TX_OK;
2192
2193 dma_map_err:
2194 dev_err(priv->device, "Tx dma map failed\n");
2195 dev_kfree_skb(skb);
2196 priv->dev->stats.tx_dropped++;
2197 return NETDEV_TX_OK;
2198 }
2199
2200 /**
2201 * stmmac_xmit - Tx entry point of the driver
2202 * @skb : the socket buffer
2203 * @dev : device pointer
2204 * Description : this is the tx entry point of the driver.
2205 * It programs the chain or the ring and supports oversized frames
2206 * and the SG feature.
2207 */
2208 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2209 {
2210 struct stmmac_priv *priv = netdev_priv(dev);
2211 unsigned int nopaged_len = skb_headlen(skb);
2212 int i, csum_insertion = 0, is_jumbo = 0;
2213 int nfrags = skb_shinfo(skb)->nr_frags;
2214 unsigned int entry, first_entry;
2215 struct dma_desc *desc, *first;
2216 unsigned int enh_desc;
2217 unsigned int des;
2218
2219 /* Manage oversized TCP frames for GMAC4 device */
2220 if (skb_is_gso(skb) && priv->tso) {
2221 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2222 return stmmac_tso_xmit(skb, dev);
2223 }
2224
2225 if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
2226 if (!netif_queue_stopped(dev)) {
2227 netif_stop_queue(dev);
2228 /* This is a hard error, log it. */
2229 netdev_err(priv->dev,
2230 "%s: Tx Ring full when queue awake\n",
2231 __func__);
2232 }
2233 return NETDEV_TX_BUSY;
2234 }
2235
2236 if (priv->tx_path_in_lpi_mode)
2237 stmmac_disable_eee_mode(priv);
2238
2239 entry = priv->cur_tx;
2240 first_entry = entry;
2241
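/* Request HW checksum insertion only when the stack left the checksum partial */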
2242 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
2243
2244 if (likely(priv->extend_desc))
2245 desc = (struct dma_desc *)(priv->dma_etx + entry);
2246 else
2247 desc = priv->dma_tx + entry;
2248
2249 first = desc;
2250
2251 priv->tx_skbuff[first_entry] = skb;
2252
2253 enh_desc = priv->plat->enh_desc;
2254 /* To program the descriptors according to the size of the frame */
2255 if (enh_desc)
2256 is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
2257
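/* On pre-4.00 cores, oversized frames are handled by the ring/chain specific jumbo helper */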
2258 if (unlikely(is_jumbo) && likely(priv->synopsys_id <
2259 DWMAC_CORE_4_00)) {
2260 entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
2261 if (unlikely(entry < 0))
2262 goto dma_map_err;
2263 }
2264
2265 for (i = 0; i < nfrags; i++) {
2266 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2267 int len = skb_frag_size(frag);
2268 bool last_segment = (i == (nfrags - 1));
2269
2270 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
2271
2272 if (likely(priv->extend_desc))
2273 desc = (struct dma_desc *)(priv->dma_etx + entry);
2274 else
2275 desc = priv->dma_tx + entry;
2276
2277 des = skb_frag_dma_map(priv->device, frag, 0, len,
2278 DMA_TO_DEVICE);
2279 if (dma_mapping_error(priv->device, des))
2280 goto dma_map_err; /* should reuse desc w/o issues */
2281
2282 priv->tx_skbuff[entry] = NULL;
2283
2284 priv->tx_skbuff_dma[entry].buf = des;
2285 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2286 desc->des0 = cpu_to_le32(des);
2287 else
2288 desc->des2 = cpu_to_le32(des);
2289
2290 priv->tx_skbuff_dma[entry].map_as_page = true;
2291 priv->tx_skbuff_dma[entry].len = len;
2292 priv->tx_skbuff_dma[entry].last_segment = last_segment;
2293
2294 /* Prepare the descriptor and set the own bit too */
2295 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
2296 priv->mode, 1, last_segment);
2297 }
2298
2299 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
2300
2301 priv->cur_tx = entry;
2302
2303 if (netif_msg_pktdata(priv)) {
2304 void *tx_head;
2305
2306 netdev_dbg(priv->dev,
2307 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
2308 __func__, priv->cur_tx, priv->dirty_tx, first_entry,
2309 entry, first, nfrags);
2310
2311 if (priv->extend_desc)
2312 tx_head = (void *)priv->dma_etx;
2313 else
2314 tx_head = (void *)priv->dma_tx;
2315
2316 priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
2317
2318 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
2319 print_pkt(skb->data, skb->len);
2320 }
2321
2322 if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
2323 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2324 __func__);
2325 netif_stop_queue(dev);
2326 }
2327
2328 dev->stats.tx_bytes += skb->len;
2329
2330 /* According to the coalesce parameter the IC bit for the latest
2331 * segment is reset and the timer is restarted to clean the tx status.
2332 * This approach takes care of the fragments: desc is the first
2333 * element in case of no SG.
2334 */
2335 priv->tx_count_frames += nfrags + 1;
2336 if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2337 mod_timer(&priv->txtimer,
2338 STMMAC_COAL_TIMER(priv->tx_coal_timer));
2339 } else {
2340 priv->tx_count_frames = 0;
2341 priv->hw->desc->set_tx_ic(desc);
2342 priv->xstats.tx_set_ic_bit++;
2343 }
2344
2345 if (!priv->hwts_tx_en)
2346 skb_tx_timestamp(skb);
2347
2348 /* Ready to fill the first descriptor and set the OWN bit w/o any
2349 * problems because all the descriptors are actually ready to be
2350 * passed to the DMA engine.
2351 */
2352 if (likely(!is_jumbo)) {
2353 bool last_segment = (nfrags == 0);
2354
2355 des = dma_map_single(priv->device, skb->data,
2356 nopaged_len, DMA_TO_DEVICE);
2357 if (dma_mapping_error(priv->device, des))
2358 goto dma_map_err;
2359
2360 priv->tx_skbuff_dma[first_entry].buf = des;
2361 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2362 first->des0 = cpu_to_le32(des);
2363 else
2364 first->des2 = cpu_to_le32(des);
2365
2366 priv->tx_skbuff_dma[first_entry].len = nopaged_len;
2367 priv->tx_skbuff_dma[first_entry].last_segment = last_segment;
2368
2369 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2370 priv->hwts_tx_en)) {
2371 /* declare that device is doing timestamping */
2372 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2373 priv->hw->desc->enable_tx_timestamp(first);
2374 }
2375
2376 /* Prepare the first descriptor setting the OWN bit too */
2377 priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
2378 csum_insertion, priv->mode, 1,
2379 last_segment);
2380
2381 /* The own bit must be the latest setting done when preparing the
2382 * descriptor, and a barrier is needed to make sure that
2383 * all is coherent before granting the DMA engine.
2384 */
2385 dma_wmb();
2386 }
2387
2388 netdev_sent_queue(dev, skb->len);
2389
2390 if (priv->synopsys_id < DWMAC_CORE_4_00)
2391 priv->hw->dma->enable_dma_transmission(priv->ioaddr);
2392 else
2393 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
2394 STMMAC_CHAN0);
2395
2396 return NETDEV_TX_OK;
2397
2398 dma_map_err:
2399 netdev_err(priv->dev, "Tx DMA map failed\n");
2400 dev_kfree_skb(skb);
2401 priv->dev->stats.tx_dropped++;
2402 return NETDEV_TX_OK;
2403 }
2404
2405 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
2406 {
2407 struct ethhdr *ehdr;
2408 u16 vlanid;
2409
2410 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
2411 NETIF_F_HW_VLAN_CTAG_RX &&
2412 !__vlan_get_tag(skb, &vlanid)) {
2413 /* pop the vlan tag */
2414 ehdr = (struct ethhdr *)skb->data;
2415 memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
2416 skb_pull(skb, VLAN_HLEN);
2417 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
2418 }
2419 }
2420
2421
2422 static inline int stmmac_rx_threshold_count(struct stmmac_priv *priv)
2423 {
2424 if (priv->rx_zeroc_thresh < STMMAC_RX_THRESH)
2425 return 0;
2426
2427 return 1;
2428 }
2429
2430 /**
2431 * stmmac_rx_refill - refill used skb preallocated buffers
2432 * @priv: driver private structure
2433 * Description : this reallocates the skbs for the reception process,
2434 * which is based on zero-copy.
2435 */
2436 static inline void stmmac_rx_refill(struct stmmac_priv *priv)
2437 {
2438 int bfsize = priv->dma_buf_sz;
2439 unsigned int entry = priv->dirty_rx;
2440 int dirty = stmmac_rx_dirty(priv);
2441
2442 while (dirty-- > 0) {
2443 struct dma_desc *p;
2444
2445 if (priv->extend_desc)
2446 p = (struct dma_desc *)(priv->dma_erx + entry);
2447 else
2448 p = priv->dma_rx + entry;
2449
2450 if (likely(priv->rx_skbuff[entry] == NULL)) {
2451 struct sk_buff *skb;
2452
2453 skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
2454 if (unlikely(!skb)) {
2455 /* so for a while no zero-copy! */
2456 priv->rx_zeroc_thresh = STMMAC_RX_THRESH;
2457 if (unlikely(net_ratelimit()))
2458 dev_err(priv->device,
2459 "fail to alloc skb entry %d\n",
2460 entry);
2461 break;
2462 }
2463
2464 priv->rx_skbuff[entry] = skb;
2465 priv->rx_skbuff_dma[entry] =
2466 dma_map_single(priv->device, skb->data, bfsize,
2467 DMA_FROM_DEVICE);
2468 if (dma_mapping_error(priv->device,
2469 priv->rx_skbuff_dma[entry])) {
2470 netdev_err(priv->dev, "Rx DMA map failed\n");
2471 dev_kfree_skb(skb);
2472 break;
2473 }
2474
2475 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
2476 p->des0 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
2477 p->des1 = 0;
2478 } else {
2479 p->des2 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
2480 }
2481 if (priv->hw->mode->refill_desc3)
2482 priv->hw->mode->refill_desc3(priv, p);
2483
2484 if (priv->rx_zeroc_thresh > 0)
2485 priv->rx_zeroc_thresh--;
2486
2487 netif_dbg(priv, rx_status, priv->dev,
2488 "refill entry #%d\n", entry);
2489 }
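/* Make sure the buffer address writes are visible before handing the descriptor back to the DMA */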
2490 dma_wmb();
2491
2492 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2493 priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
2494 else
2495 priv->hw->desc->set_rx_owner(p);
2496
2497 dma_wmb();
2498
2499 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
2500 }
2501 priv->dirty_rx = entry;
2502 }
2503
2504 /**
2505 * stmmac_rx - manage the receive process
2506 * @priv: driver private structure
2507 * @limit: napi budget.
2508 * Description : this is the function called by the napi poll method.
2509 * It gets all the frames inside the ring.
2510 */
2511 static int stmmac_rx(struct stmmac_priv *priv, int limit)
2512 {
2513 unsigned int entry = priv->cur_rx;
2514 unsigned int next_entry;
2515 unsigned int count = 0;
2516 int coe = priv->hw->rx_csum;
2517
2518 if (netif_msg_rx_status(priv)) {
2519 void *rx_head;
2520
2521 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
2522 if (priv->extend_desc)
2523 rx_head = (void *)priv->dma_erx;
2524 else
2525 rx_head = (void *)priv->dma_rx;
2526
2527 priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
2528 }
2529 while (count < limit) {
2530 int status;
2531 struct dma_desc *p;
2532 struct dma_desc *np;
2533
2534 if (priv->extend_desc)
2535 p = (struct dma_desc *)(priv->dma_erx + entry);
2536 else
2537 p = priv->dma_rx + entry;
2538
2539 /* read the status of the incoming frame */
2540 status = priv->hw->desc->rx_status(&priv->dev->stats,
2541 &priv->xstats, p);
2542 /* check if the descriptor is still owned by the DMA, otherwise go ahead */
2543 if (unlikely(status & dma_own))
2544 break;
2545
2546 count++;
2547
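/* Advance cur_rx to the next descriptor and prefetch it for the next iteration */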
2548 priv->cur_rx = STMMAC_GET_ENTRY(priv->cur_rx, DMA_RX_SIZE);
2549 next_entry = priv->cur_rx;
2550
2551 if (priv->extend_desc)
2552 np = (struct dma_desc *)(priv->dma_erx + next_entry);
2553 else
2554 np = priv->dma_rx + next_entry;
2555
2556 prefetch(np);
2557
2558 if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
2559 priv->hw->desc->rx_extended_status(&priv->dev->stats,
2560 &priv->xstats,
2561 priv->dma_erx +
2562 entry);
2563 if (unlikely(status == discard_frame)) {
2564 priv->dev->stats.rx_errors++;
2565 if (priv->hwts_rx_en && !priv->extend_desc) {
2566 /* DESC2 & DESC3 will be overwritten by device
2567 * with timestamp value, hence reinitialize
2568 * them in stmmac_rx_refill() function so that
2569 * device can reuse it.
2570 */
2571 priv->rx_skbuff[entry] = NULL;
2572 dma_unmap_single(priv->device,
2573 priv->rx_skbuff_dma[entry],
2574 priv->dma_buf_sz,
2575 DMA_FROM_DEVICE);
2576 }
2577 } else {
2578 struct sk_buff *skb;
2579 int frame_len;
2580 unsigned int des;
2581
2582 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2583 des = le32_to_cpu(p->des0);
2584 else
2585 des = le32_to_cpu(p->des2);
2586
2587 frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
2588
2589 /* If frame length is greater than skb buffer size
2590 * (preallocated during init) then the packet is
2591 * ignored
2592 */
2593 if (frame_len > priv->dma_buf_sz) {
2594 netdev_err(priv->dev,
2595 "len %d larger than size (%d)\n",
2596 frame_len, priv->dma_buf_sz);
2597 priv->dev->stats.rx_length_errors++;
2598 break;
2599 }
2600
2601 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
2602 * Type frames (LLC/LLC-SNAP)
2603 */
2604 if (unlikely(status != llc_snap))
2605 frame_len -= ETH_FCS_LEN;
2606
2607 if (netif_msg_rx_status(priv)) {
2608 netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
2609 p, entry, des);
2610 if (frame_len > ETH_FRAME_LEN)
2611 netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
2612 frame_len, status);
2613 }
2614
2615 /* Zero-copy is always used, for all sizes, in case of
2616 * GMAC4 because the used descriptors always need
2617 * to be refilled.
2618 */
2619 if (unlikely(!priv->plat->has_gmac4 &&
2620 ((frame_len < priv->rx_copybreak) ||
2621 stmmac_rx_threshold_count(priv)))) {
2622 skb = netdev_alloc_skb_ip_align(priv->dev,
2623 frame_len);
2624 if (unlikely(!skb)) {
2625 if (net_ratelimit())
2626 dev_warn(priv->device,
2627 "packet dropped\n");
2628 priv->dev->stats.rx_dropped++;
2629 break;
2630 }
2631
2632 dma_sync_single_for_cpu(priv->device,
2633 priv->rx_skbuff_dma
2634 [entry], frame_len,
2635 DMA_FROM_DEVICE);
2636 skb_copy_to_linear_data(skb,
2637 priv->
2638 rx_skbuff[entry]->data,
2639 frame_len);
2640
2641 skb_put(skb, frame_len);
2642 dma_sync_single_for_device(priv->device,
2643 priv->rx_skbuff_dma
2644 [entry], frame_len,
2645 DMA_FROM_DEVICE);
2646 } else {
2647 skb = priv->rx_skbuff[entry];
2648 if (unlikely(!skb)) {
2649 netdev_err(priv->dev,
2650 "%s: Inconsistent Rx chain\n",
2651 priv->dev->name);
2652 priv->dev->stats.rx_dropped++;
2653 break;
2654 }
2655 prefetch(skb->data - NET_IP_ALIGN);
2656 priv->rx_skbuff[entry] = NULL;
2657 priv->rx_zeroc_thresh++;
2658
2659 skb_put(skb, frame_len);
2660 dma_unmap_single(priv->device,
2661 priv->rx_skbuff_dma[entry],
2662 priv->dma_buf_sz,
2663 DMA_FROM_DEVICE);
2664 }
2665
2666 if (netif_msg_pktdata(priv)) {
2667 netdev_dbg(priv->dev, "frame received (%dbytes)",
2668 frame_len);
2669 print_pkt(skb->data, frame_len);
2670 }
2671
2672 stmmac_get_rx_hwtstamp(priv, p, np, skb);
2673
2674 stmmac_rx_vlan(priv->dev, skb);
2675
2676 skb->protocol = eth_type_trans(skb, priv->dev);
2677
2678 if (unlikely(!coe))
2679 skb_checksum_none_assert(skb);
2680 else
2681 skb->ip_summed = CHECKSUM_UNNECESSARY;
2682
2683 napi_gro_receive(&priv->napi, skb);
2684
2685 priv->dev->stats.rx_packets++;
2686 priv->dev->stats.rx_bytes += frame_len;
2687 }
2688 entry = next_entry;
2689 }
2690
2691 stmmac_rx_refill(priv);
2692
2693 priv->xstats.rx_pkt_n += count;
2694
2695 return count;
2696 }
2697
2698 /**
2699 * stmmac_poll - stmmac poll method (NAPI)
2700 * @napi : pointer to the napi structure.
2701 * @budget : maximum number of packets that the current CPU can receive from
2702 * all interfaces.
2703 * Description :
2704 * To look at the incoming frames and clear the tx resources.
2705 */
2706 static int stmmac_poll(struct napi_struct *napi, int budget)
2707 {
2708 struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
2709 int work_done = 0;
2710
2711 priv->xstats.napi_poll++;
2712 stmmac_tx_clean(priv);
2713
2714 work_done = stmmac_rx(priv, budget);
2715 if (work_done < budget) {
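/* RX ring drained within the budget: leave polling and re-enable the DMA interrupts */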
2716 napi_complete_done(napi, work_done);
2717 stmmac_enable_dma_irq(priv);
2718 }
2719 return work_done;
2720 }
2721
2722 /**
2723 * stmmac_tx_timeout
2724 * @dev : Pointer to net device structure
2725 * Description: this function is called when a packet transmission fails to
2726 * complete within a reasonable time. The driver will mark the error in the
2727 * netdev structure and arrange for the device to be reset to a sane state
2728 * in order to transmit a new packet.
2729 */
2730 static void stmmac_tx_timeout(struct net_device *dev)
2731 {
2732 struct stmmac_priv *priv = netdev_priv(dev);
2733
2734 /* Clear Tx resources and restart transmitting again */
2735 stmmac_tx_err(priv);
2736 }
2737
2738 /**
2739 * stmmac_set_rx_mode - entry point for multicast addressing
2740 * @dev : pointer to the device structure
2741 * Description:
2742 * This function is a driver entry point which gets called by the kernel
2743 * whenever multicast addresses must be enabled/disabled.
2744 * Return value:
2745 * void.
2746 */
2747 static void stmmac_set_rx_mode(struct net_device *dev)
2748 {
2749 struct stmmac_priv *priv = netdev_priv(dev);
2750
2751 priv->hw->mac->set_filter(priv->hw, dev);
2752 }
2753
2754 /**
2755 * stmmac_change_mtu - entry point to change MTU size for the device.
2756 * @dev : device pointer.
2757 * @new_mtu : the new MTU size for the device.
2758 * Description: the Maximum Transmission Unit (MTU) is used by the network layer
2759 * to drive packet transmission. Ethernet has an MTU of 1500 octets
2760 * (ETH_DATA_LEN). This value can be changed with ifconfig.
2761 * Return value:
2762 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2763 * file on failure.
2764 */
2765 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
2766 {
2767 struct stmmac_priv *priv = netdev_priv(dev);
2768
2769 if (netif_running(dev)) {
2770 netdev_err(priv->dev, "must be stopped to change its MTU\n");
2771 return -EBUSY;
2772 }
2773
2774 dev->mtu = new_mtu;
2775
2776 netdev_update_features(dev);
2777
2778 return 0;
2779 }
2780
2781 static netdev_features_t stmmac_fix_features(struct net_device *dev,
2782 netdev_features_t features)
2783 {
2784 struct stmmac_priv *priv = netdev_priv(dev);
2785
2786 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
2787 features &= ~NETIF_F_RXCSUM;
2788
2789 if (!priv->plat->tx_coe)
2790 features &= ~NETIF_F_CSUM_MASK;
2791
2792 /* Some GMAC devices have buggy Jumbo frame support that
2793 * needs to have the Tx COE disabled for oversized frames
2794 * (due to limited buffer sizes). In this case we disable
2795 * the TX csum insertion in the TDES and do not use SF.
2796 */
2797 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
2798 features &= ~NETIF_F_CSUM_MASK;
2799
2800 /* Enable or disable TSO according to the features requested via ethtool */
2801 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
2802 if (features & NETIF_F_TSO)
2803 priv->tso = true;
2804 else
2805 priv->tso = false;
2806 }
2807
2808 return features;
2809 }
2810
2811 static int stmmac_set_features(struct net_device *netdev,
2812 netdev_features_t features)
2813 {
2814 struct stmmac_priv *priv = netdev_priv(netdev);
2815
2816 /* Keep the COE Type in case checksum offload is supported */
2817 if (features & NETIF_F_RXCSUM)
2818 priv->hw->rx_csum = priv->plat->rx_coe;
2819 else
2820 priv->hw->rx_csum = 0;
2821 /* No check needed because rx_coe has been set before and it will be
2822 * fixed in case of issue.
2823 */
2824 priv->hw->mac->rx_ipc(priv->hw);
2825
2826 return 0;
2827 }
2828
2829 /**
2830 * stmmac_interrupt - main ISR
2831 * @irq: interrupt number.
2832 * @dev_id: to pass the net device pointer.
2833 * Description: this is the main driver interrupt service routine.
2834 * It can call:
2835 * o DMA service routine (to manage incoming frame reception and transmission
2836 * status)
2837 * o Core interrupts to manage: remote wake-up, management counter, LPI
2838 * interrupts.
2839 */
2840 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
2841 {
2842 struct net_device *dev = (struct net_device *)dev_id;
2843 struct stmmac_priv *priv;
2844 
2845 if (unlikely(!dev)) {
2846 pr_err("%s: invalid dev pointer\n", __func__);
2847 return IRQ_NONE;
2848 }
2849 priv = netdev_priv(dev);
2850 if (priv->irq_wake)
2851 pm_wakeup_event(priv->device, 0);
2852
2853 /* To handle GMAC own interrupts */
2854 if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
2855 int status = priv->hw->mac->host_irq_status(priv->hw,
2856 &priv->xstats);
2857 if (unlikely(status)) {
2858 /* For LPI we need to save the tx status */
2859 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
2860 priv->tx_path_in_lpi_mode = true;
2861 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
2862 priv->tx_path_in_lpi_mode = false;
2863 if (status & CORE_IRQ_MTL_RX_OVERFLOW && priv->hw->dma->set_rx_tail_ptr)
2864 priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
2865 priv->rx_tail_addr,
2866 STMMAC_CHAN0);
2867 }
2868
2869 /* PCS link status */
2870 if (priv->hw->pcs) {
2871 if (priv->xstats.pcs_link)
2872 netif_carrier_on(dev);
2873 else
2874 netif_carrier_off(dev);
2875 }
2876 }
2877
2878 /* To handle DMA interrupts */
2879 stmmac_dma_interrupt(priv);
2880
2881 return IRQ_HANDLED;
2882 }
2883
2884 #ifdef CONFIG_NET_POLL_CONTROLLER
2885 /* Polling receive - used by NETCONSOLE and other diagnostic tools
2886 * to allow network I/O with interrupts disabled.
2887 */
2888 static void stmmac_poll_controller(struct net_device *dev)
2889 {
2890 disable_irq(dev->irq);
2891 stmmac_interrupt(dev->irq, dev);
2892 enable_irq(dev->irq);
2893 }
2894 #endif
2895
2896 /**
2897 * stmmac_ioctl - Entry point for the Ioctl
2898 * @dev: Device pointer.
2899 * @rq: An IOCTL-specific structure that can contain a pointer to
2900 * a proprietary structure used to pass information to the driver.
2901 * @cmd: IOCTL command
2902 * Description:
2903 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
2904 */
2905 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2906 {
2907 int ret = -EOPNOTSUPP;
2908
2909 if (!netif_running(dev))
2910 return -EINVAL;
2911
2912 switch (cmd) {
2913 case SIOCGMIIPHY:
2914 case SIOCGMIIREG:
2915 case SIOCSMIIREG:
2916 if (!dev->phydev)
2917 return -EINVAL;
2918 ret = phy_mii_ioctl(dev->phydev, rq, cmd);
2919 break;
2920 case SIOCSHWTSTAMP:
2921 ret = stmmac_hwtstamp_ioctl(dev, rq);
2922 break;
2923 default:
2924 break;
2925 }
2926
2927 return ret;
2928 }
2929
2930 #ifdef CONFIG_DEBUG_FS
2931 static struct dentry *stmmac_fs_dir;
2932
2933 static void sysfs_display_ring(void *head, int size, int extend_desc,
2934 struct seq_file *seq)
2935 {
2936 int i;
2937 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
2938 struct dma_desc *p = (struct dma_desc *)head;
2939
2940 for (i = 0; i < size; i++) {
2941 if (extend_desc) {
2942 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
2943 i, (unsigned int)virt_to_phys(ep),
2944 le32_to_cpu(ep->basic.des0),
2945 le32_to_cpu(ep->basic.des1),
2946 le32_to_cpu(ep->basic.des2),
2947 le32_to_cpu(ep->basic.des3));
2948 ep++;
2949 } else {
2950 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
2951 i, (unsigned int)virt_to_phys(p),
2952 le32_to_cpu(p->des0), le32_to_cpu(p->des1),
2953 le32_to_cpu(p->des2), le32_to_cpu(p->des3));
2954 p++;
2955 }
2956 seq_printf(seq, "\n");
2957 }
2958 }
2959
2960 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
2961 {
2962 struct net_device *dev = seq->private;
2963 struct stmmac_priv *priv = netdev_priv(dev);
2964
2965 if (priv->extend_desc) {
2966 seq_printf(seq, "Extended RX descriptor ring:\n");
2967 sysfs_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1, seq);
2968 seq_printf(seq, "Extended TX descriptor ring:\n");
2969 sysfs_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1, seq);
2970 } else {
2971 seq_printf(seq, "RX descriptor ring:\n");
2972 sysfs_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0, seq);
2973 seq_printf(seq, "TX descriptor ring:\n");
2974 sysfs_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0, seq);
2975 }
2976
2977 return 0;
2978 }
2979
2980 static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
2981 {
2982 return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
2983 }
2984
2985 /* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
2986
2987 static const struct file_operations stmmac_rings_status_fops = {
2988 .owner = THIS_MODULE,
2989 .open = stmmac_sysfs_ring_open,
2990 .read = seq_read,
2991 .llseek = seq_lseek,
2992 .release = single_release,
2993 };
2994
2995 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
2996 {
2997 struct net_device *dev = seq->private;
2998 struct stmmac_priv *priv = netdev_priv(dev);
2999
3000 if (!priv->hw_cap_support) {
3001 seq_printf(seq, "DMA HW features not supported\n");
3002 return 0;
3003 }
3004
3005 seq_printf(seq, "==============================\n");
3006 seq_printf(seq, "\tDMA HW features\n");
3007 seq_printf(seq, "==============================\n");
3008
3009 seq_printf(seq, "\t10/100 Mbps: %s\n",
3010 (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3011 seq_printf(seq, "\t1000 Mbps: %s\n",
3012 (priv->dma_cap.mbps_1000) ? "Y" : "N");
3013 seq_printf(seq, "\tHalf duplex: %s\n",
3014 (priv->dma_cap.half_duplex) ? "Y" : "N");
3015 seq_printf(seq, "\tHash Filter: %s\n",
3016 (priv->dma_cap.hash_filter) ? "Y" : "N");
3017 seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3018 (priv->dma_cap.multi_addr) ? "Y" : "N");
3019 seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3020 (priv->dma_cap.pcs) ? "Y" : "N");
3021 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3022 (priv->dma_cap.sma_mdio) ? "Y" : "N");
3023 seq_printf(seq, "\tPMT Remote wake up: %s\n",
3024 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3025 seq_printf(seq, "\tPMT Magic Frame: %s\n",
3026 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3027 seq_printf(seq, "\tRMON module: %s\n",
3028 (priv->dma_cap.rmon) ? "Y" : "N");
3029 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3030 (priv->dma_cap.time_stamp) ? "Y" : "N");
3031 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3032 (priv->dma_cap.atime_stamp) ? "Y" : "N");
3033 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3034 (priv->dma_cap.eee) ? "Y" : "N");
3035 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3036 seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3037 (priv->dma_cap.tx_coe) ? "Y" : "N");
3038 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3039 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3040 (priv->dma_cap.rx_coe) ? "Y" : "N");
3041 } else {
3042 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3043 (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
3044 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
3045 (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
3046 }
3047 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
3048 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
3049 seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
3050 priv->dma_cap.number_rx_channel);
3051 seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
3052 priv->dma_cap.number_tx_channel);
3053 seq_printf(seq, "\tEnhanced descriptors: %s\n",
3054 (priv->dma_cap.enh_desc) ? "Y" : "N");
3055
3056 return 0;
3057 }
3058
3059 static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
3060 {
3061 return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
3062 }
3063
3064 static const struct file_operations stmmac_dma_cap_fops = {
3065 .owner = THIS_MODULE,
3066 .open = stmmac_sysfs_dma_cap_open,
3067 .read = seq_read,
3068 .llseek = seq_lseek,
3069 .release = single_release,
3070 };
3071
3072 static int stmmac_init_fs(struct net_device *dev)
3073 {
3074 struct stmmac_priv *priv = netdev_priv(dev);
3075
3076 /* Create per netdev entries */
3077 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
3078
3079 if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
3080 netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
3081
3082 return -ENOMEM;
3083 }
3084
3085 /* Entry to report DMA RX/TX rings */
3086 priv->dbgfs_rings_status =
3087 debugfs_create_file("descriptors_status", S_IRUGO,
3088 priv->dbgfs_dir, dev,
3089 &stmmac_rings_status_fops);
3090
3091 if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
3092 netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
3093 debugfs_remove_recursive(priv->dbgfs_dir);
3094
3095 return -ENOMEM;
3096 }
3097
3098 /* Entry to report the DMA HW features */
3099 priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO,
3100 priv->dbgfs_dir,
3101 dev, &stmmac_dma_cap_fops);
3102
3103 if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
3104 netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
3105 debugfs_remove_recursive(priv->dbgfs_dir);
3106
3107 return -ENOMEM;
3108 }
3109
3110 return 0;
3111 }
3112
3113 static void stmmac_exit_fs(struct net_device *dev)
3114 {
3115 struct stmmac_priv *priv = netdev_priv(dev);
3116
3117 debugfs_remove_recursive(priv->dbgfs_dir);
3118 }
3119 #endif /* CONFIG_DEBUG_FS */
3120
3121 static const struct net_device_ops stmmac_netdev_ops = {
3122 .ndo_open = stmmac_open,
3123 .ndo_start_xmit = stmmac_xmit,
3124 .ndo_stop = stmmac_release,
3125 .ndo_change_mtu = stmmac_change_mtu,
3126 .ndo_fix_features = stmmac_fix_features,
3127 .ndo_set_features = stmmac_set_features,
3128 .ndo_set_rx_mode = stmmac_set_rx_mode,
3129 .ndo_tx_timeout = stmmac_tx_timeout,
3130 .ndo_do_ioctl = stmmac_ioctl,
3131 #ifdef CONFIG_NET_POLL_CONTROLLER
3132 .ndo_poll_controller = stmmac_poll_controller,
3133 #endif
3134 .ndo_set_mac_address = eth_mac_addr,
3135 };
3136
3137 /**
3138 * stmmac_hw_init - Init the MAC device
3139 * @priv: driver private structure
3140 * Description: this function is to configure the MAC device according to
3141 * some platform parameters or the HW capability register. It prepares the
3142 * driver to use either ring or chain modes and to setup either enhanced or
3143 * normal descriptors.
3144 */
3145 static int stmmac_hw_init(struct stmmac_priv *priv)
3146 {
3147 struct mac_device_info *mac;
3148
3149 /* Identify the MAC HW device */
3150 if (priv->plat->has_gmac) {
3151 priv->dev->priv_flags |= IFF_UNICAST_FLT;
3152 mac = dwmac1000_setup(priv->ioaddr,
3153 priv->plat->multicast_filter_bins,
3154 priv->plat->unicast_filter_entries,
3155 &priv->synopsys_id);
3156 } else if (priv->plat->has_gmac4) {
3157 priv->dev->priv_flags |= IFF_UNICAST_FLT;
3158 mac = dwmac4_setup(priv->ioaddr,
3159 priv->plat->multicast_filter_bins,
3160 priv->plat->unicast_filter_entries,
3161 &priv->synopsys_id);
3162 } else {
3163 mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
3164 }
3165 if (!mac)
3166 return -ENOMEM;
3167
3168 priv->hw = mac;
3169
3170 /* To use the chained or ring mode */
3171 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3172 priv->hw->mode = &dwmac4_ring_mode_ops;
3173 } else {
3174 if (chain_mode) {
3175 priv->hw->mode = &chain_mode_ops;
3176 dev_info(priv->device, "Chain mode enabled\n");
3177 priv->mode = STMMAC_CHAIN_MODE;
3178 } else {
3179 priv->hw->mode = &ring_mode_ops;
3180 dev_info(priv->device, "Ring mode enabled\n");
3181 priv->mode = STMMAC_RING_MODE;
3182 }
3183 }
3184
3185 /* Get the HW capability (new GMAC newer than 3.50a) */
3186 priv->hw_cap_support = stmmac_get_hw_features(priv);
3187 if (priv->hw_cap_support) {
3188 dev_info(priv->device, "DMA HW capability register supported\n");
3189
3190 /* We can override some gmac/dma configuration fields
3191 * (e.g. enh_desc, tx_coe) that are passed through the
3192 * platform with the values from the HW capability
3193 * register (if supported).
3194 */
3195 priv->plat->enh_desc = priv->dma_cap.enh_desc;
3196 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
3197 priv->hw->pmt = priv->plat->pmt;
3198
3199 /* TXCOE doesn't work in thresh DMA mode */
3200 if (priv->plat->force_thresh_dma_mode)
3201 priv->plat->tx_coe = 0;
3202 else
3203 priv->plat->tx_coe = priv->dma_cap.tx_coe;
3204
3205 /* In case of GMAC4 rx_coe is from HW cap register. */
3206 priv->plat->rx_coe = priv->dma_cap.rx_coe;
3207
3208 if (priv->dma_cap.rx_coe_type2)
3209 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
3210 else if (priv->dma_cap.rx_coe_type1)
3211 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
3212
3213 } else {
3214 dev_info(priv->device, "No HW DMA feature register supported\n");
3215 }
3216
3217 /* To use alternate (extended), normal or GMAC4 descriptor structures */
3218 if (priv->synopsys_id >= DWMAC_CORE_4_00)
3219 priv->hw->desc = &dwmac4_desc_ops;
3220 else
3221 stmmac_selec_desc_mode(priv);
3222
3223 if (priv->plat->rx_coe) {
3224 priv->hw->rx_csum = priv->plat->rx_coe;
3225 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
3226 if (priv->synopsys_id < DWMAC_CORE_4_00)
3227 dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
3228 }
3229 if (priv->plat->tx_coe)
3230 dev_info(priv->device, "TX Checksum insertion supported\n");
3231
3232 if (priv->plat->pmt) {
3233 dev_info(priv->device, "Wake-Up On Lan supported\n");
3234 device_set_wakeup_capable(priv->device, 1);
3235 }
3236
3237 if (priv->dma_cap.tsoen)
3238 dev_info(priv->device, "TSO supported\n");
3239
3240 return 0;
3241 }
3242
3243 /**
3244 * stmmac_dvr_probe
3245 * @device: device pointer
3246 * @plat_dat: platform data pointer
3247 * @res: stmmac resource pointer
3248 * Description: this is the main probe function used to
3249 * call the alloc_etherdev, allocate the priv structure.
3250 * Return:
3251 * returns 0 on success, otherwise errno.
3252 */
3253 int stmmac_dvr_probe(struct device *device,
3254 struct plat_stmmacenet_data *plat_dat,
3255 struct stmmac_resources *res)
3256 {
3257 int ret = 0;
3258 struct net_device *ndev = NULL;
3259 struct stmmac_priv *priv;
3260
3261 ndev = alloc_etherdev(sizeof(struct stmmac_priv));
3262 if (!ndev)
3263 return -ENOMEM;
3264
3265 SET_NETDEV_DEV(ndev, device);
3266
3267 priv = netdev_priv(ndev);
3268 priv->device = device;
3269 priv->dev = ndev;
3270
3271 stmmac_set_ethtool_ops(ndev);
3272 priv->pause = pause;
3273 priv->plat = plat_dat;
3274 priv->ioaddr = res->addr;
3275 priv->dev->base_addr = (unsigned long)res->addr;
3276
3277 priv->dev->irq = res->irq;
3278 priv->wol_irq = res->wol_irq;
3279 priv->lpi_irq = res->lpi_irq;
3280
3281 if (res->mac)
3282 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
3283
3284 dev_set_drvdata(device, priv->dev);
3285
3286 /* Verify driver arguments */
3287 stmmac_verify_args();
3288
3289 /* Override with kernel parameters if supplied XXX CRS XXX
3290 * this needs to have multiple instances
3291 */
3292 if ((phyaddr >= 0) && (phyaddr <= 31))
3293 priv->plat->phy_addr = phyaddr;
3294
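/* Take the MAC out of reset before accessing its registers */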
3295 if (priv->plat->stmmac_rst)
3296 reset_control_deassert(priv->plat->stmmac_rst);
3297
3298 /* Init MAC and get the capabilities */
3299 ret = stmmac_hw_init(priv);
3300 if (ret)
3301 goto error_hw_init;
3302
3303 ndev->netdev_ops = &stmmac_netdev_ops;
3304
3305 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3306 NETIF_F_RXCSUM;
3307
3308 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3309 ndev->hw_features |= NETIF_F_TSO;
3310 priv->tso = true;
3311 dev_info(priv->device, "TSO feature enabled\n");
3312 }
3313 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
3314 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
3315 #ifdef STMMAC_VLAN_TAG_USED
3316 /* Both mac100 and gmac support receive VLAN tag detection */
3317 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
3318 #endif
3319 priv->msg_enable = netif_msg_init(debug, default_msg_level);
3320
3321 /* MTU range: 46 - hw-specific max */
3322 ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
3323 if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
3324 ndev->max_mtu = JUMBO_LEN;
3325 else
3326 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
3327 /* Do not overwrite ndev->max_mtu if plat->maxmtu is greater than ndev->max_mtu,
3328 * or if plat->maxmtu is below ndev->min_mtu, which is an invalid range.
3329 */
3330 if ((priv->plat->maxmtu < ndev->max_mtu) &&
3331 (priv->plat->maxmtu >= ndev->min_mtu))
3332 ndev->max_mtu = priv->plat->maxmtu;
3333 else if (priv->plat->maxmtu < ndev->min_mtu)
3334 dev_warn(priv->device,
3335 "%s: warning: maxmtu having invalid value (%d)\n",
3336 __func__, priv->plat->maxmtu);
3337
3338 if (flow_ctrl)
3339 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
3340
3341 /* The Rx Watchdog is available in cores newer than 3.40.
3342 * In some cases, for example on buggy HW, this feature
3343 * has to be disabled; this can be done by setting the
3344 * riwt_off field in the platform data.
3345 */
3346 if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
3347 priv->use_riwt = 1;
3348 dev_info(priv->device,
3349 "Enable RX Mitigation via HW Watchdog Timer\n");
3350 }
3351
3352 netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
3353
3354 spin_lock_init(&priv->lock);
3355
3356 /* If a specific clk_csr value is passed from the platform,
3357 * the CSR Clock Range selection cannot be changed at
3358 * run-time and is fixed. Otherwise, the driver will try to
3359 * set the MDC clock dynamically according to the actual
3360 * CSR clock input.
3361 */
3362 if (!priv->plat->clk_csr)
3363 stmmac_clk_csr_set(priv);
3364 else
3365 priv->clk_csr = priv->plat->clk_csr;
3366
3367 stmmac_check_pcs_mode(priv);
3368
3369 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
3370 priv->hw->pcs != STMMAC_PCS_TBI &&
3371 priv->hw->pcs != STMMAC_PCS_RTBI) {
3372 /* MDIO bus Registration */
3373 ret = stmmac_mdio_register(ndev);
3374 if (ret < 0) {
3375 dev_err(priv->device,
3376 "%s: MDIO bus (id: %d) registration failed",
3377 __func__, priv->plat->bus_id);
3378 goto error_mdio_register;
3379 }
3380 }
3381
3382 ret = register_netdev(ndev);
3383 if (ret) {
3384 dev_err(priv->device, "%s: ERROR %i registering the device\n",
3385 __func__, ret);
3386 goto error_netdev_register;
3387 }
3388
3389 return ret;
3390
3391 error_netdev_register:
3392 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
3393 priv->hw->pcs != STMMAC_PCS_TBI &&
3394 priv->hw->pcs != STMMAC_PCS_RTBI)
3395 stmmac_mdio_unregister(ndev);
3396 error_mdio_register:
3397 netif_napi_del(&priv->napi);
3398 error_hw_init:
3399 free_netdev(ndev);
3400
3401 return ret;
3402 }
3403 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
3404
3405 /**
3406 * stmmac_dvr_remove
3407 * @dev: device pointer
3408 * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
3409 * changes the link status, and releases the DMA descriptor rings.
3410 */
3411 int stmmac_dvr_remove(struct device *dev)
3412 {
3413 struct net_device *ndev = dev_get_drvdata(dev);
3414 struct stmmac_priv *priv = netdev_priv(ndev);
3415
3416 netdev_info(priv->dev, "%s: removing driver", __func__);
3417
3418 priv->hw->dma->stop_rx(priv->ioaddr);
3419 priv->hw->dma->stop_tx(priv->ioaddr);
3420
3421 stmmac_set_mac(priv->ioaddr, false);
3422 netif_carrier_off(ndev);
3423 unregister_netdev(ndev);
3424 if (priv->plat->stmmac_rst)
3425 reset_control_assert(priv->plat->stmmac_rst);
3426 clk_disable_unprepare(priv->plat->pclk);
3427 clk_disable_unprepare(priv->plat->stmmac_clk);
3428 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
3429 priv->hw->pcs != STMMAC_PCS_TBI &&
3430 priv->hw->pcs != STMMAC_PCS_RTBI)
3431 stmmac_mdio_unregister(ndev);
3432 free_netdev(ndev);
3433
3434 return 0;
3435 }
3436 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
3437
3438 /**
3439 * stmmac_suspend - suspend callback
3440 * @dev: device pointer
3441 * Description: this is the function to suspend the device and it is called
3442 * by the platform driver to stop the network queue, release the resources,
3443 * program the PMT register (for WoL), clean and release driver resources.
3444 */
3445 int stmmac_suspend(struct device *dev)
3446 {
3447 struct net_device *ndev = dev_get_drvdata(dev);
3448 struct stmmac_priv *priv = netdev_priv(ndev);
3449 unsigned long flags;
3450
3451 if (!ndev || !netif_running(ndev))
3452 return 0;
3453
3454 if (ndev->phydev)
3455 phy_stop(ndev->phydev);
3456
3457 spin_lock_irqsave(&priv->lock, flags);
3458
3459 netif_device_detach(ndev);
3460 netif_stop_queue(ndev);
3461
3462 napi_disable(&priv->napi);
3463
3464 /* Stop TX/RX DMA */
3465 priv->hw->dma->stop_tx(priv->ioaddr);
3466 priv->hw->dma->stop_rx(priv->ioaddr);
3467
3468 /* Enable Power down mode by programming the PMT regs */
3469 if (device_may_wakeup(priv->device)) {
3470 priv->hw->mac->pmt(priv->hw, priv->wolopts);
3471 priv->irq_wake = 1;
3472 } else {
3473 stmmac_set_mac(priv->ioaddr, false);
3474 pinctrl_pm_select_sleep_state(priv->device);
3475 /* Disable the clocks when wake-up is not used */
3476 clk_disable(priv->plat->pclk);
3477 clk_disable(priv->plat->stmmac_clk);
3478 }
3479 spin_unlock_irqrestore(&priv->lock, flags);
3480
3481 priv->oldlink = 0;
3482 priv->speed = SPEED_UNKNOWN;
3483 priv->oldduplex = DUPLEX_UNKNOWN;
3484 return 0;
3485 }
3486 EXPORT_SYMBOL_GPL(stmmac_suspend);
3487
3488 /**
3489 * stmmac_resume - resume callback
3490 * @dev: device pointer
3491 * Description: when resume this function is invoked to setup the DMA and CORE
3492 * in a usable state.
3493 */
3494 int stmmac_resume(struct device *dev)
3495 {
3496 struct net_device *ndev = dev_get_drvdata(dev);
3497 struct stmmac_priv *priv = netdev_priv(ndev);
3498 unsigned long flags;
3499
3500 if (!netif_running(ndev))
3501 return 0;
3502
3503 /* The Power Down bit in the PM register is cleared
3504 * automatically as soon as a magic packet or a Wake-up frame
3505 * is received. Even so, it is better to clear this bit
3506 * manually because it can generate problems while resuming
3507 * from other devices (e.g. a serial console).
3508 */
3509 if (device_may_wakeup(priv->device)) {
3510 spin_lock_irqsave(&priv->lock, flags);
3511 priv->hw->mac->pmt(priv->hw, 0);
3512 spin_unlock_irqrestore(&priv->lock, flags);
3513 priv->irq_wake = 0;
3514 } else {
3515 pinctrl_pm_select_default_state(priv->device);
3516 /* enable the clk previously disabled */
3517 clk_enable(priv->plat->stmmac_clk);
3518 clk_enable(priv->plat->pclk);
3519 /* reset the phy so that it's ready */
3520 if (priv->mii)
3521 stmmac_mdio_reset(priv->mii);
3522 }
3523
3524 netif_device_attach(ndev);
3525
3526 spin_lock_irqsave(&priv->lock, flags);
3527
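/* Restart from a clean state: reset the ring indexes and re-init the descriptors */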
3528 priv->cur_rx = 0;
3529 priv->dirty_rx = 0;
3530 priv->dirty_tx = 0;
3531 priv->cur_tx = 0;
3532 /* reset private mss value to force mss context settings at
3533 * next tso xmit (only used for gmac4).
3534 */
3535 priv->mss = 0;
3536
3537 stmmac_clear_descriptors(priv);
3538
3539 stmmac_hw_setup(ndev, false);
3540 stmmac_init_tx_coalesce(priv);
3541 stmmac_set_rx_mode(ndev);
3542
3543 napi_enable(&priv->napi);
3544
3545 netif_start_queue(ndev);
3546
3547 spin_unlock_irqrestore(&priv->lock, flags);
3548
3549 if (ndev->phydev)
3550 phy_start(ndev->phydev);
3551
3552 return 0;
3553 }
3554 EXPORT_SYMBOL_GPL(stmmac_resume);
3555
3556 #ifndef MODULE
3557 static int __init stmmac_cmdline_opt(char *str)
3558 {
3559 char *opt;
3560
3561 if (!str || !*str)
3562 return -EINVAL;
3563 while ((opt = strsep(&str, ",")) != NULL) {
3564 if (!strncmp(opt, "debug:", 6)) {
3565 if (kstrtoint(opt + 6, 0, &debug))
3566 goto err;
3567 } else if (!strncmp(opt, "phyaddr:", 8)) {
3568 if (kstrtoint(opt + 8, 0, &phyaddr))
3569 goto err;
3570 } else if (!strncmp(opt, "buf_sz:", 7)) {
3571 if (kstrtoint(opt + 7, 0, &buf_sz))
3572 goto err;
3573 } else if (!strncmp(opt, "tc:", 3)) {
3574 if (kstrtoint(opt + 3, 0, &tc))
3575 goto err;
3576 } else if (!strncmp(opt, "watchdog:", 9)) {
3577 if (kstrtoint(opt + 9, 0, &watchdog))
3578 goto err;
3579 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
3580 if (kstrtoint(opt + 10, 0, &flow_ctrl))
3581 goto err;
3582 } else if (!strncmp(opt, "pause:", 6)) {
3583 if (kstrtoint(opt + 6, 0, &pause))
3584 goto err;
3585 } else if (!strncmp(opt, "eee_timer:", 10)) {
3586 if (kstrtoint(opt + 10, 0, &eee_timer))
3587 goto err;
3588 } else if (!strncmp(opt, "chain_mode:", 11)) {
3589 if (kstrtoint(opt + 11, 0, &chain_mode))
3590 goto err;
3591 }
3592 }
3593 return 0;
3594
3595 err:
3596 pr_err("%s: ERROR broken module parameter conversion", __func__);
3597 return -EINVAL;
3598 }
3599
3600 __setup("stmmaceth=", stmmac_cmdline_opt);
3601 #endif /* MODULE */
3602
3603 static int __init stmmac_init(void)
3604 {
3605 #ifdef CONFIG_DEBUG_FS
3606 /* Create debugfs main directory if it doesn't exist yet */
3607 if (!stmmac_fs_dir) {
3608 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
3609
3610 if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
3611 pr_err("ERROR %s, debugfs create directory failed\n",
3612 STMMAC_RESOURCE_NAME);
3613
3614 return -ENOMEM;
3615 }
3616 }
3617 #endif
3618
3619 return 0;
3620 }
3621
3622 static void __exit stmmac_exit(void)
3623 {
3624 #ifdef CONFIG_DEBUG_FS
3625 debugfs_remove_recursive(stmmac_fs_dir);
3626 #endif
3627 }
3628
3629 module_init(stmmac_init)
3630 module_exit(stmmac_exit)
3631
3632 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
3633 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
3634 MODULE_LICENSE("GPL");