1 /*******************************************************************************
2 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3 ST Ethernet IPs are built around a Synopsys IP Core.
4
5 Copyright(C) 2007-2011 STMicroelectronics Ltd
6
7 This program is free software; you can redistribute it and/or modify it
8 under the terms and conditions of the GNU General Public License,
9 version 2, as published by the Free Software Foundation.
10
11 This program is distributed in the hope it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 more details.
15
16 The full GNU General Public License is included in this distribution in
17 the file called "COPYING".
18
19 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
20
21 Documentation available at:
22 http://www.stlinux.com
23 Support available at:
24 https://bugzilla.stlinux.com/
25 *******************************************************************************/
26
27 #include <linux/clk.h>
28 #include <linux/kernel.h>
29 #include <linux/interrupt.h>
30 #include <linux/ip.h>
31 #include <linux/tcp.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/if_ether.h>
35 #include <linux/crc32.h>
36 #include <linux/mii.h>
37 #include <linux/if.h>
38 #include <linux/if_vlan.h>
39 #include <linux/dma-mapping.h>
40 #include <linux/slab.h>
41 #include <linux/prefetch.h>
42 #include <linux/pinctrl/consumer.h>
43 #ifdef CONFIG_DEBUG_FS
44 #include <linux/debugfs.h>
45 #include <linux/seq_file.h>
46 #endif /* CONFIG_DEBUG_FS */
47 #include <linux/net_tstamp.h>
48 #include "stmmac_ptp.h"
49 #include "stmmac.h"
50 #include <linux/reset.h>
51 #include <linux/of_mdio.h>
52 #include "dwmac1000.h"
53
54 #define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
55 #define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
56
57 /* Module parameters */
58 #define TX_TIMEO 5000
59 static int watchdog = TX_TIMEO;
60 module_param(watchdog, int, S_IRUGO | S_IWUSR);
61 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
62
63 static int debug = -1;
64 module_param(debug, int, S_IRUGO | S_IWUSR);
65 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
66
67 static int phyaddr = -1;
68 module_param(phyaddr, int, S_IRUGO);
69 MODULE_PARM_DESC(phyaddr, "Physical device address");
70
71 #define STMMAC_TX_THRESH (DMA_TX_SIZE / 4)
72 #define STMMAC_RX_THRESH (DMA_RX_SIZE / 4)
73
74 static int flow_ctrl = FLOW_OFF;
75 module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
76 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
77
78 static int pause = PAUSE_TIME;
79 module_param(pause, int, S_IRUGO | S_IWUSR);
80 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
81
82 #define TC_DEFAULT 64
83 static int tc = TC_DEFAULT;
84 module_param(tc, int, S_IRUGO | S_IWUSR);
85 MODULE_PARM_DESC(tc, "DMA threshold control value");
86
87 #define DEFAULT_BUFSIZE 1536
88 static int buf_sz = DEFAULT_BUFSIZE;
89 module_param(buf_sz, int, S_IRUGO | S_IWUSR);
90 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
91
92 #define STMMAC_RX_COPYBREAK 256
93
94 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
95 NETIF_MSG_LINK | NETIF_MSG_IFUP |
96 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
97
98 #define STMMAC_DEFAULT_LPI_TIMER 1000
99 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
100 module_param(eee_timer, int, S_IRUGO | S_IWUSR);
101 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
102 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
103
104 /* By default the driver will use the ring mode to manage tx and rx descriptors,
105  * but it allows the user to force the chain mode instead of the ring.
106 */
107 static unsigned int chain_mode;
108 module_param(chain_mode, int, S_IRUGO);
109 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
110
111 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
112
113 #ifdef CONFIG_DEBUG_FS
114 static int stmmac_init_fs(struct net_device *dev);
115 static void stmmac_exit_fs(struct net_device *dev);
116 #endif
117
118 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
119
120 /**
121 * stmmac_verify_args - verify the driver parameters.
122  * Description: it checks the driver parameters and sets a default in case of
123 * errors.
124 */
125 static void stmmac_verify_args(void)
126 {
127 if (unlikely(watchdog < 0))
128 watchdog = TX_TIMEO;
129 if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
130 buf_sz = DEFAULT_BUFSIZE;
131 if (unlikely(flow_ctrl > 1))
132 flow_ctrl = FLOW_AUTO;
133 else if (likely(flow_ctrl < 0))
134 flow_ctrl = FLOW_OFF;
135 if (unlikely((pause < 0) || (pause > 0xffff)))
136 pause = PAUSE_TIME;
137 if (eee_timer < 0)
138 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
139 }
140
141 /**
142 * stmmac_disable_all_queues - Disable all queues
143 * @priv: driver private structure
144 */
145 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
146 {
147 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
148 u32 queue;
149
150 for (queue = 0; queue < rx_queues_cnt; queue++) {
151 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
152
153 napi_disable(&rx_q->napi);
154 }
155 }
156
157 /**
158 * stmmac_enable_all_queues - Enable all queues
159 * @priv: driver private structure
160 */
161 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
162 {
163 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
164 u32 queue;
165
166 for (queue = 0; queue < rx_queues_cnt; queue++) {
167 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
168
169 napi_enable(&rx_q->napi);
170 }
171 }
172
173 /**
174 * stmmac_stop_all_queues - Stop all queues
175 * @priv: driver private structure
176 */
177 static void stmmac_stop_all_queues(struct stmmac_priv *priv)
178 {
179 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
180 u32 queue;
181
182 for (queue = 0; queue < tx_queues_cnt; queue++)
183 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
184 }
185
186 /**
187 * stmmac_start_all_queues - Start all queues
188 * @priv: driver private structure
189 */
190 static void stmmac_start_all_queues(struct stmmac_priv *priv)
191 {
192 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
193 u32 queue;
194
195 for (queue = 0; queue < tx_queues_cnt; queue++)
196 netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
197 }
198
199 /**
200 * stmmac_clk_csr_set - dynamically set the MDC clock
201 * @priv: driver private structure
202 * Description: this is to dynamically set the MDC clock according to the csr
203 * clock input.
204 * Note:
205 * If a specific clk_csr value is passed from the platform
206 * this means that the CSR Clock Range selection cannot be
207 * changed at run-time and it is fixed (as reported in the driver
208  * documentation). Otherwise the driver will try to set the MDC
209 * clock dynamically according to the actual clock input.
210 */
211 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
212 {
213 u32 clk_rate;
214
215 clk_rate = clk_get_rate(priv->plat->stmmac_clk);
216
217 /* Platform provided default clk_csr would be assumed valid
218 * for all other cases except for the below mentioned ones.
219 * For values higher than the IEEE 802.3 specified frequency
220  * we cannot estimate the proper divider because the frequency
221  * of clk_csr_i is not known. So we do not change the default
222  * divider.
223 */
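	/* Each STMMAC_CSR_* range selects the CSR clock divider used to derive
	 * MDC, so that MDC stays within the IEEE 802.3 limit (2.5 MHz) for the
	 * given clk_csr_i frequency.
	 */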
224 if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
225 if (clk_rate < CSR_F_35M)
226 priv->clk_csr = STMMAC_CSR_20_35M;
227 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
228 priv->clk_csr = STMMAC_CSR_35_60M;
229 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
230 priv->clk_csr = STMMAC_CSR_60_100M;
231 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
232 priv->clk_csr = STMMAC_CSR_100_150M;
233 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
234 priv->clk_csr = STMMAC_CSR_150_250M;
235 else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
236 priv->clk_csr = STMMAC_CSR_250_300M;
237 }
238
239 if (priv->plat->has_sun8i) {
240 if (clk_rate > 160000000)
241 priv->clk_csr = 0x03;
242 else if (clk_rate > 80000000)
243 priv->clk_csr = 0x02;
244 else if (clk_rate > 40000000)
245 priv->clk_csr = 0x01;
246 else
247 priv->clk_csr = 0;
248 }
249 }
250
251 static void print_pkt(unsigned char *buf, int len)
252 {
253 pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
254 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
255 }
256
257 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
258 {
259 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
260 u32 avail;
261
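	/* The TX ring is circular: when dirty_tx has wrapped past cur_tx the
	 * free space is the gap between them, otherwise it is the ring size
	 * minus the in-flight region. One slot is kept unused so that a
	 * completely full ring can be distinguished from an empty one.
	 */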
262 if (tx_q->dirty_tx > tx_q->cur_tx)
263 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
264 else
265 avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
266
267 return avail;
268 }
269
270 /**
271 * stmmac_rx_dirty - Get RX queue dirty
272 * @priv: driver private structure
273 * @queue: RX queue index
274 */
275 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
276 {
277 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
278 u32 dirty;
279
280 if (rx_q->dirty_rx <= rx_q->cur_rx)
281 dirty = rx_q->cur_rx - rx_q->dirty_rx;
282 else
283 dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
284
285 return dirty;
286 }
287
288 /**
289 * stmmac_hw_fix_mac_speed - callback for speed selection
290 * @priv: driver private structure
291 * Description: on some platforms (e.g. ST), some HW system configuration
292 * registers have to be set according to the link speed negotiated.
293 */
294 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
295 {
296 struct net_device *ndev = priv->dev;
297 struct phy_device *phydev = ndev->phydev;
298
299 if (likely(priv->plat->fix_mac_speed))
300 priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
301 }
302
303 /**
304 * stmmac_enable_eee_mode - check and enter in LPI mode
305 * @priv: driver private structure
306  * Description: this function checks whether all TX queues are idle and, if
307  * so, enters the LPI mode when EEE is in use.
308 */
309 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
310 {
311 u32 tx_cnt = priv->plat->tx_queues_to_use;
312 u32 queue;
313
314 /* check if all TX queues have the work finished */
315 for (queue = 0; queue < tx_cnt; queue++) {
316 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
317
318 if (tx_q->dirty_tx != tx_q->cur_tx)
319 return; /* still unfinished work */
320 }
321
322 /* Check and enter in LPI mode */
323 if (!priv->tx_path_in_lpi_mode)
324 priv->hw->mac->set_eee_mode(priv->hw,
325 priv->plat->en_tx_lpi_clockgating);
326 }
327
328 /**
329 * stmmac_disable_eee_mode - disable and exit from LPI mode
330 * @priv: driver private structure
331  * Description: this function exits and disables EEE when the LPI state is
332  * active. It is called by the xmit path.
333 */
334 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
335 {
336 priv->hw->mac->reset_eee_mode(priv->hw);
337 del_timer_sync(&priv->eee_ctrl_timer);
338 priv->tx_path_in_lpi_mode = false;
339 }
340
341 /**
342 * stmmac_eee_ctrl_timer - EEE TX SW timer.
343 * @arg : data hook
344 * Description:
345 * if there is no data transfer and if we are not in LPI state,
346  * then the MAC transmitter can be moved to the LPI state.
347 */
348 static void stmmac_eee_ctrl_timer(unsigned long arg)
349 {
350 struct stmmac_priv *priv = (struct stmmac_priv *)arg;
351
352 stmmac_enable_eee_mode(priv);
353 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
354 }
355
356 /**
357 * stmmac_eee_init - init EEE
358 * @priv: driver private structure
359 * Description:
360 * if the GMAC supports the EEE (from the HW cap reg) and the phy device
361  * can also manage EEE, this function enables the LPI state and starts the
362  * related timer.
363 */
364 bool stmmac_eee_init(struct stmmac_priv *priv)
365 {
366 struct net_device *ndev = priv->dev;
367 unsigned long flags;
368 bool ret = false;
369
370 	/* When using the PCS we cannot access the phy registers at this stage,
371 	 * so we do not support extra features like EEE.
372 */
373 if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
374 (priv->hw->pcs == STMMAC_PCS_TBI) ||
375 (priv->hw->pcs == STMMAC_PCS_RTBI))
376 goto out;
377
378 /* MAC core supports the EEE feature. */
379 if (priv->dma_cap.eee) {
380 int tx_lpi_timer = priv->tx_lpi_timer;
381
382 /* Check if the PHY supports EEE */
383 if (phy_init_eee(ndev->phydev, 1)) {
384 			/* Handle the case where EEE can no longer be supported
385 			 * at run-time (for example because the lp caps have
386 			 * changed).
387 			 * In that case the driver disables its own timers.
388 */
389 spin_lock_irqsave(&priv->lock, flags);
390 if (priv->eee_active) {
391 netdev_dbg(priv->dev, "disable EEE\n");
392 del_timer_sync(&priv->eee_ctrl_timer);
393 priv->hw->mac->set_eee_timer(priv->hw, 0,
394 tx_lpi_timer);
395 }
396 priv->eee_active = 0;
397 spin_unlock_irqrestore(&priv->lock, flags);
398 goto out;
399 }
400 /* Activate the EEE and start timers */
401 spin_lock_irqsave(&priv->lock, flags);
402 if (!priv->eee_active) {
403 priv->eee_active = 1;
404 setup_timer(&priv->eee_ctrl_timer,
405 stmmac_eee_ctrl_timer,
406 (unsigned long)priv);
407 mod_timer(&priv->eee_ctrl_timer,
408 STMMAC_LPI_T(eee_timer));
409
410 priv->hw->mac->set_eee_timer(priv->hw,
411 STMMAC_DEFAULT_LIT_LS,
412 tx_lpi_timer);
413 }
414 /* Set HW EEE according to the speed */
415 priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);
416
417 ret = true;
418 spin_unlock_irqrestore(&priv->lock, flags);
419
420 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
421 }
422 out:
423 return ret;
424 }
425
426 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
427 * @priv: driver private structure
428 * @p : descriptor pointer
429 * @skb : the socket buffer
430 * Description :
431  * This function reads the timestamp from the descriptor, performs some
432  * sanity checks and then passes it to the stack.
433 */
434 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
435 struct dma_desc *p, struct sk_buff *skb)
436 {
437 struct skb_shared_hwtstamps shhwtstamp;
438 u64 ns;
439
440 if (!priv->hwts_tx_en)
441 return;
442
443 /* exit if skb doesn't support hw tstamp */
444 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
445 return;
446
447 /* check tx tstamp status */
448 if (priv->hw->desc->get_tx_timestamp_status(p)) {
449 /* get the valid tstamp */
450 ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
451
452 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
453 shhwtstamp.hwtstamp = ns_to_ktime(ns);
454
455 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
456 /* pass tstamp to stack */
457 skb_tstamp_tx(skb, &shhwtstamp);
458 }
459
460 return;
461 }
462
463 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
464 * @priv: driver private structure
465 * @p : descriptor pointer
466 * @np : next descriptor pointer
467 * @skb : the socket buffer
468 * Description :
469  * This function reads the received packet's timestamp from the descriptor
470  * and passes it to the stack. It also performs some sanity checks.
471 */
472 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
473 struct dma_desc *np, struct sk_buff *skb)
474 {
475 struct skb_shared_hwtstamps *shhwtstamp = NULL;
476 struct dma_desc *desc = p;
477 u64 ns;
478
479 if (!priv->hwts_rx_en)
480 return;
481 /* For GMAC4, the valid timestamp is from CTX next desc. */
482 if (priv->plat->has_gmac4)
483 desc = np;
484
485 /* Check if timestamp is available */
486 if (priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts)) {
487 ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
488 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
489 shhwtstamp = skb_hwtstamps(skb);
490 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
491 shhwtstamp->hwtstamp = ns_to_ktime(ns);
492 } else {
493 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
494 }
495 }
496
497 /**
498 * stmmac_hwtstamp_ioctl - control hardware timestamping.
499 * @dev: device pointer.
500 * @ifr: An IOCTL specific structure, that can contain a pointer to
501 * a proprietary structure used to pass information to the driver.
502 * Description:
503  * This function configures the MAC to enable/disable both outgoing (TX)
504  * and incoming (RX) packet time stamping based on user input.
505 * Return Value:
506 * 0 on success and an appropriate -ve integer on failure.
507 */
508 static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
509 {
510 struct stmmac_priv *priv = netdev_priv(dev);
511 struct hwtstamp_config config;
512 struct timespec64 now;
513 u64 temp = 0;
514 u32 ptp_v2 = 0;
515 u32 tstamp_all = 0;
516 u32 ptp_over_ipv4_udp = 0;
517 u32 ptp_over_ipv6_udp = 0;
518 u32 ptp_over_ethernet = 0;
519 u32 snap_type_sel = 0;
520 u32 ts_master_en = 0;
521 u32 ts_event_en = 0;
522 u32 value = 0;
523 u32 sec_inc;
524
525 if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
526 netdev_alert(priv->dev, "No support for HW time stamping\n");
527 priv->hwts_tx_en = 0;
528 priv->hwts_rx_en = 0;
529
530 return -EOPNOTSUPP;
531 }
532
533 if (copy_from_user(&config, ifr->ifr_data,
534 sizeof(struct hwtstamp_config)))
535 return -EFAULT;
536
537 netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
538 __func__, config.flags, config.tx_type, config.rx_filter);
539
540 /* reserved for future extensions */
541 if (config.flags)
542 return -EINVAL;
543
544 if (config.tx_type != HWTSTAMP_TX_OFF &&
545 config.tx_type != HWTSTAMP_TX_ON)
546 return -ERANGE;
547
548 if (priv->adv_ts) {
549 switch (config.rx_filter) {
550 case HWTSTAMP_FILTER_NONE:
551 			/* do not time stamp any incoming packet */
552 config.rx_filter = HWTSTAMP_FILTER_NONE;
553 break;
554
555 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
556 /* PTP v1, UDP, any kind of event packet */
557 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
558 /* take time stamp for all event messages */
559 if (priv->plat->has_gmac4)
560 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
561 else
562 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
563
564 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
565 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
566 break;
567
568 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
569 /* PTP v1, UDP, Sync packet */
570 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
571 /* take time stamp for SYNC messages only */
572 ts_event_en = PTP_TCR_TSEVNTENA;
573
574 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
575 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
576 break;
577
578 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
579 /* PTP v1, UDP, Delay_req packet */
580 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
581 /* take time stamp for Delay_Req messages only */
582 ts_master_en = PTP_TCR_TSMSTRENA;
583 ts_event_en = PTP_TCR_TSEVNTENA;
584
585 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
586 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
587 break;
588
589 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
590 /* PTP v2, UDP, any kind of event packet */
591 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
592 ptp_v2 = PTP_TCR_TSVER2ENA;
593 /* take time stamp for all event messages */
594 if (priv->plat->has_gmac4)
595 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
596 else
597 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
598
599 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
600 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
601 break;
602
603 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
604 /* PTP v2, UDP, Sync packet */
605 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
606 ptp_v2 = PTP_TCR_TSVER2ENA;
607 /* take time stamp for SYNC messages only */
608 ts_event_en = PTP_TCR_TSEVNTENA;
609
610 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
611 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
612 break;
613
614 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
615 /* PTP v2, UDP, Delay_req packet */
616 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
617 ptp_v2 = PTP_TCR_TSVER2ENA;
618 /* take time stamp for Delay_Req messages only */
619 ts_master_en = PTP_TCR_TSMSTRENA;
620 ts_event_en = PTP_TCR_TSEVNTENA;
621
622 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
623 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
624 break;
625
626 case HWTSTAMP_FILTER_PTP_V2_EVENT:
627 			/* PTP v2/802.1AS, any layer, any kind of event packet */
628 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
629 ptp_v2 = PTP_TCR_TSVER2ENA;
630 /* take time stamp for all event messages */
631 if (priv->plat->has_gmac4)
632 snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
633 else
634 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
635
636 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
637 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
638 ptp_over_ethernet = PTP_TCR_TSIPENA;
639 break;
640
641 case HWTSTAMP_FILTER_PTP_V2_SYNC:
642 			/* PTP v2/802.1AS, any layer, Sync packet */
643 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
644 ptp_v2 = PTP_TCR_TSVER2ENA;
645 /* take time stamp for SYNC messages only */
646 ts_event_en = PTP_TCR_TSEVNTENA;
647
648 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
649 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
650 ptp_over_ethernet = PTP_TCR_TSIPENA;
651 break;
652
653 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
654 			/* PTP v2/802.1AS, any layer, Delay_req packet */
655 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
656 ptp_v2 = PTP_TCR_TSVER2ENA;
657 /* take time stamp for Delay_Req messages only */
658 ts_master_en = PTP_TCR_TSMSTRENA;
659 ts_event_en = PTP_TCR_TSEVNTENA;
660
661 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
662 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
663 ptp_over_ethernet = PTP_TCR_TSIPENA;
664 break;
665
666 case HWTSTAMP_FILTER_NTP_ALL:
667 case HWTSTAMP_FILTER_ALL:
668 /* time stamp any incoming packet */
669 config.rx_filter = HWTSTAMP_FILTER_ALL;
670 tstamp_all = PTP_TCR_TSENALL;
671 break;
672
673 default:
674 return -ERANGE;
675 }
676 } else {
677 switch (config.rx_filter) {
678 case HWTSTAMP_FILTER_NONE:
679 config.rx_filter = HWTSTAMP_FILTER_NONE;
680 break;
681 default:
682 /* PTP v1, UDP, any kind of event packet */
683 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
684 break;
685 }
686 }
687 priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
688 priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
689
690 if (!priv->hwts_tx_en && !priv->hwts_rx_en)
691 priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
692 else {
693 value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
694 tstamp_all | ptp_v2 | ptp_over_ethernet |
695 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
696 ts_master_en | snap_type_sel);
697 priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);
698
699 /* program Sub Second Increment reg */
700 sec_inc = priv->hw->ptp->config_sub_second_increment(
701 priv->ptpaddr, priv->plat->clk_ptp_rate,
702 priv->plat->has_gmac4);
703 temp = div_u64(1000000000ULL, sec_inc);
704
705 /* calculate default added value:
706 * formula is :
707 * addend = (2^32)/freq_div_ratio;
708 * where, freq_div_ratio = 1e9ns/sec_inc
709 */
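		/* The addend feeds the hardware's fine-update accumulator: on
		 * every clk_ptp cycle the MAC adds 'addend' to a 32-bit
		 * accumulator and, each time it overflows, bumps the sub-second
		 * counter by sec_inc. Scaling the addend up or down therefore
		 * speeds up or slows down the PTP clock without touching
		 * sec_inc.
		 */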
710 temp = (u64)(temp << 32);
711 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
712 priv->hw->ptp->config_addend(priv->ptpaddr,
713 priv->default_addend);
714
715 /* initialize system time */
716 ktime_get_real_ts64(&now);
717
718 /* lower 32 bits of tv_sec are safe until y2106 */
719 priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
720 now.tv_nsec);
721 }
722
723 return copy_to_user(ifr->ifr_data, &config,
724 sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
725 }
726
727 /**
728 * stmmac_init_ptp - init PTP
729 * @priv: driver private structure
730 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
731 * This is done by looking at the HW cap. register.
732 * This function also registers the ptp driver.
733 */
734 static int stmmac_init_ptp(struct stmmac_priv *priv)
735 {
736 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
737 return -EOPNOTSUPP;
738
739 priv->adv_ts = 0;
740 /* Check if adv_ts can be enabled for dwmac 4.x core */
741 if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
742 priv->adv_ts = 1;
743 /* Dwmac 3.x core with extend_desc can support adv_ts */
744 else if (priv->extend_desc && priv->dma_cap.atime_stamp)
745 priv->adv_ts = 1;
746
747 if (priv->dma_cap.time_stamp)
748 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
749
750 if (priv->adv_ts)
751 netdev_info(priv->dev,
752 "IEEE 1588-2008 Advanced Timestamp supported\n");
753
754 priv->hw->ptp = &stmmac_ptp;
755 priv->hwts_tx_en = 0;
756 priv->hwts_rx_en = 0;
757
758 stmmac_ptp_register(priv);
759
760 return 0;
761 }
762
763 static void stmmac_release_ptp(struct stmmac_priv *priv)
764 {
765 if (priv->plat->clk_ptp_ref)
766 clk_disable_unprepare(priv->plat->clk_ptp_ref);
767 stmmac_ptp_unregister(priv);
768 }
769
770 /**
771 * stmmac_mac_flow_ctrl - Configure flow control in all queues
772 * @priv: driver private structure
773 * Description: It is used for configuring the flow control in all queues
774 */
775 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
776 {
777 u32 tx_cnt = priv->plat->tx_queues_to_use;
778
779 priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl,
780 priv->pause, tx_cnt);
781 }
782
783 /**
784 * stmmac_adjust_link - adjusts the link parameters
785 * @dev: net device structure
786 * Description: this is the helper called by the physical abstraction layer
787  * drivers to communicate the phy link status. According to the speed and
788  * duplex this driver can invoke registered glue-logic as well.
789  * It also invokes the eee initialization because the link could switch
790  * between different networks (that are eee capable).
791 */
792 static void stmmac_adjust_link(struct net_device *dev)
793 {
794 struct stmmac_priv *priv = netdev_priv(dev);
795 struct phy_device *phydev = dev->phydev;
796 unsigned long flags;
797 bool new_state = false;
798
799 if (!phydev)
800 return;
801
802 spin_lock_irqsave(&priv->lock, flags);
803
804 if (phydev->link) {
805 u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
806
807 /* Now we make sure that we can be in full duplex mode.
808 * If not, we operate in half-duplex mode. */
809 if (phydev->duplex != priv->oldduplex) {
810 new_state = true;
811 if (!phydev->duplex)
812 ctrl &= ~priv->hw->link.duplex;
813 else
814 ctrl |= priv->hw->link.duplex;
815 priv->oldduplex = phydev->duplex;
816 }
817 /* Flow Control operation */
818 if (phydev->pause)
819 stmmac_mac_flow_ctrl(priv, phydev->duplex);
820
821 if (phydev->speed != priv->speed) {
822 new_state = true;
823 ctrl &= ~priv->hw->link.speed_mask;
824 switch (phydev->speed) {
825 case SPEED_1000:
826 ctrl |= priv->hw->link.speed1000;
827 break;
828 case SPEED_100:
829 ctrl |= priv->hw->link.speed100;
830 break;
831 case SPEED_10:
832 ctrl |= priv->hw->link.speed10;
833 break;
834 default:
835 netif_warn(priv, link, priv->dev,
836 "broken speed: %d\n", phydev->speed);
837 phydev->speed = SPEED_UNKNOWN;
838 break;
839 }
840 if (phydev->speed != SPEED_UNKNOWN)
841 stmmac_hw_fix_mac_speed(priv);
842 priv->speed = phydev->speed;
843 }
844
845 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
846
847 if (!priv->oldlink) {
848 new_state = true;
849 priv->oldlink = true;
850 }
851 } else if (priv->oldlink) {
852 new_state = true;
853 priv->oldlink = false;
854 priv->speed = SPEED_UNKNOWN;
855 priv->oldduplex = DUPLEX_UNKNOWN;
856 }
857
858 if (new_state && netif_msg_link(priv))
859 phy_print_status(phydev);
860
861 spin_unlock_irqrestore(&priv->lock, flags);
862
863 if (phydev->is_pseudo_fixed_link)
864 		/* Stop the PHY layer from calling the hook to adjust the link
865 		 * when a switch is attached to the stmmac driver.
866 */
867 phydev->irq = PHY_IGNORE_INTERRUPT;
868 else
869 /* At this stage, init the EEE if supported.
870 * Never called in case of fixed_link.
871 */
872 priv->eee_enabled = stmmac_eee_init(priv);
873 }
874
875 /**
876 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
877 * @priv: driver private structure
878  * Description: this is to verify if the HW supports the Physical Coding
879  * Sublayer (PCS) interface, which can be used when the MAC is
880 * configured for the TBI, RTBI, or SGMII PHY interface.
881 */
882 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
883 {
884 int interface = priv->plat->interface;
885
886 if (priv->dma_cap.pcs) {
887 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
888 (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
889 (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
890 (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
891 netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
892 priv->hw->pcs = STMMAC_PCS_RGMII;
893 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
894 netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
895 priv->hw->pcs = STMMAC_PCS_SGMII;
896 }
897 }
898 }
899
900 /**
901 * stmmac_init_phy - PHY initialization
902 * @dev: net device structure
903 * Description: it initializes the driver's PHY state, and attaches the PHY
904 * to the mac driver.
905 * Return value:
906 * 0 on success
907 */
908 static int stmmac_init_phy(struct net_device *dev)
909 {
910 struct stmmac_priv *priv = netdev_priv(dev);
911 struct phy_device *phydev;
912 char phy_id_fmt[MII_BUS_ID_SIZE + 3];
913 char bus_id[MII_BUS_ID_SIZE];
914 int interface = priv->plat->interface;
915 int max_speed = priv->plat->max_speed;
916 priv->oldlink = false;
917 priv->speed = SPEED_UNKNOWN;
918 priv->oldduplex = DUPLEX_UNKNOWN;
919
920 if (priv->plat->phy_node) {
921 phydev = of_phy_connect(dev, priv->plat->phy_node,
922 &stmmac_adjust_link, 0, interface);
923 } else {
924 snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
925 priv->plat->bus_id);
926
927 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
928 priv->plat->phy_addr);
929 netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
930 phy_id_fmt);
931
932 phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
933 interface);
934 }
935
936 if (IS_ERR_OR_NULL(phydev)) {
937 netdev_err(priv->dev, "Could not attach to PHY\n");
938 if (!phydev)
939 return -ENODEV;
940
941 return PTR_ERR(phydev);
942 }
943
944 /* Stop Advertising 1000BASE Capability if interface is not GMII */
945 if ((interface == PHY_INTERFACE_MODE_MII) ||
946 (interface == PHY_INTERFACE_MODE_RMII) ||
947 (max_speed < 1000 && max_speed > 0))
948 phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
949 SUPPORTED_1000baseT_Full);
950
951 /*
952 * Broken HW is sometimes missing the pull-up resistor on the
953 * MDIO line, which results in reads to non-existent devices returning
954 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
955 * device as well.
956 * Note: phydev->phy_id is the result of reading the UID PHY registers.
957 */
958 if (!priv->plat->phy_node && phydev->phy_id == 0) {
959 phy_disconnect(phydev);
960 return -ENODEV;
961 }
962
963 /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
964 	 * subsequent PHY polling; make sure we force a link transition if
965 	 * we have an UP/DOWN/UP transition
966 */
967 if (phydev->is_pseudo_fixed_link)
968 phydev->irq = PHY_POLL;
969
970 phy_attached_info(phydev);
971 return 0;
972 }
973
974 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
975 {
976 u32 rx_cnt = priv->plat->rx_queues_to_use;
977 void *head_rx;
978 u32 queue;
979
980 /* Display RX rings */
981 for (queue = 0; queue < rx_cnt; queue++) {
982 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
983
984 pr_info("\tRX Queue %u rings\n", queue);
985
986 if (priv->extend_desc)
987 head_rx = (void *)rx_q->dma_erx;
988 else
989 head_rx = (void *)rx_q->dma_rx;
990
991 /* Display RX ring */
992 priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
993 }
994 }
995
996 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
997 {
998 u32 tx_cnt = priv->plat->tx_queues_to_use;
999 void *head_tx;
1000 u32 queue;
1001
1002 /* Display TX rings */
1003 for (queue = 0; queue < tx_cnt; queue++) {
1004 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1005
1006 		pr_info("\tTX Queue %u rings\n", queue);
1007
1008 if (priv->extend_desc)
1009 head_tx = (void *)tx_q->dma_etx;
1010 else
1011 head_tx = (void *)tx_q->dma_tx;
1012
1013 priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
1014 }
1015 }
1016
1017 static void stmmac_display_rings(struct stmmac_priv *priv)
1018 {
1019 /* Display RX ring */
1020 stmmac_display_rx_rings(priv);
1021
1022 /* Display TX ring */
1023 stmmac_display_tx_rings(priv);
1024 }
1025
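/**
 * stmmac_set_bfsize - pick the DMA RX buffer size for a given MTU
 * @mtu: interface MTU
 * @bufsize: current buffer size, used only as the initial value
 * Description: returns the smallest supported buffer size (default, 2KiB,
 * 4KiB or 8KiB) able to hold a frame of the given MTU; the 16KiB case is
 * handled separately through the per-mode set_16kib_bfsize callback.
 */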
1026 static int stmmac_set_bfsize(int mtu, int bufsize)
1027 {
1028 int ret = bufsize;
1029
1030 if (mtu >= BUF_SIZE_4KiB)
1031 ret = BUF_SIZE_8KiB;
1032 else if (mtu >= BUF_SIZE_2KiB)
1033 ret = BUF_SIZE_4KiB;
1034 else if (mtu > DEFAULT_BUFSIZE)
1035 ret = BUF_SIZE_2KiB;
1036 else
1037 ret = DEFAULT_BUFSIZE;
1038
1039 return ret;
1040 }
1041
1042 /**
1043 * stmmac_clear_rx_descriptors - clear RX descriptors
1044 * @priv: driver private structure
1045 * @queue: RX queue index
1046 * Description: this function is called to clear the RX descriptors
1047  * whether basic or extended descriptors are used.
1048 */
1049 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1050 {
1051 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1052 int i;
1053
1054 /* Clear the RX descriptors */
1055 for (i = 0; i < DMA_RX_SIZE; i++)
1056 if (priv->extend_desc)
1057 priv->hw->desc->init_rx_desc(&rx_q->dma_erx[i].basic,
1058 priv->use_riwt, priv->mode,
1059 (i == DMA_RX_SIZE - 1));
1060 else
1061 priv->hw->desc->init_rx_desc(&rx_q->dma_rx[i],
1062 priv->use_riwt, priv->mode,
1063 (i == DMA_RX_SIZE - 1));
1064 }
1065
1066 /**
1067 * stmmac_clear_tx_descriptors - clear tx descriptors
1068 * @priv: driver private structure
1069 * @queue: TX queue index.
1070 * Description: this function is called to clear the TX descriptors
1071  * whether basic or extended descriptors are used.
1072 */
1073 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
1074 {
1075 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1076 int i;
1077
1078 /* Clear the TX descriptors */
1079 for (i = 0; i < DMA_TX_SIZE; i++)
1080 if (priv->extend_desc)
1081 priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
1082 priv->mode,
1083 (i == DMA_TX_SIZE - 1));
1084 else
1085 priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
1086 priv->mode,
1087 (i == DMA_TX_SIZE - 1));
1088 }
1089
1090 /**
1091 * stmmac_clear_descriptors - clear descriptors
1092 * @priv: driver private structure
1093 * Description: this function is called to clear the TX and RX descriptors
1094  * whether basic or extended descriptors are used.
1095 */
1096 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
1097 {
1098 u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1099 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1100 u32 queue;
1101
1102 /* Clear the RX descriptors */
1103 for (queue = 0; queue < rx_queue_cnt; queue++)
1104 stmmac_clear_rx_descriptors(priv, queue);
1105
1106 /* Clear the TX descriptors */
1107 for (queue = 0; queue < tx_queue_cnt; queue++)
1108 stmmac_clear_tx_descriptors(priv, queue);
1109 }
1110
1111 /**
1112 * stmmac_init_rx_buffers - init the RX descriptor buffer.
1113 * @priv: driver private structure
1114 * @p: descriptor pointer
1115 * @i: descriptor index
1116 * @flags: gfp flag
1117 * @queue: RX queue index
1118 * Description: this function is called to allocate a receive buffer, perform
1119 * the DMA mapping and init the descriptor.
1120 */
1121 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
1122 int i, gfp_t flags, u32 queue)
1123 {
1124 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1125 struct sk_buff *skb;
1126
1127 skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
1128 if (!skb) {
1129 netdev_err(priv->dev,
1130 "%s: Rx init fails; skb is NULL\n", __func__);
1131 return -ENOMEM;
1132 }
1133 rx_q->rx_skbuff[i] = skb;
1134 rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
1135 priv->dma_buf_sz,
1136 DMA_FROM_DEVICE);
1137 if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
1138 netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
1139 dev_kfree_skb_any(skb);
1140 return -EINVAL;
1141 }
1142
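	/* GMAC4 and newer cores take the receive buffer address in des0, while
	 * older cores expect it in des2.
	 */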
1143 if (priv->synopsys_id >= DWMAC_CORE_4_00)
1144 p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
1145 else
1146 p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
1147
1148 if ((priv->hw->mode->init_desc3) &&
1149 (priv->dma_buf_sz == BUF_SIZE_16KiB))
1150 priv->hw->mode->init_desc3(p);
1151
1152 return 0;
1153 }
1154
1155 /**
1156  * stmmac_free_rx_buffer - free an RX dma buffer
1157 * @priv: private structure
1158 * @queue: RX queue index
1159 * @i: buffer index.
1160 */
1161 static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1162 {
1163 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1164
1165 if (rx_q->rx_skbuff[i]) {
1166 dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
1167 priv->dma_buf_sz, DMA_FROM_DEVICE);
1168 dev_kfree_skb_any(rx_q->rx_skbuff[i]);
1169 }
1170 rx_q->rx_skbuff[i] = NULL;
1171 }
1172
1173 /**
1174  * stmmac_free_tx_buffer - free a TX dma buffer
1175  * @priv: private structure
1176  * @queue: TX queue index
1177 * @i: buffer index.
1178 */
1179 static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
1180 {
1181 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1182
1183 if (tx_q->tx_skbuff_dma[i].buf) {
1184 if (tx_q->tx_skbuff_dma[i].map_as_page)
1185 dma_unmap_page(priv->device,
1186 tx_q->tx_skbuff_dma[i].buf,
1187 tx_q->tx_skbuff_dma[i].len,
1188 DMA_TO_DEVICE);
1189 else
1190 dma_unmap_single(priv->device,
1191 tx_q->tx_skbuff_dma[i].buf,
1192 tx_q->tx_skbuff_dma[i].len,
1193 DMA_TO_DEVICE);
1194 }
1195
1196 if (tx_q->tx_skbuff[i]) {
1197 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1198 tx_q->tx_skbuff[i] = NULL;
1199 tx_q->tx_skbuff_dma[i].buf = 0;
1200 tx_q->tx_skbuff_dma[i].map_as_page = false;
1201 }
1202 }
1203
1204 /**
1205 * init_dma_rx_desc_rings - init the RX descriptor rings
1206 * @dev: net device structure
1207 * @flags: gfp flag.
1208 * Description: this function initializes the DMA RX descriptors
1209 * and allocates the socket buffers. It supports the chained and ring
1210 * modes.
1211 */
1212 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1213 {
1214 struct stmmac_priv *priv = netdev_priv(dev);
1215 u32 rx_count = priv->plat->rx_queues_to_use;
1216 unsigned int bfsize = 0;
1217 int ret = -ENOMEM;
1218 int queue;
1219 int i;
1220
1221 if (priv->hw->mode->set_16kib_bfsize)
1222 bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
1223
1224 if (bfsize < BUF_SIZE_16KiB)
1225 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
1226
1227 priv->dma_buf_sz = bfsize;
1228
1229 /* RX INITIALIZATION */
1230 netif_dbg(priv, probe, priv->dev,
1231 "SKB addresses:\nskb\t\tskb data\tdma data\n");
1232
1233 for (queue = 0; queue < rx_count; queue++) {
1234 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1235
1236 netif_dbg(priv, probe, priv->dev,
1237 "(%s) dma_rx_phy=0x%08x\n", __func__,
1238 (u32)rx_q->dma_rx_phy);
1239
1240 for (i = 0; i < DMA_RX_SIZE; i++) {
1241 struct dma_desc *p;
1242
1243 if (priv->extend_desc)
1244 p = &((rx_q->dma_erx + i)->basic);
1245 else
1246 p = rx_q->dma_rx + i;
1247
1248 ret = stmmac_init_rx_buffers(priv, p, i, flags,
1249 queue);
1250 if (ret)
1251 goto err_init_rx_buffers;
1252
1253 netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1254 rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
1255 (unsigned int)rx_q->rx_skbuff_dma[i]);
1256 }
1257
1258 rx_q->cur_rx = 0;
1259 rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1260
1261 stmmac_clear_rx_descriptors(priv, queue);
1262
1263 /* Setup the chained descriptor addresses */
1264 if (priv->mode == STMMAC_CHAIN_MODE) {
1265 if (priv->extend_desc)
1266 priv->hw->mode->init(rx_q->dma_erx,
1267 rx_q->dma_rx_phy,
1268 DMA_RX_SIZE, 1);
1269 else
1270 priv->hw->mode->init(rx_q->dma_rx,
1271 rx_q->dma_rx_phy,
1272 DMA_RX_SIZE, 0);
1273 }
1274 }
1275
1276 buf_sz = bfsize;
1277
1278 return 0;
1279
1280 err_init_rx_buffers:
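	/* Unwind on failure: free the buffers already allocated for the queue
	 * that failed part-way through, then every fully initialized queue
	 * before it.
	 */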
1281 while (queue >= 0) {
1282 while (--i >= 0)
1283 stmmac_free_rx_buffer(priv, queue, i);
1284
1285 if (queue == 0)
1286 break;
1287
1288 i = DMA_RX_SIZE;
1289 queue--;
1290 }
1291
1292 return ret;
1293 }
1294
1295 /**
1296 * init_dma_tx_desc_rings - init the TX descriptor rings
1297 * @dev: net device structure.
1298 * Description: this function initializes the DMA TX descriptors
1299 * and allocates the socket buffers. It supports the chained and ring
1300 * modes.
1301 */
1302 static int init_dma_tx_desc_rings(struct net_device *dev)
1303 {
1304 struct stmmac_priv *priv = netdev_priv(dev);
1305 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1306 u32 queue;
1307 int i;
1308
1309 for (queue = 0; queue < tx_queue_cnt; queue++) {
1310 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1311
1312 netif_dbg(priv, probe, priv->dev,
1313 "(%s) dma_tx_phy=0x%08x\n", __func__,
1314 (u32)tx_q->dma_tx_phy);
1315
1316 /* Setup the chained descriptor addresses */
1317 if (priv->mode == STMMAC_CHAIN_MODE) {
1318 if (priv->extend_desc)
1319 priv->hw->mode->init(tx_q->dma_etx,
1320 tx_q->dma_tx_phy,
1321 DMA_TX_SIZE, 1);
1322 else
1323 priv->hw->mode->init(tx_q->dma_tx,
1324 tx_q->dma_tx_phy,
1325 DMA_TX_SIZE, 0);
1326 }
1327
1328 for (i = 0; i < DMA_TX_SIZE; i++) {
1329 struct dma_desc *p;
1330 if (priv->extend_desc)
1331 p = &((tx_q->dma_etx + i)->basic);
1332 else
1333 p = tx_q->dma_tx + i;
1334
1335 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1336 p->des0 = 0;
1337 p->des1 = 0;
1338 p->des2 = 0;
1339 p->des3 = 0;
1340 } else {
1341 p->des2 = 0;
1342 }
1343
1344 tx_q->tx_skbuff_dma[i].buf = 0;
1345 tx_q->tx_skbuff_dma[i].map_as_page = false;
1346 tx_q->tx_skbuff_dma[i].len = 0;
1347 tx_q->tx_skbuff_dma[i].last_segment = false;
1348 tx_q->tx_skbuff[i] = NULL;
1349 }
1350
1351 tx_q->dirty_tx = 0;
1352 tx_q->cur_tx = 0;
1353
1354 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1355 }
1356
1357 return 0;
1358 }
1359
1360 /**
1361 * init_dma_desc_rings - init the RX/TX descriptor rings
1362 * @dev: net device structure
1363 * @flags: gfp flag.
1364 * Description: this function initializes the DMA RX/TX descriptors
1365 * and allocates the socket buffers. It supports the chained and ring
1366 * modes.
1367 */
1368 static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1369 {
1370 struct stmmac_priv *priv = netdev_priv(dev);
1371 int ret;
1372
1373 ret = init_dma_rx_desc_rings(dev, flags);
1374 if (ret)
1375 return ret;
1376
1377 ret = init_dma_tx_desc_rings(dev);
1378
1379 stmmac_clear_descriptors(priv);
1380
1381 if (netif_msg_hw(priv))
1382 stmmac_display_rings(priv);
1383
1384 return ret;
1385 }
1386
1387 /**
1388 * dma_free_rx_skbufs - free RX dma buffers
1389 * @priv: private structure
1390 * @queue: RX queue index
1391 */
1392 static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1393 {
1394 int i;
1395
1396 for (i = 0; i < DMA_RX_SIZE; i++)
1397 stmmac_free_rx_buffer(priv, queue, i);
1398 }
1399
1400 /**
1401 * dma_free_tx_skbufs - free TX dma buffers
1402 * @priv: private structure
1403 * @queue: TX queue index
1404 */
1405 static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
1406 {
1407 int i;
1408
1409 for (i = 0; i < DMA_TX_SIZE; i++)
1410 stmmac_free_tx_buffer(priv, queue, i);
1411 }
1412
1413 /**
1414 * free_dma_rx_desc_resources - free RX dma desc resources
1415 * @priv: private structure
1416 */
1417 static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1418 {
1419 u32 rx_count = priv->plat->rx_queues_to_use;
1420 u32 queue;
1421
1422 /* Free RX queue resources */
1423 for (queue = 0; queue < rx_count; queue++) {
1424 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1425
1426 /* Release the DMA RX socket buffers */
1427 dma_free_rx_skbufs(priv, queue);
1428
1429 /* Free DMA regions of consistent memory previously allocated */
1430 if (!priv->extend_desc)
1431 dma_free_coherent(priv->device,
1432 DMA_RX_SIZE * sizeof(struct dma_desc),
1433 rx_q->dma_rx, rx_q->dma_rx_phy);
1434 else
1435 dma_free_coherent(priv->device, DMA_RX_SIZE *
1436 sizeof(struct dma_extended_desc),
1437 rx_q->dma_erx, rx_q->dma_rx_phy);
1438
1439 kfree(rx_q->rx_skbuff_dma);
1440 kfree(rx_q->rx_skbuff);
1441 }
1442 }
1443
1444 /**
1445 * free_dma_tx_desc_resources - free TX dma desc resources
1446 * @priv: private structure
1447 */
1448 static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1449 {
1450 u32 tx_count = priv->plat->tx_queues_to_use;
1451 u32 queue;
1452
1453 /* Free TX queue resources */
1454 for (queue = 0; queue < tx_count; queue++) {
1455 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1456
1457 /* Release the DMA TX socket buffers */
1458 dma_free_tx_skbufs(priv, queue);
1459
1460 /* Free DMA regions of consistent memory previously allocated */
1461 if (!priv->extend_desc)
1462 dma_free_coherent(priv->device,
1463 DMA_TX_SIZE * sizeof(struct dma_desc),
1464 tx_q->dma_tx, tx_q->dma_tx_phy);
1465 else
1466 dma_free_coherent(priv->device, DMA_TX_SIZE *
1467 sizeof(struct dma_extended_desc),
1468 tx_q->dma_etx, tx_q->dma_tx_phy);
1469
1470 kfree(tx_q->tx_skbuff_dma);
1471 kfree(tx_q->tx_skbuff);
1472 }
1473 }
1474
1475 /**
1476 * alloc_dma_rx_desc_resources - alloc RX resources.
1477 * @priv: private structure
1478  * Description: according to which descriptor can be used (extended or basic)
1479  * this function allocates the resources for the RX path. In case of
1480  * reception, for example, it pre-allocates the RX socket buffers in order to
1481  * allow the zero-copy mechanism.
1482 */
1483 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
1484 {
1485 u32 rx_count = priv->plat->rx_queues_to_use;
1486 int ret = -ENOMEM;
1487 u32 queue;
1488
1489 /* RX queues buffers and DMA */
1490 for (queue = 0; queue < rx_count; queue++) {
1491 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1492
1493 rx_q->queue_index = queue;
1494 rx_q->priv_data = priv;
1495
1496 rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
1497 sizeof(dma_addr_t),
1498 GFP_KERNEL);
1499 if (!rx_q->rx_skbuff_dma)
1500 goto err_dma;
1501
1502 rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
1503 sizeof(struct sk_buff *),
1504 GFP_KERNEL);
1505 if (!rx_q->rx_skbuff)
1506 goto err_dma;
1507
1508 if (priv->extend_desc) {
1509 rx_q->dma_erx = dma_zalloc_coherent(priv->device,
1510 DMA_RX_SIZE *
1511 sizeof(struct
1512 dma_extended_desc),
1513 &rx_q->dma_rx_phy,
1514 GFP_KERNEL);
1515 if (!rx_q->dma_erx)
1516 goto err_dma;
1517
1518 } else {
1519 rx_q->dma_rx = dma_zalloc_coherent(priv->device,
1520 DMA_RX_SIZE *
1521 sizeof(struct
1522 dma_desc),
1523 &rx_q->dma_rx_phy,
1524 GFP_KERNEL);
1525 if (!rx_q->dma_rx)
1526 goto err_dma;
1527 }
1528 }
1529
1530 return 0;
1531
1532 err_dma:
1533 free_dma_rx_desc_resources(priv);
1534
1535 return ret;
1536 }
1537
1538 /**
1539 * alloc_dma_tx_desc_resources - alloc TX resources.
1540 * @priv: private structure
1541  * Description: according to which descriptor can be used (extended or basic)
1542  * this function allocates the resources for the TX path: the descriptor ring
1543  * and the arrays used to track the socket buffers and their DMA mappings
1544  * during transmission.
1545 */
1546 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
1547 {
1548 u32 tx_count = priv->plat->tx_queues_to_use;
1549 int ret = -ENOMEM;
1550 u32 queue;
1551
1552 /* TX queues buffers and DMA */
1553 for (queue = 0; queue < tx_count; queue++) {
1554 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1555
1556 tx_q->queue_index = queue;
1557 tx_q->priv_data = priv;
1558
1559 tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
1560 sizeof(*tx_q->tx_skbuff_dma),
1561 GFP_KERNEL);
1562 if (!tx_q->tx_skbuff_dma)
1563 goto err_dma;
1564
1565 tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
1566 sizeof(struct sk_buff *),
1567 GFP_KERNEL);
1568 if (!tx_q->tx_skbuff)
1569 goto err_dma;
1570
1571 if (priv->extend_desc) {
1572 tx_q->dma_etx = dma_zalloc_coherent(priv->device,
1573 DMA_TX_SIZE *
1574 sizeof(struct
1575 dma_extended_desc),
1576 &tx_q->dma_tx_phy,
1577 GFP_KERNEL);
1578 if (!tx_q->dma_etx)
1579 goto err_dma;
1580 } else {
1581 tx_q->dma_tx = dma_zalloc_coherent(priv->device,
1582 DMA_TX_SIZE *
1583 sizeof(struct
1584 dma_desc),
1585 &tx_q->dma_tx_phy,
1586 GFP_KERNEL);
1587 if (!tx_q->dma_tx)
1588 goto err_dma;
1589 }
1590 }
1591
1592 return 0;
1593
1594 err_dma:
1595 free_dma_tx_desc_resources(priv);
1596
1597 return ret;
1598 }
1599
1600 /**
1601 * alloc_dma_desc_resources - alloc TX/RX resources.
1602 * @priv: private structure
1603  * Description: according to which descriptor can be used (extended or basic)
1604  * this function allocates the resources for TX and RX paths. In case of
1605  * reception, for example, it pre-allocates the RX socket buffers in order to
1606  * allow the zero-copy mechanism.
1607 */
1608 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1609 {
1610 /* RX Allocation */
1611 int ret = alloc_dma_rx_desc_resources(priv);
1612
1613 if (ret)
1614 return ret;
1615
1616 ret = alloc_dma_tx_desc_resources(priv);
1617
1618 return ret;
1619 }
1620
1621 /**
1622 * free_dma_desc_resources - free dma desc resources
1623 * @priv: private structure
1624 */
1625 static void free_dma_desc_resources(struct stmmac_priv *priv)
1626 {
1627 /* Release the DMA RX socket buffers */
1628 free_dma_rx_desc_resources(priv);
1629
1630 /* Release the DMA TX socket buffers */
1631 free_dma_tx_desc_resources(priv);
1632 }
1633
1634 /**
1635 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
1636 * @priv: driver private structure
1637 * Description: It is used for enabling the rx queues in the MAC
1638 */
1639 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
1640 {
1641 u32 rx_queues_count = priv->plat->rx_queues_to_use;
1642 int queue;
1643 u8 mode;
1644
1645 for (queue = 0; queue < rx_queues_count; queue++) {
1646 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
1647 priv->hw->mac->rx_queue_enable(priv->hw, mode, queue);
1648 }
1649 }
1650
1651 /**
1652 * stmmac_start_rx_dma - start RX DMA channel
1653 * @priv: driver private structure
1654 * @chan: RX channel index
1655 * Description:
1656 * This starts a RX DMA channel
1657 */
1658 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
1659 {
1660 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
1661 priv->hw->dma->start_rx(priv->ioaddr, chan);
1662 }
1663
1664 /**
1665 * stmmac_start_tx_dma - start TX DMA channel
1666 * @priv: driver private structure
1667 * @chan: TX channel index
1668 * Description:
1669 * This starts a TX DMA channel
1670 */
1671 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
1672 {
1673 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
1674 priv->hw->dma->start_tx(priv->ioaddr, chan);
1675 }
1676
1677 /**
1678 * stmmac_stop_rx_dma - stop RX DMA channel
1679 * @priv: driver private structure
1680 * @chan: RX channel index
1681 * Description:
1682 * This stops a RX DMA channel
1683 */
1684 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
1685 {
1686 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
1687 priv->hw->dma->stop_rx(priv->ioaddr, chan);
1688 }
1689
1690 /**
1691 * stmmac_stop_tx_dma - stop TX DMA channel
1692 * @priv: driver private structure
1693 * @chan: TX channel index
1694 * Description:
1695 * This stops a TX DMA channel
1696 */
1697 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
1698 {
1699 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
1700 priv->hw->dma->stop_tx(priv->ioaddr, chan);
1701 }
1702
1703 /**
1704 * stmmac_start_all_dma - start all RX and TX DMA channels
1705 * @priv: driver private structure
1706 * Description:
1707 * This starts all the RX and TX DMA channels
1708 */
1709 static void stmmac_start_all_dma(struct stmmac_priv *priv)
1710 {
1711 u32 rx_channels_count = priv->plat->rx_queues_to_use;
1712 u32 tx_channels_count = priv->plat->tx_queues_to_use;
1713 u32 chan = 0;
1714
1715 for (chan = 0; chan < rx_channels_count; chan++)
1716 stmmac_start_rx_dma(priv, chan);
1717
1718 for (chan = 0; chan < tx_channels_count; chan++)
1719 stmmac_start_tx_dma(priv, chan);
1720 }
1721
1722 /**
1723 * stmmac_stop_all_dma - stop all RX and TX DMA channels
1724 * @priv: driver private structure
1725 * Description:
1726 * This stops the RX and TX DMA channels
1727 */
1728 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
1729 {
1730 u32 rx_channels_count = priv->plat->rx_queues_to_use;
1731 u32 tx_channels_count = priv->plat->tx_queues_to_use;
1732 u32 chan = 0;
1733
1734 for (chan = 0; chan < rx_channels_count; chan++)
1735 stmmac_stop_rx_dma(priv, chan);
1736
1737 for (chan = 0; chan < tx_channels_count; chan++)
1738 stmmac_stop_tx_dma(priv, chan);
1739 }
1740
1741 /**
1742 * stmmac_dma_operation_mode - HW DMA operation mode
1743 * @priv: driver private structure
1744 * Description: it is used for configuring the DMA operation mode register in
1745 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1746 */
1747 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1748 {
1749 u32 rx_channels_count = priv->plat->rx_queues_to_use;
1750 u32 tx_channels_count = priv->plat->tx_queues_to_use;
1751 int rxfifosz = priv->plat->rx_fifo_size;
1752 int txfifosz = priv->plat->tx_fifo_size;
1753 u32 txmode = 0;
1754 u32 rxmode = 0;
1755 u32 chan = 0;
1756 u8 qmode = 0;
1757
1758 if (rxfifosz == 0)
1759 rxfifosz = priv->dma_cap.rx_fifo_size;
1760 if (txfifosz == 0)
1761 txfifosz = priv->dma_cap.tx_fifo_size;
1762
1763 /* Adjust for real per queue fifo size */
1764 rxfifosz /= rx_channels_count;
1765 txfifosz /= tx_channels_count;
1766
1767 if (priv->plat->force_thresh_dma_mode) {
1768 txmode = tc;
1769 rxmode = tc;
1770 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
1771 /*
1772 * In case of GMAC, SF mode can be enabled
1773 * to perform the TX COE in HW. This depends on:
1774 		 * 1) TX COE being actually supported
1775 		 * 2) there being no bugged Jumbo frame support
1776 		 * that requires not inserting the csum in the TDES.
1777 */
1778 txmode = SF_DMA_MODE;
1779 rxmode = SF_DMA_MODE;
1780 priv->xstats.threshold = SF_DMA_MODE;
1781 } else {
1782 txmode = tc;
1783 rxmode = SF_DMA_MODE;
1784 }
1785
1786 /* configure all channels */
1787 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1788 for (chan = 0; chan < rx_channels_count; chan++) {
1789 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1790
1791 priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1792 rxfifosz, qmode);
1793 }
1794
1795 for (chan = 0; chan < tx_channels_count; chan++) {
1796 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1797
1798 priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan,
1799 txfifosz, qmode);
1800 }
1801 } else {
1802 priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1803 rxfifosz);
1804 }
1805 }
1806
1807 /**
1808 * stmmac_tx_clean - to manage the transmission completion
1809 * @priv: driver private structure
1810 * @queue: TX queue index
1811 * Description: it reclaims the transmit resources after transmission completes.
1812 */
1813 static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
1814 {
1815 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1816 unsigned int bytes_compl = 0, pkts_compl = 0;
1817 unsigned int entry;
1818
1819 netif_tx_lock(priv->dev);
1820
1821 priv->xstats.tx_clean++;
1822
1823 entry = tx_q->dirty_tx;
1824 while (entry != tx_q->cur_tx) {
1825 struct sk_buff *skb = tx_q->tx_skbuff[entry];
1826 struct dma_desc *p;
1827 int status;
1828
1829 if (priv->extend_desc)
1830 p = (struct dma_desc *)(tx_q->dma_etx + entry);
1831 else
1832 p = tx_q->dma_tx + entry;
1833
1834 status = priv->hw->desc->tx_status(&priv->dev->stats,
1835 &priv->xstats, p,
1836 priv->ioaddr);
1837 /* Check if the descriptor is owned by the DMA */
1838 if (unlikely(status & tx_dma_own))
1839 break;
1840
1841 /* Just consider the last segment and ...*/
1842 if (likely(!(status & tx_not_ls))) {
1843 /* ... verify the status error condition */
1844 if (unlikely(status & tx_err)) {
1845 priv->dev->stats.tx_errors++;
1846 } else {
1847 priv->dev->stats.tx_packets++;
1848 priv->xstats.tx_pkt_n++;
1849 }
1850 stmmac_get_tx_hwtstamp(priv, p, skb);
1851 }
1852
1853 if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1854 if (tx_q->tx_skbuff_dma[entry].map_as_page)
1855 dma_unmap_page(priv->device,
1856 tx_q->tx_skbuff_dma[entry].buf,
1857 tx_q->tx_skbuff_dma[entry].len,
1858 DMA_TO_DEVICE);
1859 else
1860 dma_unmap_single(priv->device,
1861 tx_q->tx_skbuff_dma[entry].buf,
1862 tx_q->tx_skbuff_dma[entry].len,
1863 DMA_TO_DEVICE);
1864 tx_q->tx_skbuff_dma[entry].buf = 0;
1865 tx_q->tx_skbuff_dma[entry].len = 0;
1866 tx_q->tx_skbuff_dma[entry].map_as_page = false;
1867 }
1868
1869 if (priv->hw->mode->clean_desc3)
1870 priv->hw->mode->clean_desc3(tx_q, p);
1871
1872 tx_q->tx_skbuff_dma[entry].last_segment = false;
1873 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
1874
1875 if (likely(skb != NULL)) {
1876 pkts_compl++;
1877 bytes_compl += skb->len;
1878 dev_consume_skb_any(skb);
1879 tx_q->tx_skbuff[entry] = NULL;
1880 }
1881
1882 priv->hw->desc->release_tx_desc(p, priv->mode);
1883
1884 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
1885 }
1886 tx_q->dirty_tx = entry;
1887
1888 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1889 pkts_compl, bytes_compl);
1890
1891 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1892 queue))) &&
1893 stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
1894
1895 netif_dbg(priv, tx_done, priv->dev,
1896 "%s: restart transmit\n", __func__);
1897 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
1898 }
1899
1900 if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1901 stmmac_enable_eee_mode(priv);
1902 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
1903 }
1904 netif_tx_unlock(priv->dev);
1905 }
1906
1907 static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan)
1908 {
1909 priv->hw->dma->enable_dma_irq(priv->ioaddr, chan);
1910 }
1911
1912 static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan)
1913 {
1914 priv->hw->dma->disable_dma_irq(priv->ioaddr, chan);
1915 }
1916
1917 /**
1918 * stmmac_tx_err - to manage the tx error
1919 * @priv: driver private structure
1920 * @chan: channel index
1921 * Description: it cleans the descriptors and restarts the transmission
1922 * in case of transmission errors.
1923 */
1924 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
1925 {
1926 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
1927 int i;
1928
1929 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
1930
1931 stmmac_stop_tx_dma(priv, chan);
1932 dma_free_tx_skbufs(priv, chan);
1933 for (i = 0; i < DMA_TX_SIZE; i++)
1934 if (priv->extend_desc)
1935 priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
1936 priv->mode,
1937 (i == DMA_TX_SIZE - 1));
1938 else
1939 priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
1940 priv->mode,
1941 (i == DMA_TX_SIZE - 1));
1942 tx_q->dirty_tx = 0;
1943 tx_q->cur_tx = 0;
1944 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
1945 stmmac_start_tx_dma(priv, chan);
1946
1947 priv->dev->stats.tx_errors++;
1948 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
1949 }
1950
1951 /**
1952 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
1953 * @priv: driver private structure
1954 * @txmode: TX operating mode
1955 * @rxmode: RX operating mode
1956 * @chan: channel index
1957 * Description: it is used for configuring the DMA operation mode at
1958 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
1959 * mode.
1960 */
1961 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1962 u32 rxmode, u32 chan)
1963 {
1964 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
1965 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
1966 u32 rx_channels_count = priv->plat->rx_queues_to_use;
1967 u32 tx_channels_count = priv->plat->tx_queues_to_use;
1968 int rxfifosz = priv->plat->rx_fifo_size;
1969 int txfifosz = priv->plat->tx_fifo_size;
1970
1971 if (rxfifosz == 0)
1972 rxfifosz = priv->dma_cap.rx_fifo_size;
1973 if (txfifosz == 0)
1974 txfifosz = priv->dma_cap.tx_fifo_size;
1975
1976 /* Adjust for real per queue fifo size */
1977 rxfifosz /= rx_channels_count;
1978 txfifosz /= tx_channels_count;
1979
1980 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1981 priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1982 rxfifosz, rxqmode);
1983 priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan,
1984 txfifosz, txqmode);
1985 } else {
1986 priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1987 rxfifosz);
1988 }
1989 }
1990
1991 /**
1992 * stmmac_dma_interrupt - DMA ISR
1993 * @priv: driver private structure
1994 * Description: this is the DMA ISR. It is called by the main ISR.
1995 * It calls the dwmac dma routine and schedules the poll method in case
1996 * some work can be done.
1997 */
1998 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
1999 {
2000 u32 tx_channel_count = priv->plat->tx_queues_to_use;
2001 int status;
2002 u32 chan;
2003
2004 for (chan = 0; chan < tx_channel_count; chan++) {
2005 struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
2006
2007 status = priv->hw->dma->dma_interrupt(priv->ioaddr,
2008 &priv->xstats, chan);
2009 if (likely((status & handle_rx)) || (status & handle_tx)) {
2010 if (likely(napi_schedule_prep(&rx_q->napi))) {
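/* Keep this channel's DMA IRQ masked until stmmac_poll() re-enables it */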
2011 stmmac_disable_dma_irq(priv, chan);
2012 __napi_schedule(&rx_q->napi);
2013 }
2014 }
2015
2016 if (unlikely(status & tx_hard_error_bump_tc)) {
2017 /* Try to bump up the dma threshold on this failure */
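/* tc is raised in steps of 64 while it is <= 256 and SF mode is not already in use */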
2018 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2019 (tc <= 256)) {
2020 tc += 64;
2021 if (priv->plat->force_thresh_dma_mode)
2022 stmmac_set_dma_operation_mode(priv,
2023 tc,
2024 tc,
2025 chan);
2026 else
2027 stmmac_set_dma_operation_mode(priv,
2028 tc,
2029 SF_DMA_MODE,
2030 chan);
2031 priv->xstats.threshold = tc;
2032 }
2033 } else if (unlikely(status == tx_hard_error)) {
2034 stmmac_tx_err(priv, chan);
2035 }
2036 }
2037 }
2038
2039 /**
2040 * stmmac_mmc_setup - setup the Mac Management Counters (MMC)
2041 * @priv: driver private structure
2042 * Description: this masks the MMC irq since the counters are managed in SW.
2043 */
2044 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2045 {
2046 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2047 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2048
2049 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2050 priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
2051 priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
2052 } else {
2053 priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
2054 priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
2055 }
2056
2057 dwmac_mmc_intr_all_mask(priv->mmcaddr);
2058
2059 if (priv->dma_cap.rmon) {
2060 dwmac_mmc_ctrl(priv->mmcaddr, mode);
2061 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2062 } else
2063 netdev_info(priv->dev, "No MAC Management Counters available\n");
2064 }
2065
2066 /**
2067 * stmmac_selec_desc_mode - to select among normal/alternate/extended descriptors
2068 * @priv: driver private structure
2069 * Description: select the Enhanced/Alternate or Normal descriptors.
2070 * In case of Enhanced/Alternate, it checks if the extended descriptors are
2071 * supported by the HW capability register.
2072 */
2073 static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
2074 {
2075 if (priv->plat->enh_desc) {
2076 dev_info(priv->device, "Enhanced/Alternate descriptors\n");
2077
2078 /* GMAC older than 3.50 has no extended descriptors */
2079 if (priv->synopsys_id >= DWMAC_CORE_3_50) {
2080 dev_info(priv->device, "Enabled extended descriptors\n");
2081 priv->extend_desc = 1;
2082 } else
2083 dev_warn(priv->device, "Extended descriptors not supported\n");
2084
2085 priv->hw->desc = &enh_desc_ops;
2086 } else {
2087 dev_info(priv->device, "Normal descriptors\n");
2088 priv->hw->desc = &ndesc_ops;
2089 }
2090 }
2091
2092 /**
2093 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2094 * @priv: driver private structure
2095 * Description:
2096 * newer GMAC chip generations have a register that indicates the
2097 * presence of the optional features/functions.
2098 * It can also be used to override the values passed through the
2099 * platform, which is necessary for the old MAC10/100 and GMAC chips.
2100 */
2101 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2102 {
2103 u32 ret = 0;
2104
2105 if (priv->hw->dma->get_hw_feature) {
2106 priv->hw->dma->get_hw_feature(priv->ioaddr,
2107 &priv->dma_cap);
2108 ret = 1;
2109 }
2110
2111 return ret;
2112 }
2113
2114 /**
2115 * stmmac_check_ether_addr - check if the MAC addr is valid
2116 * @priv: driver private structure
2117 * Description:
2118 * it verifies that the MAC address is valid; in case of failure it
2119 * generates a random MAC address
2120 */
2121 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2122 {
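/* Fallback order: address already in dev_addr -> address read back from
 * the MAC registers -> randomly generated address.
 */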
2123 if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2124 priv->hw->mac->get_umac_addr(priv->hw,
2125 priv->dev->dev_addr, 0);
2126 if (!is_valid_ether_addr(priv->dev->dev_addr))
2127 eth_hw_addr_random(priv->dev);
2128 netdev_info(priv->dev, "device MAC address %pM\n",
2129 priv->dev->dev_addr);
2130 }
2131 }
2132
2133 /**
2134 * stmmac_init_dma_engine - DMA init.
2135 * @priv: driver private structure
2136 * Description:
2137 * It inits the DMA invoking the specific MAC/GMAC callback.
2138 * Some DMA parameters can be passed from the platform;
2139 * if they are not passed, a default is kept for the MAC or GMAC.
2140 */
2141 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2142 {
2143 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2144 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2145 struct stmmac_rx_queue *rx_q;
2146 struct stmmac_tx_queue *tx_q;
2147 u32 dummy_dma_rx_phy = 0;
2148 u32 dummy_dma_tx_phy = 0;
2149 u32 chan = 0;
2150 int atds = 0;
2151 int ret = 0;
2152
2153 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2154 dev_err(priv->device, "Invalid DMA configuration\n");
2155 return -EINVAL;
2156 }
2157
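/* atds requests the alternate descriptor size needed when extended descriptors are used in ring mode */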
2158 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2159 atds = 1;
2160
2161 ret = priv->hw->dma->reset(priv->ioaddr);
2162 if (ret) {
2163 dev_err(priv->device, "Failed to reset the dma\n");
2164 return ret;
2165 }
2166
2167 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2168 /* DMA Configuration */
2169 priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2170 dummy_dma_tx_phy, dummy_dma_rx_phy, atds);
2171
2172 /* DMA RX Channel Configuration */
2173 for (chan = 0; chan < rx_channels_count; chan++) {
2174 rx_q = &priv->rx_queue[chan];
2175
2176 priv->hw->dma->init_rx_chan(priv->ioaddr,
2177 priv->plat->dma_cfg,
2178 rx_q->dma_rx_phy, chan);
2179
2180 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
2181 (DMA_RX_SIZE * sizeof(struct dma_desc));
2182 priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
2183 rx_q->rx_tail_addr,
2184 chan);
2185 }
2186
2187 /* DMA TX Channel Configuration */
2188 for (chan = 0; chan < tx_channels_count; chan++) {
2189 tx_q = &priv->tx_queue[chan];
2190
2191 priv->hw->dma->init_chan(priv->ioaddr,
2192 priv->plat->dma_cfg,
2193 chan);
2194
2195 priv->hw->dma->init_tx_chan(priv->ioaddr,
2196 priv->plat->dma_cfg,
2197 tx_q->dma_tx_phy, chan);
2198
2199 tx_q->tx_tail_addr = tx_q->dma_tx_phy +
2200 (DMA_TX_SIZE * sizeof(struct dma_desc));
2201 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr,
2202 tx_q->tx_tail_addr,
2203 chan);
2204 }
2205 } else {
2206 rx_q = &priv->rx_queue[chan];
2207 tx_q = &priv->tx_queue[chan];
2208 priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2209 tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds);
2210 }
2211
2212 if (priv->plat->axi && priv->hw->dma->axi)
2213 priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
2214
2215 return ret;
2216 }
2217
2218 /**
2219 * stmmac_tx_timer - mitigation sw timer for tx.
2220 * @data: data pointer
2221 * Description:
2222 * This is the timer handler to directly invoke the stmmac_tx_clean.
2223 */
2224 static void stmmac_tx_timer(unsigned long data)
2225 {
2226 struct stmmac_priv *priv = (struct stmmac_priv *)data;
2227 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2228 u32 queue;
2229
2230 /* let's scan all the tx queues */
2231 for (queue = 0; queue < tx_queues_count; queue++)
2232 stmmac_tx_clean(priv, queue);
2233 }
2234
2235 /**
2236 * stmmac_init_tx_coalesce - init tx mitigation options.
2237 * @priv: driver private structure
2238 * Description:
2239 * This inits the transmit coalesce parameters: i.e. timer rate,
2240 * timer handler and default threshold used for enabling the
2241 * interrupt on completion bit.
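 * The timer is re-armed from the transmit path while frames accumulate;
 * once tx_coal_frames frames are pending, the IC bit is set on the
 * descriptor instead (see stmmac_xmit()).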
2242 */
2243 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2244 {
2245 priv->tx_coal_frames = STMMAC_TX_FRAMES;
2246 priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2247 setup_timer(&priv->txtimer, stmmac_tx_timer, (unsigned long)priv);
2248 priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
2249 add_timer(&priv->txtimer);
2250 }
2251
2252 static void stmmac_set_rings_length(struct stmmac_priv *priv)
2253 {
2254 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2255 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2256 u32 chan;
2257
2258 /* set TX ring length */
2259 if (priv->hw->dma->set_tx_ring_len) {
2260 for (chan = 0; chan < tx_channels_count; chan++)
2261 priv->hw->dma->set_tx_ring_len(priv->ioaddr,
2262 (DMA_TX_SIZE - 1), chan);
2263 }
2264
2265 /* set RX ring length */
2266 if (priv->hw->dma->set_rx_ring_len) {
2267 for (chan = 0; chan < rx_channels_count; chan++)
2268 priv->hw->dma->set_rx_ring_len(priv->ioaddr,
2269 (DMA_RX_SIZE - 1), chan);
2270 }
2271 }
2272
2273 /**
2274 * stmmac_set_tx_queue_weight - Set TX queue weight
2275 * @priv: driver private structure
2276 * Description: It is used for setting the TX queue weights
2277 */
2278 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2279 {
2280 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2281 u32 weight;
2282 u32 queue;
2283
2284 for (queue = 0; queue < tx_queues_count; queue++) {
2285 weight = priv->plat->tx_queues_cfg[queue].weight;
2286 priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue);
2287 }
2288 }
2289
2290 /**
2291 * stmmac_configure_cbs - Configure CBS in TX queue
2292 * @priv: driver private structure
2293 * Description: It is used for configuring CBS in AVB TX queues
2294 */
2295 static void stmmac_configure_cbs(struct stmmac_priv *priv)
2296 {
2297 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2298 u32 mode_to_use;
2299 u32 queue;
2300
2301 /* queue 0 is reserved for legacy traffic */
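/* only AVB queues get CBS parameters; DCB queues are skipped below */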
2302 for (queue = 1; queue < tx_queues_count; queue++) {
2303 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2304 if (mode_to_use == MTL_QUEUE_DCB)
2305 continue;
2306
2307 priv->hw->mac->config_cbs(priv->hw,
2308 priv->plat->tx_queues_cfg[queue].send_slope,
2309 priv->plat->tx_queues_cfg[queue].idle_slope,
2310 priv->plat->tx_queues_cfg[queue].high_credit,
2311 priv->plat->tx_queues_cfg[queue].low_credit,
2312 queue);
2313 }
2314 }
2315
2316 /**
2317 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2318 * @priv: driver private structure
2319 * Description: It is used for mapping RX queues to RX dma channels
2320 */
2321 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2322 {
2323 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2324 u32 queue;
2325 u32 chan;
2326
2327 for (queue = 0; queue < rx_queues_count; queue++) {
2328 chan = priv->plat->rx_queues_cfg[queue].chan;
2329 priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan);
2330 }
2331 }
2332
2333 /**
2334 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2335 * @priv: driver private structure
2336 * Description: It is used for configuring the RX Queue Priority
2337 */
2338 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2339 {
2340 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2341 u32 queue;
2342 u32 prio;
2343
2344 for (queue = 0; queue < rx_queues_count; queue++) {
2345 if (!priv->plat->rx_queues_cfg[queue].use_prio)
2346 continue;
2347
2348 prio = priv->plat->rx_queues_cfg[queue].prio;
2349 priv->hw->mac->rx_queue_prio(priv->hw, prio, queue);
2350 }
2351 }
2352
2353 /**
2354 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2355 * @priv: driver private structure
2356 * Description: It is used for configuring the TX Queue Priority
2357 */
2358 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2359 {
2360 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2361 u32 queue;
2362 u32 prio;
2363
2364 for (queue = 0; queue < tx_queues_count; queue++) {
2365 if (!priv->plat->tx_queues_cfg[queue].use_prio)
2366 continue;
2367
2368 prio = priv->plat->tx_queues_cfg[queue].prio;
2369 priv->hw->mac->tx_queue_prio(priv->hw, prio, queue);
2370 }
2371 }
2372
2373 /**
2374 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2375 * @priv: driver private structure
2376 * Description: It is used for configuring the RX queue routing
2377 */
2378 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2379 {
2380 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2381 u32 queue;
2382 u8 packet;
2383
2384 for (queue = 0; queue < rx_queues_count; queue++) {
2385 /* no specific packet type routing specified for the queue */
2386 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2387 continue;
2388
2389 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2390 priv->hw->mac->rx_queue_routing(priv->hw, packet, queue);
2391 }
2392 }
2393
2394 /**
2395 * stmmac_mtl_configuration - Configure MTL
2396 * @priv: driver private structure
2397 * Description: It is used for configuring MTL
2398 */
2399 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2400 {
2401 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2402 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2403
2404 if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight)
2405 stmmac_set_tx_queue_weight(priv);
2406
2407 /* Configure MTL RX algorithms */
2408 if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms)
2409 priv->hw->mac->prog_mtl_rx_algorithms(priv->hw,
2410 priv->plat->rx_sched_algorithm);
2411
2412 /* Configure MTL TX algorithms */
2413 if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms)
2414 priv->hw->mac->prog_mtl_tx_algorithms(priv->hw,
2415 priv->plat->tx_sched_algorithm);
2416
2417 /* Configure CBS in AVB TX queues */
2418 if (tx_queues_count > 1 && priv->hw->mac->config_cbs)
2419 stmmac_configure_cbs(priv);
2420
2421 /* Map RX MTL to DMA channels */
2422 if (priv->hw->mac->map_mtl_to_dma)
2423 stmmac_rx_queue_dma_chan_map(priv);
2424
2425 /* Enable MAC RX Queues */
2426 if (priv->hw->mac->rx_queue_enable)
2427 stmmac_mac_enable_rx_queues(priv);
2428
2429 /* Set RX priorities */
2430 if (rx_queues_count > 1 && priv->hw->mac->rx_queue_prio)
2431 stmmac_mac_config_rx_queues_prio(priv);
2432
2433 /* Set TX priorities */
2434 if (tx_queues_count > 1 && priv->hw->mac->tx_queue_prio)
2435 stmmac_mac_config_tx_queues_prio(priv);
2436
2437 /* Set RX routing */
2438 if (rx_queues_count > 1 && priv->hw->mac->rx_queue_routing)
2439 stmmac_mac_config_rx_queues_routing(priv);
2440 }
2441
2442 /**
2443 * stmmac_hw_setup - setup mac in a usable state.
2444 * @dev : pointer to the device structure.
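 * @init_ptp: if true, also enable the PTP reference clock and init PTP support.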
2445 * Description:
2446 * this is the main function to setup the HW in a usable state: the
2447 * dma engine is reset, the core registers are configured (e.g. AXI,
2448 * Checksum features, timers) and the DMA is then ready to start receiving
2449 * and transmitting.
2450 * Return value:
2451 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2452 * file on failure.
2453 */
2454 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
2455 {
2456 struct stmmac_priv *priv = netdev_priv(dev);
2457 u32 rx_cnt = priv->plat->rx_queues_to_use;
2458 u32 tx_cnt = priv->plat->tx_queues_to_use;
2459 u32 chan;
2460 int ret;
2461
2462 /* DMA initialization and SW reset */
2463 ret = stmmac_init_dma_engine(priv);
2464 if (ret < 0) {
2465 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2466 __func__);
2467 return ret;
2468 }
2469
2470 /* Copy the MAC addr into the HW */
2471 priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);
2472
2473 /* PS and related bits will be programmed according to the speed */
2474 if (priv->hw->pcs) {
2475 int speed = priv->plat->mac_port_sel_speed;
2476
2477 if ((speed == SPEED_10) || (speed == SPEED_100) ||
2478 (speed == SPEED_1000)) {
2479 priv->hw->ps = speed;
2480 } else {
2481 dev_warn(priv->device, "invalid port speed\n");
2482 priv->hw->ps = 0;
2483 }
2484 }
2485
2486 /* Initialize the MAC Core */
2487 priv->hw->mac->core_init(priv->hw, dev->mtu);
2488
2489 /* Initialize MTL */
2490 if (priv->synopsys_id >= DWMAC_CORE_4_00)
2491 stmmac_mtl_configuration(priv);
2492
2493 ret = priv->hw->mac->rx_ipc(priv->hw);
2494 if (!ret) {
2495 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2496 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2497 priv->hw->rx_csum = 0;
2498 }
2499
2500 /* Enable the MAC Rx/Tx */
2501 priv->hw->mac->set_mac(priv->ioaddr, true);
2502
2503 /* Set the HW DMA mode and the COE */
2504 stmmac_dma_operation_mode(priv);
2505
2506 stmmac_mmc_setup(priv);
2507
2508 if (init_ptp) {
2509 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2510 if (ret < 0)
2511 netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2512
2513 ret = stmmac_init_ptp(priv);
2514 if (ret == -EOPNOTSUPP)
2515 netdev_warn(priv->dev, "PTP not supported by HW\n");
2516 else if (ret)
2517 netdev_warn(priv->dev, "PTP init failed\n");
2518 }
2519
2520 #ifdef CONFIG_DEBUG_FS
2521 ret = stmmac_init_fs(dev);
2522 if (ret < 0)
2523 netdev_warn(priv->dev, "%s: failed debugFS registration\n",
2524 __func__);
2525 #endif
2526 /* Start the ball rolling... */
2527 stmmac_start_all_dma(priv);
2528
2529 priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2530
2531 if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
2532 priv->rx_riwt = MAX_DMA_RIWT;
2533 priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
2534 }
2535
2536 if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
2537 priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
2538
2539 /* set TX and RX rings length */
2540 stmmac_set_rings_length(priv);
2541
2542 /* Enable TSO */
2543 if (priv->tso) {
2544 for (chan = 0; chan < tx_cnt; chan++)
2545 priv->hw->dma->enable_tso(priv->ioaddr, 1, chan);
2546 }
2547
2548 return 0;
2549 }
2550
2551 static void stmmac_hw_teardown(struct net_device *dev)
2552 {
2553 struct stmmac_priv *priv = netdev_priv(dev);
2554
2555 clk_disable_unprepare(priv->plat->clk_ptp_ref);
2556 }
2557
2558 /**
2559 * stmmac_open - open entry point of the driver
2560 * @dev : pointer to the device structure.
2561 * Description:
2562 * This function is the open entry point of the driver.
2563 * Return value:
2564 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2565 * file on failure.
2566 */
2567 static int stmmac_open(struct net_device *dev)
2568 {
2569 struct stmmac_priv *priv = netdev_priv(dev);
2570 int ret;
2571
2572 stmmac_check_ether_addr(priv);
2573
2574 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2575 priv->hw->pcs != STMMAC_PCS_TBI &&
2576 priv->hw->pcs != STMMAC_PCS_RTBI) {
2577 ret = stmmac_init_phy(dev);
2578 if (ret) {
2579 netdev_err(priv->dev,
2580 "%s: Cannot attach to PHY (error: %d)\n",
2581 __func__, ret);
2582 return ret;
2583 }
2584 }
2585
2586 /* Extra statistics */
2587 memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2588 priv->xstats.threshold = tc;
2589
2590 priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
2591 priv->rx_copybreak = STMMAC_RX_COPYBREAK;
2592
2593 ret = alloc_dma_desc_resources(priv);
2594 if (ret < 0) {
2595 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2596 __func__);
2597 goto dma_desc_error;
2598 }
2599
2600 ret = init_dma_desc_rings(dev, GFP_KERNEL);
2601 if (ret < 0) {
2602 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2603 __func__);
2604 goto init_error;
2605 }
2606
2607 ret = stmmac_hw_setup(dev, true);
2608 if (ret < 0) {
2609 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2610 goto init_error;
2611 }
2612
2613 stmmac_init_tx_coalesce(priv);
2614
2615 if (dev->phydev)
2616 phy_start(dev->phydev);
2617
2618 /* Request the IRQ lines */
2619 ret = request_irq(dev->irq, stmmac_interrupt,
2620 IRQF_SHARED, dev->name, dev);
2621 if (unlikely(ret < 0)) {
2622 netdev_err(priv->dev,
2623 "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2624 __func__, dev->irq, ret);
2625 goto irq_error;
2626 }
2627
2628 /* Request the Wake IRQ in case another line is used for WoL */
2629 if (priv->wol_irq != dev->irq) {
2630 ret = request_irq(priv->wol_irq, stmmac_interrupt,
2631 IRQF_SHARED, dev->name, dev);
2632 if (unlikely(ret < 0)) {
2633 netdev_err(priv->dev,
2634 "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2635 __func__, priv->wol_irq, ret);
2636 goto wolirq_error;
2637 }
2638 }
2639
2640 /* Request the LPI IRQ in case a dedicated line is used for it */
2641 if (priv->lpi_irq > 0) {
2642 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2643 dev->name, dev);
2644 if (unlikely(ret < 0)) {
2645 netdev_err(priv->dev,
2646 "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2647 __func__, priv->lpi_irq, ret);
2648 goto lpiirq_error;
2649 }
2650 }
2651
2652 stmmac_enable_all_queues(priv);
2653 stmmac_start_all_queues(priv);
2654
2655 return 0;
2656
2657 lpiirq_error:
2658 if (priv->wol_irq != dev->irq)
2659 free_irq(priv->wol_irq, dev);
2660 wolirq_error:
2661 free_irq(dev->irq, dev);
2662 irq_error:
2663 if (dev->phydev)
2664 phy_stop(dev->phydev);
2665
2666 del_timer_sync(&priv->txtimer);
2667 stmmac_hw_teardown(dev);
2668 init_error:
2669 free_dma_desc_resources(priv);
2670 dma_desc_error:
2671 if (dev->phydev)
2672 phy_disconnect(dev->phydev);
2673
2674 return ret;
2675 }
2676
2677 /**
2678 * stmmac_release - close entry point of the driver
2679 * @dev : device pointer.
2680 * Description:
2681 * This is the stop entry point of the driver.
2682 */
2683 static int stmmac_release(struct net_device *dev)
2684 {
2685 struct stmmac_priv *priv = netdev_priv(dev);
2686
2687 if (priv->eee_enabled)
2688 del_timer_sync(&priv->eee_ctrl_timer);
2689
2690 /* Stop and disconnect the PHY */
2691 if (dev->phydev) {
2692 phy_stop(dev->phydev);
2693 phy_disconnect(dev->phydev);
2694 }
2695
2696 stmmac_stop_all_queues(priv);
2697
2698 stmmac_disable_all_queues(priv);
2699
2700 del_timer_sync(&priv->txtimer);
2701
2702 /* Free the IRQ lines */
2703 free_irq(dev->irq, dev);
2704 if (priv->wol_irq != dev->irq)
2705 free_irq(priv->wol_irq, dev);
2706 if (priv->lpi_irq > 0)
2707 free_irq(priv->lpi_irq, dev);
2708
2709 /* Stop TX/RX DMA and clear the descriptors */
2710 stmmac_stop_all_dma(priv);
2711
2712 /* Release and free the Rx/Tx resources */
2713 free_dma_desc_resources(priv);
2714
2715 /* Disable the MAC Rx/Tx */
2716 priv->hw->mac->set_mac(priv->ioaddr, false);
2717
2718 netif_carrier_off(dev);
2719
2720 #ifdef CONFIG_DEBUG_FS
2721 stmmac_exit_fs(dev);
2722 #endif
2723
2724 stmmac_release_ptp(priv);
2725
2726 return 0;
2727 }
2728
2729 /**
2730 * stmmac_tso_allocator - allocate and fill TSO descriptors for a buffer
2731 * @priv: driver private structure
2732 * @des: buffer start address
2733 * @total_len: total length to fill in descriptors
2734 * @last_segment: condition for the last descriptor
2735 * @queue: TX queue index
2736 * Description:
2737 * This function fills a descriptor and takes new descriptors as needed,
2738 * according to the buffer length to fill
2739 */
2740 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
2741 int total_len, bool last_segment, u32 queue)
2742 {
2743 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2744 struct dma_desc *desc;
2745 u32 buff_size;
2746 int tmp_len;
2747
2748 tmp_len = total_len;
2749
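/* Each descriptor carries at most TSO_MAX_BUFF_SIZE bytes of payload */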
2750 while (tmp_len > 0) {
2751 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2752 desc = tx_q->dma_tx + tx_q->cur_tx;
2753
2754 desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2755 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2756 TSO_MAX_BUFF_SIZE : tmp_len;
2757
2758 priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
2759 0, 1,
2760 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2761 0, 0);
2762
2763 tmp_len -= TSO_MAX_BUFF_SIZE;
2764 }
2765 }
2766
2767 /**
2768 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2769 * @skb : the socket buffer
2770 * @dev : device pointer
2771 * Description: this is the transmit function that is called on TSO frames
2772 * (support available on GMAC4 and newer chips).
2773 * The diagram below shows the ring programming in case of TSO frames:
2774 *
2775 * First Descriptor
2776 * --------
2777 * | DES0 |---> buffer1 = L2/L3/L4 header
2778 * | DES1 |---> TCP Payload (can continue on next descr...)
2779 * | DES2 |---> buffer 1 and 2 len
2780 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2781 * --------
2782 * |
2783 * ...
2784 * |
2785 * --------
2786 * | DES0 | --| Split TCP Payload on Buffers 1 and 2
2787 * | DES1 | --|
2788 * | DES2 | --> buffer 1 and 2 len
2789 * | DES3 |
2790 * --------
2791 *
2792 * The MSS is kept across frames; the TDES3 ctx field is only programmed when it changes.
2793 */
2794 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2795 {
2796 struct dma_desc *desc, *first, *mss_desc = NULL;
2797 struct stmmac_priv *priv = netdev_priv(dev);
2798 int nfrags = skb_shinfo(skb)->nr_frags;
2799 u32 queue = skb_get_queue_mapping(skb);
2800 unsigned int first_entry, des;
2801 struct stmmac_tx_queue *tx_q;
2802 int tmp_pay_len = 0;
2803 u32 pay_len, mss;
2804 u8 proto_hdr_len;
2805 int i;
2806
2807 tx_q = &priv->tx_queue[queue];
2808
2809 /* Compute header lengths */
2810 proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2811
2812 /* Desc availability based on threshold should be safe enough */
2813 if (unlikely(stmmac_tx_avail(priv, queue) <
2814 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
2815 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2816 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2817 queue));
2818 /* This is a hard error, log it. */
2819 netdev_err(priv->dev,
2820 "%s: Tx Ring full when queue awake\n",
2821 __func__);
2822 }
2823 return NETDEV_TX_BUSY;
2824 }
2825
2826 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2827
2828 mss = skb_shinfo(skb)->gso_size;
2829
2830 /* set new MSS value if needed */
2831 if (mss != priv->mss) {
2832 mss_desc = tx_q->dma_tx + tx_q->cur_tx;
2833 priv->hw->desc->set_mss(mss_desc, mss);
2834 priv->mss = mss;
2835 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2836 }
2837
2838 if (netif_msg_tx_queued(priv)) {
2839 pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2840 __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2841 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2842 skb->data_len);
2843 }
2844
2845 first_entry = tx_q->cur_tx;
2846
2847 desc = tx_q->dma_tx + first_entry;
2848 first = desc;
2849
2850 /* first descriptor: fill Headers on Buf1 */
2851 des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2852 DMA_TO_DEVICE);
2853 if (dma_mapping_error(priv->device, des))
2854 goto dma_map_err;
2855
2856 tx_q->tx_skbuff_dma[first_entry].buf = des;
2857 tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2858
2859 first->des0 = cpu_to_le32(des);
2860
2861 /* Fill start of payload in buff2 of first descriptor */
2862 if (pay_len)
2863 first->des1 = cpu_to_le32(des + proto_hdr_len);
2864
2865 /* If needed take extra descriptors to fill the remaining payload */
2866 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2867
2868 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
2869
2870 /* Prepare fragments */
2871 for (i = 0; i < nfrags; i++) {
2872 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2873
2874 des = skb_frag_dma_map(priv->device, frag, 0,
2875 skb_frag_size(frag),
2876 DMA_TO_DEVICE);
2877 if (dma_mapping_error(priv->device, des))
2878 goto dma_map_err;
2879
2880 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
2881 (i == nfrags - 1), queue);
2882
2883 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2884 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2885 tx_q->tx_skbuff[tx_q->cur_tx] = NULL;
2886 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
2887 }
2888
2889 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
2890
2891 /* Only the last descriptor gets to point to the skb. */
2892 tx_q->tx_skbuff[tx_q->cur_tx] = skb;
2893
2894 /* We've used all descriptors we need for this skb, however,
2895 * advance cur_tx so that it references a fresh descriptor.
2896 * ndo_start_xmit will fill this descriptor the next time it's
2897 * called and stmmac_tx_clean may clean up to this descriptor.
2898 */
2899 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2900
2901 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
2902 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2903 __func__);
2904 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2905 }
2906
2907 dev->stats.tx_bytes += skb->len;
2908 priv->xstats.tx_tso_frames++;
2909 priv->xstats.tx_tso_nfrags += nfrags;
2910
2911 /* Manage tx mitigation */
2912 priv->tx_count_frames += nfrags + 1;
2913 if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2914 mod_timer(&priv->txtimer,
2915 STMMAC_COAL_TIMER(priv->tx_coal_timer));
2916 } else {
2917 priv->tx_count_frames = 0;
2918 priv->hw->desc->set_tx_ic(desc);
2919 priv->xstats.tx_set_ic_bit++;
2920 }
2921
2922 skb_tx_timestamp(skb);
2923
2924 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2925 priv->hwts_tx_en)) {
2926 /* declare that device is doing timestamping */
2927 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2928 priv->hw->desc->enable_tx_timestamp(first);
2929 }
2930
2931 /* Complete the first descriptor before granting the DMA */
2932 priv->hw->desc->prepare_tso_tx_desc(first, 1,
2933 proto_hdr_len,
2934 pay_len,
2935 1, tx_q->tx_skbuff_dma[first_entry].last_segment,
2936 tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2937
2938 /* If context desc is used to change MSS */
2939 if (mss_desc)
2940 priv->hw->desc->set_tx_owner(mss_desc);
2941
2942 /* The own bit must be the last setting done when preparing the
2943 * descriptor; a barrier is then needed to make sure that
2944 * all is coherent before granting the DMA engine.
2945 */
2946 dma_wmb();
2947
2948 if (netif_msg_pktdata(priv)) {
2949 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2950 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2951 tx_q->cur_tx, first, nfrags);
2952
2953 priv->hw->desc->display_ring((void *)tx_q->dma_tx, DMA_TX_SIZE,
2954 0);
2955
2956 pr_info(">>> frame to be transmitted: ");
2957 print_pkt(skb->data, skb_headlen(skb));
2958 }
2959
2960 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
2961
2962 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
2963 queue);
2964
2965 return NETDEV_TX_OK;
2966
2967 dma_map_err:
2968 dev_err(priv->device, "Tx dma map failed\n");
2969 dev_kfree_skb(skb);
2970 priv->dev->stats.tx_dropped++;
2971 return NETDEV_TX_OK;
2972 }
2973
2974 /**
2975 * stmmac_xmit - Tx entry point of the driver
2976 * @skb : the socket buffer
2977 * @dev : device pointer
2978 * Description : this is the tx entry point of the driver.
2979 * It programs the chain or the ring and supports oversized frames
2980 * and SG feature.
2981 */
2982 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2983 {
2984 struct stmmac_priv *priv = netdev_priv(dev);
2985 unsigned int nopaged_len = skb_headlen(skb);
2986 int i, csum_insertion = 0, is_jumbo = 0;
2987 u32 queue = skb_get_queue_mapping(skb);
2988 int nfrags = skb_shinfo(skb)->nr_frags;
2989 int entry;
2990 unsigned int first_entry;
2991 struct dma_desc *desc, *first;
2992 struct stmmac_tx_queue *tx_q;
2993 unsigned int enh_desc;
2994 unsigned int des;
2995
2996 tx_q = &priv->tx_queue[queue];
2997
2998 /* Manage oversized TCP frames for GMAC4 device */
2999 if (skb_is_gso(skb) && priv->tso) {
3000 if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
3001 return stmmac_tso_xmit(skb, dev);
3002 }
3003
3004 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
3005 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
3006 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
3007 queue));
3008 /* This is a hard error, log it. */
3009 netdev_err(priv->dev,
3010 "%s: Tx Ring full when queue awake\n",
3011 __func__);
3012 }
3013 return NETDEV_TX_BUSY;
3014 }
3015
3016 if (priv->tx_path_in_lpi_mode)
3017 stmmac_disable_eee_mode(priv);
3018
3019 entry = tx_q->cur_tx;
3020 first_entry = entry;
3021
3022 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
3023
3024 if (likely(priv->extend_desc))
3025 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3026 else
3027 desc = tx_q->dma_tx + entry;
3028
3029 first = desc;
3030
3031 enh_desc = priv->plat->enh_desc;
3032 /* To program the descriptors according to the size of the frame */
3033 if (enh_desc)
3034 is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
3035
3036 if (unlikely(is_jumbo) && likely(priv->synopsys_id <
3037 DWMAC_CORE_4_00)) {
3038 entry = priv->hw->mode->jumbo_frm(tx_q, skb, csum_insertion);
3039 if (unlikely(entry < 0))
3040 goto dma_map_err;
3041 }
3042
3043 for (i = 0; i < nfrags; i++) {
3044 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3045 int len = skb_frag_size(frag);
3046 bool last_segment = (i == (nfrags - 1));
3047
3048 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3049
3050 if (likely(priv->extend_desc))
3051 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
3052 else
3053 desc = tx_q->dma_tx + entry;
3054
3055 des = skb_frag_dma_map(priv->device, frag, 0, len,
3056 DMA_TO_DEVICE);
3057 if (dma_mapping_error(priv->device, des))
3058 goto dma_map_err; /* should reuse desc w/o issues */
3059
3060 tx_q->tx_skbuff[entry] = NULL;
3061
3062 tx_q->tx_skbuff_dma[entry].buf = des;
3063 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3064 desc->des0 = cpu_to_le32(des);
3065 else
3066 desc->des2 = cpu_to_le32(des);
3067
3068 tx_q->tx_skbuff_dma[entry].map_as_page = true;
3069 tx_q->tx_skbuff_dma[entry].len = len;
3070 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
3071
3072 /* Prepare the descriptor and set the own bit too */
3073 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
3074 priv->mode, 1, last_segment,
3075 skb->len);
3076 }
3077
3078 /* Only the last descriptor gets to point to the skb. */
3079 tx_q->tx_skbuff[entry] = skb;
3080
3081 /* We've used all descriptors we need for this skb, however,
3082 * advance cur_tx so that it references a fresh descriptor.
3083 * ndo_start_xmit will fill this descriptor the next time it's
3084 * called and stmmac_tx_clean may clean up to this descriptor.
3085 */
3086 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3087 tx_q->cur_tx = entry;
3088
3089 if (netif_msg_pktdata(priv)) {
3090 void *tx_head;
3091
3092 netdev_dbg(priv->dev,
3093 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
3094 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
3095 entry, first, nfrags);
3096
3097 if (priv->extend_desc)
3098 tx_head = (void *)tx_q->dma_etx;
3099 else
3100 tx_head = (void *)tx_q->dma_tx;
3101
3102 priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
3103
3104 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
3105 print_pkt(skb->data, skb->len);
3106 }
3107
3108 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
3109 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3110 __func__);
3111 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
3112 }
3113
3114 dev->stats.tx_bytes += skb->len;
3115
3116 /* According to the coalesce parameter, the IC bit for the latest
3117 * segment is cleared and the timer re-started to clean the tx status.
3118 * This approach takes care of the fragments: desc is the first
3119 * element in case of no SG.
3120 */
3121 priv->tx_count_frames += nfrags + 1;
3122 if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
3123 mod_timer(&priv->txtimer,
3124 STMMAC_COAL_TIMER(priv->tx_coal_timer));
3125 } else {
3126 priv->tx_count_frames = 0;
3127 priv->hw->desc->set_tx_ic(desc);
3128 priv->xstats.tx_set_ic_bit++;
3129 }
3130
3131 skb_tx_timestamp(skb);
3132
3133 /* Ready to fill the first descriptor and set the OWN bit w/o any
3134 * problems because all the descriptors are actually ready to be
3135 * passed to the DMA engine.
3136 */
3137 if (likely(!is_jumbo)) {
3138 bool last_segment = (nfrags == 0);
3139
3140 des = dma_map_single(priv->device, skb->data,
3141 nopaged_len, DMA_TO_DEVICE);
3142 if (dma_mapping_error(priv->device, des))
3143 goto dma_map_err;
3144
3145 tx_q->tx_skbuff_dma[first_entry].buf = des;
3146 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3147 first->des0 = cpu_to_le32(des);
3148 else
3149 first->des2 = cpu_to_le32(des);
3150
3151 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3152 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
3153
3154 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3155 priv->hwts_tx_en)) {
3156 /* declare that device is doing timestamping */
3157 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3158 priv->hw->desc->enable_tx_timestamp(first);
3159 }
3160
3161 /* Prepare the first descriptor setting the OWN bit too */
3162 priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
3163 csum_insertion, priv->mode, 1,
3164 last_segment, skb->len);
3165
3166 /* The own bit must be the last setting done when preparing the
3167 * descriptor; a barrier is then needed to make sure that
3168 * all is coherent before granting the DMA engine.
3169 */
3170 dma_wmb();
3171 }
3172
3173 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3174
3175 if (priv->synopsys_id < DWMAC_CORE_4_00)
3176 priv->hw->dma->enable_dma_transmission(priv->ioaddr);
3177 else
3178 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
3179 queue);
3180
3181 return NETDEV_TX_OK;
3182
3183 dma_map_err:
3184 netdev_err(priv->dev, "Tx DMA map failed\n");
3185 dev_kfree_skb(skb);
3186 priv->dev->stats.tx_dropped++;
3187 return NETDEV_TX_OK;
3188 }
3189
3190 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3191 {
3192 struct ethhdr *ehdr;
3193 u16 vlanid;
3194
3195 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
3196 NETIF_F_HW_VLAN_CTAG_RX &&
3197 !__vlan_get_tag(skb, &vlanid)) {
3198 /* pop the vlan tag */
3199 ehdr = (struct ethhdr *)skb->data;
3200 memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
3201 skb_pull(skb, VLAN_HLEN);
3202 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
3203 }
3204 }
3205
3206
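/* Returns 1 once rx_zeroc_thresh has reached STMMAC_RX_THRESH (e.g. after skb
 * allocation failures), telling stmmac_rx() to copy frames instead of handing
 * out the preallocated ring buffers.
 */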
3207 static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
3208 {
3209 if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
3210 return 0;
3211
3212 return 1;
3213 }
3214
3215 /**
3216 * stmmac_rx_refill - refill used skb preallocated buffers
3217 * @priv: driver private structure
3218 * @queue: RX queue index
3219 * Description : this is to reallocate the skb for the reception process
3220 * that is based on zero-copy.
3221 */
3222 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
3223 {
3224 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3225 int dirty = stmmac_rx_dirty(priv, queue);
3226 unsigned int entry = rx_q->dirty_rx;
3227
3228 int bfsize = priv->dma_buf_sz;
3229
3230 while (dirty-- > 0) {
3231 struct dma_desc *p;
3232
3233 if (priv->extend_desc)
3234 p = (struct dma_desc *)(rx_q->dma_erx + entry);
3235 else
3236 p = rx_q->dma_rx + entry;
3237
3238 if (likely(!rx_q->rx_skbuff[entry])) {
3239 struct sk_buff *skb;
3240
3241 skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
3242 if (unlikely(!skb)) {
3243 /* so for a while no zero-copy! */
3244 rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
3245 if (unlikely(net_ratelimit()))
3246 dev_err(priv->device,
3247 "fail to alloc skb entry %d\n",
3248 entry);
3249 break;
3250 }
3251
3252 rx_q->rx_skbuff[entry] = skb;
3253 rx_q->rx_skbuff_dma[entry] =
3254 dma_map_single(priv->device, skb->data, bfsize,
3255 DMA_FROM_DEVICE);
3256 if (dma_mapping_error(priv->device,
3257 rx_q->rx_skbuff_dma[entry])) {
3258 netdev_err(priv->dev, "Rx DMA map failed\n");
3259 dev_kfree_skb(skb);
3260 break;
3261 }
3262
3263 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
3264 p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3265 p->des1 = 0;
3266 } else {
3267 p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
3268 }
3269 if (priv->hw->mode->refill_desc3)
3270 priv->hw->mode->refill_desc3(rx_q, p);
3271
3272 if (rx_q->rx_zeroc_thresh > 0)
3273 rx_q->rx_zeroc_thresh--;
3274
3275 netif_dbg(priv, rx_status, priv->dev,
3276 "refill entry #%d\n", entry);
3277 }
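/* Make sure the buffer address is visible to the device before the descriptor is handed back below */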
3278 dma_wmb();
3279
3280 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3281 priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
3282 else
3283 priv->hw->desc->set_rx_owner(p);
3284
3285 dma_wmb();
3286
3287 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
3288 }
3289 rx_q->dirty_rx = entry;
3290 }
3291
3292 /**
3293 * stmmac_rx - manage the receive process
3294 * @priv: driver private structure
3295 * @limit: napi budget
3296 * @queue: RX queue index.
3297 * Description : this is the function called by the napi poll method.
3298 * It gets all the frames inside the ring.
3299 */
3300 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3301 {
3302 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3303 unsigned int entry = rx_q->cur_rx;
3304 int coe = priv->hw->rx_csum;
3305 unsigned int next_entry;
3306 unsigned int count = 0;
3307
3308 if (netif_msg_rx_status(priv)) {
3309 void *rx_head;
3310
3311 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
3312 if (priv->extend_desc)
3313 rx_head = (void *)rx_q->dma_erx;
3314 else
3315 rx_head = (void *)rx_q->dma_rx;
3316
3317 priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
3318 }
3319 while (count < limit) {
3320 int status;
3321 struct dma_desc *p;
3322 struct dma_desc *np;
3323
3324 if (priv->extend_desc)
3325 p = (struct dma_desc *)(rx_q->dma_erx + entry);
3326 else
3327 p = rx_q->dma_rx + entry;
3328
3329 /* read the status of the incoming frame */
3330 status = priv->hw->desc->rx_status(&priv->dev->stats,
3331 &priv->xstats, p);
3332 /* check if managed by the DMA otherwise go ahead */
3333 if (unlikely(status & dma_own))
3334 break;
3335
3336 count++;
3337
3338 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3339 next_entry = rx_q->cur_rx;
3340
3341 if (priv->extend_desc)
3342 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
3343 else
3344 np = rx_q->dma_rx + next_entry;
3345
3346 prefetch(np);
3347
3348 if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
3349 priv->hw->desc->rx_extended_status(&priv->dev->stats,
3350 &priv->xstats,
3351 rx_q->dma_erx +
3352 entry);
3353 if (unlikely(status == discard_frame)) {
3354 priv->dev->stats.rx_errors++;
3355 if (priv->hwts_rx_en && !priv->extend_desc) {
3356 /* DESC2 & DESC3 will be overwritten by device
3357 * with timestamp value, hence reinitialize
3358 * them in stmmac_rx_refill() function so that
3359 * device can reuse it.
3360 */
3361 dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
3362 rx_q->rx_skbuff[entry] = NULL;
3363 dma_unmap_single(priv->device,
3364 rx_q->rx_skbuff_dma[entry],
3365 priv->dma_buf_sz,
3366 DMA_FROM_DEVICE);
3367 }
3368 } else {
3369 struct sk_buff *skb;
3370 int frame_len;
3371 unsigned int des;
3372
3373 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3374 des = le32_to_cpu(p->des0);
3375 else
3376 des = le32_to_cpu(p->des2);
3377
3378 frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
3379
3380 /* If frame length is greater than skb buffer size
3381 * (preallocated during init) then the packet is
3382 * ignored
3383 */
3384 if (frame_len > priv->dma_buf_sz) {
3385 netdev_err(priv->dev,
3386 "len %d larger than size (%d)\n",
3387 frame_len, priv->dma_buf_sz);
3388 priv->dev->stats.rx_length_errors++;
3389 break;
3390 }
3391
3392 /* With ACS set, the GMAC core strips PAD/FCS only for IEEE 802.3
3393 * Type frames (LLC/LLC-SNAP), so remove the FCS here for all others
3394 */
3395 if (unlikely(status != llc_snap))
3396 frame_len -= ETH_FCS_LEN;
3397
3398 if (netif_msg_rx_status(priv)) {
3399 netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3400 p, entry, des);
3401 if (frame_len > ETH_FRAME_LEN)
3402 netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3403 frame_len, status);
3404 }
3405
3406 /* Zero-copy is always used, for every frame size, in case of
3407 * GMAC4 because the used descriptors always need
3408 * to be refilled.
3409 */
3410 if (unlikely(!priv->plat->has_gmac4 &&
3411 ((frame_len < priv->rx_copybreak) ||
3412 stmmac_rx_threshold_count(rx_q)))) {
3413 skb = netdev_alloc_skb_ip_align(priv->dev,
3414 frame_len);
3415 if (unlikely(!skb)) {
3416 if (net_ratelimit())
3417 dev_warn(priv->device,
3418 "packet dropped\n");
3419 priv->dev->stats.rx_dropped++;
3420 break;
3421 }
3422
3423 dma_sync_single_for_cpu(priv->device,
3424 rx_q->rx_skbuff_dma
3425 [entry], frame_len,
3426 DMA_FROM_DEVICE);
3427 skb_copy_to_linear_data(skb,
3428 rx_q->
3429 rx_skbuff[entry]->data,
3430 frame_len);
3431
3432 skb_put(skb, frame_len);
3433 dma_sync_single_for_device(priv->device,
3434 rx_q->rx_skbuff_dma
3435 [entry], frame_len,
3436 DMA_FROM_DEVICE);
3437 } else {
3438 skb = rx_q->rx_skbuff[entry];
3439 if (unlikely(!skb)) {
3440 netdev_err(priv->dev,
3441 "%s: Inconsistent Rx chain\n",
3442 priv->dev->name);
3443 priv->dev->stats.rx_dropped++;
3444 break;
3445 }
3446 prefetch(skb->data - NET_IP_ALIGN);
3447 rx_q->rx_skbuff[entry] = NULL;
3448 rx_q->rx_zeroc_thresh++;
3449
3450 skb_put(skb, frame_len);
3451 dma_unmap_single(priv->device,
3452 rx_q->rx_skbuff_dma[entry],
3453 priv->dma_buf_sz,
3454 DMA_FROM_DEVICE);
3455 }
3456
3457 if (netif_msg_pktdata(priv)) {
3458 netdev_dbg(priv->dev, "frame received (%dbytes)",
3459 frame_len);
3460 print_pkt(skb->data, frame_len);
3461 }
3462
3463 stmmac_get_rx_hwtstamp(priv, p, np, skb);
3464
3465 stmmac_rx_vlan(priv->dev, skb);
3466
3467 skb->protocol = eth_type_trans(skb, priv->dev);
3468
3469 if (unlikely(!coe))
3470 skb_checksum_none_assert(skb);
3471 else
3472 skb->ip_summed = CHECKSUM_UNNECESSARY;
3473
3474 napi_gro_receive(&rx_q->napi, skb);
3475
3476 priv->dev->stats.rx_packets++;
3477 priv->dev->stats.rx_bytes += frame_len;
3478 }
3479 entry = next_entry;
3480 }
3481
3482 stmmac_rx_refill(priv, queue);
3483
3484 priv->xstats.rx_pkt_n += count;
3485
3486 return count;
3487 }
3488
3489 /**
3490 * stmmac_poll - stmmac poll method (NAPI)
3491 * @napi : pointer to the napi structure.
3492 * @budget : maximum number of packets that the current CPU can receive from
3493 * all interfaces.
3494 * Description :
3495 * To look at the incoming frames and clear the tx resources.
3496 */
3497 static int stmmac_poll(struct napi_struct *napi, int budget)
3498 {
3499 struct stmmac_rx_queue *rx_q =
3500 container_of(napi, struct stmmac_rx_queue, napi);
3501 struct stmmac_priv *priv = rx_q->priv_data;
3502 u32 tx_count = priv->plat->tx_queues_to_use;
3503 u32 chan = rx_q->queue_index;
3504 int work_done = 0;
3505 u32 queue;
3506
3507 priv->xstats.napi_poll++;
3508
3509 /* check all the queues */
3510 for (queue = 0; queue < tx_count; queue++)
3511 stmmac_tx_clean(priv, queue);
3512
3513 work_done = stmmac_rx(priv, budget, rx_q->queue_index);
3514 if (work_done < budget) {
3515 napi_complete_done(napi, work_done);
3516 stmmac_enable_dma_irq(priv, chan);
3517 }
3518 return work_done;
3519 }
3520
3521 /**
3522 * stmmac_tx_timeout
3523 * @dev : Pointer to net device structure
3524 * Description: this function is called when a packet transmission fails to
3525 * complete within a reasonable time. The driver will mark the error in the
3526 * netdev structure and arrange for the device to be reset to a sane state
3527 * in order to transmit a new packet.
3528 */
3529 static void stmmac_tx_timeout(struct net_device *dev)
3530 {
3531 struct stmmac_priv *priv = netdev_priv(dev);
3532 u32 tx_count = priv->plat->tx_queues_to_use;
3533 u32 chan;
3534
3535 /* Clear Tx resources and restart transmitting again */
3536 for (chan = 0; chan < tx_count; chan++)
3537 stmmac_tx_err(priv, chan);
3538 }
3539
3540 /**
3541 * stmmac_set_rx_mode - entry point for multicast addressing
3542 * @dev : pointer to the device structure
3543 * Description:
3544 * This function is a driver entry point which gets called by the kernel
3545 * whenever multicast addresses must be enabled/disabled.
3546 * Return value:
3547 * void.
3548 */
3549 static void stmmac_set_rx_mode(struct net_device *dev)
3550 {
3551 struct stmmac_priv *priv = netdev_priv(dev);
3552
3553 priv->hw->mac->set_filter(priv->hw, dev);
3554 }
3555
3556 /**
3557 * stmmac_change_mtu - entry point to change MTU size for the device.
3558 * @dev : device pointer.
3559 * @new_mtu : the new MTU size for the device.
3560 * Description: the Maximum Transmission Unit (MTU) is used by the network layer
3561 * to drive packet transmission. Ethernet has an MTU of 1500 octets
3562 * (ETH_DATA_LEN). This value can be changed with ifconfig.
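 * The interface must be down while changing it, otherwise -EBUSY is returned.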
3563 * Return value:
3564 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3565 * file on failure.
3566 */
3567 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3568 {
3569 struct stmmac_priv *priv = netdev_priv(dev);
3570
3571 if (netif_running(dev)) {
3572 netdev_err(priv->dev, "must be stopped to change its MTU\n");
3573 return -EBUSY;
3574 }
3575
3576 dev->mtu = new_mtu;
3577
3578 netdev_update_features(dev);
3579
3580 return 0;
3581 }
3582
3583 static netdev_features_t stmmac_fix_features(struct net_device *dev,
3584 netdev_features_t features)
3585 {
3586 struct stmmac_priv *priv = netdev_priv(dev);
3587
3588 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
3589 features &= ~NETIF_F_RXCSUM;
3590
3591 if (!priv->plat->tx_coe)
3592 features &= ~NETIF_F_CSUM_MASK;
3593
3594 /* Some GMAC devices have a bugged Jumbo frame support that
3595 * needs to have the Tx COE disabled for oversized frames
3596 * (due to limited buffer sizes). In this case we disable
3597 * the TX csum insertion in the TDES and not use SF.
3598 */
3599 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
3600 features &= ~NETIF_F_CSUM_MASK;
3601
3602 /* Disable tso if asked by ethtool */
3603 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3604 if (features & NETIF_F_TSO)
3605 priv->tso = true;
3606 else
3607 priv->tso = false;
3608 }
3609
3610 return features;
3611 }
3612
3613 static int stmmac_set_features(struct net_device *netdev,
3614 netdev_features_t features)
3615 {
3616 struct stmmac_priv *priv = netdev_priv(netdev);
3617
3618 /* Keep the COE Type in case csum offload is supported */
3619 if (features & NETIF_F_RXCSUM)
3620 priv->hw->rx_csum = priv->plat->rx_coe;
3621 else
3622 priv->hw->rx_csum = 0;
3623 /* No check needed because rx_coe has been set before and it will be
3624 * fixed in case of issue.
3625 */
3626 priv->hw->mac->rx_ipc(priv->hw);
3627
3628 return 0;
3629 }
3630
3631 /**
3632 * stmmac_interrupt - main ISR
3633 * @irq: interrupt number.
3634 * @dev_id: to pass the net device pointer.
3635 * Description: this is the main driver interrupt service routine.
3636 * It can call:
3637 * o DMA service routine (to manage incoming frame reception and transmission
3638 * status)
3639 * o Core interrupts to manage: remote wake-up, management counter, LPI
3640 * interrupts.
3641 */
3642 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3643 {
3644 struct net_device *dev = (struct net_device *)dev_id;
3645 struct stmmac_priv *priv = netdev_priv(dev);
3646 u32 rx_cnt = priv->plat->rx_queues_to_use;
3647 u32 tx_cnt = priv->plat->tx_queues_to_use;
3648 u32 queues_count;
3649 u32 queue;
3650
3651 queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
3652
3653 if (priv->irq_wake)
3654 pm_wakeup_event(priv->device, 0);
3655
3656 if (unlikely(!dev)) {
3657 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
3658 return IRQ_NONE;
3659 }
3660
3661 /* To handle GMAC own interrupts */
3662 if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
3663 int status = priv->hw->mac->host_irq_status(priv->hw,
3664 &priv->xstats);
3665
3666 if (unlikely(status)) {
3667 /* For LPI we need to save the tx status */
3668 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
3669 priv->tx_path_in_lpi_mode = true;
3670 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
3671 priv->tx_path_in_lpi_mode = false;
3672 }
3673
3674 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3675 for (queue = 0; queue < queues_count; queue++) {
3676 struct stmmac_rx_queue *rx_q =
3677 &priv->rx_queue[queue];
3678
3679 status |=
3680 priv->hw->mac->host_mtl_irq_status(priv->hw,
3681 queue);
3682
3683 if (status & CORE_IRQ_MTL_RX_OVERFLOW &&
3684 priv->hw->dma->set_rx_tail_ptr)
3685 priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
3686 rx_q->rx_tail_addr,
3687 queue);
3688 }
3689 }
3690
3691 /* PCS link status */
3692 if (priv->hw->pcs) {
3693 if (priv->xstats.pcs_link)
3694 netif_carrier_on(dev);
3695 else
3696 netif_carrier_off(dev);
3697 }
3698 }
3699
3700 /* To handle DMA interrupts */
3701 stmmac_dma_interrupt(priv);
3702
3703 return IRQ_HANDLED;
3704 }
3705
3706 #ifdef CONFIG_NET_POLL_CONTROLLER
3707 /* Polling receive - used by NETCONSOLE and other diagnostic tools
3708 * to allow network I/O with interrupts disabled.
3709 */
3710 static void stmmac_poll_controller(struct net_device *dev)
3711 {
3712 disable_irq(dev->irq);
3713 stmmac_interrupt(dev->irq, dev);
3714 enable_irq(dev->irq);
3715 }
3716 #endif
3717
3718 /**
3719 * stmmac_ioctl - Entry point for the Ioctl
3720 * @dev: Device pointer.
3721 * @rq: An IOCTL-specific structure that can contain a pointer to
3722 * a proprietary structure used to pass information to the driver.
3723 * @cmd: IOCTL command
3724 * Description:
3725 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
3726 */
3727 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3728 {
3729 int ret = -EOPNOTSUPP;
3730
3731 if (!netif_running(dev))
3732 return -EINVAL;
3733
3734 switch (cmd) {
3735 case SIOCGMIIPHY:
3736 case SIOCGMIIREG:
3737 case SIOCSMIIREG:
3738 if (!dev->phydev)
3739 return -EINVAL;
3740 ret = phy_mii_ioctl(dev->phydev, rq, cmd);
3741 break;
3742 case SIOCSHWTSTAMP:
3743 ret = stmmac_hwtstamp_ioctl(dev, rq);
3744 break;
3745 default:
3746 break;
3747 }
3748
3749 return ret;
3750 }
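
/* A minimal user-space sketch (illustrative only: the interface name
 * "eth0" and the helper below are assumptions, not part of this driver)
 * of how a SIOCSHWTSTAMP request reaches stmmac_hwtstamp_ioctl() via
 * the ndo_do_ioctl hook:
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <linux/net_tstamp.h>
 *	#include <linux/sockios.h>
 *
 *	// fd must be an open socket, e.g. socket(AF_INET, SOCK_DGRAM, 0)
 *	static int enable_hw_timestamping(int fd)
 *	{
 *		struct hwtstamp_config cfg = { 0 };
 *		struct ifreq ifr = { 0 };
 *
 *		cfg.tx_type = HWTSTAMP_TX_ON;		// timestamp transmitted frames
 *		cfg.rx_filter = HWTSTAMP_FILTER_ALL;	// timestamp all received frames
 *
 *		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *		ifr.ifr_data = (char *)&cfg;
 *
 *		return ioctl(fd, SIOCSHWTSTAMP, &ifr);	// dispatched to stmmac_ioctl()
 *	}
 */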
3751
3752 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
3753 {
3754 struct stmmac_priv *priv = netdev_priv(ndev);
3755 int ret = 0;
3756
3757 ret = eth_mac_addr(ndev, addr);
3758 if (ret)
3759 return ret;
3760
3761 priv->hw->mac->set_umac_addr(priv->hw, ndev->dev_addr, 0);
3762
3763 return ret;
3764 }
3765
3766 #ifdef CONFIG_DEBUG_FS
3767 static struct dentry *stmmac_fs_dir;
3768
3769 static void sysfs_display_ring(void *head, int size, int extend_desc,
3770 struct seq_file *seq)
3771 {
3772 int i;
3773 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3774 struct dma_desc *p = (struct dma_desc *)head;
3775
3776 for (i = 0; i < size; i++) {
3777 if (extend_desc) {
3778 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3779 i, (unsigned int)virt_to_phys(ep),
3780 le32_to_cpu(ep->basic.des0),
3781 le32_to_cpu(ep->basic.des1),
3782 le32_to_cpu(ep->basic.des2),
3783 le32_to_cpu(ep->basic.des3));
3784 ep++;
3785 } else {
3786 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
3787 i, (unsigned int)virt_to_phys(p),
3788 le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3789 le32_to_cpu(p->des2), le32_to_cpu(p->des3));
3790 p++;
3791 }
3792 seq_printf(seq, "\n");
3793 }
3794 }
3795
3796 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
3797 {
3798 struct net_device *dev = seq->private;
3799 struct stmmac_priv *priv = netdev_priv(dev);
3800 u32 rx_count = priv->plat->rx_queues_to_use;
3801 u32 tx_count = priv->plat->tx_queues_to_use;
3802 u32 queue;
3803
3804 for (queue = 0; queue < rx_count; queue++) {
3805 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3806
3807 seq_printf(seq, "RX Queue %d:\n", queue);
3808
3809 if (priv->extend_desc) {
3810 seq_printf(seq, "Extended descriptor ring:\n");
3811 sysfs_display_ring((void *)rx_q->dma_erx,
3812 DMA_RX_SIZE, 1, seq);
3813 } else {
3814 seq_printf(seq, "Descriptor ring:\n");
3815 sysfs_display_ring((void *)rx_q->dma_rx,
3816 DMA_RX_SIZE, 0, seq);
3817 }
3818 }
3819
3820 for (queue = 0; queue < tx_count; queue++) {
3821 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3822
3823 seq_printf(seq, "TX Queue %d:\n", queue);
3824
3825 if (priv->extend_desc) {
3826 seq_printf(seq, "Extended descriptor ring:\n");
3827 sysfs_display_ring((void *)tx_q->dma_etx,
3828 DMA_TX_SIZE, 1, seq);
3829 } else {
3830 seq_printf(seq, "Descriptor ring:\n");
3831 sysfs_display_ring((void *)tx_q->dma_tx,
3832 DMA_TX_SIZE, 0, seq);
3833 }
3834 }
3835
3836 return 0;
3837 }
3838
3839 static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
3840 {
3841 return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
3842 }
3843
3844 /* Debugfs files; they should appear in /sys/kernel/debug/stmmaceth/eth0 */
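/* e.g.:  cat /sys/kernel/debug/stmmaceth/eth0/descriptors_status
 *        cat /sys/kernel/debug/stmmaceth/eth0/dma_cap
 */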
3845
3846 static const struct file_operations stmmac_rings_status_fops = {
3847 .owner = THIS_MODULE,
3848 .open = stmmac_sysfs_ring_open,
3849 .read = seq_read,
3850 .llseek = seq_lseek,
3851 .release = single_release,
3852 };
3853
3854 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
3855 {
3856 struct net_device *dev = seq->private;
3857 struct stmmac_priv *priv = netdev_priv(dev);
3858
3859 if (!priv->hw_cap_support) {
3860 seq_printf(seq, "DMA HW features not supported\n");
3861 return 0;
3862 }
3863
3864 seq_printf(seq, "==============================\n");
3865 seq_printf(seq, "\tDMA HW features\n");
3866 seq_printf(seq, "==============================\n");
3867
3868 seq_printf(seq, "\t10/100 Mbps: %s\n",
3869 (priv->dma_cap.mbps_10_100) ? "Y" : "N");
3870 seq_printf(seq, "\t1000 Mbps: %s\n",
3871 (priv->dma_cap.mbps_1000) ? "Y" : "N");
3872 seq_printf(seq, "\tHalf duplex: %s\n",
3873 (priv->dma_cap.half_duplex) ? "Y" : "N");
3874 seq_printf(seq, "\tHash Filter: %s\n",
3875 (priv->dma_cap.hash_filter) ? "Y" : "N");
3876 seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3877 (priv->dma_cap.multi_addr) ? "Y" : "N");
3878 seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
3879 (priv->dma_cap.pcs) ? "Y" : "N");
3880 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3881 (priv->dma_cap.sma_mdio) ? "Y" : "N");
3882 seq_printf(seq, "\tPMT Remote wake up: %s\n",
3883 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3884 seq_printf(seq, "\tPMT Magic Frame: %s\n",
3885 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3886 seq_printf(seq, "\tRMON module: %s\n",
3887 (priv->dma_cap.rmon) ? "Y" : "N");
3888 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3889 (priv->dma_cap.time_stamp) ? "Y" : "N");
3890 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
3891 (priv->dma_cap.atime_stamp) ? "Y" : "N");
3892 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
3893 (priv->dma_cap.eee) ? "Y" : "N");
3894 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3895 seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3896 (priv->dma_cap.tx_coe) ? "Y" : "N");
3897 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3898 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3899 (priv->dma_cap.rx_coe) ? "Y" : "N");
3900 } else {
3901 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3902 (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
3903 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
3904 (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
3905 }
3906 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
3907 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
3908 seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
3909 priv->dma_cap.number_rx_channel);
3910 seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
3911 priv->dma_cap.number_tx_channel);
3912 seq_printf(seq, "\tEnhanced descriptors: %s\n",
3913 (priv->dma_cap.enh_desc) ? "Y" : "N");
3914
3915 return 0;
3916 }
3917
3918 static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
3919 {
3920 return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
3921 }
3922
3923 static const struct file_operations stmmac_dma_cap_fops = {
3924 .owner = THIS_MODULE,
3925 .open = stmmac_sysfs_dma_cap_open,
3926 .read = seq_read,
3927 .llseek = seq_lseek,
3928 .release = single_release,
3929 };
3930
3931 static int stmmac_init_fs(struct net_device *dev)
3932 {
3933 struct stmmac_priv *priv = netdev_priv(dev);
3934
3935 /* Create per netdev entries */
3936 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
3937
3938 if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
3939 netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
3940
3941 return -ENOMEM;
3942 }
3943
3944 /* Entry to report DMA RX/TX rings */
3945 priv->dbgfs_rings_status =
3946 debugfs_create_file("descriptors_status", S_IRUGO,
3947 priv->dbgfs_dir, dev,
3948 &stmmac_rings_status_fops);
3949
3950 if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
3951 netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
3952 debugfs_remove_recursive(priv->dbgfs_dir);
3953
3954 return -ENOMEM;
3955 }
3956
3957 /* Entry to report the DMA HW features */
3958 priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO,
3959 priv->dbgfs_dir,
3960 dev, &stmmac_dma_cap_fops);
3961
3962 if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
3963 netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
3964 debugfs_remove_recursive(priv->dbgfs_dir);
3965
3966 return -ENOMEM;
3967 }
3968
3969 return 0;
3970 }
3971
3972 static void stmmac_exit_fs(struct net_device *dev)
3973 {
3974 struct stmmac_priv *priv = netdev_priv(dev);
3975
3976 debugfs_remove_recursive(priv->dbgfs_dir);
3977 }
3978 #endif /* CONFIG_DEBUG_FS */
3979
3980 static const struct net_device_ops stmmac_netdev_ops = {
3981 .ndo_open = stmmac_open,
3982 .ndo_start_xmit = stmmac_xmit,
3983 .ndo_stop = stmmac_release,
3984 .ndo_change_mtu = stmmac_change_mtu,
3985 .ndo_fix_features = stmmac_fix_features,
3986 .ndo_set_features = stmmac_set_features,
3987 .ndo_set_rx_mode = stmmac_set_rx_mode,
3988 .ndo_tx_timeout = stmmac_tx_timeout,
3989 .ndo_do_ioctl = stmmac_ioctl,
3990 #ifdef CONFIG_NET_POLL_CONTROLLER
3991 .ndo_poll_controller = stmmac_poll_controller,
3992 #endif
3993 .ndo_set_mac_address = stmmac_set_mac_address,
3994 };
3995
3996 /**
3997 * stmmac_hw_init - Init the MAC device
3998 * @priv: driver private structure
3999 * Description: this function configures the MAC device according to
4000 * the platform parameters and the HW capability register. It prepares the
4001 * driver to use either ring or chain mode and to set up either enhanced or
4002 * normal descriptors.
4003 */
4004 static int stmmac_hw_init(struct stmmac_priv *priv)
4005 {
4006 struct mac_device_info *mac;
4007
4008 /* Identify the MAC HW device */
4009 if (priv->plat->setup) {
4010 mac = priv->plat->setup(priv);
4011 } else if (priv->plat->has_gmac) {
4012 priv->dev->priv_flags |= IFF_UNICAST_FLT;
4013 mac = dwmac1000_setup(priv->ioaddr,
4014 priv->plat->multicast_filter_bins,
4015 priv->plat->unicast_filter_entries,
4016 &priv->synopsys_id);
4017 } else if (priv->plat->has_gmac4) {
4018 priv->dev->priv_flags |= IFF_UNICAST_FLT;
4019 mac = dwmac4_setup(priv->ioaddr,
4020 priv->plat->multicast_filter_bins,
4021 priv->plat->unicast_filter_entries,
4022 &priv->synopsys_id);
4023 } else {
4024 mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
4025 }
4026 if (!mac)
4027 return -ENOMEM;
4028
4029 priv->hw = mac;
4030
4031 /* dwmac-sun8i only works in chain mode */
4032 if (priv->plat->has_sun8i)
4033 chain_mode = 1;
4034
4035 /* To use the chained or ring mode */
4036 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
4037 priv->hw->mode = &dwmac4_ring_mode_ops;
4038 } else {
4039 if (chain_mode) {
4040 priv->hw->mode = &chain_mode_ops;
4041 dev_info(priv->device, "Chain mode enabled\n");
4042 priv->mode = STMMAC_CHAIN_MODE;
4043 } else {
4044 priv->hw->mode = &ring_mode_ops;
4045 dev_info(priv->device, "Ring mode enabled\n");
4046 priv->mode = STMMAC_RING_MODE;
4047 }
4048 }
4049
4050 /* Get the HW capabilities (GMAC cores newer than 3.50a) */
4051 priv->hw_cap_support = stmmac_get_hw_features(priv);
4052 if (priv->hw_cap_support) {
4053 dev_info(priv->device, "DMA HW capability register supported\n");
4054
4055 /* We can override some gmac/dma configuration fields (e.g.
4056 * enh_desc, tx_coe) that are passed through the platform
4057 * with the values from the HW capability register
4058 * (if supported).
4059 */
4060 priv->plat->enh_desc = priv->dma_cap.enh_desc;
4061 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
4062 priv->hw->pmt = priv->plat->pmt;
4063
4064 /* TXCOE doesn't work in thresh DMA mode */
4065 if (priv->plat->force_thresh_dma_mode)
4066 priv->plat->tx_coe = 0;
4067 else
4068 priv->plat->tx_coe = priv->dma_cap.tx_coe;
4069
4070 /* In case of GMAC4 rx_coe is from HW cap register. */
4071 priv->plat->rx_coe = priv->dma_cap.rx_coe;
4072
4073 if (priv->dma_cap.rx_coe_type2)
4074 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4075 else if (priv->dma_cap.rx_coe_type1)
4076 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4077
4078 } else {
4079 dev_info(priv->device, "No HW DMA feature register supported\n");
4080 }
4081
4082 /* To use alternate (extended), normal or GMAC4 descriptor structures */
4083 if (priv->synopsys_id >= DWMAC_CORE_4_00)
4084 priv->hw->desc = &dwmac4_desc_ops;
4085 else
4086 stmmac_selec_desc_mode(priv);
4087
4088 if (priv->plat->rx_coe) {
4089 priv->hw->rx_csum = priv->plat->rx_coe;
4090 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
4091 if (priv->synopsys_id < DWMAC_CORE_4_00)
4092 dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
4093 }
4094 if (priv->plat->tx_coe)
4095 dev_info(priv->device, "TX Checksum insertion supported\n");
4096
4097 if (priv->plat->pmt) {
4098 dev_info(priv->device, "Wake-Up On Lan supported\n");
4099 device_set_wakeup_capable(priv->device, 1);
4100 }
4101
4102 if (priv->dma_cap.tsoen)
4103 dev_info(priv->device, "TSO supported\n");
4104
4105 return 0;
4106 }
4107
4108 /**
4109 * stmmac_dvr_probe
4110 * @device: device pointer
4111 * @plat_dat: platform data pointer
4112 * @res: stmmac resource pointer
4113 * Description: this is the main probe function; it calls
4114 * alloc_etherdev and allocates the private structure.
4115 * Return:
4116 * 0 on success, otherwise a negative errno.
4117 */
4118 int stmmac_dvr_probe(struct device *device,
4119 struct plat_stmmacenet_data *plat_dat,
4120 struct stmmac_resources *res)
4121 {
4122 struct net_device *ndev = NULL;
4123 struct stmmac_priv *priv;
4124 int ret = 0;
4125 u32 queue;
4126
4127 ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4128 MTL_MAX_TX_QUEUES,
4129 MTL_MAX_RX_QUEUES);
4130 if (!ndev)
4131 return -ENOMEM;
4132
4133 SET_NETDEV_DEV(ndev, device);
4134
4135 priv = netdev_priv(ndev);
4136 priv->device = device;
4137 priv->dev = ndev;
4138
4139 stmmac_set_ethtool_ops(ndev);
4140 priv->pause = pause;
4141 priv->plat = plat_dat;
4142 priv->ioaddr = res->addr;
4143 priv->dev->base_addr = (unsigned long)res->addr;
4144
4145 priv->dev->irq = res->irq;
4146 priv->wol_irq = res->wol_irq;
4147 priv->lpi_irq = res->lpi_irq;
4148
4149 if (res->mac)
4150 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
4151
4152 dev_set_drvdata(device, priv->dev);
4153
4154 /* Verify driver arguments */
4155 stmmac_verify_args();
4156
4157 /* Override with kernel parameters if supplied XXX CRS XXX
4158 * this needs to have multiple instances
4159 */
4160 if ((phyaddr >= 0) && (phyaddr <= 31))
4161 priv->plat->phy_addr = phyaddr;
4162
4163 if (priv->plat->stmmac_rst) {
4164 ret = reset_control_assert(priv->plat->stmmac_rst);
4165 reset_control_deassert(priv->plat->stmmac_rst);
4166 /* Some reset controllers provide only a reset callback
4167 * instead of an assert + deassert callback pair.
4168 */
4169 if (ret == -ENOTSUPP)
4170 reset_control_reset(priv->plat->stmmac_rst);
4171 }
4172
4173 /* Init MAC and get the capabilities */
4174 ret = stmmac_hw_init(priv);
4175 if (ret)
4176 goto error_hw_init;
4177
4178 /* Configure real RX and TX queues */
4179 netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4180 netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
4181
4182 ndev->netdev_ops = &stmmac_netdev_ops;
4183
4184 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4185 NETIF_F_RXCSUM;
4186
4187 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
4188 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
4189 priv->tso = true;
4190 dev_info(priv->device, "TSO feature enabled\n");
4191 }
4192 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4193 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
4194 #ifdef STMMAC_VLAN_TAG_USED
4195 /* Both mac100 and gmac support receive VLAN tag detection */
4196 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
4197 #endif
4198 priv->msg_enable = netif_msg_init(debug, default_msg_level);
4199
4200 /* MTU range: 46 - hw-specific max */
4201 ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4202 if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4203 ndev->max_mtu = JUMBO_LEN;
4204 else
4205 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
4206 /* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
4207 * or if plat->maxmtu < ndev->min_mtu, which is an invalid range.
4208 */
4209 if ((priv->plat->maxmtu < ndev->max_mtu) &&
4210 (priv->plat->maxmtu >= ndev->min_mtu))
4211 ndev->max_mtu = priv->plat->maxmtu;
4212 else if (priv->plat->maxmtu < ndev->min_mtu)
4213 dev_warn(priv->device,
4214 "%s: warning: maxmtu having invalid value (%d)\n",
4215 __func__, priv->plat->maxmtu);
4216
4217 if (flow_ctrl)
4218 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
4219
4220 /* Rx Watchdog is available in cores newer than 3.40.
4221 * In some cases, for example on buggy HW, this feature
4222 * has to be disabled; this can be done by passing the
4223 * riwt_off field from the platform.
4224 */
4225 if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
4226 priv->use_riwt = 1;
4227 dev_info(priv->device,
4228 "Enable RX Mitigation via HW Watchdog Timer\n");
4229 }
4230
4231 for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4232 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4233
4234 netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
4235 (8 * priv->plat->rx_queues_to_use));
4236 }
4237
4238 spin_lock_init(&priv->lock);
4239
4240 /* If a specific clk_csr value is passed from the platform,
4241 * the CSR Clock Range selection cannot be changed at run-time
4242 * and is fixed. Otherwise, the driver will try to set the MDC
4243 * clock dynamically according to the actual CSR clock
4244 * input.
4245 */
4246 if (!priv->plat->clk_csr)
4247 stmmac_clk_csr_set(priv);
4248 else
4249 priv->clk_csr = priv->plat->clk_csr;
4250
4251 stmmac_check_pcs_mode(priv);
4252
4253 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4254 priv->hw->pcs != STMMAC_PCS_TBI &&
4255 priv->hw->pcs != STMMAC_PCS_RTBI) {
4256 /* MDIO bus Registration */
4257 ret = stmmac_mdio_register(ndev);
4258 if (ret < 0) {
4259 dev_err(priv->device,
4260 "%s: MDIO bus (id: %d) registration failed",
4261 __func__, priv->plat->bus_id);
4262 goto error_mdio_register;
4263 }
4264 }
4265
4266 ret = register_netdev(ndev);
4267 if (ret) {
4268 dev_err(priv->device, "%s: ERROR %i registering the device\n",
4269 __func__, ret);
4270 goto error_netdev_register;
4271 }
4272
4273 return ret;
4274
4275 error_netdev_register:
4276 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4277 priv->hw->pcs != STMMAC_PCS_TBI &&
4278 priv->hw->pcs != STMMAC_PCS_RTBI)
4279 stmmac_mdio_unregister(ndev);
4280 error_mdio_register:
4281 for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4282 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4283
4284 netif_napi_del(&rx_q->napi);
4285 }
4286 error_hw_init:
4287 free_netdev(ndev);
4288
4289 return ret;
4290 }
4291 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
4292
4293 /**
4294 * stmmac_dvr_remove
4295 * @dev: device pointer
4296 * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
4297 * changes the link status, and releases the DMA descriptor rings.
4298 */
4299 int stmmac_dvr_remove(struct device *dev)
4300 {
4301 struct net_device *ndev = dev_get_drvdata(dev);
4302 struct stmmac_priv *priv = netdev_priv(ndev);
4303
4304 netdev_info(priv->dev, "%s: removing driver", __func__);
4305
4306 stmmac_stop_all_dma(priv);
4307
4308 priv->hw->mac->set_mac(priv->ioaddr, false);
4309 netif_carrier_off(ndev);
4310 unregister_netdev(ndev);
4311 if (priv->plat->stmmac_rst)
4312 reset_control_assert(priv->plat->stmmac_rst);
4313 clk_disable_unprepare(priv->plat->pclk);
4314 clk_disable_unprepare(priv->plat->stmmac_clk);
4315 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4316 priv->hw->pcs != STMMAC_PCS_TBI &&
4317 priv->hw->pcs != STMMAC_PCS_RTBI)
4318 stmmac_mdio_unregister(ndev);
4319 free_netdev(ndev);
4320
4321 return 0;
4322 }
4323 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
4324
4325 /**
4326 * stmmac_suspend - suspend callback
4327 * @dev: device pointer
4328 * Description: this function suspends the device; it is called by the
4329 * platform driver to stop the network queues, program the PMT register
4330 * (for WoL), and clean up and release the driver resources.
4331 */
4332 int stmmac_suspend(struct device *dev)
4333 {
4334 struct net_device *ndev = dev_get_drvdata(dev);
4335 struct stmmac_priv *priv = netdev_priv(ndev);
4336 unsigned long flags;
4337
4338 if (!ndev || !netif_running(ndev))
4339 return 0;
4340
4341 if (ndev->phydev)
4342 phy_stop(ndev->phydev);
4343
4344 spin_lock_irqsave(&priv->lock, flags);
4345
4346 netif_device_detach(ndev);
4347 stmmac_stop_all_queues(priv);
4348
4349 stmmac_disable_all_queues(priv);
4350
4351 /* Stop TX/RX DMA */
4352 stmmac_stop_all_dma(priv);
4353
4354 /* Enable Power down mode by programming the PMT regs */
4355 if (device_may_wakeup(priv->device)) {
4356 priv->hw->mac->pmt(priv->hw, priv->wolopts);
4357 priv->irq_wake = 1;
4358 } else {
4359 priv->hw->mac->set_mac(priv->ioaddr, false);
4360 pinctrl_pm_select_sleep_state(priv->device);
4361 /* Disable the clocks when PWM is off */
4362 clk_disable(priv->plat->pclk);
4363 clk_disable(priv->plat->stmmac_clk);
4364 }
4365 spin_unlock_irqrestore(&priv->lock, flags);
4366
4367 priv->oldlink = false;
4368 priv->speed = SPEED_UNKNOWN;
4369 priv->oldduplex = DUPLEX_UNKNOWN;
4370 return 0;
4371 }
4372 EXPORT_SYMBOL_GPL(stmmac_suspend);
4373
4374 /**
4375 * stmmac_reset_queues_param - reset queue parameters
4376 * @priv: driver private structure
4377 */
4378 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4379 {
4380 u32 rx_cnt = priv->plat->rx_queues_to_use;
4381 u32 tx_cnt = priv->plat->tx_queues_to_use;
4382 u32 queue;
4383
4384 for (queue = 0; queue < rx_cnt; queue++) {
4385 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4386
4387 rx_q->cur_rx = 0;
4388 rx_q->dirty_rx = 0;
4389 }
4390
4391 for (queue = 0; queue < tx_cnt; queue++) {
4392 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4393
4394 tx_q->cur_tx = 0;
4395 tx_q->dirty_tx = 0;
4396 }
4397 }
4398
4399 /**
4400 * stmmac_resume - resume callback
4401 * @dev: device pointer
4402 * Description: on resume this function is invoked to set up the DMA and CORE
4403 * in a usable state.
4404 */
4405 int stmmac_resume(struct device *dev)
4406 {
4407 struct net_device *ndev = dev_get_drvdata(dev);
4408 struct stmmac_priv *priv = netdev_priv(ndev);
4409 unsigned long flags;
4410
4411 if (!netif_running(ndev))
4412 return 0;
4413
4414 /* The Power Down bit in the PMT register is cleared
4415 * automatically as soon as a magic packet or a Wake-up frame
4416 * is received. Nevertheless, it's better to clear this bit
4417 * manually because it can cause problems while resuming
4418 * from other devices (e.g. a serial console).
4419 */
4420 if (device_may_wakeup(priv->device)) {
4421 spin_lock_irqsave(&priv->lock, flags);
4422 priv->hw->mac->pmt(priv->hw, 0);
4423 spin_unlock_irqrestore(&priv->lock, flags);
4424 priv->irq_wake = 0;
4425 } else {
4426 pinctrl_pm_select_default_state(priv->device);
4427 /* enable the clk previously disabled */
4428 clk_enable(priv->plat->stmmac_clk);
4429 clk_enable(priv->plat->pclk);
4430 /* reset the phy so that it's ready */
4431 if (priv->mii)
4432 stmmac_mdio_reset(priv->mii);
4433 }
4434
4435 netif_device_attach(ndev);
4436
4437 spin_lock_irqsave(&priv->lock, flags);
4438
4439 stmmac_reset_queues_param(priv);
4440
4441 /* Reset the private MSS value so that the MSS context is set again
4442 * on the next TSO xmit (only used for GMAC4).
4443 */
4444 priv->mss = 0;
4445
4446 stmmac_clear_descriptors(priv);
4447
4448 stmmac_hw_setup(ndev, false);
4449 stmmac_init_tx_coalesce(priv);
4450 stmmac_set_rx_mode(ndev);
4451
4452 stmmac_enable_all_queues(priv);
4453
4454 stmmac_start_all_queues(priv);
4455
4456 spin_unlock_irqrestore(&priv->lock, flags);
4457
4458 if (ndev->phydev)
4459 phy_start(ndev->phydev);
4460
4461 return 0;
4462 }
4463 EXPORT_SYMBOL_GPL(stmmac_resume);
4464
4465 #ifndef MODULE
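/* Kernel command-line parsing for built-in use: "stmmaceth=" takes a
 * comma-separated list of "option:value" pairs matching the module
 * parameters, e.g.:
 *
 *	stmmaceth=debug:16,phyaddr:1,buf_sz:4096,chain_mode:1
 */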
4466 static int __init stmmac_cmdline_opt(char *str)
4467 {
4468 char *opt;
4469
4470 if (!str || !*str)
4471 return -EINVAL;
4472 while ((opt = strsep(&str, ",")) != NULL) {
4473 if (!strncmp(opt, "debug:", 6)) {
4474 if (kstrtoint(opt + 6, 0, &debug))
4475 goto err;
4476 } else if (!strncmp(opt, "phyaddr:", 8)) {
4477 if (kstrtoint(opt + 8, 0, &phyaddr))
4478 goto err;
4479 } else if (!strncmp(opt, "buf_sz:", 7)) {
4480 if (kstrtoint(opt + 7, 0, &buf_sz))
4481 goto err;
4482 } else if (!strncmp(opt, "tc:", 3)) {
4483 if (kstrtoint(opt + 3, 0, &tc))
4484 goto err;
4485 } else if (!strncmp(opt, "watchdog:", 9)) {
4486 if (kstrtoint(opt + 9, 0, &watchdog))
4487 goto err;
4488 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
4489 if (kstrtoint(opt + 10, 0, &flow_ctrl))
4490 goto err;
4491 } else if (!strncmp(opt, "pause:", 6)) {
4492 if (kstrtoint(opt + 6, 0, &pause))
4493 goto err;
4494 } else if (!strncmp(opt, "eee_timer:", 10)) {
4495 if (kstrtoint(opt + 10, 0, &eee_timer))
4496 goto err;
4497 } else if (!strncmp(opt, "chain_mode:", 11)) {
4498 if (kstrtoint(opt + 11, 0, &chain_mode))
4499 goto err;
4500 }
4501 }
4502 return 0;
4503
4504 err:
4505 pr_err("%s: ERROR broken module parameter conversion", __func__);
4506 return -EINVAL;
4507 }
4508
4509 __setup("stmmaceth=", stmmac_cmdline_opt);
4510 #endif /* MODULE */
4511
4512 static int __init stmmac_init(void)
4513 {
4514 #ifdef CONFIG_DEBUG_FS
4515 /* Create debugfs main directory if it doesn't exist yet */
4516 if (!stmmac_fs_dir) {
4517 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4518
4519 if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4520 pr_err("ERROR %s, debugfs create directory failed\n",
4521 STMMAC_RESOURCE_NAME);
4522
4523 return -ENOMEM;
4524 }
4525 }
4526 #endif
4527
4528 return 0;
4529 }
4530
4531 static void __exit stmmac_exit(void)
4532 {
4533 #ifdef CONFIG_DEBUG_FS
4534 debugfs_remove_recursive(stmmac_fs_dir);
4535 #endif
4536 }
4537
4538 module_init(stmmac_init)
4539 module_exit(stmmac_exit)
4540
4541 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4542 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4543 MODULE_LICENSE("GPL");