/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

	Copyright(C) 2007-2011 STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
	http://www.stlinux.com
  Support available at:
	https://bugzilla.stlinux.com/
*******************************************************************************/

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"

#define STMMAC_ALIGN(x)		L1_CACHE_ALIGN(x)
#define TSO_MAX_BUFF_SIZE	(SZ_16K - 1)

/* Module parameters */
#define TX_TIMEO	5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, S_IRUGO);
MODULE_PARM_DESC(phyaddr, "Physical device address");

#define STMMAC_TX_THRESH	(DMA_TX_SIZE / 4)
#define STMMAC_RX_THRESH	(DMA_RX_SIZE / 4)

static int flow_ctrl = FLOW_OFF;
module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define	DEFAULT_BUFSIZE	1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define	STMMAC_RX_COPYBREAK	256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))

/* By default the driver uses the ring mode to manage tx and rx descriptors,
 * but allows the user to force the chain mode instead of the ring.
 */
static unsigned int chain_mode;
module_param(chain_mode, int, S_IRUGO);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");

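/* Illustrative note (editor's sketch, not part of the upstream sources): the
 * variables above are standard module parameters, so assuming the driver is
 * built as the stmmac module they can be overridden at load time, e.g.:
 *
 *   modprobe stmmac buf_sz=4096 tc=128 chain_mode=1 eee_timer=2000
 *
 * which would request 4 KiB DMA buffers, a DMA threshold of 128, chain
 * (rather than ring) descriptor mode and a 2 s LPI timer. Values outside the
 * ranges checked in stmmac_verify_args() below are reset to their defaults.
 */
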
static irqreturn_t stmmac_interrupt(int irq, void *dev_id);

#ifdef CONFIG_DEBUG_FS
static int stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))

/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case of
 * errors.
 */
static void stmmac_verify_args(void)
{
	if (unlikely(watchdog < 0))
		watchdog = TX_TIMEO;
	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
		buf_sz = DEFAULT_BUFSIZE;
	if (unlikely(flow_ctrl > 1))
		flow_ctrl = FLOW_AUTO;
	else if (likely(flow_ctrl < 0))
		flow_ctrl = FLOW_OFF;
	if (unlikely((pause < 0) || (pause > 0xffff)))
		pause = PAUSE_TIME;
	if (eee_timer < 0)
		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}

/**
 * stmmac_disable_all_queues - Disable all queues
 * @priv: driver private structure
 */
static void stmmac_disable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < rx_queues_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		napi_disable(&rx_q->napi);
	}
}

/**
 * stmmac_enable_all_queues - Enable all queues
 * @priv: driver private structure
 */
static void stmmac_enable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < rx_queues_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		napi_enable(&rx_q->napi);
	}
}

/**
 * stmmac_stop_all_queues - Stop all queues
 * @priv: driver private structure
 */
static void stmmac_stop_all_queues(struct stmmac_priv *priv)
{
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < tx_queues_cnt; queue++)
		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
}

/**
 * stmmac_start_all_queues - Start all queues
 * @priv: driver private structure
 */
static void stmmac_start_all_queues(struct stmmac_priv *priv)
{
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < tx_queues_cnt; queue++)
		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
}

/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *	If a specific clk_csr value is passed from the platform
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and it is fixed (as reported in the driver
 *	documentation). Otherwise the driver will try to set the MDC
 *	clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
	u32 clk_rate;

	clk_rate = clk_get_rate(priv->plat->stmmac_clk);

	/* Platform provided default clk_csr would be assumed valid
	 * for all other cases except for the below mentioned ones.
	 * For values higher than the IEEE 802.3 specified frequency
	 * we cannot estimate the proper divider as the frequency of
	 * clk_csr_i is not known. So we do not change the default
	 * divider.
	 */
	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
		if (clk_rate < CSR_F_35M)
			priv->clk_csr = STMMAC_CSR_20_35M;
		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
			priv->clk_csr = STMMAC_CSR_35_60M;
		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
			priv->clk_csr = STMMAC_CSR_60_100M;
		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
			priv->clk_csr = STMMAC_CSR_100_150M;
		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
			priv->clk_csr = STMMAC_CSR_150_250M;
		else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
			priv->clk_csr = STMMAC_CSR_250_300M;
	}

	if (priv->plat->has_sun8i) {
		if (clk_rate > 160000000)
			priv->clk_csr = 0x03;
		else if (clk_rate > 80000000)
			priv->clk_csr = 0x02;
		else if (clk_rate > 40000000)
			priv->clk_csr = 0x01;
		else
			priv->clk_csr = 0;
	}
}

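/* Worked example (illustrative only): with a 75 MHz csr clock returned by
 * clk_get_rate() and no platform-fixed clk_csr, the walk above selects
 * STMMAC_CSR_60_100M, i.e. the MDC divider suited to a CSR clock in the
 * 60-100 MHz range; on a sun8i platform the same 75 MHz rate would instead
 * pick the raw value 0x01 (rate above 40 MHz but not above 80 MHz).
 */
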
static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}

static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	u32 avail;

	if (tx_q->dirty_tx > tx_q->cur_tx)
		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
	else
		avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;

	return avail;
}

/**
 * stmmac_rx_dirty - Get RX queue dirty
 * @priv: driver private structure
 * @queue: RX queue index
 */
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	u32 dirty;

	if (rx_q->dirty_rx <= rx_q->cur_rx)
		dirty = rx_q->cur_rx - rx_q->dirty_rx;
	else
		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;

	return dirty;
}

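/* Worked example (illustrative only, assuming DMA_TX_SIZE = 512) of the ring
 * arithmetic above: with cur_tx = 510 and dirty_tx = 5, stmmac_tx_avail()
 * returns 512 - 510 + 5 - 1 = 6 free slots; on the RX side, cur_rx = 100 and
 * dirty_rx = 90 make stmmac_rx_dirty() report 10 descriptors waiting to be
 * refilled.
 */
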
/**
 * stmmac_hw_fix_mac_speed - callback for speed selection
 * @priv: driver private structure
 * Description: on some platforms (e.g. ST), some HW system configuration
 * registers have to be set according to the link speed negotiated.
 */
static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
{
	struct net_device *ndev = priv->dev;
	struct phy_device *phydev = ndev->phydev;

	if (likely(priv->plat->fix_mac_speed))
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
}

/**
 * stmmac_enable_eee_mode - check and enter in LPI mode
 * @priv: driver private structure
 * Description: this function verifies and enters the LPI mode in case of
 * EEE.
 */
static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* check if all TX queues have the work finished */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		if (tx_q->dirty_tx != tx_q->cur_tx)
			return; /* still unfinished work */
	}

	/* Check and enter in LPI mode */
	if (!priv->tx_path_in_lpi_mode)
		priv->hw->mac->set_eee_mode(priv->hw,
					    priv->plat->en_tx_lpi_clockgating);
}

/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function exits and disables EEE when the LPI state is
 * true. This is called by the xmit.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	priv->hw->mac->reset_eee_mode(priv->hw);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @arg : data hook
 * Description:
 *  if there is no data transfer and if we are not in LPI state,
 *  then the MAC Transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(unsigned long arg)
{
	struct stmmac_priv *priv = (struct stmmac_priv *)arg;

	stmmac_enable_eee_mode(priv);
	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
}

/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts the
 *  related timer.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	struct net_device *ndev = priv->dev;
	unsigned long flags;
	bool ret = false;

	/* Using PCS we cannot deal with the PHY registers at this stage
	 * so we do not support extra features like EEE.
	 */
	if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
	    (priv->hw->pcs == STMMAC_PCS_TBI) ||
	    (priv->hw->pcs == STMMAC_PCS_RTBI))
		goto out;

	/* MAC core supports the EEE feature. */
	if (priv->dma_cap.eee) {
		int tx_lpi_timer = priv->tx_lpi_timer;

		/* Check if the PHY supports EEE */
		if (phy_init_eee(ndev->phydev, 1)) {
			/* To manage at run-time if the EEE cannot be supported
			 * anymore (for example because the lp caps have been
			 * changed).
			 * In that case the driver disables its own timers.
			 */
			spin_lock_irqsave(&priv->lock, flags);
			if (priv->eee_active) {
				netdev_dbg(priv->dev, "disable EEE\n");
				del_timer_sync(&priv->eee_ctrl_timer);
				priv->hw->mac->set_eee_timer(priv->hw, 0,
							     tx_lpi_timer);
			}
			priv->eee_active = 0;
			spin_unlock_irqrestore(&priv->lock, flags);
			goto out;
		}
		/* Activate the EEE and start timers */
		spin_lock_irqsave(&priv->lock, flags);
		if (!priv->eee_active) {
			priv->eee_active = 1;
			setup_timer(&priv->eee_ctrl_timer,
				    stmmac_eee_ctrl_timer,
				    (unsigned long)priv);
			mod_timer(&priv->eee_ctrl_timer,
				  STMMAC_LPI_T(eee_timer));

			priv->hw->mac->set_eee_timer(priv->hw,
						     STMMAC_DEFAULT_LIT_LS,
						     tx_lpi_timer);
		}
		/* Set HW EEE according to the speed */
		priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);

		ret = true;
		spin_unlock_irqrestore(&priv->lock, flags);

		netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
	}
out:
	return ret;
}

/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function will read the timestamp from the descriptor and pass it to
 * the stack. It also performs some sanity checks.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
				   struct dma_desc *p, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	u64 ns;

	if (!priv->hwts_tx_en)
		return;

	/* exit if skb doesn't support hw tstamp */
	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
		return;

	/* check tx tstamp status */
	if (priv->hw->desc->get_tx_timestamp_status(p)) {
		/* get the valid tstamp */
		ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);

		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp.hwtstamp = ns_to_ktime(ns);

		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
		/* pass tstamp to stack */
		skb_tstamp_tx(skb, &shhwtstamp);
	}

	return;
}

/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function will read the received packet's timestamp from the descriptor
 * and pass it to the stack. It also performs some sanity checks.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
				   struct dma_desc *np, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
	u64 ns;

	if (!priv->hwts_rx_en)
		return;

	/* Check if timestamp is available */
	if (priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
		/* For GMAC4, the valid timestamp is from CTX next desc. */
		if (priv->plat->has_gmac4)
			ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
		else
			ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);

		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
		shhwtstamp = skb_hwtstamps(skb);
		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp->hwtstamp = ns_to_ktime(ns);
	} else {
		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
	}
}

/**
 *  stmmac_hwtstamp_ioctl - control hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function configures the MAC to enable/disable both outgoing(TX)
 *  and incoming(RX) packets time stamping based on user input.
 *  Return Value:
 *  0 on success and an appropriate -ve integer on failure.
 */
static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	struct timespec64 now;
	u64 temp = 0;
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;
	u32 value = 0;
	u32 sec_inc;

	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(struct hwtstamp_config)))
		return -EFAULT;

	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config.flags, config.tx_type, config.rx_filter);

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	if (priv->adv_ts) {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* take time stamp for all event messages */
			if (priv->plat->has_gmac4)
				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
			else
				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			if (priv->plat->has_gmac4)
				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
			else
				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.AS1 any layer, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			if (priv->plat->has_gmac4)
				snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
			else
				snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.AS1, any layer, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.AS1, any layer, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_NTP_ALL:
		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
		priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
	else {
		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
			 tstamp_all | ptp_v2 | ptp_over_ethernet |
			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
			 ts_master_en | snap_type_sel);
		priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);

		/* program Sub Second Increment reg */
		sec_inc = priv->hw->ptp->config_sub_second_increment(
			priv->ptpaddr, priv->plat->clk_ptp_rate,
			priv->plat->has_gmac4);
		temp = div_u64(1000000000ULL, sec_inc);

		/* calculate default added value:
		 * formula is :
		 * addend = (2^32)/freq_div_ratio;
		 * where, freq_div_ratio = 1e9ns/sec_inc
		 */
		temp = (u64)(temp << 32);
		priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
		priv->hw->ptp->config_addend(priv->ptpaddr,
					     priv->default_addend);

		/* initialize system time */
		ktime_get_real_ts64(&now);

		/* lower 32 bits of tv_sec are safe until y2106 */
		priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
					    now.tv_nsec);
	}

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
}

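/* Worked example for the addend computation above (illustrative numbers, not
 * from a specific platform): if config_sub_second_increment() returns
 * sec_inc = 40 ns and clk_ptp_rate = 50 MHz, then
 *   temp   = 10^9 / 40                  = 25,000,000
 *   addend = (temp << 32) / 50,000,000  = 2^31 = 0x80000000
 * so the 32-bit accumulator overflows, and the sub-second counter advances by
 * 40 ns, once every two 50 MHz PTP clock cycles.
 */
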
/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
 * This is done by looking at the HW cap. register.
 * This function also registers the ptp driver.
 */
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	priv->adv_ts = 0;
	/* Check if adv_ts can be enabled for dwmac 4.x core */
	if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;
	/* Dwmac 3.x core with extend_desc can support adv_ts */
	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;

	if (priv->dma_cap.time_stamp)
		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");

	if (priv->adv_ts)
		netdev_info(priv->dev,
			    "IEEE 1588-2008 Advanced Timestamp supported\n");

	priv->hw->ptp = &stmmac_ptp;
	priv->hwts_tx_en = 0;
	priv->hwts_rx_en = 0;

	stmmac_ptp_register(priv);

	return 0;
}

static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	if (priv->plat->clk_ptp_ref)
		clk_disable_unprepare(priv->plat->clk_ptp_ref);
	stmmac_ptp_unregister(priv);
}

/**
 *  stmmac_mac_flow_ctrl - Configure flow control in all queues
 *  @priv: driver private structure
 *  Description: It is used for configuring the flow control in all queues
 */
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;

	priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl,
				 priv->pause, tx_cnt);
}

/**
 * stmmac_adjust_link - adjusts the link parameters
 * @dev: net device structure
 * Description: this is the helper called by the physical abstraction layer
 * drivers to communicate the phy link status. According to the speed and
 * duplex this driver can invoke registered glue-logic as well.
 * It also invokes the eee initialization because it could happen when
 * switching on different networks (that are eee capable).
 */
static void stmmac_adjust_link(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned long flags;
	bool new_state = false;

	if (!phydev)
		return;

	spin_lock_irqsave(&priv->lock, flags);

	if (phydev->link) {
		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != priv->oldduplex) {
			new_state = true;
			if (!phydev->duplex)
				ctrl &= ~priv->hw->link.duplex;
			else
				ctrl |= priv->hw->link.duplex;
			priv->oldduplex = phydev->duplex;
		}
		/* Flow Control operation */
		if (phydev->pause)
			stmmac_mac_flow_ctrl(priv, phydev->duplex);

		if (phydev->speed != priv->speed) {
			new_state = true;
			ctrl &= ~priv->hw->link.speed_mask;
			switch (phydev->speed) {
			case SPEED_1000:
				ctrl |= priv->hw->link.speed1000;
				break;
			case SPEED_100:
				ctrl |= priv->hw->link.speed100;
				break;
			case SPEED_10:
				ctrl |= priv->hw->link.speed10;
				break;
			default:
				netif_warn(priv, link, priv->dev,
					   "broken speed: %d\n", phydev->speed);
				phydev->speed = SPEED_UNKNOWN;
				break;
			}
			if (phydev->speed != SPEED_UNKNOWN)
				stmmac_hw_fix_mac_speed(priv);
			priv->speed = phydev->speed;
		}

		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

		if (!priv->oldlink) {
			new_state = true;
			priv->oldlink = true;
		}
	} else if (priv->oldlink) {
		new_state = true;
		priv->oldlink = false;
		priv->speed = SPEED_UNKNOWN;
		priv->oldduplex = DUPLEX_UNKNOWN;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (phydev->is_pseudo_fixed_link)
		/* Stop PHY layer to call the hook to adjust the link in case
		 * of a switch is attached to the stmmac driver.
		 */
		phydev->irq = PHY_IGNORE_INTERRUPT;
	else
		/* At this stage, init the EEE if supported.
		 * Never called in case of fixed_link.
		 */
		priv->eee_enabled = stmmac_eee_init(priv);
}

/**
 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PCS.
 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
 * configured for the TBI, RTBI, or SGMII PHY interface.
 */
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
	int interface = priv->plat->interface;

	if (priv->dma_cap.pcs) {
		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_RGMII;
		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_SGMII;
		}
	}
}

/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 *  Return value:
 *  0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev;
	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
	char bus_id[MII_BUS_ID_SIZE];
	int interface = priv->plat->interface;
	int max_speed = priv->plat->max_speed;
	priv->oldlink = false;
	priv->speed = SPEED_UNKNOWN;
	priv->oldduplex = DUPLEX_UNKNOWN;

	if (priv->plat->phy_node) {
		phydev = of_phy_connect(dev, priv->plat->phy_node,
					&stmmac_adjust_link, 0, interface);
	} else {
		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
			 priv->plat->bus_id);

		snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
			 priv->plat->phy_addr);
		netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
			   phy_id_fmt);

		phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
				     interface);
	}

	if (IS_ERR_OR_NULL(phydev)) {
		netdev_err(priv->dev, "Could not attach to PHY\n");
		if (!phydev)
			return -ENODEV;

		return PTR_ERR(phydev);
	}

	/* Stop Advertising 1000BASE Capability if interface is not GMII */
	if ((interface == PHY_INTERFACE_MODE_MII) ||
	    (interface == PHY_INTERFACE_MODE_RMII) ||
	    (max_speed < 1000 && max_speed > 0))
		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
					 SUPPORTED_1000baseT_Full);

	/*
	 * Broken HW is sometimes missing the pull-up resistor on the
	 * MDIO line, which results in reads to non-existent devices returning
	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
	 * device as well.
	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
	 */
	if (!priv->plat->phy_node && phydev->phy_id == 0) {
		phy_disconnect(phydev);
		return -ENODEV;
	}

	/* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
	 * subsequent PHY polling, make sure we force a link transition if
	 * we have a UP/DOWN/UP transition
	 */
	if (phydev->is_pseudo_fixed_link)
		phydev->irq = PHY_POLL;

	phy_attached_info(phydev);
	return 0;
}

static void stmmac_display_rx_rings(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	void *head_rx;
	u32 queue;

	/* Display RX rings */
	for (queue = 0; queue < rx_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		pr_info("\tRX Queue %u rings\n", queue);

		if (priv->extend_desc)
			head_rx = (void *)rx_q->dma_erx;
		else
			head_rx = (void *)rx_q->dma_rx;

		/* Display RX ring */
		priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
	}
}

static void stmmac_display_tx_rings(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	void *head_tx;
	u32 queue;

	/* Display TX rings */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		pr_info("\tTX Queue %d rings\n", queue);

		if (priv->extend_desc)
			head_tx = (void *)tx_q->dma_etx;
		else
			head_tx = (void *)tx_q->dma_tx;

		priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
	}
}

static void stmmac_display_rings(struct stmmac_priv *priv)
{
	/* Display RX ring */
	stmmac_display_rx_rings(priv);

	/* Display TX ring */
	stmmac_display_tx_rings(priv);
}

static int stmmac_set_bfsize(int mtu, int bufsize)
{
	int ret = bufsize;

	if (mtu >= BUF_SIZE_4KiB)
		ret = BUF_SIZE_8KiB;
	else if (mtu >= BUF_SIZE_2KiB)
		ret = BUF_SIZE_4KiB;
	else if (mtu > DEFAULT_BUFSIZE)
		ret = BUF_SIZE_2KiB;
	else
		ret = DEFAULT_BUFSIZE;

	return ret;
}

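/* Illustrative mapping of stmmac_set_bfsize() above: an MTU of 1500 keeps the
 * default 1536 byte buffer, 2500 selects BUF_SIZE_4KiB and 5000 selects
 * BUF_SIZE_8KiB (the 4 KiB and 8 KiB buffer sizes used elsewhere in the
 * driver).
 */
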
/**
 * stmmac_clear_rx_descriptors - clear RX descriptors
 * @priv: driver private structure
 * @queue: RX queue index
 * Description: this function is called to clear the RX descriptors,
 * whether basic or extended descriptors are used.
 */
static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int i;

	/* Clear the RX descriptors */
	for (i = 0; i < DMA_RX_SIZE; i++)
		if (priv->extend_desc)
			priv->hw->desc->init_rx_desc(&rx_q->dma_erx[i].basic,
						     priv->use_riwt, priv->mode,
						     (i == DMA_RX_SIZE - 1));
		else
			priv->hw->desc->init_rx_desc(&rx_q->dma_rx[i],
						     priv->use_riwt, priv->mode,
						     (i == DMA_RX_SIZE - 1));
}

/**
 * stmmac_clear_tx_descriptors - clear tx descriptors
 * @priv: driver private structure
 * @queue: TX queue index.
 * Description: this function is called to clear the TX descriptors,
 * whether basic or extended descriptors are used.
 */
static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	int i;

	/* Clear the TX descriptors */
	for (i = 0; i < DMA_TX_SIZE; i++)
		if (priv->extend_desc)
			priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
						     priv->mode,
						     (i == DMA_TX_SIZE - 1));
		else
			priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
						     priv->mode,
						     (i == DMA_TX_SIZE - 1));
}

/**
 * stmmac_clear_descriptors - clear descriptors
 * @priv: driver private structure
 * Description: this function is called to clear the TX and RX descriptors,
 * whether basic or extended descriptors are used.
 */
static void stmmac_clear_descriptors(struct stmmac_priv *priv)
{
	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* Clear the RX descriptors */
	for (queue = 0; queue < rx_queue_cnt; queue++)
		stmmac_clear_rx_descriptors(priv, queue);

	/* Clear the TX descriptors */
	for (queue = 0; queue < tx_queue_cnt; queue++)
		stmmac_clear_tx_descriptors(priv, queue);
}

/**
 * stmmac_init_rx_buffers - init the RX descriptor buffer.
 * @priv: driver private structure
 * @p: descriptor pointer
 * @i: descriptor index
 * @flags: gfp flag
 * @queue: RX queue index
 * Description: this function is called to allocate a receive buffer, perform
 * the DMA mapping and init the descriptor.
 */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
				  int i, gfp_t flags, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	struct sk_buff *skb;

	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
	if (!skb) {
		netdev_err(priv->dev,
			   "%s: Rx init fails; skb is NULL\n", __func__);
		return -ENOMEM;
	}
	rx_q->rx_skbuff[i] = skb;
	rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
						priv->dma_buf_sz,
						DMA_FROM_DEVICE);
	if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	if (priv->synopsys_id >= DWMAC_CORE_4_00)
		p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
	else
		p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);

	if ((priv->hw->mode->init_desc3) &&
	    (priv->dma_buf_sz == BUF_SIZE_16KiB))
		priv->hw->mode->init_desc3(p);

	return 0;
}

/**
 * stmmac_free_rx_buffer - free RX dma buffers
 * @priv: private structure
 * @queue: RX queue index
 * @i: buffer index.
 */
static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

	if (rx_q->rx_skbuff[i]) {
		dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
				 priv->dma_buf_sz, DMA_FROM_DEVICE);
		dev_kfree_skb_any(rx_q->rx_skbuff[i]);
	}
	rx_q->rx_skbuff[i] = NULL;
}

/**
 * stmmac_free_tx_buffer - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
 * @i: buffer index.
 */
static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

	if (tx_q->tx_skbuff_dma[i].buf) {
		if (tx_q->tx_skbuff_dma[i].map_as_page)
			dma_unmap_page(priv->device,
				       tx_q->tx_skbuff_dma[i].buf,
				       tx_q->tx_skbuff_dma[i].len,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->device,
					 tx_q->tx_skbuff_dma[i].buf,
					 tx_q->tx_skbuff_dma[i].len,
					 DMA_TO_DEVICE);
	}

	if (tx_q->tx_skbuff[i]) {
		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
		tx_q->tx_skbuff[i] = NULL;
		tx_q->tx_skbuff_dma[i].buf = 0;
		tx_q->tx_skbuff_dma[i].map_as_page = false;
	}
}

47dd7a54 1207 * @dev: net device structure
732fdf0e 1208 * @flags: gfp flag.
71fedb01 1209 * Description: this function initializes the DMA RX descriptors
5bacd778 1210 * and allocates the socket buffers. It supports the chained and ring
286a8372 1211 * modes.
47dd7a54 1212 */
71fedb01 1213static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
47dd7a54 1214{
47dd7a54 1215 struct stmmac_priv *priv = netdev_priv(dev);
54139cf3 1216 u32 rx_count = priv->plat->rx_queues_to_use;
4a7d666a 1217 unsigned int bfsize = 0;
56329137 1218 int ret = -ENOMEM;
1d3028f4 1219 int queue;
54139cf3 1220 int i;
47dd7a54 1221
29896a67
GC
1222 if (priv->hw->mode->set_16kib_bfsize)
1223 bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
286a8372 1224
4a7d666a 1225 if (bfsize < BUF_SIZE_16KiB)
286a8372 1226 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
47dd7a54 1227
2618abb7
VB
1228 priv->dma_buf_sz = bfsize;
1229
54139cf3 1230 /* RX INITIALIZATION */
b3e51069
LC
1231 netif_dbg(priv, probe, priv->dev,
1232 "SKB addresses:\nskb\t\tskb data\tdma data\n");
47dd7a54 1233
54139cf3
JP
1234 for (queue = 0; queue < rx_count; queue++) {
1235 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
c24602ef 1236
54139cf3
JP
1237 netif_dbg(priv, probe, priv->dev,
1238 "(%s) dma_rx_phy=0x%08x\n", __func__,
1239 (u32)rx_q->dma_rx_phy);
f748be53 1240
54139cf3
JP
1241 for (i = 0; i < DMA_RX_SIZE; i++) {
1242 struct dma_desc *p;
aff3d9ef 1243
54139cf3
JP
1244 if (priv->extend_desc)
1245 p = &((rx_q->dma_erx + i)->basic);
1246 else
1247 p = rx_q->dma_rx + i;
1248
1249 ret = stmmac_init_rx_buffers(priv, p, i, flags,
1250 queue);
1251 if (ret)
1252 goto err_init_rx_buffers;
1253
1254 netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
1255 rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
1256 (unsigned int)rx_q->rx_skbuff_dma[i]);
1257 }
1258
1259 rx_q->cur_rx = 0;
1260 rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
1261
1262 stmmac_clear_rx_descriptors(priv, queue);
1263
1264 /* Setup the chained descriptor addresses */
1265 if (priv->mode == STMMAC_CHAIN_MODE) {
1266 if (priv->extend_desc)
1267 priv->hw->mode->init(rx_q->dma_erx,
1268 rx_q->dma_rx_phy,
1269 DMA_RX_SIZE, 1);
1270 else
1271 priv->hw->mode->init(rx_q->dma_rx,
1272 rx_q->dma_rx_phy,
1273 DMA_RX_SIZE, 0);
1274 }
71fedb01
JP
1275 }
1276
54139cf3
JP
1277 buf_sz = bfsize;
1278
71fedb01 1279 return 0;
54139cf3 1280
71fedb01 1281err_init_rx_buffers:
54139cf3
JP
1282 while (queue >= 0) {
1283 while (--i >= 0)
1284 stmmac_free_rx_buffer(priv, queue, i);
1285
1286 if (queue == 0)
1287 break;
1288
1289 i = DMA_RX_SIZE;
1290 queue--;
1291 }
1292
71fedb01
JP
1293 return ret;
1294}
1295
1296/**
1297 * init_dma_tx_desc_rings - init the TX descriptor rings
1298 * @dev: net device structure.
1299 * Description: this function initializes the DMA TX descriptors
1300 * and allocates the socket buffers. It supports the chained and ring
1301 * modes.
1302 */
1303static int init_dma_tx_desc_rings(struct net_device *dev)
1304{
1305 struct stmmac_priv *priv = netdev_priv(dev);
ce736788
JP
1306 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1307 u32 queue;
71fedb01
JP
1308 int i;
1309
ce736788
JP
1310 for (queue = 0; queue < tx_queue_cnt; queue++) {
1311 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
71fedb01 1312
ce736788
JP
1313 netif_dbg(priv, probe, priv->dev,
1314 "(%s) dma_tx_phy=0x%08x\n", __func__,
1315 (u32)tx_q->dma_tx_phy);
f748be53 1316
ce736788
JP
1317 /* Setup the chained descriptor addresses */
1318 if (priv->mode == STMMAC_CHAIN_MODE) {
1319 if (priv->extend_desc)
1320 priv->hw->mode->init(tx_q->dma_etx,
1321 tx_q->dma_tx_phy,
1322 DMA_TX_SIZE, 1);
1323 else
1324 priv->hw->mode->init(tx_q->dma_tx,
1325 tx_q->dma_tx_phy,
1326 DMA_TX_SIZE, 0);
1327 }
aff3d9ef 1328
ce736788
JP
1329 for (i = 0; i < DMA_TX_SIZE; i++) {
1330 struct dma_desc *p;
ce736788
JP
1331 if (priv->extend_desc)
1332 p = &((tx_q->dma_etx + i)->basic);
1333 else
1334 p = tx_q->dma_tx + i;
1335
1336 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1337 p->des0 = 0;
1338 p->des1 = 0;
1339 p->des2 = 0;
1340 p->des3 = 0;
1341 } else {
1342 p->des2 = 0;
1343 }
1344
1345 tx_q->tx_skbuff_dma[i].buf = 0;
1346 tx_q->tx_skbuff_dma[i].map_as_page = false;
1347 tx_q->tx_skbuff_dma[i].len = 0;
1348 tx_q->tx_skbuff_dma[i].last_segment = false;
1349 tx_q->tx_skbuff[i] = NULL;
5bacd778 1350 }
aff3d9ef 1351
ce736788
JP
1352 tx_q->dirty_tx = 0;
1353 tx_q->cur_tx = 0;
286a8372 1354
c22a3f48
JP
1355 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1356 }
aff3d9ef 1357
71fedb01
JP
1358 return 0;
1359}
1360
1361/**
1362 * init_dma_desc_rings - init the RX/TX descriptor rings
1363 * @dev: net device structure
1364 * @flags: gfp flag.
1365 * Description: this function initializes the DMA RX/TX descriptors
1366 * and allocates the socket buffers. It supports the chained and ring
1367 * modes.
1368 */
1369static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1370{
1371 struct stmmac_priv *priv = netdev_priv(dev);
1372 int ret;
1373
1374 ret = init_dma_rx_desc_rings(dev, flags);
1375 if (ret)
1376 return ret;
1377
1378 ret = init_dma_tx_desc_rings(dev);
1379
5bacd778 1380 stmmac_clear_descriptors(priv);
47dd7a54 1381
c24602ef
GC
1382 if (netif_msg_hw(priv))
1383 stmmac_display_rings(priv);
56329137 1384
56329137 1385 return ret;
47dd7a54
GC
1386}
1387
/**
 * dma_free_rx_skbufs - free RX dma buffers
 * @priv: private structure
 * @queue: RX queue index
 */
static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
{
	int i;

	for (i = 0; i < DMA_RX_SIZE; i++)
		stmmac_free_rx_buffer(priv, queue, i);
}

/**
 * dma_free_tx_skbufs - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
 */
static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
{
	int i;

	for (i = 0; i < DMA_TX_SIZE; i++)
		stmmac_free_tx_buffer(priv, queue, i);
}

/**
 * free_dma_rx_desc_resources - free RX dma desc resources
 * @priv: private structure
 */
static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
{
	u32 rx_count = priv->plat->rx_queues_to_use;
	u32 queue;

	/* Free RX queue resources */
	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		/* Release the DMA RX socket buffers */
		dma_free_rx_skbufs(priv, queue);

		/* Free DMA regions of consistent memory previously allocated */
		if (!priv->extend_desc)
			dma_free_coherent(priv->device,
					  DMA_RX_SIZE * sizeof(struct dma_desc),
					  rx_q->dma_rx, rx_q->dma_rx_phy);
		else
			dma_free_coherent(priv->device, DMA_RX_SIZE *
					  sizeof(struct dma_extended_desc),
					  rx_q->dma_erx, rx_q->dma_rx_phy);

		kfree(rx_q->rx_skbuff_dma);
		kfree(rx_q->rx_skbuff);
	}
}

/**
 * free_dma_tx_desc_resources - free TX dma desc resources
 * @priv: private structure
 */
static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
{
	u32 tx_count = priv->plat->tx_queues_to_use;
	u32 queue;

	/* Free TX queue resources */
	for (queue = 0; queue < tx_count; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		/* Release the DMA TX socket buffers */
		dma_free_tx_skbufs(priv, queue);

		/* Free DMA regions of consistent memory previously allocated */
		if (!priv->extend_desc)
			dma_free_coherent(priv->device,
					  DMA_TX_SIZE * sizeof(struct dma_desc),
					  tx_q->dma_tx, tx_q->dma_tx_phy);
		else
			dma_free_coherent(priv->device, DMA_TX_SIZE *
					  sizeof(struct dma_extended_desc),
					  tx_q->dma_etx, tx_q->dma_tx_phy);

		kfree(tx_q->tx_skbuff_dma);
		kfree(tx_q->tx_skbuff);
	}
}

/**
 * alloc_dma_rx_desc_resources - alloc RX resources.
 * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for the RX path. It pre-allocates
 * the RX socket buffers in order to allow the zero-copy mechanism.
 */
static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
{
	u32 rx_count = priv->plat->rx_queues_to_use;
	int ret = -ENOMEM;
	u32 queue;

	/* RX queues buffers and DMA */
	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		rx_q->queue_index = queue;
		rx_q->priv_data = priv;

		rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
						    sizeof(dma_addr_t),
						    GFP_KERNEL);
		if (!rx_q->rx_skbuff_dma)
			goto err_dma;

		rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
						sizeof(struct sk_buff *),
						GFP_KERNEL);
		if (!rx_q->rx_skbuff)
			goto err_dma;

		if (priv->extend_desc) {
			rx_q->dma_erx = dma_zalloc_coherent(priv->device,
							    DMA_RX_SIZE *
							    sizeof(struct
							    dma_extended_desc),
							    &rx_q->dma_rx_phy,
							    GFP_KERNEL);
			if (!rx_q->dma_erx)
				goto err_dma;

		} else {
			rx_q->dma_rx = dma_zalloc_coherent(priv->device,
							   DMA_RX_SIZE *
							   sizeof(struct
							   dma_desc),
							   &rx_q->dma_rx_phy,
							   GFP_KERNEL);
			if (!rx_q->dma_rx)
				goto err_dma;
		}
	}

	return 0;

err_dma:
	free_dma_rx_desc_resources(priv);

	return ret;
}

/**
 * alloc_dma_tx_desc_resources - alloc TX resources.
 * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for the TX path.
 */
static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
{
	u32 tx_count = priv->plat->tx_queues_to_use;
	int ret = -ENOMEM;
	u32 queue;

	/* TX queues buffers and DMA */
	for (queue = 0; queue < tx_count; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		tx_q->queue_index = queue;
		tx_q->priv_data = priv;

		tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
						    sizeof(*tx_q->tx_skbuff_dma),
						    GFP_KERNEL);
		if (!tx_q->tx_skbuff_dma)
			goto err_dma;

		tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
						sizeof(struct sk_buff *),
						GFP_KERNEL);
		if (!tx_q->tx_skbuff)
			goto err_dma;

		if (priv->extend_desc) {
			tx_q->dma_etx = dma_zalloc_coherent(priv->device,
							    DMA_TX_SIZE *
							    sizeof(struct
							    dma_extended_desc),
							    &tx_q->dma_tx_phy,
							    GFP_KERNEL);
			if (!tx_q->dma_etx)
				goto err_dma;
		} else {
			tx_q->dma_tx = dma_zalloc_coherent(priv->device,
							   DMA_TX_SIZE *
							   sizeof(struct
							   dma_desc),
							   &tx_q->dma_tx_phy,
							   GFP_KERNEL);
			if (!tx_q->dma_tx)
				goto err_dma;
		}
	}

	return 0;

err_dma:
	free_dma_tx_desc_resources(priv);

	return ret;
}

/**
 * alloc_dma_desc_resources - alloc TX/RX resources.
 * @priv: private structure
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocates the RX socket buffers in order to
 * allow the zero-copy mechanism.
 */
static int alloc_dma_desc_resources(struct stmmac_priv *priv)
{
	/* RX Allocation */
	int ret = alloc_dma_rx_desc_resources(priv);

	if (ret)
		return ret;

	ret = alloc_dma_tx_desc_resources(priv);

	return ret;
}

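/* Sizing note (illustrative, assuming the default DMA_RX_SIZE/DMA_TX_SIZE of
 * 512 entries and 16-byte basic descriptors): each RX queue allocated above
 * needs 512 * 16 = 8 KiB of coherent memory for its ring (twice that with
 * extended descriptors) plus the two 512-entry kmalloc'ed arrays of DMA
 * addresses and skb pointers; TX queues are sized the same way.
 */
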
/**
 * free_dma_desc_resources - free dma desc resources
 * @priv: private structure
 */
static void free_dma_desc_resources(struct stmmac_priv *priv)
{
	/* Release the DMA RX socket buffers */
	free_dma_rx_desc_resources(priv);

	/* Release the DMA TX socket buffers */
	free_dma_tx_desc_resources(priv);
}

/**
 *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
 *  @priv: driver private structure
 *  Description: It is used for enabling the rx queues in the MAC
 */
static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	int queue;
	u8 mode;

	for (queue = 0; queue < rx_queues_count; queue++) {
		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
		priv->hw->mac->rx_queue_enable(priv->hw, mode, queue);
	}
}

/**
 * stmmac_start_rx_dma - start RX DMA channel
 * @priv: driver private structure
 * @chan: RX channel index
 * Description:
 * This starts a RX DMA channel
 */
static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
	priv->hw->dma->start_rx(priv->ioaddr, chan);
}

/**
 * stmmac_start_tx_dma - start TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This starts a TX DMA channel
 */
static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
	priv->hw->dma->start_tx(priv->ioaddr, chan);
}

/**
 * stmmac_stop_rx_dma - stop RX DMA channel
 * @priv: driver private structure
 * @chan: RX channel index
 * Description:
 * This stops a RX DMA channel
 */
static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
	priv->hw->dma->stop_rx(priv->ioaddr, chan);
}

/**
 * stmmac_stop_tx_dma - stop TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This stops a TX DMA channel
 */
static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
	priv->hw->dma->stop_tx(priv->ioaddr, chan);
}

/**
 * stmmac_start_all_dma - start all RX and TX DMA channels
 * @priv: driver private structure
 * Description:
 * This starts all the RX and TX DMA channels
 */
static void stmmac_start_all_dma(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 chan = 0;

	for (chan = 0; chan < rx_channels_count; chan++)
		stmmac_start_rx_dma(priv, chan);

	for (chan = 0; chan < tx_channels_count; chan++)
		stmmac_start_tx_dma(priv, chan);
}

/**
 * stmmac_stop_all_dma - stop all RX and TX DMA channels
 * @priv: driver private structure
 * Description:
 * This stops the RX and TX DMA channels
 */
static void stmmac_stop_all_dma(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 chan = 0;

	for (chan = 0; chan < rx_channels_count; chan++)
		stmmac_stop_rx_dma(priv, chan);

	for (chan = 0; chan < tx_channels_count; chan++)
		stmmac_stop_tx_dma(priv, chan);
}

/**
 *  stmmac_dma_operation_mode - HW DMA operation mode
 *  @priv: driver private structure
 *  Description: it is used for configuring the DMA operation mode register in
 *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
 */
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	int rxfifosz = priv->plat->rx_fifo_size;
	u32 txmode = 0;
	u32 rxmode = 0;
	u32 chan = 0;
f88203a2 1756
11fbf811
TR
1757 if (rxfifosz == 0)
1758 rxfifosz = priv->dma_cap.rx_fifo_size;
1759
6deee222
JP
1760 if (priv->plat->force_thresh_dma_mode) {
1761 txmode = tc;
1762 rxmode = tc;
1763 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
61b8013a
SK
1764 /*
1765 * In case of GMAC, SF mode can be enabled
1766 * to perform the TX COE in HW. This depends on:
ebbb293f
GC
1767 * 1) TX COE being actually supported;
1768 * 2) there being no bugged Jumbo frame support
1769 * that would require skipping csum insertion in the TDES.
1770 */
6deee222
JP
1771 txmode = SF_DMA_MODE;
1772 rxmode = SF_DMA_MODE;
b2dec116 1773 priv->xstats.threshold = SF_DMA_MODE;
6deee222
JP
1774 } else {
1775 txmode = tc;
1776 rxmode = SF_DMA_MODE;
1777 }
1778
1779 /* configure all channels */
1780 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1781 for (chan = 0; chan < rx_channels_count; chan++)
1782 priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1783 rxfifosz);
1784
1785 for (chan = 0; chan < tx_channels_count; chan++)
1786 priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
1787 } else {
1788 priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
f88203a2 1789 rxfifosz);
6deee222 1790 }
47dd7a54
GC
1791}
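/*
 * Roughly, the selection above resolves to (illustrative summary only):
 *
 *   force_thresh_dma_mode            -> txmode = tc,          rxmode = tc
 *   force_sf_dma_mode or plat tx_coe -> txmode = SF_DMA_MODE, rxmode = SF_DMA_MODE
 *   otherwise                        -> txmode = tc,          rxmode = SF_DMA_MODE
 *
 * i.e. the RX path runs in Store-And-Forward unless threshold mode is forced.
 */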
1792
47dd7a54 1793/**
732fdf0e 1794 * stmmac_tx_clean - to manage the transmission completion
32ceabca 1795 * @priv: driver private structure
ce736788 1796 * @queue: TX queue index
732fdf0e 1797 * Description: it reclaims the transmit resources after transmission completes.
47dd7a54 1798 */
ce736788 1799static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
47dd7a54 1800{
ce736788 1801 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
38979574 1802 unsigned int bytes_compl = 0, pkts_compl = 0;
ce736788 1803 unsigned int entry = tx_q->dirty_tx;
47dd7a54 1804
739c8e14 1805 netif_tx_lock(priv->dev);
a9097a96 1806
9125cdd1
GC
1807 priv->xstats.tx_clean++;
1808
ce736788
JP
1809 while (entry != tx_q->cur_tx) {
1810 struct sk_buff *skb = tx_q->tx_skbuff[entry];
c24602ef 1811 struct dma_desc *p;
c363b658 1812 int status;
c24602ef
GC
1813
1814 if (priv->extend_desc)
ce736788 1815 p = (struct dma_desc *)(tx_q->dma_etx + entry);
c24602ef 1816 else
ce736788 1817 p = tx_q->dma_tx + entry;
47dd7a54 1818
c363b658 1819 status = priv->hw->desc->tx_status(&priv->dev->stats,
ceb69499
GC
1820 &priv->xstats, p,
1821 priv->ioaddr);
c363b658
FG
1822 /* Check if the descriptor is owned by the DMA */
1823 if (unlikely(status & tx_dma_own))
1824 break;
1825
1826 /* Just consider the last segment and ...*/
1827 if (likely(!(status & tx_not_ls))) {
1828 /* ... verify the status error condition */
1829 if (unlikely(status & tx_err)) {
1830 priv->dev->stats.tx_errors++;
1831 } else {
47dd7a54
GC
1832 priv->dev->stats.tx_packets++;
1833 priv->xstats.tx_pkt_n++;
c363b658 1834 }
ba1ffd74 1835 stmmac_get_tx_hwtstamp(priv, p, skb);
47dd7a54 1836 }
47dd7a54 1837
ce736788
JP
1838 if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
1839 if (tx_q->tx_skbuff_dma[entry].map_as_page)
362b37be 1840 dma_unmap_page(priv->device,
ce736788
JP
1841 tx_q->tx_skbuff_dma[entry].buf,
1842 tx_q->tx_skbuff_dma[entry].len,
362b37be
GC
1843 DMA_TO_DEVICE);
1844 else
1845 dma_unmap_single(priv->device,
ce736788
JP
1846 tx_q->tx_skbuff_dma[entry].buf,
1847 tx_q->tx_skbuff_dma[entry].len,
362b37be 1848 DMA_TO_DEVICE);
ce736788
JP
1849 tx_q->tx_skbuff_dma[entry].buf = 0;
1850 tx_q->tx_skbuff_dma[entry].len = 0;
1851 tx_q->tx_skbuff_dma[entry].map_as_page = false;
cf32deec 1852 }
f748be53
AT
1853
1854 if (priv->hw->mode->clean_desc3)
ce736788 1855 priv->hw->mode->clean_desc3(tx_q, p);
f748be53 1856
ce736788
JP
1857 tx_q->tx_skbuff_dma[entry].last_segment = false;
1858 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
47dd7a54
GC
1859
1860 if (likely(skb != NULL)) {
38979574
BG
1861 pkts_compl++;
1862 bytes_compl += skb->len;
7c565c33 1863 dev_consume_skb_any(skb);
ce736788 1864 tx_q->tx_skbuff[entry] = NULL;
47dd7a54
GC
1865 }
1866
4a7d666a 1867 priv->hw->desc->release_tx_desc(p, priv->mode);
47dd7a54 1868
e3ad57c9 1869 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
47dd7a54 1870 }
ce736788 1871 tx_q->dirty_tx = entry;
38979574 1872
c22a3f48
JP
1873 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
1874 pkts_compl, bytes_compl);
1875
1876 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
1877 queue))) &&
1878 stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
38979574 1879
739c8e14
LS
1880 netif_dbg(priv, tx_done, priv->dev,
1881 "%s: restart transmit\n", __func__);
c22a3f48 1882 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
47dd7a54 1883 }
d765955d
GC
1884
1885 if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
1886 stmmac_enable_eee_mode(priv);
f5351ef7 1887 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
d765955d 1888 }
739c8e14 1889 netif_tx_unlock(priv->dev);
47dd7a54
GC
1890}
1891
4f513ecd 1892static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan)
47dd7a54 1893{
4f513ecd 1894 priv->hw->dma->enable_dma_irq(priv->ioaddr, chan);
47dd7a54
GC
1895}
1896
4f513ecd 1897static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan)
47dd7a54 1898{
4f513ecd 1899 priv->hw->dma->disable_dma_irq(priv->ioaddr, chan);
47dd7a54
GC
1900}
1901
47dd7a54 1902/**
732fdf0e 1903 * stmmac_tx_err - to manage the tx error
32ceabca 1904 * @priv: driver private structure
5bacd778 1905 * @chan: channel index
47dd7a54 1906 * Description: it cleans the descriptors and restarts the transmission
732fdf0e 1907 * in case of transmission errors.
47dd7a54 1908 */
5bacd778 1909static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
47dd7a54 1910{
ce736788 1911 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
c24602ef 1912 int i;
ce736788 1913
c22a3f48 1914 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
47dd7a54 1915
ae4f0d46 1916 stmmac_stop_tx_dma(priv, chan);
ce736788 1917 dma_free_tx_skbufs(priv, chan);
e3ad57c9 1918 for (i = 0; i < DMA_TX_SIZE; i++)
c24602ef 1919 if (priv->extend_desc)
ce736788 1920 priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
c24602ef 1921 priv->mode,
e3ad57c9 1922 (i == DMA_TX_SIZE - 1));
c24602ef 1923 else
ce736788 1924 priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
c24602ef 1925 priv->mode,
e3ad57c9 1926 (i == DMA_TX_SIZE - 1));
ce736788
JP
1927 tx_q->dirty_tx = 0;
1928 tx_q->cur_tx = 0;
c22a3f48 1929 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
ae4f0d46 1930 stmmac_start_tx_dma(priv, chan);
47dd7a54
GC
1931
1932 priv->dev->stats.tx_errors++;
c22a3f48 1933 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
47dd7a54
GC
1934}
1935
6deee222
JP
1936/**
1937 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
1938 * @priv: driver private structure
1939 * @txmode: TX operating mode
1940 * @rxmode: RX operating mode
1941 * @chan: channel index
1942 * Description: it is used for configuring the DMA operation mode at
1943 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
1944 * mode.
1945 */
1946static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
1947 u32 rxmode, u32 chan)
1948{
1949 int rxfifosz = priv->plat->rx_fifo_size;
1950
1951 if (rxfifosz == 0)
1952 rxfifosz = priv->dma_cap.rx_fifo_size;
1953
1954 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
1955 priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
1956 rxfifosz);
1957 priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
1958 } else {
1959 priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
1960 rxfifosz);
1961 }
1962}
1963
32ceabca 1964/**
732fdf0e 1965 * stmmac_dma_interrupt - DMA ISR
32ceabca
GC
1966 * @priv: driver private structure
1967 * Description: this is the DMA ISR. It is called by the main ISR.
732fdf0e
GC
1968 * It calls the dwmac dma routine and schedules the poll method in case
1969 * some work can be done.
32ceabca 1970 */
aec7ff27
GC
1971static void stmmac_dma_interrupt(struct stmmac_priv *priv)
1972{
d62a107a 1973 u32 tx_channel_count = priv->plat->tx_queues_to_use;
aec7ff27 1974 int status;
d62a107a
JP
1975 u32 chan;
1976
1977 for (chan = 0; chan < tx_channel_count; chan++) {
c22a3f48
JP
1978 struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
1979
d62a107a
JP
1980 status = priv->hw->dma->dma_interrupt(priv->ioaddr,
1981 &priv->xstats, chan);
1982 if (likely((status & handle_rx)) || (status & handle_tx)) {
c22a3f48 1983 if (likely(napi_schedule_prep(&rx_q->napi))) {
d62a107a 1984 stmmac_disable_dma_irq(priv, chan);
c22a3f48 1985 __napi_schedule(&rx_q->napi);
d62a107a 1986 }
9125cdd1 1987 }
6deee222 1988
d62a107a
JP
1989 if (unlikely(status & tx_hard_error_bump_tc)) {
1990 /* Try to bump up the dma threshold on this failure */
1991 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
1992 (tc <= 256)) {
1993 tc += 64;
1994 if (priv->plat->force_thresh_dma_mode)
1995 stmmac_set_dma_operation_mode(priv,
1996 tc,
1997 tc,
1998 chan);
1999 else
2000 stmmac_set_dma_operation_mode(priv,
2001 tc,
2002 SF_DMA_MODE,
2003 chan);
2004 priv->xstats.threshold = tc;
2005 }
2006 } else if (unlikely(status == tx_hard_error)) {
2007 stmmac_tx_err(priv, chan);
47dd7a54 2008 }
d62a107a 2009 }
47dd7a54
GC
2010}
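/*
 * In effect (summary for illustration): on a tx_hard_error_bump_tc event the
 * TX threshold tc is raised in steps of 64 and reprogrammed through
 * stmmac_set_dma_operation_mode(); the bumping stops once the mode is
 * already Store-And-Forward or tc has grown past 256. A plain tx_hard_error
 * instead resets the ring via stmmac_tx_err().
 */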
2011
32ceabca
GC
2012/**
2013 * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2014 * @priv: driver private structure
2015 * Description: this masks the MMC irq, in fact, the counters are managed in SW.
2016 */
1c901a46
GC
2017static void stmmac_mmc_setup(struct stmmac_priv *priv)
2018{
2019 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
36ff7c1e 2020 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
1c901a46 2021
ba1ffd74
GC
2022 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2023 priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
f748be53 2024 priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
ba1ffd74
GC
2025 } else {
2026 priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
f748be53 2027 priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
ba1ffd74 2028 }
36ff7c1e
AT
2029
2030 dwmac_mmc_intr_all_mask(priv->mmcaddr);
4f795b25
GC
2031
2032 if (priv->dma_cap.rmon) {
36ff7c1e 2033 dwmac_mmc_ctrl(priv->mmcaddr, mode);
4f795b25
GC
2034 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2035 } else
38ddc59d 2036 netdev_info(priv->dev, "No MAC Management Counters available\n");
1c901a46
GC
2037}
2038
19e30c14 2039/**
732fdf0e 2040 * stmmac_selec_desc_mode - to select among: normal/alternate/extend descriptors
32ceabca
GC
2041 * @priv: driver private structure
2042 * Description: select the Enhanced/Alternate or Normal descriptors.
732fdf0e
GC
2043 * In case of Enhanced/Alternate, it checks if the extended descriptors are
2044 * supported by the HW capability register.
ff3dd78c 2045 */
19e30c14
GC
2046static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
2047{
2048 if (priv->plat->enh_desc) {
38ddc59d 2049 dev_info(priv->device, "Enhanced/Alternate descriptors\n");
c24602ef
GC
2050
2051 /* GMAC older than 3.50 has no extended descriptors */
2052 if (priv->synopsys_id >= DWMAC_CORE_3_50) {
38ddc59d 2053 dev_info(priv->device, "Enabled extended descriptors\n");
c24602ef
GC
2054 priv->extend_desc = 1;
2055 } else
38ddc59d 2056 dev_warn(priv->device, "Extended descriptors not supported\n");
c24602ef 2057
19e30c14
GC
2058 priv->hw->desc = &enh_desc_ops;
2059 } else {
38ddc59d 2060 dev_info(priv->device, "Normal descriptors\n");
19e30c14
GC
2061 priv->hw->desc = &ndesc_ops;
2062 }
2063}
2064
2065/**
732fdf0e 2066 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
32ceabca 2067 * @priv: driver private structure
19e30c14
GC
2068 * Description:
2069 * new GMAC chip generations have a new register to indicate the
2070 * presence of the optional feature/functions.
2071 * This can also be used to override the value passed through the
2072 * platform, which is still necessary for old MAC10/100 and GMAC chips.
e7434821
GC
2073 */
2074static int stmmac_get_hw_features(struct stmmac_priv *priv)
2075{
f10a6a35 2076 u32 ret = 0;
3c20f72f 2077
5e6efe88 2078 if (priv->hw->dma->get_hw_feature) {
f10a6a35
AT
2079 priv->hw->dma->get_hw_feature(priv->ioaddr,
2080 &priv->dma_cap);
2081 ret = 1;
19e30c14 2082 }
e7434821 2083
f10a6a35 2084 return ret;
e7434821
GC
2085}
2086
32ceabca 2087/**
732fdf0e 2088 * stmmac_check_ether_addr - check if the MAC addr is valid
32ceabca
GC
2089 * @priv: driver private structure
2090 * Description:
2091 * it verifies that the MAC address is valid; in case it is not, it
2092 * generates a random MAC address
2093 */
bfab27a1
GC
2094static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2095{
bfab27a1 2096 if (!is_valid_ether_addr(priv->dev->dev_addr)) {
7ed24bbe 2097 priv->hw->mac->get_umac_addr(priv->hw,
bfab27a1 2098 priv->dev->dev_addr, 0);
ceb69499 2099 if (!is_valid_ether_addr(priv->dev->dev_addr))
f2cedb63 2100 eth_hw_addr_random(priv->dev);
38ddc59d
LC
2101 netdev_info(priv->dev, "device MAC address %pM\n",
2102 priv->dev->dev_addr);
bfab27a1 2103 }
bfab27a1
GC
2104}
2105
32ceabca 2106/**
732fdf0e 2107 * stmmac_init_dma_engine - DMA init.
32ceabca
GC
2108 * @priv: driver private structure
2109 * Description:
2110 * It inits the DMA invoking the specific MAC/GMAC callback.
2111 * Some DMA parameters can be passed from the platform;
2112 * in case these are not passed a default is kept for the MAC or GMAC.
2113 */
0f1f88a8
GC
2114static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2115{
47f2a9ce
JP
2116 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2117 u32 tx_channels_count = priv->plat->tx_queues_to_use;
54139cf3 2118 struct stmmac_rx_queue *rx_q;
ce736788 2119 struct stmmac_tx_queue *tx_q;
47f2a9ce
JP
2120 u32 dummy_dma_rx_phy = 0;
2121 u32 dummy_dma_tx_phy = 0;
2122 u32 chan = 0;
c24602ef 2123 int atds = 0;
495db273 2124 int ret = 0;
0f1f88a8 2125
a332e2fa
NC
2126 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2127 dev_err(priv->device, "Invalid DMA configuration\n");
89ab75bf 2128 return -EINVAL;
0f1f88a8
GC
2129 }
2130
c24602ef
GC
2131 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2132 atds = 1;
2133
495db273
GC
2134 ret = priv->hw->dma->reset(priv->ioaddr);
2135 if (ret) {
2136 dev_err(priv->device, "Failed to reset the dma\n");
2137 return ret;
2138 }
2139
f748be53 2140 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
47f2a9ce
JP
2141 /* DMA Configuration */
2142 priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2143 dummy_dma_tx_phy, dummy_dma_rx_phy, atds);
2144
2145 /* DMA RX Channel Configuration */
2146 for (chan = 0; chan < rx_channels_count; chan++) {
54139cf3
JP
2147 rx_q = &priv->rx_queue[chan];
2148
47f2a9ce
JP
2149 priv->hw->dma->init_rx_chan(priv->ioaddr,
2150 priv->plat->dma_cfg,
54139cf3 2151 rx_q->dma_rx_phy, chan);
47f2a9ce 2152
54139cf3 2153 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
47f2a9ce
JP
2154 (DMA_RX_SIZE * sizeof(struct dma_desc));
2155 priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
54139cf3 2156 rx_q->rx_tail_addr,
47f2a9ce
JP
2157 chan);
2158 }
2159
2160 /* DMA TX Channel Configuration */
2161 for (chan = 0; chan < tx_channels_count; chan++) {
ce736788
JP
2162 tx_q = &priv->tx_queue[chan];
2163
47f2a9ce 2164 priv->hw->dma->init_chan(priv->ioaddr,
ce736788
JP
2165 priv->plat->dma_cfg,
2166 chan);
47f2a9ce
JP
2167
2168 priv->hw->dma->init_tx_chan(priv->ioaddr,
2169 priv->plat->dma_cfg,
ce736788 2170 tx_q->dma_tx_phy, chan);
47f2a9ce 2171
ce736788 2172 tx_q->tx_tail_addr = tx_q->dma_tx_phy +
47f2a9ce
JP
2173 (DMA_TX_SIZE * sizeof(struct dma_desc));
2174 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr,
ce736788 2175 tx_q->tx_tail_addr,
47f2a9ce
JP
2176 chan);
2177 }
2178 } else {
54139cf3 2179 rx_q = &priv->rx_queue[chan];
ce736788 2180 tx_q = &priv->tx_queue[chan];
47f2a9ce 2181 priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
ce736788 2182 tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds);
f748be53
AT
2183 }
2184
2185 if (priv->plat->axi && priv->hw->dma->axi)
afea0365
GC
2186 priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
2187
495db273 2188 return ret;
0f1f88a8
GC
2189}
2190
9125cdd1 2191/**
732fdf0e 2192 * stmmac_tx_timer - mitigation sw timer for tx.
9125cdd1
GC
2193 * @data: data pointer
2194 * Description:
2195 * This is the timer handler to directly invoke the stmmac_tx_clean.
2196 */
2197static void stmmac_tx_timer(unsigned long data)
2198{
2199 struct stmmac_priv *priv = (struct stmmac_priv *)data;
ce736788
JP
2200 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2201 u32 queue;
9125cdd1 2202
ce736788
JP
2203 /* let's scan all the tx queues */
2204 for (queue = 0; queue < tx_queues_count; queue++)
2205 stmmac_tx_clean(priv, queue);
9125cdd1
GC
2206}
2207
2208/**
732fdf0e 2209 * stmmac_init_tx_coalesce - init tx mitigation options.
32ceabca 2210 * @priv: driver private structure
9125cdd1
GC
2211 * Description:
2212 * This inits the transmit coalesce parameters: i.e. timer rate,
2213 * timer handler and default threshold used for enabling the
2214 * interrupt on completion bit.
2215 */
2216static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
2217{
2218 priv->tx_coal_frames = STMMAC_TX_FRAMES;
2219 priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
2220 init_timer(&priv->txtimer);
2221 priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
2222 priv->txtimer.data = (unsigned long)priv;
2223 priv->txtimer.function = stmmac_tx_timer;
2224 add_timer(&priv->txtimer);
2225}
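/*
 * How the two mitigation knobs interact (descriptive note): stmmac_xmit()
 * sets the IC bit only on roughly every tx_coal_frames-th frame, while the
 * txtimer armed here fires after tx_coal_timer and runs stmmac_tx_clean()
 * on every TX queue, so completions are still reclaimed even when the IC
 * bit is being suppressed.
 */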
2226
4854ab99
JP
2227static void stmmac_set_rings_length(struct stmmac_priv *priv)
2228{
2229 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2230 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2231 u32 chan;
2232
2233 /* set TX ring length */
2234 if (priv->hw->dma->set_tx_ring_len) {
2235 for (chan = 0; chan < tx_channels_count; chan++)
2236 priv->hw->dma->set_tx_ring_len(priv->ioaddr,
2237 (DMA_TX_SIZE - 1), chan);
2238 }
2239
2240 /* set RX ring length */
2241 if (priv->hw->dma->set_rx_ring_len) {
2242 for (chan = 0; chan < rx_channels_count; chan++)
2243 priv->hw->dma->set_rx_ring_len(priv->ioaddr,
2244 (DMA_RX_SIZE - 1), chan);
2245 }
2246}
2247
6a3a7193
JP
2248/**
2249 * stmmac_set_tx_queue_weight - Set TX queue weight
2250 * @priv: driver private structure
2251 * Description: It is used for setting TX queues weight
2252 */
2253static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
2254{
2255 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2256 u32 weight;
2257 u32 queue;
2258
2259 for (queue = 0; queue < tx_queues_count; queue++) {
2260 weight = priv->plat->tx_queues_cfg[queue].weight;
2261 priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue);
2262 }
2263}
2264
19d91873
JP
2265/**
2266 * stmmac_configure_cbs - Configure CBS in TX queue
2267 * @priv: driver private structure
2268 * Description: It is used for configuring CBS in AVB TX queues
2269 */
2270static void stmmac_configure_cbs(struct stmmac_priv *priv)
2271{
2272 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2273 u32 mode_to_use;
2274 u32 queue;
2275
44781fef
JP
2276 /* queue 0 is reserved for legacy traffic */
2277 for (queue = 1; queue < tx_queues_count; queue++) {
19d91873
JP
2278 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
2279 if (mode_to_use == MTL_QUEUE_DCB)
2280 continue;
2281
2282 priv->hw->mac->config_cbs(priv->hw,
2283 priv->plat->tx_queues_cfg[queue].send_slope,
2284 priv->plat->tx_queues_cfg[queue].idle_slope,
2285 priv->plat->tx_queues_cfg[queue].high_credit,
2286 priv->plat->tx_queues_cfg[queue].low_credit,
2287 queue);
2288 }
2289}
2290
d43042f4
JP
2291/**
2292 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2293 * @priv: driver private structure
2294 * Description: It is used for mapping RX queues to RX dma channels
2295 */
2296static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
2297{
2298 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2299 u32 queue;
2300 u32 chan;
2301
2302 for (queue = 0; queue < rx_queues_count; queue++) {
2303 chan = priv->plat->rx_queues_cfg[queue].chan;
2304 priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan);
2305 }
2306}
2307
a8f5102a
JP
2308/**
2309 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2310 * @priv: driver private structure
2311 * Description: It is used for configuring the RX Queue Priority
2312 */
2313static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
2314{
2315 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2316 u32 queue;
2317 u32 prio;
2318
2319 for (queue = 0; queue < rx_queues_count; queue++) {
2320 if (!priv->plat->rx_queues_cfg[queue].use_prio)
2321 continue;
2322
2323 prio = priv->plat->rx_queues_cfg[queue].prio;
2324 priv->hw->mac->rx_queue_prio(priv->hw, prio, queue);
2325 }
2326}
2327
2328/**
2329 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2330 * @priv: driver private structure
2331 * Description: It is used for configuring the TX Queue Priority
2332 */
2333static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
2334{
2335 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2336 u32 queue;
2337 u32 prio;
2338
2339 for (queue = 0; queue < tx_queues_count; queue++) {
2340 if (!priv->plat->tx_queues_cfg[queue].use_prio)
2341 continue;
2342
2343 prio = priv->plat->tx_queues_cfg[queue].prio;
2344 priv->hw->mac->tx_queue_prio(priv->hw, prio, queue);
2345 }
2346}
2347
abe80fdc
JP
2348/**
2349 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2350 * @priv: driver private structure
2351 * Description: It is used for configuring the RX queue routing
2352 */
2353static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
2354{
2355 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2356 u32 queue;
2357 u8 packet;
2358
2359 for (queue = 0; queue < rx_queues_count; queue++) {
2360 /* no specific packet type routing specified for the queue */
2361 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
2362 continue;
2363
2364 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
2365 priv->hw->mac->rx_queue_routing(priv->hw, packet, queue);
2366 }
2367}
2368
d0a9c9f9
JP
2369/**
2370 * stmmac_mtl_configuration - Configure MTL
2371 * @priv: driver private structure
2372 * Description: It is used for configuring MTL
2373 */
2374static void stmmac_mtl_configuration(struct stmmac_priv *priv)
2375{
2376 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2377 u32 tx_queues_count = priv->plat->tx_queues_to_use;
2378
6a3a7193
JP
2379 if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight)
2380 stmmac_set_tx_queue_weight(priv);
2381
d0a9c9f9
JP
2382 /* Configure MTL RX algorithms */
2383 if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms)
2384 priv->hw->mac->prog_mtl_rx_algorithms(priv->hw,
2385 priv->plat->rx_sched_algorithm);
2386
2387 /* Configure MTL TX algorithms */
2388 if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms)
2389 priv->hw->mac->prog_mtl_tx_algorithms(priv->hw,
2390 priv->plat->tx_sched_algorithm);
2391
19d91873
JP
2392 /* Configure CBS in AVB TX queues */
2393 if (tx_queues_count > 1 && priv->hw->mac->config_cbs)
2394 stmmac_configure_cbs(priv);
2395
d43042f4 2396 /* Map RX MTL to DMA channels */
03cf65a9 2397 if (priv->hw->mac->map_mtl_to_dma)
d43042f4
JP
2398 stmmac_rx_queue_dma_chan_map(priv);
2399
d0a9c9f9 2400 /* Enable MAC RX Queues */
f3976874 2401 if (priv->hw->mac->rx_queue_enable)
d0a9c9f9 2402 stmmac_mac_enable_rx_queues(priv);
6deee222 2403
a8f5102a
JP
2404 /* Set RX priorities */
2405 if (rx_queues_count > 1 && priv->hw->mac->rx_queue_prio)
2406 stmmac_mac_config_rx_queues_prio(priv);
2407
2408 /* Set TX priorities */
2409 if (tx_queues_count > 1 && priv->hw->mac->tx_queue_prio)
2410 stmmac_mac_config_tx_queues_prio(priv);
abe80fdc
JP
2411
2412 /* Set RX routing */
2413 if (rx_queues_count > 1 && priv->hw->mac->rx_queue_routing)
2414 stmmac_mac_config_rx_queues_routing(priv);
d0a9c9f9
JP
2415}
2416
523f11b5 2417/**
732fdf0e 2418 * stmmac_hw_setup - setup mac in a usable state.
523f11b5
SK
2419 * @dev : pointer to the device structure.
2420 * Description:
732fdf0e
GC
2421 * this is the main function to setup the HW in a usable state: the
2422 * dma engine is reset, the core registers are configured (e.g. AXI,
2423 * Checksum features, timers) and the DMA is made ready to start receiving
2424 * and transmitting.
523f11b5
SK
2425 * Return value:
2426 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2427 * file on failure.
2428 */
fe131929 2429static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
523f11b5
SK
2430{
2431 struct stmmac_priv *priv = netdev_priv(dev);
3c55d4d0 2432 u32 rx_cnt = priv->plat->rx_queues_to_use;
146617b8
JP
2433 u32 tx_cnt = priv->plat->tx_queues_to_use;
2434 u32 chan;
523f11b5
SK
2435 int ret;
2436
523f11b5
SK
2437 /* DMA initialization and SW reset */
2438 ret = stmmac_init_dma_engine(priv);
2439 if (ret < 0) {
38ddc59d
LC
2440 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2441 __func__);
523f11b5
SK
2442 return ret;
2443 }
2444
2445 /* Copy the MAC addr into the HW */
7ed24bbe 2446 priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);
523f11b5 2447
02e57b9d
GC
2448 /* PS and related bits will be programmed according to the speed */
2449 if (priv->hw->pcs) {
2450 int speed = priv->plat->mac_port_sel_speed;
2451
2452 if ((speed == SPEED_10) || (speed == SPEED_100) ||
2453 (speed == SPEED_1000)) {
2454 priv->hw->ps = speed;
2455 } else {
2456 dev_warn(priv->device, "invalid port speed\n");
2457 priv->hw->ps = 0;
2458 }
2459 }
2460
523f11b5 2461 /* Initialize the MAC Core */
7ed24bbe 2462 priv->hw->mac->core_init(priv->hw, dev->mtu);
523f11b5 2463
d0a9c9f9
JP
2464 /* Initialize MTL*/
2465 if (priv->synopsys_id >= DWMAC_CORE_4_00)
2466 stmmac_mtl_configuration(priv);
9eb12474 2467
978aded4
GC
2468 ret = priv->hw->mac->rx_ipc(priv->hw);
2469 if (!ret) {
38ddc59d 2470 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
978aded4 2471 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
d2afb5bd 2472 priv->hw->rx_csum = 0;
978aded4
GC
2473 }
2474
523f11b5 2475 /* Enable the MAC Rx/Tx */
270c7759 2476 priv->hw->mac->set_mac(priv->ioaddr, true);
523f11b5 2477
b4f0a661
JP
2478 /* Set the HW DMA mode and the COE */
2479 stmmac_dma_operation_mode(priv);
2480
523f11b5
SK
2481 stmmac_mmc_setup(priv);
2482
fe131929 2483 if (init_ptp) {
0ad2be79
TR
2484 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
2485 if (ret < 0)
2486 netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
2487
fe131929 2488 ret = stmmac_init_ptp(priv);
722eef28
HK
2489 if (ret == -EOPNOTSUPP)
2490 netdev_warn(priv->dev, "PTP not supported by HW\n");
2491 else if (ret)
2492 netdev_warn(priv->dev, "PTP init failed\n");
fe131929 2493 }
523f11b5 2494
50fb4f74 2495#ifdef CONFIG_DEBUG_FS
523f11b5
SK
2496 ret = stmmac_init_fs(dev);
2497 if (ret < 0)
38ddc59d
LC
2498 netdev_warn(priv->dev, "%s: failed debugFS registration\n",
2499 __func__);
523f11b5
SK
2500#endif
2501 /* Start the ball rolling... */
ae4f0d46 2502 stmmac_start_all_dma(priv);
523f11b5 2503
523f11b5
SK
2504 priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
2505
523f11b5
SK
2506 if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
2507 priv->rx_riwt = MAX_DMA_RIWT;
3c55d4d0 2508 priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
523f11b5
SK
2509 }
2510
3fe5cadb 2511 if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
02e57b9d 2512 priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
523f11b5 2513
4854ab99
JP
2514 /* set TX and RX rings length */
2515 stmmac_set_rings_length(priv);
2516
f748be53 2517 /* Enable TSO */
146617b8
JP
2518 if (priv->tso) {
2519 for (chan = 0; chan < tx_cnt; chan++)
2520 priv->hw->dma->enable_tso(priv->ioaddr, 1, chan);
2521 }
f748be53 2522
523f11b5
SK
2523 return 0;
2524}
2525
c66f6c37
TR
2526static void stmmac_hw_teardown(struct net_device *dev)
2527{
2528 struct stmmac_priv *priv = netdev_priv(dev);
2529
2530 clk_disable_unprepare(priv->plat->clk_ptp_ref);
2531}
2532
47dd7a54
GC
2533/**
2534 * stmmac_open - open entry point of the driver
2535 * @dev : pointer to the device structure.
2536 * Description:
2537 * This function is the open entry point of the driver.
2538 * Return value:
2539 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2540 * file on failure.
2541 */
2542static int stmmac_open(struct net_device *dev)
2543{
2544 struct stmmac_priv *priv = netdev_priv(dev);
47dd7a54
GC
2545 int ret;
2546
4bfcbd7a
FV
2547 stmmac_check_ether_addr(priv);
2548
3fe5cadb
GC
2549 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
2550 priv->hw->pcs != STMMAC_PCS_TBI &&
2551 priv->hw->pcs != STMMAC_PCS_RTBI) {
e58bb43f
GC
2552 ret = stmmac_init_phy(dev);
2553 if (ret) {
38ddc59d
LC
2554 netdev_err(priv->dev,
2555 "%s: Cannot attach to PHY (error: %d)\n",
2556 __func__, ret);
89df20d9 2557 return ret;
e58bb43f 2558 }
f66ffe28 2559 }
47dd7a54 2560
523f11b5
SK
2561 /* Extra statistics */
2562 memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
2563 priv->xstats.threshold = tc;
2564
5bacd778 2565 priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
22ad3838 2566 priv->rx_copybreak = STMMAC_RX_COPYBREAK;
56329137 2567
5bacd778
LC
2568 ret = alloc_dma_desc_resources(priv);
2569 if (ret < 0) {
2570 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2571 __func__);
2572 goto dma_desc_error;
2573 }
2574
2575 ret = init_dma_desc_rings(dev, GFP_KERNEL);
2576 if (ret < 0) {
2577 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2578 __func__);
2579 goto init_error;
2580 }
2581
fe131929 2582 ret = stmmac_hw_setup(dev, true);
56329137 2583 if (ret < 0) {
38ddc59d 2584 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
c9324d18 2585 goto init_error;
47dd7a54
GC
2586 }
2587
777da230
GC
2588 stmmac_init_tx_coalesce(priv);
2589
d6d50c7e
PR
2590 if (dev->phydev)
2591 phy_start(dev->phydev);
47dd7a54 2592
f66ffe28
GC
2593 /* Request the IRQ lines */
2594 ret = request_irq(dev->irq, stmmac_interrupt,
ceb69499 2595 IRQF_SHARED, dev->name, dev);
f66ffe28 2596 if (unlikely(ret < 0)) {
38ddc59d
LC
2597 netdev_err(priv->dev,
2598 "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2599 __func__, dev->irq, ret);
6c1e5abe 2600 goto irq_error;
f66ffe28
GC
2601 }
2602
7a13f8f5
FV
2603 /* Request the Wake IRQ in case of another line is used for WoL */
2604 if (priv->wol_irq != dev->irq) {
2605 ret = request_irq(priv->wol_irq, stmmac_interrupt,
2606 IRQF_SHARED, dev->name, dev);
2607 if (unlikely(ret < 0)) {
38ddc59d
LC
2608 netdev_err(priv->dev,
2609 "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2610 __func__, priv->wol_irq, ret);
c9324d18 2611 goto wolirq_error;
7a13f8f5
FV
2612 }
2613 }
2614
d765955d 2615 /* Request the IRQ lines */
d7ec8584 2616 if (priv->lpi_irq > 0) {
d765955d
GC
2617 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2618 dev->name, dev);
2619 if (unlikely(ret < 0)) {
38ddc59d
LC
2620 netdev_err(priv->dev,
2621 "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2622 __func__, priv->lpi_irq, ret);
c9324d18 2623 goto lpiirq_error;
d765955d
GC
2624 }
2625 }
2626
c22a3f48
JP
2627 stmmac_enable_all_queues(priv);
2628 stmmac_start_all_queues(priv);
f66ffe28 2629
47dd7a54 2630 return 0;
f66ffe28 2631
c9324d18 2632lpiirq_error:
d765955d
GC
2633 if (priv->wol_irq != dev->irq)
2634 free_irq(priv->wol_irq, dev);
c9324d18 2635wolirq_error:
7a13f8f5 2636 free_irq(dev->irq, dev);
6c1e5abe
TR
2637irq_error:
2638 if (dev->phydev)
2639 phy_stop(dev->phydev);
7a13f8f5 2640
6c1e5abe 2641 del_timer_sync(&priv->txtimer);
c66f6c37 2642 stmmac_hw_teardown(dev);
c9324d18
GC
2643init_error:
2644 free_dma_desc_resources(priv);
5bacd778 2645dma_desc_error:
d6d50c7e
PR
2646 if (dev->phydev)
2647 phy_disconnect(dev->phydev);
4bfcbd7a 2648
f66ffe28 2649 return ret;
47dd7a54
GC
2650}
2651
2652/**
2653 * stmmac_release - close entry point of the driver
2654 * @dev : device pointer.
2655 * Description:
2656 * This is the stop entry point of the driver.
2657 */
2658static int stmmac_release(struct net_device *dev)
2659{
2660 struct stmmac_priv *priv = netdev_priv(dev);
2661
d765955d
GC
2662 if (priv->eee_enabled)
2663 del_timer_sync(&priv->eee_ctrl_timer);
2664
47dd7a54 2665 /* Stop and disconnect the PHY */
d6d50c7e
PR
2666 if (dev->phydev) {
2667 phy_stop(dev->phydev);
2668 phy_disconnect(dev->phydev);
47dd7a54
GC
2669 }
2670
c22a3f48 2671 stmmac_stop_all_queues(priv);
47dd7a54 2672
c22a3f48 2673 stmmac_disable_all_queues(priv);
47dd7a54 2674
9125cdd1
GC
2675 del_timer_sync(&priv->txtimer);
2676
47dd7a54
GC
2677 /* Free the IRQ lines */
2678 free_irq(dev->irq, dev);
7a13f8f5
FV
2679 if (priv->wol_irq != dev->irq)
2680 free_irq(priv->wol_irq, dev);
d7ec8584 2681 if (priv->lpi_irq > 0)
d765955d 2682 free_irq(priv->lpi_irq, dev);
47dd7a54
GC
2683
2684 /* Stop TX/RX DMA and clear the descriptors */
ae4f0d46 2685 stmmac_stop_all_dma(priv);
47dd7a54
GC
2686
2687 /* Release and free the Rx/Tx resources */
2688 free_dma_desc_resources(priv);
2689
19449bfc 2690 /* Disable the MAC Rx/Tx */
270c7759 2691 priv->hw->mac->set_mac(priv->ioaddr, false);
47dd7a54
GC
2692
2693 netif_carrier_off(dev);
2694
50fb4f74 2695#ifdef CONFIG_DEBUG_FS
466c5ac8 2696 stmmac_exit_fs(dev);
bfab27a1 2697#endif
bfab27a1 2698
92ba6888
RK
2699 stmmac_release_ptp(priv);
2700
47dd7a54
GC
2701 return 0;
2702}
2703
f748be53
AT
2704/**
2705 * stmmac_tso_allocator - allocate TX descriptors for the TSO payload
2706 * @priv: driver private structure
2707 * @des: buffer start address
2708 * @total_len: total length to fill in descriptors
2709 * @last_segment: condition for the last descriptor
ce736788 2710 * @queue: TX queue index
f748be53
AT
2711 * Description:
2712 * This function fills the descriptors and requests new ones according to
2713 * the buffer length to fill.
2714 */
2715static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
ce736788 2716 int total_len, bool last_segment, u32 queue)
f748be53 2717{
ce736788 2718 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
f748be53 2719 struct dma_desc *desc;
5bacd778 2720 u32 buff_size;
ce736788 2721 int tmp_len;
f748be53
AT
2722
2723 tmp_len = total_len;
2724
2725 while (tmp_len > 0) {
ce736788
JP
2726 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
2727 desc = tx_q->dma_tx + tx_q->cur_tx;
f748be53 2728
f8be0d78 2729 desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
f748be53
AT
2730 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2731 TSO_MAX_BUFF_SIZE : tmp_len;
2732
2733 priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
2734 0, 1,
426849e6 2735 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
f748be53
AT
2736 0, 0);
2737
2738 tmp_len -= TSO_MAX_BUFF_SIZE;
2739 }
2740}
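/*
 * Worked example (for illustration only): a remaining payload of
 * 2.5 * TSO_MAX_BUFF_SIZE bytes is spread by the loop above over three
 * descriptors:
 *
 *   buffer1 = des + 0 * TSO_MAX_BUFF_SIZE, len = TSO_MAX_BUFF_SIZE
 *   buffer1 = des + 1 * TSO_MAX_BUFF_SIZE, len = TSO_MAX_BUFF_SIZE
 *   buffer1 = des + 2 * TSO_MAX_BUFF_SIZE, len = 0.5 * TSO_MAX_BUFF_SIZE
 *
 * and only the descriptor carrying the final chunk gets the last-segment
 * bit, provided last_segment was passed as true.
 */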
2741
2742/**
2743 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
2744 * @skb : the socket buffer
2745 * @dev : device pointer
2746 * Description: this is the transmit function that is called on TSO frames
2747 * (support available on GMAC4 and newer chips).
2748 * The diagram below shows the ring programming in case of TSO frames:
2749 *
2750 * First Descriptor
2751 * --------
2752 * | DES0 |---> buffer1 = L2/L3/L4 header
2753 * | DES1 |---> TCP Payload (can continue on next descr...)
2754 * | DES2 |---> buffer 1 and 2 len
2755 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
2756 * --------
2757 * |
2758 * ...
2759 * |
2760 * --------
2761 * | DES0 | --| Split TCP Payload on Buffers 1 and 2
2762 * | DES1 | --|
2763 * | DES2 | --> buffer 1 and 2 len
2764 * | DES3 |
2765 * --------
2766 *
2767 * mss is fixed while tso is enabled, so the TDES3 ctx field is only programmed when mss changes.
2768 */
2769static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2770{
ce736788 2771 struct dma_desc *desc, *first, *mss_desc = NULL;
f748be53
AT
2772 struct stmmac_priv *priv = netdev_priv(dev);
2773 int nfrags = skb_shinfo(skb)->nr_frags;
ce736788 2774 u32 queue = skb_get_queue_mapping(skb);
f748be53 2775 unsigned int first_entry, des;
ce736788
JP
2776 struct stmmac_tx_queue *tx_q;
2777 int tmp_pay_len = 0;
2778 u32 pay_len, mss;
f748be53
AT
2779 u8 proto_hdr_len;
2780 int i;
2781
ce736788
JP
2782 tx_q = &priv->tx_queue[queue];
2783
f748be53
AT
2784 /* Compute header lengths */
2785 proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2786
2787 /* Desc availability based on threshold should be enough safe */
ce736788 2788 if (unlikely(stmmac_tx_avail(priv, queue) <
f748be53 2789 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
c22a3f48
JP
2790 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2791 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2792 queue));
f748be53 2793 /* This is a hard error, log it. */
38ddc59d
LC
2794 netdev_err(priv->dev,
2795 "%s: Tx Ring full when queue awake\n",
2796 __func__);
f748be53 2797 }
f748be53
AT
2798 return NETDEV_TX_BUSY;
2799 }
2800
2801 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
2802
2803 mss = skb_shinfo(skb)->gso_size;
2804
2805 /* set new MSS value if needed */
2806 if (mss != priv->mss) {
ce736788 2807 mss_desc = tx_q->dma_tx + tx_q->cur_tx;
f748be53
AT
2808 priv->hw->desc->set_mss(mss_desc, mss);
2809 priv->mss = mss;
ce736788 2810 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
f748be53
AT
2811 }
2812
2813 if (netif_msg_tx_queued(priv)) {
2814 pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
2815 __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
2816 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
2817 skb->data_len);
2818 }
2819
ce736788 2820 first_entry = tx_q->cur_tx;
f748be53 2821
ce736788 2822 desc = tx_q->dma_tx + first_entry;
f748be53
AT
2823 first = desc;
2824
2825 /* first descriptor: fill Headers on Buf1 */
2826 des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
2827 DMA_TO_DEVICE);
2828 if (dma_mapping_error(priv->device, des))
2829 goto dma_map_err;
2830
ce736788
JP
2831 tx_q->tx_skbuff_dma[first_entry].buf = des;
2832 tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
f748be53 2833
f8be0d78 2834 first->des0 = cpu_to_le32(des);
f748be53
AT
2835
2836 /* Fill start of payload in buff2 of first descriptor */
2837 if (pay_len)
f8be0d78 2838 first->des1 = cpu_to_le32(des + proto_hdr_len);
f748be53
AT
2839
2840 /* If needed take extra descriptors to fill the remaining payload */
2841 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2842
ce736788 2843 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
f748be53
AT
2844
2845 /* Prepare fragments */
2846 for (i = 0; i < nfrags; i++) {
2847 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2848
2849 des = skb_frag_dma_map(priv->device, frag, 0,
2850 skb_frag_size(frag),
2851 DMA_TO_DEVICE);
937071c1
TR
2852 if (dma_mapping_error(priv->device, des))
2853 goto dma_map_err;
f748be53
AT
2854
2855 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
ce736788 2856 (i == nfrags - 1), queue);
f748be53 2857
ce736788
JP
2858 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
2859 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
2860 tx_q->tx_skbuff[tx_q->cur_tx] = NULL;
2861 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
f748be53
AT
2862 }
2863
ce736788 2864 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
f748be53 2865
05cf0d1b
NC
2866 /* Only the last descriptor gets to point to the skb. */
2867 tx_q->tx_skbuff[tx_q->cur_tx] = skb;
2868
2869 /* We've used all descriptors we need for this skb, however,
2870 * advance cur_tx so that it references a fresh descriptor.
2871 * ndo_start_xmit will fill this descriptor the next time it's
2872 * called and stmmac_tx_clean may clean up to this descriptor.
2873 */
ce736788 2874 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
f748be53 2875
ce736788 2876 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
b3e51069
LC
2877 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2878 __func__);
c22a3f48 2879 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
f748be53
AT
2880 }
2881
2882 dev->stats.tx_bytes += skb->len;
2883 priv->xstats.tx_tso_frames++;
2884 priv->xstats.tx_tso_nfrags += nfrags;
2885
2886 /* Manage tx mitigation */
2887 priv->tx_count_frames += nfrags + 1;
2888 if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
2889 mod_timer(&priv->txtimer,
2890 STMMAC_COAL_TIMER(priv->tx_coal_timer));
2891 } else {
2892 priv->tx_count_frames = 0;
2893 priv->hw->desc->set_tx_ic(desc);
2894 priv->xstats.tx_set_ic_bit++;
2895 }
2896
74abc9b1 2897 skb_tx_timestamp(skb);
f748be53
AT
2898
2899 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2900 priv->hwts_tx_en)) {
2901 /* declare that device is doing timestamping */
2902 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2903 priv->hw->desc->enable_tx_timestamp(first);
2904 }
2905
2906 /* Complete the first descriptor before granting the DMA */
2907 priv->hw->desc->prepare_tso_tx_desc(first, 1,
2908 proto_hdr_len,
2909 pay_len,
ce736788 2910 1, tx_q->tx_skbuff_dma[first_entry].last_segment,
f748be53
AT
2911 tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
2912
2913 /* If context desc is used to change MSS */
2914 if (mss_desc)
2915 priv->hw->desc->set_tx_owner(mss_desc);
2916
2917 /* The own bit must be the latest setting done when prepare the
2918 * descriptor and then barrier is needed to make sure that
2919 * all is coherent before granting the DMA engine.
2920 */
ad688cdb 2921 dma_wmb();
f748be53
AT
2922
2923 if (netif_msg_pktdata(priv)) {
2924 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
ce736788
JP
2925 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
2926 tx_q->cur_tx, first, nfrags);
f748be53 2927
ce736788 2928 priv->hw->desc->display_ring((void *)tx_q->dma_tx, DMA_TX_SIZE,
f748be53
AT
2929 0);
2930
2931 pr_info(">>> frame to be transmitted: ");
2932 print_pkt(skb->data, skb_headlen(skb));
2933 }
2934
c22a3f48 2935 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
f748be53 2936
ce736788
JP
2937 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
2938 queue);
f748be53 2939
f748be53
AT
2940 return NETDEV_TX_OK;
2941
2942dma_map_err:
f748be53
AT
2943 dev_err(priv->device, "Tx dma map failed\n");
2944 dev_kfree_skb(skb);
2945 priv->dev->stats.tx_dropped++;
2946 return NETDEV_TX_OK;
2947}
2948
47dd7a54 2949/**
732fdf0e 2950 * stmmac_xmit - Tx entry point of the driver
47dd7a54
GC
2951 * @skb : the socket buffer
2952 * @dev : device pointer
32ceabca
GC
2953 * Description : this is the tx entry point of the driver.
2954 * It programs the chain or the ring and supports oversized frames
2955 * and SG feature.
47dd7a54
GC
2956 */
2957static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2958{
2959 struct stmmac_priv *priv = netdev_priv(dev);
0e80bdc9 2960 unsigned int nopaged_len = skb_headlen(skb);
4a7d666a 2961 int i, csum_insertion = 0, is_jumbo = 0;
ce736788 2962 u32 queue = skb_get_queue_mapping(skb);
47dd7a54 2963 int nfrags = skb_shinfo(skb)->nr_frags;
59423815
CIK
2964 int entry;
2965 unsigned int first_entry;
47dd7a54 2966 struct dma_desc *desc, *first;
ce736788 2967 struct stmmac_tx_queue *tx_q;
0e80bdc9 2968 unsigned int enh_desc;
f748be53
AT
2969 unsigned int des;
2970
ce736788
JP
2971 tx_q = &priv->tx_queue[queue];
2972
f748be53
AT
2973 /* Manage oversized TCP frames for GMAC4 device */
2974 if (skb_is_gso(skb) && priv->tso) {
9edfa7da 2975 if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
f748be53
AT
2976 return stmmac_tso_xmit(skb, dev);
2977 }
47dd7a54 2978
ce736788 2979 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
c22a3f48
JP
2980 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
2981 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
2982 queue));
47dd7a54 2983 /* This is a hard error, log it. */
38ddc59d
LC
2984 netdev_err(priv->dev,
2985 "%s: Tx Ring full when queue awake\n",
2986 __func__);
47dd7a54
GC
2987 }
2988 return NETDEV_TX_BUSY;
2989 }
2990
d765955d
GC
2991 if (priv->tx_path_in_lpi_mode)
2992 stmmac_disable_eee_mode(priv);
2993
ce736788 2994 entry = tx_q->cur_tx;
0e80bdc9 2995 first_entry = entry;
47dd7a54 2996
5e982f3b 2997 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
47dd7a54 2998
0e80bdc9 2999 if (likely(priv->extend_desc))
ce736788 3000 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
c24602ef 3001 else
ce736788 3002 desc = tx_q->dma_tx + entry;
c24602ef 3003
47dd7a54
GC
3004 first = desc;
3005
0e80bdc9 3006 enh_desc = priv->plat->enh_desc;
4a7d666a 3007 /* To program the descriptors according to the size of the frame */
29896a67
GC
3008 if (enh_desc)
3009 is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
3010
f748be53
AT
3011 if (unlikely(is_jumbo) && likely(priv->synopsys_id <
3012 DWMAC_CORE_4_00)) {
ce736788 3013 entry = priv->hw->mode->jumbo_frm(tx_q, skb, csum_insertion);
362b37be
GC
3014 if (unlikely(entry < 0))
3015 goto dma_map_err;
29896a67 3016 }
47dd7a54
GC
3017
3018 for (i = 0; i < nfrags; i++) {
9e903e08
ED
3019 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3020 int len = skb_frag_size(frag);
be434d50 3021 bool last_segment = (i == (nfrags - 1));
47dd7a54 3022
e3ad57c9
GC
3023 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3024
0e80bdc9 3025 if (likely(priv->extend_desc))
ce736788 3026 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
c24602ef 3027 else
ce736788 3028 desc = tx_q->dma_tx + entry;
47dd7a54 3029
f748be53
AT
3030 des = skb_frag_dma_map(priv->device, frag, 0, len,
3031 DMA_TO_DEVICE);
3032 if (dma_mapping_error(priv->device, des))
362b37be
GC
3033 goto dma_map_err; /* should reuse desc w/o issues */
3034
ce736788 3035 tx_q->tx_skbuff[entry] = NULL;
f748be53 3036
ce736788 3037 tx_q->tx_skbuff_dma[entry].buf = des;
f8be0d78
MW
3038 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3039 desc->des0 = cpu_to_le32(des);
3040 else
3041 desc->des2 = cpu_to_le32(des);
f748be53 3042
ce736788
JP
3043 tx_q->tx_skbuff_dma[entry].map_as_page = true;
3044 tx_q->tx_skbuff_dma[entry].len = len;
3045 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
0e80bdc9
GC
3046
3047 /* Prepare the descriptor and set the own bit too */
4a7d666a 3048 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
fe6af0e1
NC
3049 priv->mode, 1, last_segment,
3050 skb->len);
47dd7a54
GC
3051 }
3052
05cf0d1b
NC
3053 /* Only the last descriptor gets to point to the skb. */
3054 tx_q->tx_skbuff[entry] = skb;
e3ad57c9 3055
05cf0d1b
NC
3056 /* We've used all descriptors we need for this skb, however,
3057 * advance cur_tx so that it references a fresh descriptor.
3058 * ndo_start_xmit will fill this descriptor the next time it's
3059 * called and stmmac_tx_clean may clean up to this descriptor.
3060 */
3061 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
ce736788 3062 tx_q->cur_tx = entry;
47dd7a54 3063
47dd7a54 3064 if (netif_msg_pktdata(priv)) {
d0225e7d
AT
3065 void *tx_head;
3066
38ddc59d
LC
3067 netdev_dbg(priv->dev,
3068 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
ce736788 3069 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
38ddc59d 3070 entry, first, nfrags);
83d7af64 3071
c24602ef 3072 if (priv->extend_desc)
ce736788 3073 tx_head = (void *)tx_q->dma_etx;
c24602ef 3074 else
ce736788 3075 tx_head = (void *)tx_q->dma_tx;
d0225e7d
AT
3076
3077 priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
c24602ef 3078
38ddc59d 3079 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
47dd7a54
GC
3080 print_pkt(skb->data, skb->len);
3081 }
0e80bdc9 3082
ce736788 3083 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
b3e51069
LC
3084 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
3085 __func__);
c22a3f48 3086 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
47dd7a54
GC
3087 }
3088
3089 dev->stats.tx_bytes += skb->len;
3090
0e80bdc9
GC
3091 /* According to the coalesce parameter the IC bit for the latest
3092 * segment is reset and the timer re-started to clean the tx status.
3093 * This approach takes care about the fragments: desc is the first
3094 * element in case of no SG.
3095 */
3096 priv->tx_count_frames += nfrags + 1;
3097 if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
3098 mod_timer(&priv->txtimer,
3099 STMMAC_COAL_TIMER(priv->tx_coal_timer));
3100 } else {
3101 priv->tx_count_frames = 0;
3102 priv->hw->desc->set_tx_ic(desc);
3103 priv->xstats.tx_set_ic_bit++;
891434b1
RK
3104 }
3105
74abc9b1 3106 skb_tx_timestamp(skb);
3e82ce12 3107
0e80bdc9
GC
3108 /* Ready to fill the first descriptor and set the OWN bit w/o any
3109 * problems because all the descriptors are actually ready to be
3110 * passed to the DMA engine.
3111 */
3112 if (likely(!is_jumbo)) {
3113 bool last_segment = (nfrags == 0);
3114
f748be53
AT
3115 des = dma_map_single(priv->device, skb->data,
3116 nopaged_len, DMA_TO_DEVICE);
3117 if (dma_mapping_error(priv->device, des))
0e80bdc9
GC
3118 goto dma_map_err;
3119
ce736788 3120 tx_q->tx_skbuff_dma[first_entry].buf = des;
f8be0d78
MW
3121 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3122 first->des0 = cpu_to_le32(des);
3123 else
3124 first->des2 = cpu_to_le32(des);
f748be53 3125
ce736788
JP
3126 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
3127 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
0e80bdc9
GC
3128
3129 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3130 priv->hwts_tx_en)) {
3131 /* declare that device is doing timestamping */
3132 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3133 priv->hw->desc->enable_tx_timestamp(first);
3134 }
3135
3136 /* Prepare the first descriptor setting the OWN bit too */
3137 priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
3138 csum_insertion, priv->mode, 1,
fe6af0e1 3139 last_segment, skb->len);
0e80bdc9
GC
3140
3141 /* The own bit must be the latest setting done when prepare the
3142 * descriptor and then barrier is needed to make sure that
3143 * all is coherent before granting the DMA engine.
3144 */
ad688cdb 3145 dma_wmb();
0e80bdc9
GC
3146 }
3147
c22a3f48 3148 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
f748be53
AT
3149
3150 if (priv->synopsys_id < DWMAC_CORE_4_00)
3151 priv->hw->dma->enable_dma_transmission(priv->ioaddr);
3152 else
ce736788
JP
3153 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
3154 queue);
52f64fae 3155
362b37be 3156 return NETDEV_TX_OK;
a9097a96 3157
362b37be 3158dma_map_err:
38ddc59d 3159 netdev_err(priv->dev, "Tx DMA map failed\n");
362b37be
GC
3160 dev_kfree_skb(skb);
3161 priv->dev->stats.tx_dropped++;
47dd7a54
GC
3162 return NETDEV_TX_OK;
3163}
3164
b9381985
VB
3165static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
3166{
3167 struct ethhdr *ehdr;
3168 u16 vlanid;
3169
3170 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
3171 NETIF_F_HW_VLAN_CTAG_RX &&
3172 !__vlan_get_tag(skb, &vlanid)) {
3173 /* pop the vlan tag */
3174 ehdr = (struct ethhdr *)skb->data;
3175 memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
3176 skb_pull(skb, VLAN_HLEN);
3177 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
3178 }
3179}
3180
3181
54139cf3 3182static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
120e87f9 3183{
54139cf3 3184 if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
120e87f9
GC
3185 return 0;
3186
3187 return 1;
3188}
3189
32ceabca 3190/**
732fdf0e 3191 * stmmac_rx_refill - refill used skb preallocated buffers
32ceabca 3192 * @priv: driver private structure
54139cf3 3193 * @queue: RX queue index
32ceabca
GC
3194 * Description : this is to reallocate the skb for the reception process
3195 * that is based on zero-copy.
3196 */
54139cf3 3197static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
47dd7a54 3198{
54139cf3
JP
3199 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3200 int dirty = stmmac_rx_dirty(priv, queue);
3201 unsigned int entry = rx_q->dirty_rx;
3202
47dd7a54 3203 int bfsize = priv->dma_buf_sz;
47dd7a54 3204
e3ad57c9 3205 while (dirty-- > 0) {
c24602ef
GC
3206 struct dma_desc *p;
3207
3208 if (priv->extend_desc)
54139cf3 3209 p = (struct dma_desc *)(rx_q->dma_erx + entry);
c24602ef 3210 else
54139cf3 3211 p = rx_q->dma_rx + entry;
c24602ef 3212
54139cf3 3213 if (likely(!rx_q->rx_skbuff[entry])) {
47dd7a54
GC
3214 struct sk_buff *skb;
3215
acb600de 3216 skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
120e87f9
GC
3217 if (unlikely(!skb)) {
3218 /* so for a while no zero-copy! */
54139cf3 3219 rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
120e87f9
GC
3220 if (unlikely(net_ratelimit()))
3221 dev_err(priv->device,
3222 "fail to alloc skb entry %d\n",
3223 entry);
47dd7a54 3224 break;
120e87f9 3225 }
47dd7a54 3226
54139cf3
JP
3227 rx_q->rx_skbuff[entry] = skb;
3228 rx_q->rx_skbuff_dma[entry] =
47dd7a54
GC
3229 dma_map_single(priv->device, skb->data, bfsize,
3230 DMA_FROM_DEVICE);
362b37be 3231 if (dma_mapping_error(priv->device,
54139cf3 3232 rx_q->rx_skbuff_dma[entry])) {
38ddc59d 3233 netdev_err(priv->dev, "Rx DMA map failed\n");
362b37be
GC
3234 dev_kfree_skb(skb);
3235 break;
3236 }
286a8372 3237
f748be53 3238 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
54139cf3 3239 p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
f748be53
AT
3240 p->des1 = 0;
3241 } else {
54139cf3 3242 p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
f748be53
AT
3243 }
3244 if (priv->hw->mode->refill_desc3)
54139cf3 3245 priv->hw->mode->refill_desc3(rx_q, p);
286a8372 3246
54139cf3
JP
3247 if (rx_q->rx_zeroc_thresh > 0)
3248 rx_q->rx_zeroc_thresh--;
120e87f9 3249
b3e51069
LC
3250 netif_dbg(priv, rx_status, priv->dev,
3251 "refill entry #%d\n", entry);
47dd7a54 3252 }
ad688cdb 3253 dma_wmb();
f748be53
AT
3254
3255 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
3256 priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
3257 else
3258 priv->hw->desc->set_rx_owner(p);
3259
ad688cdb 3260 dma_wmb();
e3ad57c9
GC
3261
3262 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
47dd7a54 3263 }
54139cf3 3264 rx_q->dirty_rx = entry;
47dd7a54
GC
3265}
3266
32ceabca 3267/**
732fdf0e 3268 * stmmac_rx - manage the receive process
32ceabca 3269 * @priv: driver private structure
54139cf3
JP
3270 * @limit: napi budget
3271 * @queue: RX queue index.
32ceabca
GC
3272 * Description : this is the function called by the napi poll method.
3273 * It gets all the frames inside the ring.
3274 */
54139cf3 3275static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
47dd7a54 3276{
54139cf3
JP
3277 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3278 unsigned int entry = rx_q->cur_rx;
3279 int coe = priv->hw->rx_csum;
47dd7a54
GC
3280 unsigned int next_entry;
3281 unsigned int count = 0;
47dd7a54 3282
83d7af64 3283 if (netif_msg_rx_status(priv)) {
d0225e7d
AT
3284 void *rx_head;
3285
38ddc59d 3286 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
c24602ef 3287 if (priv->extend_desc)
54139cf3 3288 rx_head = (void *)rx_q->dma_erx;
c24602ef 3289 else
54139cf3 3290 rx_head = (void *)rx_q->dma_rx;
d0225e7d
AT
3291
3292 priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
47dd7a54 3293 }
c24602ef 3294 while (count < limit) {
47dd7a54 3295 int status;
9401bb5c 3296 struct dma_desc *p;
ba1ffd74 3297 struct dma_desc *np;
47dd7a54 3298
c24602ef 3299 if (priv->extend_desc)
54139cf3 3300 p = (struct dma_desc *)(rx_q->dma_erx + entry);
c24602ef 3301 else
54139cf3 3302 p = rx_q->dma_rx + entry;
c24602ef 3303
c1fa3212
FG
3304 /* read the status of the incoming frame */
3305 status = priv->hw->desc->rx_status(&priv->dev->stats,
3306 &priv->xstats, p);
3307 /* check if managed by the DMA otherwise go ahead */
3308 if (unlikely(status & dma_own))
47dd7a54
GC
3309 break;
3310
3311 count++;
3312
54139cf3
JP
3313 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3314 next_entry = rx_q->cur_rx;
e3ad57c9 3315
c24602ef 3316 if (priv->extend_desc)
54139cf3 3317 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
c24602ef 3318 else
54139cf3 3319 np = rx_q->dma_rx + next_entry;
ba1ffd74
GC
3320
3321 prefetch(np);
47dd7a54 3322
c24602ef
GC
3323 if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
3324 priv->hw->desc->rx_extended_status(&priv->dev->stats,
3325 &priv->xstats,
54139cf3 3326 rx_q->dma_erx +
c24602ef 3327 entry);
891434b1 3328 if (unlikely(status == discard_frame)) {
47dd7a54 3329 priv->dev->stats.rx_errors++;
891434b1 3330 if (priv->hwts_rx_en && !priv->extend_desc) {
8d45e42b 3331 /* DESC2 & DESC3 will be overwritten by device
891434b1
RK
3332 * with timestamp value, hence reinitialize
3333 * them in stmmac_rx_refill() function so that
3334 * device can reuse it.
3335 */
9c8080d0 3336 dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
54139cf3 3337 rx_q->rx_skbuff[entry] = NULL;
891434b1 3338 dma_unmap_single(priv->device,
54139cf3 3339 rx_q->rx_skbuff_dma[entry],
ceb69499
GC
3340 priv->dma_buf_sz,
3341 DMA_FROM_DEVICE);
891434b1
RK
3342 }
3343 } else {
47dd7a54 3344 struct sk_buff *skb;
3eeb2997 3345 int frame_len;
f748be53
AT
3346 unsigned int des;
3347
3348 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
f8be0d78 3349 des = le32_to_cpu(p->des0);
f748be53 3350 else
f8be0d78 3351 des = le32_to_cpu(p->des2);
47dd7a54 3352
ceb69499
GC
3353 frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
3354
8d45e42b 3355 /* If frame length is greater than skb buffer size
f748be53
AT
3356 * (preallocated during init) then the packet is
3357 * ignored
3358 */
e527c4a7 3359 if (frame_len > priv->dma_buf_sz) {
38ddc59d
LC
3360 netdev_err(priv->dev,
3361 "len %d larger than size (%d)\n",
3362 frame_len, priv->dma_buf_sz);
e527c4a7
GC
3363 priv->dev->stats.rx_length_errors++;
3364 break;
3365 }
3366
3eeb2997 3367 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
ceb69499
GC
3368 * Type frames (LLC/LLC-SNAP)
3369 */
3eeb2997
GC
3370 if (unlikely(status != llc_snap))
3371 frame_len -= ETH_FCS_LEN;
47dd7a54 3372
83d7af64 3373 if (netif_msg_rx_status(priv)) {
38ddc59d
LC
3374 netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
3375 p, entry, des);
83d7af64 3376 if (frame_len > ETH_FRAME_LEN)
38ddc59d
LC
3377 netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
3378 frame_len, status);
83d7af64 3379 }
22ad3838 3380
f748be53
AT
3381 /* Zero-copy is always used for all sizes in the
3382 * case of GMAC4 because the used descriptors
3383 * always need to be refilled.
3384 */
3385 if (unlikely(!priv->plat->has_gmac4 &&
3386 ((frame_len < priv->rx_copybreak) ||
54139cf3 3387 stmmac_rx_threshold_count(rx_q)))) {
22ad3838
GC
3388 skb = netdev_alloc_skb_ip_align(priv->dev,
3389 frame_len);
3390 if (unlikely(!skb)) {
3391 if (net_ratelimit())
3392 dev_warn(priv->device,
3393 "packet dropped\n");
3394 priv->dev->stats.rx_dropped++;
3395 break;
3396 }
3397
3398 dma_sync_single_for_cpu(priv->device,
54139cf3 3399 rx_q->rx_skbuff_dma
22ad3838
GC
3400 [entry], frame_len,
3401 DMA_FROM_DEVICE);
3402 skb_copy_to_linear_data(skb,
54139cf3 3403 rx_q->
22ad3838
GC
3404 rx_skbuff[entry]->data,
3405 frame_len);
3406
3407 skb_put(skb, frame_len);
3408 dma_sync_single_for_device(priv->device,
54139cf3 3409 rx_q->rx_skbuff_dma
22ad3838
GC
3410 [entry], frame_len,
3411 DMA_FROM_DEVICE);
3412 } else {
54139cf3 3413 skb = rx_q->rx_skbuff[entry];
22ad3838 3414 if (unlikely(!skb)) {
38ddc59d
LC
3415 netdev_err(priv->dev,
3416 "%s: Inconsistent Rx chain\n",
3417 priv->dev->name);
22ad3838
GC
3418 priv->dev->stats.rx_dropped++;
3419 break;
3420 }
3421 prefetch(skb->data - NET_IP_ALIGN);
54139cf3
JP
3422 rx_q->rx_skbuff[entry] = NULL;
3423 rx_q->rx_zeroc_thresh++;
22ad3838
GC
3424
3425 skb_put(skb, frame_len);
3426 dma_unmap_single(priv->device,
54139cf3 3427 rx_q->rx_skbuff_dma[entry],
22ad3838
GC
3428 priv->dma_buf_sz,
3429 DMA_FROM_DEVICE);
47dd7a54 3430 }
47dd7a54 3431
47dd7a54 3432 if (netif_msg_pktdata(priv)) {
38ddc59d
LC
3433 netdev_dbg(priv->dev, "frame received (%dbytes)",
3434 frame_len);
47dd7a54
GC
3435 print_pkt(skb->data, frame_len);
3436 }
83d7af64 3437
ba1ffd74
GC
3438 stmmac_get_rx_hwtstamp(priv, p, np, skb);
3439
b9381985
VB
3440 stmmac_rx_vlan(priv->dev, skb);
3441
47dd7a54
GC
3442 skb->protocol = eth_type_trans(skb, priv->dev);
3443
ceb69499 3444 if (unlikely(!coe))
bc8acf2c 3445 skb_checksum_none_assert(skb);
62a2ab93 3446 else
47dd7a54 3447 skb->ip_summed = CHECKSUM_UNNECESSARY;
62a2ab93 3448
c22a3f48 3449 napi_gro_receive(&rx_q->napi, skb);
47dd7a54
GC
3450
3451 priv->dev->stats.rx_packets++;
3452 priv->dev->stats.rx_bytes += frame_len;
47dd7a54
GC
3453 }
3454 entry = next_entry;
47dd7a54
GC
3455 }
3456
54139cf3 3457 stmmac_rx_refill(priv, queue);
47dd7a54
GC
3458
3459 priv->xstats.rx_pkt_n += count;
3460
3461 return count;
3462}
3463
3464/**
3465 * stmmac_poll - stmmac poll method (NAPI)
3466 * @napi : pointer to the napi structure.
3467 * @budget : maximum number of packets that the current CPU can receive from
3468 * all interfaces.
3469 * Description :
9125cdd1 3470 * It processes the incoming frames and cleans the tx resources.
47dd7a54
GC
3471 */
3472static int stmmac_poll(struct napi_struct *napi, int budget)
3473{
c22a3f48
JP
3474 struct stmmac_rx_queue *rx_q =
3475 container_of(napi, struct stmmac_rx_queue, napi);
3476 struct stmmac_priv *priv = rx_q->priv_data;
ce736788 3477 u32 tx_count = priv->plat->tx_queues_to_use;
c22a3f48 3478 u32 chan = rx_q->queue_index;
54139cf3 3479 int work_done = 0;
c22a3f48 3480 u32 queue;
47dd7a54 3481
9125cdd1 3482 priv->xstats.napi_poll++;
ce736788
JP
3483
3484 /* check all the queues */
3485 for (queue = 0; queue < tx_count; queue++)
3486 stmmac_tx_clean(priv, queue);
3487
c22a3f48 3488 work_done = stmmac_rx(priv, budget, rx_q->queue_index);
47dd7a54 3489 if (work_done < budget) {
6ad20165 3490 napi_complete_done(napi, work_done);
4f513ecd 3491 stmmac_enable_dma_irq(priv, chan);
47dd7a54
GC
3492 }
3493 return work_done;
3494}
3495
3496/**
3497 * stmmac_tx_timeout
3498 * @dev : Pointer to net device structure
3499 * Description: this function is called when a packet transmission fails to
7284a3f1 3500 * complete within a reasonable time. The driver will mark the error in the
47dd7a54
GC
3501 * netdev structure and arrange for the device to be reset to a sane state
3502 * in order to transmit a new packet.
3503 */
3504static void stmmac_tx_timeout(struct net_device *dev)
3505{
3506 struct stmmac_priv *priv = netdev_priv(dev);
ce736788
JP
3507 u32 tx_count = priv->plat->tx_queues_to_use;
3508 u32 chan;
47dd7a54
GC
3509
3510 /* Clear Tx resources and restart transmitting again */
ce736788
JP
3511 for (chan = 0; chan < tx_count; chan++)
3512 stmmac_tx_err(priv, chan);
47dd7a54
GC
3513}
3514
47dd7a54 3515/**
01789349 3516 * stmmac_set_rx_mode - entry point for multicast addressing
47dd7a54
GC
3517 * @dev : pointer to the device structure
3518 * Description:
3519 * This function is a driver entry point which gets called by the kernel
3520 * whenever multicast addresses must be enabled/disabled.
3521 * Return value:
3522 * void.
3523 */
01789349 3524static void stmmac_set_rx_mode(struct net_device *dev)
47dd7a54
GC
3525{
3526 struct stmmac_priv *priv = netdev_priv(dev);
3527
3b57de95 3528 priv->hw->mac->set_filter(priv->hw, dev);
47dd7a54
GC
3529}
3530
3531/**
3532 * stmmac_change_mtu - entry point to change MTU size for the device.
3533 * @dev : device pointer.
3534 * @new_mtu : the new MTU size for the device.
3535 * Description: the Maximum Transmission Unit (MTU) is used by the network layer
3536 * to drive packet transmission. Ethernet has an MTU of 1500 octets
3537 * (ETH_DATA_LEN). This value can be changed with ifconfig.
3538 * Return value:
3539 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3540 * file on failure.
3541 */
3542static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
3543{
38ddc59d
LC
3544 struct stmmac_priv *priv = netdev_priv(dev);
3545
47dd7a54 3546 if (netif_running(dev)) {
38ddc59d 3547 netdev_err(priv->dev, "must be stopped to change its MTU\n");
47dd7a54
GC
3548 return -EBUSY;
3549 }
3550
5e982f3b 3551 dev->mtu = new_mtu;
f748be53 3552
5e982f3b
MM
3553 netdev_update_features(dev);
3554
3555 return 0;
3556}
3557
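/* Usage sketch (hypothetical interface name and MTU value): because the
 * handler above returns -EBUSY while the interface is running, the MTU can
 * only be changed with the device down, e.g. from user space:
 *
 *   ip link set dev eth0 down
 *   ip link set dev eth0 mtu 4000
 *   ip link set dev eth0 up
 *
 * The accepted range is bounded by ndev->min_mtu/ndev->max_mtu, which are
 * set in stmmac_dvr_probe().
 */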
c8f44aff 3558static netdev_features_t stmmac_fix_features(struct net_device *dev,
ceb69499 3559 netdev_features_t features)
5e982f3b
MM
3560{
3561 struct stmmac_priv *priv = netdev_priv(dev);
3562
38912bdb 3563 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5e982f3b 3564 features &= ~NETIF_F_RXCSUM;
d2afb5bd 3565
5e982f3b 3566 if (!priv->plat->tx_coe)
a188222b 3567 features &= ~NETIF_F_CSUM_MASK;
5e982f3b 3568
ebbb293f
GC
3569 /* Some GMAC devices have bugged Jumbo frame support that
3570 * needs to have the Tx COE disabled for oversized frames
3571 * (due to limited buffer sizes). In this case we disable
8d45e42b 3572 * the TX csum insertion in the TDES and do not use SF.
ceb69499 3573 */
5e982f3b 3574 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
a188222b 3575 features &= ~NETIF_F_CSUM_MASK;
ebbb293f 3576
f748be53
AT
3577 /* Disable tso if asked by ethtool */
3578 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3579 if (features & NETIF_F_TSO)
3580 priv->tso = true;
3581 else
3582 priv->tso = false;
3583 }
3584
5e982f3b 3585 return features;
47dd7a54
GC
3586}
3587
d2afb5bd
GC
3588static int stmmac_set_features(struct net_device *netdev,
3589 netdev_features_t features)
3590{
3591 struct stmmac_priv *priv = netdev_priv(netdev);
3592
3593 /* Keep the COE Type in case of csum is supporting */
3594 if (features & NETIF_F_RXCSUM)
3595 priv->hw->rx_csum = priv->plat->rx_coe;
3596 else
3597 priv->hw->rx_csum = 0;
3598 /* No check needed because rx_coe has been set before and it will be
3599 * fixed in case of issue.
3600 */
3601 priv->hw->mac->rx_ipc(priv->hw);
3602
3603 return 0;
3604}
3605
32ceabca
GC
3606/**
3607 * stmmac_interrupt - main ISR
3608 * @irq: interrupt number.
3609 * @dev_id: to pass the net device pointer.
3610 * Description: this is the main driver interrupt service routine.
732fdf0e
GC
3611 * It can call:
3612 * o DMA service routine (to manage incoming frame reception and transmission
3613 * status)
3614 * o Core interrupts to manage: remote wake-up, management counter, LPI
3615 * interrupts.
32ceabca 3616 */
47dd7a54
GC
3617static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
3618{
3619 struct net_device *dev = (struct net_device *)dev_id;
3620 struct stmmac_priv *priv = netdev_priv(dev);
7bac4e1e
JP
3621 u32 rx_cnt = priv->plat->rx_queues_to_use;
3622 u32 tx_cnt = priv->plat->tx_queues_to_use;
3623 u32 queues_count;
3624 u32 queue;
3625
3626 queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
47dd7a54 3627
89f7f2cf
SK
3628 if (priv->irq_wake)
3629 pm_wakeup_event(priv->device, 0);
3630
47dd7a54 3631 if (unlikely(!dev)) {
38ddc59d 3632 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
47dd7a54
GC
3633 return IRQ_NONE;
3634 }
3635
d765955d 3636 /* To handle GMAC own interrupts */
f748be53 3637 if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
7ed24bbe 3638 int status = priv->hw->mac->host_irq_status(priv->hw,
0982a0f6 3639 &priv->xstats);
8f71a88d 3640
d765955d 3641 if (unlikely(status)) {
d765955d 3642 /* For LPI we need to save the tx status */
0982a0f6 3643 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
d765955d 3644 priv->tx_path_in_lpi_mode = true;
0982a0f6 3645 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
d765955d 3646 priv->tx_path_in_lpi_mode = false;
7bac4e1e
JP
3647 }
3648
3649 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3650 for (queue = 0; queue < queues_count; queue++) {
54139cf3
JP
3651 struct stmmac_rx_queue *rx_q =
3652 &priv->rx_queue[queue];
3653
7bac4e1e
JP
3654 status |=
3655 priv->hw->mac->host_mtl_irq_status(priv->hw,
3656 queue);
3657
3658 if (status & CORE_IRQ_MTL_RX_OVERFLOW &&
3659 priv->hw->dma->set_rx_tail_ptr)
3660 priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
54139cf3 3661 rx_q->rx_tail_addr,
7bac4e1e
JP
3662 queue);
3663 }
d765955d 3664 }
70523e63
GC
3665
3666 /* PCS link status */
3fe5cadb 3667 if (priv->hw->pcs) {
70523e63
GC
3668 if (priv->xstats.pcs_link)
3669 netif_carrier_on(dev);
3670 else
3671 netif_carrier_off(dev);
3672 }
d765955d 3673 }
aec7ff27 3674
d765955d 3675 /* To handle DMA interrupts */
aec7ff27 3676 stmmac_dma_interrupt(priv);
47dd7a54
GC
3677
3678 return IRQ_HANDLED;
3679}
3680
3681#ifdef CONFIG_NET_POLL_CONTROLLER
3682/* Polling receive - used by NETCONSOLE and other diagnostic tools
ceb69499
GC
3683 * to allow network I/O with interrupts disabled.
3684 */
47dd7a54
GC
3685static void stmmac_poll_controller(struct net_device *dev)
3686{
3687 disable_irq(dev->irq);
3688 stmmac_interrupt(dev->irq, dev);
3689 enable_irq(dev->irq);
3690}
3691#endif
3692
3693/**
3694 * stmmac_ioctl - Entry point for the Ioctl
3695 * @dev: Device pointer.
3696 * @rq: An IOCTL-specific structure that can contain a pointer to
3697 * a proprietary structure used to pass information to the driver.
3698 * @cmd: IOCTL command
3699 * Description:
32ceabca 3700 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
47dd7a54
GC
3701 */
3702static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3703{
891434b1 3704 int ret = -EOPNOTSUPP;
47dd7a54
GC
3705
3706 if (!netif_running(dev))
3707 return -EINVAL;
3708
891434b1
RK
3709 switch (cmd) {
3710 case SIOCGMIIPHY:
3711 case SIOCGMIIREG:
3712 case SIOCSMIIREG:
d6d50c7e 3713 if (!dev->phydev)
891434b1 3714 return -EINVAL;
d6d50c7e 3715 ret = phy_mii_ioctl(dev->phydev, rq, cmd);
891434b1
RK
3716 break;
3717 case SIOCSHWTSTAMP:
3718 ret = stmmac_hwtstamp_ioctl(dev, rq);
3719 break;
3720 default:
3721 break;
3722 }
28b04113 3723
47dd7a54
GC
3724 return ret;
3725}
3726
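/* Usage sketch (assumed user-space tooling, not part of this driver): the
 * SIOCSHWTSTAMP path above is normally exercised through a PTP stack such
 * as linuxptp's hwstamp_ctl rather than called directly, e.g.:
 *
 *   hwstamp_ctl -i eth0 -t 1 -r 1
 *
 * The MII ioctls (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG) are forwarded to
 * phy_mii_ioctl() and require an attached PHY (dev->phydev).
 */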
50fb4f74 3727#ifdef CONFIG_DEBUG_FS
7ac29055 3728static struct dentry *stmmac_fs_dir;
7ac29055 3729
c24602ef 3730static void sysfs_display_ring(void *head, int size, int extend_desc,
ceb69499 3731 struct seq_file *seq)
7ac29055 3732{
7ac29055 3733 int i;
ceb69499
GC
3734 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
3735 struct dma_desc *p = (struct dma_desc *)head;
7ac29055 3736
c24602ef 3737 for (i = 0; i < size; i++) {
c24602ef 3738 if (extend_desc) {
c24602ef 3739 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
ceb69499 3740 i, (unsigned int)virt_to_phys(ep),
f8be0d78
MW
3741 le32_to_cpu(ep->basic.des0),
3742 le32_to_cpu(ep->basic.des1),
3743 le32_to_cpu(ep->basic.des2),
3744 le32_to_cpu(ep->basic.des3));
c24602ef
GC
3745 ep++;
3746 } else {
c24602ef 3747 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
66c25f6e 3748 i, (unsigned int)virt_to_phys(p),
f8be0d78
MW
3749 le32_to_cpu(p->des0), le32_to_cpu(p->des1),
3750 le32_to_cpu(p->des2), le32_to_cpu(p->des3));
c24602ef
GC
3751 p++;
3752 }
7ac29055
GC
3753 seq_printf(seq, "\n");
3754 }
c24602ef 3755}
7ac29055 3756
c24602ef
GC
3757static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
3758{
3759 struct net_device *dev = seq->private;
3760 struct stmmac_priv *priv = netdev_priv(dev);
54139cf3 3761 u32 rx_count = priv->plat->rx_queues_to_use;
ce736788 3762 u32 tx_count = priv->plat->tx_queues_to_use;
54139cf3
JP
3763 u32 queue;
3764
3765 for (queue = 0; queue < rx_count; queue++) {
3766 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3767
3768 seq_printf(seq, "RX Queue %d:\n", queue);
3769
3770 if (priv->extend_desc) {
3771 seq_printf(seq, "Extended descriptor ring:\n");
3772 sysfs_display_ring((void *)rx_q->dma_erx,
3773 DMA_RX_SIZE, 1, seq);
3774 } else {
3775 seq_printf(seq, "Descriptor ring:\n");
3776 sysfs_display_ring((void *)rx_q->dma_rx,
3777 DMA_RX_SIZE, 0, seq);
3778 }
3779 }
aff3d9ef 3780
ce736788
JP
3781 for (queue = 0; queue < tx_count; queue++) {
3782 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3783
3784 seq_printf(seq, "TX Queue %d:\n", queue);
3785
3786 if (priv->extend_desc) {
3787 seq_printf(seq, "Extended descriptor ring:\n");
3788 sysfs_display_ring((void *)tx_q->dma_etx,
3789 DMA_TX_SIZE, 1, seq);
3790 } else {
3791 seq_printf(seq, "Descriptor ring:\n");
3792 sysfs_display_ring((void *)tx_q->dma_tx,
3793 DMA_TX_SIZE, 0, seq);
3794 }
7ac29055
GC
3795 }
3796
3797 return 0;
3798}
3799
3800static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
3801{
3802 return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
3803}
3804
22d3efe5
PM
3805/* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
3806
7ac29055
GC
3807static const struct file_operations stmmac_rings_status_fops = {
3808 .owner = THIS_MODULE,
3809 .open = stmmac_sysfs_ring_open,
3810 .read = seq_read,
3811 .llseek = seq_lseek,
74863948 3812 .release = single_release,
7ac29055
GC
3813};
3814
e7434821
GC
3815static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
3816{
3817 struct net_device *dev = seq->private;
3818 struct stmmac_priv *priv = netdev_priv(dev);
3819
19e30c14 3820 if (!priv->hw_cap_support) {
e7434821
GC
3821 seq_printf(seq, "DMA HW features not supported\n");
3822 return 0;
3823 }
3824
3825 seq_printf(seq, "==============================\n");
3826 seq_printf(seq, "\tDMA HW features\n");
3827 seq_printf(seq, "==============================\n");
3828
22d3efe5 3829 seq_printf(seq, "\t10/100 Mbps: %s\n",
e7434821 3830 (priv->dma_cap.mbps_10_100) ? "Y" : "N");
22d3efe5 3831 seq_printf(seq, "\t1000 Mbps: %s\n",
e7434821 3832 (priv->dma_cap.mbps_1000) ? "Y" : "N");
22d3efe5 3833 seq_printf(seq, "\tHalf duplex: %s\n",
e7434821
GC
3834 (priv->dma_cap.half_duplex) ? "Y" : "N");
3835 seq_printf(seq, "\tHash Filter: %s\n",
3836 (priv->dma_cap.hash_filter) ? "Y" : "N");
3837 seq_printf(seq, "\tMultiple MAC address registers: %s\n",
3838 (priv->dma_cap.multi_addr) ? "Y" : "N");
8d45e42b 3839 seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
e7434821
GC
3840 (priv->dma_cap.pcs) ? "Y" : "N");
3841 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
3842 (priv->dma_cap.sma_mdio) ? "Y" : "N");
3843 seq_printf(seq, "\tPMT Remote wake up: %s\n",
3844 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
3845 seq_printf(seq, "\tPMT Magic Frame: %s\n",
3846 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
3847 seq_printf(seq, "\tRMON module: %s\n",
3848 (priv->dma_cap.rmon) ? "Y" : "N");
3849 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
3850 (priv->dma_cap.time_stamp) ? "Y" : "N");
22d3efe5 3851 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
e7434821 3852 (priv->dma_cap.atime_stamp) ? "Y" : "N");
22d3efe5 3853 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
e7434821
GC
3854 (priv->dma_cap.eee) ? "Y" : "N");
3855 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
3856 seq_printf(seq, "\tChecksum Offload in TX: %s\n",
3857 (priv->dma_cap.tx_coe) ? "Y" : "N");
f748be53
AT
3858 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3859 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
3860 (priv->dma_cap.rx_coe) ? "Y" : "N");
3861 } else {
3862 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
3863 (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
3864 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
3865 (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
3866 }
e7434821
GC
3867 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
3868 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
3869 seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
3870 priv->dma_cap.number_rx_channel);
3871 seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
3872 priv->dma_cap.number_tx_channel);
3873 seq_printf(seq, "\tEnhanced descriptors: %s\n",
3874 (priv->dma_cap.enh_desc) ? "Y" : "N");
3875
3876 return 0;
3877}
3878
3879static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
3880{
3881 return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
3882}
3883
3884static const struct file_operations stmmac_dma_cap_fops = {
3885 .owner = THIS_MODULE,
3886 .open = stmmac_sysfs_dma_cap_open,
3887 .read = seq_read,
3888 .llseek = seq_lseek,
74863948 3889 .release = single_release,
e7434821
GC
3890};
3891
7ac29055
GC
3892static int stmmac_init_fs(struct net_device *dev)
3893{
466c5ac8
MO
3894 struct stmmac_priv *priv = netdev_priv(dev);
3895
3896 /* Create per netdev entries */
3897 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
7ac29055 3898
466c5ac8 3899 if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
38ddc59d 3900 netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
7ac29055
GC
3901
3902 return -ENOMEM;
3903 }
3904
3905 /* Entry to report DMA RX/TX rings */
466c5ac8
MO
3906 priv->dbgfs_rings_status =
3907 debugfs_create_file("descriptors_status", S_IRUGO,
3908 priv->dbgfs_dir, dev,
3909 &stmmac_rings_status_fops);
7ac29055 3910
466c5ac8 3911 if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
38ddc59d 3912 netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
466c5ac8 3913 debugfs_remove_recursive(priv->dbgfs_dir);
7ac29055
GC
3914
3915 return -ENOMEM;
3916 }
3917
e7434821 3918 /* Entry to report the DMA HW features */
466c5ac8
MO
3919 priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO,
3920 priv->dbgfs_dir,
3921 dev, &stmmac_dma_cap_fops);
e7434821 3922
466c5ac8 3923 if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
38ddc59d 3924 netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
466c5ac8 3925 debugfs_remove_recursive(priv->dbgfs_dir);
e7434821
GC
3926
3927 return -ENOMEM;
3928 }
3929
7ac29055
GC
3930 return 0;
3931}
3932
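/* Usage sketch (assuming debugfs is mounted at /sys/kernel/debug and a
 * hypothetical interface name of eth0): the two files created above can be
 * read directly, e.g.:
 *
 *   cat /sys/kernel/debug/stmmaceth/eth0/descriptors_status
 *   cat /sys/kernel/debug/stmmaceth/eth0/dma_cap
 */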
466c5ac8 3933static void stmmac_exit_fs(struct net_device *dev)
7ac29055 3934{
466c5ac8
MO
3935 struct stmmac_priv *priv = netdev_priv(dev);
3936
3937 debugfs_remove_recursive(priv->dbgfs_dir);
7ac29055 3938}
50fb4f74 3939#endif /* CONFIG_DEBUG_FS */
7ac29055 3940
47dd7a54
GC
3941static const struct net_device_ops stmmac_netdev_ops = {
3942 .ndo_open = stmmac_open,
3943 .ndo_start_xmit = stmmac_xmit,
3944 .ndo_stop = stmmac_release,
3945 .ndo_change_mtu = stmmac_change_mtu,
5e982f3b 3946 .ndo_fix_features = stmmac_fix_features,
d2afb5bd 3947 .ndo_set_features = stmmac_set_features,
01789349 3948 .ndo_set_rx_mode = stmmac_set_rx_mode,
47dd7a54
GC
3949 .ndo_tx_timeout = stmmac_tx_timeout,
3950 .ndo_do_ioctl = stmmac_ioctl,
47dd7a54
GC
3951#ifdef CONFIG_NET_POLL_CONTROLLER
3952 .ndo_poll_controller = stmmac_poll_controller,
3953#endif
3954 .ndo_set_mac_address = eth_mac_addr,
3955};
3956
cf3f047b
GC
3957/**
3958 * stmmac_hw_init - Init the MAC device
32ceabca 3959 * @priv: driver private structure
732fdf0e
GC
3960 * Description: this function is to configure the MAC device according to
3961 * some platform parameters or the HW capability register. It prepares the
3962 * driver to use either ring or chain modes and to setup either enhanced or
3963 * normal descriptors.
cf3f047b
GC
3964 */
3965static int stmmac_hw_init(struct stmmac_priv *priv)
3966{
cf3f047b
GC
3967 struct mac_device_info *mac;
3968
3969 /* Identify the MAC HW device */
ec33d71d
LC
3970 if (priv->plat->setup) {
3971 mac = priv->plat->setup(priv);
3972 } else if (priv->plat->has_gmac) {
03f2eecd 3973 priv->dev->priv_flags |= IFF_UNICAST_FLT;
3b57de95
VB
3974 mac = dwmac1000_setup(priv->ioaddr,
3975 priv->plat->multicast_filter_bins,
c623d149
AT
3976 priv->plat->unicast_filter_entries,
3977 &priv->synopsys_id);
f748be53
AT
3978 } else if (priv->plat->has_gmac4) {
3979 priv->dev->priv_flags |= IFF_UNICAST_FLT;
3980 mac = dwmac4_setup(priv->ioaddr,
3981 priv->plat->multicast_filter_bins,
3982 priv->plat->unicast_filter_entries,
3983 &priv->synopsys_id);
03f2eecd 3984 } else {
c623d149 3985 mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
03f2eecd 3986 }
cf3f047b
GC
3987 if (!mac)
3988 return -ENOMEM;
3989
3990 priv->hw = mac;
3991
9f93ac8d
LC
3992 /* dwmac-sun8i only works in chain mode */
3993 if (priv->plat->has_sun8i)
3994 chain_mode = 1;
3995
4a7d666a 3996 /* To use the chained or ring mode */
f748be53
AT
3997 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3998 priv->hw->mode = &dwmac4_ring_mode_ops;
4a7d666a 3999 } else {
f748be53
AT
4000 if (chain_mode) {
4001 priv->hw->mode = &chain_mode_ops;
38ddc59d 4002 dev_info(priv->device, "Chain mode enabled\n");
f748be53
AT
4003 priv->mode = STMMAC_CHAIN_MODE;
4004 } else {
4005 priv->hw->mode = &ring_mode_ops;
38ddc59d 4006 dev_info(priv->device, "Ring mode enabled\n");
f748be53
AT
4007 priv->mode = STMMAC_RING_MODE;
4008 }
4a7d666a
GC
4009 }
4010
cf3f047b
GC
4011 /* Get the HW capability (new GMAC newer than 3.50a) */
4012 priv->hw_cap_support = stmmac_get_hw_features(priv);
4013 if (priv->hw_cap_support) {
38ddc59d 4014 dev_info(priv->device, "DMA HW capability register supported\n");
cf3f047b
GC
4015
4016 /* We can override some gmac/dma configuration fields: e.g.
4017 * enh_desc, tx_coe (e.g. that are passed through the
4018 * platform) with the values from the HW capability
4019 * register (if supported).
4020 */
4021 priv->plat->enh_desc = priv->dma_cap.enh_desc;
cf3f047b 4022 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
3fe5cadb 4023 priv->hw->pmt = priv->plat->pmt;
38912bdb 4024
a8df35d4
EG
4025 /* TXCOE doesn't work in thresh DMA mode */
4026 if (priv->plat->force_thresh_dma_mode)
4027 priv->plat->tx_coe = 0;
4028 else
4029 priv->plat->tx_coe = priv->dma_cap.tx_coe;
4030
f748be53
AT
4031 /* In case of GMAC4 rx_coe is from HW cap register. */
4032 priv->plat->rx_coe = priv->dma_cap.rx_coe;
38912bdb
DS
4033
4034 if (priv->dma_cap.rx_coe_type2)
4035 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
4036 else if (priv->dma_cap.rx_coe_type1)
4037 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
4038
38ddc59d
LC
4039 } else {
4040 dev_info(priv->device, "No HW DMA feature register supported\n");
4041 }
cf3f047b 4042
f748be53
AT
4043 /* To use alternate (extended), normal or GMAC4 descriptor structures */
4044 if (priv->synopsys_id >= DWMAC_CORE_4_00)
4045 priv->hw->desc = &dwmac4_desc_ops;
4046 else
4047 stmmac_selec_desc_mode(priv);
61369d02 4048
d2afb5bd
GC
4049 if (priv->plat->rx_coe) {
4050 priv->hw->rx_csum = priv->plat->rx_coe;
38ddc59d 4051 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
f748be53 4052 if (priv->synopsys_id < DWMAC_CORE_4_00)
38ddc59d 4053 dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
d2afb5bd 4054 }
cf3f047b 4055 if (priv->plat->tx_coe)
38ddc59d 4056 dev_info(priv->device, "TX Checksum insertion supported\n");
cf3f047b
GC
4057
4058 if (priv->plat->pmt) {
38ddc59d 4059 dev_info(priv->device, "Wake-Up On Lan supported\n");
cf3f047b
GC
4060 device_set_wakeup_capable(priv->device, 1);
4061 }
4062
f748be53 4063 if (priv->dma_cap.tsoen)
38ddc59d 4064 dev_info(priv->device, "TSO supported\n");
f748be53 4065
c24602ef 4066 return 0;
cf3f047b
GC
4067}
4068
47dd7a54 4069/**
bfab27a1
GC
4070 * stmmac_dvr_probe
4071 * @device: device pointer
ff3dd78c 4072 * @plat_dat: platform data pointer
e56788cf 4073 * @res: stmmac resource pointer
bfab27a1
GC
4074 * Description: this is the main probe function used to
4075 * call alloc_etherdev and allocate the priv structure.
9afec6ef 4076 * Return:
15ffac73 4077 * returns 0 on success, otherwise errno.
47dd7a54 4078 */
15ffac73
JE
4079int stmmac_dvr_probe(struct device *device,
4080 struct plat_stmmacenet_data *plat_dat,
4081 struct stmmac_resources *res)
47dd7a54 4082{
bfab27a1
GC
4083 struct net_device *ndev = NULL;
4084 struct stmmac_priv *priv;
c22a3f48
JP
4085 int ret = 0;
4086 u32 queue;
47dd7a54 4087
c22a3f48
JP
4088 ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
4089 MTL_MAX_TX_QUEUES,
4090 MTL_MAX_RX_QUEUES);
41de8d4c 4091 if (!ndev)
15ffac73 4092 return -ENOMEM;
bfab27a1
GC
4093
4094 SET_NETDEV_DEV(ndev, device);
4095
4096 priv = netdev_priv(ndev);
4097 priv->device = device;
4098 priv->dev = ndev;
47dd7a54 4099
bfab27a1 4100 stmmac_set_ethtool_ops(ndev);
cf3f047b
GC
4101 priv->pause = pause;
4102 priv->plat = plat_dat;
e56788cf
JE
4103 priv->ioaddr = res->addr;
4104 priv->dev->base_addr = (unsigned long)res->addr;
4105
4106 priv->dev->irq = res->irq;
4107 priv->wol_irq = res->wol_irq;
4108 priv->lpi_irq = res->lpi_irq;
4109
4110 if (res->mac)
4111 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
cf3f047b 4112
a7a62685 4113 dev_set_drvdata(device, priv->dev);
803f8fc4 4114
cf3f047b
GC
4115 /* Verify driver arguments */
4116 stmmac_verify_args();
bfab27a1 4117
cf3f047b 4118 /* Override with kernel parameters if supplied XXX CRS XXX
ceb69499
GC
4119 * this needs to have multiple instances
4120 */
cf3f047b
GC
4121 if ((phyaddr >= 0) && (phyaddr <= 31))
4122 priv->plat->phy_addr = phyaddr;
4123
90f522a2
EP
4124 if (priv->plat->stmmac_rst) {
4125 ret = reset_control_assert(priv->plat->stmmac_rst);
f573c0b9 4126 reset_control_deassert(priv->plat->stmmac_rst);
90f522a2
EP
4127 /* Some reset controllers have only a reset callback instead of
4128 * an assert + deassert callback pair.
4129 */
4130 if (ret == -ENOTSUPP)
4131 reset_control_reset(priv->plat->stmmac_rst);
4132 }
c5e4ddbd 4133
cf3f047b 4134 /* Init MAC and get the capabilities */
c24602ef
GC
4135 ret = stmmac_hw_init(priv);
4136 if (ret)
62866e98 4137 goto error_hw_init;
cf3f047b 4138
c22a3f48 4139 /* Configure real RX and TX queues */
c02b7a91
JP
4140 netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
4141 netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
c22a3f48 4142
cf3f047b 4143 ndev->netdev_ops = &stmmac_netdev_ops;
bfab27a1 4144
cf3f047b
GC
4145 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4146 NETIF_F_RXCSUM;
f748be53
AT
4147
4148 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
9edfa7da 4149 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
f748be53 4150 priv->tso = true;
38ddc59d 4151 dev_info(priv->device, "TSO feature enabled\n");
f748be53 4152 }
bfab27a1
GC
4153 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
4154 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
47dd7a54
GC
4155#ifdef STMMAC_VLAN_TAG_USED
4156 /* Both mac100 and gmac support receive VLAN tag detection */
f646968f 4157 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
47dd7a54
GC
4158#endif
4159 priv->msg_enable = netif_msg_init(debug, default_msg_level);
4160
44770e11
JW
4161 /* MTU range: 46 - hw-specific max */
4162 ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
4163 if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
4164 ndev->max_mtu = JUMBO_LEN;
4165 else
4166 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
a2cd64f3
KHL
4167 /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
4168 * as well as plat->maxmtu < ndev->min_mtu, which is an invalid range.
4169 */
4170 if ((priv->plat->maxmtu < ndev->max_mtu) &&
4171 (priv->plat->maxmtu >= ndev->min_mtu))
44770e11 4172 ndev->max_mtu = priv->plat->maxmtu;
a2cd64f3 4173 else if (priv->plat->maxmtu < ndev->min_mtu)
b618ab45
HK
4174 dev_warn(priv->device,
4175 "%s: warning: maxmtu having invalid value (%d)\n",
4176 __func__, priv->plat->maxmtu);
44770e11 4177
47dd7a54
GC
4178 if (flow_ctrl)
4179 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
4180
62a2ab93
GC
4181 /* Rx Watchdog is available in the COREs newer than the 3.40.
4182 * In some cases, for example on bugged HW, this feature
4183 * has to be disabled and this can be done by passing the
4184 * riwt_off field from the platform.
4185 */
4186 if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
4187 priv->use_riwt = 1;
b618ab45
HK
4188 dev_info(priv->device,
4189 "Enable RX Mitigation via HW Watchdog Timer\n");
62a2ab93
GC
4190 }
4191
c22a3f48
JP
4192 for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4193 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4194
4195 netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
4196 (8 * priv->plat->rx_queues_to_use));
4197 }
47dd7a54 4198
f8e96161
VL
4199 spin_lock_init(&priv->lock);
4200
cd7201f4
GC
4201 /* If a specific clk_csr value is passed from the platform
4202 * this means that the CSR Clock Range selection cannot be
4203 * changed at run-time and it is fixed. Otherwise the driver will try to
4204 * set the MDC clock dynamically according to the csr actual
4205 * clock input.
4206 */
4207 if (!priv->plat->clk_csr)
4208 stmmac_clk_csr_set(priv);
4209 else
4210 priv->clk_csr = priv->plat->clk_csr;
4211
e58bb43f
GC
4212 stmmac_check_pcs_mode(priv);
4213
3fe5cadb
GC
4214 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4215 priv->hw->pcs != STMMAC_PCS_TBI &&
4216 priv->hw->pcs != STMMAC_PCS_RTBI) {
e58bb43f
GC
4217 /* MDIO bus Registration */
4218 ret = stmmac_mdio_register(ndev);
4219 if (ret < 0) {
b618ab45
HK
4220 dev_err(priv->device,
4221 "%s: MDIO bus (id: %d) registration failed",
4222 __func__, priv->plat->bus_id);
e58bb43f
GC
4223 goto error_mdio_register;
4224 }
4bfcbd7a
FV
4225 }
4226
57016590 4227 ret = register_netdev(ndev);
b2eb09af 4228 if (ret) {
b618ab45
HK
4229 dev_err(priv->device, "%s: ERROR %i registering the device\n",
4230 __func__, ret);
b2eb09af
FF
4231 goto error_netdev_register;
4232 }
57016590
FF
4233
4234 return ret;
47dd7a54 4235
6a81c26f 4236error_netdev_register:
b2eb09af
FF
4237 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4238 priv->hw->pcs != STMMAC_PCS_TBI &&
4239 priv->hw->pcs != STMMAC_PCS_RTBI)
4240 stmmac_mdio_unregister(ndev);
6a81c26f 4241error_mdio_register:
c22a3f48
JP
4242 for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
4243 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4244
4245 netif_napi_del(&rx_q->napi);
4246 }
62866e98 4247error_hw_init:
34a52f36 4248 free_netdev(ndev);
47dd7a54 4249
15ffac73 4250 return ret;
47dd7a54 4251}
b2e2f0c7 4252EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
47dd7a54
GC
4253
4254/**
4255 * stmmac_dvr_remove
f4e7bd81 4256 * @dev: device pointer
47dd7a54 4257 * Description: this function resets the TX/RX processes, disables the MAC RX/TX
bfab27a1 4258 * changes the link status, releases the DMA descriptor rings.
47dd7a54 4259 */
f4e7bd81 4260int stmmac_dvr_remove(struct device *dev)
47dd7a54 4261{
f4e7bd81 4262 struct net_device *ndev = dev_get_drvdata(dev);
aec7ff27 4263 struct stmmac_priv *priv = netdev_priv(ndev);
47dd7a54 4264
38ddc59d 4265 netdev_info(priv->dev, "%s: removing driver", __func__);
47dd7a54 4266
ae4f0d46 4267 stmmac_stop_all_dma(priv);
47dd7a54 4268
270c7759 4269 priv->hw->mac->set_mac(priv->ioaddr, false);
47dd7a54 4270 netif_carrier_off(ndev);
47dd7a54 4271 unregister_netdev(ndev);
f573c0b9 4272 if (priv->plat->stmmac_rst)
4273 reset_control_assert(priv->plat->stmmac_rst);
4274 clk_disable_unprepare(priv->plat->pclk);
4275 clk_disable_unprepare(priv->plat->stmmac_clk);
3fe5cadb
GC
4276 if (priv->hw->pcs != STMMAC_PCS_RGMII &&
4277 priv->hw->pcs != STMMAC_PCS_TBI &&
4278 priv->hw->pcs != STMMAC_PCS_RTBI)
e743471f 4279 stmmac_mdio_unregister(ndev);
47dd7a54
GC
4280 free_netdev(ndev);
4281
4282 return 0;
4283}
b2e2f0c7 4284EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
47dd7a54 4285
732fdf0e
GC
4286/**
4287 * stmmac_suspend - suspend callback
f4e7bd81 4288 * @dev: device pointer
732fdf0e
GC
4289 * Description: this is the function to suspend the device and it is called
4290 * by the platform driver to stop the network queue, release the resources,
4291 * program the PMT register (for WoL), clean and release driver resources.
4292 */
f4e7bd81 4293int stmmac_suspend(struct device *dev)
47dd7a54 4294{
f4e7bd81 4295 struct net_device *ndev = dev_get_drvdata(dev);
874bd42d 4296 struct stmmac_priv *priv = netdev_priv(ndev);
f8c5a875 4297 unsigned long flags;
47dd7a54 4298
874bd42d 4299 if (!ndev || !netif_running(ndev))
47dd7a54
GC
4300 return 0;
4301
d6d50c7e
PR
4302 if (ndev->phydev)
4303 phy_stop(ndev->phydev);
102463b1 4304
f8c5a875 4305 spin_lock_irqsave(&priv->lock, flags);
47dd7a54 4306
874bd42d 4307 netif_device_detach(ndev);
c22a3f48 4308 stmmac_stop_all_queues(priv);
47dd7a54 4309
c22a3f48 4310 stmmac_disable_all_queues(priv);
874bd42d
GC
4311
4312 /* Stop TX/RX DMA */
ae4f0d46 4313 stmmac_stop_all_dma(priv);
c24602ef 4314
874bd42d 4315 /* Enable Power down mode by programming the PMT regs */
89f7f2cf 4316 if (device_may_wakeup(priv->device)) {
7ed24bbe 4317 priv->hw->mac->pmt(priv->hw, priv->wolopts);
89f7f2cf
SK
4318 priv->irq_wake = 1;
4319 } else {
270c7759 4320 priv->hw->mac->set_mac(priv->ioaddr, false);
db88f10a 4321 pinctrl_pm_select_sleep_state(priv->device);
ba1377ff 4322 /* Disable clock in case of PWM is off */
f573c0b9 4323 clk_disable(priv->plat->pclk);
4324 clk_disable(priv->plat->stmmac_clk);
ba1377ff 4325 }
f8c5a875 4326 spin_unlock_irqrestore(&priv->lock, flags);
2d871aa0 4327
4d869b03 4328 priv->oldlink = false;
bd00632c
LC
4329 priv->speed = SPEED_UNKNOWN;
4330 priv->oldduplex = DUPLEX_UNKNOWN;
47dd7a54
GC
4331 return 0;
4332}
b2e2f0c7 4333EXPORT_SYMBOL_GPL(stmmac_suspend);
47dd7a54 4334
54139cf3
JP
4335/**
4336 * stmmac_reset_queues_param - reset queue parameters
4337 * @priv: driver private structure
4338 */
4339static void stmmac_reset_queues_param(struct stmmac_priv *priv)
4340{
4341 u32 rx_cnt = priv->plat->rx_queues_to_use;
ce736788 4342 u32 tx_cnt = priv->plat->tx_queues_to_use;
54139cf3
JP
4343 u32 queue;
4344
4345 for (queue = 0; queue < rx_cnt; queue++) {
4346 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4347
4348 rx_q->cur_rx = 0;
4349 rx_q->dirty_rx = 0;
4350 }
4351
ce736788
JP
4352 for (queue = 0; queue < tx_cnt; queue++) {
4353 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4354
4355 tx_q->cur_tx = 0;
4356 tx_q->dirty_tx = 0;
4357 }
54139cf3
JP
4358}
4359
732fdf0e
GC
4360/**
4361 * stmmac_resume - resume callback
f4e7bd81 4362 * @dev: device pointer
732fdf0e
GC
4363 * Description: on resume, this function is invoked to set up the DMA and CORE
4364 * in a usable state.
4365 */
f4e7bd81 4366int stmmac_resume(struct device *dev)
47dd7a54 4367{
f4e7bd81 4368 struct net_device *ndev = dev_get_drvdata(dev);
874bd42d 4369 struct stmmac_priv *priv = netdev_priv(ndev);
f8c5a875 4370 unsigned long flags;
47dd7a54 4371
874bd42d 4372 if (!netif_running(ndev))
47dd7a54
GC
4373 return 0;
4374
47dd7a54
GC
4375 /* The Power Down bit in the PM register is cleared
4376 * automatically as soon as a magic packet or a Wake-up frame
4377 * is received. Anyway, it's better to manually clear
4378 * this bit because it can generate problems while resuming
ceb69499
GC
4379 * from other devices (e.g. serial console).
4380 */
623997fb 4381 if (device_may_wakeup(priv->device)) {
f55d84b0 4382 spin_lock_irqsave(&priv->lock, flags);
7ed24bbe 4383 priv->hw->mac->pmt(priv->hw, 0);
f55d84b0 4384 spin_unlock_irqrestore(&priv->lock, flags);
89f7f2cf 4385 priv->irq_wake = 0;
623997fb 4386 } else {
db88f10a 4387 pinctrl_pm_select_default_state(priv->device);
8d45e42b 4388 /* enable the clk previously disabled */
f573c0b9 4389 clk_enable(priv->plat->stmmac_clk);
4390 clk_enable(priv->plat->pclk);
623997fb
SK
4391 /* reset the phy so that it's ready */
4392 if (priv->mii)
4393 stmmac_mdio_reset(priv->mii);
4394 }
47dd7a54 4395
874bd42d 4396 netif_device_attach(ndev);
47dd7a54 4397
f55d84b0
VP
4398 spin_lock_irqsave(&priv->lock, flags);
4399
54139cf3
JP
4400 stmmac_reset_queues_param(priv);
4401
f748be53
AT
4402 /* reset private mss value to force mss context settings at
4403 * next tso xmit (only used for gmac4).
4404 */
4405 priv->mss = 0;
4406
ae79a639
GC
4407 stmmac_clear_descriptors(priv);
4408
fe131929 4409 stmmac_hw_setup(ndev, false);
777da230 4410 stmmac_init_tx_coalesce(priv);
ac316c78 4411 stmmac_set_rx_mode(ndev);
47dd7a54 4412
c22a3f48 4413 stmmac_enable_all_queues(priv);
47dd7a54 4414
c22a3f48 4415 stmmac_start_all_queues(priv);
47dd7a54 4416
f8c5a875 4417 spin_unlock_irqrestore(&priv->lock, flags);
102463b1 4418
d6d50c7e
PR
4419 if (ndev->phydev)
4420 phy_start(ndev->phydev);
102463b1 4421
47dd7a54
GC
4422 return 0;
4423}
b2e2f0c7 4424EXPORT_SYMBOL_GPL(stmmac_resume);
ba27ec66 4425
47dd7a54
GC
4426#ifndef MODULE
4427static int __init stmmac_cmdline_opt(char *str)
4428{
4429 char *opt;
4430
4431 if (!str || !*str)
4432 return -EINVAL;
4433 while ((opt = strsep(&str, ",")) != NULL) {
f3240e28 4434 if (!strncmp(opt, "debug:", 6)) {
ea2ab871 4435 if (kstrtoint(opt + 6, 0, &debug))
f3240e28
GC
4436 goto err;
4437 } else if (!strncmp(opt, "phyaddr:", 8)) {
ea2ab871 4438 if (kstrtoint(opt + 8, 0, &phyaddr))
f3240e28 4439 goto err;
f3240e28 4440 } else if (!strncmp(opt, "buf_sz:", 7)) {
ea2ab871 4441 if (kstrtoint(opt + 7, 0, &buf_sz))
f3240e28
GC
4442 goto err;
4443 } else if (!strncmp(opt, "tc:", 3)) {
ea2ab871 4444 if (kstrtoint(opt + 3, 0, &tc))
f3240e28
GC
4445 goto err;
4446 } else if (!strncmp(opt, "watchdog:", 9)) {
ea2ab871 4447 if (kstrtoint(opt + 9, 0, &watchdog))
f3240e28
GC
4448 goto err;
4449 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
ea2ab871 4450 if (kstrtoint(opt + 10, 0, &flow_ctrl))
f3240e28
GC
4451 goto err;
4452 } else if (!strncmp(opt, "pause:", 6)) {
ea2ab871 4453 if (kstrtoint(opt + 6, 0, &pause))
f3240e28 4454 goto err;
506f669c 4455 } else if (!strncmp(opt, "eee_timer:", 10)) {
d765955d
GC
4456 if (kstrtoint(opt + 10, 0, &eee_timer))
4457 goto err;
4a7d666a
GC
4458 } else if (!strncmp(opt, "chain_mode:", 11)) {
4459 if (kstrtoint(opt + 11, 0, &chain_mode))
4460 goto err;
f3240e28 4461 }
47dd7a54
GC
4462 }
4463 return 0;
f3240e28
GC
4464
4465err:
4466 pr_err("%s: ERROR broken module parameter conversion", __func__);
4467 return -EINVAL;
47dd7a54
GC
4468}
4469
4470__setup("stmmaceth=", stmmac_cmdline_opt);
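/* Usage sketch (example values only): when the driver is built in, the
 * options parsed above can be passed on the kernel command line as a single
 * comma-separated string, e.g.:
 *
 *   stmmaceth=debug:16,phyaddr:1,watchdog:4000,chain_mode:1
 *
 * Each token matches one of the strncmp() prefixes handled in
 * stmmac_cmdline_opt().
 */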
ceb69499 4471#endif /* MODULE */
6fc0d0f2 4472
466c5ac8
MO
4473static int __init stmmac_init(void)
4474{
4475#ifdef CONFIG_DEBUG_FS
4476 /* Create debugfs main directory if it doesn't exist yet */
4477 if (!stmmac_fs_dir) {
4478 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
4479
4480 if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
4481 pr_err("ERROR %s, debugfs create directory failed\n",
4482 STMMAC_RESOURCE_NAME);
4483
4484 return -ENOMEM;
4485 }
4486 }
4487#endif
4488
4489 return 0;
4490}
4491
4492static void __exit stmmac_exit(void)
4493{
4494#ifdef CONFIG_DEBUG_FS
4495 debugfs_remove_recursive(stmmac_fs_dir);
4496#endif
4497}
4498
4499module_init(stmmac_init)
4500module_exit(stmmac_exit)
4501
6fc0d0f2
GC
4502MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
4503MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
4504MODULE_LICENSE("GPL");