// SPDX-License-Identifier: GPL-2.0-only
/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

	Copyright(C) 2007-2011 STMicroelectronics Ltd

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
	http://www.stlinux.com
  Support available at:
	https://bugzilla.stlinux.com/
*******************************************************************************/

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include <linux/phylink.h>
#include <linux/udp.h>
#include <linux/bpf_trace.h>
#include <net/pkt_cls.h>
#include <net/xdp_sock_drv.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
#include "stmmac_xdp.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"
#include "dwxgmac2.h"
#include "hwif.h"

/* As long as the interface is active, we keep the timestamping counter enabled
 * with fine resolution and binary rollover. This avoids non-monotonic behavior
 * (clock jumps) when changing timestamping settings at runtime.
 */
#define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
				 PTP_TCR_TSCTRLSSR)

#define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
#define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)

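/* Illustration only: a worked example of STMMAC_ALIGN(), assuming
 * SMP_CACHE_BYTES == 64 (typical on many x86/arm64 configurations). The double
 * ALIGN() rounds up to a cache line first and then to a 16-byte boundary:
 *
 *	STMMAC_ALIGN(1522) -> ALIGN(ALIGN(1522, 64), 16)
 *	                   -> ALIGN(1536, 16)
 *	                   -> 1536
 *
 * A value that is already cache-line aligned is left unchanged by the second
 * ALIGN().
 */
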
/* Module parameters */
#define TX_TIMEO	5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, 0644);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");

#define STMMAC_TX_THRESH(x)	((x)->dma_tx_size / 4)
#define STMMAC_RX_THRESH(x)	((x)->dma_rx_size / 4)

/* Limit to make sure XDP TX and slow path can coexist */
#define STMMAC_XSK_TX_BUDGET_MAX	256
#define STMMAC_TX_XSK_AVAIL		16
#define STMMAC_RX_FILL_BATCH		16

#define STMMAC_XDP_PASS		0
#define STMMAC_XDP_CONSUMED	BIT(0)
#define STMMAC_XDP_TX		BIT(1)
#define STMMAC_XDP_REDIRECT	BIT(2)

static int flow_ctrl = FLOW_AUTO;
module_param(flow_ctrl, int, 0644);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, 0644);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, 0644);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define	DEFAULT_BUFSIZE	1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, 0644);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

#define	STMMAC_RX_COPYBREAK	256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, 0644);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))

/* By default the driver uses the ring mode to manage tx and rx descriptors,
 * but the user can force use of the chain mode instead of the ring.
 */
static unsigned int chain_mode;
module_param(chain_mode, int, 0444);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");

static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
/* For MSI interrupts handling */
static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);

#ifdef CONFIG_DEBUG_FS
static const struct net_device_ops stmmac_netdev_ops;
static void stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif

#define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))

int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
{
	int ret = 0;

	if (enabled) {
		ret = clk_prepare_enable(priv->plat->stmmac_clk);
		if (ret)
			return ret;
		ret = clk_prepare_enable(priv->plat->pclk);
		if (ret) {
			clk_disable_unprepare(priv->plat->stmmac_clk);
			return ret;
		}
		if (priv->plat->clks_config) {
			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
			if (ret) {
				clk_disable_unprepare(priv->plat->stmmac_clk);
				clk_disable_unprepare(priv->plat->pclk);
				return ret;
			}
		}
	} else {
		clk_disable_unprepare(priv->plat->stmmac_clk);
		clk_disable_unprepare(priv->plat->pclk);
		if (priv->plat->clks_config)
			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);

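/* Illustration only: stmmac_bus_clks_config() is meant to bracket register
 * access in power-management paths. A minimal caller sketch, assuming a
 * runtime-PM style callback (the callback name below is hypothetical and not
 * part of this driver):
 *
 *	static int example_runtime_resume(struct device *dev)
 *	{
 *		struct net_device *ndev = dev_get_drvdata(dev);
 *		struct stmmac_priv *priv = netdev_priv(ndev);
 *
 *		// Re-enable the bus clocks before touching any MAC register
 *		return stmmac_bus_clks_config(priv, true);
 *	}
 *
 * The matching suspend path would call stmmac_bus_clks_config(priv, false)
 * after the last register access.
 */
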
/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and sets a default in case of
 * errors.
 */
static void stmmac_verify_args(void)
{
	if (unlikely(watchdog < 0))
		watchdog = TX_TIMEO;
	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
		buf_sz = DEFAULT_BUFSIZE;
	if (unlikely(flow_ctrl > 1))
		flow_ctrl = FLOW_AUTO;
	else if (likely(flow_ctrl < 0))
		flow_ctrl = FLOW_OFF;
	if (unlikely((pause < 0) || (pause > 0xffff)))
		pause = PAUSE_TIME;
	if (eee_timer < 0)
		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}

static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (stmmac_xdp_is_enabled(priv) &&
		    test_bit(queue, priv->af_xdp_zc_qps)) {
			napi_disable(&ch->rxtx_napi);
			continue;
		}

		if (queue < rx_queues_cnt)
			napi_disable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_disable(&ch->tx_napi);
	}
}

/**
 * stmmac_disable_all_queues - Disable all queues
 * @priv: driver private structure
 */
static void stmmac_disable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	struct stmmac_rx_queue *rx_q;
	u32 queue;

	/* synchronize_rcu() needed for pending XDP buffers to drain */
	for (queue = 0; queue < rx_queues_cnt; queue++) {
		rx_q = &priv->rx_queue[queue];
		if (rx_q->xsk_pool) {
			synchronize_rcu();
			break;
		}
	}

	__stmmac_disable_all_queues(priv);
}

/**
 * stmmac_enable_all_queues - Enable all queues
 * @priv: driver private structure
 */
static void stmmac_enable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (stmmac_xdp_is_enabled(priv) &&
		    test_bit(queue, priv->af_xdp_zc_qps)) {
			napi_enable(&ch->rxtx_napi);
			continue;
		}

		if (queue < rx_queues_cnt)
			napi_enable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_enable(&ch->tx_napi);
	}
}

static void stmmac_service_event_schedule(struct stmmac_priv *priv)
{
	if (!test_bit(STMMAC_DOWN, &priv->state) &&
	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
		queue_work(priv->wq, &priv->service_task);
}

static void stmmac_global_err(struct stmmac_priv *priv)
{
	netif_carrier_off(priv->dev);
	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
	stmmac_service_event_schedule(priv);
}

/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *	If a specific clk_csr value is passed from the platform
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and it is fixed (as reported in the driver
 *	documentation). Otherwise the driver will try to set the MDC
 *	clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
	u32 clk_rate;

	clk_rate = clk_get_rate(priv->plat->stmmac_clk);

	/* Platform provided default clk_csr would be assumed valid
	 * for all other cases except for the below mentioned ones.
	 * For values higher than the IEEE 802.3 specified frequency
	 * we cannot estimate the proper divider as the frequency of
	 * clk_csr_i is not known. So we do not change the default
	 * divider.
	 */
	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
		if (clk_rate < CSR_F_35M)
			priv->clk_csr = STMMAC_CSR_20_35M;
		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
			priv->clk_csr = STMMAC_CSR_35_60M;
		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
			priv->clk_csr = STMMAC_CSR_60_100M;
		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
			priv->clk_csr = STMMAC_CSR_100_150M;
		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
			priv->clk_csr = STMMAC_CSR_150_250M;
		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
			priv->clk_csr = STMMAC_CSR_250_300M;
	}

	if (priv->plat->has_sun8i) {
		if (clk_rate > 160000000)
			priv->clk_csr = 0x03;
		else if (clk_rate > 80000000)
			priv->clk_csr = 0x02;
		else if (clk_rate > 40000000)
			priv->clk_csr = 0x01;
		else
			priv->clk_csr = 0;
	}

	if (priv->plat->has_xgmac) {
		if (clk_rate > 400000000)
			priv->clk_csr = 0x5;
		else if (clk_rate > 350000000)
			priv->clk_csr = 0x4;
		else if (clk_rate > 300000000)
			priv->clk_csr = 0x3;
		else if (clk_rate > 250000000)
			priv->clk_csr = 0x2;
		else if (clk_rate > 150000000)
			priv->clk_csr = 0x1;
		else
			priv->clk_csr = 0x0;
	}
}

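/* Illustration only: with a 125 MHz csr clock (a plausible value when the bus
 * clock is derived from an RGMII reference), the range walk above selects the
 * 100-150 MHz bucket:
 *
 *	clk_rate = 125000000
 *	CSR_F_100M <= clk_rate < CSR_F_150M  ->  priv->clk_csr = STMMAC_CSR_100_150M
 *
 * which the MAC then uses to derive an MDC frequency within the 2.5 MHz limit
 * specified by IEEE 802.3.
 */
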
static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}

static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	u32 avail;

	if (tx_q->dirty_tx > tx_q->cur_tx)
		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
	else
		avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;

	return avail;
}

/**
 * stmmac_rx_dirty - Get RX queue dirty
 * @priv: driver private structure
 * @queue: RX queue index
 */
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	u32 dirty;

	if (rx_q->dirty_rx <= rx_q->cur_rx)
		dirty = rx_q->cur_rx - rx_q->dirty_rx;
	else
		dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;

	return dirty;
}

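/* Illustration only: worked numbers for the ring arithmetic above, assuming a
 * 512-entry TX ring (priv->dma_tx_size == 512):
 *
 *	cur_tx = 10, dirty_tx = 5   ->  avail = 512 - 10 + 5 - 1 = 506
 *	cur_tx = 5,  dirty_tx = 10  ->  avail = 10 - 5 - 1       = 4
 *
 * The "- 1" keeps one descriptor permanently unused so that a completely full
 * ring can be distinguished from a completely empty one.
 */
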
static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
{
	int tx_lpi_timer;

	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
	priv->eee_sw_timer_en = en ? 0 : 1;
	tx_lpi_timer = en ? priv->tx_lpi_timer : 0;
	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
}

/**
 * stmmac_enable_eee_mode - check and enter in LPI mode
 * @priv: driver private structure
 * Description: this function verifies and enters LPI mode in case of EEE.
 */
static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* check if all TX queues have the work finished */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		if (tx_q->dirty_tx != tx_q->cur_tx)
			return -EBUSY; /* still unfinished work */
	}

	/* Check and enter in LPI mode */
	if (!priv->tx_path_in_lpi_mode)
		stmmac_set_eee_mode(priv, priv->hw,
				    priv->plat->en_tx_lpi_clockgating);
	return 0;
}

/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function disables EEE and exits LPI mode when the LPI
 * state is true. It is called from the xmit path.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	if (!priv->eee_sw_timer_en) {
		stmmac_lpi_entry_timer_config(priv, 0);
		return;
	}

	stmmac_reset_eee_mode(priv, priv->hw);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}

/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @t: timer_list struct containing private info
 * Description:
 * if there is no data transfer and if we are not in LPI state,
 * then the MAC Transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);

	if (stmmac_enable_eee_mode(priv))
		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
}

/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 * if the GMAC supports the EEE (from the HW cap reg) and the phy device
 * can also manage EEE, this function enables the LPI state and starts the
 * related timer.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	int eee_tw_timer = priv->eee_tw_timer;

	/* Using PCS we cannot deal with the phy registers at this stage
	 * so we do not support extra features like EEE.
	 */
	if (priv->hw->pcs == STMMAC_PCS_TBI ||
	    priv->hw->pcs == STMMAC_PCS_RTBI)
		return false;

	/* Check if MAC core supports the EEE feature. */
	if (!priv->dma_cap.eee)
		return false;

	mutex_lock(&priv->lock);

	/* Check if it needs to be deactivated */
	if (!priv->eee_active) {
		if (priv->eee_enabled) {
			netdev_dbg(priv->dev, "disable EEE\n");
			stmmac_lpi_entry_timer_config(priv, 0);
			del_timer_sync(&priv->eee_ctrl_timer);
			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
			if (priv->hw->xpcs)
				xpcs_config_eee(priv->hw->xpcs,
						priv->plat->mult_fact_100ns,
						false);
		}
		mutex_unlock(&priv->lock);
		return false;
	}

	if (priv->eee_active && !priv->eee_enabled) {
		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
				     eee_tw_timer);
		if (priv->hw->xpcs)
			xpcs_config_eee(priv->hw->xpcs,
					priv->plat->mult_fact_100ns,
					true);
	}

	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
		del_timer_sync(&priv->eee_ctrl_timer);
		priv->tx_path_in_lpi_mode = false;
		stmmac_lpi_entry_timer_config(priv, 1);
	} else {
		stmmac_lpi_entry_timer_config(priv, 0);
		mod_timer(&priv->eee_ctrl_timer,
			  STMMAC_LPI_T(priv->tx_lpi_timer));
	}

	mutex_unlock(&priv->lock);
	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
	return true;
}

static inline u32 stmmac_cdc_adjust(struct stmmac_priv *priv)
{
	/* Correct the clock domain crossing (CDC) error */
	if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate)
		return (2 * NSEC_PER_SEC) / priv->plat->clk_ptp_rate;
	return 0;
}

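/* Illustration only: worked numbers for the CDC correction above. Assuming a
 * 50 MHz PTP reference clock (clk_ptp_rate == 50000000):
 *
 *	adjust = (2 * NSEC_PER_SEC) / 50000000 = 40 ns
 *
 * i.e. two cycles of the PTP clock are subtracted from every raw timestamp on
 * GMAC4 cores to compensate for the clock-domain-crossing latency.
 */
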
/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the timestamp from the descriptor and passes it to the
 * stack. It also performs some sanity checks.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
				   struct dma_desc *p, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	bool found = false;
	u64 ns = 0;

	if (!priv->hwts_tx_en)
		return;

	/* exit if skb doesn't support hw tstamp */
	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
		return;

	/* check tx tstamp status */
	if (stmmac_get_tx_timestamp_status(priv, p)) {
		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
		found = true;
	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
		found = true;
	}

	if (found) {
		ns -= stmmac_cdc_adjust(priv);

		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp.hwtstamp = ns_to_ktime(ns);

		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
		/* pass tstamp to stack */
		skb_tstamp_tx(skb, &shhwtstamp);
	}
}

/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function reads the received packet's timestamp from the descriptor
 * and passes it to the stack. It also performs some sanity checks.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
				   struct dma_desc *np, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
	struct dma_desc *desc = p;
	u64 ns = 0;

	if (!priv->hwts_rx_en)
		return;
	/* For GMAC4, the valid timestamp is from CTX next desc. */
	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
		desc = np;

	/* Check if timestamp is available */
	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);

		ns -= stmmac_cdc_adjust(priv);

		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
		shhwtstamp = skb_hwtstamps(skb);
		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp->hwtstamp = ns_to_ktime(ns);
	} else {
		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
	}
}

/**
 * stmmac_hwtstamp_set - control hardware timestamping.
 * @dev: device pointer.
 * @ifr: An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * Description:
 * This function configures the MAC to enable/disable both outgoing(TX)
 * and incoming(RX) packets time stamping based on user input.
 * Return Value:
 * 0 on success and an appropriate -ve integer on failure.
 */
static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;

	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(config)))
		return -EFAULT;

	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config.flags, config.tx_type, config.rx_filter);

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	if (priv->adv_ts) {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* 'xmac' hardware can support Sync, Pdelay_Req and
			 * Pdelay_resp by setting bit14 and bits17/16 to 01
			 * This leaves Delay_Req timestamps out.
			 * Enable all events *and* general purpose message
			 * timestamping
			 */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.AS1, any layer, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			if (priv->synopsys_id < DWMAC_CORE_4_10)
				ts_event_en = PTP_TCR_TSEVNTENA;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.AS1, any layer, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.AS1, any layer, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_NTP_ALL:
		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	priv->systime_flags = STMMAC_HWTS_ACTIVE;

	if (priv->hwts_tx_en || priv->hwts_rx_en) {
		priv->systime_flags |= tstamp_all | ptp_v2 |
				       ptp_over_ethernet | ptp_over_ipv6_udp |
				       ptp_over_ipv4_udp | ts_event_en |
				       ts_master_en | snap_type_sel;
	}

	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);

	memcpy(&priv->tstamp_config, &config, sizeof(config));

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}

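/* Illustration only: a minimal userspace sketch of how the ioctl handled above
 * is typically driven (e.g. by ptp4l). This is not part of the driver; "eth0",
 * the chosen filter and the helper name are placeholders. fd is expected to be
 * an ordinary AF_INET datagram socket.
 *
 *	#include <linux/net_tstamp.h>
 *	#include <linux/sockios.h>
 *	#include <net/if.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *
 *	int enable_hw_timestamping(int fd)
 *	{
 *		struct hwtstamp_config cfg = {
 *			.tx_type = HWTSTAMP_TX_ON,
 *			.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *		};
 *		struct ifreq ifr;
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *		ifr.ifr_data = (void *)&cfg;
 *
 *		return ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *	}
 *
 * On success the driver writes back the filter it actually programmed, which
 * may be broader than the one requested.
 */
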
/**
 * stmmac_hwtstamp_get - read hardware timestamping.
 * @dev: device pointer.
 * @ifr: An IOCTL specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * Description:
 * This function obtains the current hardware timestamping settings
 * as requested.
 */
static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config *config = &priv->tstamp_config;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	return copy_to_user(ifr->ifr_data, config,
			    sizeof(*config)) ? -EFAULT : 0;
}

/**
 * stmmac_init_tstamp_counter - init hardware timestamping counter
 * @priv: driver private structure
 * @systime_flags: timestamping flags
 * Description:
 * Initialize hardware counter for packet timestamping.
 * This is valid as long as the interface is open and not suspended.
 * Will be rerun after resuming from suspend, in which case the timestamping
 * flags updated by stmmac_hwtstamp_set() also need to be restored.
 */
int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
{
	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
	struct timespec64 now;
	u32 sec_inc = 0;
	u64 temp = 0;
	int ret;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
	if (ret < 0) {
		netdev_warn(priv->dev,
			    "failed to enable PTP reference clock: %pe\n",
			    ERR_PTR(ret));
		return ret;
	}

	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
	priv->systime_flags = systime_flags;

	/* program Sub Second Increment reg */
	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
					   priv->plat->clk_ptp_rate,
					   xmac, &sec_inc);
	temp = div_u64(1000000000ULL, sec_inc);

	/* Store sub second increment for later use */
	priv->sub_second_inc = sec_inc;

	/* calculate default added value:
	 * formula is :
	 * addend = (2^32)/freq_div_ratio;
	 * where, freq_div_ratio = 1e9ns/sec_inc
	 */
	temp = (u64)(temp << 32);
	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);

	/* initialize system time */
	ktime_get_real_ts64(&now);

	/* lower 32 bits of tv_sec are safe until y2106 */
	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);

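/* Illustration only: worked numbers for the addend computation above, assuming
 * clk_ptp_rate == 50000000 (50 MHz) and the sub-second increment coming back
 * from stmmac_config_sub_second_increment() as 40 ns:
 *
 *	temp   = 1e9 / 40 = 25000000
 *	addend = (temp << 32) / clk_ptp_rate
 *	       = (25000000 * 2^32) / 50000000 = 0x80000000
 *
 * The addend is later nudged up or down by the PTP frequency-adjustment
 * callbacks to trim the effective clock rate.
 */
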
/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
 * This is done by looking at the HW cap. register.
 * This function also registers the ptp driver.
 */
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
	int ret;

	if (priv->plat->ptp_clk_freq_config)
		priv->plat->ptp_clk_freq_config(priv);

	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
	if (ret)
		return ret;

	priv->adv_ts = 0;
	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
	if (xmac && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;
	/* Dwmac 3.x core with extend_desc can support adv_ts */
	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;

	if (priv->dma_cap.time_stamp)
		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");

	if (priv->adv_ts)
		netdev_info(priv->dev,
			    "IEEE 1588-2008 Advanced Timestamp supported\n");

	priv->hwts_tx_en = 0;
	priv->hwts_rx_en = 0;

	return 0;
}

static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	clk_disable_unprepare(priv->plat->clk_ptp_ref);
	stmmac_ptp_unregister(priv);
}

/**
 * stmmac_mac_flow_ctrl - Configure flow control in all queues
 * @priv: driver private structure
 * @duplex: duplex passed to the next function
 * Description: It is used for configuring the flow control in all queues
 */
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;

	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
			 priv->pause, tx_cnt);
}

static void stmmac_validate(struct phylink_config *config,
			    unsigned long *supported,
			    struct phylink_link_state *state)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	int tx_cnt = priv->plat->tx_queues_to_use;
	int max_speed = priv->plat->max_speed;

	phylink_set(mac_supported, 10baseT_Half);
	phylink_set(mac_supported, 10baseT_Full);
	phylink_set(mac_supported, 100baseT_Half);
	phylink_set(mac_supported, 100baseT_Full);
	phylink_set(mac_supported, 1000baseT_Half);
	phylink_set(mac_supported, 1000baseT_Full);
	phylink_set(mac_supported, 1000baseKX_Full);

	phylink_set(mac_supported, Autoneg);
	phylink_set(mac_supported, Pause);
	phylink_set(mac_supported, Asym_Pause);
	phylink_set_port_modes(mac_supported);

	/* Cut down 1G if asked to */
	if ((max_speed > 0) && (max_speed < 1000)) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
	} else if (priv->plat->has_gmac4) {
		if (!max_speed || max_speed >= 2500) {
			phylink_set(mac_supported, 2500baseT_Full);
			phylink_set(mac_supported, 2500baseX_Full);
		}
	} else if (priv->plat->has_xgmac) {
		if (!max_speed || (max_speed >= 2500)) {
			phylink_set(mac_supported, 2500baseT_Full);
			phylink_set(mac_supported, 2500baseX_Full);
		}
		if (!max_speed || (max_speed >= 5000)) {
			phylink_set(mac_supported, 5000baseT_Full);
		}
		if (!max_speed || (max_speed >= 10000)) {
			phylink_set(mac_supported, 10000baseSR_Full);
			phylink_set(mac_supported, 10000baseLR_Full);
			phylink_set(mac_supported, 10000baseER_Full);
			phylink_set(mac_supported, 10000baseLRM_Full);
			phylink_set(mac_supported, 10000baseT_Full);
			phylink_set(mac_supported, 10000baseKX4_Full);
			phylink_set(mac_supported, 10000baseKR_Full);
		}
		if (!max_speed || (max_speed >= 25000)) {
			phylink_set(mac_supported, 25000baseCR_Full);
			phylink_set(mac_supported, 25000baseKR_Full);
			phylink_set(mac_supported, 25000baseSR_Full);
		}
		if (!max_speed || (max_speed >= 40000)) {
			phylink_set(mac_supported, 40000baseKR4_Full);
			phylink_set(mac_supported, 40000baseCR4_Full);
			phylink_set(mac_supported, 40000baseSR4_Full);
			phylink_set(mac_supported, 40000baseLR4_Full);
		}
		if (!max_speed || (max_speed >= 50000)) {
			phylink_set(mac_supported, 50000baseCR2_Full);
			phylink_set(mac_supported, 50000baseKR2_Full);
			phylink_set(mac_supported, 50000baseSR2_Full);
			phylink_set(mac_supported, 50000baseKR_Full);
			phylink_set(mac_supported, 50000baseSR_Full);
			phylink_set(mac_supported, 50000baseCR_Full);
			phylink_set(mac_supported, 50000baseLR_ER_FR_Full);
			phylink_set(mac_supported, 50000baseDR_Full);
		}
		if (!max_speed || (max_speed >= 100000)) {
			phylink_set(mac_supported, 100000baseKR4_Full);
			phylink_set(mac_supported, 100000baseSR4_Full);
			phylink_set(mac_supported, 100000baseCR4_Full);
			phylink_set(mac_supported, 100000baseLR4_ER4_Full);
			phylink_set(mac_supported, 100000baseKR2_Full);
			phylink_set(mac_supported, 100000baseSR2_Full);
			phylink_set(mac_supported, 100000baseCR2_Full);
			phylink_set(mac_supported, 100000baseLR2_ER2_FR2_Full);
			phylink_set(mac_supported, 100000baseDR2_Full);
		}
	}

	/* Half-Duplex can only work with single queue */
	if (tx_cnt > 1) {
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 1000baseT_Half);
	}

	linkmode_and(supported, supported, mac_supported);
	linkmode_andnot(supported, supported, mask);

	linkmode_and(state->advertising, state->advertising, mac_supported);
	linkmode_andnot(state->advertising, state->advertising, mask);

	/* If PCS is supported, check which modes it supports. */
	if (priv->hw->xpcs)
		xpcs_validate(priv->hw->xpcs, supported, state);
}

static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
			      const struct phylink_link_state *state)
{
	/* Nothing to do, xpcs_config() handles everything */
}

static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
{
	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
	bool *hs_enable = &fpe_cfg->hs_enable;

	if (is_up && *hs_enable) {
		stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
	} else {
		*lo_state = FPE_STATE_OFF;
		*lp_state = FPE_STATE_OFF;
	}
}

static void stmmac_mac_link_down(struct phylink_config *config,
				 unsigned int mode, phy_interface_t interface)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	stmmac_mac_set(priv, priv->ioaddr, false);
	priv->eee_active = false;
	priv->tx_lpi_enabled = false;
	priv->eee_enabled = stmmac_eee_init(priv);
	stmmac_set_eee_pls(priv, priv->hw, false);

	if (priv->dma_cap.fpesel)
		stmmac_fpe_link_state_handle(priv, false);
}

static void stmmac_mac_link_up(struct phylink_config *config,
			       struct phy_device *phy,
			       unsigned int mode, phy_interface_t interface,
			       int speed, int duplex,
			       bool tx_pause, bool rx_pause)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	u32 ctrl;

	ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
	ctrl &= ~priv->hw->link.speed_mask;

	if (interface == PHY_INTERFACE_MODE_USXGMII) {
		switch (speed) {
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_5000:
			ctrl |= priv->hw->link.xgmii.speed5000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.xgmii.speed2500;
			break;
		default:
			return;
		}
	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
		switch (speed) {
		case SPEED_100000:
			ctrl |= priv->hw->link.xlgmii.speed100000;
			break;
		case SPEED_50000:
			ctrl |= priv->hw->link.xlgmii.speed50000;
			break;
		case SPEED_40000:
			ctrl |= priv->hw->link.xlgmii.speed40000;
			break;
		case SPEED_25000:
			ctrl |= priv->hw->link.xlgmii.speed25000;
			break;
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		default:
			return;
		}
	} else {
		switch (speed) {
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		case SPEED_100:
			ctrl |= priv->hw->link.speed100;
			break;
		case SPEED_10:
			ctrl |= priv->hw->link.speed10;
			break;
		default:
			return;
		}
	}

	priv->speed = speed;

	if (priv->plat->fix_mac_speed)
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);

	if (!duplex)
		ctrl &= ~priv->hw->link.duplex;
	else
		ctrl |= priv->hw->link.duplex;

	/* Flow Control operation */
	if (tx_pause && rx_pause)
		stmmac_mac_flow_ctrl(priv, duplex);

	writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

	stmmac_mac_set(priv, priv->ioaddr, true);
	if (phy && priv->dma_cap.eee) {
		priv->eee_active = phy_init_eee(phy, 1) >= 0;
		priv->eee_enabled = stmmac_eee_init(priv);
		priv->tx_lpi_enabled = priv->eee_enabled;
		stmmac_set_eee_pls(priv, priv->hw, true);
	}

	if (priv->dma_cap.fpesel)
		stmmac_fpe_link_state_handle(priv, true);
}

static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
	.validate = stmmac_validate,
	.mac_config = stmmac_mac_config,
	.mac_link_down = stmmac_mac_link_down,
	.mac_link_up = stmmac_mac_link_up,
};

/**
 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PCS.
 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
 * configured for the TBI, RTBI, or SGMII PHY interface.
 */
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
	int interface = priv->plat->interface;

	if (priv->dma_cap.pcs) {
		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_RGMII;
		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_SGMII;
		}
	}
}

/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 * Return value:
 * 0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct device_node *node;
	int ret;

	node = priv->plat->phylink_node;

	if (node)
		ret = phylink_of_phy_connect(priv->phylink, node, 0);

	/* Some DT bindings do not set-up the PHY handle. Let's try to
	 * manually parse it
	 */
	if (!node || ret) {
		int addr = priv->plat->phy_addr;
		struct phy_device *phydev;

		phydev = mdiobus_get_phy(priv->mii, addr);
		if (!phydev) {
			netdev_err(priv->dev, "no phy at addr %d\n", addr);
			return -ENODEV;
		}

		ret = phylink_connect_phy(priv->phylink, phydev);
	}

	if (!priv->plat->pmt) {
		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };

		phylink_ethtool_get_wol(priv->phylink, &wol);
		device_set_wakeup_capable(priv->device, !!wol.supported);
	}

	return ret;
}

static int stmmac_phy_setup(struct stmmac_priv *priv)
{
	struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
	struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
	int mode = priv->plat->phy_interface;
	struct phylink *phylink;

	priv->phylink_config.dev = &priv->dev->dev;
	priv->phylink_config.type = PHYLINK_NETDEV;
	priv->phylink_config.pcs_poll = true;
	if (priv->plat->mdio_bus_data)
		priv->phylink_config.ovr_an_inband =
			mdio_bus_data->xpcs_an_inband;

	if (!fwnode)
		fwnode = dev_fwnode(priv->device);

	phylink = phylink_create(&priv->phylink_config, fwnode,
				 mode, &stmmac_phylink_mac_ops);
	if (IS_ERR(phylink))
		return PTR_ERR(phylink);

	if (priv->hw->xpcs)
		phylink_set_pcs(phylink, &priv->hw->xpcs->pcs);

	priv->phylink = phylink;
	return 0;
}

static void stmmac_display_rx_rings(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	unsigned int desc_size;
	void *head_rx;
	u32 queue;

	/* Display RX rings */
	for (queue = 0; queue < rx_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		pr_info("\tRX Queue %u rings\n", queue);

		if (priv->extend_desc) {
			head_rx = (void *)rx_q->dma_erx;
			desc_size = sizeof(struct dma_extended_desc);
		} else {
			head_rx = (void *)rx_q->dma_rx;
			desc_size = sizeof(struct dma_desc);
		}

		/* Display RX ring */
		stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true,
				    rx_q->dma_rx_phy, desc_size);
	}
}

static void stmmac_display_tx_rings(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	unsigned int desc_size;
	void *head_tx;
	u32 queue;

	/* Display TX rings */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		pr_info("\tTX Queue %d rings\n", queue);

		if (priv->extend_desc) {
			head_tx = (void *)tx_q->dma_etx;
			desc_size = sizeof(struct dma_extended_desc);
		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
			head_tx = (void *)tx_q->dma_entx;
			desc_size = sizeof(struct dma_edesc);
		} else {
			head_tx = (void *)tx_q->dma_tx;
			desc_size = sizeof(struct dma_desc);
		}

		stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false,
				    tx_q->dma_tx_phy, desc_size);
	}
}

static void stmmac_display_rings(struct stmmac_priv *priv)
{
	/* Display RX ring */
	stmmac_display_rx_rings(priv);

	/* Display TX ring */
	stmmac_display_tx_rings(priv);
}

static int stmmac_set_bfsize(int mtu, int bufsize)
{
	int ret = bufsize;

	if (mtu >= BUF_SIZE_8KiB)
		ret = BUF_SIZE_16KiB;
	else if (mtu >= BUF_SIZE_4KiB)
		ret = BUF_SIZE_8KiB;
	else if (mtu >= BUF_SIZE_2KiB)
		ret = BUF_SIZE_4KiB;
	else if (mtu > DEFAULT_BUFSIZE)
		ret = BUF_SIZE_2KiB;
	else
		ret = DEFAULT_BUFSIZE;

	return ret;
}

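/* Illustration only: how stmmac_set_bfsize() maps some common MTUs, given
 * DEFAULT_BUFSIZE == 1536 and the BUF_SIZE_* constants being 2K/4K/8K/16K:
 *
 *	mtu = 1500  ->  DEFAULT_BUFSIZE (1536)
 *	mtu = 2000  ->  BUF_SIZE_2KiB
 *	mtu = 3000  ->  BUF_SIZE_4KiB
 *	mtu = 5000  ->  BUF_SIZE_8KiB
 *	mtu = 9000  ->  BUF_SIZE_16KiB
 *
 * i.e. the DMA buffer size is rounded up to the next supported bucket so a
 * full frame fits in the receive buffer.
 */
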
/**
 * stmmac_clear_rx_descriptors - clear RX descriptors
 * @priv: driver private structure
 * @queue: RX queue index
 * Description: this function is called to clear the RX descriptors,
 * whether basic or extended descriptors are in use.
 */
static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int i;

	/* Clear the RX descriptors */
	for (i = 0; i < priv->dma_rx_size; i++)
		if (priv->extend_desc)
			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
					    priv->use_riwt, priv->mode,
					    (i == priv->dma_rx_size - 1),
					    priv->dma_buf_sz);
		else
			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
					    priv->use_riwt, priv->mode,
					    (i == priv->dma_rx_size - 1),
					    priv->dma_buf_sz);
}

/**
 * stmmac_clear_tx_descriptors - clear tx descriptors
 * @priv: driver private structure
 * @queue: TX queue index.
 * Description: this function is called to clear the TX descriptors,
 * whether basic or extended descriptors are in use.
 */
static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	int i;

	/* Clear the TX descriptors */
	for (i = 0; i < priv->dma_tx_size; i++) {
		int last = (i == (priv->dma_tx_size - 1));
		struct dma_desc *p;

		if (priv->extend_desc)
			p = &tx_q->dma_etx[i].basic;
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			p = &tx_q->dma_entx[i].basic;
		else
			p = &tx_q->dma_tx[i];

		stmmac_init_tx_desc(priv, p, priv->mode, last);
	}
}

/**
 * stmmac_clear_descriptors - clear descriptors
 * @priv: driver private structure
 * Description: this function is called to clear the TX and RX descriptors,
 * whether basic or extended descriptors are in use.
 */
static void stmmac_clear_descriptors(struct stmmac_priv *priv)
{
	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* Clear the RX descriptors */
	for (queue = 0; queue < rx_queue_cnt; queue++)
		stmmac_clear_rx_descriptors(priv, queue);

	/* Clear the TX descriptors */
	for (queue = 0; queue < tx_queue_cnt; queue++)
		stmmac_clear_tx_descriptors(priv, queue);
}

/**
 * stmmac_init_rx_buffers - init the RX descriptor buffer.
 * @priv: driver private structure
 * @p: descriptor pointer
 * @i: descriptor index
 * @flags: gfp flag
 * @queue: RX queue index
 * Description: this function is called to allocate a receive buffer, perform
 * the DMA mapping and init the descriptor.
 */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
				  int i, gfp_t flags, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];

	if (!buf->page) {
		buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
		if (!buf->page)
			return -ENOMEM;
		buf->page_offset = stmmac_rx_offset(priv);
	}

	if (priv->sph && !buf->sec_page) {
		buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
		if (!buf->sec_page)
			return -ENOMEM;

		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
	} else {
		buf->sec_page = NULL;
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
	}

	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;

	stmmac_set_desc_addr(priv, p, buf->addr);
	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
		stmmac_init_desc3(priv, p);

	return 0;
}

/**
 * stmmac_free_rx_buffer - free RX dma buffers
 * @priv: private structure
 * @queue: RX queue index
 * @i: buffer index.
 */
static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];

	if (buf->page)
		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
	buf->page = NULL;

	if (buf->sec_page)
		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
	buf->sec_page = NULL;
}

/**
 * stmmac_free_tx_buffer - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
 * @i: buffer index.
 */
static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

	if (tx_q->tx_skbuff_dma[i].buf &&
	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
		if (tx_q->tx_skbuff_dma[i].map_as_page)
			dma_unmap_page(priv->device,
				       tx_q->tx_skbuff_dma[i].buf,
				       tx_q->tx_skbuff_dma[i].len,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->device,
					 tx_q->tx_skbuff_dma[i].buf,
					 tx_q->tx_skbuff_dma[i].len,
					 DMA_TO_DEVICE);
	}

	if (tx_q->xdpf[i] &&
	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
		xdp_return_frame(tx_q->xdpf[i]);
		tx_q->xdpf[i] = NULL;
	}

	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
		tx_q->xsk_frames_done++;

	if (tx_q->tx_skbuff[i] &&
	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
		tx_q->tx_skbuff[i] = NULL;
	}

	tx_q->tx_skbuff_dma[i].buf = 0;
	tx_q->tx_skbuff_dma[i].map_as_page = false;
}

4298255f
OBL
1559/**
1560 * dma_free_rx_skbufs - free RX dma buffers
1561 * @priv: private structure
1562 * @queue: RX queue index
1563 */
1564static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
1565{
1566 int i;
1567
1568 for (i = 0; i < priv->dma_rx_size; i++)
1569 stmmac_free_rx_buffer(priv, queue, i);
1570}
1571
1572static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue,
1573 gfp_t flags)
1574{
1575 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1576 int i;
1577
1578 for (i = 0; i < priv->dma_rx_size; i++) {
1579 struct dma_desc *p;
1580 int ret;
1581
1582 if (priv->extend_desc)
1583 p = &((rx_q->dma_erx + i)->basic);
1584 else
1585 p = rx_q->dma_rx + i;
1586
1587 ret = stmmac_init_rx_buffers(priv, p, i, flags,
1588 queue);
1589 if (ret)
1590 return ret;
bba2556e
OBL
1591
1592 rx_q->buf_alloc_num++;
4298255f
OBL
1593 }
1594
1595 return 0;
1596}
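
/* Editor's note: an illustrative sketch, not part of the driver. The loop
 * above picks the descriptor words the DMA engine actually parses: with
 * extended descriptors the basic descriptor is assumed to sit as the first
 * member of struct dma_extended_desc (see descs.h), so indexing the
 * extended array and taking ->basic reaches the same four 32-bit words a
 * plain ring would provide:
 *
 *	struct dma_desc          { __le32 des0, des1, des2, des3; };
 *	struct dma_extended_desc { struct dma_desc basic;
 *				   __le32 des4, des5, des6, des7; };
 *
 *	p = &((rx_q->dma_erx + i)->basic);	// extended ring
 *	p = rx_q->dma_rx + i;			// basic ring
 */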
1597
bba2556e
OBL
1598/**
1599 * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1600 * @priv: private structure
1601 * @queue: RX queue index
1602 */
1603static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue)
1604{
1605 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1606 int i;
1607
1608 for (i = 0; i < priv->dma_rx_size; i++) {
1609 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1610
1611 if (!buf->xdp)
1612 continue;
1613
1614 xsk_buff_free(buf->xdp);
1615 buf->xdp = NULL;
1616 }
1617}
1618
1619static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, u32 queue)
1620{
1621 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1622 int i;
1623
1624 for (i = 0; i < priv->dma_rx_size; i++) {
1625 struct stmmac_rx_buffer *buf;
1626 dma_addr_t dma_addr;
1627 struct dma_desc *p;
1628
1629 if (priv->extend_desc)
1630 p = (struct dma_desc *)(rx_q->dma_erx + i);
1631 else
1632 p = rx_q->dma_rx + i;
1633
1634 buf = &rx_q->buf_pool[i];
1635
1636 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1637 if (!buf->xdp)
1638 return -ENOMEM;
1639
1640 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1641 stmmac_set_desc_addr(priv, p, dma_addr);
1642 rx_q->buf_alloc_num++;
1643 }
1644
1645 return 0;
1646}
1647
bba2556e
OBL
1648static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1649{
1650 if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1651 return NULL;
1652
1653 return xsk_get_pool_from_qid(priv->dev, queue);
1654}
1655
71fedb01 1656/**
de0b90e5
OBL
1657 * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1658 * @priv: driver private structure
1659 * @queue: RX queue index
732fdf0e 1660 * @flags: gfp flag.
71fedb01 1661 * Description: this function initializes the DMA RX descriptors
5bacd778 1662 * and allocates the socket buffers. It supports the chained and ring
286a8372 1663 * modes.
47dd7a54 1664 */
de0b90e5 1665static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t flags)
47dd7a54 1666{
de0b90e5
OBL
1667 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1668 int ret;
47dd7a54 1669
b3e51069 1670 netif_dbg(priv, probe, priv->dev,
de0b90e5
OBL
1671 "(%s) dma_rx_phy=0x%08x\n", __func__,
1672 (u32)rx_q->dma_rx_phy);
47dd7a54 1673
de0b90e5 1674 stmmac_clear_rx_descriptors(priv, queue);
4298255f 1675
bba2556e 1676 xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
c24602ef 1677
bba2556e 1678 rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
f748be53 1679
bba2556e
OBL
1680 if (rx_q->xsk_pool) {
1681 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1682 MEM_TYPE_XSK_BUFF_POOL,
1683 NULL));
1684 netdev_info(priv->dev,
1685 "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1686 rx_q->queue_index);
1687 xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1688 } else {
1689 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1690 MEM_TYPE_PAGE_POOL,
1691 rx_q->page_pool));
1692 netdev_info(priv->dev,
1693 "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1694 rx_q->queue_index);
1695 }
1696
1697 if (rx_q->xsk_pool) {
1698 /* RX XDP ZC buffer pool may not be populated, e.g.
1699 * xdpsock TX-only.
1700 */
1701 stmmac_alloc_rx_buffers_zc(priv, queue);
1702 } else {
1703 ret = stmmac_alloc_rx_buffers(priv, queue, flags);
1704 if (ret < 0)
1705 return -ENOMEM;
1706 }
cbcf0999 1707
de0b90e5
OBL
1708 rx_q->cur_rx = 0;
1709 rx_q->dirty_rx = 0;
be8b38a7 1710
de0b90e5
OBL
1711 /* Setup the chained descriptor addresses */
1712 if (priv->mode == STMMAC_CHAIN_MODE) {
1713 if (priv->extend_desc)
1714 stmmac_mode_init(priv, rx_q->dma_erx,
1715 rx_q->dma_rx_phy,
1716 priv->dma_rx_size, 1);
1717 else
1718 stmmac_mode_init(priv, rx_q->dma_rx,
1719 rx_q->dma_rx_phy,
1720 priv->dma_rx_size, 0);
1721 }
be8b38a7 1722
de0b90e5
OBL
1723 return 0;
1724}
54139cf3 1725
de0b90e5
OBL
1726static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
1727{
1728 struct stmmac_priv *priv = netdev_priv(dev);
1729 u32 rx_count = priv->plat->rx_queues_to_use;
1730 u32 queue;
1731 int ret;
1732
1733 /* RX INITIALIZATION */
1734 netif_dbg(priv, probe, priv->dev,
1735 "SKB addresses:\nskb\t\tskb data\tdma data\n");
1736
1737 for (queue = 0; queue < rx_count; queue++) {
1738 ret = __init_dma_rx_desc_rings(priv, queue, flags);
1739 if (ret)
1740 goto err_init_rx_buffers;
71fedb01
JP
1741 }
1742
1743 return 0;
54139cf3 1744
71fedb01 1745err_init_rx_buffers:
54139cf3 1746 while (queue >= 0) {
bba2556e
OBL
1747 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1748
1749 if (rx_q->xsk_pool)
1750 dma_free_rx_xskbufs(priv, queue);
1751 else
1752 dma_free_rx_skbufs(priv, queue);
1753
1754 rx_q->buf_alloc_num = 0;
1755 rx_q->xsk_pool = NULL;
54139cf3
JP
1756
1757 if (queue == 0)
1758 break;
1759
54139cf3
JP
1760 queue--;
1761 }
1762
71fedb01
JP
1763 return ret;
1764}
1765
1766/**
de0b90e5
OBL
1767 * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1768 * @priv: driver private structure
1769 * @queue : TX queue index
71fedb01
JP
1770 * Description: this function initializes the DMA TX descriptors
1771 * and allocates the socket buffers. It supports the chained and ring
1772 * modes.
1773 */
de0b90e5 1774static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)
71fedb01 1775{
de0b90e5 1776 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
71fedb01
JP
1777 int i;
1778
de0b90e5
OBL
1779 netif_dbg(priv, probe, priv->dev,
1780 "(%s) dma_tx_phy=0x%08x\n", __func__,
1781 (u32)tx_q->dma_tx_phy);
71fedb01 1782
de0b90e5
OBL
1783 /* Setup the chained descriptor addresses */
1784 if (priv->mode == STMMAC_CHAIN_MODE) {
1785 if (priv->extend_desc)
1786 stmmac_mode_init(priv, tx_q->dma_etx,
1787 tx_q->dma_tx_phy,
1788 priv->dma_tx_size, 1);
1789 else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1790 stmmac_mode_init(priv, tx_q->dma_tx,
1791 tx_q->dma_tx_phy,
1792 priv->dma_tx_size, 0);
1793 }
aff3d9ef 1794
132c32ee
OBL
1795 tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1796
de0b90e5
OBL
1797 for (i = 0; i < priv->dma_tx_size; i++) {
1798 struct dma_desc *p;
1799
1800 if (priv->extend_desc)
1801 p = &((tx_q->dma_etx + i)->basic);
1802 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1803 p = &((tx_q->dma_entx + i)->basic);
1804 else
1805 p = tx_q->dma_tx + i;
aff3d9ef 1806
de0b90e5 1807 stmmac_clear_desc(priv, p);
286a8372 1808
de0b90e5
OBL
1809 tx_q->tx_skbuff_dma[i].buf = 0;
1810 tx_q->tx_skbuff_dma[i].map_as_page = false;
1811 tx_q->tx_skbuff_dma[i].len = 0;
1812 tx_q->tx_skbuff_dma[i].last_segment = false;
1813 tx_q->tx_skbuff[i] = NULL;
c22a3f48 1814 }
aff3d9ef 1815
de0b90e5
OBL
1816 tx_q->dirty_tx = 0;
1817 tx_q->cur_tx = 0;
1818 tx_q->mss = 0;
1819
1820 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
1821
1822 return 0;
1823}
1824
1825static int init_dma_tx_desc_rings(struct net_device *dev)
1826{
1827 struct stmmac_priv *priv = netdev_priv(dev);
1828 u32 tx_queue_cnt;
1829 u32 queue;
1830
1831 tx_queue_cnt = priv->plat->tx_queues_to_use;
1832
1833 for (queue = 0; queue < tx_queue_cnt; queue++)
1834 __init_dma_tx_desc_rings(priv, queue);
1835
71fedb01
JP
1836 return 0;
1837}
1838
1839/**
1840 * init_dma_desc_rings - init the RX/TX descriptor rings
1841 * @dev: net device structure
1842 * @flags: gfp flag.
1843 * Description: this function initializes the DMA RX/TX descriptors
1844 * and allocates the socket buffers. It supports the chained and ring
1845 * modes.
1846 */
1847static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1848{
1849 struct stmmac_priv *priv = netdev_priv(dev);
1850 int ret;
1851
1852 ret = init_dma_rx_desc_rings(dev, flags);
1853 if (ret)
1854 return ret;
1855
1856 ret = init_dma_tx_desc_rings(dev);
1857
5bacd778 1858 stmmac_clear_descriptors(priv);
47dd7a54 1859
c24602ef
GC
1860 if (netif_msg_hw(priv))
1861 stmmac_display_rings(priv);
56329137 1862
56329137 1863 return ret;
47dd7a54
GC
1864}
1865
71fedb01
JP
1866/**
1867 * dma_free_tx_skbufs - free TX dma buffers
1868 * @priv: private structure
ce736788 1869 * @queue: TX queue index
71fedb01 1870 */
ce736788 1871static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
47dd7a54 1872{
132c32ee 1873 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
47dd7a54
GC
1874 int i;
1875
132c32ee
OBL
1876 tx_q->xsk_frames_done = 0;
1877
aa042f60 1878 for (i = 0; i < priv->dma_tx_size; i++)
ce736788 1879 stmmac_free_tx_buffer(priv, queue, i);
132c32ee
OBL
1880
1881 if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1882 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1883 tx_q->xsk_frames_done = 0;
1884 tx_q->xsk_pool = NULL;
1885 }
47dd7a54
GC
1886}
1887
4ec236c7
FD
1888/**
1889 * stmmac_free_tx_skbufs - free TX skb buffers
1890 * @priv: private structure
1891 */
1892static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1893{
1894 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1895 u32 queue;
1896
1897 for (queue = 0; queue < tx_queue_cnt; queue++)
1898 dma_free_tx_skbufs(priv, queue);
1899}
1900
54139cf3 1901/**
da5ec7f2 1902 * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
54139cf3 1903 * @priv: private structure
da5ec7f2 1904 * @queue: RX queue index
54139cf3 1905 */
da5ec7f2
OBL
1906static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
1907{
1908 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
1909
1910 /* Release the DMA RX socket buffers */
bba2556e
OBL
1911 if (rx_q->xsk_pool)
1912 dma_free_rx_xskbufs(priv, queue);
1913 else
1914 dma_free_rx_skbufs(priv, queue);
1915
1916 rx_q->buf_alloc_num = 0;
1917 rx_q->xsk_pool = NULL;
da5ec7f2
OBL
1918
1919 /* Free DMA regions of consistent memory previously allocated */
1920 if (!priv->extend_desc)
1921 dma_free_coherent(priv->device, priv->dma_rx_size *
1922 sizeof(struct dma_desc),
1923 rx_q->dma_rx, rx_q->dma_rx_phy);
1924 else
1925 dma_free_coherent(priv->device, priv->dma_rx_size *
1926 sizeof(struct dma_extended_desc),
1927 rx_q->dma_erx, rx_q->dma_rx_phy);
1928
1929 if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1930 xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1931
1932 kfree(rx_q->buf_pool);
1933 if (rx_q->page_pool)
1934 page_pool_destroy(rx_q->page_pool);
1935}
1936
54139cf3
JP
1937static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
1938{
1939 u32 rx_count = priv->plat->rx_queues_to_use;
1940 u32 queue;
1941
1942 /* Free RX queue resources */
da5ec7f2
OBL
1943 for (queue = 0; queue < rx_count; queue++)
1944 __free_dma_rx_desc_resources(priv, queue);
1945}
54139cf3 1946
da5ec7f2
OBL
1947/**
1948 * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1949 * @priv: private structure
1950 * @queue: TX queue index
1951 */
1952static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
1953{
1954 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
1955 size_t size;
1956 void *addr;
1957
1958 /* Release the DMA TX socket buffers */
1959 dma_free_tx_skbufs(priv, queue);
1960
1961 if (priv->extend_desc) {
1962 size = sizeof(struct dma_extended_desc);
1963 addr = tx_q->dma_etx;
1964 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1965 size = sizeof(struct dma_edesc);
1966 addr = tx_q->dma_entx;
1967 } else {
1968 size = sizeof(struct dma_desc);
1969 addr = tx_q->dma_tx;
1970 }
54139cf3 1971
da5ec7f2 1972 size *= priv->dma_tx_size;
54139cf3 1973
da5ec7f2 1974 dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
be8b38a7 1975
da5ec7f2
OBL
1976 kfree(tx_q->tx_skbuff_dma);
1977 kfree(tx_q->tx_skbuff);
54139cf3
JP
1978}
1979
ce736788
JP
1980static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
1981{
1982 u32 tx_count = priv->plat->tx_queues_to_use;
62242260 1983 u32 queue;
ce736788
JP
1984
1985 /* Free TX queue resources */
da5ec7f2
OBL
1986 for (queue = 0; queue < tx_count; queue++)
1987 __free_dma_tx_desc_resources(priv, queue);
ce736788
JP
1988}
1989
732fdf0e 1990/**
da5ec7f2 1991 * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
732fdf0e 1992 * @priv: private structure
da5ec7f2 1993 * @queue: RX queue index
732fdf0e 1994 * Description: according to which descriptor can be used (extend or basic)
5bacd778
LC
1995 * this function allocates the resources for the RX path (per queue). In case
1996 * of reception, for example, it pre-allocates the RX buffers in order to
1997 * allow a zero-copy mechanism.
732fdf0e 1998 */
da5ec7f2 1999static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
09f8d696 2000{
da5ec7f2
OBL
2001 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
2002 struct stmmac_channel *ch = &priv->channel[queue];
5fabb012 2003 bool xdp_prog = stmmac_xdp_is_enabled(priv);
da5ec7f2
OBL
2004 struct page_pool_params pp_params = { 0 };
2005 unsigned int num_pages;
132c32ee 2006 unsigned int napi_id;
da5ec7f2 2007 int ret;
09f8d696 2008
da5ec7f2
OBL
2009 rx_q->queue_index = queue;
2010 rx_q->priv_data = priv;
2011
2012 pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2013 pp_params.pool_size = priv->dma_rx_size;
2014 num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
2015 pp_params.order = ilog2(num_pages);
2016 pp_params.nid = dev_to_node(priv->device);
2017 pp_params.dev = priv->device;
2018 pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2019 pp_params.offset = stmmac_rx_offset(priv);
2020 pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2021
2022 rx_q->page_pool = page_pool_create(&pp_params);
2023 if (IS_ERR(rx_q->page_pool)) {
2024 ret = PTR_ERR(rx_q->page_pool);
2025 rx_q->page_pool = NULL;
2026 return ret;
2027 }
09f8d696 2028
da5ec7f2
OBL
2029 rx_q->buf_pool = kcalloc(priv->dma_rx_size,
2030 sizeof(*rx_q->buf_pool),
2031 GFP_KERNEL);
2032 if (!rx_q->buf_pool)
2033 return -ENOMEM;
71fedb01 2034
da5ec7f2
OBL
2035 if (priv->extend_desc) {
2036 rx_q->dma_erx = dma_alloc_coherent(priv->device,
2037 priv->dma_rx_size *
2038 sizeof(struct dma_extended_desc),
2039 &rx_q->dma_rx_phy,
2040 GFP_KERNEL);
2041 if (!rx_q->dma_erx)
2042 return -ENOMEM;
54139cf3 2043
da5ec7f2
OBL
2044 } else {
2045 rx_q->dma_rx = dma_alloc_coherent(priv->device,
2046 priv->dma_rx_size *
2047 sizeof(struct dma_desc),
2048 &rx_q->dma_rx_phy,
2049 GFP_KERNEL);
2050 if (!rx_q->dma_rx)
2051 return -ENOMEM;
2052 }
54139cf3 2053
132c32ee
OBL
2054 if (stmmac_xdp_is_enabled(priv) &&
2055 test_bit(queue, priv->af_xdp_zc_qps))
2056 napi_id = ch->rxtx_napi.napi_id;
2057 else
2058 napi_id = ch->rx_napi.napi_id;
2059
da5ec7f2
OBL
2060 ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2061 rx_q->queue_index,
132c32ee 2062 napi_id);
da5ec7f2
OBL
2063 if (ret) {
2064 netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2065 return -EINVAL;
2066 }
be8b38a7 2067
da5ec7f2
OBL
2068 return 0;
2069}
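
/* Editor's note: a worked example of the page_pool sizing above, not driver
 * code, assuming a 4 KiB PAGE_SIZE. With dma_buf_sz = 8192, DIV_ROUND_UP()
 * gives num_pages = 2 and pp_params.order = ilog2(2) = 1, so every pool
 * allocation is an order-1 (two page) compound buffer; with a default-MTU
 * buffer size the order stays 0 and one page backs each RX buffer. The DMA
 * direction becomes bidirectional only when an XDP program is attached,
 * since XDP_TX may transmit straight out of these RX pages.
 */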
2070
2071static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
2072{
2073 u32 rx_count = priv->plat->rx_queues_to_use;
2074 u32 queue;
2075 int ret;
2076
2077 /* RX queues buffers and DMA */
2078 for (queue = 0; queue < rx_count; queue++) {
2079 ret = __alloc_dma_rx_desc_resources(priv, queue);
2080 if (ret)
be8b38a7 2081 goto err_dma;
71fedb01
JP
2082 }
2083
2084 return 0;
2085
2086err_dma:
54139cf3
JP
2087 free_dma_rx_desc_resources(priv);
2088
71fedb01
JP
2089 return ret;
2090}
2091
2092/**
da5ec7f2 2093 * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
71fedb01 2094 * @priv: private structure
da5ec7f2 2095 * @queue: TX queue index
71fedb01
JP
2096 * Description: according to which descriptor can be used (extend or basic)
2097 * this function allocates the resources for the TX path (per queue): the
2098 * tx_skbuff/tx_skbuff_dma bookkeeping arrays and the coherent descriptor
2099 * ring used by the DMA engine.
2100 */
da5ec7f2 2101static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
71fedb01 2102{
da5ec7f2
OBL
2103 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2104 size_t size;
2105 void *addr;
71fedb01 2106
da5ec7f2
OBL
2107 tx_q->queue_index = queue;
2108 tx_q->priv_data = priv;
5bacd778 2109
da5ec7f2
OBL
2110 tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size,
2111 sizeof(*tx_q->tx_skbuff_dma),
2112 GFP_KERNEL);
2113 if (!tx_q->tx_skbuff_dma)
2114 return -ENOMEM;
5bacd778 2115
da5ec7f2
OBL
2116 tx_q->tx_skbuff = kcalloc(priv->dma_tx_size,
2117 sizeof(struct sk_buff *),
2118 GFP_KERNEL);
2119 if (!tx_q->tx_skbuff)
2120 return -ENOMEM;
ce736788 2121
da5ec7f2
OBL
2122 if (priv->extend_desc)
2123 size = sizeof(struct dma_extended_desc);
2124 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2125 size = sizeof(struct dma_edesc);
2126 else
2127 size = sizeof(struct dma_desc);
ce736788 2128
da5ec7f2 2129 size *= priv->dma_tx_size;
579a25a8 2130
da5ec7f2
OBL
2131 addr = dma_alloc_coherent(priv->device, size,
2132 &tx_q->dma_tx_phy, GFP_KERNEL);
2133 if (!addr)
2134 return -ENOMEM;
579a25a8 2135
da5ec7f2
OBL
2136 if (priv->extend_desc)
2137 tx_q->dma_etx = addr;
2138 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2139 tx_q->dma_entx = addr;
2140 else
2141 tx_q->dma_tx = addr;
579a25a8 2142
da5ec7f2
OBL
2143 return 0;
2144}
2145
2146static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
2147{
2148 u32 tx_count = priv->plat->tx_queues_to_use;
2149 u32 queue;
2150 int ret;
2151
2152 /* TX queues buffers and DMA */
2153 for (queue = 0; queue < tx_count; queue++) {
2154 ret = __alloc_dma_tx_desc_resources(priv, queue);
2155 if (ret)
2156 goto err_dma;
09f8d696
SK
2157 }
2158
2159 return 0;
2160
62242260 2161err_dma:
ce736788 2162 free_dma_tx_desc_resources(priv);
09f8d696
SK
2163 return ret;
2164}
2165
71fedb01
JP
2166/**
2167 * alloc_dma_desc_resources - alloc TX/RX resources.
2168 * @priv: private structure
2169 * Description: according to which descriptor can be used (extend or basic)
2170 * this function allocates the resources for TX and RX paths. In case of
2171 * reception, for example, it pre-allocates the RX socket buffers in order to
2172 * allow a zero-copy mechanism.
2173 */
2174static int alloc_dma_desc_resources(struct stmmac_priv *priv)
2175{
54139cf3 2176 /* RX Allocation */
71fedb01
JP
2177 int ret = alloc_dma_rx_desc_resources(priv);
2178
2179 if (ret)
2180 return ret;
2181
2182 ret = alloc_dma_tx_desc_resources(priv);
2183
2184 return ret;
2185}
2186
71fedb01
JP
2187/**
2188 * free_dma_desc_resources - free dma desc resources
2189 * @priv: private structure
2190 */
2191static void free_dma_desc_resources(struct stmmac_priv *priv)
2192{
71fedb01
JP
2193 /* Release the DMA TX socket buffers */
2194 free_dma_tx_desc_resources(priv);
be8b38a7
OBL
2195
2196 /* Release the DMA RX socket buffers later
2197 * to ensure all pending XDP_TX buffers are returned.
2198 */
2199 free_dma_rx_desc_resources(priv);
71fedb01
JP
2200}
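
/* Editor's note: illustration only, not driver code. The ordering above
 * matters because XDP_TX frames are transmitted straight out of RX
 * page_pool pages: tearing down the TX rings first returns any such
 * in-flight pages, so the RX teardown can then destroy the page_pool
 * without leaking buffers that are still queued for transmission.
 */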
2201
9eb12474 2202/**
2203 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
2204 * @priv: driver private structure
2205 * Description: It is used for enabling the rx queues in the MAC
2206 */
2207static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2208{
4f6046f5
JP
2209 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2210 int queue;
2211 u8 mode;
9eb12474 2212
4f6046f5
JP
2213 for (queue = 0; queue < rx_queues_count; queue++) {
2214 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
c10d4c82 2215 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
4f6046f5 2216 }
9eb12474 2217}
2218
ae4f0d46
JP
2219/**
2220 * stmmac_start_rx_dma - start RX DMA channel
2221 * @priv: driver private structure
2222 * @chan: RX channel index
2223 * Description:
2224 * This starts a RX DMA channel
2225 */
2226static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2227{
2228 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
a4e887fa 2229 stmmac_start_rx(priv, priv->ioaddr, chan);
ae4f0d46
JP
2230}
2231
2232/**
2233 * stmmac_start_tx_dma - start TX DMA channel
2234 * @priv: driver private structure
2235 * @chan: TX channel index
2236 * Description:
2237 * This starts a TX DMA channel
2238 */
2239static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2240{
2241 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
a4e887fa 2242 stmmac_start_tx(priv, priv->ioaddr, chan);
ae4f0d46
JP
2243}
2244
2245/**
2246 * stmmac_stop_rx_dma - stop RX DMA channel
2247 * @priv: driver private structure
2248 * @chan: RX channel index
2249 * Description:
2250 * This stops a RX DMA channel
2251 */
2252static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2253{
2254 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
a4e887fa 2255 stmmac_stop_rx(priv, priv->ioaddr, chan);
ae4f0d46
JP
2256}
2257
2258/**
2259 * stmmac_stop_tx_dma - stop TX DMA channel
2260 * @priv: driver private structure
2261 * @chan: TX channel index
2262 * Description:
2263 * This stops a TX DMA channel
2264 */
2265static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2266{
2267 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
a4e887fa 2268 stmmac_stop_tx(priv, priv->ioaddr, chan);
ae4f0d46
JP
2269}
2270
1ea4043a
VW
2271static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2272{
2273 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2274 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2275 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2276 u32 chan;
2277
2278 for (chan = 0; chan < dma_csr_ch; chan++) {
2279 struct stmmac_channel *ch = &priv->channel[chan];
2280 unsigned long flags;
2281
2282 spin_lock_irqsave(&ch->lock, flags);
2283 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2284 spin_unlock_irqrestore(&ch->lock, flags);
2285 }
2286}
2287
ae4f0d46
JP
2288/**
2289 * stmmac_start_all_dma - start all RX and TX DMA channels
2290 * @priv: driver private structure
2291 * Description:
2292 * This starts all the RX and TX DMA channels
2293 */
2294static void stmmac_start_all_dma(struct stmmac_priv *priv)
2295{
2296 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2297 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2298 u32 chan = 0;
2299
2300 for (chan = 0; chan < rx_channels_count; chan++)
2301 stmmac_start_rx_dma(priv, chan);
2302
2303 for (chan = 0; chan < tx_channels_count; chan++)
2304 stmmac_start_tx_dma(priv, chan);
2305}
2306
2307/**
2308 * stmmac_stop_all_dma - stop all RX and TX DMA channels
2309 * @priv: driver private structure
2310 * Description:
2311 * This stops the RX and TX DMA channels
2312 */
2313static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2314{
2315 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2316 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2317 u32 chan = 0;
2318
2319 for (chan = 0; chan < rx_channels_count; chan++)
2320 stmmac_stop_rx_dma(priv, chan);
2321
2322 for (chan = 0; chan < tx_channels_count; chan++)
2323 stmmac_stop_tx_dma(priv, chan);
2324}
2325
47dd7a54
GC
2326/**
2327 * stmmac_dma_operation_mode - HW DMA operation mode
32ceabca 2328 * @priv: driver private structure
732fdf0e
GC
2329 * Description: it is used for configuring the DMA operation mode register in
2330 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
47dd7a54
GC
2331 */
2332static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2333{
6deee222
JP
2334 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2335 u32 tx_channels_count = priv->plat->tx_queues_to_use;
f88203a2 2336 int rxfifosz = priv->plat->rx_fifo_size;
52a76235 2337 int txfifosz = priv->plat->tx_fifo_size;
6deee222
JP
2338 u32 txmode = 0;
2339 u32 rxmode = 0;
2340 u32 chan = 0;
a0daae13 2341 u8 qmode = 0;
f88203a2 2342
11fbf811
TR
2343 if (rxfifosz == 0)
2344 rxfifosz = priv->dma_cap.rx_fifo_size;
52a76235
JA
2345 if (txfifosz == 0)
2346 txfifosz = priv->dma_cap.tx_fifo_size;
2347
2348 /* Adjust for real per queue fifo size */
2349 rxfifosz /= rx_channels_count;
2350 txfifosz /= tx_channels_count;
11fbf811 2351
6deee222
JP
2352 if (priv->plat->force_thresh_dma_mode) {
2353 txmode = tc;
2354 rxmode = tc;
2355 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
61b8013a
SK
2356 /*
2357 * In case of GMAC, SF mode can be enabled
2358 * to perform the TX COE in HW. This depends on:
ebbb293f
GC
2359 * 1) TX COE is actually supported
2360 * 2) there is no buggy Jumbo frame support
2361 * that requires the csum not to be inserted in the TDES.
2362 */
6deee222
JP
2363 txmode = SF_DMA_MODE;
2364 rxmode = SF_DMA_MODE;
b2dec116 2365 priv->xstats.threshold = SF_DMA_MODE;
6deee222
JP
2366 } else {
2367 txmode = tc;
2368 rxmode = SF_DMA_MODE;
2369 }
2370
2371 /* configure all channels */
ab0204e3 2372 for (chan = 0; chan < rx_channels_count; chan++) {
bba2556e
OBL
2373 struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
2374 u32 buf_size;
2375
ab0204e3 2376 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
a0daae13 2377
ab0204e3
JA
2378 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2379 rxfifosz, qmode);
bba2556e
OBL
2380
2381 if (rx_q->xsk_pool) {
2382 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2383 stmmac_set_dma_bfsize(priv, priv->ioaddr,
2384 buf_size,
2385 chan);
2386 } else {
2387 stmmac_set_dma_bfsize(priv, priv->ioaddr,
2388 priv->dma_buf_sz,
2389 chan);
2390 }
ab0204e3 2391 }
a0daae13 2392
ab0204e3
JA
2393 for (chan = 0; chan < tx_channels_count; chan++) {
2394 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
6deee222 2395
ab0204e3
JA
2396 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2397 txfifosz, qmode);
6deee222 2398 }
47dd7a54
GC
2399}
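
/* Editor's note: an illustrative helper, not used by the driver, restating
 * the per-queue FIFO split performed above. The platform (or dma_cap, when
 * the platform leaves the size at 0) reports the total RX/TX FIFO size,
 * which is divided evenly across the active queues: e.g. an 8192 byte RX
 * FIFO shared by 4 RX queues leaves 2048 bytes per queue for the
 * threshold/Store-And-Forward programming.
 */
static inline int stmmac_example_per_queue_fifosz(int total_fifosz,
						  u32 queue_count)
{
	/* Mirrors the "fifosz /= channels_count" adjustment above. */
	return queue_count ? total_fifosz / (int)queue_count : total_fifosz;
}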
2400
132c32ee
OBL
2401static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2402{
2403 struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2404 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2405 struct xsk_buff_pool *pool = tx_q->xsk_pool;
2406 unsigned int entry = tx_q->cur_tx;
2407 struct dma_desc *tx_desc = NULL;
2408 struct xdp_desc xdp_desc;
2409 bool work_done = true;
2410
2411 /* Avoids TX time-out as we are sharing with slow path */
2412 nq->trans_start = jiffies;
2413
2414 budget = min(budget, stmmac_tx_avail(priv, queue));
2415
2416 while (budget-- > 0) {
2417 dma_addr_t dma_addr;
2418 bool set_ic;
2419
2420 /* We are sharing with slow path and stop XSK TX desc submission when
2421 * available TX ring space is below the threshold.
2422 */
2423 if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2424 !netif_carrier_ok(priv->dev)) {
2425 work_done = false;
2426 break;
2427 }
2428
2429 if (!xsk_tx_peek_desc(pool, &xdp_desc))
2430 break;
2431
2432 if (likely(priv->extend_desc))
2433 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2434 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2435 tx_desc = &tx_q->dma_entx[entry].basic;
2436 else
2437 tx_desc = tx_q->dma_tx + entry;
2438
2439 dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2440 xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2441
2442 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2443
2444 /* To return XDP buffer to XSK pool, we simply call
2445 * xsk_tx_completed(), so we don't need to fill up
2446 * 'buf' and 'xdpf'.
2447 */
2448 tx_q->tx_skbuff_dma[entry].buf = 0;
2449 tx_q->xdpf[entry] = NULL;
2450
2451 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2452 tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2453 tx_q->tx_skbuff_dma[entry].last_segment = true;
2454 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2455
2456 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2457
2458 tx_q->tx_count_frames++;
2459
2460 if (!priv->tx_coal_frames[queue])
2461 set_ic = false;
2462 else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2463 set_ic = true;
2464 else
2465 set_ic = false;
2466
2467 if (set_ic) {
2468 tx_q->tx_count_frames = 0;
2469 stmmac_set_tx_ic(priv, tx_desc);
2470 priv->xstats.tx_set_ic_bit++;
2471 }
2472
2473 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2474 true, priv->mode, true, true,
2475 xdp_desc.len);
2476
2477 stmmac_enable_dma_transmission(priv, priv->ioaddr);
2478
2479 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
2480 entry = tx_q->cur_tx;
2481 }
2482
2483 if (tx_desc) {
2484 stmmac_flush_tx_descriptors(priv, queue);
2485 xsk_tx_release(pool);
2486 }
2487
2488 /* Return true if both of the following conditions are met
2489 * a) TX budget is still available
2490 * b) work_done = true when XSK TX desc peek is empty (no more
2491 * pending XSK TX for transmission)
2492 */
2493 return !!budget && work_done;
2494}
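
/* Editor's note: an illustrative helper, not used by the driver, restating
 * the interrupt-coalescing decision taken in the loop above: a completion
 * interrupt (set_ic) is requested only on every tx_coal_frames-th
 * descriptor, so most XSK transmissions complete without raising a TX IRQ,
 * while the per-queue TX timer used elsewhere in the driver covers the
 * tail when traffic stops before the next multiple is reached.
 */
static inline bool stmmac_example_want_tx_irq(u32 tx_count_frames,
					      u32 coal_frames)
{
	/* coal_frames == 0 means "never on a frame-count basis". */
	return coal_frames && (tx_count_frames % coal_frames == 0);
}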
2495
47dd7a54 2496/**
732fdf0e 2497 * stmmac_tx_clean - to manage the transmission completion
32ceabca 2498 * @priv: driver private structure
d0ea5cbd 2499 * @budget: napi budget limiting this function's packet handling
ce736788 2500 * @queue: TX queue index
732fdf0e 2501 * Description: it reclaims the transmit resources after transmission completes.
47dd7a54 2502 */
8fce3331 2503static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
47dd7a54 2504{
ce736788 2505 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
38979574 2506 unsigned int bytes_compl = 0, pkts_compl = 0;
132c32ee 2507 unsigned int entry, xmits = 0, count = 0;
47dd7a54 2508
8fce3331 2509 __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
a9097a96 2510
9125cdd1
GC
2511 priv->xstats.tx_clean++;
2512
132c32ee
OBL
2513 tx_q->xsk_frames_done = 0;
2514
8d5f4b07 2515 entry = tx_q->dirty_tx;
132c32ee
OBL
2516
2517 /* Try to clean all TX complete frame in 1 shot */
2518 while ((entry != tx_q->cur_tx) && count < priv->dma_tx_size) {
be8b38a7
OBL
2519 struct xdp_frame *xdpf;
2520 struct sk_buff *skb;
c24602ef 2521 struct dma_desc *p;
c363b658 2522 int status;
c24602ef 2523
8b278a5b
OBL
2524 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2525 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
be8b38a7
OBL
2526 xdpf = tx_q->xdpf[entry];
2527 skb = NULL;
2528 } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2529 xdpf = NULL;
2530 skb = tx_q->tx_skbuff[entry];
2531 } else {
2532 xdpf = NULL;
2533 skb = NULL;
2534 }
2535
c24602ef 2536 if (priv->extend_desc)
ce736788 2537 p = (struct dma_desc *)(tx_q->dma_etx + entry);
579a25a8
JA
2538 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2539 p = &tx_q->dma_entx[entry].basic;
c24602ef 2540 else
ce736788 2541 p = tx_q->dma_tx + entry;
47dd7a54 2542
42de047d
JA
2543 status = stmmac_tx_status(priv, &priv->dev->stats,
2544 &priv->xstats, p, priv->ioaddr);
c363b658
FG
2545 /* Check if the descriptor is owned by the DMA */
2546 if (unlikely(status & tx_dma_own))
2547 break;
2548
8fce3331
JA
2549 count++;
2550
a6b25da5
NC
2551 /* Make sure descriptor fields are read after reading
2552 * the own bit.
2553 */
2554 dma_rmb();
2555
c363b658
FG
2556 /* Just consider the last segment and ...*/
2557 if (likely(!(status & tx_not_ls))) {
2558 /* ... verify the status error condition */
2559 if (unlikely(status & tx_err)) {
2560 priv->dev->stats.tx_errors++;
2561 } else {
47dd7a54
GC
2562 priv->dev->stats.tx_packets++;
2563 priv->xstats.tx_pkt_n++;
68e9c5de 2564 priv->xstats.txq_stats[queue].tx_pkt_n++;
c363b658 2565 }
be8b38a7
OBL
2566 if (skb)
2567 stmmac_get_tx_hwtstamp(priv, p, skb);
47dd7a54 2568 }
47dd7a54 2569
be8b38a7
OBL
2570 if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2571 tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
ce736788 2572 if (tx_q->tx_skbuff_dma[entry].map_as_page)
362b37be 2573 dma_unmap_page(priv->device,
ce736788
JP
2574 tx_q->tx_skbuff_dma[entry].buf,
2575 tx_q->tx_skbuff_dma[entry].len,
362b37be
GC
2576 DMA_TO_DEVICE);
2577 else
2578 dma_unmap_single(priv->device,
ce736788
JP
2579 tx_q->tx_skbuff_dma[entry].buf,
2580 tx_q->tx_skbuff_dma[entry].len,
362b37be 2581 DMA_TO_DEVICE);
ce736788
JP
2582 tx_q->tx_skbuff_dma[entry].buf = 0;
2583 tx_q->tx_skbuff_dma[entry].len = 0;
2584 tx_q->tx_skbuff_dma[entry].map_as_page = false;
cf32deec 2585 }
f748be53 2586
2c520b1c 2587 stmmac_clean_desc3(priv, tx_q, p);
f748be53 2588
ce736788
JP
2589 tx_q->tx_skbuff_dma[entry].last_segment = false;
2590 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
47dd7a54 2591
be8b38a7
OBL
2592 if (xdpf &&
2593 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2594 xdp_return_frame_rx_napi(xdpf);
2595 tx_q->xdpf[entry] = NULL;
2596 }
2597
8b278a5b
OBL
2598 if (xdpf &&
2599 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2600 xdp_return_frame(xdpf);
2601 tx_q->xdpf[entry] = NULL;
2602 }
2603
132c32ee
OBL
2604 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2605 tx_q->xsk_frames_done++;
2606
be8b38a7
OBL
2607 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2608 if (likely(skb)) {
2609 pkts_compl++;
2610 bytes_compl += skb->len;
2611 dev_consume_skb_any(skb);
2612 tx_q->tx_skbuff[entry] = NULL;
2613 }
47dd7a54
GC
2614 }
2615
42de047d 2616 stmmac_release_tx_desc(priv, p, priv->mode);
47dd7a54 2617
aa042f60 2618 entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
47dd7a54 2619 }
ce736788 2620 tx_q->dirty_tx = entry;
38979574 2621
c22a3f48
JP
2622 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2623 pkts_compl, bytes_compl);
2624
2625 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2626 queue))) &&
aa042f60 2627 stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
38979574 2628
739c8e14
LS
2629 netif_dbg(priv, tx_done, priv->dev,
2630 "%s: restart transmit\n", __func__);
c22a3f48 2631 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
47dd7a54 2632 }
d765955d 2633
132c32ee
OBL
2634 if (tx_q->xsk_pool) {
2635 bool work_done;
2636
2637 if (tx_q->xsk_frames_done)
2638 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2639
2640 if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2641 xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2642
2643 /* For XSK TX, we try to send as many as possible.
2644 * If XSK work done (XSK TX desc empty and budget still
2645 * available), return "budget - 1" to reenable TX IRQ.
2646 * Else, return "budget" to make NAPI continue polling.
2647 */
2648 work_done = stmmac_xdp_xmit_zc(priv, queue,
2649 STMMAC_XSK_TX_BUDGET_MAX);
2650 if (work_done)
2651 xmits = budget - 1;
2652 else
2653 xmits = budget;
2654 }
2655
be1c7eae
VJK
2656 if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2657 priv->eee_sw_timer_en) {
3b439926
JZ
2658 if (stmmac_enable_eee_mode(priv))
2659 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
d765955d 2660 }
8fce3331 2661
4ccb4585
JA
2662 /* We still have pending packets, let's call for a new scheduling */
2663 if (tx_q->dirty_tx != tx_q->cur_tx)
db2f2842
OBL
2664 hrtimer_start(&tx_q->txtimer,
2665 STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
d5a05e69 2666 HRTIMER_MODE_REL);
4ccb4585 2667
8fce3331
JA
2668 __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2669
132c32ee
OBL
2670 /* Combine decisions from TX clean and XSK TX */
2671 return max(count, xmits);
47dd7a54
GC
2672}
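
/* Editor's note: a minimal sketch, not part of the driver, of the ring walk
 * used above: dirty_tx chases cur_tx one slot at a time and wraps at the
 * ring size. It assumes STMMAC_GET_ENTRY() masks with a power-of-two ring
 * size, as defined in stmmac.h.
 */
static inline unsigned int stmmac_example_next_entry(unsigned int entry,
						     unsigned int ring_size)
{
	/* e.g. ring_size = 512: 510 -> 511 -> 0 -> 1 -> ... */
	return (entry + 1) & (ring_size - 1);
}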
2673
47dd7a54 2674/**
732fdf0e 2675 * stmmac_tx_err - to manage the tx error
32ceabca 2676 * @priv: driver private structure
5bacd778 2677 * @chan: channel index
47dd7a54 2678 * Description: it cleans the descriptors and restarts the transmission
732fdf0e 2679 * in case of transmission errors.
47dd7a54 2680 */
5bacd778 2681static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
47dd7a54 2682{
ce736788 2683 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
ce736788 2684
c22a3f48 2685 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
47dd7a54 2686
ae4f0d46 2687 stmmac_stop_tx_dma(priv, chan);
ce736788 2688 dma_free_tx_skbufs(priv, chan);
579a25a8 2689 stmmac_clear_tx_descriptors(priv, chan);
ce736788
JP
2690 tx_q->dirty_tx = 0;
2691 tx_q->cur_tx = 0;
8d212a9e 2692 tx_q->mss = 0;
c22a3f48 2693 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
f421031e
JK
2694 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2695 tx_q->dma_tx_phy, chan);
ae4f0d46 2696 stmmac_start_tx_dma(priv, chan);
47dd7a54
GC
2697
2698 priv->dev->stats.tx_errors++;
c22a3f48 2699 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
47dd7a54
GC
2700}
2701
6deee222
JP
2702/**
2703 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2704 * @priv: driver private structure
2705 * @txmode: TX operating mode
2706 * @rxmode: RX operating mode
2707 * @chan: channel index
2708 * Description: it is used for configuring the DMA operation mode at
2709 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2710 * mode.
2711 */
2712static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2713 u32 rxmode, u32 chan)
2714{
a0daae13
JA
2715 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2716 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
52a76235
JA
2717 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2718 u32 tx_channels_count = priv->plat->tx_queues_to_use;
6deee222 2719 int rxfifosz = priv->plat->rx_fifo_size;
52a76235 2720 int txfifosz = priv->plat->tx_fifo_size;
6deee222
JP
2721
2722 if (rxfifosz == 0)
2723 rxfifosz = priv->dma_cap.rx_fifo_size;
52a76235
JA
2724 if (txfifosz == 0)
2725 txfifosz = priv->dma_cap.tx_fifo_size;
2726
2727 /* Adjust for real per queue fifo size */
2728 rxfifosz /= rx_channels_count;
2729 txfifosz /= tx_channels_count;
6deee222 2730
ab0204e3
JA
2731 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2732 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
6deee222
JP
2733}
2734
8bf993a5
JA
2735static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2736{
63a550fc 2737 int ret;
8bf993a5 2738
c10d4c82
JA
2739 ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2740 priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2741 if (ret && (ret != -EINVAL)) {
8bf993a5 2742 stmmac_global_err(priv);
c10d4c82
JA
2743 return true;
2744 }
2745
2746 return false;
8bf993a5
JA
2747}
2748
7e1c520c 2749static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
8fce3331
JA
2750{
2751 int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
7e1c520c 2752 &priv->xstats, chan, dir);
132c32ee
OBL
2753 struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
2754 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
8fce3331 2755 struct stmmac_channel *ch = &priv->channel[chan];
132c32ee
OBL
2756 struct napi_struct *rx_napi;
2757 struct napi_struct *tx_napi;
021bd5e3 2758 unsigned long flags;
8fce3331 2759
132c32ee
OBL
2760 rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2761 tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2762
4ccb4585 2763 if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
132c32ee 2764 if (napi_schedule_prep(rx_napi)) {
021bd5e3
JA
2765 spin_lock_irqsave(&ch->lock, flags);
2766 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2767 spin_unlock_irqrestore(&ch->lock, flags);
132c32ee 2768 __napi_schedule(rx_napi);
3ba07deb 2769 }
8fce3331
JA
2770 }
2771
021bd5e3 2772 if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
132c32ee 2773 if (napi_schedule_prep(tx_napi)) {
021bd5e3
JA
2774 spin_lock_irqsave(&ch->lock, flags);
2775 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2776 spin_unlock_irqrestore(&ch->lock, flags);
132c32ee 2777 __napi_schedule(tx_napi);
021bd5e3
JA
2778 }
2779 }
8fce3331
JA
2780
2781 return status;
2782}
2783
32ceabca 2784/**
732fdf0e 2785 * stmmac_dma_interrupt - DMA ISR
32ceabca
GC
2786 * @priv: driver private structure
2787 * Description: this is the DMA ISR. It is called by the main ISR.
732fdf0e
GC
2788 * It calls the dwmac dma routine and schedules the poll method when some
2789 * work can be done.
32ceabca 2790 */
aec7ff27
GC
2791static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2792{
d62a107a 2793 u32 tx_channel_count = priv->plat->tx_queues_to_use;
5a6a0445
NC
2794 u32 rx_channel_count = priv->plat->rx_queues_to_use;
2795 u32 channels_to_check = tx_channel_count > rx_channel_count ?
2796 tx_channel_count : rx_channel_count;
d62a107a 2797 u32 chan;
8ac60ffb
KC
2798 int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2799
2800 /* Make sure we never check beyond our status buffer. */
2801 if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2802 channels_to_check = ARRAY_SIZE(status);
5a6a0445 2803
5a6a0445 2804 for (chan = 0; chan < channels_to_check; chan++)
7e1c520c
OBL
2805 status[chan] = stmmac_napi_check(priv, chan,
2806 DMA_DIR_RXTX);
6deee222 2807
5a6a0445
NC
2808 for (chan = 0; chan < tx_channel_count; chan++) {
2809 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
d62a107a
JP
2810 /* Try to bump up the dma threshold on this failure */
2811 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
2812 (tc <= 256)) {
2813 tc += 64;
2814 if (priv->plat->force_thresh_dma_mode)
2815 stmmac_set_dma_operation_mode(priv,
2816 tc,
2817 tc,
2818 chan);
2819 else
2820 stmmac_set_dma_operation_mode(priv,
2821 tc,
2822 SF_DMA_MODE,
2823 chan);
2824 priv->xstats.threshold = tc;
2825 }
5a6a0445 2826 } else if (unlikely(status[chan] == tx_hard_error)) {
d62a107a 2827 stmmac_tx_err(priv, chan);
47dd7a54 2828 }
d62a107a 2829 }
47dd7a54
GC
2830}
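
/* Editor's note: a worked example of the threshold escalation above, not
 * driver code. On a tx_hard_error_bump_tc status the TX threshold "tc" is
 * raised in 64-byte steps and reprogrammed while it is still <= 256
 * (e.g. 64 -> 128 -> 192 -> 256 -> 320), after which the driver stops
 * bumping and keeps the current operation mode; only with
 * force_thresh_dma_mode is the bumped value also applied on the RX side,
 * otherwise RX stays in Store-And-Forward mode.
 */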
2831
32ceabca
GC
2832/**
2833 * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2834 * @priv: driver private structure
2835 * Description: this masks the MMC irq, in fact, the counters are managed in SW.
2836 */
1c901a46
GC
2837static void stmmac_mmc_setup(struct stmmac_priv *priv)
2838{
2839 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
36ff7c1e 2840 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
1c901a46 2841
3b1dd2c5 2842 stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
4f795b25
GC
2843
2844 if (priv->dma_cap.rmon) {
3b1dd2c5 2845 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
4f795b25
GC
2846 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2847 } else
38ddc59d 2848 netdev_info(priv->dev, "No MAC Management Counters available\n");
1c901a46
GC
2849}
2850
19e30c14 2851/**
732fdf0e 2852 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
32ceabca 2853 * @priv: driver private structure
19e30c14
GC
2854 * Description:
2855 * newer GMAC chip generations have a dedicated register to indicate the
2856 * presence of the optional features/functions.
2857 * It can also be used to override the values passed through the
2858 * platform, which is necessary for old MAC10/100 and GMAC chips.
e7434821
GC
2859 */
2860static int stmmac_get_hw_features(struct stmmac_priv *priv)
2861{
a4e887fa 2862 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
e7434821
GC
2863}
2864
32ceabca 2865/**
732fdf0e 2866 * stmmac_check_ether_addr - check if the MAC addr is valid
32ceabca
GC
2867 * @priv: driver private structure
2868 * Description:
2869 * it verifies that the MAC address is valid; in case of failure it
2870 * generates a random MAC address
2871 */
bfab27a1
GC
2872static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2873{
bfab27a1 2874 if (!is_valid_ether_addr(priv->dev->dev_addr)) {
c10d4c82 2875 stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
ceb69499 2876 if (!is_valid_ether_addr(priv->dev->dev_addr))
f2cedb63 2877 eth_hw_addr_random(priv->dev);
af649352
JZ
2878 dev_info(priv->device, "device MAC address %pM\n",
2879 priv->dev->dev_addr);
bfab27a1 2880 }
bfab27a1
GC
2881}
2882
32ceabca 2883/**
732fdf0e 2884 * stmmac_init_dma_engine - DMA init.
32ceabca
GC
2885 * @priv: driver private structure
2886 * Description:
2887 * It inits the DMA by invoking the specific MAC/GMAC callback.
2888 * Some DMA parameters can be passed from the platform;
2889 * in case they are not passed, a default is kept for the MAC or GMAC.
2890 */
0f1f88a8
GC
2891static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2892{
47f2a9ce
JP
2893 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2894 u32 tx_channels_count = priv->plat->tx_queues_to_use;
24aaed0c 2895 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
54139cf3 2896 struct stmmac_rx_queue *rx_q;
ce736788 2897 struct stmmac_tx_queue *tx_q;
47f2a9ce 2898 u32 chan = 0;
c24602ef 2899 int atds = 0;
495db273 2900 int ret = 0;
0f1f88a8 2901
a332e2fa
NC
2902 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2903 dev_err(priv->device, "Invalid DMA configuration\n");
89ab75bf 2904 return -EINVAL;
0f1f88a8
GC
2905 }
2906
c24602ef
GC
2907 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2908 atds = 1;
2909
a4e887fa 2910 ret = stmmac_reset(priv, priv->ioaddr);
495db273
GC
2911 if (ret) {
2912 dev_err(priv->device, "Failed to reset the dma\n");
2913 return ret;
2914 }
2915
7d9e6c5a
JA
2916 /* DMA Configuration */
2917 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
2918
2919 if (priv->plat->axi)
2920 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
2921
af8f3fb7 2922 /* DMA CSR Channel configuration */
1ea4043a 2923 for (chan = 0; chan < dma_csr_ch; chan++) {
af8f3fb7 2924 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
1ea4043a
VW
2925 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2926 }
af8f3fb7 2927
24aaed0c
JA
2928 /* DMA RX Channel Configuration */
2929 for (chan = 0; chan < rx_channels_count; chan++) {
2930 rx_q = &priv->rx_queue[chan];
47f2a9ce 2931
24aaed0c
JA
2932 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2933 rx_q->dma_rx_phy, chan);
54139cf3 2934
24aaed0c 2935 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
bba2556e 2936 (rx_q->buf_alloc_num *
aa042f60 2937 sizeof(struct dma_desc));
24aaed0c
JA
2938 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
2939 rx_q->rx_tail_addr, chan);
2940 }
47f2a9ce 2941
24aaed0c
JA
2942 /* DMA TX Channel Configuration */
2943 for (chan = 0; chan < tx_channels_count; chan++) {
2944 tx_q = &priv->tx_queue[chan];
47f2a9ce 2945
24aaed0c
JA
2946 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2947 tx_q->dma_tx_phy, chan);
ce736788 2948
0431100b 2949 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
24aaed0c
JA
2950 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
2951 tx_q->tx_tail_addr, chan);
2952 }
47f2a9ce 2953
495db273 2954 return ret;
0f1f88a8
GC
2955}
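
/* Editor's note: a worked example of the RX tail pointer programmed above,
 * not driver code. Each struct dma_desc is four 32-bit words (16 bytes), so
 * with buf_alloc_num = 512 pre-filled descriptors the tail pointer becomes
 * dma_rx_phy + 512 * 16 = dma_rx_phy + 8192, i.e. just past the last
 * descriptor that already owns a buffer; the TX tail starts at the ring
 * base because no TX descriptor is ready yet.
 */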
2956
8fce3331
JA
2957static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
2958{
2959 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2960
db2f2842
OBL
2961 hrtimer_start(&tx_q->txtimer,
2962 STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
d5a05e69 2963 HRTIMER_MODE_REL);
8fce3331
JA
2964}
2965
9125cdd1 2966/**
732fdf0e 2967 * stmmac_tx_timer - mitigation sw timer for tx.
d0ea5cbd 2968 * @t: data pointer
9125cdd1
GC
2969 * Description:
2970 * This is the timer handler to directly invoke the stmmac_tx_clean.
2971 */
d5a05e69 2972static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
9125cdd1 2973{
d5a05e69 2974 struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
8fce3331
JA
2975 struct stmmac_priv *priv = tx_q->priv_data;
2976 struct stmmac_channel *ch;
132c32ee 2977 struct napi_struct *napi;
8fce3331
JA
2978
2979 ch = &priv->channel[tx_q->queue_index];
132c32ee 2980 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
9125cdd1 2981
132c32ee 2982 if (likely(napi_schedule_prep(napi))) {
021bd5e3
JA
2983 unsigned long flags;
2984
2985 spin_lock_irqsave(&ch->lock, flags);
2986 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
2987 spin_unlock_irqrestore(&ch->lock, flags);
132c32ee 2988 __napi_schedule(napi);
021bd5e3 2989 }
d5a05e69
VW
2990
2991 return HRTIMER_NORESTART;
9125cdd1
GC
2992}
2993
2994/**
d429b66e 2995 * stmmac_init_coalesce - init mitigation options.
32ceabca 2996 * @priv: driver private structure
9125cdd1 2997 * Description:
d429b66e 2998 * This inits the coalesce parameters: i.e. timer rate,
9125cdd1
GC
2999 * timer handler and default threshold used for enabling the
3000 * interrupt on completion bit.
3001 */
d429b66e 3002static void stmmac_init_coalesce(struct stmmac_priv *priv)
9125cdd1 3003{
8fce3331 3004 u32 tx_channel_count = priv->plat->tx_queues_to_use;
db2f2842 3005 u32 rx_channel_count = priv->plat->rx_queues_to_use;
8fce3331
JA
3006 u32 chan;
3007
8fce3331
JA
3008 for (chan = 0; chan < tx_channel_count; chan++) {
3009 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
3010
db2f2842
OBL
3011 priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3012 priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3013
d5a05e69
VW
3014 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3015 tx_q->txtimer.function = stmmac_tx_timer;
8fce3331 3016 }
db2f2842
OBL
3017
3018 for (chan = 0; chan < rx_channel_count; chan++)
3019 priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
9125cdd1
GC
3020}
3021
4854ab99
JP
3022static void stmmac_set_rings_length(struct stmmac_priv *priv)
3023{
3024 u32 rx_channels_count = priv->plat->rx_queues_to_use;
3025 u32 tx_channels_count = priv->plat->tx_queues_to_use;
3026 u32 chan;
3027
3028 /* set TX ring length */
a4e887fa
JA
3029 for (chan = 0; chan < tx_channels_count; chan++)
3030 stmmac_set_tx_ring_len(priv, priv->ioaddr,
aa042f60 3031 (priv->dma_tx_size - 1), chan);
4854ab99
JP
3032
3033 /* set RX ring length */
a4e887fa
JA
3034 for (chan = 0; chan < rx_channels_count; chan++)
3035 stmmac_set_rx_ring_len(priv, priv->ioaddr,
aa042f60 3036 (priv->dma_rx_size - 1), chan);
4854ab99
JP
3037}
3038
6a3a7193
JP
3039/**
3040 * stmmac_set_tx_queue_weight - Set TX queue weight
3041 * @priv: driver private structure
3042 * Description: It is used for setting the TX queue weights
3043 */
3044static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3045{
3046 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3047 u32 weight;
3048 u32 queue;
3049
3050 for (queue = 0; queue < tx_queues_count; queue++) {
3051 weight = priv->plat->tx_queues_cfg[queue].weight;
c10d4c82 3052 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
6a3a7193
JP
3053 }
3054}
3055
19d91873
JP
3056/**
3057 * stmmac_configure_cbs - Configure CBS in TX queue
3058 * @priv: driver private structure
3059 * Description: It is used for configuring CBS in AVB TX queues
3060 */
3061static void stmmac_configure_cbs(struct stmmac_priv *priv)
3062{
3063 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3064 u32 mode_to_use;
3065 u32 queue;
3066
44781fef
JP
3067 /* queue 0 is reserved for legacy traffic */
3068 for (queue = 1; queue < tx_queues_count; queue++) {
19d91873
JP
3069 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3070 if (mode_to_use == MTL_QUEUE_DCB)
3071 continue;
3072
c10d4c82 3073 stmmac_config_cbs(priv, priv->hw,
19d91873
JP
3074 priv->plat->tx_queues_cfg[queue].send_slope,
3075 priv->plat->tx_queues_cfg[queue].idle_slope,
3076 priv->plat->tx_queues_cfg[queue].high_credit,
3077 priv->plat->tx_queues_cfg[queue].low_credit,
3078 queue);
3079 }
3080}
3081
d43042f4
JP
3082/**
3083 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3084 * @priv: driver private structure
3085 * Description: It is used for mapping RX queues to RX dma channels
3086 */
3087static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3088{
3089 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3090 u32 queue;
3091 u32 chan;
3092
3093 for (queue = 0; queue < rx_queues_count; queue++) {
3094 chan = priv->plat->rx_queues_cfg[queue].chan;
c10d4c82 3095 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
d43042f4
JP
3096 }
3097}
3098
a8f5102a
JP
3099/**
3100 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3101 * @priv: driver private structure
3102 * Description: It is used for configuring the RX Queue Priority
3103 */
3104static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3105{
3106 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3107 u32 queue;
3108 u32 prio;
3109
3110 for (queue = 0; queue < rx_queues_count; queue++) {
3111 if (!priv->plat->rx_queues_cfg[queue].use_prio)
3112 continue;
3113
3114 prio = priv->plat->rx_queues_cfg[queue].prio;
c10d4c82 3115 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
a8f5102a
JP
3116 }
3117}
3118
3119/**
3120 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3121 * @priv: driver private structure
3122 * Description: It is used for configuring the TX Queue Priority
3123 */
3124static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3125{
3126 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3127 u32 queue;
3128 u32 prio;
3129
3130 for (queue = 0; queue < tx_queues_count; queue++) {
3131 if (!priv->plat->tx_queues_cfg[queue].use_prio)
3132 continue;
3133
3134 prio = priv->plat->tx_queues_cfg[queue].prio;
c10d4c82 3135 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
a8f5102a
JP
3136 }
3137}
3138
abe80fdc
JP
3139/**
3140 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3141 * @priv: driver private structure
3142 * Description: It is used for configuring the RX queue routing
3143 */
3144static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3145{
3146 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3147 u32 queue;
3148 u8 packet;
3149
3150 for (queue = 0; queue < rx_queues_count; queue++) {
3151 /* no specific packet type routing specified for the queue */
3152 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3153 continue;
3154
3155 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
c10d4c82 3156 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
abe80fdc
JP
3157 }
3158}
3159
76067459
JA
3160static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3161{
3162 if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3163 priv->rss.enable = false;
3164 return;
3165 }
3166
3167 if (priv->dev->features & NETIF_F_RXHASH)
3168 priv->rss.enable = true;
3169 else
3170 priv->rss.enable = false;
3171
3172 stmmac_rss_configure(priv, priv->hw, &priv->rss,
3173 priv->plat->rx_queues_to_use);
3174}
3175
d0a9c9f9
JP
3176/**
3177 * stmmac_mtl_configuration - Configure MTL
3178 * @priv: driver private structure
3179 * Description: It is used for configuring the MTL
3180 */
3181static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3182{
3183 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3184 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3185
c10d4c82 3186 if (tx_queues_count > 1)
6a3a7193
JP
3187 stmmac_set_tx_queue_weight(priv);
3188
d0a9c9f9 3189 /* Configure MTL RX algorithms */
c10d4c82
JA
3190 if (rx_queues_count > 1)
3191 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3192 priv->plat->rx_sched_algorithm);
d0a9c9f9
JP
3193
3194 /* Configure MTL TX algorithms */
c10d4c82
JA
3195 if (tx_queues_count > 1)
3196 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3197 priv->plat->tx_sched_algorithm);
d0a9c9f9 3198
19d91873 3199 /* Configure CBS in AVB TX queues */
c10d4c82 3200 if (tx_queues_count > 1)
19d91873
JP
3201 stmmac_configure_cbs(priv);
3202
d43042f4 3203 /* Map RX MTL to DMA channels */
c10d4c82 3204 stmmac_rx_queue_dma_chan_map(priv);
d43042f4 3205
d0a9c9f9 3206 /* Enable MAC RX Queues */
c10d4c82 3207 stmmac_mac_enable_rx_queues(priv);
6deee222 3208
a8f5102a 3209 /* Set RX priorities */
c10d4c82 3210 if (rx_queues_count > 1)
a8f5102a
JP
3211 stmmac_mac_config_rx_queues_prio(priv);
3212
3213 /* Set TX priorities */
c10d4c82 3214 if (tx_queues_count > 1)
a8f5102a 3215 stmmac_mac_config_tx_queues_prio(priv);
abe80fdc
JP
3216
3217 /* Set RX routing */
c10d4c82 3218 if (rx_queues_count > 1)
abe80fdc 3219 stmmac_mac_config_rx_queues_routing(priv);
76067459
JA
3220
3221 /* Receive Side Scaling */
3222 if (rx_queues_count > 1)
3223 stmmac_mac_config_rss(priv);
d0a9c9f9
JP
3224}
3225
8bf993a5
JA
3226static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3227{
c10d4c82 3228 if (priv->dma_cap.asp) {
8bf993a5 3229 netdev_info(priv->dev, "Enabling Safety Features\n");
5ac712dc
WVK
3230 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3231 priv->plat->safety_feat_cfg);
8bf993a5
JA
3232 } else {
3233 netdev_info(priv->dev, "No Safety Features support found\n");
3234 }
3235}
3236
5a558611
OBL
3237static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3238{
3239 char *name;
3240
3241 clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
db7c691d 3242 clear_bit(__FPE_REMOVING, &priv->fpe_task_state);
5a558611
OBL
3243
3244 name = priv->wq_name;
3245 sprintf(name, "%s-fpe", priv->dev->name);
3246
3247 priv->fpe_wq = create_singlethread_workqueue(name);
3248 if (!priv->fpe_wq) {
3249 netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
3250
3251 return -ENOMEM;
3252 }
3253 netdev_info(priv->dev, "FPE workqueue start");
3254
3255 return 0;
3256}
3257
523f11b5 3258/**
732fdf0e 3259 * stmmac_hw_setup - setup mac in a usable state.
523f11b5 3260 * @dev : pointer to the device structure.
1b787d1d 3261 * @ptp_register: register PTP if set
523f11b5 3262 * Description:
732fdf0e
GC
3263 * this is the main function to set up the HW in a usable state: the
3264 * DMA engine is reset, the core registers are configured (e.g. AXI,
3265 * checksum features, timers), and the DMA is then ready to start
3266 * receiving and transmitting.
523f11b5
SK
3267 * Return value:
3268 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3269 * file on failure.
3270 */
1b787d1d 3271static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
523f11b5
SK
3272{
3273 struct stmmac_priv *priv = netdev_priv(dev);
3c55d4d0 3274 u32 rx_cnt = priv->plat->rx_queues_to_use;
146617b8 3275 u32 tx_cnt = priv->plat->tx_queues_to_use;
d08d32d1 3276 bool sph_en;
146617b8 3277 u32 chan;
523f11b5
SK
3278 int ret;
3279
523f11b5
SK
3280 /* DMA initialization and SW reset */
3281 ret = stmmac_init_dma_engine(priv);
3282 if (ret < 0) {
38ddc59d
LC
3283 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3284 __func__);
523f11b5
SK
3285 return ret;
3286 }
3287
3288 /* Copy the MAC addr into the HW */
c10d4c82 3289 stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
523f11b5 3290
02e57b9d
GC
3291 /* PS and related bits will be programmed according to the speed */
3292 if (priv->hw->pcs) {
3293 int speed = priv->plat->mac_port_sel_speed;
3294
3295 if ((speed == SPEED_10) || (speed == SPEED_100) ||
3296 (speed == SPEED_1000)) {
3297 priv->hw->ps = speed;
3298 } else {
3299 dev_warn(priv->device, "invalid port speed\n");
3300 priv->hw->ps = 0;
3301 }
3302 }
3303
523f11b5 3304 /* Initialize the MAC Core */
c10d4c82 3305 stmmac_core_init(priv, priv->hw, dev);
523f11b5 3306
d0a9c9f9 3307	/* Initialize MTL */
63a550fc 3308 stmmac_mtl_configuration(priv);
9eb12474 3309
8bf993a5 3310 /* Initialize Safety Features */
63a550fc 3311 stmmac_safety_feat_configuration(priv);
8bf993a5 3312
c10d4c82 3313 ret = stmmac_rx_ipc(priv, priv->hw);
978aded4 3314 if (!ret) {
38ddc59d 3315 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
978aded4 3316 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
d2afb5bd 3317 priv->hw->rx_csum = 0;
978aded4
GC
3318 }
3319
523f11b5 3320 /* Enable the MAC Rx/Tx */
c10d4c82 3321 stmmac_mac_set(priv, priv->ioaddr, true);
523f11b5 3322
b4f0a661
JP
3323 /* Set the HW DMA mode and the COE */
3324 stmmac_dma_operation_mode(priv);
3325
523f11b5
SK
3326 stmmac_mmc_setup(priv);
3327
1b787d1d
MABI
3328 ret = stmmac_init_ptp(priv);
3329 if (ret == -EOPNOTSUPP)
3330 netdev_warn(priv->dev, "PTP not supported by HW\n");
3331 else if (ret)
3332 netdev_warn(priv->dev, "PTP init failed\n");
3333 else if (ptp_register)
3334 stmmac_ptp_register(priv);
523f11b5 3335
388e201d
VJK
3336 priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3337
3338 /* Convert the timer from msec to usec */
3339 if (!priv->tx_lpi_timer)
3340 priv->tx_lpi_timer = eee_timer * 1000;
523f11b5 3341
a4e887fa 3342 if (priv->use_riwt) {
db2f2842
OBL
3343 u32 queue;
3344
3345 for (queue = 0; queue < rx_cnt; queue++) {
3346 if (!priv->rx_riwt[queue])
3347 priv->rx_riwt[queue] = DEF_DMA_RIWT;
4e4337cc 3348
db2f2842
OBL
3349 stmmac_rx_watchdog(priv, priv->ioaddr,
3350 priv->rx_riwt[queue], queue);
3351 }
523f11b5
SK
3352 }
3353
c10d4c82 3354 if (priv->hw->pcs)
c9ad4c10 3355 stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
523f11b5 3356
4854ab99
JP
3357 /* set TX and RX rings length */
3358 stmmac_set_rings_length(priv);
3359
f748be53 3360 /* Enable TSO */
146617b8 3361 if (priv->tso) {
5e6038b8
OBL
3362 for (chan = 0; chan < tx_cnt; chan++) {
3363 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
3364
3365 /* TSO and TBS cannot co-exist */
3366 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3367 continue;
3368
a4e887fa 3369 stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
5e6038b8 3370 }
146617b8 3371 }
f748be53 3372
67afd6d1 3373 /* Enable Split Header */
d08d32d1
OBL
3374 sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3375 for (chan = 0; chan < rx_cnt; chan++)
3376 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3377
67afd6d1 3378
30d93227
JA
3379 /* VLAN Tag Insertion */
3380 if (priv->dma_cap.vlins)
3381 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3382
579a25a8
JA
3383 /* TBS */
3384 for (chan = 0; chan < tx_cnt; chan++) {
3385 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
3386 int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3387
3388 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3389 }
3390
686cff3d
AV
3391 /* Configure real RX and TX queues */
3392 netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3393 netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3394
7d9e6c5a
JA
3395 /* Start the ball rolling... */
3396 stmmac_start_all_dma(priv);
3397
5a558611
OBL
3398 if (priv->dma_cap.fpesel) {
3399 stmmac_fpe_start_wq(priv);
3400
3401 if (priv->plat->fpe_cfg->enable)
3402 stmmac_fpe_handshake(priv, true);
3403 }
3404
523f11b5
SK
3405 return 0;
3406}
3407
c66f6c37
TR
3408static void stmmac_hw_teardown(struct net_device *dev)
3409{
3410 struct stmmac_priv *priv = netdev_priv(dev);
3411
3412 clk_disable_unprepare(priv->plat->clk_ptp_ref);
3413}
3414
8532f613
OBL
3415static void stmmac_free_irq(struct net_device *dev,
3416 enum request_irq_err irq_err, int irq_idx)
3417{
3418 struct stmmac_priv *priv = netdev_priv(dev);
3419 int j;
3420
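	/* The cases below fall through on purpose: freeing starts at the stage
	 * that failed (or at REQ_IRQ_ERR_ALL) and releases every IRQ that was
	 * successfully requested before it.
	 */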
3421 switch (irq_err) {
3422 case REQ_IRQ_ERR_ALL:
3423 irq_idx = priv->plat->tx_queues_to_use;
3424 fallthrough;
3425 case REQ_IRQ_ERR_TX:
3426 for (j = irq_idx - 1; j >= 0; j--) {
8deec94c
OBL
3427 if (priv->tx_irq[j] > 0) {
3428 irq_set_affinity_hint(priv->tx_irq[j], NULL);
8532f613 3429 free_irq(priv->tx_irq[j], &priv->tx_queue[j]);
8deec94c 3430 }
8532f613
OBL
3431 }
3432 irq_idx = priv->plat->rx_queues_to_use;
3433 fallthrough;
3434 case REQ_IRQ_ERR_RX:
3435 for (j = irq_idx - 1; j >= 0; j--) {
8deec94c
OBL
3436 if (priv->rx_irq[j] > 0) {
3437 irq_set_affinity_hint(priv->rx_irq[j], NULL);
8532f613 3438 free_irq(priv->rx_irq[j], &priv->rx_queue[j]);
8deec94c 3439 }
8532f613
OBL
3440 }
3441
3442 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3443 free_irq(priv->sfty_ue_irq, dev);
3444 fallthrough;
3445 case REQ_IRQ_ERR_SFTY_UE:
3446 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3447 free_irq(priv->sfty_ce_irq, dev);
3448 fallthrough;
3449 case REQ_IRQ_ERR_SFTY_CE:
3450 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3451 free_irq(priv->lpi_irq, dev);
3452 fallthrough;
3453 case REQ_IRQ_ERR_LPI:
3454 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3455 free_irq(priv->wol_irq, dev);
3456 fallthrough;
3457 case REQ_IRQ_ERR_WOL:
3458 free_irq(dev->irq, dev);
3459 fallthrough;
3460 case REQ_IRQ_ERR_MAC:
3461 case REQ_IRQ_ERR_NO:
3462	/* If the MAC IRQ request failed, there are no more IRQs to free */
3463 break;
3464 }
3465}
3466
3467static int stmmac_request_irq_multi_msi(struct net_device *dev)
3468{
8532f613 3469 struct stmmac_priv *priv = netdev_priv(dev);
3e6dc7b6 3470 enum request_irq_err irq_err;
8deec94c 3471 cpumask_t cpu_mask;
8532f613
OBL
3472 int irq_idx = 0;
3473 char *int_name;
3474 int ret;
3475 int i;
3476
3477 /* For common interrupt */
3478 int_name = priv->int_name_mac;
3479 sprintf(int_name, "%s:%s", dev->name, "mac");
3480 ret = request_irq(dev->irq, stmmac_mac_interrupt,
3481 0, int_name, dev);
3482 if (unlikely(ret < 0)) {
3483 netdev_err(priv->dev,
3484 "%s: alloc mac MSI %d (error: %d)\n",
3485 __func__, dev->irq, ret);
3486 irq_err = REQ_IRQ_ERR_MAC;
3487 goto irq_error;
3488 }
3489
3490	/* Request the Wake IRQ in case another line
3491	 * is used for WoL
3492 */
3493 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3494 int_name = priv->int_name_wol;
3495 sprintf(int_name, "%s:%s", dev->name, "wol");
3496 ret = request_irq(priv->wol_irq,
3497 stmmac_mac_interrupt,
3498 0, int_name, dev);
3499 if (unlikely(ret < 0)) {
3500 netdev_err(priv->dev,
3501 "%s: alloc wol MSI %d (error: %d)\n",
3502 __func__, priv->wol_irq, ret);
3503 irq_err = REQ_IRQ_ERR_WOL;
3504 goto irq_error;
3505 }
3506 }
3507
3508	/* Request the LPI IRQ in case another line
3509	 * is used for LPI
3510 */
3511 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3512 int_name = priv->int_name_lpi;
3513 sprintf(int_name, "%s:%s", dev->name, "lpi");
3514 ret = request_irq(priv->lpi_irq,
3515 stmmac_mac_interrupt,
3516 0, int_name, dev);
3517 if (unlikely(ret < 0)) {
3518 netdev_err(priv->dev,
3519 "%s: alloc lpi MSI %d (error: %d)\n",
3520 __func__, priv->lpi_irq, ret);
3521 irq_err = REQ_IRQ_ERR_LPI;
3522 goto irq_error;
3523 }
3524 }
3525
3526	/* Request the Safety Feature Correctable Error line in
3527	 * case another line is used
3528 */
3529 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3530 int_name = priv->int_name_sfty_ce;
3531 sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3532 ret = request_irq(priv->sfty_ce_irq,
3533 stmmac_safety_interrupt,
3534 0, int_name, dev);
3535 if (unlikely(ret < 0)) {
3536 netdev_err(priv->dev,
3537 "%s: alloc sfty ce MSI %d (error: %d)\n",
3538 __func__, priv->sfty_ce_irq, ret);
3539 irq_err = REQ_IRQ_ERR_SFTY_CE;
3540 goto irq_error;
3541 }
3542 }
3543
3544	/* Request the Safety Feature Uncorrectable Error line in
3545	 * case another line is used
3546 */
3547 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3548 int_name = priv->int_name_sfty_ue;
3549 sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3550 ret = request_irq(priv->sfty_ue_irq,
3551 stmmac_safety_interrupt,
3552 0, int_name, dev);
3553 if (unlikely(ret < 0)) {
3554 netdev_err(priv->dev,
3555 "%s: alloc sfty ue MSI %d (error: %d)\n",
3556 __func__, priv->sfty_ue_irq, ret);
3557 irq_err = REQ_IRQ_ERR_SFTY_UE;
3558 goto irq_error;
3559 }
3560 }
3561
3562 /* Request Rx MSI irq */
3563 for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3564 if (priv->rx_irq[i] == 0)
3565 continue;
3566
3567 int_name = priv->int_name_rx_irq[i];
3568 sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3569 ret = request_irq(priv->rx_irq[i],
3570 stmmac_msi_intr_rx,
3571 0, int_name, &priv->rx_queue[i]);
3572 if (unlikely(ret < 0)) {
3573 netdev_err(priv->dev,
3574 "%s: alloc rx-%d MSI %d (error: %d)\n",
3575 __func__, i, priv->rx_irq[i], ret);
3576 irq_err = REQ_IRQ_ERR_RX;
3577 irq_idx = i;
3578 goto irq_error;
3579 }
8deec94c
OBL
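		/* Hint an affinity so the per-queue interrupts are spread
		 * across the online CPUs.
		 */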
3580 cpumask_clear(&cpu_mask);
3581 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3582 irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
8532f613
OBL
3583 }
3584
3585 /* Request Tx MSI irq */
3586 for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3587 if (priv->tx_irq[i] == 0)
3588 continue;
3589
3590 int_name = priv->int_name_tx_irq[i];
3591 sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3592 ret = request_irq(priv->tx_irq[i],
3593 stmmac_msi_intr_tx,
3594 0, int_name, &priv->tx_queue[i]);
3595 if (unlikely(ret < 0)) {
3596 netdev_err(priv->dev,
3597 "%s: alloc tx-%d MSI %d (error: %d)\n",
3598 __func__, i, priv->tx_irq[i], ret);
3599 irq_err = REQ_IRQ_ERR_TX;
3600 irq_idx = i;
3601 goto irq_error;
3602 }
8deec94c
OBL
3603 cpumask_clear(&cpu_mask);
3604 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3605 irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
8532f613
OBL
3606 }
3607
3608 return 0;
3609
3610irq_error:
3611 stmmac_free_irq(dev, irq_err, irq_idx);
3612 return ret;
3613}
3614
3615static int stmmac_request_irq_single(struct net_device *dev)
3616{
8532f613 3617 struct stmmac_priv *priv = netdev_priv(dev);
3e6dc7b6 3618 enum request_irq_err irq_err;
8532f613
OBL
3619 int ret;
3620
3621 ret = request_irq(dev->irq, stmmac_interrupt,
3622 IRQF_SHARED, dev->name, dev);
3623 if (unlikely(ret < 0)) {
3624 netdev_err(priv->dev,
3625 "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3626 __func__, dev->irq, ret);
3627 irq_err = REQ_IRQ_ERR_MAC;
3e6dc7b6 3628 goto irq_error;
8532f613
OBL
3629 }
3630
3631	/* Request the Wake IRQ in case another line
3632	 * is used for WoL
3633 */
3634 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3635 ret = request_irq(priv->wol_irq, stmmac_interrupt,
3636 IRQF_SHARED, dev->name, dev);
3637 if (unlikely(ret < 0)) {
3638 netdev_err(priv->dev,
3639 "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3640 __func__, priv->wol_irq, ret);
3641 irq_err = REQ_IRQ_ERR_WOL;
3e6dc7b6 3642 goto irq_error;
8532f613
OBL
3643 }
3644 }
3645
3646	/* Request the LPI IRQ in case another line is used for LPI */
3647 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3648 ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3649 IRQF_SHARED, dev->name, dev);
3650 if (unlikely(ret < 0)) {
3651 netdev_err(priv->dev,
3652 "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3653 __func__, priv->lpi_irq, ret);
3654 irq_err = REQ_IRQ_ERR_LPI;
3655 goto irq_error;
3656 }
3657 }
3658
3659 return 0;
3660
3661irq_error:
3662 stmmac_free_irq(dev, irq_err, 0);
3663 return ret;
3664}
3665
3666static int stmmac_request_irq(struct net_device *dev)
3667{
3668 struct stmmac_priv *priv = netdev_priv(dev);
3669 int ret;
3670
3671 /* Request the IRQ lines */
3672 if (priv->plat->multi_msi_en)
3673 ret = stmmac_request_irq_multi_msi(dev);
3674 else
3675 ret = stmmac_request_irq_single(dev);
3676
3677 return ret;
3678}
3679
47dd7a54
GC
3680/**
3681 * stmmac_open - open entry point of the driver
3682 * @dev : pointer to the device structure.
3683 * Description:
3684 * This function is the open entry point of the driver.
3685 * Return value:
3686 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3687 * file on failure.
3688 */
072a95fd 3689static int stmmac_open(struct net_device *dev)
47dd7a54
GC
3690{
3691 struct stmmac_priv *priv = netdev_priv(dev);
9900074e 3692 int mode = priv->plat->phy_interface;
5d626c87 3693 int bfsize = 0;
8fce3331 3694 u32 chan;
47dd7a54
GC
3695 int ret;
3696
5ec55823
JZ
3697 ret = pm_runtime_get_sync(priv->device);
3698 if (ret < 0) {
3699 pm_runtime_put_noidle(priv->device);
3700 return ret;
3701 }
3702
a47b9e15 3703 if (priv->hw->pcs != STMMAC_PCS_TBI &&
f213bbe8 3704 priv->hw->pcs != STMMAC_PCS_RTBI &&
9900074e 3705 (!priv->hw->xpcs ||
11059740 3706 xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
e58bb43f
GC
3707 ret = stmmac_init_phy(dev);
3708 if (ret) {
38ddc59d
LC
3709 netdev_err(priv->dev,
3710 "%s: Cannot attach to PHY (error: %d)\n",
3711 __func__, ret);
5ec55823 3712 goto init_phy_error;
e58bb43f 3713 }
f66ffe28 3714 }
47dd7a54 3715
523f11b5
SK
3716 /* Extra statistics */
3717 memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
3718 priv->xstats.threshold = tc;
3719
5d626c87
JA
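	/* Pick the DMA buffer size for the current MTU: jumbo MTUs may need
	 * the 16 KiB buffer scheme, otherwise the smallest size that fits the
	 * MTU is used.
	 */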
3720 bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
3721 if (bfsize < 0)
3722 bfsize = 0;
3723
3724 if (bfsize < BUF_SIZE_16KiB)
3725 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
3726
3727 priv->dma_buf_sz = bfsize;
3728 buf_sz = bfsize;
3729
22ad3838 3730 priv->rx_copybreak = STMMAC_RX_COPYBREAK;
56329137 3731
aa042f60
SYS
3732 if (!priv->dma_tx_size)
3733 priv->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3734 if (!priv->dma_rx_size)
3735 priv->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3736
579a25a8
JA
3737 /* Earlier check for TBS */
3738 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3739 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
3740 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3741
5e6038b8 3742 /* Setup per-TXQ tbs flag before TX descriptor alloc */
579a25a8 3743 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
579a25a8
JA
3744 }
3745
5bacd778
LC
3746 ret = alloc_dma_desc_resources(priv);
3747 if (ret < 0) {
3748 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3749 __func__);
3750 goto dma_desc_error;
3751 }
3752
3753 ret = init_dma_desc_rings(dev, GFP_KERNEL);
3754 if (ret < 0) {
3755 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3756 __func__);
3757 goto init_error;
3758 }
3759
fe131929 3760 ret = stmmac_hw_setup(dev, true);
56329137 3761 if (ret < 0) {
38ddc59d 3762 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
c9324d18 3763 goto init_error;
47dd7a54
GC
3764 }
3765
d429b66e 3766 stmmac_init_coalesce(priv);
777da230 3767
74371272 3768 phylink_start(priv->phylink);
77b28983
JZ
3769 /* We may have called phylink_speed_down before */
3770 phylink_speed_up(priv->phylink);
47dd7a54 3771
8532f613
OBL
3772 ret = stmmac_request_irq(dev);
3773 if (ret)
6c1e5abe 3774 goto irq_error;
d765955d 3775
c22a3f48 3776 stmmac_enable_all_queues(priv);
9f19306d 3777 netif_tx_start_all_queues(priv->dev);
1ea4043a 3778 stmmac_enable_all_dma_irq(priv);
f66ffe28 3779
47dd7a54 3780 return 0;
f66ffe28 3781
6c1e5abe 3782irq_error:
74371272 3783 phylink_stop(priv->phylink);
7a13f8f5 3784
8fce3331 3785 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
d5a05e69 3786 hrtimer_cancel(&priv->tx_queue[chan].txtimer);
8fce3331 3787
c66f6c37 3788 stmmac_hw_teardown(dev);
c9324d18
GC
3789init_error:
3790 free_dma_desc_resources(priv);
5bacd778 3791dma_desc_error:
74371272 3792 phylink_disconnect_phy(priv->phylink);
5ec55823
JZ
3793init_phy_error:
3794 pm_runtime_put(priv->device);
f66ffe28 3795 return ret;
47dd7a54
GC
3796}
3797
5a558611
OBL
3798static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
3799{
3800 set_bit(__FPE_REMOVING, &priv->fpe_task_state);
3801
3802 if (priv->fpe_wq)
3803 destroy_workqueue(priv->fpe_wq);
3804
3805 netdev_info(priv->dev, "FPE workqueue stop");
3806}
3807
47dd7a54
GC
3808/**
3809 * stmmac_release - close entry point of the driver
3810 * @dev : device pointer.
3811 * Description:
3812 * This is the stop entry point of the driver.
3813 */
072a95fd 3814static int stmmac_release(struct net_device *dev)
47dd7a54
GC
3815{
3816 struct stmmac_priv *priv = netdev_priv(dev);
8fce3331 3817 u32 chan;
47dd7a54 3818
48c4175e
YV
3819 netif_tx_disable(dev);
3820
77b28983
JZ
3821 if (device_may_wakeup(priv->device))
3822 phylink_speed_down(priv->phylink, false);
47dd7a54 3823 /* Stop and disconnect the PHY */
74371272
JA
3824 phylink_stop(priv->phylink);
3825 phylink_disconnect_phy(priv->phylink);
47dd7a54 3826
c22a3f48 3827 stmmac_disable_all_queues(priv);
47dd7a54 3828
8fce3331 3829 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
d5a05e69 3830 hrtimer_cancel(&priv->tx_queue[chan].txtimer);
9125cdd1 3831
47dd7a54 3832 /* Free the IRQ lines */
8532f613 3833 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
47dd7a54 3834
5f585913
FD
3835 if (priv->eee_enabled) {
3836 priv->tx_path_in_lpi_mode = false;
3837 del_timer_sync(&priv->eee_ctrl_timer);
3838 }
3839
47dd7a54 3840 /* Stop TX/RX DMA and clear the descriptors */
ae4f0d46 3841 stmmac_stop_all_dma(priv);
47dd7a54
GC
3842
3843 /* Release and free the Rx/Tx resources */
3844 free_dma_desc_resources(priv);
3845
19449bfc 3846 /* Disable the MAC Rx/Tx */
c10d4c82 3847 stmmac_mac_set(priv, priv->ioaddr, false);
47dd7a54
GC
3848
3849 netif_carrier_off(dev);
3850
92ba6888
RK
3851 stmmac_release_ptp(priv);
3852
5ec55823
JZ
3853 pm_runtime_put(priv->device);
3854
5a558611
OBL
3855 if (priv->dma_cap.fpesel)
3856 stmmac_fpe_stop_wq(priv);
3857
47dd7a54
GC
3858 return 0;
3859}
3860
30d93227
JA
3861static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
3862 struct stmmac_tx_queue *tx_q)
3863{
3864 u16 tag = 0x0, inner_tag = 0x0;
3865 u32 inner_type = 0x0;
3866 struct dma_desc *p;
3867
3868 if (!priv->dma_cap.vlins)
3869 return false;
3870 if (!skb_vlan_tag_present(skb))
3871 return false;
3872 if (skb->vlan_proto == htons(ETH_P_8021AD)) {
3873 inner_tag = skb_vlan_tag_get(skb);
3874 inner_type = STMMAC_VLAN_INSERT;
3875 }
3876
3877 tag = skb_vlan_tag_get(skb);
3878
579a25a8
JA
3879 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3880 p = &tx_q->dma_entx[tx_q->cur_tx].basic;
3881 else
3882 p = &tx_q->dma_tx[tx_q->cur_tx];
3883
30d93227
JA
3884 if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
3885 return false;
3886
3887 stmmac_set_tx_owner(priv, p);
aa042f60 3888 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
30d93227
JA
3889 return true;
3890}
3891
f748be53
AT
3892/**
3893 * stmmac_tso_allocator - allocate and fill TSO payload descriptors
3894 * @priv: driver private structure
3895 * @des: buffer start address
3896 * @total_len: total length to fill in descriptors
d0ea5cbd 3897 * @last_segment: condition for the last descriptor
ce736788 3898 * @queue: TX queue index
f748be53
AT
3899 * Description:
3900 * This function fills descriptors and requests new ones according to the
3901 * buffer length to fill.
3902 */
a993db88 3903static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
ce736788 3904 int total_len, bool last_segment, u32 queue)
f748be53 3905{
ce736788 3906 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
f748be53 3907 struct dma_desc *desc;
5bacd778 3908 u32 buff_size;
ce736788 3909 int tmp_len;
f748be53
AT
3910
3911 tmp_len = total_len;
3912
3913 while (tmp_len > 0) {
a993db88
JA
3914 dma_addr_t curr_addr;
3915
aa042f60
SYS
3916 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
3917 priv->dma_tx_size);
b4c9784c 3918 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
579a25a8
JA
3919
3920 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3921 desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
3922 else
3923 desc = &tx_q->dma_tx[tx_q->cur_tx];
f748be53 3924
a993db88
JA
3925 curr_addr = des + (total_len - tmp_len);
3926 if (priv->dma_cap.addr64 <= 32)
3927 desc->des0 = cpu_to_le32(curr_addr);
3928 else
3929 stmmac_set_desc_addr(priv, desc, curr_addr);
3930
f748be53
AT
3931 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
3932 TSO_MAX_BUFF_SIZE : tmp_len;
3933
42de047d
JA
3934 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
3935 0, 1,
3936 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
3937 0, 0);
f748be53
AT
3938
3939 tmp_len -= TSO_MAX_BUFF_SIZE;
3940 }
3941}
3942
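/* Illustrative sketch (hypothetical helper, not used by the driver): since
 * stmmac_tso_allocator() places at most TSO_MAX_BUFF_SIZE bytes in each
 * descriptor, a payload of total_len bytes consumes this many descriptors.
 */
static inline int example_tso_desc_count(int total_len)
{
	return DIV_ROUND_UP(total_len, TSO_MAX_BUFF_SIZE);
}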
d96febed
OBL
3943static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
3944{
3945 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3946 int desc_size;
3947
3948 if (likely(priv->extend_desc))
3949 desc_size = sizeof(struct dma_extended_desc);
3950 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
3951 desc_size = sizeof(struct dma_edesc);
3952 else
3953 desc_size = sizeof(struct dma_desc);
3954
3955	/* The own bit must be the last setting done when preparing the
3956	 * descriptor, and a barrier is needed to make sure that all writes
3957	 * are coherent before granting the descriptors to the DMA engine.
3958 */
3959 wmb();
3960
3961 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
3962 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
3963}
3964
f748be53
AT
3965/**
3966 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
3967 * @skb : the socket buffer
3968 * @dev : device pointer
3969 * Description: this is the transmit function that is called on TSO frames
3970 * (support available on GMAC4 and newer chips).
3971 * The diagram below shows the ring programming in the case of TSO frames:
3972 *
3973 * First Descriptor
3974 * --------
3975 * | DES0 |---> buffer1 = L2/L3/L4 header
3976 * | DES1 |---> TCP Payload (can continue on next descr...)
3977 * | DES2 |---> buffer 1 and 2 len
3978 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
3979 * --------
3980 * |
3981 * ...
3982 * |
3983 * --------
3984 * | DES0 | --| Split TCP Payload on Buffers 1 and 2
3985 * | DES1 | --|
3986 * | DES2 | --> buffer 1 and 2 len
3987 * | DES3 |
3988 * --------
3989 *
3990 * mss is fixed when TSO is enabled, so the TDES3 ctx field does not need to be re-programmed for every descriptor.
3991 */
3992static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
3993{
ce736788 3994 struct dma_desc *desc, *first, *mss_desc = NULL;
f748be53
AT
3995 struct stmmac_priv *priv = netdev_priv(dev);
3996 int nfrags = skb_shinfo(skb)->nr_frags;
ce736788 3997 u32 queue = skb_get_queue_mapping(skb);
c2837423 3998 unsigned int first_entry, tx_packets;
d96febed 3999 int tmp_pay_len = 0, first_tx;
ce736788 4000 struct stmmac_tx_queue *tx_q;
c2837423 4001 bool has_vlan, set_ic;
579a25a8 4002 u8 proto_hdr_len, hdr;
ce736788 4003 u32 pay_len, mss;
a993db88 4004 dma_addr_t des;
f748be53
AT
4005 int i;
4006
ce736788 4007 tx_q = &priv->tx_queue[queue];
c2837423 4008 first_tx = tx_q->cur_tx;
ce736788 4009
f748be53 4010 /* Compute header lengths */
b7766206
JA
4011 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4012 proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4013 hdr = sizeof(struct udphdr);
4014 } else {
4015 proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
4016 hdr = tcp_hdrlen(skb);
4017 }
f748be53
AT
4018
4019	/* Checking descriptor availability against the worst-case need should be safe enough */
ce736788 4020 if (unlikely(stmmac_tx_avail(priv, queue) <
f748be53 4021 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
c22a3f48
JP
4022 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4023 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4024 queue));
f748be53 4025 /* This is a hard error, log it. */
38ddc59d
LC
4026 netdev_err(priv->dev,
4027 "%s: Tx Ring full when queue awake\n",
4028 __func__);
f748be53 4029 }
f748be53
AT
4030 return NETDEV_TX_BUSY;
4031 }
4032
4033 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4034
4035 mss = skb_shinfo(skb)->gso_size;
4036
4037 /* set new MSS value if needed */
8d212a9e 4038 if (mss != tx_q->mss) {
579a25a8
JA
4039 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4040 mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4041 else
4042 mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4043
42de047d 4044 stmmac_set_mss(priv, mss_desc, mss);
8d212a9e 4045 tx_q->mss = mss;
aa042f60
SYS
4046 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4047 priv->dma_tx_size);
b4c9784c 4048 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
f748be53
AT
4049 }
4050
4051 if (netif_msg_tx_queued(priv)) {
b7766206
JA
4052 pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4053 __func__, hdr, proto_hdr_len, pay_len, mss);
f748be53
AT
4054 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4055 skb->data_len);
4056 }
4057
30d93227
JA
4058 /* Check if VLAN can be inserted by HW */
4059 has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4060
ce736788 4061 first_entry = tx_q->cur_tx;
b4c9784c 4062 WARN_ON(tx_q->tx_skbuff[first_entry]);
f748be53 4063
579a25a8
JA
4064 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4065 desc = &tx_q->dma_entx[first_entry].basic;
4066 else
4067 desc = &tx_q->dma_tx[first_entry];
f748be53
AT
4068 first = desc;
4069
30d93227
JA
4070 if (has_vlan)
4071 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4072
f748be53
AT
4073 /* first descriptor: fill Headers on Buf1 */
4074 des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4075 DMA_TO_DEVICE);
4076 if (dma_mapping_error(priv->device, des))
4077 goto dma_map_err;
4078
ce736788
JP
4079 tx_q->tx_skbuff_dma[first_entry].buf = des;
4080 tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
be8b38a7
OBL
4081 tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4082 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
f748be53 4083
a993db88
JA
4084 if (priv->dma_cap.addr64 <= 32) {
4085 first->des0 = cpu_to_le32(des);
f748be53 4086
a993db88
JA
4087 /* Fill start of payload in buff2 of first descriptor */
4088 if (pay_len)
4089 first->des1 = cpu_to_le32(des + proto_hdr_len);
f748be53 4090
a993db88
JA
4091 /* If needed take extra descriptors to fill the remaining payload */
4092 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4093 } else {
4094 stmmac_set_desc_addr(priv, first, des);
4095 tmp_pay_len = pay_len;
34c15202 4096 des += proto_hdr_len;
b2f07199 4097 pay_len = 0;
a993db88 4098 }
f748be53 4099
ce736788 4100 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
f748be53
AT
4101
4102 /* Prepare fragments */
4103 for (i = 0; i < nfrags; i++) {
4104 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4105
4106 des = skb_frag_dma_map(priv->device, frag, 0,
4107 skb_frag_size(frag),
4108 DMA_TO_DEVICE);
937071c1
TR
4109 if (dma_mapping_error(priv->device, des))
4110 goto dma_map_err;
f748be53
AT
4111
4112 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
ce736788 4113 (i == nfrags - 1), queue);
f748be53 4114
ce736788
JP
4115 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4116 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
ce736788 4117 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
be8b38a7 4118 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
f748be53
AT
4119 }
4120
ce736788 4121 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
f748be53 4122
05cf0d1b
NC
4123 /* Only the last descriptor gets to point to the skb. */
4124 tx_q->tx_skbuff[tx_q->cur_tx] = skb;
be8b38a7 4125 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
05cf0d1b 4126
7df4a3a7 4127 /* Manage tx mitigation */
c2837423
JA
4128 tx_packets = (tx_q->cur_tx + 1) - first_tx;
4129 tx_q->tx_count_frames += tx_packets;
4130
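	/* Decide whether this submission asks for a TX completion interrupt:
	 * always when a HW timestamp is requested, never (from here) when
	 * tx_coal_frames is zero, and otherwise whenever the number of queued
	 * frames crosses the tx_coal_frames threshold for this queue.
	 */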
4131 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4132 set_ic = true;
db2f2842 4133 else if (!priv->tx_coal_frames[queue])
c2837423 4134 set_ic = false;
db2f2842 4135 else if (tx_packets > priv->tx_coal_frames[queue])
c2837423 4136 set_ic = true;
db2f2842
OBL
4137 else if ((tx_q->tx_count_frames %
4138 priv->tx_coal_frames[queue]) < tx_packets)
c2837423
JA
4139 set_ic = true;
4140 else
4141 set_ic = false;
4142
4143 if (set_ic) {
579a25a8
JA
4144 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4145 desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4146 else
4147 desc = &tx_q->dma_tx[tx_q->cur_tx];
4148
7df4a3a7
JA
4149 tx_q->tx_count_frames = 0;
4150 stmmac_set_tx_ic(priv, desc);
4151 priv->xstats.tx_set_ic_bit++;
4152 }
4153
05cf0d1b
NC
4154 /* We've used all descriptors we need for this skb, however,
4155 * advance cur_tx so that it references a fresh descriptor.
4156 * ndo_start_xmit will fill this descriptor the next time it's
4157 * called and stmmac_tx_clean may clean up to this descriptor.
4158 */
aa042f60 4159 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
f748be53 4160
ce736788 4161 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
b3e51069
LC
4162 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4163 __func__);
c22a3f48 4164 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
f748be53
AT
4165 }
4166
4167 dev->stats.tx_bytes += skb->len;
4168 priv->xstats.tx_tso_frames++;
4169 priv->xstats.tx_tso_nfrags += nfrags;
4170
8000ddc0
JA
4171 if (priv->sarc_type)
4172 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4173
74abc9b1 4174 skb_tx_timestamp(skb);
f748be53
AT
4175
4176 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4177 priv->hwts_tx_en)) {
4178 /* declare that device is doing timestamping */
4179 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
42de047d 4180 stmmac_enable_tx_timestamp(priv, first);
f748be53
AT
4181 }
4182
4183 /* Complete the first descriptor before granting the DMA */
42de047d 4184 stmmac_prepare_tso_tx_desc(priv, first, 1,
f748be53
AT
4185 proto_hdr_len,
4186 pay_len,
ce736788 4187 1, tx_q->tx_skbuff_dma[first_entry].last_segment,
b7766206 4188 hdr / 4, (skb->len - proto_hdr_len));
f748be53
AT
4189
4190 /* If context desc is used to change MSS */
15d2ee42
NC
4191 if (mss_desc) {
4192 /* Make sure that first descriptor has been completely
4193 * written, including its own bit. This is because MSS is
4194 * actually before first descriptor, so we need to make
4195 * sure that MSS's own bit is the last thing written.
4196 */
4197 dma_wmb();
42de047d 4198 stmmac_set_tx_owner(priv, mss_desc);
15d2ee42 4199 }
f748be53 4200
f748be53
AT
4201 if (netif_msg_pktdata(priv)) {
4202 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
ce736788
JP
4203 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4204 tx_q->cur_tx, first, nfrags);
f748be53
AT
4205 pr_info(">>> frame to be transmitted: ");
4206 print_pkt(skb->data, skb_headlen(skb));
4207 }
4208
c22a3f48 4209 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
f748be53 4210
d96febed 4211 stmmac_flush_tx_descriptors(priv, queue);
4772f26d 4212 stmmac_tx_timer_arm(priv, queue);
f748be53 4213
f748be53
AT
4214 return NETDEV_TX_OK;
4215
4216dma_map_err:
f748be53
AT
4217 dev_err(priv->device, "Tx dma map failed\n");
4218 dev_kfree_skb(skb);
4219 priv->dev->stats.tx_dropped++;
4220 return NETDEV_TX_OK;
4221}
4222
47dd7a54 4223/**
732fdf0e 4224 * stmmac_xmit - Tx entry point of the driver
47dd7a54
GC
4225 * @skb : the socket buffer
4226 * @dev : device pointer
32ceabca
GC
4227 * Description : this is the tx entry point of the driver.
4228 * It programs the chain or the ring and supports oversized frames
4229 * and the SG feature.
47dd7a54
GC
4230 */
4231static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4232{
c2837423 4233 unsigned int first_entry, tx_packets, enh_desc;
47dd7a54 4234 struct stmmac_priv *priv = netdev_priv(dev);
0e80bdc9 4235 unsigned int nopaged_len = skb_headlen(skb);
4a7d666a 4236 int i, csum_insertion = 0, is_jumbo = 0;
ce736788 4237 u32 queue = skb_get_queue_mapping(skb);
47dd7a54 4238 int nfrags = skb_shinfo(skb)->nr_frags;
b7766206 4239 int gso = skb_shinfo(skb)->gso_type;
579a25a8 4240 struct dma_edesc *tbs_desc = NULL;
47dd7a54 4241 struct dma_desc *desc, *first;
ce736788 4242 struct stmmac_tx_queue *tx_q;
c2837423 4243 bool has_vlan, set_ic;
d96febed 4244 int entry, first_tx;
a993db88 4245 dma_addr_t des;
f748be53 4246
ce736788 4247 tx_q = &priv->tx_queue[queue];
c2837423 4248 first_tx = tx_q->cur_tx;
ce736788 4249
be1c7eae 4250 if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
e2cd682d
JA
4251 stmmac_disable_eee_mode(priv);
4252
f748be53
AT
4253 /* Manage oversized TCP frames for GMAC4 device */
4254 if (skb_is_gso(skb) && priv->tso) {
b7766206
JA
4255 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4256 return stmmac_tso_xmit(skb, dev);
4257 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
f748be53
AT
4258 return stmmac_tso_xmit(skb, dev);
4259 }
47dd7a54 4260
ce736788 4261 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
c22a3f48
JP
4262 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4263 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4264 queue));
47dd7a54 4265 /* This is a hard error, log it. */
38ddc59d
LC
4266 netdev_err(priv->dev,
4267 "%s: Tx Ring full when queue awake\n",
4268 __func__);
47dd7a54
GC
4269 }
4270 return NETDEV_TX_BUSY;
4271 }
4272
30d93227
JA
4273 /* Check if VLAN can be inserted by HW */
4274 has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4275
ce736788 4276 entry = tx_q->cur_tx;
0e80bdc9 4277 first_entry = entry;
b4c9784c 4278 WARN_ON(tx_q->tx_skbuff[first_entry]);
47dd7a54 4279
5e982f3b 4280 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
47dd7a54 4281
0e80bdc9 4282 if (likely(priv->extend_desc))
ce736788 4283 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
579a25a8
JA
4284 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4285 desc = &tx_q->dma_entx[entry].basic;
c24602ef 4286 else
ce736788 4287 desc = tx_q->dma_tx + entry;
c24602ef 4288
47dd7a54
GC
4289 first = desc;
4290
30d93227
JA
4291 if (has_vlan)
4292 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4293
0e80bdc9 4294 enh_desc = priv->plat->enh_desc;
4a7d666a 4295 /* To program the descriptors according to the size of the frame */
29896a67 4296 if (enh_desc)
2c520b1c 4297 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
29896a67 4298
63a550fc 4299 if (unlikely(is_jumbo)) {
2c520b1c 4300 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
63a550fc 4301 if (unlikely(entry < 0) && (entry != -EINVAL))
362b37be 4302 goto dma_map_err;
29896a67 4303 }
47dd7a54
GC
4304
4305 for (i = 0; i < nfrags; i++) {
9e903e08
ED
4306 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4307 int len = skb_frag_size(frag);
be434d50 4308 bool last_segment = (i == (nfrags - 1));
47dd7a54 4309
aa042f60 4310 entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
b4c9784c 4311 WARN_ON(tx_q->tx_skbuff[entry]);
e3ad57c9 4312
0e80bdc9 4313 if (likely(priv->extend_desc))
ce736788 4314 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
579a25a8
JA
4315 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4316 desc = &tx_q->dma_entx[entry].basic;
c24602ef 4317 else
ce736788 4318 desc = tx_q->dma_tx + entry;
47dd7a54 4319
f748be53
AT
4320 des = skb_frag_dma_map(priv->device, frag, 0, len,
4321 DMA_TO_DEVICE);
4322 if (dma_mapping_error(priv->device, des))
362b37be
GC
4323 goto dma_map_err; /* should reuse desc w/o issues */
4324
ce736788 4325 tx_q->tx_skbuff_dma[entry].buf = des;
6844171d
JA
4326
4327 stmmac_set_desc_addr(priv, desc, des);
f748be53 4328
ce736788
JP
4329 tx_q->tx_skbuff_dma[entry].map_as_page = true;
4330 tx_q->tx_skbuff_dma[entry].len = len;
4331 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
be8b38a7 4332 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
0e80bdc9
GC
4333
4334 /* Prepare the descriptor and set the own bit too */
42de047d
JA
4335 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4336 priv->mode, 1, last_segment, skb->len);
47dd7a54
GC
4337 }
4338
05cf0d1b
NC
4339 /* Only the last descriptor gets to point to the skb. */
4340 tx_q->tx_skbuff[entry] = skb;
be8b38a7 4341 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
e3ad57c9 4342
7df4a3a7
JA
4343 /* According to the coalesce parameter the IC bit for the latest
4344 * segment is reset and the timer re-started to clean the tx status.
4345 * This approach takes care about the fragments: desc is the first
4346 * element in case of no SG.
4347 */
c2837423
JA
4348 tx_packets = (entry + 1) - first_tx;
4349 tx_q->tx_count_frames += tx_packets;
4350
4351 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4352 set_ic = true;
db2f2842 4353 else if (!priv->tx_coal_frames[queue])
c2837423 4354 set_ic = false;
db2f2842 4355 else if (tx_packets > priv->tx_coal_frames[queue])
c2837423 4356 set_ic = true;
db2f2842
OBL
4357 else if ((tx_q->tx_count_frames %
4358 priv->tx_coal_frames[queue]) < tx_packets)
c2837423
JA
4359 set_ic = true;
4360 else
4361 set_ic = false;
4362
4363 if (set_ic) {
7df4a3a7
JA
4364 if (likely(priv->extend_desc))
4365 desc = &tx_q->dma_etx[entry].basic;
579a25a8
JA
4366 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4367 desc = &tx_q->dma_entx[entry].basic;
7df4a3a7
JA
4368 else
4369 desc = &tx_q->dma_tx[entry];
4370
4371 tx_q->tx_count_frames = 0;
4372 stmmac_set_tx_ic(priv, desc);
4373 priv->xstats.tx_set_ic_bit++;
4374 }
4375
05cf0d1b
NC
4376 /* We've used all descriptors we need for this skb, however,
4377 * advance cur_tx so that it references a fresh descriptor.
4378 * ndo_start_xmit will fill this descriptor the next time it's
4379 * called and stmmac_tx_clean may clean up to this descriptor.
4380 */
aa042f60 4381 entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
ce736788 4382 tx_q->cur_tx = entry;
47dd7a54 4383
47dd7a54 4384 if (netif_msg_pktdata(priv)) {
38ddc59d
LC
4385 netdev_dbg(priv->dev,
4386 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
ce736788 4387 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
38ddc59d 4388 entry, first, nfrags);
83d7af64 4389
38ddc59d 4390 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
47dd7a54
GC
4391 print_pkt(skb->data, skb->len);
4392 }
0e80bdc9 4393
ce736788 4394 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
b3e51069
LC
4395 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4396 __func__);
c22a3f48 4397 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
47dd7a54
GC
4398 }
4399
4400 dev->stats.tx_bytes += skb->len;
4401
8000ddc0
JA
4402 if (priv->sarc_type)
4403 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4404
74abc9b1 4405 skb_tx_timestamp(skb);
3e82ce12 4406
0e80bdc9
GC
4407 /* Ready to fill the first descriptor and set the OWN bit w/o any
4408 * problems because all the descriptors are actually ready to be
4409 * passed to the DMA engine.
4410 */
4411 if (likely(!is_jumbo)) {
4412 bool last_segment = (nfrags == 0);
4413
f748be53
AT
4414 des = dma_map_single(priv->device, skb->data,
4415 nopaged_len, DMA_TO_DEVICE);
4416 if (dma_mapping_error(priv->device, des))
0e80bdc9
GC
4417 goto dma_map_err;
4418
ce736788 4419 tx_q->tx_skbuff_dma[first_entry].buf = des;
be8b38a7
OBL
4420 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4421 tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
6844171d
JA
4422
4423 stmmac_set_desc_addr(priv, first, des);
f748be53 4424
ce736788
JP
4425 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4426 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
0e80bdc9
GC
4427
4428 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4429 priv->hwts_tx_en)) {
4430 /* declare that device is doing timestamping */
4431 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
42de047d 4432 stmmac_enable_tx_timestamp(priv, first);
0e80bdc9
GC
4433 }
4434
4435 /* Prepare the first descriptor setting the OWN bit too */
42de047d 4436 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
579a25a8 4437 csum_insertion, priv->mode, 0, last_segment,
42de047d 4438 skb->len);
0e80bdc9
GC
4439 }
4440
579a25a8
JA
4441 if (tx_q->tbs & STMMAC_TBS_EN) {
4442 struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4443
4444 tbs_desc = &tx_q->dma_entx[first_entry];
4445 stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4446 }
4447
4448 stmmac_set_tx_owner(priv, first);
4449
c22a3f48 4450 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
f748be53 4451
f1565c60 4452 stmmac_enable_dma_transmission(priv, priv->ioaddr);
8fce3331 4453
d96febed 4454 stmmac_flush_tx_descriptors(priv, queue);
4772f26d 4455 stmmac_tx_timer_arm(priv, queue);
52f64fae 4456
362b37be 4457 return NETDEV_TX_OK;
a9097a96 4458
362b37be 4459dma_map_err:
38ddc59d 4460 netdev_err(priv->dev, "Tx DMA map failed\n");
362b37be
GC
4461 dev_kfree_skb(skb);
4462 priv->dev->stats.tx_dropped++;
47dd7a54
GC
4463 return NETDEV_TX_OK;
4464}
4465
b9381985
VB
4466static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4467{
ab188e8f
EN
4468 struct vlan_ethhdr *veth;
4469 __be16 vlan_proto;
b9381985
VB
4470 u16 vlanid;
4471
ab188e8f
EN
4472 veth = (struct vlan_ethhdr *)skb->data;
4473 vlan_proto = veth->h_vlan_proto;
4474
4475 if ((vlan_proto == htons(ETH_P_8021Q) &&
4476 dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4477 (vlan_proto == htons(ETH_P_8021AD) &&
4478 dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
b9381985 4479 /* pop the vlan tag */
ab188e8f
EN
4480 vlanid = ntohs(veth->h_vlan_TCI);
4481 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
b9381985 4482 skb_pull(skb, VLAN_HLEN);
ab188e8f 4483 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
b9381985
VB
4484 }
4485}
4486
32ceabca 4487/**
732fdf0e 4488 * stmmac_rx_refill - refill used skb preallocated buffers
32ceabca 4489 * @priv: driver private structure
54139cf3 4490 * @queue: RX queue index
32ceabca
GC
4491 * Description : this reallocates the RX buffers used by the reception
4492 * process, which is based on zero-copy.
4493 */
54139cf3 4494static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
47dd7a54 4495{
54139cf3 4496 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
5fabb012 4497 int dirty = stmmac_rx_dirty(priv, queue);
54139cf3
JP
4498 unsigned int entry = rx_q->dirty_rx;
4499
e3ad57c9 4500 while (dirty-- > 0) {
2af6106a 4501 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
c24602ef 4502 struct dma_desc *p;
d429b66e 4503 bool use_rx_wd;
c24602ef
GC
4504
4505 if (priv->extend_desc)
54139cf3 4506 p = (struct dma_desc *)(rx_q->dma_erx + entry);
c24602ef 4507 else
54139cf3 4508 p = rx_q->dma_rx + entry;
c24602ef 4509
2af6106a
JA
4510 if (!buf->page) {
4511 buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
4512 if (!buf->page)
362b37be 4513 break;
47dd7a54 4514 }
2af6106a 4515
67afd6d1
JA
4516 if (priv->sph && !buf->sec_page) {
4517 buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
4518 if (!buf->sec_page)
4519 break;
4520
4521 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
67afd6d1
JA
4522 }
4523
5fabb012 4524 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
3caa61c2 4525
2af6106a 4526 stmmac_set_desc_addr(priv, p, buf->addr);
396e13e1
JZ
4527 if (priv->sph)
4528 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4529 else
4530 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
2af6106a 4531 stmmac_refill_desc3(priv, rx_q, p);
f748be53 4532
d429b66e 4533 rx_q->rx_count_frames++;
db2f2842
OBL
4534 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4535 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
6fa9d691 4536 rx_q->rx_count_frames = 0;
09146abe 4537
db2f2842 4538 use_rx_wd = !priv->rx_coal_frames[queue];
09146abe
JA
4539 use_rx_wd |= rx_q->rx_count_frames > 0;
4540 if (!priv->use_riwt)
4541 use_rx_wd = false;
d429b66e 4542
ad688cdb 4543 dma_wmb();
2af6106a 4544 stmmac_set_rx_owner(priv, p, use_rx_wd);
e3ad57c9 4545
aa042f60 4546 entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
47dd7a54 4547 }
54139cf3 4548 rx_q->dirty_rx = entry;
858a31ff
JA
4549 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4550 (rx_q->dirty_rx * sizeof(struct dma_desc));
4523a561 4551 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
47dd7a54
GC
4552}
4553
88ebe2cf
JA
4554static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4555 struct dma_desc *p,
4556 int status, unsigned int len)
4557{
88ebe2cf 4558 unsigned int plen = 0, hlen = 0;
31f2760e 4559 int coe = priv->hw->rx_csum;
88ebe2cf
JA
4560
4561 /* Not first descriptor, buffer is always zero */
4562 if (priv->sph && len)
4563 return 0;
4564
4565 /* First descriptor, get split header length */
31f2760e 4566 stmmac_get_rx_header_len(priv, p, &hlen);
88ebe2cf
JA
4567 if (priv->sph && hlen) {
4568 priv->xstats.rx_split_hdr_pkt_n++;
4569 return hlen;
4570 }
4571
4572 /* First descriptor, not last descriptor and not split header */
4573 if (status & rx_not_ls)
4574 return priv->dma_buf_sz;
4575
4576 plen = stmmac_get_rx_frame_len(priv, p, coe);
4577
4578 /* First descriptor and last descriptor and not split header */
4579 return min_t(unsigned int, priv->dma_buf_sz, plen);
4580}
4581
4582static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4583 struct dma_desc *p,
4584 int status, unsigned int len)
4585{
4586 int coe = priv->hw->rx_csum;
4587 unsigned int plen = 0;
4588
4589 /* Not split header, buffer is not available */
4590 if (!priv->sph)
4591 return 0;
4592
4593 /* Not last descriptor */
4594 if (status & rx_not_ls)
4595 return priv->dma_buf_sz;
4596
4597 plen = stmmac_get_rx_frame_len(priv, p, coe);
4598
4599 /* Last descriptor */
4600 return plen - len;
4601}
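/* Worked example for the two helpers above (illustrative numbers): with split
 * header (SPH) enabled and a 1400 byte frame whose 128 byte header was split
 * off by the hardware, stmmac_rx_buf1_len() returns the 128 byte header on
 * the first descriptor, and stmmac_rx_buf2_len() then returns plen - len,
 * i.e. the remaining 1272 bytes of payload, on the last descriptor.
 */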
4602
be8b38a7 4603static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
8b278a5b 4604 struct xdp_frame *xdpf, bool dma_map)
be8b38a7
OBL
4605{
4606 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
be8b38a7
OBL
4607 unsigned int entry = tx_q->cur_tx;
4608 struct dma_desc *tx_desc;
4609 dma_addr_t dma_addr;
4610 bool set_ic;
4611
4612 if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4613 return STMMAC_XDP_CONSUMED;
4614
4615 if (likely(priv->extend_desc))
4616 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4617 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4618 tx_desc = &tx_q->dma_entx[entry].basic;
4619 else
4620 tx_desc = tx_q->dma_tx + entry;
4621
8b278a5b
OBL
4622 if (dma_map) {
4623 dma_addr = dma_map_single(priv->device, xdpf->data,
4624 xdpf->len, DMA_TO_DEVICE);
4625 if (dma_mapping_error(priv->device, dma_addr))
4626 return STMMAC_XDP_CONSUMED;
4627
4628 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4629 } else {
4630 struct page *page = virt_to_page(xdpf->data);
4631
4632 dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4633 xdpf->headroom;
4634 dma_sync_single_for_device(priv->device, dma_addr,
4635 xdpf->len, DMA_BIDIRECTIONAL);
be8b38a7 4636
8b278a5b
OBL
4637 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4638 }
be8b38a7
OBL
4639
4640 tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4641 tx_q->tx_skbuff_dma[entry].map_as_page = false;
4642 tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4643 tx_q->tx_skbuff_dma[entry].last_segment = true;
4644 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4645
4646 tx_q->xdpf[entry] = xdpf;
4647
4648 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4649
4650 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4651 true, priv->mode, true, true,
4652 xdpf->len);
4653
4654 tx_q->tx_count_frames++;
4655
4656 if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4657 set_ic = true;
4658 else
4659 set_ic = false;
4660
4661 if (set_ic) {
4662 tx_q->tx_count_frames = 0;
4663 stmmac_set_tx_ic(priv, tx_desc);
4664 priv->xstats.tx_set_ic_bit++;
4665 }
4666
4667 stmmac_enable_dma_transmission(priv, priv->ioaddr);
4668
4669 entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
4670 tx_q->cur_tx = entry;
4671
4672 return STMMAC_XDP_TX;
4673}
4674
4675static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4676 int cpu)
4677{
4678 int index = cpu;
4679
4680 if (unlikely(index < 0))
4681 index = 0;
4682
4683 while (index >= priv->plat->tx_queues_to_use)
4684 index -= priv->plat->tx_queues_to_use;
4685
4686 return index;
4687}
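/* The loop above is simply a modulo reduction of the CPU number onto the
 * available TX queues. An equivalent (hypothetical) formulation, assuming a
 * non-negative cpu id:
 */
static inline int example_xdp_queue_for_cpu(struct stmmac_priv *priv, int cpu)
{
	return cpu % priv->plat->tx_queues_to_use;
}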
4688
4689static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4690 struct xdp_buff *xdp)
4691{
4692 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4693 int cpu = smp_processor_id();
4694 struct netdev_queue *nq;
4695 int queue;
4696 int res;
4697
4698 if (unlikely(!xdpf))
4699 return STMMAC_XDP_CONSUMED;
4700
4701 queue = stmmac_xdp_get_tx_queue(priv, cpu);
4702 nq = netdev_get_tx_queue(priv->dev, queue);
4703
4704 __netif_tx_lock(nq, cpu);
4705 /* Avoids TX time-out as we are sharing with slow path */
4706 nq->trans_start = jiffies;
4707
8b278a5b 4708 res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
be8b38a7
OBL
4709 if (res == STMMAC_XDP_TX)
4710 stmmac_flush_tx_descriptors(priv, queue);
4711
4712 __netif_tx_unlock(nq);
4713
4714 return res;
4715}
4716
bba71cac
OBL
4717static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
4718 struct bpf_prog *prog,
4719 struct xdp_buff *xdp)
5fabb012 4720{
5fabb012 4721 u32 act;
bba71cac 4722 int res;
5fabb012
OBL
4723
4724 act = bpf_prog_run_xdp(prog, xdp);
4725 switch (act) {
4726 case XDP_PASS:
4727 res = STMMAC_XDP_PASS;
4728 break;
be8b38a7
OBL
4729 case XDP_TX:
4730 res = stmmac_xdp_xmit_back(priv, xdp);
4731 break;
8b278a5b
OBL
4732 case XDP_REDIRECT:
4733 if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
4734 res = STMMAC_XDP_CONSUMED;
4735 else
4736 res = STMMAC_XDP_REDIRECT;
4737 break;
5fabb012
OBL
4738 default:
4739 bpf_warn_invalid_xdp_action(act);
4740 fallthrough;
4741 case XDP_ABORTED:
4742 trace_xdp_exception(priv->dev, prog, act);
4743 fallthrough;
4744 case XDP_DROP:
4745 res = STMMAC_XDP_CONSUMED;
4746 break;
4747 }
4748
bba71cac
OBL
4749 return res;
4750}
4751
4752static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
4753 struct xdp_buff *xdp)
4754{
4755 struct bpf_prog *prog;
4756 int res;
4757
bba71cac
OBL
4758 prog = READ_ONCE(priv->xdp_prog);
4759 if (!prog) {
4760 res = STMMAC_XDP_PASS;
2f1e432d 4761 goto out;
bba71cac
OBL
4762 }
4763
4764 res = __stmmac_xdp_run_prog(priv, prog, xdp);
2f1e432d 4765out:
5fabb012
OBL
4766 return ERR_PTR(-res);
4767}
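/* The verdict is folded into the returned pointer: STMMAC_XDP_PASS (0) comes
 * back as a non-error (NULL) pointer, while the other verdicts are encoded as
 * ERR_PTR(-verdict), which a caller can decode again with -PTR_ERR().
 */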
4768
be8b38a7
OBL
4769static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
4770 int xdp_status)
4771{
4772 int cpu = smp_processor_id();
4773 int queue;
4774
4775 queue = stmmac_xdp_get_tx_queue(priv, cpu);
4776
4777 if (xdp_status & STMMAC_XDP_TX)
4778 stmmac_tx_timer_arm(priv, queue);
8b278a5b
OBL
4779
4780 if (xdp_status & STMMAC_XDP_REDIRECT)
4781 xdp_do_flush();
be8b38a7
OBL
4782}
4783
bba2556e
OBL
4784static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
4785 struct xdp_buff *xdp)
4786{
4787 unsigned int metasize = xdp->data - xdp->data_meta;
4788 unsigned int datasize = xdp->data_end - xdp->data;
4789 struct sk_buff *skb;
4790
132c32ee 4791 skb = __napi_alloc_skb(&ch->rxtx_napi,
bba2556e
OBL
4792 xdp->data_end - xdp->data_hard_start,
4793 GFP_ATOMIC | __GFP_NOWARN);
4794 if (unlikely(!skb))
4795 return NULL;
4796
4797 skb_reserve(skb, xdp->data - xdp->data_hard_start);
4798 memcpy(__skb_put(skb, datasize), xdp->data, datasize);
4799 if (metasize)
4800 skb_metadata_set(skb, metasize);
4801
4802 return skb;
4803}
4804
4805static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
4806 struct dma_desc *p, struct dma_desc *np,
4807 struct xdp_buff *xdp)
4808{
4809 struct stmmac_channel *ch = &priv->channel[queue];
4810 unsigned int len = xdp->data_end - xdp->data;
4811 enum pkt_hash_types hash_type;
4812 int coe = priv->hw->rx_csum;
4813 struct sk_buff *skb;
4814 u32 hash;
4815
4816 skb = stmmac_construct_skb_zc(ch, xdp);
4817 if (!skb) {
4818 priv->dev->stats.rx_dropped++;
4819 return;
4820 }
4821
4822 stmmac_get_rx_hwtstamp(priv, p, np, skb);
4823 stmmac_rx_vlan(priv->dev, skb);
4824 skb->protocol = eth_type_trans(skb, priv->dev);
4825
4826 if (unlikely(!coe))
4827 skb_checksum_none_assert(skb);
4828 else
4829 skb->ip_summed = CHECKSUM_UNNECESSARY;
4830
4831 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
4832 skb_set_hash(skb, hash, hash_type);
4833
4834 skb_record_rx_queue(skb, queue);
132c32ee 4835 napi_gro_receive(&ch->rxtx_napi, skb);
bba2556e
OBL
4836
4837 priv->dev->stats.rx_packets++;
4838 priv->dev->stats.rx_bytes += len;
4839}
4840
4841static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
4842{
4843 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4844 unsigned int entry = rx_q->dirty_rx;
4845 struct dma_desc *rx_desc = NULL;
4846 bool ret = true;
4847
4848 budget = min(budget, stmmac_rx_dirty(priv, queue));
4849
4850 while (budget-- > 0 && entry != rx_q->cur_rx) {
4851 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4852 dma_addr_t dma_addr;
4853 bool use_rx_wd;
4854
4855 if (!buf->xdp) {
4856 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
4857 if (!buf->xdp) {
4858 ret = false;
4859 break;
4860 }
4861 }
4862
4863 if (priv->extend_desc)
4864 rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
4865 else
4866 rx_desc = rx_q->dma_rx + entry;
4867
4868 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
4869 stmmac_set_desc_addr(priv, rx_desc, dma_addr);
4870 stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
4871 stmmac_refill_desc3(priv, rx_q, rx_desc);
4872
4873 rx_q->rx_count_frames++;
4874 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4875 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4876 rx_q->rx_count_frames = 0;
4877
4878 use_rx_wd = !priv->rx_coal_frames[queue];
4879 use_rx_wd |= rx_q->rx_count_frames > 0;
4880 if (!priv->use_riwt)
4881 use_rx_wd = false;
4882
4883 dma_wmb();
4884 stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
4885
4886 entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
4887 }
4888
4889 if (rx_desc) {
4890 rx_q->dirty_rx = entry;
4891 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4892 (rx_q->dirty_rx * sizeof(struct dma_desc));
4893 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4894 }
4895
4896 return ret;
4897}
4898
4899static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
4900{
4901 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4902 unsigned int count = 0, error = 0, len = 0;
4903 int dirty = stmmac_rx_dirty(priv, queue);
4904 unsigned int next_entry = rx_q->cur_rx;
4905 unsigned int desc_size;
4906 struct bpf_prog *prog;
4907 bool failure = false;
4908 int xdp_status = 0;
4909 int status = 0;
4910
4911 if (netif_msg_rx_status(priv)) {
4912 void *rx_head;
4913
4914 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
4915 if (priv->extend_desc) {
4916 rx_head = (void *)rx_q->dma_erx;
4917 desc_size = sizeof(struct dma_extended_desc);
4918 } else {
4919 rx_head = (void *)rx_q->dma_rx;
4920 desc_size = sizeof(struct dma_desc);
4921 }
4922
4923 stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
4924 rx_q->dma_rx_phy, desc_size);
4925 }
4926 while (count < limit) {
4927 struct stmmac_rx_buffer *buf;
4928 unsigned int buf1_len = 0;
4929 struct dma_desc *np, *p;
4930 int entry;
4931 int res;
4932
4933 if (!count && rx_q->state_saved) {
4934 error = rx_q->state.error;
4935 len = rx_q->state.len;
4936 } else {
4937 rx_q->state_saved = false;
4938 error = 0;
4939 len = 0;
4940 }
4941
4942 if (count >= limit)
4943 break;
4944
4945read_again:
4946 buf1_len = 0;
4947 entry = next_entry;
4948 buf = &rx_q->buf_pool[entry];
4949
4950 if (dirty >= STMMAC_RX_FILL_BATCH) {
4951 failure = failure ||
4952 !stmmac_rx_refill_zc(priv, queue, dirty);
4953 dirty = 0;
4954 }
4955
4956 if (priv->extend_desc)
4957 p = (struct dma_desc *)(rx_q->dma_erx + entry);
4958 else
4959 p = rx_q->dma_rx + entry;
4960
4961 /* read the status of the incoming frame */
4962 status = stmmac_rx_status(priv, &priv->dev->stats,
4963 &priv->xstats, p);
4964 /* check if managed by the DMA otherwise go ahead */
4965 if (unlikely(status & dma_own))
4966 break;
4967
4968 /* Prefetch the next RX descriptor */
4969 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
4970 priv->dma_rx_size);
4971 next_entry = rx_q->cur_rx;
4972
4973 if (priv->extend_desc)
4974 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
4975 else
4976 np = rx_q->dma_rx + next_entry;
4977
4978 prefetch(np);
4979
2b9fff64
SYS
 4980 /* Ensure a valid XSK buffer before proceeding */
4981 if (!buf->xdp)
4982 break;
4983
bba2556e
OBL
4984 if (priv->extend_desc)
4985 stmmac_rx_extended_status(priv, &priv->dev->stats,
4986 &priv->xstats,
4987 rx_q->dma_erx + entry);
4988 if (unlikely(status == discard_frame)) {
4989 xsk_buff_free(buf->xdp);
4990 buf->xdp = NULL;
4991 dirty++;
4992 error = 1;
4993 if (!priv->hwts_rx_en)
4994 priv->dev->stats.rx_errors++;
4995 }
4996
4997 if (unlikely(error && (status & rx_not_ls)))
4998 goto read_again;
4999 if (unlikely(error)) {
5000 count++;
5001 continue;
5002 }
5003
bba2556e
OBL
 5004 /* XSK pool expects RX frames to be mapped 1:1 to XSK buffers */
5005 if (likely(status & rx_not_ls)) {
5006 xsk_buff_free(buf->xdp);
5007 buf->xdp = NULL;
5008 dirty++;
5009 count++;
5010 goto read_again;
5011 }
5012
 5013 /* XDP ZC frames only support primary buffers for now */
5014 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5015 len += buf1_len;
5016
5017 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
5018 * Type frames (LLC/LLC-SNAP)
5019 *
5020 * llc_snap is never checked in GMAC >= 4, so this ACS
5021 * feature is always disabled and packets need to be
5022 * stripped manually.
5023 */
5024 if (likely(!(status & rx_not_ls)) &&
5025 (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
5026 unlikely(status != llc_snap))) {
5027 buf1_len -= ETH_FCS_LEN;
5028 len -= ETH_FCS_LEN;
5029 }
5030
 5031 /* RX buffer is good and fits into an XSK pool buffer */
5032 buf->xdp->data_end = buf->xdp->data + buf1_len;
5033 xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
5034
bba2556e
OBL
5035 prog = READ_ONCE(priv->xdp_prog);
5036 res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
bba2556e
OBL
5037
5038 switch (res) {
5039 case STMMAC_XDP_PASS:
5040 stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5041 xsk_buff_free(buf->xdp);
5042 break;
5043 case STMMAC_XDP_CONSUMED:
5044 xsk_buff_free(buf->xdp);
5045 priv->dev->stats.rx_dropped++;
5046 break;
5047 case STMMAC_XDP_TX:
5048 case STMMAC_XDP_REDIRECT:
5049 xdp_status |= res;
5050 break;
5051 }
5052
5053 buf->xdp = NULL;
5054 dirty++;
5055 count++;
5056 }
5057
5058 if (status & rx_not_ls) {
5059 rx_q->state_saved = true;
5060 rx_q->state.error = error;
5061 rx_q->state.len = len;
5062 }
5063
5064 stmmac_finalize_xdp_rx(priv, xdp_status);
5065
68e9c5de
VA
5066 priv->xstats.rx_pkt_n += count;
5067 priv->xstats.rxq_stats[queue].rx_pkt_n += count;
5068
bba2556e
OBL
5069 if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5070 if (failure || stmmac_rx_dirty(priv, queue) > 0)
5071 xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5072 else
5073 xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5074
5075 return (int)count;
5076 }
5077
5078 return failure ? limit : (int)count;
5079}
5080
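/* Illustrative sketch (not part of the driver): the zero-copy receive
 * loop above ORs per-frame XDP verdicts into one bitmask and defers the
 * TX kick / redirect flush to a single call per NAPI run. The flag
 * values and the flush_pending() helper are hypothetical stand-ins for
 * the driver's STMMAC_XDP_* bits and stmmac_finalize_xdp_rx().
 */
#include <stdio.h>

#define XDP_RES_PASS     0x0
#define XDP_RES_CONSUMED 0x1
#define XDP_RES_TX       0x2
#define XDP_RES_REDIRECT 0x4

static void flush_pending(int status)
{
	if (status & XDP_RES_TX)
		printf("kick the TX queue once for all queued frames\n");
	if (status & XDP_RES_REDIRECT)
		printf("flush the redirect maps once\n");
}

int main(void)
{
	int verdicts[] = { XDP_RES_PASS, XDP_RES_TX, XDP_RES_REDIRECT, XDP_RES_TX };
	int status = 0;
	unsigned int i;

	for (i = 0; i < sizeof(verdicts) / sizeof(verdicts[0]); i++)
		status |= verdicts[i];	/* accumulate, never flush per frame */

	flush_pending(status);		/* one flush at the end of the poll */
	return 0;
}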
32ceabca 5081/**
732fdf0e 5082 * stmmac_rx - manage the receive process
32ceabca 5083 * @priv: driver private structure
54139cf3
JP
 5084 * @limit: napi budget
5085 * @queue: RX queue index.
32ceabca
GC
 5086 * Description : this is the function called by the napi poll method.
5087 * It gets all the frames inside the ring.
5088 */
54139cf3 5089static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
47dd7a54 5090{
54139cf3 5091 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
8fce3331 5092 struct stmmac_channel *ch = &priv->channel[queue];
ec222003
JA
5093 unsigned int count = 0, error = 0, len = 0;
5094 int status = 0, coe = priv->hw->rx_csum;
07b39753 5095 unsigned int next_entry = rx_q->cur_rx;
5fabb012 5096 enum dma_data_direction dma_dir;
bfaf91ca 5097 unsigned int desc_size;
ec222003 5098 struct sk_buff *skb = NULL;
5fabb012 5099 struct xdp_buff xdp;
be8b38a7 5100 int xdp_status = 0;
5fabb012
OBL
5101 int buf_sz;
5102
5103 dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5104 buf_sz = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
47dd7a54 5105
83d7af64 5106 if (netif_msg_rx_status(priv)) {
d0225e7d
AT
5107 void *rx_head;
5108
38ddc59d 5109 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
bfaf91ca 5110 if (priv->extend_desc) {
54139cf3 5111 rx_head = (void *)rx_q->dma_erx;
bfaf91ca
JZ
5112 desc_size = sizeof(struct dma_extended_desc);
5113 } else {
54139cf3 5114 rx_head = (void *)rx_q->dma_rx;
bfaf91ca
JZ
5115 desc_size = sizeof(struct dma_desc);
5116 }
d0225e7d 5117
bfaf91ca
JZ
5118 stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
5119 rx_q->dma_rx_phy, desc_size);
47dd7a54 5120 }
c24602ef 5121 while (count < limit) {
88ebe2cf 5122 unsigned int buf1_len = 0, buf2_len = 0;
ec222003 5123 enum pkt_hash_types hash_type;
2af6106a
JA
5124 struct stmmac_rx_buffer *buf;
5125 struct dma_desc *np, *p;
ec222003
JA
5126 int entry;
5127 u32 hash;
47dd7a54 5128
ec222003
JA
5129 if (!count && rx_q->state_saved) {
5130 skb = rx_q->state.skb;
5131 error = rx_q->state.error;
5132 len = rx_q->state.len;
5133 } else {
5134 rx_q->state_saved = false;
5135 skb = NULL;
5136 error = 0;
5137 len = 0;
5138 }
5139
5140 if (count >= limit)
5141 break;
5142
5143read_again:
88ebe2cf
JA
5144 buf1_len = 0;
5145 buf2_len = 0;
07b39753 5146 entry = next_entry;
2af6106a 5147 buf = &rx_q->buf_pool[entry];
07b39753 5148
c24602ef 5149 if (priv->extend_desc)
54139cf3 5150 p = (struct dma_desc *)(rx_q->dma_erx + entry);
c24602ef 5151 else
54139cf3 5152 p = rx_q->dma_rx + entry;
c24602ef 5153
c1fa3212 5154 /* read the status of the incoming frame */
42de047d
JA
5155 status = stmmac_rx_status(priv, &priv->dev->stats,
5156 &priv->xstats, p);
c1fa3212
FG
5157 /* check if managed by the DMA otherwise go ahead */
5158 if (unlikely(status & dma_own))
47dd7a54
GC
5159 break;
5160
aa042f60
SYS
5161 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5162 priv->dma_rx_size);
54139cf3 5163 next_entry = rx_q->cur_rx;
e3ad57c9 5164
c24602ef 5165 if (priv->extend_desc)
54139cf3 5166 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
c24602ef 5167 else
54139cf3 5168 np = rx_q->dma_rx + next_entry;
ba1ffd74
GC
5169
5170 prefetch(np);
47dd7a54 5171
42de047d
JA
5172 if (priv->extend_desc)
5173 stmmac_rx_extended_status(priv, &priv->dev->stats,
5174 &priv->xstats, rx_q->dma_erx + entry);
891434b1 5175 if (unlikely(status == discard_frame)) {
2af6106a 5176 page_pool_recycle_direct(rx_q->page_pool, buf->page);
2af6106a 5177 buf->page = NULL;
ec222003 5178 error = 1;
0b273ca4
JA
5179 if (!priv->hwts_rx_en)
5180 priv->dev->stats.rx_errors++;
ec222003
JA
5181 }
5182
5183 if (unlikely(error && (status & rx_not_ls)))
5184 goto read_again;
5185 if (unlikely(error)) {
399e06a5 5186 dev_kfree_skb(skb);
88ebe2cf 5187 skb = NULL;
cda4985a 5188 count++;
ec222003
JA
5189 continue;
5190 }
5191
5192 /* Buffer is good. Go on. */
5193
4744bf07 5194 prefetch(page_address(buf->page) + buf->page_offset);
88ebe2cf
JA
5195 if (buf->sec_page)
5196 prefetch(page_address(buf->sec_page));
5197
5198 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5199 len += buf1_len;
5200 buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5201 len += buf2_len;
5202
5203 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
5204 * Type frames (LLC/LLC-SNAP)
5205 *
5206 * llc_snap is never checked in GMAC >= 4, so this ACS
5207 * feature is always disabled and packets need to be
5208 * stripped manually.
5209 */
93b5dce4
JA
5210 if (likely(!(status & rx_not_ls)) &&
5211 (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
5212 unlikely(status != llc_snap))) {
88ebe2cf
JA
5213 if (buf2_len)
5214 buf2_len -= ETH_FCS_LEN;
5215 else
5216 buf1_len -= ETH_FCS_LEN;
5217
5218 len -= ETH_FCS_LEN;
ec222003 5219 }
22ad3838 5220
ec222003 5221 if (!skb) {
be8b38a7
OBL
5222 unsigned int pre_len, sync_len;
5223
5fabb012
OBL
5224 dma_sync_single_for_cpu(priv->device, buf->addr,
5225 buf1_len, dma_dir);
5226
d172268f
MC
5227 xdp_init_buff(&xdp, buf_sz, &rx_q->xdp_rxq);
5228 xdp_prepare_buff(&xdp, page_address(buf->page),
5229 buf->page_offset, buf1_len, false);
5fabb012 5230
be8b38a7
OBL
5231 pre_len = xdp.data_end - xdp.data_hard_start -
5232 buf->page_offset;
5fabb012 5233 skb = stmmac_xdp_run_prog(priv, &xdp);
be8b38a7
OBL
 5234 /* Due to xdp_adjust_tail: the DMA sync for_device must
 5235 * cover the maximum length the CPU has touched.
 5236 */
5237 sync_len = xdp.data_end - xdp.data_hard_start -
5238 buf->page_offset;
5239 sync_len = max(sync_len, pre_len);
5fabb012
OBL
5240
 5241 /* For verdicts other than XDP_PASS */
5242 if (IS_ERR(skb)) {
5243 unsigned int xdp_res = -PTR_ERR(skb);
5244
5245 if (xdp_res & STMMAC_XDP_CONSUMED) {
be8b38a7
OBL
5246 page_pool_put_page(rx_q->page_pool,
5247 virt_to_head_page(xdp.data),
5248 sync_len, true);
5fabb012
OBL
5249 buf->page = NULL;
5250 priv->dev->stats.rx_dropped++;
5251
5252 /* Clear skb as it was set as
5253 * status by XDP program.
5254 */
5255 skb = NULL;
5256
5257 if (unlikely((status & rx_not_ls)))
5258 goto read_again;
5259
5260 count++;
5261 continue;
8b278a5b
OBL
5262 } else if (xdp_res & (STMMAC_XDP_TX |
5263 STMMAC_XDP_REDIRECT)) {
be8b38a7
OBL
5264 xdp_status |= xdp_res;
5265 buf->page = NULL;
5266 skb = NULL;
5267 count++;
5268 continue;
5fabb012
OBL
5269 }
5270 }
5271 }
5272
5273 if (!skb) {
5274 /* XDP program may expand or reduce tail */
5275 buf1_len = xdp.data_end - xdp.data;
5276
88ebe2cf 5277 skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
ec222003 5278 if (!skb) {
2af6106a 5279 priv->dev->stats.rx_dropped++;
cda4985a 5280 count++;
88ebe2cf 5281 goto drain_data;
47dd7a54 5282 }
47dd7a54 5283
5fabb012
OBL
5284 /* XDP program may adjust header */
5285 skb_copy_to_linear_data(skb, xdp.data, buf1_len);
88ebe2cf 5286 skb_put(skb, buf1_len);
2af6106a 5287
ec222003
JA
5288 /* Data payload copied into SKB, page ready for recycle */
5289 page_pool_recycle_direct(rx_q->page_pool, buf->page);
5290 buf->page = NULL;
88ebe2cf 5291 } else if (buf1_len) {
ec222003 5292 dma_sync_single_for_cpu(priv->device, buf->addr,
5fabb012 5293 buf1_len, dma_dir);
ec222003 5294 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5fabb012 5295 buf->page, buf->page_offset, buf1_len,
ec222003 5296 priv->dma_buf_sz);
b9381985 5297
ec222003
JA
5298 /* Data payload appended into SKB */
5299 page_pool_release_page(rx_q->page_pool, buf->page);
5300 buf->page = NULL;
5301 }
47dd7a54 5302
88ebe2cf 5303 if (buf2_len) {
67afd6d1 5304 dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5fabb012 5305 buf2_len, dma_dir);
67afd6d1 5306 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
88ebe2cf 5307 buf->sec_page, 0, buf2_len,
67afd6d1
JA
5308 priv->dma_buf_sz);
5309
67afd6d1
JA
5310 /* Data payload appended into SKB */
5311 page_pool_release_page(rx_q->page_pool, buf->sec_page);
5312 buf->sec_page = NULL;
5313 }
5314
88ebe2cf 5315drain_data:
ec222003
JA
5316 if (likely(status & rx_not_ls))
5317 goto read_again;
88ebe2cf
JA
5318 if (!skb)
5319 continue;
62a2ab93 5320
ec222003 5321 /* Got entire packet into SKB. Finish it. */
76067459 5322
ec222003
JA
5323 stmmac_get_rx_hwtstamp(priv, p, np, skb);
5324 stmmac_rx_vlan(priv->dev, skb);
5325 skb->protocol = eth_type_trans(skb, priv->dev);
47dd7a54 5326
ec222003
JA
5327 if (unlikely(!coe))
5328 skb_checksum_none_assert(skb);
5329 else
5330 skb->ip_summed = CHECKSUM_UNNECESSARY;
2af6106a 5331
ec222003
JA
5332 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5333 skb_set_hash(skb, hash, hash_type);
5334
5335 skb_record_rx_queue(skb, queue);
5336 napi_gro_receive(&ch->rx_napi, skb);
88ebe2cf 5337 skb = NULL;
ec222003
JA
5338
5339 priv->dev->stats.rx_packets++;
5340 priv->dev->stats.rx_bytes += len;
cda4985a 5341 count++;
ec222003
JA
5342 }
5343
88ebe2cf 5344 if (status & rx_not_ls || skb) {
ec222003
JA
5345 rx_q->state_saved = true;
5346 rx_q->state.skb = skb;
5347 rx_q->state.error = error;
5348 rx_q->state.len = len;
47dd7a54
GC
5349 }
5350
be8b38a7
OBL
5351 stmmac_finalize_xdp_rx(priv, xdp_status);
5352
54139cf3 5353 stmmac_rx_refill(priv, queue);
47dd7a54
GC
5354
5355 priv->xstats.rx_pkt_n += count;
68e9c5de 5356 priv->xstats.rxq_stats[queue].rx_pkt_n += count;
47dd7a54
GC
5357
5358 return count;
5359}
5360
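/* Illustrative sketch (not part of the driver): stmmac_rx() above may
 * stop in the middle of a frame that spans several descriptors (budget
 * exhausted or the remaining descriptors still owned by the DMA), so it
 * stashes {skb, error, len} in the queue and restores them on the next
 * poll. The struct and helper below are hypothetical; only the
 * save/restore pattern mirrors the code above.
 */
#include <stdbool.h>
#include <stdio.h>

struct rx_state {
	bool saved;
	int error;
	unsigned int len;
};

static void poll_once(struct rx_state *st, int budget)
{
	int error = 0;
	unsigned int len = 0;

	if (st->saved) {		/* resume a partially received frame */
		error = st->error;
		len = st->len;
		st->saved = false;
	}

	/* ... walk up to 'budget' descriptors, growing 'len' ... */
	len += (unsigned int)budget * 64u;

	if (budget < 8) {		/* pretend the frame is not finished */
		st->saved = true;
		st->error = error;
		st->len = len;
	}
	printf("budget=%d len=%u saved=%d\n", budget, len, (int)st->saved);
}

int main(void)
{
	struct rx_state st = { 0 };

	poll_once(&st, 4);	/* stops mid-frame, state saved */
	poll_once(&st, 64);	/* resumes from the saved length */
	return 0;
}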
4ccb4585 5361static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
47dd7a54 5362{
8fce3331 5363 struct stmmac_channel *ch =
4ccb4585 5364 container_of(napi, struct stmmac_channel, rx_napi);
8fce3331 5365 struct stmmac_priv *priv = ch->priv_data;
8fce3331 5366 u32 chan = ch->index;
4ccb4585 5367 int work_done;
47dd7a54 5368
9125cdd1 5369 priv->xstats.napi_poll++;
ce736788 5370
132c32ee 5371 work_done = stmmac_rx(priv, budget, chan);
021bd5e3
JA
5372 if (work_done < budget && napi_complete_done(napi, work_done)) {
5373 unsigned long flags;
5374
5375 spin_lock_irqsave(&ch->lock, flags);
5376 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5377 spin_unlock_irqrestore(&ch->lock, flags);
5378 }
5379
4ccb4585
JA
5380 return work_done;
5381}
ce736788 5382
4ccb4585
JA
5383static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5384{
5385 struct stmmac_channel *ch =
5386 container_of(napi, struct stmmac_channel, tx_napi);
5387 struct stmmac_priv *priv = ch->priv_data;
4ccb4585
JA
5388 u32 chan = ch->index;
5389 int work_done;
8fce3331 5390
4ccb4585
JA
5391 priv->xstats.napi_poll++;
5392
132c32ee 5393 work_done = stmmac_tx_clean(priv, budget, chan);
4ccb4585 5394 work_done = min(work_done, budget);
8fce3331 5395
021bd5e3
JA
5396 if (work_done < budget && napi_complete_done(napi, work_done)) {
5397 unsigned long flags;
4ccb4585 5398
021bd5e3
JA
5399 spin_lock_irqsave(&ch->lock, flags);
5400 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5401 spin_unlock_irqrestore(&ch->lock, flags);
fa0be0a4 5402 }
8fce3331 5403
47dd7a54
GC
5404 return work_done;
5405}
5406
132c32ee
OBL
5407static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5408{
5409 struct stmmac_channel *ch =
5410 container_of(napi, struct stmmac_channel, rxtx_napi);
5411 struct stmmac_priv *priv = ch->priv_data;
81d0885d 5412 int rx_done, tx_done, rxtx_done;
132c32ee
OBL
5413 u32 chan = ch->index;
5414
5415 priv->xstats.napi_poll++;
5416
5417 tx_done = stmmac_tx_clean(priv, budget, chan);
5418 tx_done = min(tx_done, budget);
5419
5420 rx_done = stmmac_rx_zc(priv, budget, chan);
5421
81d0885d
SYS
5422 rxtx_done = max(tx_done, rx_done);
5423
132c32ee
OBL
5424 /* If either TX or RX work is not complete, return budget
 5425 * and keep polling
5426 */
81d0885d 5427 if (rxtx_done >= budget)
132c32ee
OBL
5428 return budget;
5429
5430 /* all work done, exit the polling mode */
81d0885d 5431 if (napi_complete_done(napi, rxtx_done)) {
132c32ee
OBL
5432 unsigned long flags;
5433
5434 spin_lock_irqsave(&ch->lock, flags);
 5435 /* Both RX and TX work are complete,
5436 * so enable both RX & TX IRQs.
5437 */
5438 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5439 spin_unlock_irqrestore(&ch->lock, flags);
5440 }
5441
81d0885d 5442 return min(rxtx_done, budget - 1);
132c32ee
OBL
5443}
5444
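/* Illustrative sketch (not part of the driver): the completion contract
 * the combined RX/TX poll above follows. If either direction consumed
 * the whole budget, the poll returns 'budget' and interrupts stay off;
 * otherwise it completes NAPI, re-enables the DMA interrupts and must
 * report strictly less than 'budget'. napi_done() is a hypothetical
 * stand-in for napi_complete_done().
 */
#include <stdbool.h>
#include <stdio.h>

static bool napi_done(int work) { (void)work; return true; }

static int poll_model(int rx_done, int tx_done, int budget)
{
	int done = tx_done > rx_done ? tx_done : rx_done;

	if (done >= budget)
		return budget;		/* keep polling, IRQs stay masked */

	if (napi_done(done))
		printf("re-enable RX and TX DMA interrupts\n");

	return done < budget - 1 ? done : budget - 1; /* never report 'budget' here */
}

int main(void)
{
	printf("%d\n", poll_model(64, 10, 64));	/* -> 64, stay in polling   */
	printf("%d\n", poll_model(3, 5, 64));	/* -> 5, interrupts back on */
	return 0;
}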
47dd7a54
GC
5445/**
5446 * stmmac_tx_timeout
5447 * @dev : Pointer to net device structure
d0ea5cbd 5448 * @txqueue: the index of the hanging transmit queue
47dd7a54 5449 * Description: this function is called when a packet transmission fails to
7284a3f1 5450 * complete within a reasonable time. The driver will mark the error in the
47dd7a54
GC
5451 * netdev structure and arrange for the device to be reset to a sane state
5452 * in order to transmit a new packet.
5453 */
0290bd29 5454static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
47dd7a54
GC
5455{
5456 struct stmmac_priv *priv = netdev_priv(dev);
5457
34877a15 5458 stmmac_global_err(priv);
47dd7a54
GC
5459}
5460
47dd7a54 5461/**
01789349 5462 * stmmac_set_rx_mode - entry point for multicast addressing
47dd7a54
GC
5463 * @dev : pointer to the device structure
5464 * Description:
5465 * This function is a driver entry point which gets called by the kernel
5466 * whenever multicast addresses must be enabled/disabled.
5467 * Return value:
5468 * void.
5469 */
01789349 5470static void stmmac_set_rx_mode(struct net_device *dev)
47dd7a54
GC
5471{
5472 struct stmmac_priv *priv = netdev_priv(dev);
5473
c10d4c82 5474 stmmac_set_filter(priv, priv->hw, dev);
47dd7a54
GC
5475}
5476
5477/**
5478 * stmmac_change_mtu - entry point to change MTU size for the device.
5479 * @dev : device pointer.
5480 * @new_mtu : the new MTU size for the device.
5481 * Description: the Maximum Transfer Unit (MTU) is used by the network layer
5482 * to drive packet transmission. Ethernet has an MTU of 1500 octets
5483 * (ETH_DATA_LEN). This value can be changed with ifconfig.
5484 * Return value:
5485 * 0 on success and an appropriate (-)ve integer as defined in errno.h
5486 * file on failure.
5487 */
5488static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5489{
38ddc59d 5490 struct stmmac_priv *priv = netdev_priv(dev);
eaf4fac4 5491 int txfifosz = priv->plat->tx_fifo_size;
5b55299e 5492 const int mtu = new_mtu;
eaf4fac4
JA
5493
5494 if (txfifosz == 0)
5495 txfifosz = priv->dma_cap.tx_fifo_size;
5496
5497 txfifosz /= priv->plat->tx_queues_to_use;
38ddc59d 5498
47dd7a54 5499 if (netif_running(dev)) {
38ddc59d 5500 netdev_err(priv->dev, "must be stopped to change its MTU\n");
47dd7a54
GC
5501 return -EBUSY;
5502 }
5503
5fabb012
OBL
5504 if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5505 netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5506 return -EINVAL;
5507 }
5508
eaf4fac4
JA
5509 new_mtu = STMMAC_ALIGN(new_mtu);
5510
5511 /* If condition true, FIFO is too small or MTU too large */
5512 if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5513 return -EINVAL;
5514
5b55299e 5515 dev->mtu = mtu;
f748be53 5516
5e982f3b
MM
5517 netdev_update_features(dev);
5518
5519 return 0;
5520}
5521
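/* Illustrative sketch (not part of the driver): the MTU check above
 * splits the TX FIFO evenly across the TX queues in use and rejects an
 * MTU whose aligned size would not fit in one queue's share. The FIFO
 * and queue numbers are made up, and align16() is a simplified stand-in
 * for the driver's STMMAC_ALIGN().
 */
#include <stdio.h>

static int align16(int x) { return (x + 15) & ~15; }

int main(void)
{
	int tx_fifo = 16384;	/* total TX FIFO in bytes (example value) */
	int tx_queues = 4;
	int per_queue = tx_fifo / tx_queues;
	int mtu = 9000;		/* a jumbo MTU request */
	int aligned = align16(mtu);

	if (per_queue < aligned)
		printf("MTU %d rejected: %d-byte FIFO share < %d\n",
		       mtu, per_queue, aligned);
	else
		printf("MTU %d accepted\n", mtu);
	return 0;
}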
c8f44aff 5522static netdev_features_t stmmac_fix_features(struct net_device *dev,
ceb69499 5523 netdev_features_t features)
5e982f3b
MM
5524{
5525 struct stmmac_priv *priv = netdev_priv(dev);
5526
38912bdb 5527 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5e982f3b 5528 features &= ~NETIF_F_RXCSUM;
d2afb5bd 5529
5e982f3b 5530 if (!priv->plat->tx_coe)
a188222b 5531 features &= ~NETIF_F_CSUM_MASK;
5e982f3b 5532
ebbb293f
GC
 5533 /* Some GMAC devices have bugged Jumbo frame support that
5534 * needs to have the Tx COE disabled for oversized frames
5535 * (due to limited buffer sizes). In this case we disable
8d45e42b 5536 * the TX csum insertion in the TDES and do not use SF.
ceb69499 5537 */
5e982f3b 5538 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
a188222b 5539 features &= ~NETIF_F_CSUM_MASK;
ebbb293f 5540
f748be53
AT
5541 /* Disable tso if asked by ethtool */
5542 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
5543 if (features & NETIF_F_TSO)
5544 priv->tso = true;
5545 else
5546 priv->tso = false;
5547 }
5548
5e982f3b 5549 return features;
47dd7a54
GC
5550}
5551
d2afb5bd
GC
5552static int stmmac_set_features(struct net_device *netdev,
5553 netdev_features_t features)
5554{
5555 struct stmmac_priv *priv = netdev_priv(netdev);
5556
 5557 /* Keep the COE Type in case csum is supported */
5558 if (features & NETIF_F_RXCSUM)
5559 priv->hw->rx_csum = priv->plat->rx_coe;
5560 else
5561 priv->hw->rx_csum = 0;
5562 /* No check needed because rx_coe has been set before and it will be
5563 * fixed in case of issue.
5564 */
c10d4c82 5565 stmmac_rx_ipc(priv, priv->hw);
d2afb5bd 5566
e0edaa2e
VW
5567 if (priv->sph_cap) {
5568 bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5569 u32 chan;
5fabb012 5570
e0edaa2e
VW
5571 for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5572 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5573 }
67afd6d1 5574
d2afb5bd
GC
5575 return 0;
5576}
5577
5a558611
OBL
5578static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5579{
5580 struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5581 enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5582 enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5583 bool *hs_enable = &fpe_cfg->hs_enable;
5584
5585 if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5586 return;
5587
5588 /* If LP has sent verify mPacket, LP is FPE capable */
5589 if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
5590 if (*lp_state < FPE_STATE_CAPABLE)
5591 *lp_state = FPE_STATE_CAPABLE;
5592
 5594 /* If the user has requested FPE enable, respond quickly */
5594 if (*hs_enable)
5595 stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5596 MPACKET_RESPONSE);
5597 }
5598
5599 /* If Local has sent verify mPacket, Local is FPE capable */
5600 if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
5601 if (*lo_state < FPE_STATE_CAPABLE)
5602 *lo_state = FPE_STATE_CAPABLE;
5603 }
5604
5605 /* If LP has sent response mPacket, LP is entering FPE ON */
5606 if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
5607 *lp_state = FPE_STATE_ENTERING_ON;
5608
5609 /* If Local has sent response mPacket, Local is entering FPE ON */
5610 if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
5611 *lo_state = FPE_STATE_ENTERING_ON;
5612
5613 if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
5614 !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
5615 priv->fpe_wq) {
5616 queue_work(priv->fpe_wq, &priv->fpe_task);
5617 }
5618}
5619
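/* Illustrative sketch (not part of the driver): the local/link-partner
 * tracking done above, reduced to a host-testable model. The event
 * arguments mirror the FPE_EVENT_RVER/TVER/RRSP/TRSP checks; the enum
 * ordering (OFF < CAPABLE < ENTERING_ON < ON) is an assumption about
 * the driver's stmmac_fpe_state values.
 */
#include <stdio.h>

enum fpe_state { FPE_OFF, FPE_CAPABLE, FPE_ENTERING_ON, FPE_ON };

struct fpe_pair { enum fpe_state lo, lp; };

static void fpe_event(struct fpe_pair *s, int rver, int tver, int rrsp, int trsp)
{
	if (rver && s->lp < FPE_CAPABLE)
		s->lp = FPE_CAPABLE;		/* partner sent a verify mPacket */
	if (tver && s->lo < FPE_CAPABLE)
		s->lo = FPE_CAPABLE;		/* we sent a verify mPacket */
	if (rrsp)
		s->lp = FPE_ENTERING_ON;	/* partner sent a response */
	if (trsp)
		s->lo = FPE_ENTERING_ON;	/* we sent a response */
}

int main(void)
{
	struct fpe_pair s = { FPE_OFF, FPE_OFF };

	fpe_event(&s, 1, 0, 0, 0);	/* partner verifies */
	fpe_event(&s, 0, 1, 0, 1);	/* we verify and respond */
	printf("local=%d partner=%d\n", (int)s.lo, (int)s.lp);
	return 0;
}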
29e6573c 5620static void stmmac_common_interrupt(struct stmmac_priv *priv)
47dd7a54 5621{
7bac4e1e
JP
5622 u32 rx_cnt = priv->plat->rx_queues_to_use;
5623 u32 tx_cnt = priv->plat->tx_queues_to_use;
5624 u32 queues_count;
5625 u32 queue;
7d9e6c5a 5626 bool xmac;
7bac4e1e 5627
7d9e6c5a 5628 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
7bac4e1e 5629 queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
47dd7a54 5630
89f7f2cf
SK
5631 if (priv->irq_wake)
5632 pm_wakeup_event(priv->device, 0);
5633
e49aa315 5634 if (priv->dma_cap.estsel)
9f298959
OBL
5635 stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
5636 &priv->xstats, tx_cnt);
e49aa315 5637
5a558611
OBL
5638 if (priv->dma_cap.fpesel) {
5639 int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
5640 priv->dev);
5641
5642 stmmac_fpe_event_status(priv, status);
5643 }
5644
d765955d 5645 /* To handle GMAC own interrupts */
7d9e6c5a 5646 if ((priv->plat->has_gmac) || xmac) {
c10d4c82 5647 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
8f71a88d 5648
d765955d 5649 if (unlikely(status)) {
d765955d 5650 /* For LPI we need to save the tx status */
0982a0f6 5651 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
d765955d 5652 priv->tx_path_in_lpi_mode = true;
0982a0f6 5653 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
d765955d 5654 priv->tx_path_in_lpi_mode = false;
7bac4e1e
JP
5655 }
5656
61fac60a 5657 for (queue = 0; queue < queues_count; queue++) {
8a7cb245
YV
5658 status = stmmac_host_mtl_irq_status(priv, priv->hw,
5659 queue);
d765955d 5660 }
70523e63
GC
5661
5662 /* PCS link status */
3fe5cadb 5663 if (priv->hw->pcs) {
70523e63 5664 if (priv->xstats.pcs_link)
29e6573c 5665 netif_carrier_on(priv->dev);
70523e63 5666 else
29e6573c 5667 netif_carrier_off(priv->dev);
70523e63 5668 }
f4da5652
TTM
5669
5670 stmmac_timestamp_interrupt(priv, priv);
d765955d 5671 }
29e6573c
OBL
5672}
5673
5674/**
5675 * stmmac_interrupt - main ISR
5676 * @irq: interrupt number.
5677 * @dev_id: to pass the net device pointer.
5678 * Description: this is the main driver interrupt service routine.
5679 * It can call:
5680 * o DMA service routine (to manage incoming frame reception and transmission
5681 * status)
5682 * o Core interrupts to manage: remote wake-up, management counter, LPI
5683 * interrupts.
5684 */
5685static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
5686{
5687 struct net_device *dev = (struct net_device *)dev_id;
5688 struct stmmac_priv *priv = netdev_priv(dev);
5689
5690 /* Check if adapter is up */
5691 if (test_bit(STMMAC_DOWN, &priv->state))
5692 return IRQ_HANDLED;
5693
5694 /* Check if a fatal error happened */
5695 if (stmmac_safety_feat_interrupt(priv))
5696 return IRQ_HANDLED;
5697
5698 /* To handle Common interrupts */
5699 stmmac_common_interrupt(priv);
aec7ff27 5700
d765955d 5701 /* To handle DMA interrupts */
aec7ff27 5702 stmmac_dma_interrupt(priv);
47dd7a54
GC
5703
5704 return IRQ_HANDLED;
5705}
5706
8532f613
OBL
5707static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
5708{
5709 struct net_device *dev = (struct net_device *)dev_id;
5710 struct stmmac_priv *priv = netdev_priv(dev);
5711
5712 if (unlikely(!dev)) {
5713 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5714 return IRQ_NONE;
5715 }
5716
5717 /* Check if adapter is up */
5718 if (test_bit(STMMAC_DOWN, &priv->state))
5719 return IRQ_HANDLED;
5720
5721 /* To handle Common interrupts */
5722 stmmac_common_interrupt(priv);
5723
5724 return IRQ_HANDLED;
5725}
5726
5727static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
5728{
5729 struct net_device *dev = (struct net_device *)dev_id;
5730 struct stmmac_priv *priv = netdev_priv(dev);
5731
5732 if (unlikely(!dev)) {
5733 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5734 return IRQ_NONE;
5735 }
5736
5737 /* Check if adapter is up */
5738 if (test_bit(STMMAC_DOWN, &priv->state))
5739 return IRQ_HANDLED;
5740
5741 /* Check if a fatal error happened */
5742 stmmac_safety_feat_interrupt(priv);
5743
5744 return IRQ_HANDLED;
5745}
5746
5747static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
5748{
5749 struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
5750 int chan = tx_q->queue_index;
5751 struct stmmac_priv *priv;
5752 int status;
5753
5754 priv = container_of(tx_q, struct stmmac_priv, tx_queue[chan]);
5755
5756 if (unlikely(!data)) {
5757 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5758 return IRQ_NONE;
5759 }
5760
5761 /* Check if adapter is up */
5762 if (test_bit(STMMAC_DOWN, &priv->state))
5763 return IRQ_HANDLED;
5764
5765 status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
5766
5767 if (unlikely(status & tx_hard_error_bump_tc)) {
5768 /* Try to bump up the dma threshold on this failure */
5769 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
5770 tc <= 256) {
5771 tc += 64;
5772 if (priv->plat->force_thresh_dma_mode)
5773 stmmac_set_dma_operation_mode(priv,
5774 tc,
5775 tc,
5776 chan);
5777 else
5778 stmmac_set_dma_operation_mode(priv,
5779 tc,
5780 SF_DMA_MODE,
5781 chan);
5782 priv->xstats.threshold = tc;
5783 }
5784 } else if (unlikely(status == tx_hard_error)) {
5785 stmmac_tx_err(priv, chan);
5786 }
5787
5788 return IRQ_HANDLED;
5789}
5790
5791static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
5792{
5793 struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
5794 int chan = rx_q->queue_index;
5795 struct stmmac_priv *priv;
5796
5797 priv = container_of(rx_q, struct stmmac_priv, rx_queue[chan]);
5798
5799 if (unlikely(!data)) {
5800 netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
5801 return IRQ_NONE;
5802 }
5803
5804 /* Check if adapter is up */
5805 if (test_bit(STMMAC_DOWN, &priv->state))
5806 return IRQ_HANDLED;
5807
5808 stmmac_napi_check(priv, chan, DMA_DIR_RX);
5809
5810 return IRQ_HANDLED;
5811}
5812
47dd7a54
GC
5813#ifdef CONFIG_NET_POLL_CONTROLLER
5814/* Polling receive - used by NETCONSOLE and other diagnostic tools
ceb69499
GC
5815 * to allow network I/O with interrupts disabled.
5816 */
47dd7a54
GC
5817static void stmmac_poll_controller(struct net_device *dev)
5818{
8532f613
OBL
5819 struct stmmac_priv *priv = netdev_priv(dev);
5820 int i;
5821
5822 /* If adapter is down, do nothing */
5823 if (test_bit(STMMAC_DOWN, &priv->state))
5824 return;
5825
5826 if (priv->plat->multi_msi_en) {
5827 for (i = 0; i < priv->plat->rx_queues_to_use; i++)
5828 stmmac_msi_intr_rx(0, &priv->rx_queue[i]);
5829
5830 for (i = 0; i < priv->plat->tx_queues_to_use; i++)
5831 stmmac_msi_intr_tx(0, &priv->tx_queue[i]);
5832 } else {
5833 disable_irq(dev->irq);
5834 stmmac_interrupt(dev->irq, dev);
5835 enable_irq(dev->irq);
5836 }
47dd7a54
GC
5837}
5838#endif
5839
5840/**
5841 * stmmac_ioctl - Entry point for the Ioctl
5842 * @dev: Device pointer.
 5843 * @rq: An IOCTL-specific structure that can contain a pointer to
5844 * a proprietary structure used to pass information to the driver.
5845 * @cmd: IOCTL command
5846 * Description:
32ceabca 5847 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
47dd7a54
GC
5848 */
5849static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5850{
74371272 5851 struct stmmac_priv *priv = netdev_priv(dev);
891434b1 5852 int ret = -EOPNOTSUPP;
47dd7a54
GC
5853
5854 if (!netif_running(dev))
5855 return -EINVAL;
5856
891434b1
RK
5857 switch (cmd) {
5858 case SIOCGMIIPHY:
5859 case SIOCGMIIREG:
5860 case SIOCSMIIREG:
74371272 5861 ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
891434b1
RK
5862 break;
5863 case SIOCSHWTSTAMP:
d6228b7c
AP
5864 ret = stmmac_hwtstamp_set(dev, rq);
5865 break;
5866 case SIOCGHWTSTAMP:
5867 ret = stmmac_hwtstamp_get(dev, rq);
891434b1
RK
5868 break;
5869 default:
5870 break;
5871 }
28b04113 5872
47dd7a54
GC
5873 return ret;
5874}
5875
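/* Illustrative sketch (not part of the driver): how user space reaches
 * the SIOCSHWTSTAMP case handled above. This uses the standard kernel
 * hardware-timestamping API from linux/net_tstamp.h; the interface name
 * "eth0" is only an example.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	else
		printf("hardware timestamping enabled\n");

	close(fd);
	return 0;
}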
4dbbe8dd
JA
5876static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
5877 void *cb_priv)
5878{
5879 struct stmmac_priv *priv = cb_priv;
5880 int ret = -EOPNOTSUPP;
5881
425eabdd
JA
5882 if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
5883 return ret;
5884
bba2556e 5885 __stmmac_disable_all_queues(priv);
4dbbe8dd
JA
5886
5887 switch (type) {
5888 case TC_SETUP_CLSU32:
425eabdd
JA
5889 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
5890 break;
5891 case TC_SETUP_CLSFLOWER:
5892 ret = stmmac_tc_setup_cls(priv, priv, type_data);
4dbbe8dd
JA
5893 break;
5894 default:
5895 break;
5896 }
5897
5898 stmmac_enable_all_queues(priv);
5899 return ret;
5900}
5901
955bcb6e
PNA
5902static LIST_HEAD(stmmac_block_cb_list);
5903
4dbbe8dd
JA
5904static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
5905 void *type_data)
5906{
5907 struct stmmac_priv *priv = netdev_priv(ndev);
5908
5909 switch (type) {
5910 case TC_SETUP_BLOCK:
955bcb6e
PNA
5911 return flow_block_cb_setup_simple(type_data,
5912 &stmmac_block_cb_list,
4e95bc26
PNA
5913 stmmac_setup_tc_block_cb,
5914 priv, priv, true);
1f705bc6
JA
5915 case TC_SETUP_QDISC_CBS:
5916 return stmmac_tc_setup_cbs(priv, priv, type_data);
b60189e0
JA
5917 case TC_SETUP_QDISC_TAPRIO:
5918 return stmmac_tc_setup_taprio(priv, priv, type_data);
430b383c
JA
5919 case TC_SETUP_QDISC_ETF:
5920 return stmmac_tc_setup_etf(priv, priv, type_data);
4dbbe8dd
JA
5921 default:
5922 return -EOPNOTSUPP;
5923 }
5924}
5925
4993e5b3
JA
5926static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
5927 struct net_device *sb_dev)
5928{
b7766206
JA
5929 int gso = skb_shinfo(skb)->gso_type;
5930
5931 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
4993e5b3 5932 /*
b7766206 5933 * There is no way to determine the number of TSO/USO
4993e5b3 5934 * capable Queues. Let's always use Queue 0
b7766206 5935 * because if TSO/USO is supported then at least this
4993e5b3
JA
5936 * one will be capable.
5937 */
5938 return 0;
5939 }
5940
5941 return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
5942}
5943
a830405e
BV
5944static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
5945{
5946 struct stmmac_priv *priv = netdev_priv(ndev);
5947 int ret = 0;
5948
4691ffb1
JZ
5949 ret = pm_runtime_get_sync(priv->device);
5950 if (ret < 0) {
5951 pm_runtime_put_noidle(priv->device);
5952 return ret;
5953 }
5954
a830405e
BV
5955 ret = eth_mac_addr(ndev, addr);
5956 if (ret)
4691ffb1 5957 goto set_mac_error;
a830405e 5958
c10d4c82 5959 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
a830405e 5960
4691ffb1
JZ
5961set_mac_error:
5962 pm_runtime_put(priv->device);
5963
a830405e
BV
5964 return ret;
5965}
5966
50fb4f74 5967#ifdef CONFIG_DEBUG_FS
7ac29055 5968static struct dentry *stmmac_fs_dir;
7ac29055 5969
c24602ef 5970static void sysfs_display_ring(void *head, int size, int extend_desc,
bfaf91ca 5971 struct seq_file *seq, dma_addr_t dma_phy_addr)
7ac29055 5972{
7ac29055 5973 int i;
ceb69499
GC
5974 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
5975 struct dma_desc *p = (struct dma_desc *)head;
bfaf91ca 5976 dma_addr_t dma_addr;
7ac29055 5977
c24602ef 5978 for (i = 0; i < size; i++) {
c24602ef 5979 if (extend_desc) {
bfaf91ca
JZ
5980 dma_addr = dma_phy_addr + i * sizeof(*ep);
5981 seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
5982 i, &dma_addr,
f8be0d78
MW
5983 le32_to_cpu(ep->basic.des0),
5984 le32_to_cpu(ep->basic.des1),
5985 le32_to_cpu(ep->basic.des2),
5986 le32_to_cpu(ep->basic.des3));
c24602ef
GC
5987 ep++;
5988 } else {
bfaf91ca
JZ
5989 dma_addr = dma_phy_addr + i * sizeof(*p);
5990 seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
5991 i, &dma_addr,
f8be0d78
MW
5992 le32_to_cpu(p->des0), le32_to_cpu(p->des1),
5993 le32_to_cpu(p->des2), le32_to_cpu(p->des3));
c24602ef
GC
5994 p++;
5995 }
7ac29055
GC
5996 seq_printf(seq, "\n");
5997 }
c24602ef 5998}
7ac29055 5999
fb0d9c63 6000static int stmmac_rings_status_show(struct seq_file *seq, void *v)
c24602ef
GC
6001{
6002 struct net_device *dev = seq->private;
6003 struct stmmac_priv *priv = netdev_priv(dev);
54139cf3 6004 u32 rx_count = priv->plat->rx_queues_to_use;
ce736788 6005 u32 tx_count = priv->plat->tx_queues_to_use;
54139cf3
JP
6006 u32 queue;
6007
5f2b8b62
TR
6008 if ((dev->flags & IFF_UP) == 0)
6009 return 0;
6010
54139cf3
JP
6011 for (queue = 0; queue < rx_count; queue++) {
6012 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
6013
6014 seq_printf(seq, "RX Queue %d:\n", queue);
6015
6016 if (priv->extend_desc) {
6017 seq_printf(seq, "Extended descriptor ring:\n");
6018 sysfs_display_ring((void *)rx_q->dma_erx,
bfaf91ca 6019 priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy);
54139cf3
JP
6020 } else {
6021 seq_printf(seq, "Descriptor ring:\n");
6022 sysfs_display_ring((void *)rx_q->dma_rx,
bfaf91ca 6023 priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy);
54139cf3
JP
6024 }
6025 }
aff3d9ef 6026
ce736788
JP
6027 for (queue = 0; queue < tx_count; queue++) {
6028 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
6029
6030 seq_printf(seq, "TX Queue %d:\n", queue);
6031
6032 if (priv->extend_desc) {
6033 seq_printf(seq, "Extended descriptor ring:\n");
6034 sysfs_display_ring((void *)tx_q->dma_etx,
bfaf91ca 6035 priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy);
579a25a8 6036 } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
ce736788
JP
6037 seq_printf(seq, "Descriptor ring:\n");
6038 sysfs_display_ring((void *)tx_q->dma_tx,
bfaf91ca 6039 priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy);
ce736788 6040 }
7ac29055
GC
6041 }
6042
6043 return 0;
6044}
fb0d9c63 6045DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
7ac29055 6046
fb0d9c63 6047static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
e7434821
GC
6048{
6049 struct net_device *dev = seq->private;
6050 struct stmmac_priv *priv = netdev_priv(dev);
6051
19e30c14 6052 if (!priv->hw_cap_support) {
e7434821
GC
6053 seq_printf(seq, "DMA HW features not supported\n");
6054 return 0;
6055 }
6056
6057 seq_printf(seq, "==============================\n");
6058 seq_printf(seq, "\tDMA HW features\n");
6059 seq_printf(seq, "==============================\n");
6060
22d3efe5 6061 seq_printf(seq, "\t10/100 Mbps: %s\n",
e7434821 6062 (priv->dma_cap.mbps_10_100) ? "Y" : "N");
22d3efe5 6063 seq_printf(seq, "\t1000 Mbps: %s\n",
e7434821 6064 (priv->dma_cap.mbps_1000) ? "Y" : "N");
22d3efe5 6065 seq_printf(seq, "\tHalf duplex: %s\n",
e7434821
GC
6066 (priv->dma_cap.half_duplex) ? "Y" : "N");
6067 seq_printf(seq, "\tHash Filter: %s\n",
6068 (priv->dma_cap.hash_filter) ? "Y" : "N");
6069 seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6070 (priv->dma_cap.multi_addr) ? "Y" : "N");
8d45e42b 6071 seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
e7434821
GC
6072 (priv->dma_cap.pcs) ? "Y" : "N");
6073 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6074 (priv->dma_cap.sma_mdio) ? "Y" : "N");
6075 seq_printf(seq, "\tPMT Remote wake up: %s\n",
6076 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6077 seq_printf(seq, "\tPMT Magic Frame: %s\n",
6078 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6079 seq_printf(seq, "\tRMON module: %s\n",
6080 (priv->dma_cap.rmon) ? "Y" : "N");
6081 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6082 (priv->dma_cap.time_stamp) ? "Y" : "N");
22d3efe5 6083 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
e7434821 6084 (priv->dma_cap.atime_stamp) ? "Y" : "N");
22d3efe5 6085 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
e7434821
GC
6086 (priv->dma_cap.eee) ? "Y" : "N");
6087 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6088 seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6089 (priv->dma_cap.tx_coe) ? "Y" : "N");
f748be53
AT
6090 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
6091 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6092 (priv->dma_cap.rx_coe) ? "Y" : "N");
6093 } else {
6094 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6095 (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6096 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6097 (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6098 }
e7434821
GC
6099 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6100 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6101 seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6102 priv->dma_cap.number_rx_channel);
6103 seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6104 priv->dma_cap.number_tx_channel);
7d0b447a
JA
6105 seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6106 priv->dma_cap.number_rx_queues);
6107 seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6108 priv->dma_cap.number_tx_queues);
e7434821
GC
6109 seq_printf(seq, "\tEnhanced descriptors: %s\n",
6110 (priv->dma_cap.enh_desc) ? "Y" : "N");
7d0b447a
JA
6111 seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6112 seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6113 seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
6114 seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6115 seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6116 priv->dma_cap.pps_out_num);
6117 seq_printf(seq, "\tSafety Features: %s\n",
6118 priv->dma_cap.asp ? "Y" : "N");
6119 seq_printf(seq, "\tFlexible RX Parser: %s\n",
6120 priv->dma_cap.frpsel ? "Y" : "N");
6121 seq_printf(seq, "\tEnhanced Addressing: %d\n",
6122 priv->dma_cap.addr64);
6123 seq_printf(seq, "\tReceive Side Scaling: %s\n",
6124 priv->dma_cap.rssen ? "Y" : "N");
6125 seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6126 priv->dma_cap.vlhash ? "Y" : "N");
6127 seq_printf(seq, "\tSplit Header: %s\n",
6128 priv->dma_cap.sphen ? "Y" : "N");
6129 seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6130 priv->dma_cap.vlins ? "Y" : "N");
6131 seq_printf(seq, "\tDouble VLAN: %s\n",
6132 priv->dma_cap.dvlan ? "Y" : "N");
6133 seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6134 priv->dma_cap.l3l4fnum);
6135 seq_printf(seq, "\tARP Offloading: %s\n",
6136 priv->dma_cap.arpoffsel ? "Y" : "N");
44e65475
JA
6137 seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6138 priv->dma_cap.estsel ? "Y" : "N");
6139 seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6140 priv->dma_cap.fpesel ? "Y" : "N");
6141 seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6142 priv->dma_cap.tbssel ? "Y" : "N");
e7434821
GC
6143 return 0;
6144}
fb0d9c63 6145DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
e7434821 6146
481a7d15
JM
6147/* Use network device events to rename debugfs file entries.
6148 */
6149static int stmmac_device_event(struct notifier_block *unused,
6150 unsigned long event, void *ptr)
6151{
6152 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6153 struct stmmac_priv *priv = netdev_priv(dev);
6154
6155 if (dev->netdev_ops != &stmmac_netdev_ops)
6156 goto done;
6157
6158 switch (event) {
6159 case NETDEV_CHANGENAME:
6160 if (priv->dbgfs_dir)
6161 priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6162 priv->dbgfs_dir,
6163 stmmac_fs_dir,
6164 dev->name);
6165 break;
6166 }
6167done:
6168 return NOTIFY_DONE;
6169}
6170
6171static struct notifier_block stmmac_notifier = {
6172 .notifier_call = stmmac_device_event,
6173};
6174
8d72ab11 6175static void stmmac_init_fs(struct net_device *dev)
7ac29055 6176{
466c5ac8
MO
6177 struct stmmac_priv *priv = netdev_priv(dev);
6178
474a31e1
AK
6179 rtnl_lock();
6180
466c5ac8
MO
6181 /* Create per netdev entries */
6182 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
7ac29055 6183
7ac29055 6184 /* Entry to report DMA RX/TX rings */
8d72ab11
GKH
6185 debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6186 &stmmac_rings_status_fops);
7ac29055 6187
e7434821 6188 /* Entry to report the DMA HW features */
8d72ab11
GKH
6189 debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6190 &stmmac_dma_cap_fops);
481a7d15 6191
474a31e1 6192 rtnl_unlock();
7ac29055
GC
6193}
6194
466c5ac8 6195static void stmmac_exit_fs(struct net_device *dev)
7ac29055 6196{
466c5ac8
MO
6197 struct stmmac_priv *priv = netdev_priv(dev);
6198
6199 debugfs_remove_recursive(priv->dbgfs_dir);
7ac29055 6200}
50fb4f74 6201#endif /* CONFIG_DEBUG_FS */
7ac29055 6202
3cd1cfcb
JA
6203static u32 stmmac_vid_crc32_le(__le16 vid_le)
6204{
6205 unsigned char *data = (unsigned char *)&vid_le;
6206 unsigned char data_byte = 0;
6207 u32 crc = ~0x0;
6208 u32 temp = 0;
6209 int i, bits;
6210
6211 bits = get_bitmask_order(VLAN_VID_MASK);
6212 for (i = 0; i < bits; i++) {
6213 if ((i % 8) == 0)
6214 data_byte = data[i / 8];
6215
6216 temp = ((crc & 1) ^ data_byte) & 1;
6217 crc >>= 1;
6218 data_byte >>= 1;
6219
6220 if (temp)
6221 crc ^= 0xedb88320;
6222 }
6223
6224 return crc;
6225}
6226
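/* Illustrative sketch (not part of the driver): the same little-endian
 * CRC-32 over the 12 VID bits as the function above, followed by the
 * bit-reverse and ">> 28" step that stmmac_vlan_update() below uses to
 * pick one of 16 hash-filter bits. Compiled stand-alone for a
 * little-endian host; bitrev32() here is a plain C re-implementation.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t vid_crc32_le(uint16_t vid_le)
{
	const unsigned char *data = (const unsigned char *)&vid_le;
	uint32_t crc = ~0u;
	int i;

	for (i = 0; i < 12; i++) {	/* VLAN_VID_MASK covers 12 bits */
		unsigned char bit = (data[i / 8] >> (i % 8)) & 1;

		if ((crc ^ bit) & 1)
			crc = (crc >> 1) ^ 0xedb88320;
		else
			crc >>= 1;
	}
	return crc;
}

static uint32_t bitrev32(uint32_t x)
{
	uint32_t r = 0;
	int i;

	for (i = 0; i < 32; i++)
		r |= ((x >> i) & 1u) << (31 - i);
	return r;
}

int main(void)
{
	unsigned int vid = 100;
	uint32_t bit = bitrev32(~vid_crc32_le((uint16_t)vid)) >> 28;

	printf("VID %u -> hash bit %u -> mask 0x%04x\n",
	       vid, (unsigned int)bit, 1u << bit);
	return 0;
}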
6227static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6228{
6229 u32 crc, hash = 0;
a24cae70 6230 __le16 pmatch = 0;
c7ab0b80
JA
6231 int count = 0;
6232 u16 vid = 0;
3cd1cfcb
JA
6233
6234 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6235 __le16 vid_le = cpu_to_le16(vid);
6236 crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6237 hash |= (1 << crc);
c7ab0b80
JA
6238 count++;
6239 }
6240
6241 if (!priv->dma_cap.vlhash) {
6242 if (count > 2) /* VID = 0 always passes filter */
6243 return -EOPNOTSUPP;
6244
a24cae70 6245 pmatch = cpu_to_le16(vid);
c7ab0b80 6246 hash = 0;
3cd1cfcb
JA
6247 }
6248
a24cae70 6249 return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
3cd1cfcb
JA
6250}
6251
6252static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6253{
6254 struct stmmac_priv *priv = netdev_priv(ndev);
6255 bool is_double = false;
6256 int ret;
6257
3cd1cfcb
JA
6258 if (be16_to_cpu(proto) == ETH_P_8021AD)
6259 is_double = true;
6260
6261 set_bit(vid, priv->active_vlans);
6262 ret = stmmac_vlan_update(priv, is_double);
6263 if (ret) {
6264 clear_bit(vid, priv->active_vlans);
6265 return ret;
6266 }
6267
dd6a4998
JA
6268 if (priv->hw->num_vlan) {
6269 ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6270 if (ret)
6271 return ret;
6272 }
ed64639b 6273
dd6a4998 6274 return 0;
3cd1cfcb
JA
6275}
6276
6277static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6278{
6279 struct stmmac_priv *priv = netdev_priv(ndev);
6280 bool is_double = false;
ed64639b 6281 int ret;
3cd1cfcb 6282
b3dcb312
JZ
6283 ret = pm_runtime_get_sync(priv->device);
6284 if (ret < 0) {
6285 pm_runtime_put_noidle(priv->device);
6286 return ret;
6287 }
6288
3cd1cfcb
JA
6289 if (be16_to_cpu(proto) == ETH_P_8021AD)
6290 is_double = true;
6291
6292 clear_bit(vid, priv->active_vlans);
dd6a4998
JA
6293
6294 if (priv->hw->num_vlan) {
6295 ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6296 if (ret)
5ec55823 6297 goto del_vlan_error;
dd6a4998 6298 }
ed64639b 6299
5ec55823
JZ
6300 ret = stmmac_vlan_update(priv, is_double);
6301
6302del_vlan_error:
6303 pm_runtime_put(priv->device);
6304
6305 return ret;
3cd1cfcb
JA
6306}
6307
5fabb012
OBL
6308static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6309{
6310 struct stmmac_priv *priv = netdev_priv(dev);
6311
6312 switch (bpf->command) {
6313 case XDP_SETUP_PROG:
6314 return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
bba2556e
OBL
6315 case XDP_SETUP_XSK_POOL:
6316 return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6317 bpf->xsk.queue_id);
5fabb012
OBL
6318 default:
6319 return -EOPNOTSUPP;
6320 }
6321}
6322
8b278a5b
OBL
6323static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6324 struct xdp_frame **frames, u32 flags)
6325{
6326 struct stmmac_priv *priv = netdev_priv(dev);
6327 int cpu = smp_processor_id();
6328 struct netdev_queue *nq;
6329 int i, nxmit = 0;
6330 int queue;
6331
6332 if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6333 return -ENETDOWN;
6334
6335 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6336 return -EINVAL;
6337
6338 queue = stmmac_xdp_get_tx_queue(priv, cpu);
6339 nq = netdev_get_tx_queue(priv->dev, queue);
6340
6341 __netif_tx_lock(nq, cpu);
6342 /* Avoids TX time-out as we are sharing with slow path */
6343 nq->trans_start = jiffies;
6344
6345 for (i = 0; i < num_frames; i++) {
6346 int res;
6347
6348 res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6349 if (res == STMMAC_XDP_CONSUMED)
6350 break;
6351
6352 nxmit++;
6353 }
6354
6355 if (flags & XDP_XMIT_FLUSH) {
6356 stmmac_flush_tx_descriptors(priv, queue);
6357 stmmac_tx_timer_arm(priv, queue);
6358 }
6359
6360 __netif_tx_unlock(nq);
6361
6362 return nxmit;
6363}
6364
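/* Illustrative sketch (not part of the driver): the contract behind the
 * return value above. ndo_xdp_xmit() reports how many frames it queued;
 * the caller is responsible for releasing the frames that were not
 * accepted. fake_xdp_xmit() and free_frame() are hypothetical stand-ins
 * used only to model that division of work.
 */
#include <stdio.h>

static int fake_xdp_xmit(int num_frames, int ring_space)
{
	/* queue as many frames as fit, stop at the first failure */
	return num_frames < ring_space ? num_frames : ring_space;
}

static void free_frame(int idx) { printf("drop frame %d\n", idx); }

int main(void)
{
	int num_frames = 8, i;
	int sent = fake_xdp_xmit(num_frames, 5);

	for (i = sent; i < num_frames; i++)
		free_frame(i);		/* caller cleans up the remainder */

	printf("sent %d of %d\n", sent, num_frames);
	return 0;
}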
bba2556e
OBL
6365void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6366{
6367 struct stmmac_channel *ch = &priv->channel[queue];
6368 unsigned long flags;
6369
6370 spin_lock_irqsave(&ch->lock, flags);
6371 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6372 spin_unlock_irqrestore(&ch->lock, flags);
6373
6374 stmmac_stop_rx_dma(priv, queue);
6375 __free_dma_rx_desc_resources(priv, queue);
6376}
6377
6378void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6379{
6380 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
6381 struct stmmac_channel *ch = &priv->channel[queue];
6382 unsigned long flags;
6383 u32 buf_size;
6384 int ret;
6385
6386 ret = __alloc_dma_rx_desc_resources(priv, queue);
6387 if (ret) {
6388 netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6389 return;
6390 }
6391
6392 ret = __init_dma_rx_desc_rings(priv, queue, GFP_KERNEL);
6393 if (ret) {
6394 __free_dma_rx_desc_resources(priv, queue);
6395 netdev_err(priv->dev, "Failed to init RX desc.\n");
6396 return;
6397 }
6398
6399 stmmac_clear_rx_descriptors(priv, queue);
6400
6401 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6402 rx_q->dma_rx_phy, rx_q->queue_index);
6403
6404 rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6405 sizeof(struct dma_desc));
6406 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6407 rx_q->rx_tail_addr, rx_q->queue_index);
6408
6409 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6410 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6411 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6412 buf_size,
6413 rx_q->queue_index);
6414 } else {
6415 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6416 priv->dma_buf_sz,
6417 rx_q->queue_index);
6418 }
6419
6420 stmmac_start_rx_dma(priv, queue);
6421
6422 spin_lock_irqsave(&ch->lock, flags);
6423 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6424 spin_unlock_irqrestore(&ch->lock, flags);
6425}
6426
132c32ee
OBL
6427void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6428{
6429 struct stmmac_channel *ch = &priv->channel[queue];
6430 unsigned long flags;
6431
6432 spin_lock_irqsave(&ch->lock, flags);
6433 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6434 spin_unlock_irqrestore(&ch->lock, flags);
6435
6436 stmmac_stop_tx_dma(priv, queue);
6437 __free_dma_tx_desc_resources(priv, queue);
6438}
6439
6440void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6441{
6442 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
6443 struct stmmac_channel *ch = &priv->channel[queue];
6444 unsigned long flags;
6445 int ret;
6446
6447 ret = __alloc_dma_tx_desc_resources(priv, queue);
6448 if (ret) {
6449 netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6450 return;
6451 }
6452
6453 ret = __init_dma_tx_desc_rings(priv, queue);
6454 if (ret) {
6455 __free_dma_tx_desc_resources(priv, queue);
6456 netdev_err(priv->dev, "Failed to init TX desc.\n");
6457 return;
6458 }
6459
6460 stmmac_clear_tx_descriptors(priv, queue);
6461
6462 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6463 tx_q->dma_tx_phy, tx_q->queue_index);
6464
6465 if (tx_q->tbs & STMMAC_TBS_AVAIL)
6466 stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6467
6468 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6469 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6470 tx_q->tx_tail_addr, tx_q->queue_index);
6471
6472 stmmac_start_tx_dma(priv, queue);
6473
6474 spin_lock_irqsave(&ch->lock, flags);
6475 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6476 spin_unlock_irqrestore(&ch->lock, flags);
6477}
6478
072a95fd
OBL
6479void stmmac_xdp_release(struct net_device *dev)
6480{
6481 struct stmmac_priv *priv = netdev_priv(dev);
6482 u32 chan;
6483
6484 /* Disable NAPI process */
6485 stmmac_disable_all_queues(priv);
6486
6487 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6488 hrtimer_cancel(&priv->tx_queue[chan].txtimer);
6489
6490 /* Free the IRQ lines */
6491 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6492
6493 /* Stop TX/RX DMA channels */
6494 stmmac_stop_all_dma(priv);
6495
6496 /* Release and free the Rx/Tx resources */
6497 free_dma_desc_resources(priv);
6498
6499 /* Disable the MAC Rx/Tx */
6500 stmmac_mac_set(priv, priv->ioaddr, false);
6501
6502 /* set trans_start so we don't get spurious
6503 * watchdogs during reset
6504 */
6505 netif_trans_update(dev);
6506 netif_carrier_off(dev);
6507}
6508
6509int stmmac_xdp_open(struct net_device *dev)
6510{
6511 struct stmmac_priv *priv = netdev_priv(dev);
6512 u32 rx_cnt = priv->plat->rx_queues_to_use;
6513 u32 tx_cnt = priv->plat->tx_queues_to_use;
6514 u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6515 struct stmmac_rx_queue *rx_q;
6516 struct stmmac_tx_queue *tx_q;
6517 u32 buf_size;
6518 bool sph_en;
6519 u32 chan;
6520 int ret;
6521
6522 ret = alloc_dma_desc_resources(priv);
6523 if (ret < 0) {
6524 netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6525 __func__);
6526 goto dma_desc_error;
6527 }
6528
6529 ret = init_dma_desc_rings(dev, GFP_KERNEL);
6530 if (ret < 0) {
6531 netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6532 __func__);
6533 goto init_error;
6534 }
6535
6536 /* DMA CSR Channel configuration */
1ea4043a 6537 for (chan = 0; chan < dma_csr_ch; chan++) {
072a95fd 6538 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
1ea4043a
VW
6539 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6540 }
072a95fd
OBL
6541
6542 /* Adjust Split header */
6543 sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6544
6545 /* DMA RX Channel Configuration */
6546 for (chan = 0; chan < rx_cnt; chan++) {
6547 rx_q = &priv->rx_queue[chan];
6548
6549 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6550 rx_q->dma_rx_phy, chan);
6551
6552 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6553 (rx_q->buf_alloc_num *
6554 sizeof(struct dma_desc));
6555 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6556 rx_q->rx_tail_addr, chan);
6557
6558 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6559 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6560 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6561 buf_size,
6562 rx_q->queue_index);
6563 } else {
6564 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6565 priv->dma_buf_sz,
6566 rx_q->queue_index);
6567 }
6568
6569 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6570 }
6571
6572 /* DMA TX Channel Configuration */
6573 for (chan = 0; chan < tx_cnt; chan++) {
6574 tx_q = &priv->tx_queue[chan];
6575
6576 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6577 tx_q->dma_tx_phy, chan);
6578
6579 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6580 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6581 tx_q->tx_tail_addr, chan);
6582 }
6583
6584 /* Enable the MAC Rx/Tx */
6585 stmmac_mac_set(priv, priv->ioaddr, true);
6586
6587 /* Start Rx & Tx DMA Channels */
6588 stmmac_start_all_dma(priv);
6589
6590 stmmac_init_coalesce(priv);
6591
6592 ret = stmmac_request_irq(dev);
6593 if (ret)
6594 goto irq_error;
6595
 6596 /* Enable NAPI process */
6597 stmmac_enable_all_queues(priv);
6598 netif_carrier_on(dev);
6599 netif_tx_start_all_queues(dev);
1ea4043a 6600 stmmac_enable_all_dma_irq(priv);
072a95fd
OBL
6601
6602 return 0;
6603
6604irq_error:
6605 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6606 hrtimer_cancel(&priv->tx_queue[chan].txtimer);
6607
6608 stmmac_hw_teardown(dev);
6609init_error:
6610 free_dma_desc_resources(priv);
6611dma_desc_error:
6612 return ret;
6613}
6614
bba2556e
OBL
6615int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6616{
6617 struct stmmac_priv *priv = netdev_priv(dev);
6618 struct stmmac_rx_queue *rx_q;
132c32ee 6619 struct stmmac_tx_queue *tx_q;
bba2556e
OBL
6620 struct stmmac_channel *ch;
6621
6622 if (test_bit(STMMAC_DOWN, &priv->state) ||
6623 !netif_carrier_ok(priv->dev))
6624 return -ENETDOWN;
6625
6626 if (!stmmac_xdp_is_enabled(priv))
6627 return -ENXIO;
6628
132c32ee
OBL
6629 if (queue >= priv->plat->rx_queues_to_use ||
6630 queue >= priv->plat->tx_queues_to_use)
bba2556e
OBL
6631 return -EINVAL;
6632
6633 rx_q = &priv->rx_queue[queue];
132c32ee 6634 tx_q = &priv->tx_queue[queue];
bba2556e
OBL
6635 ch = &priv->channel[queue];
6636
132c32ee 6637 if (!rx_q->xsk_pool && !tx_q->xsk_pool)
bba2556e
OBL
6638 return -ENXIO;
6639
132c32ee 6640 if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
bba2556e
OBL
6641 /* EQoS does not have per-DMA channel SW interrupt,
 6642 * so we schedule the RX/TX NAPI straight away.
6643 */
132c32ee
OBL
6644 if (likely(napi_schedule_prep(&ch->rxtx_napi)))
6645 __napi_schedule(&ch->rxtx_napi);
bba2556e
OBL
6646 }
6647
6648 return 0;
6649}
6650
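/* Illustrative sketch (not part of the driver): the user-space half of
 * the wakeup path that ends in stmmac_xsk_wakeup() above. It assumes an
 * AF_XDP socket created with the libxdp/libbpf xsk helpers (header
 * assumed to be <xdp/xsk.h>); these are fragments meant to be dropped
 * into such an application, not a complete program.
 */
#include <poll.h>
#include <sys/socket.h>
#include <xdp/xsk.h>

static void kick_tx_if_needed(struct xsk_socket *xsk, struct xsk_ring_prod *tx)
{
	/* Only issue the syscall when the driver asked to be woken up */
	if (xsk_ring_prod__needs_wakeup(tx))
		sendto(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
}

static void wait_rx_if_needed(struct xsk_socket *xsk, struct xsk_ring_prod *fq)
{
	struct pollfd pfd = {
		.fd = xsk_socket__fd(xsk),
		.events = POLLIN,
	};

	/* Sleep in poll() only when the fill queue requested a wakeup */
	if (xsk_ring_prod__needs_wakeup(fq))
		poll(&pfd, 1, 1000);
}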
47dd7a54
GC
6651static const struct net_device_ops stmmac_netdev_ops = {
6652 .ndo_open = stmmac_open,
6653 .ndo_start_xmit = stmmac_xmit,
6654 .ndo_stop = stmmac_release,
6655 .ndo_change_mtu = stmmac_change_mtu,
5e982f3b 6656 .ndo_fix_features = stmmac_fix_features,
d2afb5bd 6657 .ndo_set_features = stmmac_set_features,
01789349 6658 .ndo_set_rx_mode = stmmac_set_rx_mode,
47dd7a54 6659 .ndo_tx_timeout = stmmac_tx_timeout,
a7605370 6660 .ndo_eth_ioctl = stmmac_ioctl,
4dbbe8dd 6661 .ndo_setup_tc = stmmac_setup_tc,
4993e5b3 6662 .ndo_select_queue = stmmac_select_queue,
47dd7a54
GC
6663#ifdef CONFIG_NET_POLL_CONTROLLER
6664 .ndo_poll_controller = stmmac_poll_controller,
6665#endif
a830405e 6666 .ndo_set_mac_address = stmmac_set_mac_address,
3cd1cfcb
JA
6667 .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
6668 .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
5fabb012 6669 .ndo_bpf = stmmac_bpf,
8b278a5b 6670 .ndo_xdp_xmit = stmmac_xdp_xmit,
bba2556e 6671 .ndo_xsk_wakeup = stmmac_xsk_wakeup,
47dd7a54
GC
6672};
6673
34877a15
JA
6674static void stmmac_reset_subtask(struct stmmac_priv *priv)
6675{
6676 if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
6677 return;
6678 if (test_bit(STMMAC_DOWN, &priv->state))
6679 return;
6680
6681 netdev_err(priv->dev, "Reset adapter.\n");
6682
6683 rtnl_lock();
6684 netif_trans_update(priv->dev);
6685 while (test_and_set_bit(STMMAC_RESETING, &priv->state))
6686 usleep_range(1000, 2000);
6687
6688 set_bit(STMMAC_DOWN, &priv->state);
6689 dev_close(priv->dev);
00f54e68 6690 dev_open(priv->dev, NULL);
34877a15
JA
6691 clear_bit(STMMAC_DOWN, &priv->state);
6692 clear_bit(STMMAC_RESETING, &priv->state);
6693 rtnl_unlock();
6694}
6695
6696static void stmmac_service_task(struct work_struct *work)
6697{
6698 struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
6699 service_task);
6700
6701 stmmac_reset_subtask(priv);
6702 clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
6703}
6704
cf3f047b
GC
6705/**
6706 * stmmac_hw_init - Init the MAC device
32ceabca 6707 * @priv: driver private structure
732fdf0e
GC
6708 * Description: this function is to configure the MAC device according to
6709 * some platform parameters or the HW capability register. It prepares the
 6710 * driver to use either ring or chain modes and to set up either enhanced or
6711 * normal descriptors.
cf3f047b
GC
6712 */
6713static int stmmac_hw_init(struct stmmac_priv *priv)
6714{
5f0456b4 6715 int ret;
cf3f047b 6716
9f93ac8d
LC
 6717 /* dwmac-sun8i only works in chain mode */
6718 if (priv->plat->has_sun8i)
6719 chain_mode = 1;
5f0456b4 6720 priv->chain_mode = chain_mode;
9f93ac8d 6721
5f0456b4
JA
6722 /* Initialize HW Interface */
6723 ret = stmmac_hwif_init(priv);
6724 if (ret)
6725 return ret;
4a7d666a 6726
cf3f047b
GC
6727 /* Get the HW capability (available on GMAC cores newer than 3.50a) */
6728 priv->hw_cap_support = stmmac_get_hw_features(priv);
6729 if (priv->hw_cap_support) {
38ddc59d 6730 dev_info(priv->device, "DMA HW capability register supported\n");
cf3f047b
GC
6731
6732 /* We can override some gmac/dma configuration fields (e.g.
6733 * enh_desc, tx_coe) that are passed through the
6734 * platform with the values from the HW capability
6735 * register (if supported).
6736 */
6737 priv->plat->enh_desc = priv->dma_cap.enh_desc;
5a9b876e
LPL
6738 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
6739 !priv->plat->use_phy_wol;
3fe5cadb 6740 priv->hw->pmt = priv->plat->pmt;
b8ef7020
BH
6741 if (priv->dma_cap.hash_tb_sz) {
6742 priv->hw->multicast_filter_bins =
6743 (BIT(priv->dma_cap.hash_tb_sz) << 5);
6744 priv->hw->mcast_bits_log2 =
6745 ilog2(priv->hw->multicast_filter_bins);
6746 }
38912bdb 6747
a8df35d4
EG
6748 /* TXCOE doesn't work in thresh DMA mode */
6749 if (priv->plat->force_thresh_dma_mode)
6750 priv->plat->tx_coe = 0;
6751 else
6752 priv->plat->tx_coe = priv->dma_cap.tx_coe;
6753
f748be53
AT
6754 /* In case of GMAC4, rx_coe comes from the HW cap register. */
6755 priv->plat->rx_coe = priv->dma_cap.rx_coe;
38912bdb
DS
6756
6757 if (priv->dma_cap.rx_coe_type2)
6758 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
6759 else if (priv->dma_cap.rx_coe_type1)
6760 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
6761
38ddc59d
LC
6762 } else {
6763 dev_info(priv->device, "No HW DMA feature register supported\n");
6764 }
cf3f047b 6765
d2afb5bd
GC
6766 if (priv->plat->rx_coe) {
6767 priv->hw->rx_csum = priv->plat->rx_coe;
38ddc59d 6768 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
f748be53 6769 if (priv->synopsys_id < DWMAC_CORE_4_00)
38ddc59d 6770 dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
d2afb5bd 6771 }
cf3f047b 6772 if (priv->plat->tx_coe)
38ddc59d 6773 dev_info(priv->device, "TX Checksum insertion supported\n");
cf3f047b
GC
6774
6775 if (priv->plat->pmt) {
38ddc59d 6776 dev_info(priv->device, "Wake-Up On Lan supported\n");
cf3f047b
GC
6777 device_set_wakeup_capable(priv->device, 1);
6778 }
6779
f748be53 6780 if (priv->dma_cap.tsoen)
38ddc59d 6781 dev_info(priv->device, "TSO supported\n");
f748be53 6782
e0f9956a
CKT
6783 priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
6784 priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
6785
7cfde0af
JA
6786 /* Run HW quirks, if any */
6787 if (priv->hwif_quirks) {
6788 ret = priv->hwif_quirks(priv);
6789 if (ret)
6790 return ret;
6791 }
6792
3b509466
JA
6793 /* The Rx Watchdog is available in cores newer than 3.40.
6794 * In some cases, for example on buggy HW, this feature
6795 * has to be disabled, and this can be done by passing the
6796 * riwt_off field from the platform.
6797 */
6798 if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
6799 (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
6800 priv->use_riwt = 1;
6801 dev_info(priv->device,
6802 "Enable RX Mitigation via HW Watchdog Timer\n");
6803 }
6804
c24602ef 6805 return 0;
cf3f047b
GC
6806}
6807
0366f7e0
OBL
6808static void stmmac_napi_add(struct net_device *dev)
6809{
6810 struct stmmac_priv *priv = netdev_priv(dev);
6811 u32 queue, maxq;
6812
6813 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
6814
6815 for (queue = 0; queue < maxq; queue++) {
6816 struct stmmac_channel *ch = &priv->channel[queue];
6817
6818 ch->priv_data = priv;
6819 ch->index = queue;
2b94f526 6820 spin_lock_init(&ch->lock);
0366f7e0
OBL
6821
6822 if (queue < priv->plat->rx_queues_to_use) {
6823 netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx,
6824 NAPI_POLL_WEIGHT);
6825 }
6826 if (queue < priv->plat->tx_queues_to_use) {
6827 netif_tx_napi_add(dev, &ch->tx_napi,
6828 stmmac_napi_poll_tx,
6829 NAPI_POLL_WEIGHT);
6830 }
132c32ee
OBL
6831 if (queue < priv->plat->rx_queues_to_use &&
6832 queue < priv->plat->tx_queues_to_use) {
6833 netif_napi_add(dev, &ch->rxtx_napi,
6834 stmmac_napi_poll_rxtx,
6835 NAPI_POLL_WEIGHT);
6836 }
0366f7e0
OBL
6837 }
6838}
6839
6840static void stmmac_napi_del(struct net_device *dev)
6841{
6842 struct stmmac_priv *priv = netdev_priv(dev);
6843 u32 queue, maxq;
6844
6845 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
6846
6847 for (queue = 0; queue < maxq; queue++) {
6848 struct stmmac_channel *ch = &priv->channel[queue];
6849
6850 if (queue < priv->plat->rx_queues_to_use)
6851 netif_napi_del(&ch->rx_napi);
6852 if (queue < priv->plat->tx_queues_to_use)
6853 netif_napi_del(&ch->tx_napi);
132c32ee
OBL
6854 if (queue < priv->plat->rx_queues_to_use &&
6855 queue < priv->plat->tx_queues_to_use) {
6856 netif_napi_del(&ch->rxtx_napi);
6857 }
0366f7e0
OBL
6858 }
6859}
6860
6861int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
6862{
6863 struct stmmac_priv *priv = netdev_priv(dev);
6864 int ret = 0;
6865
6866 if (netif_running(dev))
6867 stmmac_release(dev);
6868
6869 stmmac_napi_del(dev);
6870
6871 priv->plat->rx_queues_to_use = rx_cnt;
6872 priv->plat->tx_queues_to_use = tx_cnt;
6873
6874 stmmac_napi_add(dev);
6875
6876 if (netif_running(dev))
6877 ret = stmmac_open(dev);
6878
6879 return ret;
6880}
6881
aa042f60
SYS
6882int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
6883{
6884 struct stmmac_priv *priv = netdev_priv(dev);
6885 int ret = 0;
6886
6887 if (netif_running(dev))
6888 stmmac_release(dev);
6889
6890 priv->dma_rx_size = rx_size;
6891 priv->dma_tx_size = tx_size;
6892
6893 if (netif_running(dev))
6894 ret = stmmac_open(dev);
6895
6896 return ret;
6897}
6898
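
stmmac_reinit_queues() and stmmac_reinit_ringparam() are the back ends for ethtool channel and ring resizing. A simplified sketch of how the driver's ethtool callbacks (in stmmac_ethtool.c) would funnel into them; the real callbacks add validation, and the callback signatures shown match this kernel generation:

/* Simplified sketch of the ethtool glue; validation omitted. */
static int example_set_channels(struct net_device *dev,
				struct ethtool_channels *ch)
{
	return stmmac_reinit_queues(dev, ch->rx_count, ch->tx_count);
}

static int example_set_ringparam(struct net_device *dev,
				 struct ethtool_ringparam *ring)
{
	return stmmac_reinit_ringparam(dev, ring->rx_pending,
				       ring->tx_pending);
}

In practice these paths are reached via "ethtool -L <iface> rx N tx N" and "ethtool -G <iface> rx N tx N".
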
5a558611
OBL
6899#define SEND_VERIFY_MPAKCET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
6900static void stmmac_fpe_lp_task(struct work_struct *work)
6901{
6902 struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
6903 fpe_task);
6904 struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
6905 enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
6906 enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
6907 bool *hs_enable = &fpe_cfg->hs_enable;
6908 bool *enable = &fpe_cfg->enable;
6909 int retries = 20;
6910
6911 while (retries-- > 0) {
6912 /* Bail out immediately if FPE handshake is OFF */
6913 if (*lo_state == FPE_STATE_OFF || !*hs_enable)
6914 break;
6915
6916 if (*lo_state == FPE_STATE_ENTERING_ON &&
6917 *lp_state == FPE_STATE_ENTERING_ON) {
6918 stmmac_fpe_configure(priv, priv->ioaddr,
6919 priv->plat->tx_queues_to_use,
6920 priv->plat->rx_queues_to_use,
6921 *enable);
6922
6923 netdev_info(priv->dev, "configured FPE\n");
6924
6925 *lo_state = FPE_STATE_ON;
6926 *lp_state = FPE_STATE_ON;
6927 netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
6928 break;
6929 }
6930
6931 if ((*lo_state == FPE_STATE_CAPABLE ||
6932 *lo_state == FPE_STATE_ENTERING_ON) &&
6933 *lp_state != FPE_STATE_ON) {
6934 netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT,
6935 *lo_state, *lp_state);
6936 stmmac_fpe_send_mpacket(priv, priv->ioaddr,
6937 MPACKET_VERIFY);
6938 }
6939 /* Sleep then retry */
6940 msleep(500);
6941 }
6942
6943 clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
6944}
6945
6946void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
6947{
6948 if (priv->plat->fpe_cfg->hs_enable != enable) {
6949 if (enable) {
6950 stmmac_fpe_send_mpacket(priv, priv->ioaddr,
6951 MPACKET_VERIFY);
6952 } else {
6953 priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
6954 priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
6955 }
6956
6957 priv->plat->fpe_cfg->hs_enable = enable;
6958 }
6959}
6960
47dd7a54 6961/**
bfab27a1
GC
6962 * stmmac_dvr_probe
6963 * @device: device pointer
ff3dd78c 6964 * @plat_dat: platform data pointer
e56788cf 6965 * @res: stmmac resource pointer
bfab27a1
GC
6966 * Description: this is the main probe function: it allocates the
6967 * net_device (via alloc_etherdev) and the driver private structure.
9afec6ef 6968 * Return:
15ffac73 6969 * 0 on success, otherwise a negative errno.
47dd7a54 6970 */
15ffac73
JE
6971int stmmac_dvr_probe(struct device *device,
6972 struct plat_stmmacenet_data *plat_dat,
6973 struct stmmac_resources *res)
47dd7a54 6974{
bfab27a1
GC
6975 struct net_device *ndev = NULL;
6976 struct stmmac_priv *priv;
0366f7e0 6977 u32 rxq;
76067459 6978 int i, ret = 0;
47dd7a54 6979
9737070c
JZ
6980 ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
6981 MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
41de8d4c 6982 if (!ndev)
15ffac73 6983 return -ENOMEM;
bfab27a1
GC
6984
6985 SET_NETDEV_DEV(ndev, device);
6986
6987 priv = netdev_priv(ndev);
6988 priv->device = device;
6989 priv->dev = ndev;
47dd7a54 6990
bfab27a1 6991 stmmac_set_ethtool_ops(ndev);
cf3f047b
GC
6992 priv->pause = pause;
6993 priv->plat = plat_dat;
e56788cf
JE
6994 priv->ioaddr = res->addr;
6995 priv->dev->base_addr = (unsigned long)res->addr;
6ccf12ae 6996 priv->plat->dma_cfg->multi_msi_en = priv->plat->multi_msi_en;
e56788cf
JE
6997
6998 priv->dev->irq = res->irq;
6999 priv->wol_irq = res->wol_irq;
7000 priv->lpi_irq = res->lpi_irq;
8532f613
OBL
7001 priv->sfty_ce_irq = res->sfty_ce_irq;
7002 priv->sfty_ue_irq = res->sfty_ue_irq;
7003 for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7004 priv->rx_irq[i] = res->rx_irq[i];
7005 for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7006 priv->tx_irq[i] = res->tx_irq[i];
e56788cf 7007
83216e39 7008 if (!is_zero_ether_addr(res->mac))
e56788cf 7009 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
cf3f047b 7010
a7a62685 7011 dev_set_drvdata(device, priv->dev);
803f8fc4 7012
cf3f047b
GC
7013 /* Verify driver arguments */
7014 stmmac_verify_args();
bfab27a1 7015
bba2556e
OBL
7016 priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7017 if (!priv->af_xdp_zc_qps)
7018 return -ENOMEM;
7019
34877a15
JA
7020 /* Allocate workqueue */
7021 priv->wq = create_singlethread_workqueue("stmmac_wq");
7022 if (!priv->wq) {
7023 dev_err(priv->device, "failed to create workqueue\n");
9737070c 7024 return -ENOMEM;
34877a15
JA
7025 }
7026
7027 INIT_WORK(&priv->service_task, stmmac_service_task);
7028
5a558611
OBL
7029 /* Initialize Link Partner FPE workqueue */
7030 INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
7031
cf3f047b 7032 /* Override with kernel parameters if supplied XXX CRS XXX
ceb69499
GC
7033 * this needs to have multiple instances
7034 */
cf3f047b
GC
7035 if ((phyaddr >= 0) && (phyaddr <= 31))
7036 priv->plat->phy_addr = phyaddr;
7037
90f522a2
EP
7038 if (priv->plat->stmmac_rst) {
7039 ret = reset_control_assert(priv->plat->stmmac_rst);
f573c0b9 7040 reset_control_deassert(priv->plat->stmmac_rst);
90f522a2
EP
7041 /* Some reset controllers have only reset callback instead of
7042 * assert + deassert callbacks pair.
7043 */
7044 if (ret == -ENOTSUPP)
7045 reset_control_reset(priv->plat->stmmac_rst);
7046 }
c5e4ddbd 7047
e67f325e
MH
7048 ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7049 if (ret == -ENOTSUPP)
7050 dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7051 ERR_PTR(ret));
7052
cf3f047b 7053 /* Init MAC and get the capabilities */
c24602ef
GC
7054 ret = stmmac_hw_init(priv);
7055 if (ret)
62866e98 7056 goto error_hw_init;
cf3f047b 7057
96874c61
MABI
7058 /* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7059 */
7060 if (priv->synopsys_id < DWMAC_CORE_5_20)
7061 priv->plat->dma_cfg->dche = false;
7062
b561af36
VK
7063 stmmac_check_ether_addr(priv);
7064
cf3f047b 7065 ndev->netdev_ops = &stmmac_netdev_ops;
bfab27a1 7066
cf3f047b
GC
7067 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7068 NETIF_F_RXCSUM;
f748be53 7069
4dbbe8dd
JA
7070 ret = stmmac_tc_init(priv, priv);
7071 if (!ret) {
7072 ndev->hw_features |= NETIF_F_HW_TC;
7073 }
7074
f748be53 7075 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
9edfa7da 7076 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
b7766206
JA
7077 if (priv->plat->has_gmac4)
7078 ndev->hw_features |= NETIF_F_GSO_UDP_L4;
f748be53 7079 priv->tso = true;
38ddc59d 7080 dev_info(priv->device, "TSO feature enabled\n");
f748be53 7081 }
a993db88 7082
67afd6d1
JA
7083 if (priv->dma_cap.sphen) {
7084 ndev->hw_features |= NETIF_F_GRO;
d08d32d1
OBL
7085 priv->sph_cap = true;
7086 priv->sph = priv->sph_cap;
67afd6d1
JA
7087 dev_info(priv->device, "SPH feature enabled\n");
7088 }
7089
f119cc98
FD
7090 /* The current IP register MAC_HW_Feature1[ADDR64] only defines
7091 * 32/40/64-bit widths, but some SoCs support other widths: e.g. i.MX8MP
7092 * supports 34 bits, which maps to a 40-bit width in MAC_HW_Feature1[ADDR64].
7093 * So overwrite dma_cap.addr64 according to the real HW design.
7094 */
7095 if (priv->plat->addr64)
7096 priv->dma_cap.addr64 = priv->plat->addr64;
7097
a993db88
JA
7098 if (priv->dma_cap.addr64) {
7099 ret = dma_set_mask_and_coherent(device,
7100 DMA_BIT_MASK(priv->dma_cap.addr64));
7101 if (!ret) {
7102 dev_info(priv->device, "Using %d bits DMA width\n",
7103 priv->dma_cap.addr64);
968a2978
TR
7104
7105 /*
7106 * If more than 32 bits can be addressed, make sure to
7107 * enable enhanced addressing mode.
7108 */
7109 if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7110 priv->plat->dma_cfg->eame = true;
a993db88
JA
7111 } else {
7112 ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7113 if (ret) {
7114 dev_err(priv->device, "Failed to set DMA Mask\n");
7115 goto error_hw_init;
7116 }
7117
7118 priv->dma_cap.addr64 = 32;
7119 }
7120 }
7121
bfab27a1
GC
7122 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7123 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
47dd7a54
GC
7124#ifdef STMMAC_VLAN_TAG_USED
7125 /* Both mac100 and gmac support receive VLAN tag detection */
ab188e8f 7126 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
3cd1cfcb
JA
7127 if (priv->dma_cap.vlhash) {
7128 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7129 ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7130 }
30d93227
JA
7131 if (priv->dma_cap.vlins) {
7132 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7133 if (priv->dma_cap.dvlan)
7134 ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7135 }
47dd7a54
GC
7136#endif
7137 priv->msg_enable = netif_msg_init(debug, default_msg_level);
7138
76067459
JA
7139 /* Initialize RSS */
7140 rxq = priv->plat->rx_queues_to_use;
7141 netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7142 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7143 priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7144
7145 if (priv->dma_cap.rssen && priv->plat->rss_en)
7146 ndev->features |= NETIF_F_RXHASH;
7147
44770e11
JW
7148 /* MTU range: 46 - hw-specific max */
7149 ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
56bcd591 7150 if (priv->plat->has_xgmac)
7d9e6c5a 7151 ndev->max_mtu = XGMAC_JUMBO_LEN;
56bcd591
JA
7152 else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7153 ndev->max_mtu = JUMBO_LEN;
44770e11
JW
7154 else
7155 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
a2cd64f3
KHL
7156 /* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
7157 * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
7158 */
7159 if ((priv->plat->maxmtu < ndev->max_mtu) &&
7160 (priv->plat->maxmtu >= ndev->min_mtu))
44770e11 7161 ndev->max_mtu = priv->plat->maxmtu;
a2cd64f3 7162 else if (priv->plat->maxmtu < ndev->min_mtu)
b618ab45
HK
7163 dev_warn(priv->device,
7164 "%s: warning: maxmtu having invalid value (%d)\n",
7165 __func__, priv->plat->maxmtu);
44770e11 7166
47dd7a54
GC
7167 if (flow_ctrl)
7168 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
7169
8fce3331 7170 /* Setup channels NAPI */
0366f7e0 7171 stmmac_napi_add(ndev);
47dd7a54 7172
29555fa3 7173 mutex_init(&priv->lock);
f8e96161 7174
cd7201f4
GC
7175 /* If a specific clk_csr value is passed from the platform,
7176 * it means that the CSR Clock Range selection cannot be
7177 * changed at run-time and is fixed. Otherwise the driver will try to
7178 * set the MDC clock dynamically according to the actual csr
7179 * clock input.
7180 */
5e7f7fc5 7181 if (priv->plat->clk_csr >= 0)
cd7201f4 7182 priv->clk_csr = priv->plat->clk_csr;
5e7f7fc5
BH
7183 else
7184 stmmac_clk_csr_set(priv);
cd7201f4 7185
e58bb43f
GC
7186 stmmac_check_pcs_mode(priv);
7187
5ec55823
JZ
7188 pm_runtime_get_noresume(device);
7189 pm_runtime_set_active(device);
7190 pm_runtime_enable(device);
7191
a47b9e15 7192 if (priv->hw->pcs != STMMAC_PCS_TBI &&
3fe5cadb 7193 priv->hw->pcs != STMMAC_PCS_RTBI) {
e58bb43f
GC
7194 /* MDIO bus Registration */
7195 ret = stmmac_mdio_register(ndev);
7196 if (ret < 0) {
b618ab45
HK
7197 dev_err(priv->device,
7198 "%s: MDIO bus (id: %d) registration failed",
7199 __func__, priv->plat->bus_id);
e58bb43f
GC
7200 goto error_mdio_register;
7201 }
4bfcbd7a
FV
7202 }
7203
46682cb8
VW
7204 if (priv->plat->speed_mode_2500)
7205 priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7206
7413f9a6
VO
7207 if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
7208 ret = stmmac_xpcs_setup(priv->mii);
7209 if (ret)
7210 goto error_xpcs_setup;
597a68ce
VW
7211 }
7212
74371272
JA
7213 ret = stmmac_phy_setup(priv);
7214 if (ret) {
7215 netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7216 goto error_phy_setup;
7217 }
7218
57016590 7219 ret = register_netdev(ndev);
b2eb09af 7220 if (ret) {
b618ab45
HK
7221 dev_err(priv->device, "%s: ERROR %i registering the device\n",
7222 __func__, ret);
b2eb09af
FF
7223 goto error_netdev_register;
7224 }
57016590 7225
b9663b7c
VW
7226 if (priv->plat->serdes_powerup) {
7227 ret = priv->plat->serdes_powerup(ndev,
7228 priv->plat->bsp_priv);
7229
7230 if (ret < 0)
801eb050 7231 goto error_serdes_powerup;
b9663b7c
VW
7232 }
7233
5f2b8b62 7234#ifdef CONFIG_DEBUG_FS
8d72ab11 7235 stmmac_init_fs(ndev);
5f2b8b62
TR
7236#endif
7237
97bbc968
BS
7238 if (priv->plat->dump_debug_regs)
7239 priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7240
5ec55823
JZ
7241 /* Let pm_runtime_put() disable the clocks.
7242 * If CONFIG_PM is not enabled, the clocks will stay powered.
7243 */
7244 pm_runtime_put(device);
7245
57016590 7246 return ret;
47dd7a54 7247
801eb050
AS
7248error_serdes_powerup:
7249 unregister_netdev(ndev);
6a81c26f 7250error_netdev_register:
74371272 7251 phylink_destroy(priv->phylink);
597a68ce 7252error_xpcs_setup:
74371272 7253error_phy_setup:
a47b9e15 7254 if (priv->hw->pcs != STMMAC_PCS_TBI &&
b2eb09af
FF
7255 priv->hw->pcs != STMMAC_PCS_RTBI)
7256 stmmac_mdio_unregister(ndev);
6a81c26f 7257error_mdio_register:
0366f7e0 7258 stmmac_napi_del(ndev);
62866e98 7259error_hw_init:
34877a15 7260 destroy_workqueue(priv->wq);
d7f576dc 7261 bitmap_free(priv->af_xdp_zc_qps);
47dd7a54 7262
15ffac73 7263 return ret;
47dd7a54 7264}
b2e2f0c7 7265EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
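
stmmac_dvr_probe() is exported because the actual device binding lives in per-bus glue drivers (stmmac_platform, stmmac_pci and the dwmac-* variants). A deliberately minimal sketch of such a caller, with hypothetical names and the platform data obtained in a simplified way; real glue drivers build plat_stmmacenet_data from DT or PCI config instead:

/* Hypothetical glue probe; the "macirq" resource name follows the
 * stmmac bindings, everything else here is illustrative.
 */
static int example_glue_probe(struct platform_device *pdev)
{
	struct plat_stmmacenet_data *plat = dev_get_platdata(&pdev->dev);
	struct stmmac_resources res = { };

	res.addr = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(res.addr))
		return PTR_ERR(res.addr);

	res.irq = platform_get_irq_byname(pdev, "macirq");
	if (res.irq < 0)
		return res.irq;

	return stmmac_dvr_probe(&pdev->dev, plat, &res);
}
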
47dd7a54
GC
7266
7267/**
7268 * stmmac_dvr_remove
f4e7bd81 7269 * @dev: device pointer
47dd7a54 7270 * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
bfab27a1 7271 * changes the link status, and releases the DMA descriptor rings.
47dd7a54 7272 */
f4e7bd81 7273int stmmac_dvr_remove(struct device *dev)
47dd7a54 7274{
f4e7bd81 7275 struct net_device *ndev = dev_get_drvdata(dev);
aec7ff27 7276 struct stmmac_priv *priv = netdev_priv(ndev);
47dd7a54 7277
38ddc59d 7278 netdev_info(priv->dev, "%s: removing driver", __func__);
47dd7a54 7279
3ec9c541
JZ
7280 pm_runtime_get_sync(dev);
7281 pm_runtime_disable(dev);
7282 pm_runtime_put_noidle(dev);
7283
ae4f0d46 7284 stmmac_stop_all_dma(priv);
9a7b3950
OBL
7285 stmmac_mac_set(priv, priv->ioaddr, false);
7286 netif_carrier_off(ndev);
7287 unregister_netdev(ndev);
47dd7a54 7288
9a7b3950
OBL
7289 /* Serdes power down needs to happen after the VLAN filter
7290 * is deleted, which is triggered by unregister_netdev().
7291 */
b9663b7c
VW
7292 if (priv->plat->serdes_powerdown)
7293 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7294
474a31e1
AK
7295#ifdef CONFIG_DEBUG_FS
7296 stmmac_exit_fs(ndev);
7297#endif
74371272 7298 phylink_destroy(priv->phylink);
f573c0b9 7299 if (priv->plat->stmmac_rst)
7300 reset_control_assert(priv->plat->stmmac_rst);
e67f325e 7301 reset_control_assert(priv->plat->stmmac_ahb_rst);
a47b9e15 7302 if (priv->hw->pcs != STMMAC_PCS_TBI &&
3fe5cadb 7303 priv->hw->pcs != STMMAC_PCS_RTBI)
e743471f 7304 stmmac_mdio_unregister(ndev);
34877a15 7305 destroy_workqueue(priv->wq);
29555fa3 7306 mutex_destroy(&priv->lock);
d7f576dc 7307 bitmap_free(priv->af_xdp_zc_qps);
47dd7a54
GC
7308
7309 return 0;
7310}
b2e2f0c7 7311EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
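
The remove path is exported for the same reason as the probe path; a glue driver's .remove callback typically reduces to a call of this shape (illustrative name, real glue drivers add clock and regulator teardown around it):

static int example_glue_remove(struct platform_device *pdev)
{
	return stmmac_dvr_remove(&pdev->dev);
}
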
47dd7a54 7312
732fdf0e
GC
7313/**
7314 * stmmac_suspend - suspend callback
f4e7bd81 7315 * @dev: device pointer
732fdf0e
GC
7316 * Description: this function suspends the device; it is called
7317 * by the platform driver to stop the network queue, release the resources,
7318 * program the PMT register (for WoL), and clean up driver resources.
7319 */
f4e7bd81 7320int stmmac_suspend(struct device *dev)
47dd7a54 7321{
f4e7bd81 7322 struct net_device *ndev = dev_get_drvdata(dev);
874bd42d 7323 struct stmmac_priv *priv = netdev_priv(ndev);
14b41a29 7324 u32 chan;
47dd7a54 7325
874bd42d 7326 if (!ndev || !netif_running(ndev))
47dd7a54
GC
7327 return 0;
7328
134cc4ce 7329 mutex_lock(&priv->lock);
19e13cb2 7330
874bd42d 7331 netif_device_detach(ndev);
47dd7a54 7332
c22a3f48 7333 stmmac_disable_all_queues(priv);
874bd42d 7334
14b41a29 7335 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
d5a05e69 7336 hrtimer_cancel(&priv->tx_queue[chan].txtimer);
14b41a29 7337
5f585913
FD
7338 if (priv->eee_enabled) {
7339 priv->tx_path_in_lpi_mode = false;
7340 del_timer_sync(&priv->eee_ctrl_timer);
7341 }
7342
874bd42d 7343 /* Stop TX/RX DMA */
ae4f0d46 7344 stmmac_stop_all_dma(priv);
c24602ef 7345
b9663b7c
VW
7346 if (priv->plat->serdes_powerdown)
7347 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7348
874bd42d 7349 /* Enable Power down mode by programming the PMT regs */
e8377e7a 7350 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
c10d4c82 7351 stmmac_pmt(priv, priv->hw, priv->wolopts);
89f7f2cf
SK
7352 priv->irq_wake = 1;
7353 } else {
c10d4c82 7354 stmmac_mac_set(priv, priv->ioaddr, false);
db88f10a 7355 pinctrl_pm_select_sleep_state(priv->device);
ba1377ff 7356 }
5a558611 7357
29555fa3 7358 mutex_unlock(&priv->lock);
2d871aa0 7359
90702dcd
JZ
7360 rtnl_lock();
7361 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7362 phylink_suspend(priv->phylink, true);
7363 } else {
7364 if (device_may_wakeup(priv->device))
7365 phylink_speed_down(priv->phylink, false);
7366 phylink_suspend(priv->phylink, false);
7367 }
7368 rtnl_unlock();
7369
5a558611
OBL
7370 if (priv->dma_cap.fpesel) {
7371 /* Disable FPE */
7372 stmmac_fpe_configure(priv, priv->ioaddr,
7373 priv->plat->tx_queues_to_use,
7374 priv->plat->rx_queues_to_use, false);
7375
7376 stmmac_fpe_handshake(priv, false);
6b28a86d 7377 stmmac_fpe_stop_wq(priv);
5a558611
OBL
7378 }
7379
bd00632c 7380 priv->speed = SPEED_UNKNOWN;
47dd7a54
GC
7381 return 0;
7382}
b2e2f0c7 7383EXPORT_SYMBOL_GPL(stmmac_suspend);
47dd7a54 7384
54139cf3
JP
7385/**
7386 * stmmac_reset_queues_param - reset queue parameters
d0ea5cbd 7387 * @priv: device pointer
54139cf3
JP
7388 */
7389static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7390{
7391 u32 rx_cnt = priv->plat->rx_queues_to_use;
ce736788 7392 u32 tx_cnt = priv->plat->tx_queues_to_use;
54139cf3
JP
7393 u32 queue;
7394
7395 for (queue = 0; queue < rx_cnt; queue++) {
7396 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
7397
7398 rx_q->cur_rx = 0;
7399 rx_q->dirty_rx = 0;
7400 }
7401
ce736788
JP
7402 for (queue = 0; queue < tx_cnt; queue++) {
7403 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
7404
7405 tx_q->cur_tx = 0;
7406 tx_q->dirty_tx = 0;
8d212a9e 7407 tx_q->mss = 0;
c511819d
JZ
7408
7409 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
ce736788 7410 }
54139cf3
JP
7411}
7412
732fdf0e
GC
7413/**
7414 * stmmac_resume - resume callback
f4e7bd81 7415 * @dev: device pointer
732fdf0e
GC
7416 * Description: on resume, this function is invoked to set up the DMA and CORE
7417 * in a usable state.
7418 */
f4e7bd81 7419int stmmac_resume(struct device *dev)
47dd7a54 7420{
f4e7bd81 7421 struct net_device *ndev = dev_get_drvdata(dev);
874bd42d 7422 struct stmmac_priv *priv = netdev_priv(ndev);
b9663b7c 7423 int ret;
47dd7a54 7424
874bd42d 7425 if (!netif_running(ndev))
47dd7a54
GC
7426 return 0;
7427
47dd7a54
GC
7428 /* The Power Down bit in the PM register is cleared
7429 * automatically as soon as a magic packet or a Wake-up frame
7430 * is received. Anyway, it's better to clear this bit manually
7431 * because it can cause problems while resuming
ceb69499
GC
7432 * from other devices (e.g. serial console).
7433 */
e8377e7a 7434 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
29555fa3 7435 mutex_lock(&priv->lock);
c10d4c82 7436 stmmac_pmt(priv, priv->hw, 0);
29555fa3 7437 mutex_unlock(&priv->lock);
89f7f2cf 7438 priv->irq_wake = 0;
623997fb 7439 } else {
db88f10a 7440 pinctrl_pm_select_default_state(priv->device);
623997fb
SK
7441 /* reset the phy so that it's ready */
7442 if (priv->mii)
7443 stmmac_mdio_reset(priv->mii);
7444 }
47dd7a54 7445
b9663b7c
VW
7446 if (priv->plat->serdes_powerup) {
7447 ret = priv->plat->serdes_powerup(ndev,
7448 priv->plat->bsp_priv);
7449
7450 if (ret < 0)
7451 return ret;
7452 }
7453
90702dcd
JZ
7454 rtnl_lock();
7455 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7456 phylink_resume(priv->phylink);
7457 } else {
7458 phylink_resume(priv->phylink);
7459 if (device_may_wakeup(priv->device))
7460 phylink_speed_up(priv->phylink);
36d18b56 7461 }
90702dcd 7462 rtnl_unlock();
36d18b56 7463
8e5debed 7464 rtnl_lock();
29555fa3 7465 mutex_lock(&priv->lock);
f55d84b0 7466
54139cf3 7467 stmmac_reset_queues_param(priv);
00423969 7468
4ec236c7 7469 stmmac_free_tx_skbufs(priv);
ae79a639
GC
7470 stmmac_clear_descriptors(priv);
7471
fe131929 7472 stmmac_hw_setup(ndev, false);
d429b66e 7473 stmmac_init_coalesce(priv);
ac316c78 7474 stmmac_set_rx_mode(ndev);
47dd7a54 7475
ed64639b
WVK
7476 stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7477
c22a3f48 7478 stmmac_enable_all_queues(priv);
1ea4043a 7479 stmmac_enable_all_dma_irq(priv);
47dd7a54 7480
19e13cb2 7481 mutex_unlock(&priv->lock);
8e5debed 7482 rtnl_unlock();
102463b1 7483
31096c3e
LY
7484 netif_device_attach(ndev);
7485
47dd7a54
GC
7486 return 0;
7487}
b2e2f0c7 7488EXPORT_SYMBOL_GPL(stmmac_resume);
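
stmmac_suspend() and stmmac_resume() are not wired to a dev_pm_ops here; the bus glue does that. A sketch of the usual wiring, with illustrative wrapper names (stmmac_platform.c and stmmac_pci.c follow this shape, typically with extra clock handling around the calls):

/* Illustrative wrappers; real glue code gates clocks before/after. */
static int __maybe_unused example_pltfr_suspend(struct device *dev)
{
	return stmmac_suspend(dev);
}

static int __maybe_unused example_pltfr_resume(struct device *dev)
{
	return stmmac_resume(dev);
}

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_pltfr_suspend,
			 example_pltfr_resume);
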
ba27ec66 7489
47dd7a54
GC
7490#ifndef MODULE
7491static int __init stmmac_cmdline_opt(char *str)
7492{
7493 char *opt;
7494
7495 if (!str || !*str)
7816f0f4 7496 return 1;
47dd7a54 7497 while ((opt = strsep(&str, ",")) != NULL) {
f3240e28 7498 if (!strncmp(opt, "debug:", 6)) {
ea2ab871 7499 if (kstrtoint(opt + 6, 0, &debug))
f3240e28
GC
7500 goto err;
7501 } else if (!strncmp(opt, "phyaddr:", 8)) {
ea2ab871 7502 if (kstrtoint(opt + 8, 0, &phyaddr))
f3240e28 7503 goto err;
f3240e28 7504 } else if (!strncmp(opt, "buf_sz:", 7)) {
ea2ab871 7505 if (kstrtoint(opt + 7, 0, &buf_sz))
f3240e28
GC
7506 goto err;
7507 } else if (!strncmp(opt, "tc:", 3)) {
ea2ab871 7508 if (kstrtoint(opt + 3, 0, &tc))
f3240e28
GC
7509 goto err;
7510 } else if (!strncmp(opt, "watchdog:", 9)) {
ea2ab871 7511 if (kstrtoint(opt + 9, 0, &watchdog))
f3240e28
GC
7512 goto err;
7513 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
ea2ab871 7514 if (kstrtoint(opt + 10, 0, &flow_ctrl))
f3240e28
GC
7515 goto err;
7516 } else if (!strncmp(opt, "pause:", 6)) {
ea2ab871 7517 if (kstrtoint(opt + 6, 0, &pause))
f3240e28 7518 goto err;
506f669c 7519 } else if (!strncmp(opt, "eee_timer:", 10)) {
d765955d
GC
7520 if (kstrtoint(opt + 10, 0, &eee_timer))
7521 goto err;
4a7d666a
GC
7522 } else if (!strncmp(opt, "chain_mode:", 11)) {
7523 if (kstrtoint(opt + 11, 0, &chain_mode))
7524 goto err;
f3240e28 7525 }
47dd7a54 7526 }
7816f0f4 7527 return 1;
f3240e28
GC
7528
7529err:
7530 pr_err("%s: ERROR broken module parameter conversion", __func__);
7816f0f4 7531 return 1;
47dd7a54
GC
7532}
7533
7534__setup("stmmaceth=", stmmac_cmdline_opt);
ceb69499 7535#endif /* MODULE */
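
When the driver is built in, the same knobs exposed above as module parameters can be passed on the kernel command line through the stmmaceth= handler; the values here are purely illustrative:

    stmmaceth=debug:16,eee_timer:2000,chain_mode:1

Each key:value pair maps onto one of the strncmp() branches in stmmac_cmdline_opt().
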
6fc0d0f2 7536
466c5ac8
MO
7537static int __init stmmac_init(void)
7538{
7539#ifdef CONFIG_DEBUG_FS
7540 /* Create debugfs main directory if it doesn't exist yet */
8d72ab11 7541 if (!stmmac_fs_dir)
466c5ac8 7542 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
474a31e1 7543 register_netdevice_notifier(&stmmac_notifier);
466c5ac8
MO
7544#endif
7545
7546 return 0;
7547}
7548
7549static void __exit stmmac_exit(void)
7550{
7551#ifdef CONFIG_DEBUG_FS
474a31e1 7552 unregister_netdevice_notifier(&stmmac_notifier);
466c5ac8
MO
7553 debugfs_remove_recursive(stmmac_fs_dir);
7554#endif
7555}
7556
7557module_init(stmmac_init)
7558module_exit(stmmac_exit)
7559
6fc0d0f2
GC
7560MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
7561MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
7562MODULE_LICENSE("GPL");