// SPDX-License-Identifier: GPL-2.0-only
/*******************************************************************************
  This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
  ST Ethernet IPs are built around a Synopsys IP Core.

	Copyright(C) 2007-2011 STMicroelectronics Ltd


  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>

  Documentation available at:
	http://www.stlinux.com
  Support available at:
	https://bugzilla.stlinux.com/
*******************************************************************************/
#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/prefetch.h>
#include <linux/pinctrl/consumer.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
#include <linux/phylink.h>
#include <linux/udp.h>
#include <linux/bpf_trace.h>
#include <net/pkt_cls.h>
#include <net/xdp_sock_drv.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
#include "stmmac_xdp.h"
#include <linux/reset.h>
#include <linux/of_mdio.h>
#include "dwmac1000.h"
#include "dwxgmac2.h"
#include "hwif.h"
/* As long as the interface is active, we keep the timestamping counter enabled
 * with fine resolution and binary rollover. This avoids non-monotonic behavior
 * (clock jumps) when changing timestamping settings at runtime.
 */
#define STMMAC_HWTS_ACTIVE	(PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
				 PTP_TCR_TSCTRLSSR)

#define	STMMAC_ALIGN(x)		ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
#define	TSO_MAX_BUFF_SIZE	(SZ_16K - 1)
/* Module parameters */
#define TX_TIMEO	5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, 0644);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");
#define STMMAC_TX_THRESH(x)	((x)->dma_tx_size / 4)
#define STMMAC_RX_THRESH(x)	((x)->dma_rx_size / 4)

/* Limit to make sure XDP TX and slow path can coexist */
#define STMMAC_XSK_TX_BUDGET_MAX	256
#define STMMAC_TX_XSK_AVAIL		16
#define STMMAC_RX_FILL_BATCH		16

#define STMMAC_XDP_PASS		0
#define STMMAC_XDP_CONSUMED	BIT(0)
#define STMMAC_XDP_TX		BIT(1)
#define STMMAC_XDP_REDIRECT	BIT(2)
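/* The STMMAC_XDP_* values above are OR-able bit flags rather than an enum:
 * the RX XDP handler can accumulate several of them for one NAPI cycle
 * (e.g. both an XDP_TX flush and an XDP_REDIRECT flush being pending) and
 * callers test them with "&".
 */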
static int flow_ctrl = FLOW_AUTO;
module_param(flow_ctrl, int, 0644);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, 0644);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, 0644);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define	DEFAULT_BUFSIZE	1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, 0644);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");
#define	STMMAC_RX_COPYBREAK	256

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, 0644);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
#define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
/* By default the driver will use the ring mode to manage tx and rx descriptors,
 * but allow user to force to use the chain instead of the ring
 */
static unsigned int chain_mode;
module_param(chain_mode, int, 0444);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
/* For MSI interrupts handling */
static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);

#ifdef CONFIG_DEBUG_FS
static const struct net_device_ops stmmac_netdev_ops;
static void stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif /* CONFIG_DEBUG_FS */

#define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
{
	int ret = 0;

	if (enabled) {
		ret = clk_prepare_enable(priv->plat->stmmac_clk);
		if (ret)
			return ret;
		ret = clk_prepare_enable(priv->plat->pclk);
		if (ret) {
			clk_disable_unprepare(priv->plat->stmmac_clk);
			return ret;
		}
		if (priv->plat->clks_config) {
			ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
			if (ret) {
				clk_disable_unprepare(priv->plat->stmmac_clk);
				clk_disable_unprepare(priv->plat->pclk);
				return ret;
			}
		}
	} else {
		clk_disable_unprepare(priv->plat->stmmac_clk);
		clk_disable_unprepare(priv->plat->pclk);
		if (priv->plat->clks_config)
			priv->plat->clks_config(priv->plat->bsp_priv, enabled);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
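/* This helper is exported so that code outside this file (e.g. platform
 * glue drivers or suspend/resume paths) can gate the same bus clocks; the
 * exact external callers depend on the platform integration.
 */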
/**
 * stmmac_verify_args - verify the driver parameters.
 * Description: it checks the driver parameters and set a default in case of
 * errors.
 */
static void stmmac_verify_args(void)
{
	if (unlikely(watchdog < 0))
		watchdog = TX_TIMEO;
	if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
		buf_sz = DEFAULT_BUFSIZE;
	if (unlikely(flow_ctrl > 1))
		flow_ctrl = FLOW_AUTO;
	else if (likely(flow_ctrl < 0))
		flow_ctrl = FLOW_OFF;
	if (unlikely((pause < 0) || (pause > 0xffff)))
		pause = PAUSE_TIME;
	if (eee_timer < 0)
		eee_timer = STMMAC_DEFAULT_LPI_TIMER;
}
static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (stmmac_xdp_is_enabled(priv) &&
		    test_bit(queue, priv->af_xdp_zc_qps)) {
			napi_disable(&ch->rxtx_napi);
			continue;
		}

		if (queue < rx_queues_cnt)
			napi_disable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_disable(&ch->tx_napi);
	}
}
/**
 * stmmac_disable_all_queues - Disable all queues
 * @priv: driver private structure
 */
static void stmmac_disable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	struct stmmac_rx_queue *rx_q;
	u32 queue;

	/* synchronize_rcu() needed for pending XDP buffers to drain */
	for (queue = 0; queue < rx_queues_cnt; queue++) {
		rx_q = &priv->rx_queue[queue];
		if (rx_q->xsk_pool) {
			synchronize_rcu();
			break;
		}
	}

	__stmmac_disable_all_queues(priv);
}
/**
 * stmmac_enable_all_queues - Enable all queues
 * @priv: driver private structure
 */
static void stmmac_enable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
	u32 queue;

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (stmmac_xdp_is_enabled(priv) &&
		    test_bit(queue, priv->af_xdp_zc_qps)) {
			napi_enable(&ch->rxtx_napi);
			continue;
		}

		if (queue < rx_queues_cnt)
			napi_enable(&ch->rx_napi);
		if (queue < tx_queues_cnt)
			napi_enable(&ch->tx_napi);
	}
}
static void stmmac_service_event_schedule(struct stmmac_priv *priv)
{
	if (!test_bit(STMMAC_DOWN, &priv->state) &&
	    !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
		queue_work(priv->wq, &priv->service_task);
}
static void stmmac_global_err(struct stmmac_priv *priv)
{
	netif_carrier_off(priv->dev);
	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
	stmmac_service_event_schedule(priv);
}
/**
 * stmmac_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the csr
 * clock input.
 * Note:
 *	If a specific clk_csr value is passed from the platform
 *	this means that the CSR Clock Range selection cannot be
 *	changed at run-time and it is fixed (as reported in the driver
 *	documentation). Vice versa the driver will try to set the MDC
 *	clock dynamically according to the actual clock input.
 */
static void stmmac_clk_csr_set(struct stmmac_priv *priv)
{
	u32 clk_rate;

	clk_rate = clk_get_rate(priv->plat->stmmac_clk);

	/* Platform provided default clk_csr would be assumed valid
	 * for all other cases except for the below mentioned ones.
	 * For values higher than the IEEE 802.3 specified frequency
	 * we can not estimate the proper divider as it is not known
	 * the frequency of clk_csr_i. So we do not change the default
	 * divider.
	 */
	if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
		if (clk_rate < CSR_F_35M)
			priv->clk_csr = STMMAC_CSR_20_35M;
		else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
			priv->clk_csr = STMMAC_CSR_35_60M;
		else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
			priv->clk_csr = STMMAC_CSR_60_100M;
		else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
			priv->clk_csr = STMMAC_CSR_100_150M;
		else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
			priv->clk_csr = STMMAC_CSR_150_250M;
		else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
			priv->clk_csr = STMMAC_CSR_250_300M;
	}

	if (priv->plat->has_sun8i) {
		if (clk_rate > 160000000)
			priv->clk_csr = 0x03;
		else if (clk_rate > 80000000)
			priv->clk_csr = 0x02;
		else if (clk_rate > 40000000)
			priv->clk_csr = 0x01;
		else
			priv->clk_csr = 0;
	}

	if (priv->plat->has_xgmac) {
		if (clk_rate > 400000000)
			priv->clk_csr = 0x5;
		else if (clk_rate > 350000000)
			priv->clk_csr = 0x4;
		else if (clk_rate > 300000000)
			priv->clk_csr = 0x3;
		else if (clk_rate > 250000000)
			priv->clk_csr = 0x2;
		else if (clk_rate > 150000000)
			priv->clk_csr = 0x1;
		else
			priv->clk_csr = 0x0;
	}
}
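/* Example for stmmac_clk_csr_set() above: a 62.5 MHz csr clock falls in the
 * 60-100 MHz range, so the ladder selects STMMAC_CSR_60_100M as the MDC
 * divider.
 */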
static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}
static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	u32 avail;

	if (tx_q->dirty_tx > tx_q->cur_tx)
		avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
	else
		avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;

	return avail;
}
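/* Example of the wrap-around arithmetic in stmmac_tx_avail() above: with
 * dma_tx_size = 512, cur_tx = 10 and dirty_tx = 5,
 * avail = 512 - 10 + 5 - 1 = 506. One slot is always kept unused so that
 * cur_tx == dirty_tx unambiguously means "ring empty".
 */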
/**
 * stmmac_rx_dirty - Get RX queue dirty
 * @priv: driver private structure
 * @queue: RX queue index
 */
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	u32 dirty;

	if (rx_q->dirty_rx <= rx_q->cur_rx)
		dirty = rx_q->cur_rx - rx_q->dirty_rx;
	else
		dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;

	return dirty;
}
static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
{
	int tx_lpi_timer;

	/* Clear/set the SW EEE timer flag based on LPI ET enablement */
	priv->eee_sw_timer_en = en ? 0 : 1;
	tx_lpi_timer = en ? priv->tx_lpi_timer : 0;
	stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
}
/**
 * stmmac_enable_eee_mode - check and enter in LPI mode
 * @priv: driver private structure
 * Description: this function is to verify and enter in LPI mode in case of
 * EEE.
 */
static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* check if all TX queues have the work finished */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		if (tx_q->dirty_tx != tx_q->cur_tx)
			return; /* still unfinished work */
	}

	/* Check and enter in LPI mode */
	if (!priv->tx_path_in_lpi_mode)
		stmmac_set_eee_mode(priv, priv->hw,
				priv->plat->en_tx_lpi_clockgating);
}
/**
 * stmmac_disable_eee_mode - disable and exit from LPI mode
 * @priv: driver private structure
 * Description: this function is to exit and disable EEE when the
 * LPI state is true. This is called by the xmit.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	if (!priv->eee_sw_timer_en) {
		stmmac_lpi_entry_timer_config(priv, 0);
		return;
	}

	stmmac_reset_eee_mode(priv, priv->hw);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}
/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @t: timer_list struct containing private info
 * Description:
 *  if there is no data transfer and if we are not in LPI state,
 *  then MAC Transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
	struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);

	stmmac_enable_eee_mode(priv);
	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
}
/**
 * stmmac_eee_init - init EEE
 * @priv: driver private structure
 * Description:
 *  if the GMAC supports the EEE (from the HW cap reg) and the phy device
 *  can also manage EEE, this function enables the LPI state and starts the
 *  related timer.
 */
bool stmmac_eee_init(struct stmmac_priv *priv)
{
	int eee_tw_timer = priv->eee_tw_timer;

	/* Using PCS we cannot deal with the PHY registers at this stage
	 * so we do not support extra features like EEE.
	 */
	if (priv->hw->pcs == STMMAC_PCS_TBI ||
	    priv->hw->pcs == STMMAC_PCS_RTBI)
		return false;

	/* Check if MAC core supports the EEE feature. */
	if (!priv->dma_cap.eee)
		return false;

	mutex_lock(&priv->lock);

	/* Check if it needs to be deactivated */
	if (!priv->eee_active) {
		if (priv->eee_enabled) {
			netdev_dbg(priv->dev, "disable EEE\n");
			stmmac_lpi_entry_timer_config(priv, 0);
			del_timer_sync(&priv->eee_ctrl_timer);
			stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
			if (priv->hw->xpcs)
				xpcs_config_eee(priv->hw->xpcs,
						priv->plat->mult_fact_100ns,
						false);
		}
		mutex_unlock(&priv->lock);
		return false;
	}

	if (priv->eee_active && !priv->eee_enabled) {
		timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
		stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
				     eee_tw_timer);
		if (priv->hw->xpcs)
			xpcs_config_eee(priv->hw->xpcs,
					priv->plat->mult_fact_100ns,
					true);
	}

	if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
		del_timer_sync(&priv->eee_ctrl_timer);
		priv->tx_path_in_lpi_mode = false;
		stmmac_lpi_entry_timer_config(priv, 1);
	} else {
		stmmac_lpi_entry_timer_config(priv, 0);
		mod_timer(&priv->eee_ctrl_timer,
			  STMMAC_LPI_T(priv->tx_lpi_timer));
	}

	mutex_unlock(&priv->lock);
	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
	return true;
}
static inline u32 stmmac_cdc_adjust(struct stmmac_priv *priv)
{
	/* Correct the clk domain crossing(CDC) error */
	if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate)
		return (2 * NSEC_PER_SEC) / priv->plat->clk_ptp_rate;
	return 0;
}
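/* Example for stmmac_cdc_adjust() above: with clk_ptp_rate = 250 MHz the
 * correction is (2 * 10^9) / 250000000 = 8 ns, i.e. two cycles of the PTP
 * clock.
 */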
/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function will read timestamp from the descriptor and pass it to the
 * stack, and also perform some sanity checks.
 */
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
				   struct dma_desc *p, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	bool found = false;
	u64 ns = 0;

	if (!priv->hwts_tx_en)
		return;

	/* exit if skb doesn't support hw tstamp */
	if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
		return;

	/* check tx tstamp status */
	if (stmmac_get_tx_timestamp_status(priv, p)) {
		stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
		found = true;
	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
		found = true;
	}

	if (found) {
		ns -= stmmac_cdc_adjust(priv);

		memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp.hwtstamp = ns_to_ktime(ns);

		netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
		/* pass tstamp to stack */
		skb_tstamp_tx(skb, &shhwtstamp);
	}
}
/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function will read received packet's timestamp from the descriptor
 * and pass it to the stack. It also performs some sanity checks.
 */
static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
				   struct dma_desc *np, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamp = NULL;
	struct dma_desc *desc = p;
	u64 ns = 0;

	if (!priv->hwts_rx_en)
		return;
	/* For GMAC4, the valid timestamp is from CTX next desc. */
	if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
		desc = np;

	/* Check if timestamp is available */
	if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);

		ns -= stmmac_cdc_adjust(priv);

		netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
		shhwtstamp = skb_hwtstamps(skb);
		memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
		shhwtstamp->hwtstamp = ns_to_ktime(ns);
	} else {
		netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
	}
}
/**
 *  stmmac_hwtstamp_set - control hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function configures the MAC to enable/disable both outgoing(TX)
 *  and incoming(RX) packets time stamping based on user input.
 *  Return Value:
 *  0 on success and an appropriate -ve integer on failure.
 */
static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;

	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(config)))
		return -EFAULT;

	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config.flags, config.tx_type, config.rx_filter);

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	if (priv->adv_ts) {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* 'xmac' hardware can support Sync, Pdelay_Req and
			 * Pdelay_resp by setting bit14 and bits17/16 to 01
			 * This leaves Delay_Req timestamps out.
			 * Enable all events *and* general purpose message
			 * timestamping
			 */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.AS1 any layer, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			if (priv->synopsys_id < DWMAC_CORE_4_10)
				ts_event_en = PTP_TCR_TSEVNTENA;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.AS1, any layer, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.AS1, any layer, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_NTP_ALL:
		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	priv->systime_flags = STMMAC_HWTS_ACTIVE;

	if (priv->hwts_tx_en || priv->hwts_rx_en) {
		priv->systime_flags |= tstamp_all | ptp_v2 |
				       ptp_over_ethernet | ptp_over_ipv6_udp |
				       ptp_over_ipv4_udp | ts_event_en |
				       ts_master_en | snap_type_sel;
	}

	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);

	memcpy(&priv->tstamp_config, &config, sizeof(config));

	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(config)) ? -EFAULT : 0;
}
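/* Usage note: stmmac_hwtstamp_set() is normally reached through the
 * SIOCSHWTSTAMP ioctl, e.g. when a PTP daemon such as ptp4l enables
 * hardware timestamping on the interface.
 */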
/**
 *  stmmac_hwtstamp_get - read hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function obtains the current hardware timestamping settings
 *  as requested.
 */
static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config *config = &priv->tstamp_config;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	return copy_to_user(ifr->ifr_data, config,
			    sizeof(*config)) ? -EFAULT : 0;
}
/**
 * stmmac_init_tstamp_counter - init hardware timestamping counter
 * @priv: driver private structure
 * @systime_flags: timestamping flags
 * Description:
 * Initialize hardware counter for packet timestamping.
 * This is valid as long as the interface is open and not suspended.
 * Will be rerun after resuming from suspend, case in which the timestamping
 * flags updated by stmmac_hwtstamp_set() also need to be restored.
 */
int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
{
	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
	struct timespec64 now;
	u32 sec_inc = 0;
	u64 temp = 0;
	int ret;

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
		return -EOPNOTSUPP;

	ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
	if (ret < 0) {
		netdev_warn(priv->dev,
			    "failed to enable PTP reference clock: %pe\n",
			    ERR_PTR(ret));
		return ret;
	}

	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
	priv->systime_flags = systime_flags;

	/* program Sub Second Increment reg */
	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
					   priv->plat->clk_ptp_rate,
					   xmac, &sec_inc);
	temp = div_u64(1000000000ULL, sec_inc);

	/* Store sub second increment for later use */
	priv->sub_second_inc = sec_inc;

	/* calculate default added value:
	 * formula is :
	 * addend = (2^32)/freq_div_ratio;
	 * where, freq_div_ratio = 1e9ns/sec_inc
	 */
	temp = (u64)(temp << 32);
	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
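	/* Worked example of the addend formula above (illustrative values):
	 * with clk_ptp_rate = 50 MHz in fine-update mode, sec_inc is
	 * programmed to 40 ns (twice the 20 ns clock period), so
	 * temp = 10^9 / 40 = 25000000 and
	 * default_addend = (25000000 << 32) / 50000000 = 2^31, i.e. the
	 * 32-bit accumulator overflows once every two PTP clock cycles.
	 */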
	/* initialize system time */
	ktime_get_real_ts64(&now);

	/* lower 32 bits of tv_sec are safe until y2106 */
	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
/**
 * stmmac_init_ptp - init PTP
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
 * This is done by looking at the HW cap. register.
 * This function also registers the ptp driver.
 */
static int stmmac_init_ptp(struct stmmac_priv *priv)
{
	bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
	int ret;

	if (priv->plat->ptp_clk_freq_config)
		priv->plat->ptp_clk_freq_config(priv);

	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
	if (ret)
		return ret;

	priv->adv_ts = 0;
	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
	if (xmac && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;
	/* Dwmac 3.x core with extend_desc can support adv_ts */
	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;

	if (priv->dma_cap.time_stamp)
		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");

	if (priv->adv_ts)
		netdev_info(priv->dev,
			    "IEEE 1588-2008 Advanced Timestamp supported\n");

	priv->hwts_tx_en = 0;
	priv->hwts_rx_en = 0;

	return 0;
}
static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	clk_disable_unprepare(priv->plat->clk_ptp_ref);
	stmmac_ptp_unregister(priv);
}
/**
 * stmmac_mac_flow_ctrl - Configure flow control in all queues
 * @priv: driver private structure
 * @duplex: duplex passed to the next function
 * Description: It is used for configuring the flow control in all queues
 */
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;

	stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
			 priv->pause, tx_cnt);
}
static void stmmac_validate(struct phylink_config *config,
			    unsigned long *supported,
			    struct phylink_link_state *state)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	int tx_cnt = priv->plat->tx_queues_to_use;
	int max_speed = priv->plat->max_speed;

	phylink_set(mac_supported, 10baseT_Half);
	phylink_set(mac_supported, 10baseT_Full);
	phylink_set(mac_supported, 100baseT_Half);
	phylink_set(mac_supported, 100baseT_Full);
	phylink_set(mac_supported, 1000baseT_Half);
	phylink_set(mac_supported, 1000baseT_Full);
	phylink_set(mac_supported, 1000baseKX_Full);

	phylink_set(mac_supported, Autoneg);
	phylink_set(mac_supported, Pause);
	phylink_set(mac_supported, Asym_Pause);
	phylink_set_port_modes(mac_supported);

	/* Cut down 1G if asked to */
	if ((max_speed > 0) && (max_speed < 1000)) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
	} else if (priv->plat->has_gmac4) {
		if (!max_speed || max_speed >= 2500) {
			phylink_set(mac_supported, 2500baseT_Full);
			phylink_set(mac_supported, 2500baseX_Full);
		}
	} else if (priv->plat->has_xgmac) {
		if (!max_speed || (max_speed >= 2500)) {
			phylink_set(mac_supported, 2500baseT_Full);
			phylink_set(mac_supported, 2500baseX_Full);
		}
		if (!max_speed || (max_speed >= 5000)) {
			phylink_set(mac_supported, 5000baseT_Full);
		}
		if (!max_speed || (max_speed >= 10000)) {
			phylink_set(mac_supported, 10000baseSR_Full);
			phylink_set(mac_supported, 10000baseLR_Full);
			phylink_set(mac_supported, 10000baseER_Full);
			phylink_set(mac_supported, 10000baseLRM_Full);
			phylink_set(mac_supported, 10000baseT_Full);
			phylink_set(mac_supported, 10000baseKX4_Full);
			phylink_set(mac_supported, 10000baseKR_Full);
		}
		if (!max_speed || (max_speed >= 25000)) {
			phylink_set(mac_supported, 25000baseCR_Full);
			phylink_set(mac_supported, 25000baseKR_Full);
			phylink_set(mac_supported, 25000baseSR_Full);
		}
		if (!max_speed || (max_speed >= 40000)) {
			phylink_set(mac_supported, 40000baseKR4_Full);
			phylink_set(mac_supported, 40000baseCR4_Full);
			phylink_set(mac_supported, 40000baseSR4_Full);
			phylink_set(mac_supported, 40000baseLR4_Full);
		}
		if (!max_speed || (max_speed >= 50000)) {
			phylink_set(mac_supported, 50000baseCR2_Full);
			phylink_set(mac_supported, 50000baseKR2_Full);
			phylink_set(mac_supported, 50000baseSR2_Full);
			phylink_set(mac_supported, 50000baseKR_Full);
			phylink_set(mac_supported, 50000baseSR_Full);
			phylink_set(mac_supported, 50000baseCR_Full);
			phylink_set(mac_supported, 50000baseLR_ER_FR_Full);
			phylink_set(mac_supported, 50000baseDR_Full);
		}
		if (!max_speed || (max_speed >= 100000)) {
			phylink_set(mac_supported, 100000baseKR4_Full);
			phylink_set(mac_supported, 100000baseSR4_Full);
			phylink_set(mac_supported, 100000baseCR4_Full);
			phylink_set(mac_supported, 100000baseLR4_ER4_Full);
			phylink_set(mac_supported, 100000baseKR2_Full);
			phylink_set(mac_supported, 100000baseSR2_Full);
			phylink_set(mac_supported, 100000baseCR2_Full);
			phylink_set(mac_supported, 100000baseLR2_ER2_FR2_Full);
			phylink_set(mac_supported, 100000baseDR2_Full);
		}
	}

	/* Half-Duplex can only work with single queue */
	if (tx_cnt > 1) {
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 1000baseT_Half);
	}

	linkmode_and(supported, supported, mac_supported);
	linkmode_andnot(supported, supported, mask);

	linkmode_and(state->advertising, state->advertising, mac_supported);
	linkmode_andnot(state->advertising, state->advertising, mask);

	/* If PCS is supported, check which modes it supports. */
	if (priv->hw->xpcs)
		xpcs_validate(priv->hw->xpcs, supported, state);
}
static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
			      const struct phylink_link_state *state)
{
	/* Nothing to do, xpcs_config() handles everything */
}
static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
{
	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
	bool *hs_enable = &fpe_cfg->hs_enable;

	if (is_up && *hs_enable) {
		stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
	} else {
		*lo_state = FPE_STATE_OFF;
		*lp_state = FPE_STATE_OFF;
	}
}
static void stmmac_mac_link_down(struct phylink_config *config,
				 unsigned int mode, phy_interface_t interface)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	stmmac_mac_set(priv, priv->ioaddr, false);
	priv->eee_active = false;
	priv->tx_lpi_enabled = false;
	priv->eee_enabled = stmmac_eee_init(priv);
	stmmac_set_eee_pls(priv, priv->hw, false);

	if (priv->dma_cap.fpesel)
		stmmac_fpe_link_state_handle(priv, false);
}
static void stmmac_mac_link_up(struct phylink_config *config,
			       struct phy_device *phy,
			       unsigned int mode, phy_interface_t interface,
			       int speed, int duplex,
			       bool tx_pause, bool rx_pause)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	u32 ctrl;

	ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
	ctrl &= ~priv->hw->link.speed_mask;

	if (interface == PHY_INTERFACE_MODE_USXGMII) {
		switch (speed) {
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_5000:
			ctrl |= priv->hw->link.xgmii.speed5000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.xgmii.speed2500;
			break;
		default:
			return;
		}
	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
		switch (speed) {
		case SPEED_100000:
			ctrl |= priv->hw->link.xlgmii.speed100000;
			break;
		case SPEED_50000:
			ctrl |= priv->hw->link.xlgmii.speed50000;
			break;
		case SPEED_40000:
			ctrl |= priv->hw->link.xlgmii.speed40000;
			break;
		case SPEED_25000:
			ctrl |= priv->hw->link.xlgmii.speed25000;
			break;
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		default:
			return;
		}
	} else {
		switch (speed) {
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		case SPEED_100:
			ctrl |= priv->hw->link.speed100;
			break;
		case SPEED_10:
			ctrl |= priv->hw->link.speed10;
			break;
		default:
			return;
		}
	}

	priv->speed = speed;

	if (priv->plat->fix_mac_speed)
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);

	if (!duplex)
		ctrl &= ~priv->hw->link.duplex;
	else
		ctrl |= priv->hw->link.duplex;

	/* Flow Control operation */
	if (tx_pause && rx_pause)
		stmmac_mac_flow_ctrl(priv, duplex);

	writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

	stmmac_mac_set(priv, priv->ioaddr, true);
	if (phy && priv->dma_cap.eee) {
		priv->eee_active = phy_init_eee(phy, 1) >= 0;
		priv->eee_enabled = stmmac_eee_init(priv);
		priv->tx_lpi_enabled = priv->eee_enabled;
		stmmac_set_eee_pls(priv, priv->hw, true);
	}

	if (priv->dma_cap.fpesel)
		stmmac_fpe_link_state_handle(priv, true);
}
static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
	.validate = stmmac_validate,
	.mac_config = stmmac_mac_config,
	.mac_link_down = stmmac_mac_link_down,
	.mac_link_up = stmmac_mac_link_up,
};
/**
 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PCS.
 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
 * configured for the TBI, RTBI, or SGMII PHY interface.
 */
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
	int interface = priv->plat->interface;

	if (priv->dma_cap.pcs) {
		if ((interface == PHY_INTERFACE_MODE_RGMII) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
		    (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
			netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_RGMII;
		} else if (interface == PHY_INTERFACE_MODE_SGMII) {
			netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
			priv->hw->pcs = STMMAC_PCS_SGMII;
		}
	}
}
/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 *  Return value:
 *  0 on success
 */
static int stmmac_init_phy(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct device_node *node;
	int ret;

	node = priv->plat->phylink_node;

	if (node)
		ret = phylink_of_phy_connect(priv->phylink, node, 0);

	/* Some DT bindings do not set-up the PHY handle. Let's try to
	 * manually parse it
	 */
	if (!node || ret) {
		int addr = priv->plat->phy_addr;
		struct phy_device *phydev;

		phydev = mdiobus_get_phy(priv->mii, addr);
		if (!phydev) {
			netdev_err(priv->dev, "no phy at addr %d\n", addr);
			return -ENODEV;
		}

		ret = phylink_connect_phy(priv->phylink, phydev);
	}

	if (!priv->plat->pmt) {
		struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };

		phylink_ethtool_get_wol(priv->phylink, &wol);
		device_set_wakeup_capable(priv->device, !!wol.supported);
	}

	return ret;
}
static int stmmac_phy_setup(struct stmmac_priv *priv)
{
	struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
	struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
	int mode = priv->plat->phy_interface;
	struct phylink *phylink;

	priv->phylink_config.dev = &priv->dev->dev;
	priv->phylink_config.type = PHYLINK_NETDEV;
	priv->phylink_config.pcs_poll = true;
	if (priv->plat->mdio_bus_data)
		priv->phylink_config.ovr_an_inband =
			mdio_bus_data->xpcs_an_inband;

	if (!fwnode)
		fwnode = dev_fwnode(priv->device);

	phylink = phylink_create(&priv->phylink_config, fwnode,
				 mode, &stmmac_phylink_mac_ops);
	if (IS_ERR(phylink))
		return PTR_ERR(phylink);

	if (priv->hw->xpcs)
		phylink_set_pcs(phylink, &priv->hw->xpcs->pcs);

	priv->phylink = phylink;
	return 0;
}
static void stmmac_display_rx_rings(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	unsigned int desc_size;
	void *head_rx;
	u32 queue;

	/* Display RX rings */
	for (queue = 0; queue < rx_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		pr_info("\tRX Queue %u rings\n", queue);

		if (priv->extend_desc) {
			head_rx = (void *)rx_q->dma_erx;
			desc_size = sizeof(struct dma_extended_desc);
		} else {
			head_rx = (void *)rx_q->dma_rx;
			desc_size = sizeof(struct dma_desc);
		}

		/* Display RX ring */
		stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true,
				    rx_q->dma_rx_phy, desc_size);
	}
}
static void stmmac_display_tx_rings(struct stmmac_priv *priv)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	unsigned int desc_size;
	void *head_tx;
	u32 queue;

	/* Display TX rings */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		pr_info("\tTX Queue %d rings\n", queue);

		if (priv->extend_desc) {
			head_tx = (void *)tx_q->dma_etx;
			desc_size = sizeof(struct dma_extended_desc);
		} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
			head_tx = (void *)tx_q->dma_entx;
			desc_size = sizeof(struct dma_edesc);
		} else {
			head_tx = (void *)tx_q->dma_tx;
			desc_size = sizeof(struct dma_desc);
		}

		stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false,
				    tx_q->dma_tx_phy, desc_size);
	}
}
static void stmmac_display_rings(struct stmmac_priv *priv)
{
	/* Display RX ring */
	stmmac_display_rx_rings(priv);

	/* Display TX ring */
	stmmac_display_tx_rings(priv);
}
static int stmmac_set_bfsize(int mtu, int bufsize)
{
	int ret = bufsize;

	if (mtu >= BUF_SIZE_8KiB)
		ret = BUF_SIZE_16KiB;
	else if (mtu >= BUF_SIZE_4KiB)
		ret = BUF_SIZE_8KiB;
	else if (mtu >= BUF_SIZE_2KiB)
		ret = BUF_SIZE_4KiB;
	else if (mtu > DEFAULT_BUFSIZE)
		ret = BUF_SIZE_2KiB;
	else
		ret = DEFAULT_BUFSIZE;

	return ret;
}
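/* Example mappings from stmmac_set_bfsize() above: a standard 1500 byte MTU
 * keeps DEFAULT_BUFSIZE (1536), a 1600 byte MTU moves to BUF_SIZE_2KiB, and
 * a 9000 byte jumbo MTU lands on BUF_SIZE_16KiB.
 */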
/**
 * stmmac_clear_rx_descriptors - clear RX descriptors
 * @priv: driver private structure
 * @queue: RX queue index
 * Description: this function is called to clear the RX descriptors
 * whether basic or extended descriptors are used.
 */
static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int i;

	/* Clear the RX descriptors */
	for (i = 0; i < priv->dma_rx_size; i++)
		if (priv->extend_desc)
			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
					    priv->use_riwt, priv->mode,
					    (i == priv->dma_rx_size - 1),
					    priv->dma_buf_sz);
		else
			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
					    priv->use_riwt, priv->mode,
					    (i == priv->dma_rx_size - 1),
					    priv->dma_buf_sz);
}
/**
 * stmmac_clear_tx_descriptors - clear tx descriptors
 * @priv: driver private structure
 * @queue: TX queue index.
 * Description: this function is called to clear the TX descriptors
 * whether basic or extended descriptors are used.
 */
static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	int i;

	/* Clear the TX descriptors */
	for (i = 0; i < priv->dma_tx_size; i++) {
		int last = (i == (priv->dma_tx_size - 1));
		struct dma_desc *p;

		if (priv->extend_desc)
			p = &tx_q->dma_etx[i].basic;
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			p = &tx_q->dma_entx[i].basic;
		else
			p = &tx_q->dma_tx[i];

		stmmac_init_tx_desc(priv, p, priv->mode, last);
	}
}
/**
 * stmmac_clear_descriptors - clear descriptors
 * @priv: driver private structure
 * Description: this function is called to clear the TX and RX descriptors
 * whether basic or extended descriptors are used.
 */
static void stmmac_clear_descriptors(struct stmmac_priv *priv)
{
	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* Clear the RX descriptors */
	for (queue = 0; queue < rx_queue_cnt; queue++)
		stmmac_clear_rx_descriptors(priv, queue);

	/* Clear the TX descriptors */
	for (queue = 0; queue < tx_queue_cnt; queue++)
		stmmac_clear_tx_descriptors(priv, queue);
}
/**
 * stmmac_init_rx_buffers - init the RX descriptor buffer.
 * @priv: driver private structure
 * @p: descriptor pointer
 * @i: descriptor index
 * @flags: gfp flag
 * @queue: RX queue index
 * Description: this function is called to allocate a receive buffer, perform
 * the DMA mapping and init the descriptor.
 */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
				  int i, gfp_t flags, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];

	if (!buf->page) {
		buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
		if (!buf->page)
			return -ENOMEM;
		buf->page_offset = stmmac_rx_offset(priv);
	}

	if (priv->sph && !buf->sec_page) {
		buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
		if (!buf->sec_page)
			return -ENOMEM;

		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
	} else {
		buf->sec_page = NULL;
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
	}

	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;

	stmmac_set_desc_addr(priv, p, buf->addr);
	if (priv->dma_buf_sz == BUF_SIZE_16KiB)
		stmmac_init_desc3(priv, p);

	return 0;
}
/**
 * stmmac_free_rx_buffer - free RX dma buffers
 * @priv: private structure
 * @queue: RX queue index
 * @i: buffer index.
 */
static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];

	if (buf->page)
		page_pool_put_full_page(rx_q->page_pool, buf->page, false);
	buf->page = NULL;

	if (buf->sec_page)
		page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
	buf->sec_page = NULL;
}
/**
 * stmmac_free_tx_buffer - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
 * @i: buffer index.
 */
static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

	if (tx_q->tx_skbuff_dma[i].buf &&
	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
		if (tx_q->tx_skbuff_dma[i].map_as_page)
			dma_unmap_page(priv->device,
				       tx_q->tx_skbuff_dma[i].buf,
				       tx_q->tx_skbuff_dma[i].len,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->device,
					 tx_q->tx_skbuff_dma[i].buf,
					 tx_q->tx_skbuff_dma[i].len,
					 DMA_TO_DEVICE);
	}

	if (tx_q->xdpf[i] &&
	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
		xdp_return_frame(tx_q->xdpf[i]);
		tx_q->xdpf[i] = NULL;
	}

	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
		tx_q->xsk_frames_done++;

	if (tx_q->tx_skbuff[i] &&
	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
		tx_q->tx_skbuff[i] = NULL;
	}

	tx_q->tx_skbuff_dma[i].buf = 0;
	tx_q->tx_skbuff_dma[i].map_as_page = false;
}
/**
 * dma_free_rx_skbufs - free RX dma buffers
 * @priv: private structure
 * @queue: RX queue index
 */
static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
{
	int i;

	for (i = 0; i < priv->dma_rx_size; i++)
		stmmac_free_rx_buffer(priv, queue, i);
}
static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue,
				   gfp_t flags)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int i;

	for (i = 0; i < priv->dma_rx_size; i++) {
		struct dma_desc *p;
		int ret;

		if (priv->extend_desc)
			p = &((rx_q->dma_erx + i)->basic);
		else
			p = rx_q->dma_rx + i;

		ret = stmmac_init_rx_buffers(priv, p, i, flags,
					     queue);
		if (ret)
			return ret;

		rx_q->buf_alloc_num++;
	}

	return 0;
}
/**
 * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
 * @priv: private structure
 * @queue: RX queue index
 */
static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int i;

	for (i = 0; i < priv->dma_rx_size; i++) {
		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];

		if (!buf->xdp)
			continue;

		xsk_buff_free(buf->xdp);
		buf->xdp = NULL;
	}
}
static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int i;

	for (i = 0; i < priv->dma_rx_size; i++) {
		struct stmmac_rx_buffer *buf;
		dma_addr_t dma_addr;
		struct dma_desc *p;

		if (priv->extend_desc)
			p = (struct dma_desc *)(rx_q->dma_erx + i);
		else
			p = rx_q->dma_rx + i;

		buf = &rx_q->buf_pool[i];

		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
		if (!buf->xdp)
			return -ENOMEM;

		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
		stmmac_set_desc_addr(priv, p, dma_addr);
		rx_q->buf_alloc_num++;
	}

	return 0;
}
static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
{
	if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
		return NULL;

	return xsk_get_pool_from_qid(priv->dev, queue);
}
/**
 * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
 * @priv: driver private structure
 * @queue: RX queue index
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t flags)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int ret;

	netif_dbg(priv, probe, priv->dev,
		  "(%s) dma_rx_phy=0x%08x\n", __func__,
		  (u32)rx_q->dma_rx_phy);

	stmmac_clear_rx_descriptors(priv, queue);

	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);

	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);

	if (rx_q->xsk_pool) {
		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
						   MEM_TYPE_XSK_BUFF_POOL,
						   NULL));
		netdev_info(priv->dev,
			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
			    rx_q->queue_index);
		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
	} else {
		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
						   MEM_TYPE_PAGE_POOL,
						   rx_q->page_pool));
		netdev_info(priv->dev,
			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
			    rx_q->queue_index);
	}

	if (rx_q->xsk_pool) {
		/* RX XDP ZC buffer pool may not be populated, e.g.
		 * xdpsock TX-only.
		 */
		stmmac_alloc_rx_buffers_zc(priv, queue);
	} else {
		ret = stmmac_alloc_rx_buffers(priv, queue, flags);
		if (ret < 0)
			return -ENOMEM;
	}

	rx_q->cur_rx = 0;
	rx_q->dirty_rx = 0;

	/* Setup the chained descriptor addresses */
	if (priv->mode == STMMAC_CHAIN_MODE) {
		if (priv->extend_desc)
			stmmac_mode_init(priv, rx_q->dma_erx,
					 rx_q->dma_rx_phy,
					 priv->dma_rx_size, 1);
		else
			stmmac_mode_init(priv, rx_q->dma_rx,
					 rx_q->dma_rx_phy,
					 priv->dma_rx_size, 0);
	}

	return 0;
}
static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_count = priv->plat->rx_queues_to_use;
	int queue;
	int ret;

	/* RX INITIALIZATION */
	netif_dbg(priv, probe, priv->dev,
		  "SKB addresses:\nskb\t\tskb data\tdma data\n");

	for (queue = 0; queue < rx_count; queue++) {
		ret = __init_dma_rx_desc_rings(priv, queue, flags);
		if (ret)
			goto err_init_rx_buffers;
	}

	return 0;

err_init_rx_buffers:
	while (queue >= 0) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		if (rx_q->xsk_pool)
			dma_free_rx_xskbufs(priv, queue);
		else
			dma_free_rx_skbufs(priv, queue);

		rx_q->buf_alloc_num = 0;
		rx_q->xsk_pool = NULL;

		queue--;
	}

	return ret;
}
/**
 * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
 * @priv: driver private structure
 * @queue: TX queue index
 * Description: this function initializes the DMA TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	int i;

	netif_dbg(priv, probe, priv->dev,
		  "(%s) dma_tx_phy=0x%08x\n", __func__,
		  (u32)tx_q->dma_tx_phy);

	/* Setup the chained descriptor addresses */
	if (priv->mode == STMMAC_CHAIN_MODE) {
		if (priv->extend_desc)
			stmmac_mode_init(priv, tx_q->dma_etx,
					 tx_q->dma_tx_phy,
					 priv->dma_tx_size, 1);
		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
			stmmac_mode_init(priv, tx_q->dma_tx,
					 tx_q->dma_tx_phy,
					 priv->dma_tx_size, 0);
	}

	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);

	for (i = 0; i < priv->dma_tx_size; i++) {
		struct dma_desc *p;

		if (priv->extend_desc)
			p = &((tx_q->dma_etx + i)->basic);
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			p = &((tx_q->dma_entx + i)->basic);
		else
			p = tx_q->dma_tx + i;

		stmmac_clear_desc(priv, p);

		tx_q->tx_skbuff_dma[i].buf = 0;
		tx_q->tx_skbuff_dma[i].map_as_page = false;
		tx_q->tx_skbuff_dma[i].len = 0;
		tx_q->tx_skbuff_dma[i].last_segment = false;
		tx_q->tx_skbuff[i] = NULL;
	}

	tx_q->dirty_tx = 0;
	tx_q->cur_tx = 0;
	tx_q->mss = 0;

	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));

	return 0;
}
static int init_dma_tx_desc_rings(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 tx_queue_cnt;
	u32 queue;

	tx_queue_cnt = priv->plat->tx_queues_to_use;

	for (queue = 0; queue < tx_queue_cnt; queue++)
		__init_dma_tx_desc_rings(priv, queue);

	return 0;
}
/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;

	ret = init_dma_rx_desc_rings(dev, flags);
	if (ret)
		return ret;

	ret = init_dma_tx_desc_rings(dev);

	stmmac_clear_descriptors(priv);

	if (netif_msg_hw(priv))
		stmmac_display_rings(priv);

	return ret;
}
/**
 * dma_free_tx_skbufs - free TX dma buffers
 * @priv: private structure
 * @queue: TX queue index
 */
static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	int i;

	tx_q->xsk_frames_done = 0;

	for (i = 0; i < priv->dma_tx_size; i++)
		stmmac_free_tx_buffer(priv, queue, i);

	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
		tx_q->xsk_frames_done = 0;
		tx_q->xsk_pool = NULL;
	}
}
/**
 * stmmac_free_tx_skbufs - free TX skb buffers
 * @priv: private structure
 */
static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
{
	u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < tx_queue_cnt; queue++)
		dma_free_tx_skbufs(priv, queue);
}
/**
 * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
 * @priv: private structure
 * @queue: RX queue index
 */
static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

	/* Release the DMA RX socket buffers */
	if (rx_q->xsk_pool)
		dma_free_rx_xskbufs(priv, queue);
	else
		dma_free_rx_skbufs(priv, queue);

	rx_q->buf_alloc_num = 0;
	rx_q->xsk_pool = NULL;

	/* Free DMA regions of consistent memory previously allocated */
	if (!priv->extend_desc)
		dma_free_coherent(priv->device, priv->dma_rx_size *
				  sizeof(struct dma_desc),
				  rx_q->dma_rx, rx_q->dma_rx_phy);
	else
		dma_free_coherent(priv->device, priv->dma_rx_size *
				  sizeof(struct dma_extended_desc),
				  rx_q->dma_erx, rx_q->dma_rx_phy);

	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
		xdp_rxq_info_unreg(&rx_q->xdp_rxq);

	kfree(rx_q->buf_pool);
	if (rx_q->page_pool)
		page_pool_destroy(rx_q->page_pool);
}
static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
{
	u32 rx_count = priv->plat->rx_queues_to_use;
	u32 queue;

	/* Free RX queue resources */
	for (queue = 0; queue < rx_count; queue++)
		__free_dma_rx_desc_resources(priv, queue);
}
/**
 * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
 * @priv: private structure
 * @queue: TX queue index
 */
static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	size_t size;
	void *addr;

	/* Release the DMA TX socket buffers */
	dma_free_tx_skbufs(priv, queue);

	if (priv->extend_desc) {
		size = sizeof(struct dma_extended_desc);
		addr = tx_q->dma_etx;
	} else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
		size = sizeof(struct dma_edesc);
		addr = tx_q->dma_entx;
	} else {
		size = sizeof(struct dma_desc);
		addr = tx_q->dma_tx;
	}

	size *= priv->dma_tx_size;

	dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);

	kfree(tx_q->tx_skbuff_dma);
	kfree(tx_q->tx_skbuff);
}
static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
{
	u32 tx_count = priv->plat->tx_queues_to_use;
	u32 queue;

	/* Free TX queue resources */
	for (queue = 0; queue < tx_count; queue++)
		__free_dma_tx_desc_resources(priv, queue);
}
/**
 * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
 * @priv: private structure
 * @queue: RX queue index
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocated the RX socket buffer in order to
 * allow zero-copy mechanism.
 */
static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	bool xdp_prog = stmmac_xdp_is_enabled(priv);
	struct page_pool_params pp_params = { 0 };
	unsigned int num_pages;
	unsigned int napi_id;
	int ret;

	rx_q->queue_index = queue;
	rx_q->priv_data = priv;

	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
	pp_params.pool_size = priv->dma_rx_size;
	num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
	pp_params.order = ilog2(num_pages);
	pp_params.nid = dev_to_node(priv->device);
	pp_params.dev = priv->device;
	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
	pp_params.offset = stmmac_rx_offset(priv);
	pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);

	rx_q->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(rx_q->page_pool)) {
		ret = PTR_ERR(rx_q->page_pool);
		rx_q->page_pool = NULL;
		return ret;
	}

	rx_q->buf_pool = kcalloc(priv->dma_rx_size,
				 sizeof(*rx_q->buf_pool),
				 GFP_KERNEL);
	if (!rx_q->buf_pool)
		return -ENOMEM;

	if (priv->extend_desc) {
		rx_q->dma_erx = dma_alloc_coherent(priv->device,
						   priv->dma_rx_size *
						   sizeof(struct dma_extended_desc),
						   &rx_q->dma_rx_phy,
						   GFP_KERNEL);
		if (!rx_q->dma_erx)
			return -ENOMEM;

	} else {
		rx_q->dma_rx = dma_alloc_coherent(priv->device,
						  priv->dma_rx_size *
						  sizeof(struct dma_desc),
						  &rx_q->dma_rx_phy,
						  GFP_KERNEL);
		if (!rx_q->dma_rx)
			return -ENOMEM;
	}

	if (stmmac_xdp_is_enabled(priv) &&
	    test_bit(queue, priv->af_xdp_zc_qps))
		napi_id = ch->rxtx_napi.napi_id;
	else
		napi_id = ch->rx_napi.napi_id;

	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
			       rx_q->queue_index,
			       napi_id);
	if (ret) {
		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
		return -EINVAL;
	}

	return 0;
}
static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
{
	u32 rx_count = priv->plat->rx_queues_to_use;
	u32 queue;
	int ret;

	/* RX queues buffers and DMA */
	for (queue = 0; queue < rx_count; queue++) {
		ret = __alloc_dma_rx_desc_resources(priv, queue);
		if (ret)
			goto err_dma;
	}

	return 0;

err_dma:
	free_dma_rx_desc_resources(priv);

	return ret;
}
2092 * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2093 * @priv: private structure
2094 * @queue: TX queue index
2095 * Description: according to which descriptor can be used (extend or basic)
2096 * this function allocates the resources for TX and RX paths. In case of
2097 * reception, for example, it pre-allocated the RX socket buffer in order to
2098 * allow zero-copy mechanism.
2100 static int __alloc_dma_tx_desc_resources(struct stmmac_priv
*priv
, u32 queue
)
2102 struct stmmac_tx_queue
*tx_q
= &priv
->tx_queue
[queue
];
2106 tx_q
->queue_index
= queue
;
2107 tx_q
->priv_data
= priv
;
2109 tx_q
->tx_skbuff_dma
= kcalloc(priv
->dma_tx_size
,
2110 sizeof(*tx_q
->tx_skbuff_dma
),
2112 if (!tx_q
->tx_skbuff_dma
)
2115 tx_q
->tx_skbuff
= kcalloc(priv
->dma_tx_size
,
2116 sizeof(struct sk_buff
*),
2118 if (!tx_q
->tx_skbuff
)
2121 if (priv
->extend_desc
)
2122 size
= sizeof(struct dma_extended_desc
);
2123 else if (tx_q
->tbs
& STMMAC_TBS_AVAIL
)
2124 size
= sizeof(struct dma_edesc
);
2126 size
= sizeof(struct dma_desc
);
2128 size
*= priv
->dma_tx_size
;
2130 addr
= dma_alloc_coherent(priv
->device
, size
,
2131 &tx_q
->dma_tx_phy
, GFP_KERNEL
);
2135 if (priv
->extend_desc
)
2136 tx_q
->dma_etx
= addr
;
2137 else if (tx_q
->tbs
& STMMAC_TBS_AVAIL
)
2138 tx_q
->dma_entx
= addr
;
2140 tx_q
->dma_tx
= addr
;
2145 static int alloc_dma_tx_desc_resources(struct stmmac_priv
*priv
)
2147 u32 tx_count
= priv
->plat
->tx_queues_to_use
;
2151 /* TX queues buffers and DMA */
2152 for (queue
= 0; queue
< tx_count
; queue
++) {
2153 ret
= __alloc_dma_tx_desc_resources(priv
, queue
);
2161 free_dma_tx_desc_resources(priv
);
2166 * alloc_dma_desc_resources - alloc TX/RX resources.
2167 * @priv: private structure
2168 * Description: according to which descriptor can be used (extend or basic)
2169 * this function allocates the resources for TX and RX paths. In case of
2170 * reception, for example, it pre-allocated the RX socket buffer in order to
2171 * allow zero-copy mechanism.
2173 static int alloc_dma_desc_resources(struct stmmac_priv
*priv
)
2176 int ret
= alloc_dma_rx_desc_resources(priv
);
2181 ret
= alloc_dma_tx_desc_resources(priv
);
2187 * free_dma_desc_resources - free dma desc resources
2188 * @priv: private structure
2190 static void free_dma_desc_resources(struct stmmac_priv
*priv
)
2192 /* Release the DMA TX socket buffers */
2193 free_dma_tx_desc_resources(priv
);
2195 /* Release the DMA RX socket buffers later
2196 * to ensure all pending XDP_TX buffers are returned.
2198 free_dma_rx_desc_resources(priv
);
/**
 *  stmmac_mac_enable_rx_queues - Enable MAC rx queues
 *  @priv: driver private structure
 *  Description: It is used for enabling the rx queues in the MAC
 */
static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	int queue;
	u8 mode;

	for (queue = 0; queue < rx_queues_count; queue++) {
		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
		stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
	}
}

/**
 * stmmac_start_rx_dma - start RX DMA channel
 * @priv: driver private structure
 * @chan: RX channel index
 * Description:
 * This starts an RX DMA channel
 */
static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
	stmmac_start_rx(priv, priv->ioaddr, chan);
}

/**
 * stmmac_start_tx_dma - start TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This starts a TX DMA channel
 */
static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
	stmmac_start_tx(priv, priv->ioaddr, chan);
}

/**
 * stmmac_stop_rx_dma - stop RX DMA channel
 * @priv: driver private structure
 * @chan: RX channel index
 * Description:
 * This stops an RX DMA channel
 */
static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
	stmmac_stop_rx(priv, priv->ioaddr, chan);
}

/**
 * stmmac_stop_tx_dma - stop TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This stops a TX DMA channel
 */
static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
	stmmac_stop_tx(priv, priv->ioaddr, chan);
}

/**
 * stmmac_start_all_dma - start all RX and TX DMA channels
 * @priv: driver private structure
 * Description:
 * This starts all the RX and TX DMA channels
 */
static void stmmac_start_all_dma(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 chan = 0;

	for (chan = 0; chan < rx_channels_count; chan++)
		stmmac_start_rx_dma(priv, chan);

	for (chan = 0; chan < tx_channels_count; chan++)
		stmmac_start_tx_dma(priv, chan);
}

/**
 * stmmac_stop_all_dma - stop all RX and TX DMA channels
 * @priv: driver private structure
 * Description:
 * This stops the RX and TX DMA channels
 */
static void stmmac_stop_all_dma(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 chan = 0;

	for (chan = 0; chan < rx_channels_count; chan++)
		stmmac_stop_rx_dma(priv, chan);

	for (chan = 0; chan < tx_channels_count; chan++)
		stmmac_stop_tx_dma(priv, chan);
}
/**
 *  stmmac_dma_operation_mode - HW DMA operation mode
 *  @priv: driver private structure
 *  Description: it is used for configuring the DMA operation mode register in
 *  order to program the tx/rx DMA thresholds or Store-And-Forward mode.
 */
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	int rxfifosz = priv->plat->rx_fifo_size;
	int txfifosz = priv->plat->tx_fifo_size;
	u32 txmode = 0;
	u32 rxmode = 0;
	u32 chan = 0;
	u8 qmode = 0;

	if (rxfifosz == 0)
		rxfifosz = priv->dma_cap.rx_fifo_size;
	if (txfifosz == 0)
		txfifosz = priv->dma_cap.tx_fifo_size;

	/* Adjust for real per queue fifo size */
	rxfifosz /= rx_channels_count;
	txfifosz /= tx_channels_count;

	if (priv->plat->force_thresh_dma_mode) {
		txmode = tc;
		rxmode = tc;
	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
		/*
		 * In case of GMAC, SF mode can be enabled
		 * to perform the TX COE in HW. This depends on:
		 * 1) TX COE if actually supported
		 * 2) There is no bugged Jumbo frame support
		 *    that needs to not insert csum in the TDES.
		 */
		txmode = SF_DMA_MODE;
		rxmode = SF_DMA_MODE;
		priv->xstats.threshold = SF_DMA_MODE;
	} else {
		txmode = tc;
		rxmode = SF_DMA_MODE;
	}

	/* configure all channels */
	for (chan = 0; chan < rx_channels_count; chan++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
		u32 buf_size;

		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;

		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
				   rxfifosz, qmode);

		if (rx_q->xsk_pool) {
			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
			stmmac_set_dma_bfsize(priv, priv->ioaddr,
					      buf_size,
					      chan);
		} else {
			stmmac_set_dma_bfsize(priv, priv->ioaddr,
					      priv->dma_buf_sz,
					      chan);
		}
	}

	for (chan = 0; chan < tx_channels_count; chan++) {
		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;

		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
				   txfifosz, qmode);
	}
}
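
/* Illustrative sketch, not driver code: the per-queue FIFO size used by
 * stmmac_dma_operation_mode() above is the total FIFO divided evenly
 * across the active channels, e.g. a 16384-byte RX FIFO shared by 4 RX
 * channels leaves 4096 bytes per channel. The helper name is made up;
 * it only restates that arithmetic.
 */
static inline int __maybe_unused
stmmac_example_fifosz_per_chan(int fifosz, u32 channels)
{
	return fifosz / (int)channels;
}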
static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
{
	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	struct xsk_buff_pool *pool = tx_q->xsk_pool;
	unsigned int entry = tx_q->cur_tx;
	struct dma_desc *tx_desc = NULL;
	struct xdp_desc xdp_desc;
	bool work_done = true;

	/* Avoids TX time-out as we are sharing with slow path */
	nq->trans_start = jiffies;

	budget = min(budget, stmmac_tx_avail(priv, queue));

	while (budget-- > 0) {
		dma_addr_t dma_addr;
		bool set_ic;

		/* We are sharing with slow path and stop XSK TX desc submission when
		 * available TX ring is less than threshold.
		 */
		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
		    !netif_carrier_ok(priv->dev)) {
			work_done = false;
			break;
		}

		if (!xsk_tx_peek_desc(pool, &xdp_desc))
			break;

		if (likely(priv->extend_desc))
			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			tx_desc = &tx_q->dma_entx[entry].basic;
		else
			tx_desc = tx_q->dma_tx + entry;

		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);

		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;

		/* To return XDP buffer to XSK pool, we simply call
		 * xsk_tx_completed(), so we don't need to fill up
		 * 'buf' and 'xdpf'.
		 */
		tx_q->tx_skbuff_dma[entry].buf = 0;
		tx_q->xdpf[entry] = NULL;

		tx_q->tx_skbuff_dma[entry].map_as_page = false;
		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
		tx_q->tx_skbuff_dma[entry].last_segment = true;
		tx_q->tx_skbuff_dma[entry].is_jumbo = false;

		stmmac_set_desc_addr(priv, tx_desc, dma_addr);

		tx_q->tx_count_frames++;

		if (!priv->tx_coal_frames[queue])
			set_ic = false;
		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
			set_ic = true;
		else
			set_ic = false;

		if (set_ic) {
			tx_q->tx_count_frames = 0;
			stmmac_set_tx_ic(priv, tx_desc);
			priv->xstats.tx_set_ic_bit++;
		}

		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
				       true, priv->mode, true, true,
				       xdp_desc.len);

		stmmac_enable_dma_transmission(priv, priv->ioaddr);

		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
		entry = tx_q->cur_tx;
	}

	if (tx_desc) {
		stmmac_flush_tx_descriptors(priv, queue);
		xsk_tx_release(pool);
	}

	/* Return true if both conditions are met:
	 *  a) TX budget is still available
	 *  b) work_done = true when the XSK TX desc peek is empty (no more
	 *     pending XSK TX for transmission)
	 */
	return !!budget && work_done;
}
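
/* Sketch of how the boolean returned by stmmac_xdp_xmit_zc() is folded
 * into a NAPI-style budget by its caller; this restates the convention
 * spelled out inside stmmac_tx_clean() below and is illustration only.
 * 'budget' here is the NAPI budget, not the XSK TX budget, and the
 * helper name is hypothetical.
 */
static inline int __maybe_unused
stmmac_example_xsk_budget(bool xsk_done, int budget)
{
	/* all pending XSK descriptors sent: "budget - 1" lets NAPI
	 * complete and re-enables the TX IRQ; otherwise returning the
	 * full budget keeps NAPI polling.
	 */
	return xsk_done ? budget - 1 : budget;
}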
/**
 * stmmac_tx_clean - to manage the transmission completion
 * @priv: driver private structure
 * @budget: napi budget limiting this function's packet handling
 * @queue: TX queue index
 * Description: it reclaims the transmit resources after transmission completes.
 */
static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	unsigned int bytes_compl = 0, pkts_compl = 0;
	unsigned int entry, xmits = 0, count = 0;

	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));

	priv->xstats.tx_clean++;

	tx_q->xsk_frames_done = 0;

	entry = tx_q->dirty_tx;

	/* Try to clean all TX complete frame in 1 shot */
	while ((entry != tx_q->cur_tx) && count < priv->dma_tx_size) {
		struct xdp_frame *xdpf;
		struct sk_buff *skb;
		struct dma_desc *p;
		int status;

		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
			xdpf = tx_q->xdpf[entry];
			skb = NULL;
		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
			xdpf = NULL;
			skb = tx_q->tx_skbuff[entry];
		} else {
			xdpf = NULL;
			skb = NULL;
		}

		if (priv->extend_desc)
			p = (struct dma_desc *)(tx_q->dma_etx + entry);
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			p = &tx_q->dma_entx[entry].basic;
		else
			p = tx_q->dma_tx + entry;

		status = stmmac_tx_status(priv, &priv->dev->stats,
					  &priv->xstats, p, priv->ioaddr);
		/* Check if the descriptor is owned by the DMA */
		if (unlikely(status & tx_dma_own))
			break;

		count++;

		/* Make sure descriptor fields are read after reading
		 * the own bit.
		 */
		dma_rmb();

		/* Just consider the last segment and ...*/
		if (likely(!(status & tx_not_ls))) {
			/* ... verify the status error condition */
			if (unlikely(status & tx_err)) {
				priv->dev->stats.tx_errors++;
			} else {
				priv->dev->stats.tx_packets++;
				priv->xstats.tx_pkt_n++;
				priv->xstats.txq_stats[queue].tx_pkt_n++;
			}
			if (skb)
				stmmac_get_tx_hwtstamp(priv, p, skb);
		}

		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
			if (tx_q->tx_skbuff_dma[entry].map_as_page)
				dma_unmap_page(priv->device,
					       tx_q->tx_skbuff_dma[entry].buf,
					       tx_q->tx_skbuff_dma[entry].len,
					       DMA_TO_DEVICE);
			else
				dma_unmap_single(priv->device,
						 tx_q->tx_skbuff_dma[entry].buf,
						 tx_q->tx_skbuff_dma[entry].len,
						 DMA_TO_DEVICE);
			tx_q->tx_skbuff_dma[entry].buf = 0;
			tx_q->tx_skbuff_dma[entry].len = 0;
			tx_q->tx_skbuff_dma[entry].map_as_page = false;
		}

		stmmac_clean_desc3(priv, tx_q, p);

		tx_q->tx_skbuff_dma[entry].last_segment = false;
		tx_q->tx_skbuff_dma[entry].is_jumbo = false;

		if (xdpf &&
		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
			xdp_return_frame_rx_napi(xdpf);
			tx_q->xdpf[entry] = NULL;
		}

		if (xdpf &&
		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
			xdp_return_frame(xdpf);
			tx_q->xdpf[entry] = NULL;
		}

		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
			tx_q->xsk_frames_done++;

		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
			if (likely(skb)) {
				pkts_compl++;
				bytes_compl += skb->len;
				dev_consume_skb_any(skb);
				tx_q->tx_skbuff[entry] = NULL;
			}
		}

		stmmac_release_tx_desc(priv, p, priv->mode);

		entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
	}
	tx_q->dirty_tx = entry;

	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
				  pkts_compl, bytes_compl);

	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
								queue))) &&
	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {

		netif_dbg(priv, tx_done, priv->dev,
			  "%s: restart transmit\n", __func__);
		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	if (tx_q->xsk_pool) {
		bool work_done;

		if (tx_q->xsk_frames_done)
			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);

		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
			xsk_set_tx_need_wakeup(tx_q->xsk_pool);

		/* For XSK TX, we try to send as many as possible.
		 * If XSK work done (XSK TX desc empty and budget still
		 * available), return "budget - 1" to reenable TX IRQ.
		 * Else, return "budget" to make NAPI continue polling.
		 */
		work_done = stmmac_xdp_xmit_zc(priv, queue,
					       STMMAC_XSK_TX_BUDGET_MAX);
		if (work_done)
			xmits = budget - 1;
		else
			xmits = budget;
	}

	if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
	    priv->eee_sw_timer_en) {
		stmmac_enable_eee_mode(priv);
		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
	}

	/* We still have pending packets, let's call for a new scheduling */
	if (tx_q->dirty_tx != tx_q->cur_tx)
		hrtimer_start(&tx_q->txtimer,
			      STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
			      HRTIMER_MODE_REL);

	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));

	/* Combine decisions from TX clean and XSK TX */
	return max(count, xmits);
}
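
/* Illustration of the ring bookkeeping used by stmmac_tx_clean(): with
 * 'cur_tx' as producer index and 'dirty_tx' as consumer index on a ring
 * of dma_tx_size entries, the in-flight count wraps modulo the ring
 * size. This restates existing driver arithmetic for clarity only; the
 * helper is not called anywhere.
 */
static inline unsigned int __maybe_unused
stmmac_example_tx_in_flight(unsigned int cur_tx, unsigned int dirty_tx,
			    unsigned int ring_size)
{
	/* e.g. cur_tx = 3, dirty_tx = 510, ring_size = 512 -> 5 in flight */
	return (cur_tx - dirty_tx + ring_size) % ring_size;
}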
/**
 * stmmac_tx_err - to manage the tx error
 * @priv: driver private structure
 * @chan: channel index
 * Description: it cleans the descriptors and restarts the transmission
 * in case of transmission errors.
 */
static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];

	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));

	stmmac_stop_tx_dma(priv, chan);
	dma_free_tx_skbufs(priv, chan);
	stmmac_clear_tx_descriptors(priv, chan);
	tx_q->dirty_tx = 0;
	tx_q->cur_tx = 0;
	tx_q->mss = 0;
	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
			    tx_q->dma_tx_phy, chan);
	stmmac_start_tx_dma(priv, chan);

	priv->dev->stats.tx_errors++;
	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
}

/**
 *  stmmac_set_dma_operation_mode - Set DMA operation mode by channel
 *  @priv: driver private structure
 *  @txmode: TX operating mode
 *  @rxmode: RX operating mode
 *  @chan: channel index
 *  Description: it is used for configuring the DMA operation mode at
 *  runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
 *  mode.
 */
static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
					  u32 rxmode, u32 chan)
{
	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	int rxfifosz = priv->plat->rx_fifo_size;
	int txfifosz = priv->plat->tx_fifo_size;

	if (rxfifosz == 0)
		rxfifosz = priv->dma_cap.rx_fifo_size;
	if (txfifosz == 0)
		txfifosz = priv->dma_cap.tx_fifo_size;

	/* Adjust for real per queue fifo size */
	rxfifosz /= rx_channels_count;
	txfifosz /= tx_channels_count;

	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
}

static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
{
	int ret;

	ret = stmmac_safety_feat_irq_status(priv, priv->dev,
					    priv->ioaddr, priv->dma_cap.asp,
					    &priv->sstats);
	if (ret && (ret != -EINVAL)) {
		stmmac_global_err(priv);
		return true;
	}

	return false;
}

static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
{
	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
						 &priv->xstats, chan, dir);
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
	struct stmmac_channel *ch = &priv->channel[chan];
	struct napi_struct *rx_napi;
	struct napi_struct *tx_napi;
	unsigned long flags;

	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;

	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
		if (napi_schedule_prep(rx_napi)) {
			spin_lock_irqsave(&ch->lock, flags);
			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
			spin_unlock_irqrestore(&ch->lock, flags);
			__napi_schedule(rx_napi);
		}
	}

	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
		if (napi_schedule_prep(tx_napi)) {
			spin_lock_irqsave(&ch->lock, flags);
			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
			spin_unlock_irqrestore(&ch->lock, flags);
			__napi_schedule(tx_napi);
		}
	}

	return status;
}

/**
 * stmmac_dma_interrupt - DMA ISR
 * @priv: driver private structure
 * Description: this is the DMA ISR. It is called by the main ISR.
 * It calls the dwmac dma routine and schedules the poll method in case
 * some work can be done.
 */
static void stmmac_dma_interrupt(struct stmmac_priv *priv)
{
	u32 tx_channel_count = priv->plat->tx_queues_to_use;
	u32 rx_channel_count = priv->plat->rx_queues_to_use;
	u32 channels_to_check = tx_channel_count > rx_channel_count ?
				tx_channel_count : rx_channel_count;
	u32 chan;
	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];

	/* Make sure we never check beyond our status buffer. */
	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
		channels_to_check = ARRAY_SIZE(status);

	for (chan = 0; chan < channels_to_check; chan++)
		status[chan] = stmmac_napi_check(priv, chan,
						 DMA_DIR_RXTX);

	for (chan = 0; chan < tx_channel_count; chan++) {
		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
			/* Try to bump up the dma threshold on this failure */
			if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
			    (tc <= 256)) {
				tc += 64;
				if (priv->plat->force_thresh_dma_mode)
					stmmac_set_dma_operation_mode(priv,
								      tc,
								      tc,
								      chan);
				else
					stmmac_set_dma_operation_mode(priv,
								      tc,
								      SF_DMA_MODE,
								      chan);
				priv->xstats.threshold = tc;
			}
		} else if (unlikely(status[chan] == tx_hard_error)) {
			stmmac_tx_err(priv, chan);
		}
	}
}
/**
 * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
 * @priv: driver private structure
 * Description: this masks the MMC irq, in fact, the counters are managed in SW.
 */
static void stmmac_mmc_setup(struct stmmac_priv *priv)
{
	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;

	stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);

	if (priv->dma_cap.rmon) {
		stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
		memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
	} else
		netdev_info(priv->dev, "No MAC Management Counters available\n");
}

/**
 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
 * @priv: driver private structure
 * Description:
 *  new GMAC chip generations have a new register to indicate the
 *  presence of the optional feature/functions.
 *  This can be also used to override the value passed through the
 *  platform and necessary for old MAC10/100 and GMAC chips.
 */
static int stmmac_get_hw_features(struct stmmac_priv *priv)
{
	return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
}

/**
 * stmmac_check_ether_addr - check if the MAC addr is valid
 * @priv: driver private structure
 * Description:
 * it is to verify if the MAC address is valid, in case of failures it
 * generates a random MAC address
 */
static void stmmac_check_ether_addr(struct stmmac_priv *priv)
{
	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
		stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
		if (!is_valid_ether_addr(priv->dev->dev_addr))
			eth_hw_addr_random(priv->dev);
		dev_info(priv->device, "device MAC address %pM\n",
			 priv->dev->dev_addr);
	}
}

/**
 * stmmac_init_dma_engine - DMA init.
 * @priv: driver private structure
 * Description:
 * It inits the DMA invoking the specific MAC/GMAC callback.
 * Some DMA parameters can be passed from the platform;
 * in case these are not passed a default is kept for the MAC or GMAC.
 */
static int stmmac_init_dma_engine(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
	struct stmmac_rx_queue *rx_q;
	struct stmmac_tx_queue *tx_q;
	u32 chan = 0;
	int atds = 0;
	int ret = 0;

	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
		dev_err(priv->device, "Invalid DMA configuration\n");
		return -EINVAL;
	}

	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
		atds = 1;

	ret = stmmac_reset(priv, priv->ioaddr);
	if (ret) {
		dev_err(priv->device, "Failed to reset the dma\n");
		return ret;
	}

	/* DMA Configuration */
	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);

	if (priv->plat->axi)
		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);

	/* DMA CSR Channel configuration */
	for (chan = 0; chan < dma_csr_ch; chan++)
		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);

	/* DMA RX Channel Configuration */
	for (chan = 0; chan < rx_channels_count; chan++) {
		rx_q = &priv->rx_queue[chan];

		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    rx_q->dma_rx_phy, chan);

		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
				     (rx_q->buf_alloc_num *
				      sizeof(struct dma_desc));
		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
				       rx_q->rx_tail_addr, chan);
	}

	/* DMA TX Channel Configuration */
	for (chan = 0; chan < tx_channels_count; chan++) {
		tx_q = &priv->tx_queue[chan];

		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    tx_q->dma_tx_phy, chan);

		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
				       tx_q->tx_tail_addr, chan);
	}

	return ret;
}

static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

	hrtimer_start(&tx_q->txtimer,
		      STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
		      HRTIMER_MODE_REL);
}

/**
 * stmmac_tx_timer - mitigation sw timer for tx.
 * @t: data pointer
 * Description:
 * This is the timer handler to directly invoke the stmmac_tx_clean.
 */
static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
{
	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
	struct stmmac_priv *priv = tx_q->priv_data;
	struct stmmac_channel *ch;
	struct napi_struct *napi;

	ch = &priv->channel[tx_q->queue_index];
	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;

	if (likely(napi_schedule_prep(napi))) {
		unsigned long flags;

		spin_lock_irqsave(&ch->lock, flags);
		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
		spin_unlock_irqrestore(&ch->lock, flags);
		__napi_schedule(napi);
	}

	return HRTIMER_NORESTART;
}

/**
 * stmmac_init_coalesce - init mitigation options.
 * @priv: driver private structure
 * Description:
 * This inits the coalesce parameters: i.e. timer rate,
 * timer handler and default threshold used for enabling the
 * interrupt on completion bit.
 */
static void stmmac_init_coalesce(struct stmmac_priv *priv)
{
	u32 tx_channel_count = priv->plat->tx_queues_to_use;
	u32 rx_channel_count = priv->plat->rx_queues_to_use;
	u32 chan;

	for (chan = 0; chan < tx_channel_count; chan++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];

		priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
		priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;

		hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		tx_q->txtimer.function = stmmac_tx_timer;
	}

	for (chan = 0; chan < rx_channel_count; chan++)
		priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
}
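
/* Usage sketch for the coalescing pair set up in stmmac_init_coalesce():
 * every TX queue owns one monotonic hrtimer whose handler kicks the TX
 * NAPI context, and stmmac_tx_timer_arm() re-arms it with the per-queue
 * timeout. The miniature below shows the same init/arm sequence on a
 * bare hrtimer with a hypothetical 1000 us timeout; it is illustration
 * only and never called.
 */
static void __maybe_unused
stmmac_example_coal_timer_setup(struct hrtimer *timer,
				enum hrtimer_restart (*fn)(struct hrtimer *))
{
	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	timer->function = fn;
	/* arm for 1000 us, relative, as STMMAC_COAL_TIMER() does */
	hrtimer_start(timer, ns_to_ktime(1000 * NSEC_PER_USEC),
		      HRTIMER_MODE_REL);
}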
static void stmmac_set_rings_length(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 chan;

	/* set TX ring length */
	for (chan = 0; chan < tx_channels_count; chan++)
		stmmac_set_tx_ring_len(priv, priv->ioaddr,
				       (priv->dma_tx_size - 1), chan);

	/* set RX ring length */
	for (chan = 0; chan < rx_channels_count; chan++)
		stmmac_set_rx_ring_len(priv, priv->ioaddr,
				       (priv->dma_rx_size - 1), chan);
}

/**
 *  stmmac_set_tx_queue_weight - Set TX queue weight
 *  @priv: driver private structure
 *  Description: It is used for setting TX queues weight
 */
static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
{
	u32 tx_queues_count = priv->plat->tx_queues_to_use;
	u32 weight;
	u32 queue;

	for (queue = 0; queue < tx_queues_count; queue++) {
		weight = priv->plat->tx_queues_cfg[queue].weight;
		stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
	}
}

/**
 *  stmmac_configure_cbs - Configure CBS in TX queue
 *  @priv: driver private structure
 *  Description: It is used for configuring CBS in AVB TX queues
 */
static void stmmac_configure_cbs(struct stmmac_priv *priv)
{
	u32 tx_queues_count = priv->plat->tx_queues_to_use;
	u32 mode_to_use;
	u32 queue;

	/* queue 0 is reserved for legacy traffic */
	for (queue = 1; queue < tx_queues_count; queue++) {
		mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
		if (mode_to_use == MTL_QUEUE_DCB)
			continue;

		stmmac_config_cbs(priv, priv->hw,
				  priv->plat->tx_queues_cfg[queue].send_slope,
				  priv->plat->tx_queues_cfg[queue].idle_slope,
				  priv->plat->tx_queues_cfg[queue].high_credit,
				  priv->plat->tx_queues_cfg[queue].low_credit,
				  queue);
	}
}
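
/* Background sketch for stmmac_configure_cbs(): in the 802.1Qav
 * credit-based shaper the idle slope is the reserved fraction of the
 * port rate and the magnitude of the send slope is its complement, so
 * the two sum to the port rate. The helper shows that relationship in
 * kbit/s; the actual register encoding of the slope and credit values
 * is supplied through platform data and is not derived here.
 */
static inline u32 __maybe_unused
stmmac_example_cbs_send_slope(u32 port_rate_kbps, u32 idle_slope_kbps)
{
	/* e.g. 1000000 kbit/s port, 250000 kbit/s reserved
	 *   -> send slope magnitude 750000 kbit/s
	 */
	return port_rate_kbps - idle_slope_kbps;
}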
/**
 *  stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
 *  @priv: driver private structure
 *  Description: It is used for mapping RX queues to RX dma channels
 */
static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 queue;
	u32 chan;

	for (queue = 0; queue < rx_queues_count; queue++) {
		chan = priv->plat->rx_queues_cfg[queue].chan;
		stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
	}
}

/**
 *  stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
 *  @priv: driver private structure
 *  Description: It is used for configuring the RX Queue Priority
 */
static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 queue;
	u32 prio;

	for (queue = 0; queue < rx_queues_count; queue++) {
		if (!priv->plat->rx_queues_cfg[queue].use_prio)
			continue;

		prio = priv->plat->rx_queues_cfg[queue].prio;
		stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
	}
}

/**
 *  stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
 *  @priv: driver private structure
 *  Description: It is used for configuring the TX Queue Priority
 */
static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
{
	u32 tx_queues_count = priv->plat->tx_queues_to_use;
	u32 queue;
	u32 prio;

	for (queue = 0; queue < tx_queues_count; queue++) {
		if (!priv->plat->tx_queues_cfg[queue].use_prio)
			continue;

		prio = priv->plat->tx_queues_cfg[queue].prio;
		stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
	}
}

/**
 *  stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
 *  @priv: driver private structure
 *  Description: It is used for configuring the RX queue routing
 */
static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 queue;
	u8 packet;

	for (queue = 0; queue < rx_queues_count; queue++) {
		/* no specific packet type routing specified for the queue */
		if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
			continue;

		packet = priv->plat->rx_queues_cfg[queue].pkt_route;
		stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
	}
}

static void stmmac_mac_config_rss(struct stmmac_priv *priv)
{
	if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
		priv->rss.enable = false;
		return;
	}

	if (priv->dev->features & NETIF_F_RXHASH)
		priv->rss.enable = true;
	else
		priv->rss.enable = false;

	stmmac_rss_configure(priv, priv->hw, &priv->rss,
			     priv->plat->rx_queues_to_use);
}

/**
 *  stmmac_mtl_configuration - Configure MTL
 *  @priv: driver private structure
 *  Description: It is used for configuring MTL
 */
static void stmmac_mtl_configuration(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 tx_queues_count = priv->plat->tx_queues_to_use;

	if (tx_queues_count > 1)
		stmmac_set_tx_queue_weight(priv);

	/* Configure MTL RX algorithms */
	if (rx_queues_count > 1)
		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
					      priv->plat->rx_sched_algorithm);

	/* Configure MTL TX algorithms */
	if (tx_queues_count > 1)
		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
					      priv->plat->tx_sched_algorithm);

	/* Configure CBS in AVB TX queues */
	if (tx_queues_count > 1)
		stmmac_configure_cbs(priv);

	/* Map RX MTL to DMA channels */
	stmmac_rx_queue_dma_chan_map(priv);

	/* Enable MAC RX Queues */
	stmmac_mac_enable_rx_queues(priv);

	/* Set RX priorities */
	if (rx_queues_count > 1)
		stmmac_mac_config_rx_queues_prio(priv);

	/* Set TX priorities */
	if (tx_queues_count > 1)
		stmmac_mac_config_tx_queues_prio(priv);

	/* Set RX routing */
	if (rx_queues_count > 1)
		stmmac_mac_config_rx_queues_routing(priv);

	/* Receive Side Scaling */
	if (rx_queues_count > 1)
		stmmac_mac_config_rss(priv);
}
static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
{
	if (priv->dma_cap.asp) {
		netdev_info(priv->dev, "Enabling Safety Features\n");
		stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
					  priv->plat->safety_feat_cfg);
	} else {
		netdev_info(priv->dev, "No Safety Features support found\n");
	}
}

static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
{
	char *name;

	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
	clear_bit(__FPE_REMOVING, &priv->fpe_task_state);

	name = priv->wq_name;
	sprintf(name, "%s-fpe", priv->dev->name);

	priv->fpe_wq = create_singlethread_workqueue(name);
	if (!priv->fpe_wq) {
		netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
		return -ENOMEM;
	}

	netdev_info(priv->dev, "FPE workqueue start");

	return 0;
}
/**
 * stmmac_hw_setup - setup mac in a usable state.
 *  @dev : pointer to the device structure.
 *  @ptp_register: register PTP if set
 *  Description:
 *  this is the main function to setup the HW in a usable state because the
 *  dma engine is reset, the core registers are configured (e.g. AXI,
 *  Checksum features, timers). The DMA is ready to start receiving and
 *  transmitting.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	bool sph_en;
	u32 chan;
	int ret;

	/* DMA initialization and SW reset */
	ret = stmmac_init_dma_engine(priv);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
			   __func__);
		return ret;
	}

	/* Copy the MAC addr into the HW  */
	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);

	/* PS and related bits will be programmed according to the speed */
	if (priv->hw->pcs) {
		int speed = priv->plat->mac_port_sel_speed;

		if ((speed == SPEED_10) || (speed == SPEED_100) ||
		    (speed == SPEED_1000)) {
			priv->hw->ps = speed;
		} else {
			dev_warn(priv->device, "invalid port speed\n");
			priv->hw->ps = 0;
		}
	}

	/* Initialize the MAC Core */
	stmmac_core_init(priv, priv->hw, dev);

	/* Initialize MTL*/
	stmmac_mtl_configuration(priv);

	/* Initialize Safety Features */
	stmmac_safety_feat_configuration(priv);

	ret = stmmac_rx_ipc(priv, priv->hw);
	if (!ret) {
		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
		priv->hw->rx_csum = 0;
	}

	/* Enable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, true);

	/* Set the HW DMA mode and the COE */
	stmmac_dma_operation_mode(priv);

	stmmac_mmc_setup(priv);

	ret = stmmac_init_ptp(priv);
	if (ret == -EOPNOTSUPP)
		netdev_warn(priv->dev, "PTP not supported by HW\n");
	else if (ret)
		netdev_warn(priv->dev, "PTP init failed\n");
	else if (ptp_register)
		stmmac_ptp_register(priv);

	priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;

	/* Convert the timer from msec to usec */
	if (!priv->tx_lpi_timer)
		priv->tx_lpi_timer = eee_timer * 1000;

	if (priv->use_riwt) {
		u32 queue;

		for (queue = 0; queue < rx_cnt; queue++) {
			if (!priv->rx_riwt[queue])
				priv->rx_riwt[queue] = DEF_DMA_RIWT;

			stmmac_rx_watchdog(priv, priv->ioaddr,
					   priv->rx_riwt[queue], queue);
		}
	}

	if (priv->hw->pcs)
		stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);

	/* set TX and RX rings length */
	stmmac_set_rings_length(priv);

	/* Enable TSO */
	if (priv->tso) {
		for (chan = 0; chan < tx_cnt; chan++) {
			struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];

			/* TSO and TBS cannot co-exist */
			if (tx_q->tbs & STMMAC_TBS_AVAIL)
				continue;

			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
		}
	}

	/* Enable Split Header */
	sph_en = (priv->hw->rx_csum > 0) && priv->sph;
	for (chan = 0; chan < rx_cnt; chan++)
		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);

	/* VLAN Tag Insertion */
	if (priv->dma_cap.vlins)
		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);

	/* TBS */
	for (chan = 0; chan < tx_cnt; chan++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;

		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
	}

	/* Configure real RX and TX queues */
	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);

	/* Start the ball rolling... */
	stmmac_start_all_dma(priv);

	if (priv->dma_cap.fpesel) {
		stmmac_fpe_start_wq(priv);

		if (priv->plat->fpe_cfg->enable)
			stmmac_fpe_handshake(priv, true);
	}

	return 0;
}
static void stmmac_hw_teardown(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	clk_disable_unprepare(priv->plat->clk_ptp_ref);
}

static void stmmac_free_irq(struct net_device *dev,
			    enum request_irq_err irq_err, int irq_idx)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int j;

	switch (irq_err) {
	case REQ_IRQ_ERR_ALL:
		irq_idx = priv->plat->tx_queues_to_use;
		fallthrough;
	case REQ_IRQ_ERR_TX:
		for (j = irq_idx - 1; j >= 0; j--) {
			if (priv->tx_irq[j] > 0) {
				irq_set_affinity_hint(priv->tx_irq[j], NULL);
				free_irq(priv->tx_irq[j], &priv->tx_queue[j]);
			}
		}
		irq_idx = priv->plat->rx_queues_to_use;
		fallthrough;
	case REQ_IRQ_ERR_RX:
		for (j = irq_idx - 1; j >= 0; j--) {
			if (priv->rx_irq[j] > 0) {
				irq_set_affinity_hint(priv->rx_irq[j], NULL);
				free_irq(priv->rx_irq[j], &priv->rx_queue[j]);
			}
		}

		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
			free_irq(priv->sfty_ue_irq, dev);
		fallthrough;
	case REQ_IRQ_ERR_SFTY_UE:
		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
			free_irq(priv->sfty_ce_irq, dev);
		fallthrough;
	case REQ_IRQ_ERR_SFTY_CE:
		if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
			free_irq(priv->lpi_irq, dev);
		fallthrough;
	case REQ_IRQ_ERR_LPI:
		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
			free_irq(priv->wol_irq, dev);
		fallthrough;
	case REQ_IRQ_ERR_WOL:
		free_irq(dev->irq, dev);
		fallthrough;
	case REQ_IRQ_ERR_MAC:
	case REQ_IRQ_ERR_NO:
		/* If MAC IRQ request error, no more IRQ to free */
		break;
	}
}
static int stmmac_request_irq_multi_msi(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	enum request_irq_err irq_err;
	cpumask_t cpu_mask;
	int irq_idx = 0;
	char *int_name;
	int ret;
	int i;

	/* For common interrupt */
	int_name = priv->int_name_mac;
	sprintf(int_name, "%s:%s", dev->name, "mac");
	ret = request_irq(dev->irq, stmmac_mac_interrupt,
			  0, int_name, dev);
	if (unlikely(ret < 0)) {
		netdev_err(priv->dev,
			   "%s: alloc mac MSI %d (error: %d)\n",
			   __func__, dev->irq, ret);
		irq_err = REQ_IRQ_ERR_MAC;
		goto irq_error;
	}

	/* Request the Wake IRQ in case of another line
	 * is used for WoL
	 */
	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
		int_name = priv->int_name_wol;
		sprintf(int_name, "%s:%s", dev->name, "wol");
		ret = request_irq(priv->wol_irq,
				  stmmac_mac_interrupt,
				  0, int_name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: alloc wol MSI %d (error: %d)\n",
				   __func__, priv->wol_irq, ret);
			irq_err = REQ_IRQ_ERR_WOL;
			goto irq_error;
		}
	}

	/* Request the LPI IRQ in case of another line
	 * is used for LPI
	 */
	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
		int_name = priv->int_name_lpi;
		sprintf(int_name, "%s:%s", dev->name, "lpi");
		ret = request_irq(priv->lpi_irq,
				  stmmac_mac_interrupt,
				  0, int_name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: alloc lpi MSI %d (error: %d)\n",
				   __func__, priv->lpi_irq, ret);
			irq_err = REQ_IRQ_ERR_LPI;
			goto irq_error;
		}
	}

	/* Request the Safety Feature Correctible Error line in
	 * case of another line is used
	 */
	if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
		int_name = priv->int_name_sfty_ce;
		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
		ret = request_irq(priv->sfty_ce_irq,
				  stmmac_safety_interrupt,
				  0, int_name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: alloc sfty ce MSI %d (error: %d)\n",
				   __func__, priv->sfty_ce_irq, ret);
			irq_err = REQ_IRQ_ERR_SFTY_CE;
			goto irq_error;
		}
	}

	/* Request the Safety Feature Uncorrectible Error line in
	 * case of another line is used
	 */
	if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
		int_name = priv->int_name_sfty_ue;
		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
		ret = request_irq(priv->sfty_ue_irq,
				  stmmac_safety_interrupt,
				  0, int_name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: alloc sfty ue MSI %d (error: %d)\n",
				   __func__, priv->sfty_ue_irq, ret);
			irq_err = REQ_IRQ_ERR_SFTY_UE;
			goto irq_error;
		}
	}

	/* Request Rx MSI irq */
	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
		if (priv->rx_irq[i] == 0)
			continue;

		int_name = priv->int_name_rx_irq[i];
		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
		ret = request_irq(priv->rx_irq[i],
				  stmmac_msi_intr_rx,
				  0, int_name, &priv->rx_queue[i]);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: alloc rx-%d  MSI %d (error: %d)\n",
				   __func__, i, priv->rx_irq[i], ret);
			irq_err = REQ_IRQ_ERR_RX;
			irq_idx = i;
			goto irq_error;
		}
		cpumask_clear(&cpu_mask);
		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
		irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
	}

	/* Request Tx MSI irq */
	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
		if (priv->tx_irq[i] == 0)
			continue;

		int_name = priv->int_name_tx_irq[i];
		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
		ret = request_irq(priv->tx_irq[i],
				  stmmac_msi_intr_tx,
				  0, int_name, &priv->tx_queue[i]);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: alloc tx-%d  MSI %d (error: %d)\n",
				   __func__, i, priv->tx_irq[i], ret);
			irq_err = REQ_IRQ_ERR_TX;
			irq_idx = i;
			goto irq_error;
		}
		cpumask_clear(&cpu_mask);
		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
		irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
	}

	return 0;

irq_error:
	stmmac_free_irq(dev, irq_err, irq_idx);
	return ret;
}
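
/* Sketch of the affinity policy used by stmmac_request_irq_multi_msi()
 * above: queue vectors are spread round-robin over the CPUs that are
 * online at request time. Restated here for clarity only; the helper is
 * illustrative and never called.
 */
static void __maybe_unused
stmmac_example_spread_irq(int irq, unsigned int queue)
{
	cpumask_t cpu_mask;

	cpumask_clear(&cpu_mask);
	cpumask_set_cpu(queue % num_online_cpus(), &cpu_mask);
	irq_set_affinity_hint(irq, &cpu_mask);
}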
static int stmmac_request_irq_single(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	enum request_irq_err irq_err;
	int ret;

	ret = request_irq(dev->irq, stmmac_interrupt,
			  IRQF_SHARED, dev->name, dev);
	if (unlikely(ret < 0)) {
		netdev_err(priv->dev,
			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
			   __func__, dev->irq, ret);
		irq_err = REQ_IRQ_ERR_MAC;
		goto irq_error;
	}

	/* Request the Wake IRQ in case of another line
	 * is used for WoL
	 */
	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
		ret = request_irq(priv->wol_irq, stmmac_interrupt,
				  IRQF_SHARED, dev->name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
				   __func__, priv->wol_irq, ret);
			irq_err = REQ_IRQ_ERR_WOL;
			goto irq_error;
		}
	}

	/* Request the IRQ lines */
	if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
		ret = request_irq(priv->lpi_irq, stmmac_interrupt,
				  IRQF_SHARED, dev->name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
				   __func__, priv->lpi_irq, ret);
			irq_err = REQ_IRQ_ERR_LPI;
			goto irq_error;
		}
	}

	return 0;

irq_error:
	stmmac_free_irq(dev, irq_err, 0);
	return ret;
}

static int stmmac_request_irq(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;

	/* Request the IRQ lines */
	if (priv->plat->multi_msi_en)
		ret = stmmac_request_irq_multi_msi(dev);
	else
		ret = stmmac_request_irq_single(dev);

	return ret;
}
/**
 *  stmmac_open - open entry point of the driver
 *  @dev : pointer to the device structure.
 *  Description:
 *  This function is the open entry point of the driver.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
int stmmac_open(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int mode = priv->plat->phy_interface;
	int bfsize = 0;
	u32 chan;
	int ret;

	ret = pm_runtime_get_sync(priv->device);
	if (ret < 0) {
		pm_runtime_put_noidle(priv->device);
		return ret;
	}

	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI &&
	    (!priv->hw->xpcs ||
	     xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
		ret = stmmac_init_phy(dev);
		if (ret) {
			netdev_err(priv->dev,
				   "%s: Cannot attach to PHY (error: %d)\n",
				   __func__, ret);
			goto init_phy_error;
		}
	}

	/* Extra statistics */
	memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
	priv->xstats.threshold = tc;

	bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
	if (bfsize < 0)
		bfsize = 0;

	if (bfsize < BUF_SIZE_16KiB)
		bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);

	priv->dma_buf_sz = bfsize;
	buf_sz = bfsize;

	priv->rx_copybreak = STMMAC_RX_COPYBREAK;

	if (!priv->dma_tx_size)
		priv->dma_tx_size = DMA_DEFAULT_TX_SIZE;
	if (!priv->dma_rx_size)
		priv->dma_rx_size = DMA_DEFAULT_RX_SIZE;

	/* Earlier check for TBS */
	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;

		/* Setup per-TXQ tbs flag before TX descriptor alloc */
		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
	}

	ret = alloc_dma_desc_resources(priv);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
			   __func__);
		goto dma_desc_error;
	}

	ret = init_dma_desc_rings(dev, GFP_KERNEL);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
			   __func__);
		goto init_error;
	}

	ret = stmmac_hw_setup(dev, true);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
		goto init_error;
	}

	stmmac_init_coalesce(priv);

	phylink_start(priv->phylink);
	/* We may have called phylink_speed_down before */
	phylink_speed_up(priv->phylink);

	ret = stmmac_request_irq(dev);
	if (ret)
		goto irq_error;

	stmmac_enable_all_queues(priv);
	netif_tx_start_all_queues(priv->dev);

	return 0;

irq_error:
	phylink_stop(priv->phylink);

	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->tx_queue[chan].txtimer);

	stmmac_hw_teardown(dev);
init_error:
	free_dma_desc_resources(priv);
dma_desc_error:
	phylink_disconnect_phy(priv->phylink);
init_phy_error:
	pm_runtime_put(priv->device);
	return ret;
}

static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
{
	set_bit(__FPE_REMOVING, &priv->fpe_task_state);

	if (priv->fpe_wq)
		destroy_workqueue(priv->fpe_wq);

	netdev_info(priv->dev, "FPE workqueue stop");
}

/**
 *  stmmac_release - close entry point of the driver
 *  @dev : device pointer.
 *  Description:
 *  This is the stop entry point of the driver.
 */
int stmmac_release(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 chan;

	netif_tx_disable(dev);

	if (device_may_wakeup(priv->device))
		phylink_speed_down(priv->phylink, false);
	/* Stop and disconnect the PHY */
	phylink_stop(priv->phylink);
	phylink_disconnect_phy(priv->phylink);

	stmmac_disable_all_queues(priv);

	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->tx_queue[chan].txtimer);

	/* Free the IRQ lines */
	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);

	if (priv->eee_enabled) {
		priv->tx_path_in_lpi_mode = false;
		del_timer_sync(&priv->eee_ctrl_timer);
	}

	/* Stop TX/RX DMA and clear the descriptors */
	stmmac_stop_all_dma(priv);

	/* Release and free the Rx/Tx resources */
	free_dma_desc_resources(priv);

	/* Disable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, false);

	netif_carrier_off(dev);

	stmmac_release_ptp(priv);

	pm_runtime_put(priv->device);

	if (priv->dma_cap.fpesel)
		stmmac_fpe_stop_wq(priv);

	return 0;
}
static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
			       struct stmmac_tx_queue *tx_q)
{
	u16 tag = 0x0, inner_tag = 0x0;
	u32 inner_type = 0x0;
	struct dma_desc *p;

	if (!priv->dma_cap.vlins)
		return false;
	if (!skb_vlan_tag_present(skb))
		return false;
	if (skb->vlan_proto == htons(ETH_P_8021AD)) {
		inner_tag = skb_vlan_tag_get(skb);
		inner_type = STMMAC_VLAN_INSERT;
	}

	tag = skb_vlan_tag_get(skb);

	if (tx_q->tbs & STMMAC_TBS_AVAIL)
		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
	else
		p = &tx_q->dma_tx[tx_q->cur_tx];

	if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
		return false;

	stmmac_set_tx_owner(priv, p);
	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
	return true;
}
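
/* Companion sketch for stmmac_vlan_insert(): the 16-bit TCI carried in
 * the skb is what gets programmed into the descriptor, and an 802.1ad
 * outer protocol marks the packet as double-tagged. The decomposition
 * below uses only VLAN_VID_MASK, VLAN_PRIO_MASK and VLAN_PRIO_SHIFT
 * from <linux/if_vlan.h>, which this file already includes; the helper
 * is illustrative only.
 */
static inline void __maybe_unused
stmmac_example_split_tci(u16 tci, u16 *vid, u8 *pcp)
{
	*vid = tci & VLAN_VID_MASK;			/* bits 11:0  */
	*pcp = (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; /* bits 15:13 */
}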
/**
 *  stmmac_tso_allocator - fill TX descriptors with TSO payload
 *  @priv: driver private structure
 *  @des: buffer start address
 *  @total_len: total length to fill in descriptors
 *  @last_segment: condition for the last descriptor
 *  @queue: TX queue index
 *  Description:
 *  This function fills a descriptor and requests new descriptors
 *  according to the buffer length to fill.
 */
static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
				 int total_len, bool last_segment, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	struct dma_desc *desc;
	u32 buff_size;
	int tmp_len;

	tmp_len = total_len;

	while (tmp_len > 0) {
		dma_addr_t curr_addr;

		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
						priv->dma_tx_size);
		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);

		if (tx_q->tbs & STMMAC_TBS_AVAIL)
			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
		else
			desc = &tx_q->dma_tx[tx_q->cur_tx];

		curr_addr = des + (total_len - tmp_len);
		if (priv->dma_cap.addr64 <= 32)
			desc->des0 = cpu_to_le32(curr_addr);
		else
			stmmac_set_desc_addr(priv, desc, curr_addr);

		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
			    TSO_MAX_BUFF_SIZE : tmp_len;

		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
					   0, 1,
					   (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
					   0, 0);

		tmp_len -= TSO_MAX_BUFF_SIZE;
	}
}
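
/* Sketch of the descriptor budget behind stmmac_tso_allocator(): a TSO
 * payload is chopped into TSO_MAX_BUFF_SIZE chunks, so one frame needs
 * the payload length divided by that chunk size, rounded down, plus one
 * descriptor. stmmac_tso_xmit() below uses this same bound for its
 * ring-availability check; the helper merely restates it.
 */
static inline int __maybe_unused
stmmac_example_tso_ndesc(unsigned int skb_len, unsigned int proto_hdr_len)
{
	/* e.g. a 64 KiB frame with 54-byte headers needs 4 descriptors
	 * by this bound
	 */
	return (skb_len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1;
}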
static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	int desc_size;

	if (likely(priv->extend_desc))
		desc_size = sizeof(struct dma_extended_desc);
	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
		desc_size = sizeof(struct dma_edesc);
	else
		desc_size = sizeof(struct dma_desc);

	/* The own bit must be the latest setting done when prepare the
	 * descriptor and then barrier is needed to make sure that
	 * all is coherent before granting the DMA engine.
	 */
	wmb();

	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
}
3945 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
3946 * @skb : the socket buffer
3947 * @dev : device pointer
3948 * Description: this is the transmit function that is called on TSO frames
3949 * (support available on GMAC4 and newer chips).
3950 * Diagram below show the ring programming in case of TSO frames:
3954 * | DES0 |---> buffer1 = L2/L3/L4 header
3955 * | DES1 |---> TCP Payload (can continue on next descr...)
3956 * | DES2 |---> buffer 1 and 2 len
3957 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
3963 * | DES0 | --| Split TCP Payload on Buffers 1 and 2
3965 * | DES2 | --> buffer 1 and 2 len
3969 * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
3971 static netdev_tx_t
stmmac_tso_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
3973 struct dma_desc
*desc
, *first
, *mss_desc
= NULL
;
3974 struct stmmac_priv
*priv
= netdev_priv(dev
);
3975 int nfrags
= skb_shinfo(skb
)->nr_frags
;
3976 u32 queue
= skb_get_queue_mapping(skb
);
3977 unsigned int first_entry
, tx_packets
;
3978 int tmp_pay_len
= 0, first_tx
;
3979 struct stmmac_tx_queue
*tx_q
;
3980 bool has_vlan
, set_ic
;
3981 u8 proto_hdr_len
, hdr
;
3986 tx_q
= &priv
->tx_queue
[queue
];
3987 first_tx
= tx_q
->cur_tx
;
3989 /* Compute header lengths */
3990 if (skb_shinfo(skb
)->gso_type
& SKB_GSO_UDP_L4
) {
3991 proto_hdr_len
= skb_transport_offset(skb
) + sizeof(struct udphdr
);
3992 hdr
= sizeof(struct udphdr
);
3994 proto_hdr_len
= skb_transport_offset(skb
) + tcp_hdrlen(skb
);
3995 hdr
= tcp_hdrlen(skb
);
3998 /* Desc availability based on threshold should be enough safe */
3999 if (unlikely(stmmac_tx_avail(priv
, queue
) <
4000 (((skb
->len
- proto_hdr_len
) / TSO_MAX_BUFF_SIZE
+ 1)))) {
4001 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev
, queue
))) {
4002 netif_tx_stop_queue(netdev_get_tx_queue(priv
->dev
,
4004 /* This is a hard error, log it. */
4005 netdev_err(priv
->dev
,
4006 "%s: Tx Ring full when queue awake\n",
4009 return NETDEV_TX_BUSY
;
4012 pay_len
= skb_headlen(skb
) - proto_hdr_len
; /* no frags */
4014 mss
= skb_shinfo(skb
)->gso_size
;
4016 /* set new MSS value if needed */
4017 if (mss
!= tx_q
->mss
) {
4018 if (tx_q
->tbs
& STMMAC_TBS_AVAIL
)
4019 mss_desc
= &tx_q
->dma_entx
[tx_q
->cur_tx
].basic
;
4021 mss_desc
= &tx_q
->dma_tx
[tx_q
->cur_tx
];
4023 stmmac_set_mss(priv
, mss_desc
, mss
);
4025 tx_q
->cur_tx
= STMMAC_GET_ENTRY(tx_q
->cur_tx
,
4027 WARN_ON(tx_q
->tx_skbuff
[tx_q
->cur_tx
]);
4030 if (netif_msg_tx_queued(priv
)) {
4031 pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4032 __func__
, hdr
, proto_hdr_len
, pay_len
, mss
);
4033 pr_info("\tskb->len %d, skb->data_len %d\n", skb
->len
,
4037 /* Check if VLAN can be inserted by HW */
4038 has_vlan
= stmmac_vlan_insert(priv
, skb
, tx_q
);
4040 first_entry
= tx_q
->cur_tx
;
4041 WARN_ON(tx_q
->tx_skbuff
[first_entry
]);
4043 if (tx_q
->tbs
& STMMAC_TBS_AVAIL
)
4044 desc
= &tx_q
->dma_entx
[first_entry
].basic
;
	desc = &tx_q->dma_tx[first_entry];
	first = desc;

	if (has_vlan)
		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);

	/* first descriptor: fill Headers on Buf1 */
	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
			     DMA_TO_DEVICE);
	if (dma_mapping_error(priv->device, des))
		goto dma_map_err;

	tx_q->tx_skbuff_dma[first_entry].buf = des;
	tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
	tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
	tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;

	if (priv->dma_cap.addr64 <= 32) {
		first->des0 = cpu_to_le32(des);

		/* Fill start of payload in buff2 of first descriptor */
		if (pay_len)
			first->des1 = cpu_to_le32(des + proto_hdr_len);

		/* If needed take extra descriptors to fill the remaining payload */
		tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
	} else {
		stmmac_set_desc_addr(priv, first, des);
		tmp_pay_len = pay_len;
		des += proto_hdr_len;
		pay_len = 0;
	}

	stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);

	/* Prepare fragments */
	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		des = skb_frag_dma_map(priv->device, frag, 0,
				       skb_frag_size(frag),
				       DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err;

		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
				     (i == nfrags - 1), queue);

		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
	}

	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;

	/* Only the last descriptor gets to point to the skb. */
	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;

	/* Manage tx mitigation */
	tx_packets = (tx_q->cur_tx + 1) - first_tx;
	tx_q->tx_count_frames += tx_packets;

	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
		set_ic = true;
	else if (!priv->tx_coal_frames[queue])
		set_ic = false;
	else if (tx_packets > priv->tx_coal_frames[queue])
		set_ic = true;
	else if ((tx_q->tx_count_frames %
		  priv->tx_coal_frames[queue]) < tx_packets)
		set_ic = true;
	else
		set_ic = false;

	if (set_ic) {
		if (tx_q->tbs & STMMAC_TBS_AVAIL)
			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
		else
			desc = &tx_q->dma_tx[tx_q->cur_tx];

		tx_q->tx_count_frames = 0;
		stmmac_set_tx_ic(priv, desc);
		priv->xstats.tx_set_ic_bit++;
	}

	/* We've used all descriptors we need for this skb, however,
	 * advance cur_tx so that it references a fresh descriptor.
	 * ndo_start_xmit will fill this descriptor the next time it's
	 * called and stmmac_tx_clean may clean up to this descriptor.
	 */
	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);

	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
			  __func__);
		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	dev->stats.tx_bytes += skb->len;
	priv->xstats.tx_tso_frames++;
	priv->xstats.tx_tso_nfrags += nfrags;

	if (priv->sarc_type)
		stmmac_set_desc_sarc(priv, first, priv->sarc_type);

	skb_tx_timestamp(skb);

	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		     priv->hwts_tx_en)) {
		/* declare that device is doing timestamping */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		stmmac_enable_tx_timestamp(priv, first);
	}

	/* Complete the first descriptor before granting the DMA */
	stmmac_prepare_tso_tx_desc(priv, first, 1,
			proto_hdr_len,
			pay_len,
			1, tx_q->tx_skbuff_dma[first_entry].last_segment,
			hdr / 4, (skb->len - proto_hdr_len));

	/* If context desc is used to change MSS */
	if (mss_desc) {
		/* Make sure that first descriptor has been completely
		 * written, including its own bit. This is because MSS is
		 * actually before first descriptor, so we need to make
		 * sure that MSS's own bit is the last thing written.
		 */
		dma_wmb();
		stmmac_set_tx_owner(priv, mss_desc);
	}

	if (netif_msg_pktdata(priv)) {
		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
			tx_q->cur_tx, first, nfrags);
		pr_info(">>> frame to be transmitted: ");
		print_pkt(skb->data, skb_headlen(skb));
	}

	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);

	stmmac_flush_tx_descriptors(priv, queue);
	stmmac_tx_timer_arm(priv, queue);

	return NETDEV_TX_OK;

dma_map_err:
	dev_err(priv->device, "Tx dma map failed\n");
	dev_kfree_skb(skb);
	priv->dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
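/* Both the TSO path above and the regular stmmac_xmit() path below share
 * the same TX mitigation heuristic: the Interrupt-on-Completion (IC) bit
 * is set when the skb requests a hardware timestamp, when frame
 * coalescing is disabled (tx_coal_frames == 0), or when the number of
 * frames queued since the last IC crosses the per-queue coalesce
 * threshold. Otherwise the completion interrupt is skipped and
 * stmmac_tx_timer_arm() relies on the coalesce timer to clean the ring.
 */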
/**
 *  stmmac_xmit - Tx entry point of the driver
 *  @skb : the socket buffer
 *  @dev : device pointer
 *  Description : this is the tx entry point of the driver.
 *  It programs the chain or the ring and supports oversized frames
 *  and SG feature.
 */
static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int first_entry, tx_packets, enh_desc;
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned int nopaged_len = skb_headlen(skb);
	int i, csum_insertion = 0, is_jumbo = 0;
	u32 queue = skb_get_queue_mapping(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int gso = skb_shinfo(skb)->gso_type;
	struct dma_edesc *tbs_desc = NULL;
	struct dma_desc *desc, *first;
	struct stmmac_tx_queue *tx_q;
	bool has_vlan, set_ic;
	int entry, first_tx;
	dma_addr_t des;

	tx_q = &priv->tx_queue[queue];
	first_tx = tx_q->cur_tx;

	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
		stmmac_disable_eee_mode(priv);

	/* Manage oversized TCP frames for GMAC4 device */
	if (skb_is_gso(skb) && priv->tso) {
		if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
			return stmmac_tso_xmit(skb, dev);
		if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
			return stmmac_tso_xmit(skb, dev);
	}

	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
								queue));
			/* This is a hard error, log it. */
			netdev_err(priv->dev,
				   "%s: Tx Ring full when queue awake\n",
				   __func__);
		}
		return NETDEV_TX_BUSY;
	}

	/* Check if VLAN can be inserted by HW */
	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);

	entry = tx_q->cur_tx;
	first_entry = entry;
	WARN_ON(tx_q->tx_skbuff[first_entry]);

	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);

	if (likely(priv->extend_desc))
		desc = (struct dma_desc *)(tx_q->dma_etx + entry);
	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
		desc = &tx_q->dma_entx[entry].basic;
	else
		desc = tx_q->dma_tx + entry;

	first = desc;

	if (has_vlan)
		stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);

	enh_desc = priv->plat->enh_desc;
	/* To program the descriptors according to the size of the frame */
	if (enh_desc)
		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);

	if (unlikely(is_jumbo)) {
		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
		if (unlikely(entry < 0) && (entry != -EINVAL))
			goto dma_map_err;
	}

	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int len = skb_frag_size(frag);
		bool last_segment = (i == (nfrags - 1));

		entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
		WARN_ON(tx_q->tx_skbuff[entry]);

		if (likely(priv->extend_desc))
			desc = (struct dma_desc *)(tx_q->dma_etx + entry);
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			desc = &tx_q->dma_entx[entry].basic;
		else
			desc = tx_q->dma_tx + entry;

		des = skb_frag_dma_map(priv->device, frag, 0, len,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err; /* should reuse desc w/o issues */

		tx_q->tx_skbuff_dma[entry].buf = des;

		stmmac_set_desc_addr(priv, desc, des);

		tx_q->tx_skbuff_dma[entry].map_as_page = true;
		tx_q->tx_skbuff_dma[entry].len = len;
		tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;

		/* Prepare the descriptor and set the own bit too */
		stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
				priv->mode, 1, last_segment, skb->len);
	}

	/* Only the last descriptor gets to point to the skb. */
	tx_q->tx_skbuff[entry] = skb;
	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;

	/* According to the coalesce parameter the IC bit for the latest
	 * segment is reset and the timer re-started to clean the tx status.
	 * This approach takes care about the fragments: desc is the first
	 * element in case of no SG.
	 */
	tx_packets = (entry + 1) - first_tx;
	tx_q->tx_count_frames += tx_packets;

	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
		set_ic = true;
	else if (!priv->tx_coal_frames[queue])
		set_ic = false;
	else if (tx_packets > priv->tx_coal_frames[queue])
		set_ic = true;
	else if ((tx_q->tx_count_frames %
		  priv->tx_coal_frames[queue]) < tx_packets)
		set_ic = true;
	else
		set_ic = false;

	if (set_ic) {
		if (likely(priv->extend_desc))
			desc = &tx_q->dma_etx[entry].basic;
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			desc = &tx_q->dma_entx[entry].basic;
		else
			desc = &tx_q->dma_tx[entry];

		tx_q->tx_count_frames = 0;
		stmmac_set_tx_ic(priv, desc);
		priv->xstats.tx_set_ic_bit++;
	}

	/* We've used all descriptors we need for this skb, however,
	 * advance cur_tx so that it references a fresh descriptor.
	 * ndo_start_xmit will fill this descriptor the next time it's
	 * called and stmmac_tx_clean may clean up to this descriptor.
	 */
	entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
	tx_q->cur_tx = entry;

	if (netif_msg_pktdata(priv)) {
		netdev_dbg(priv->dev,
			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
			   entry, first, nfrags);

		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
		print_pkt(skb->data, skb->len);
	}

	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
			  __func__);
		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	dev->stats.tx_bytes += skb->len;

	if (priv->sarc_type)
		stmmac_set_desc_sarc(priv, first, priv->sarc_type);

	skb_tx_timestamp(skb);

	/* Ready to fill the first descriptor and set the OWN bit w/o any
	 * problems because all the descriptors are actually ready to be
	 * passed to the DMA engine.
	 */
	if (likely(!is_jumbo)) {
		bool last_segment = (nfrags == 0);

		des = dma_map_single(priv->device, skb->data,
				     nopaged_len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err;

		tx_q->tx_skbuff_dma[first_entry].buf = des;
		tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
		tx_q->tx_skbuff_dma[first_entry].map_as_page = false;

		stmmac_set_desc_addr(priv, first, des);

		tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
		tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;

		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
			     priv->hwts_tx_en)) {
			/* declare that device is doing timestamping */
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			stmmac_enable_tx_timestamp(priv, first);
		}

		/* Prepare the first descriptor setting the OWN bit too */
		stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
				csum_insertion, priv->mode, 0, last_segment,
				skb->len);
	}

	if (tx_q->tbs & STMMAC_TBS_EN) {
		struct timespec64 ts = ns_to_timespec64(skb->tstamp);

		tbs_desc = &tx_q->dma_entx[first_entry];
		stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
	}

	stmmac_set_tx_owner(priv, first);

	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);

	stmmac_enable_dma_transmission(priv, priv->ioaddr);

	stmmac_flush_tx_descriptors(priv, queue);
	stmmac_tx_timer_arm(priv, queue);

	return NETDEV_TX_OK;

dma_map_err:
	netdev_err(priv->dev, "Tx DMA map failed\n");
	dev_kfree_skb(skb);
	priv->dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
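/* When the MAC does not strip the VLAN header itself, stmmac_rx_vlan()
 * below pops the 802.1Q/802.1AD tag in software: it moves the two MAC
 * addresses forward by VLAN_HLEN bytes and hands the tag to the stack
 * via __vlan_hwaccel_put_tag(), but only when the matching RX offload
 * feature flag is enabled on the netdev.
 */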
static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
{
	struct vlan_ethhdr *veth;
	__be16 vlan_proto;
	u16 vlanid;

	veth = (struct vlan_ethhdr *)skb->data;
	vlan_proto = veth->h_vlan_proto;

	if ((vlan_proto == htons(ETH_P_8021Q) &&
	     dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
	    (vlan_proto == htons(ETH_P_8021AD) &&
	     dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
		/* pop the vlan tag */
		vlanid = ntohs(veth->h_vlan_TCI);
		memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
		skb_pull(skb, VLAN_HLEN);
		__vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
	}
}
/**
 * stmmac_rx_refill - refill used skb preallocated buffers
 * @priv: driver private structure
 * @queue: RX queue index
 * Description : this is to reallocate the skb for the reception process
 * that is based on zero-copy.
 */
static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	int dirty = stmmac_rx_dirty(priv, queue);
	unsigned int entry = rx_q->dirty_rx;

	while (dirty-- > 0) {
		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
		struct dma_desc *p;
		bool use_rx_wd;

		if (priv->extend_desc)
			p = (struct dma_desc *)(rx_q->dma_erx + entry);
		else
			p = rx_q->dma_rx + entry;

		if (!buf->page) {
			buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
			if (!buf->page)
				break;
		}

		if (priv->sph && !buf->sec_page) {
			buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
			if (!buf->sec_page)
				break;

			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
		}

		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;

		stmmac_set_desc_addr(priv, p, buf->addr);
		if (priv->sph)
			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
		else
			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
		stmmac_refill_desc3(priv, rx_q, p);

		rx_q->rx_count_frames++;
		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
			rx_q->rx_count_frames = 0;

		use_rx_wd = !priv->rx_coal_frames[queue];
		use_rx_wd |= rx_q->rx_count_frames > 0;
		if (!priv->use_riwt)
			use_rx_wd = false;

		dma_wmb();
		stmmac_set_rx_owner(priv, p, use_rx_wd);

		entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
	}
	rx_q->dirty_rx = entry;
	rx_q->rx_tail_addr = rx_q->dma_rx_phy +
			     (rx_q->dirty_rx * sizeof(struct dma_desc));
	stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
}
static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
				       struct dma_desc *p,
				       int status, unsigned int len)
{
	unsigned int plen = 0, hlen = 0;
	int coe = priv->hw->rx_csum;

	/* Not first descriptor, buffer is always zero */
	if (priv->sph && len)
		return 0;

	/* First descriptor, get split header length */
	stmmac_get_rx_header_len(priv, p, &hlen);
	if (priv->sph && hlen) {
		priv->xstats.rx_split_hdr_pkt_n++;
		return hlen;
	}

	/* First descriptor, not last descriptor and not split header */
	if (status & rx_not_ls)
		return priv->dma_buf_sz;

	plen = stmmac_get_rx_frame_len(priv, p, coe);

	/* First descriptor and last descriptor and not split header */
	return min_t(unsigned int, priv->dma_buf_sz, plen);
}
static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
				       struct dma_desc *p,
				       int status, unsigned int len)
{
	int coe = priv->hw->rx_csum;
	unsigned int plen = 0;

	/* Not split header, buffer is not available */
	if (!priv->sph)
		return 0;

	/* Not last descriptor */
	if (status & rx_not_ls)
		return priv->dma_buf_sz;

	plen = stmmac_get_rx_frame_len(priv, p, coe);

	/* Last descriptor */
	return plen - len;
}
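/* XDP frames reach the TX ring through two paths: ndo_xdp_xmit frames
 * (dma_map is true, the frame is freshly DMA-mapped) and XDP_TX bounced
 * frames (dma_map is false, the page already belongs to the RX page_pool
 * and only needs a dma_sync before reuse). The buf_type recorded in
 * tx_skbuff_dma tells the cleanup path which case it is handling.
 */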
static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
				struct xdp_frame *xdpf, bool dma_map)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	unsigned int entry = tx_q->cur_tx;
	struct dma_desc *tx_desc;
	dma_addr_t dma_addr;
	bool set_ic;

	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
		return STMMAC_XDP_CONSUMED;

	if (likely(priv->extend_desc))
		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
		tx_desc = &tx_q->dma_entx[entry].basic;
	else
		tx_desc = tx_q->dma_tx + entry;

	if (dma_map) {
		dma_addr = dma_map_single(priv->device, xdpf->data,
					  xdpf->len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, dma_addr))
			return STMMAC_XDP_CONSUMED;

		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
	} else {
		struct page *page = virt_to_page(xdpf->data);

		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
			   xdpf->headroom;
		dma_sync_single_for_device(priv->device, dma_addr,
					   xdpf->len, DMA_BIDIRECTIONAL);

		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
	}

	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
	tx_q->tx_skbuff_dma[entry].map_as_page = false;
	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
	tx_q->tx_skbuff_dma[entry].last_segment = true;
	tx_q->tx_skbuff_dma[entry].is_jumbo = false;

	tx_q->xdpf[entry] = xdpf;

	stmmac_set_desc_addr(priv, tx_desc, dma_addr);

	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
			       true, priv->mode, true, true,
			       xdpf->len);

	tx_q->tx_count_frames++;

	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
		set_ic = true;
	else
		set_ic = false;

	if (set_ic) {
		tx_q->tx_count_frames = 0;
		stmmac_set_tx_ic(priv, tx_desc);
		priv->xstats.tx_set_ic_bit++;
	}

	stmmac_enable_dma_transmission(priv, priv->ioaddr);

	entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
	tx_q->cur_tx = entry;

	return STMMAC_XDP_TX;
}
static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
				   int cpu)
{
	int index = cpu;

	if (unlikely(index < 0))
		index = 0;

	while (index >= priv->plat->tx_queues_to_use)
		index -= priv->plat->tx_queues_to_use;

	return index;
}
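/* The transmit queue for XDP is derived from the current CPU id, wrapped
 * into the range of configured TX queues, so XDP TX can run on any CPU
 * while still sharing the hardware queues (and their netdev TX locks)
 * with the regular slow path.
 */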
static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
				struct xdp_buff *xdp)
{
	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
	int cpu = smp_processor_id();
	struct netdev_queue *nq;
	int queue;
	int res;

	if (unlikely(!xdpf))
		return STMMAC_XDP_CONSUMED;

	queue = stmmac_xdp_get_tx_queue(priv, cpu);
	nq = netdev_get_tx_queue(priv->dev, queue);

	__netif_tx_lock(nq, cpu);
	/* Avoids TX time-out as we are sharing with slow path */
	nq->trans_start = jiffies;

	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
	if (res == STMMAC_XDP_TX)
		stmmac_flush_tx_descriptors(priv, queue);

	__netif_tx_unlock(nq);

	return res;
}
static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
				 struct bpf_prog *prog,
				 struct xdp_buff *xdp)
{
	u32 act;
	int res;

	act = bpf_prog_run_xdp(prog, xdp);
	switch (act) {
	case XDP_PASS:
		res = STMMAC_XDP_PASS;
		break;
	case XDP_TX:
		res = stmmac_xdp_xmit_back(priv, xdp);
		break;
	case XDP_REDIRECT:
		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
			res = STMMAC_XDP_CONSUMED;
		else
			res = STMMAC_XDP_REDIRECT;
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(priv->dev, prog, act);
		fallthrough;
	case XDP_DROP:
		res = STMMAC_XDP_CONSUMED;
		break;
	}

	return res;
}
static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
					   struct xdp_buff *xdp)
{
	struct bpf_prog *prog;
	int res;

	prog = READ_ONCE(priv->xdp_prog);
	if (!prog) {
		res = STMMAC_XDP_PASS;
		goto out;
	}

	res = __stmmac_xdp_run_prog(priv, prog, xdp);
out:
	return ERR_PTR(-res);
}
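/* XDP verdicts are folded into an ERR_PTR() by stmmac_xdp_run_prog() so
 * the RX path can tell "no skb because XDP consumed the frame" apart
 * from a real socket buffer: IS_ERR(skb) recovers the verdict via
 * -PTR_ERR(skb). After the RX loop, stmmac_finalize_xdp_rx() below arms
 * the TX timer for XDP_TX frames and flushes pending redirects.
 */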
static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
				   int xdp_status)
{
	int cpu = smp_processor_id();
	int queue;

	queue = stmmac_xdp_get_tx_queue(priv, cpu);

	if (xdp_status & STMMAC_XDP_TX)
		stmmac_tx_timer_arm(priv, queue);

	if (xdp_status & STMMAC_XDP_REDIRECT)
		xdp_do_flush();
}
static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
					       struct xdp_buff *xdp)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
	unsigned int datasize = xdp->data_end - xdp->data;
	struct sk_buff *skb;

	skb = __napi_alloc_skb(&ch->rxtx_napi,
			       xdp->data_end - xdp->data_hard_start,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
	if (metasize)
		skb_metadata_set(skb, metasize);

	return skb;
}
static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
				   struct dma_desc *p, struct dma_desc *np,
				   struct xdp_buff *xdp)
{
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned int len = xdp->data_end - xdp->data;
	enum pkt_hash_types hash_type;
	int coe = priv->hw->rx_csum;
	struct sk_buff *skb;
	u32 hash;

	skb = stmmac_construct_skb_zc(ch, xdp);
	if (!skb) {
		priv->dev->stats.rx_dropped++;
		return;
	}

	stmmac_get_rx_hwtstamp(priv, p, np, skb);
	stmmac_rx_vlan(priv->dev, skb);
	skb->protocol = eth_type_trans(skb, priv->dev);

	if (unlikely(!coe))
		skb_checksum_none_assert(skb);
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
		skb_set_hash(skb, hash, hash_type);

	skb_record_rx_queue(skb, queue);
	napi_gro_receive(&ch->rxtx_napi, skb);

	priv->dev->stats.rx_packets++;
	priv->dev->stats.rx_bytes += len;
}
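/* The XSK zero-copy RX ring is refilled in batches: each consumed entry
 * gets a fresh buffer from the XSK pool and ownership is handed back to
 * the DMA, with the RX watchdog bit following the same coalescing rules
 * as the page_pool based stmmac_rx_refill() above.
 */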
static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	unsigned int entry = rx_q->dirty_rx;
	struct dma_desc *rx_desc = NULL;
	bool ret = true;

	budget = min(budget, stmmac_rx_dirty(priv, queue));

	while (budget-- > 0 && entry != rx_q->cur_rx) {
		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
		dma_addr_t dma_addr;
		bool use_rx_wd;

		if (!buf->xdp) {
			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
			if (!buf->xdp) {
				ret = false;
				break;
			}
		}

		if (priv->extend_desc)
			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
		else
			rx_desc = rx_q->dma_rx + entry;

		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
		stmmac_refill_desc3(priv, rx_q, rx_desc);

		rx_q->rx_count_frames++;
		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
			rx_q->rx_count_frames = 0;

		use_rx_wd = !priv->rx_coal_frames[queue];
		use_rx_wd |= rx_q->rx_count_frames > 0;
		if (!priv->use_riwt)
			use_rx_wd = false;

		dma_wmb();
		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);

		entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
	}

	if (rx_desc) {
		rx_q->dirty_rx = entry;
		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
				     (rx_q->dirty_rx * sizeof(struct dma_desc));
		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
	}

	return ret;
}
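/* Zero-copy receive: frames are processed 1:1 against XSK pool buffers.
 * Because a frame may span more than one NAPI poll, the loop below saves
 * its error/len state in rx_q->state when it runs out of budget and
 * restores that state on the next invocation.
 */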
static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	unsigned int count = 0, error = 0, len = 0;
	int dirty = stmmac_rx_dirty(priv, queue);
	unsigned int next_entry = rx_q->cur_rx;
	unsigned int desc_size;
	struct bpf_prog *prog;
	bool failure = false;
	int xdp_status = 0;
	int status = 0;

	if (netif_msg_rx_status(priv)) {
		void *rx_head;

		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
		if (priv->extend_desc) {
			rx_head = (void *)rx_q->dma_erx;
			desc_size = sizeof(struct dma_extended_desc);
		} else {
			rx_head = (void *)rx_q->dma_rx;
			desc_size = sizeof(struct dma_desc);
		}

		stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
				    rx_q->dma_rx_phy, desc_size);
	}
	while (count < limit) {
		struct stmmac_rx_buffer *buf;
		unsigned int buf1_len = 0;
		struct dma_desc *np, *p;
		int entry;
		int res;

		if (!count && rx_q->state_saved) {
			error = rx_q->state.error;
			len = rx_q->state.len;
		} else {
			rx_q->state_saved = false;
			error = 0;
			len = 0;
		}

		if (count >= limit)
			break;

read_again:
		buf1_len = 0;
		entry = next_entry;
		buf = &rx_q->buf_pool[entry];

		if (dirty >= STMMAC_RX_FILL_BATCH) {
			failure = failure ||
				  !stmmac_rx_refill_zc(priv, queue, dirty);
			dirty = 0;
		}

		if (priv->extend_desc)
			p = (struct dma_desc *)(rx_q->dma_erx + entry);
		else
			p = rx_q->dma_rx + entry;

		/* read the status of the incoming frame */
		status = stmmac_rx_status(priv, &priv->dev->stats,
					  &priv->xstats, p);
		/* check if managed by the DMA otherwise go ahead */
		if (unlikely(status & dma_own))
			break;

		/* Prefetch the next RX descriptor */
		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
						priv->dma_rx_size);
		next_entry = rx_q->cur_rx;

		if (priv->extend_desc)
			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
		else
			np = rx_q->dma_rx + next_entry;

		prefetch(np);

		/* Ensure a valid XSK buffer before proceeding */
		if (!buf->xdp)
			break;

		if (priv->extend_desc)
			stmmac_rx_extended_status(priv, &priv->dev->stats,
						  &priv->xstats,
						  rx_q->dma_erx + entry);
		if (unlikely(status == discard_frame)) {
			xsk_buff_free(buf->xdp);
			buf->xdp = NULL;
			dirty++;
			error = 1;
			if (!priv->hwts_rx_en)
				priv->dev->stats.rx_errors++;
		}

		if (unlikely(error && (status & rx_not_ls)))
			goto read_again;
		if (unlikely(error)) {
			count++;
			continue;
		}

		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
		if (likely(status & rx_not_ls)) {
			xsk_buff_free(buf->xdp);
			buf->xdp = NULL;
			dirty++;
			count++;
			goto read_again;
		}

		/* XDP ZC Frame only support primary buffers for now */
		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
		len += buf1_len;

		/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
		 * Type frames (LLC/LLC-SNAP)
		 *
		 * llc_snap is never checked in GMAC >= 4, so this ACS
		 * feature is always disabled and packets need to be
		 * stripped manually.
		 */
		if (likely(!(status & rx_not_ls)) &&
		    (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
		     unlikely(status != llc_snap))) {
			buf1_len -= ETH_FCS_LEN;
			len -= ETH_FCS_LEN;
		}

		/* RX buffer is good and fit into a XSK pool buffer */
		buf->xdp->data_end = buf->xdp->data + buf1_len;
		xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);

		prog = READ_ONCE(priv->xdp_prog);
		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);

		switch (res) {
		case STMMAC_XDP_PASS:
			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
			xsk_buff_free(buf->xdp);
			break;
		case STMMAC_XDP_CONSUMED:
			xsk_buff_free(buf->xdp);
			priv->dev->stats.rx_dropped++;
			break;
		case STMMAC_XDP_TX:
		case STMMAC_XDP_REDIRECT:
			xdp_status |= res;
			break;
		}

		buf->xdp = NULL;
		dirty++;
		count++;
	}

	if (status & rx_not_ls) {
		rx_q->state_saved = true;
		rx_q->state.error = error;
		rx_q->state.len = len;
	}

	stmmac_finalize_xdp_rx(priv, xdp_status);

	priv->xstats.rx_pkt_n += count;
	priv->xstats.rxq_stats[queue].rx_pkt_n += count;

	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
		if (failure || stmmac_rx_dirty(priv, queue) > 0)
			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
		else
			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);

		return (int)count;
	}

	return failure ? limit : (int)count;
}
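/* The page_pool based receive path below follows the same structure as
 * stmmac_rx_zc(): descriptors are walked until the DMA still owns one,
 * an optional XDP program is run on the first buffer, and partial frames
 * carry their skb/error/len state across NAPI polls via rx_q->state.
 */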
/**
 * stmmac_rx - manage the receive process
 * @priv: driver private structure
 * @limit: NAPI budget
 * @queue: RX queue index.
 * Description :  this is the function called by the napi poll method.
 * It gets all the frames inside the ring.
 */
static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned int count = 0, error = 0, len = 0;
	int status = 0, coe = priv->hw->rx_csum;
	unsigned int next_entry = rx_q->cur_rx;
	enum dma_data_direction dma_dir;
	unsigned int desc_size;
	struct sk_buff *skb = NULL;
	struct xdp_buff xdp;
	int xdp_status = 0;
	int buf_sz;

	dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
	buf_sz = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;

	if (netif_msg_rx_status(priv)) {
		void *rx_head;

		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
		if (priv->extend_desc) {
			rx_head = (void *)rx_q->dma_erx;
			desc_size = sizeof(struct dma_extended_desc);
		} else {
			rx_head = (void *)rx_q->dma_rx;
			desc_size = sizeof(struct dma_desc);
		}

		stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
				    rx_q->dma_rx_phy, desc_size);
	}
	while (count < limit) {
		unsigned int buf1_len = 0, buf2_len = 0;
		enum pkt_hash_types hash_type;
		struct stmmac_rx_buffer *buf;
		struct dma_desc *np, *p;
		int entry;
		u32 hash;

		if (!count && rx_q->state_saved) {
			skb = rx_q->state.skb;
			error = rx_q->state.error;
			len = rx_q->state.len;
		} else {
			rx_q->state_saved = false;
			skb = NULL;
			error = 0;
			len = 0;
		}

		if (count >= limit)
			break;

read_again:
		buf1_len = 0;
		buf2_len = 0;
		entry = next_entry;
		buf = &rx_q->buf_pool[entry];

		if (priv->extend_desc)
			p = (struct dma_desc *)(rx_q->dma_erx + entry);
		else
			p = rx_q->dma_rx + entry;

		/* read the status of the incoming frame */
		status = stmmac_rx_status(priv, &priv->dev->stats,
				&priv->xstats, p);
		/* check if managed by the DMA otherwise go ahead */
		if (unlikely(status & dma_own))
			break;

		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
						priv->dma_rx_size);
		next_entry = rx_q->cur_rx;

		if (priv->extend_desc)
			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
		else
			np = rx_q->dma_rx + next_entry;

		prefetch(np);

		if (priv->extend_desc)
			stmmac_rx_extended_status(priv, &priv->dev->stats,
					&priv->xstats, rx_q->dma_erx + entry);
		if (unlikely(status == discard_frame)) {
			page_pool_recycle_direct(rx_q->page_pool, buf->page);
			buf->page = NULL;
			error = 1;
			if (!priv->hwts_rx_en)
				priv->dev->stats.rx_errors++;
		}

		if (unlikely(error && (status & rx_not_ls)))
			goto read_again;
		if (unlikely(error)) {
			dev_kfree_skb(skb);
			skb = NULL;
			count++;
			continue;
		}

		/* Buffer is good. Go on. */

		prefetch(page_address(buf->page) + buf->page_offset);
		if (buf->sec_page)
			prefetch(page_address(buf->sec_page));

		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
		len += buf1_len;
		buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
		len += buf2_len;

		/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
		 * Type frames (LLC/LLC-SNAP)
		 *
		 * llc_snap is never checked in GMAC >= 4, so this ACS
		 * feature is always disabled and packets need to be
		 * stripped manually.
		 */
		if (likely(!(status & rx_not_ls)) &&
		    (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
		     unlikely(status != llc_snap))) {
			if (buf2_len)
				buf2_len -= ETH_FCS_LEN;
			else
				buf1_len -= ETH_FCS_LEN;

			len -= ETH_FCS_LEN;
		}

		if (!skb) {
			unsigned int pre_len, sync_len;

			dma_sync_single_for_cpu(priv->device, buf->addr,
						buf1_len, dma_dir);

			xdp_init_buff(&xdp, buf_sz, &rx_q->xdp_rxq);
			xdp_prepare_buff(&xdp, page_address(buf->page),
					 buf->page_offset, buf1_len, false);

			pre_len = xdp.data_end - xdp.data_hard_start -
				  buf->page_offset;
			skb = stmmac_xdp_run_prog(priv, &xdp);
			/* Due to xdp_adjust_tail: the for_device DMA sync
			 * must cover the max length the CPU touched.
			 */
			sync_len = xdp.data_end - xdp.data_hard_start -
				   buf->page_offset;
			sync_len = max(sync_len, pre_len);

			/* For Not XDP_PASS verdict */
			if (IS_ERR(skb)) {
				unsigned int xdp_res = -PTR_ERR(skb);

				if (xdp_res & STMMAC_XDP_CONSUMED) {
					page_pool_put_page(rx_q->page_pool,
							   virt_to_head_page(xdp.data),
							   sync_len, true);
					buf->page = NULL;
					priv->dev->stats.rx_dropped++;

					/* Clear skb as it was set as
					 * status by XDP program.
					 */
					skb = NULL;

					if (unlikely((status & rx_not_ls)))
						goto read_again;

					count++;
					continue;
				} else if (xdp_res & (STMMAC_XDP_TX |
						      STMMAC_XDP_REDIRECT)) {
					xdp_status |= xdp_res;
					buf->page = NULL;
					skb = NULL;
					count++;
					continue;
				}
			}
		}

		if (!skb) {
			/* XDP program may expand or reduce tail */
			buf1_len = xdp.data_end - xdp.data;

			skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
			if (!skb) {
				priv->dev->stats.rx_dropped++;
				count++;
				goto drain_data;
			}

			/* XDP program may adjust header */
			skb_copy_to_linear_data(skb, xdp.data, buf1_len);
			skb_put(skb, buf1_len);

			/* Data payload copied into SKB, page ready for recycle */
			page_pool_recycle_direct(rx_q->page_pool, buf->page);
			buf->page = NULL;
		} else if (buf1_len) {
			dma_sync_single_for_cpu(priv->device, buf->addr,
						buf1_len, dma_dir);
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
					buf->page, buf->page_offset, buf1_len,
					priv->dma_buf_sz);

			/* Data payload appended into SKB */
			page_pool_release_page(rx_q->page_pool, buf->page);
			buf->page = NULL;
		}

		if (buf2_len) {
			dma_sync_single_for_cpu(priv->device, buf->sec_addr,
						buf2_len, dma_dir);
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
					buf->sec_page, 0, buf2_len,
					priv->dma_buf_sz);

			/* Data payload appended into SKB */
			page_pool_release_page(rx_q->page_pool, buf->sec_page);
			buf->sec_page = NULL;
		}

drain_data:
		if (likely(status & rx_not_ls))
			goto read_again;
		if (!skb)
			continue;

		/* Got entire packet into SKB. Finish it. */

		stmmac_get_rx_hwtstamp(priv, p, np, skb);
		stmmac_rx_vlan(priv->dev, skb);
		skb->protocol = eth_type_trans(skb, priv->dev);

		if (unlikely(!coe))
			skb_checksum_none_assert(skb);
		else
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
			skb_set_hash(skb, hash, hash_type);

		skb_record_rx_queue(skb, queue);
		napi_gro_receive(&ch->rx_napi, skb);
		skb = NULL;

		priv->dev->stats.rx_packets++;
		priv->dev->stats.rx_bytes += len;
		count++;
	}

	if (status & rx_not_ls || skb) {
		rx_q->state_saved = true;
		rx_q->state.skb = skb;
		rx_q->state.error = error;
		rx_q->state.len = len;
	}

	stmmac_finalize_xdp_rx(priv, xdp_status);

	stmmac_rx_refill(priv, queue);

	priv->xstats.rx_pkt_n += count;
	priv->xstats.rxq_stats[queue].rx_pkt_n += count;

	return count;
}
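/* Each DMA channel has dedicated NAPI contexts: rx_napi, tx_napi and,
 * for XSK zero-copy, a combined rxtx_napi. Once a poll completes under
 * budget, the corresponding DMA interrupt is re-enabled under the
 * channel lock in the handlers below.
 */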
static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
{
	struct stmmac_channel *ch =
		container_of(napi, struct stmmac_channel, rx_napi);
	struct stmmac_priv *priv = ch->priv_data;
	u32 chan = ch->index;
	int work_done;

	priv->xstats.napi_poll++;

	work_done = stmmac_rx(priv, budget, chan);
	if (work_done < budget && napi_complete_done(napi, work_done)) {
		unsigned long flags;

		spin_lock_irqsave(&ch->lock, flags);
		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
		spin_unlock_irqrestore(&ch->lock, flags);
	}

	return work_done;
}
static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
{
	struct stmmac_channel *ch =
		container_of(napi, struct stmmac_channel, tx_napi);
	struct stmmac_priv *priv = ch->priv_data;
	u32 chan = ch->index;
	int work_done;

	priv->xstats.napi_poll++;

	work_done = stmmac_tx_clean(priv, budget, chan);
	work_done = min(work_done, budget);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		unsigned long flags;

		spin_lock_irqsave(&ch->lock, flags);
		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
		spin_unlock_irqrestore(&ch->lock, flags);
	}

	return work_done;
}
static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
{
	struct stmmac_channel *ch =
		container_of(napi, struct stmmac_channel, rxtx_napi);
	struct stmmac_priv *priv = ch->priv_data;
	int rx_done, tx_done, rxtx_done;
	u32 chan = ch->index;

	priv->xstats.napi_poll++;

	tx_done = stmmac_tx_clean(priv, budget, chan);
	tx_done = min(tx_done, budget);

	rx_done = stmmac_rx_zc(priv, budget, chan);

	rxtx_done = max(tx_done, rx_done);

	/* If either TX or RX work is not complete, return budget
	 * and keep polling.
	 */
	if (rxtx_done >= budget)
		return budget;

	/* all work done, exit the polling mode */
	if (napi_complete_done(napi, rxtx_done)) {
		unsigned long flags;

		spin_lock_irqsave(&ch->lock, flags);
		/* Both RX and TX work are complete,
		 * so enable both RX & TX IRQs.
		 */
		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
		spin_unlock_irqrestore(&ch->lock, flags);
	}

	/* A poll that called napi_complete_done() must not return the full
	 * budget, hence the clamp to budget - 1.
	 */
	return min(rxtx_done, budget - 1);
}
/**
 *  stmmac_tx_timeout
 *  @dev : Pointer to net device structure
 *  @txqueue: the index of the hanging transmit queue
 *  Description: this function is called when a packet transmission fails to
 *   complete within a reasonable time. The driver will mark the error in the
 *   netdev structure and arrange for the device to be reset to a sane state
 *   in order to transmit a new packet.
 */
static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	stmmac_global_err(priv);
}
/**
 *  stmmac_set_rx_mode - entry point for multicast addressing
 *  @dev : pointer to the device structure
 *  Description:
 *  This function is a driver entry point which gets called by the kernel
 *  whenever multicast addresses must be enabled/disabled.
 *  Return value:
 *  void.
 */
static void stmmac_set_rx_mode(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	stmmac_set_filter(priv, priv->hw, dev);
}
/**
 *  stmmac_change_mtu - entry point to change MTU size for the device.
 *  @dev : device pointer.
 *  @new_mtu : the new MTU size for the device.
 *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
 *  to drive packet transmission. Ethernet has an MTU of 1500 octets
 *  (ETH_DATA_LEN). This value can be changed with ifconfig.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int txfifosz = priv->plat->tx_fifo_size;
	const int mtu = new_mtu;

	if (txfifosz == 0)
		txfifosz = priv->dma_cap.tx_fifo_size;

	txfifosz /= priv->plat->tx_queues_to_use;

	if (netif_running(dev)) {
		netdev_err(priv->dev, "must be stopped to change its MTU\n");
		return -EBUSY;
	}

	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
		return -EINVAL;
	}

	new_mtu = STMMAC_ALIGN(new_mtu);

	/* If condition true, FIFO is too small or MTU too large */
	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
		return -EINVAL;

	dev->mtu = mtu;

	netdev_update_features(dev);

	return 0;
}
static netdev_features_t stmmac_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
		features &= ~NETIF_F_RXCSUM;

	if (!priv->plat->tx_coe)
		features &= ~NETIF_F_CSUM_MASK;

	/* Some GMAC devices have a bugged Jumbo frame support that
	 * needs to have the Tx COE disabled for oversized frames
	 * (due to limited buffer sizes). In this case we disable
	 * the TX csum insertion in the TDES and not use SF.
	 */
	if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
		features &= ~NETIF_F_CSUM_MASK;

	/* Disable tso if asked by ethtool */
	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
		if (features & NETIF_F_TSO)
			priv->tso = true;
		else
			priv->tso = false;
	}

	return features;
}
static int stmmac_set_features(struct net_device *netdev,
			       netdev_features_t features)
{
	struct stmmac_priv *priv = netdev_priv(netdev);

	/* Keep the COE Type in case of csum is supporting */
	if (features & NETIF_F_RXCSUM)
		priv->hw->rx_csum = priv->plat->rx_coe;
	else
		priv->hw->rx_csum = 0;
	/* No check needed because rx_coe has been set before and it will be
	 * fixed in case of issue.
	 */
	stmmac_rx_ipc(priv, priv->hw);

	if (priv->sph_cap) {
		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
		u32 chan;

		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
	}

	return 0;
}
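/* Frame Preemption (FPE) handshake: the local and link-partner state
 * machines advance on verify/response mPackets. A received verify
 * mPacket proves the peer is FPE capable and is answered with a response
 * mPacket; once responses are seen in both directions, both sides enter
 * the ENTERING_ON state and the FPE workqueue task (priv->fpe_task) is
 * scheduled to complete the transition.
 */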
static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
{
	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
	bool *hs_enable = &fpe_cfg->hs_enable;

	if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
		return;

	/* If LP has sent verify mPacket, LP is FPE capable */
	if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
		if (*lp_state < FPE_STATE_CAPABLE)
			*lp_state = FPE_STATE_CAPABLE;

		/* If user has requested FPE enable, respond quickly */
		if (*hs_enable)
			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
						MPACKET_RESPONSE);
	}

	/* If Local has sent verify mPacket, Local is FPE capable */
	if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
		if (*lo_state < FPE_STATE_CAPABLE)
			*lo_state = FPE_STATE_CAPABLE;
	}

	/* If LP has sent response mPacket, LP is entering FPE ON */
	if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
		*lp_state = FPE_STATE_ENTERING_ON;

	/* If Local has sent response mPacket, Local is entering FPE ON */
	if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
		*lo_state = FPE_STATE_ENTERING_ON;

	if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
	    !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
	    priv->fpe_wq) {
		queue_work(priv->fpe_wq, &priv->fpe_task);
	}
}
static void stmmac_common_interrupt(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queues_count;
	u32 queue;
	bool xmac;

	xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;

	if (priv->irq_wake)
		pm_wakeup_event(priv->device, 0);

	if (priv->dma_cap.estsel)
		stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
				      &priv->xstats, tx_cnt);

	if (priv->dma_cap.fpesel) {
		int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
						   priv->dev);

		stmmac_fpe_event_status(priv, status);
	}

	/* To handle GMAC own interrupts */
	if ((priv->plat->has_gmac) || xmac) {
		int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);

		if (unlikely(status)) {
			/* For LPI we need to save the tx status */
			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
				priv->tx_path_in_lpi_mode = true;
			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
				priv->tx_path_in_lpi_mode = false;
		}

		for (queue = 0; queue < queues_count; queue++) {
			status = stmmac_host_mtl_irq_status(priv, priv->hw,
							    queue);
		}

		/* PCS link status */
		if (priv->hw->pcs) {
			if (priv->xstats.pcs_link)
				netif_carrier_on(priv->dev);
			else
				netif_carrier_off(priv->dev);
		}

		stmmac_timestamp_interrupt(priv, priv);
	}
}
/**
 *  stmmac_interrupt - main ISR
 *  @irq: interrupt number.
 *  @dev_id: to pass the net device pointer.
 *  Description: this is the main driver interrupt service routine.
 *  It can call:
 *  o DMA service routine (to manage incoming frame reception and transmission
 *    status)
 *  o Core interrupts to manage: remote wake-up, management counter, LPI
 *    interrupts.
 */
static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Check if adapter is up */
	if (test_bit(STMMAC_DOWN, &priv->state))
		return IRQ_HANDLED;

	/* Check if a fatal error happened */
	if (stmmac_safety_feat_interrupt(priv))
		return IRQ_HANDLED;

	/* To handle Common interrupts */
	stmmac_common_interrupt(priv);

	/* To handle DMA interrupts */
	stmmac_dma_interrupt(priv);

	return IRQ_HANDLED;
}
static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct stmmac_priv *priv = netdev_priv(dev);

	if (unlikely(!dev)) {
		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
		return IRQ_NONE;
	}

	/* Check if adapter is up */
	if (test_bit(STMMAC_DOWN, &priv->state))
		return IRQ_HANDLED;

	/* To handle Common interrupts */
	stmmac_common_interrupt(priv);

	return IRQ_HANDLED;
}
static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct stmmac_priv *priv = netdev_priv(dev);

	if (unlikely(!dev)) {
		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
		return IRQ_NONE;
	}

	/* Check if adapter is up */
	if (test_bit(STMMAC_DOWN, &priv->state))
		return IRQ_HANDLED;

	/* Check if a fatal error happened */
	stmmac_safety_feat_interrupt(priv);

	return IRQ_HANDLED;
}
static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
{
	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
	int chan = tx_q->queue_index;
	struct stmmac_priv *priv;
	int status;

	priv = container_of(tx_q, struct stmmac_priv, tx_queue[chan]);

	if (unlikely(!data)) {
		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
		return IRQ_NONE;
	}

	/* Check if adapter is up */
	if (test_bit(STMMAC_DOWN, &priv->state))
		return IRQ_HANDLED;

	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);

	if (unlikely(status & tx_hard_error_bump_tc)) {
		/* Try to bump up the dma threshold on this failure */
		if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
		    tc <= 256) {
			tc += 64;
			if (priv->plat->force_thresh_dma_mode)
				stmmac_set_dma_operation_mode(priv,
							      tc,
							      tc,
							      chan);
			else
				stmmac_set_dma_operation_mode(priv,
							      tc,
							      SF_DMA_MODE,
							      chan);
			priv->xstats.threshold = tc;
		}
	} else if (unlikely(status == tx_hard_error)) {
		stmmac_tx_err(priv, chan);
	}

	return IRQ_HANDLED;
}
static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
{
	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
	int chan = rx_q->queue_index;
	struct stmmac_priv *priv;

	priv = container_of(rx_q, struct stmmac_priv, rx_queue[chan]);

	if (unlikely(!data)) {
		netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
		return IRQ_NONE;
	}

	/* Check if adapter is up */
	if (test_bit(STMMAC_DOWN, &priv->state))
		return IRQ_HANDLED;

	stmmac_napi_check(priv, chan, DMA_DIR_RX);

	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling receive - used by NETCONSOLE and other diagnostic tools
 * to allow network I/O with interrupts disabled.
 */
static void stmmac_poll_controller(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int i;

	/* If adapter is down, do nothing */
	if (test_bit(STMMAC_DOWN, &priv->state))
		return;

	if (priv->plat->multi_msi_en) {
		for (i = 0; i < priv->plat->rx_queues_to_use; i++)
			stmmac_msi_intr_rx(0, &priv->rx_queue[i]);

		for (i = 0; i < priv->plat->tx_queues_to_use; i++)
			stmmac_msi_intr_tx(0, &priv->tx_queue[i]);
	} else {
		disable_irq(dev->irq);
		stmmac_interrupt(dev->irq, dev);
		enable_irq(dev->irq);
	}
}
#endif
/**
 *  stmmac_ioctl - Entry point for the Ioctl
 *  @dev: Device pointer.
 *  @rq: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  @cmd: IOCTL command
 *  Description:
 *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
 */
static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
		break;
	case SIOCSHWTSTAMP:
		ret = stmmac_hwtstamp_set(dev, rq);
		break;
	case SIOCGHWTSTAMP:
		ret = stmmac_hwtstamp_get(dev, rq);
		break;
	default:
		break;
	}

	return ret;
}
static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				    void *cb_priv)
{
	struct stmmac_priv *priv = cb_priv;
	int ret = -EOPNOTSUPP;

	if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
		return ret;

	__stmmac_disable_all_queues(priv);

	switch (type) {
	case TC_SETUP_CLSU32:
		ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
		break;
	case TC_SETUP_CLSFLOWER:
		ret = stmmac_tc_setup_cls(priv, priv, type_data);
		break;
	default:
		break;
	}

	stmmac_enable_all_queues(priv);
	return ret;
}

static LIST_HEAD(stmmac_block_cb_list);
static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
			   void *type_data)
{
	struct stmmac_priv *priv = netdev_priv(ndev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &stmmac_block_cb_list,
						  stmmac_setup_tc_block_cb,
						  priv, priv, true);
	case TC_SETUP_QDISC_CBS:
		return stmmac_tc_setup_cbs(priv, priv, type_data);
	case TC_SETUP_QDISC_TAPRIO:
		return stmmac_tc_setup_taprio(priv, priv, type_data);
	case TC_SETUP_QDISC_ETF:
		return stmmac_tc_setup_etf(priv, priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
			       struct net_device *sb_dev)
{
	int gso = skb_shinfo(skb)->gso_type;

	if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
		/*
		 * There is no way to determine the number of TSO/USO
		 * capable Queues. Let's use always the Queue 0
		 * because if TSO/USO is supported then at least this
		 * one will be capable.
		 */
		return 0;
	}

	return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
}
static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	int ret = 0;

	ret = pm_runtime_get_sync(priv->device);
	if (ret < 0) {
		pm_runtime_put_noidle(priv->device);
		return ret;
	}

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		goto set_mac_error;

	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);

set_mac_error:
	pm_runtime_put(priv->device);

	return ret;
}
#ifdef CONFIG_DEBUG_FS
static struct dentry *stmmac_fs_dir;

static void sysfs_display_ring(void *head, int size, int extend_desc,
			       struct seq_file *seq, dma_addr_t dma_phy_addr)
{
	int i;
	struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
	struct dma_desc *p = (struct dma_desc *)head;
	dma_addr_t dma_addr;

	for (i = 0; i < size; i++) {
		if (extend_desc) {
			dma_addr = dma_phy_addr + i * sizeof(*ep);
			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
				   i, &dma_addr,
				   le32_to_cpu(ep->basic.des0),
				   le32_to_cpu(ep->basic.des1),
				   le32_to_cpu(ep->basic.des2),
				   le32_to_cpu(ep->basic.des3));
			ep++;
		} else {
			dma_addr = dma_phy_addr + i * sizeof(*p);
			seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
				   i, &dma_addr,
				   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
				   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
			p++;
		}
		seq_printf(seq, "\n");
	}
}
static int stmmac_rings_status_show(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_count = priv->plat->rx_queues_to_use;
	u32 tx_count = priv->plat->tx_queues_to_use;
	u32 queue;

	if ((dev->flags & IFF_UP) == 0)
		return 0;

	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		seq_printf(seq, "RX Queue %d:\n", queue);

		if (priv->extend_desc) {
			seq_printf(seq, "Extended descriptor ring:\n");
			sysfs_display_ring((void *)rx_q->dma_erx,
					   priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy);
		} else {
			seq_printf(seq, "Descriptor ring:\n");
			sysfs_display_ring((void *)rx_q->dma_rx,
					   priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy);
		}
	}

	for (queue = 0; queue < tx_count; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		seq_printf(seq, "TX Queue %d:\n", queue);

		if (priv->extend_desc) {
			seq_printf(seq, "Extended descriptor ring:\n");
			sysfs_display_ring((void *)tx_q->dma_etx,
					   priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy);
		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
			seq_printf(seq, "Descriptor ring:\n");
			sysfs_display_ring((void *)tx_q->dma_tx,
					   priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy);
		}
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);

	if (!priv->hw_cap_support) {
		seq_printf(seq, "DMA HW features not supported\n");
		return 0;
	}

	seq_printf(seq, "==============================\n");
	seq_printf(seq, "\tDMA HW features\n");
	seq_printf(seq, "==============================\n");

	seq_printf(seq, "\t10/100 Mbps: %s\n",
		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
	seq_printf(seq, "\t1000 Mbps: %s\n",
		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
	seq_printf(seq, "\tHalf duplex: %s\n",
		   (priv->dma_cap.half_duplex) ? "Y" : "N");
	seq_printf(seq, "\tHash Filter: %s\n",
		   (priv->dma_cap.hash_filter) ? "Y" : "N");
	seq_printf(seq, "\tMultiple MAC address registers: %s\n",
		   (priv->dma_cap.multi_addr) ? "Y" : "N");
	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
		   (priv->dma_cap.pcs) ? "Y" : "N");
	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
	seq_printf(seq, "\tPMT Remote wake up: %s\n",
		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
	seq_printf(seq, "\tPMT Magic Frame: %s\n",
		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
	seq_printf(seq, "\tRMON module: %s\n",
		   (priv->dma_cap.rmon) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
		   (priv->dma_cap.time_stamp) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
		   (priv->dma_cap.eee) ? "Y" : "N");
	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
		   (priv->dma_cap.tx_coe) ? "Y" : "N");
	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
			   (priv->dma_cap.rx_coe) ? "Y" : "N");
	} else {
		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
	}
	seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
		   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
		   priv->dma_cap.number_rx_channel);
	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
		   priv->dma_cap.number_tx_channel);
	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
		   priv->dma_cap.number_rx_queues);
	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
		   priv->dma_cap.number_tx_queues);
	seq_printf(seq, "\tEnhanced descriptors: %s\n",
		   (priv->dma_cap.enh_desc) ? "Y" : "N");
	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
	seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
		   priv->dma_cap.pps_out_num);
	seq_printf(seq, "\tSafety Features: %s\n",
		   priv->dma_cap.asp ? "Y" : "N");
	seq_printf(seq, "\tFlexible RX Parser: %s\n",
		   priv->dma_cap.frpsel ? "Y" : "N");
	seq_printf(seq, "\tEnhanced Addressing: %d\n",
		   priv->dma_cap.addr64);
	seq_printf(seq, "\tReceive Side Scaling: %s\n",
		   priv->dma_cap.rssen ? "Y" : "N");
	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
		   priv->dma_cap.vlhash ? "Y" : "N");
	seq_printf(seq, "\tSplit Header: %s\n",
		   priv->dma_cap.sphen ? "Y" : "N");
	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
		   priv->dma_cap.vlins ? "Y" : "N");
	seq_printf(seq, "\tDouble VLAN: %s\n",
		   priv->dma_cap.dvlan ? "Y" : "N");
	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
		   priv->dma_cap.l3l4fnum);
	seq_printf(seq, "\tARP Offloading: %s\n",
		   priv->dma_cap.arpoffsel ? "Y" : "N");
	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
		   priv->dma_cap.estsel ? "Y" : "N");
	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
		   priv->dma_cap.fpesel ? "Y" : "N");
	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
		   priv->dma_cap.tbssel ? "Y" : "N");

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
/* Use network device events to rename debugfs file entries.
 */
static int stmmac_device_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct stmmac_priv *priv = netdev_priv(dev);

	if (dev->netdev_ops != &stmmac_netdev_ops)
		goto done;

	switch (event) {
	case NETDEV_CHANGENAME:
		if (priv->dbgfs_dir)
			priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
							 priv->dbgfs_dir,
							 stmmac_fs_dir,
							 dev->name);
		break;
	}
done:
	return NOTIFY_DONE;
}

static struct notifier_block stmmac_notifier = {
	.notifier_call = stmmac_device_event,
};

static void stmmac_init_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	rtnl_lock();

	/* Create per netdev entries */
	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);

	/* Entry to report DMA RX/TX rings */
	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
			    &stmmac_rings_status_fops);

	/* Entry to report the DMA HW features */
	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
			    &stmmac_dma_cap_fops);

	rtnl_unlock();
}

static void stmmac_exit_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	debugfs_remove_recursive(priv->dbgfs_dir);
}
#endif /* CONFIG_DEBUG_FS */
static u32 stmmac_vid_crc32_le(__le16 vid_le)
{
	unsigned char *data = (unsigned char *)&vid_le;
	unsigned char data_byte = 0;
	u32 crc = ~0x0;
	u32 temp = 0;
	int i, bits;

	bits = get_bitmask_order(VLAN_VID_MASK);
	for (i = 0; i < bits; i++) {
		if ((i % 8) == 0)
			data_byte = data[i / 8];

		temp = ((crc & 1) ^ data_byte) & 1;
		crc >>= 1;
		data_byte >>= 1;

		if (temp)
			crc ^= 0xedb88320;
	}

	return crc;
}
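/* stmmac_vid_crc32_le() computes a little-endian CRC-32 (polynomial
 * 0xEDB88320) over the 12 VID bits only. The VLAN hash filter below keys
 * on the top four bits of the bit-reversed, inverted CRC, giving a
 * 16-bit hash bitmap; when the core lacks a VLAN hash table it falls
 * back to a single perfect-match entry instead.
 */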
static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
{
	u32 crc, hash = 0;
	__le16 pmatch = 0;
	int count = 0;
	u16 vid = 0;

	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
		__le16 vid_le = cpu_to_le16(vid);
		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
		hash |= (1 << crc);
		count++;
	}

	if (!priv->dma_cap.vlhash) {
		if (count > 2) /* VID = 0 always passes filter */
			return -EOPNOTSUPP;

		pmatch = cpu_to_le16(vid);
		hash = 0;
	}

	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
}
static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	bool is_double = false;
	int ret;

	if (be16_to_cpu(proto) == ETH_P_8021AD)
		is_double = true;

	set_bit(vid, priv->active_vlans);
	ret = stmmac_vlan_update(priv, is_double);
	if (ret) {
		clear_bit(vid, priv->active_vlans);
		return ret;
	}

	if (priv->hw->num_vlan) {
		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
		if (ret)
			return ret;
	}

	return 0;
}
*ndev
, __be16 proto
, u16 vid
)
6258 struct stmmac_priv
*priv
= netdev_priv(ndev
);
6259 bool is_double
= false;
6262 ret
= pm_runtime_get_sync(priv
->device
);
6264 pm_runtime_put_noidle(priv
->device
);
6268 if (be16_to_cpu(proto
) == ETH_P_8021AD
)
6271 clear_bit(vid
, priv
->active_vlans
);
6273 if (priv
->hw
->num_vlan
) {
6274 ret
= stmmac_del_hw_vlan_rx_fltr(priv
, ndev
, priv
->hw
, proto
, vid
);
6276 goto del_vlan_error
;
6279 ret
= stmmac_vlan_update(priv
, is_double
);
6282 pm_runtime_put(priv
->device
);
6287 static int stmmac_bpf(struct net_device
*dev
, struct netdev_bpf
*bpf
)
6289 struct stmmac_priv
*priv
= netdev_priv(dev
);
6291 switch (bpf
->command
) {
6292 case XDP_SETUP_PROG
:
6293 return stmmac_xdp_set_prog(priv
, bpf
->prog
, bpf
->extack
);
6294 case XDP_SETUP_XSK_POOL
:
6295 return stmmac_xdp_setup_pool(priv
, bpf
->xsk
.pool
,
static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
			   struct xdp_frame **frames, u32 flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int cpu = smp_processor_id();
	struct netdev_queue *nq;
	int i, nxmit = 0;
	int queue;

	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
		return -ENETDOWN;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	queue = stmmac_xdp_get_tx_queue(priv, cpu);
	nq = netdev_get_tx_queue(priv->dev, queue);

	__netif_tx_lock(nq, cpu);
	/* Avoids TX time-out as we are sharing with slow path */
	nq->trans_start = jiffies;

	for (i = 0; i < num_frames; i++) {
		int res;

		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
		if (res == STMMAC_XDP_CONSUMED)
			break;

		nxmit++;
	}

	if (flags & XDP_XMIT_FLUSH) {
		stmmac_flush_tx_descriptors(priv, queue);
		stmmac_tx_timer_arm(priv, queue);
	}

	__netif_tx_unlock(nq);

	return nxmit;
}
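/* The queue disable/enable helpers below tear down and rebuild a single
 * RX or TX queue at runtime: the DMA channel is stopped, descriptor
 * resources are freed and reallocated, and the channel IRQ is masked or
 * unmasked under the channel lock. They are used when an XSK buffer pool
 * is attached to or detached from a live queue.
 */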
void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
	spin_unlock_irqrestore(&ch->lock, flags);

	stmmac_stop_rx_dma(priv, queue);
	__free_dma_rx_desc_resources(priv, queue);
}
void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned long flags;
	u32 buf_size;
	int ret;

	ret = __alloc_dma_rx_desc_resources(priv, queue);
	if (ret) {
		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
		return;
	}

	ret = __init_dma_rx_desc_rings(priv, queue, GFP_KERNEL);
	if (ret) {
		__free_dma_rx_desc_resources(priv, queue);
		netdev_err(priv->dev, "Failed to init RX desc.\n");
		return;
	}

	stmmac_clear_rx_descriptors(priv, queue);

	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
			    rx_q->dma_rx_phy, rx_q->queue_index);

	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
			     sizeof(struct dma_desc));
	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
			       rx_q->rx_tail_addr, rx_q->queue_index);

	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
		stmmac_set_dma_bfsize(priv, priv->ioaddr,
				      buf_size,
				      rx_q->queue_index);
	} else {
		stmmac_set_dma_bfsize(priv, priv->ioaddr,
				      priv->dma_buf_sz,
				      rx_q->queue_index);
	}

	stmmac_start_rx_dma(priv, queue);

	spin_lock_irqsave(&ch->lock, flags);
	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
	spin_unlock_irqrestore(&ch->lock, flags);
}
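/* The stmmac_disable_rx_queue()/stmmac_enable_rx_queue() pair is meant
 * for reconfiguring a single RX queue while the interface stays up, e.g.
 * when an AF_XDP buffer pool is attached to or detached from one queue.
 */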
void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
	spin_unlock_irqrestore(&ch->lock, flags);

	stmmac_stop_tx_dma(priv, queue);
	__free_dma_tx_desc_resources(priv, queue);
}
void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned long flags;
	int ret;

	ret = __alloc_dma_tx_desc_resources(priv, queue);
	if (ret) {
		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
		return;
	}

	ret = __init_dma_tx_desc_rings(priv, queue);
	if (ret) {
		__free_dma_tx_desc_resources(priv, queue);
		netdev_err(priv->dev, "Failed to init TX desc.\n");
		return;
	}

	stmmac_clear_tx_descriptors(priv, queue);

	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
			    tx_q->dma_tx_phy, tx_q->queue_index);

	if (tx_q->tbs & STMMAC_TBS_AVAIL)
		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);

	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
			       tx_q->tx_tail_addr, tx_q->queue_index);

	stmmac_start_tx_dma(priv, queue);

	spin_lock_irqsave(&ch->lock, flags);
	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
	spin_unlock_irqrestore(&ch->lock, flags);
}
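/* If the ring advertises TBS (time-based scheduling), it is re-armed
 * above so that launch-time offload (e.g. a tc-etf qdisc mapped onto
 * this queue) keeps working across a queue restart.
 */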
int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct stmmac_rx_queue *rx_q;
	struct stmmac_tx_queue *tx_q;
	struct stmmac_channel *ch;

	if (test_bit(STMMAC_DOWN, &priv->state) ||
	    !netif_carrier_ok(priv->dev))
		return -ENETDOWN;

	if (!stmmac_xdp_is_enabled(priv))
		return -ENXIO;

	if (queue >= priv->plat->rx_queues_to_use ||
	    queue >= priv->plat->tx_queues_to_use)
		return -EINVAL;

	rx_q = &priv->rx_queue[queue];
	tx_q = &priv->tx_queue[queue];
	ch = &priv->channel[queue];

	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
		return -ENXIO;

	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
		/* EQoS does not have per-DMA channel SW interrupt,
		 * so we schedule RX Napi straight-away.
		 */
		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
			__napi_schedule(&ch->rxtx_napi);
	}

	return 0;
}
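/* User-space usage sketch (hypothetical descriptor xsk_fd bound to this
 * device and queue): with the need_wakeup flag set, a poll() or a kick
 * such as
 *
 *	sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
 *
 * lands here, and the shared rxtx NAPI context then services both the
 * RX and TX rings of the channel.
 */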
static const struct net_device_ops stmmac_netdev_ops = {
	.ndo_open = stmmac_open,
	.ndo_start_xmit = stmmac_xmit,
	.ndo_stop = stmmac_release,
	.ndo_change_mtu = stmmac_change_mtu,
	.ndo_fix_features = stmmac_fix_features,
	.ndo_set_features = stmmac_set_features,
	.ndo_set_rx_mode = stmmac_set_rx_mode,
	.ndo_tx_timeout = stmmac_tx_timeout,
	.ndo_eth_ioctl = stmmac_ioctl,
	.ndo_setup_tc = stmmac_setup_tc,
	.ndo_select_queue = stmmac_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = stmmac_poll_controller,
#endif
	.ndo_set_mac_address = stmmac_set_mac_address,
	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
	.ndo_bpf = stmmac_bpf,
	.ndo_xdp_xmit = stmmac_xdp_xmit,
	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
};
static void stmmac_reset_subtask(struct stmmac_priv *priv)
{
	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
		return;
	if (test_bit(STMMAC_DOWN, &priv->state))
		return;

	netdev_err(priv->dev, "Reset adapter.\n");

	rtnl_lock();
	netif_trans_update(priv->dev);
	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
		usleep_range(1000, 2000);

	set_bit(STMMAC_DOWN, &priv->state);
	dev_close(priv->dev);
	dev_open(priv->dev, NULL);
	clear_bit(STMMAC_DOWN, &priv->state);
	clear_bit(STMMAC_RESETING, &priv->state);
	rtnl_unlock();
}
static void stmmac_service_task(struct work_struct *work)
{
	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
						service_task);

	stmmac_reset_subtask(priv);
	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
}
/**
 * stmmac_hw_init - Init the MAC device
 * @priv: driver private structure
 * Description: this function is to configure the MAC device according to
 * some platform parameters or the HW capability register. It prepares the
 * driver to use either ring or chain modes and to setup either enhanced or
 * normal descriptors.
 */
static int stmmac_hw_init(struct stmmac_priv *priv)
{
	int ret;

	/* dwmac-sun8i only works in chain mode */
	if (priv->plat->has_sun8i)
		chain_mode = 1;
	priv->chain_mode = chain_mode;

	/* Initialize HW Interface */
	ret = stmmac_hwif_init(priv);
	if (ret)
		return ret;

	/* Get the HW capability (new GMAC newer than 3.50a) */
	priv->hw_cap_support = stmmac_get_hw_features(priv);
	if (priv->hw_cap_support) {
		dev_info(priv->device, "DMA HW capability register supported\n");

		/* We can override some gmac/dma configuration fields: e.g.
		 * enh_desc, tx_coe (e.g. that are passed through the
		 * platform) with the values from the HW capability
		 * register (if supported).
		 */
		priv->plat->enh_desc = priv->dma_cap.enh_desc;
		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
				!priv->plat->use_phy_wol;
		priv->hw->pmt = priv->plat->pmt;
		if (priv->dma_cap.hash_tb_sz) {
			priv->hw->multicast_filter_bins =
				(BIT(priv->dma_cap.hash_tb_sz) << 5);
			priv->hw->mcast_bits_log2 =
				ilog2(priv->hw->multicast_filter_bins);
		}

		/* TXCOE doesn't work in thresh DMA mode */
		if (priv->plat->force_thresh_dma_mode)
			priv->plat->tx_coe = 0;
		else
			priv->plat->tx_coe = priv->dma_cap.tx_coe;

		/* In case of GMAC4 rx_coe is from HW cap register. */
		priv->plat->rx_coe = priv->dma_cap.rx_coe;

		if (priv->dma_cap.rx_coe_type2)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
		else if (priv->dma_cap.rx_coe_type1)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;

	} else {
		dev_info(priv->device, "No HW DMA feature register supported\n");
	}

	if (priv->plat->rx_coe) {
		priv->hw->rx_csum = priv->plat->rx_coe;
		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
		if (priv->synopsys_id < DWMAC_CORE_4_00)
			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
	}
	if (priv->plat->tx_coe)
		dev_info(priv->device, "TX Checksum insertion supported\n");

	if (priv->plat->pmt) {
		dev_info(priv->device, "Wake-Up On Lan supported\n");
		device_set_wakeup_capable(priv->device, 1);
	}

	if (priv->dma_cap.tsoen)
		dev_info(priv->device, "TSO supported\n");

	priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;

	/* Run HW quirks, if any */
	if (priv->hwif_quirks) {
		ret = priv->hwif_quirks(priv);
		if (ret)
			return ret;
	}

	/* Rx Watchdog is available in the COREs newer than 3.40.
	 * In some cases, for example on buggy HW, this feature
	 * has to be disabled, which can be done by passing the
	 * riwt_off field from the platform.
	 */
	if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
	     (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
		priv->use_riwt = 1;
		dev_info(priv->device,
			 "Enable RX Mitigation via HW Watchdog Timer\n");
	}

	return 0;
}
static void stmmac_napi_add(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 queue, maxq;

	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		ch->priv_data = priv;
		ch->index = queue;
		spin_lock_init(&ch->lock);

		if (queue < priv->plat->rx_queues_to_use) {
			netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx,
				       NAPI_POLL_WEIGHT);
		}
		if (queue < priv->plat->tx_queues_to_use) {
			netif_tx_napi_add(dev, &ch->tx_napi,
					  stmmac_napi_poll_tx,
					  NAPI_POLL_WEIGHT);
		}
		if (queue < priv->plat->rx_queues_to_use &&
		    queue < priv->plat->tx_queues_to_use) {
			netif_napi_add(dev, &ch->rxtx_napi,
				       stmmac_napi_poll_rxtx,
				       NAPI_POLL_WEIGHT);
		}
	}
}
static void stmmac_napi_del(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 queue, maxq;

	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);

	for (queue = 0; queue < maxq; queue++) {
		struct stmmac_channel *ch = &priv->channel[queue];

		if (queue < priv->plat->rx_queues_to_use)
			netif_napi_del(&ch->rx_napi);
		if (queue < priv->plat->tx_queues_to_use)
			netif_napi_del(&ch->tx_napi);
		if (queue < priv->plat->rx_queues_to_use &&
		    queue < priv->plat->tx_queues_to_use) {
			netif_napi_del(&ch->rxtx_napi);
		}
	}
}
int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret = 0;

	if (netif_running(dev))
		stmmac_release(dev);

	stmmac_napi_del(dev);

	priv->plat->rx_queues_to_use = rx_cnt;
	priv->plat->tx_queues_to_use = tx_cnt;

	stmmac_napi_add(dev);

	if (netif_running(dev))
		ret = stmmac_open(dev);

	return ret;
}
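/* This is typically reached via ethtool channel reconfiguration, e.g.
 * "ethtool -L eth0 rx 4 tx 4" (interface name hypothetical): the device
 * is closed if running, the NAPI contexts are rebuilt for the new
 * counts, and the device is reopened.
 */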
int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret = 0;

	if (netif_running(dev))
		stmmac_release(dev);

	priv->dma_rx_size = rx_size;
	priv->dma_tx_size = tx_size;

	if (netif_running(dev))
		ret = stmmac_open(dev);

	return ret;
}
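/* Same pattern for descriptor ring sizes, e.g. "ethtool -G eth0 rx 1024
 * tx 1024" (interface name hypothetical); the new sizes take effect in
 * stmmac_open(), where the rings are reallocated.
 */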
#define SEND_VERIFY_MPACKET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
static void stmmac_fpe_lp_task(struct work_struct *work)
{
	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
						fpe_task);
	struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
	enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
	enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
	bool *hs_enable = &fpe_cfg->hs_enable;
	bool *enable = &fpe_cfg->enable;
	int retries = 20;

	while (retries-- > 0) {
		/* Bail out immediately if FPE handshake is OFF */
		if (*lo_state == FPE_STATE_OFF || !*hs_enable)
			break;

		if (*lo_state == FPE_STATE_ENTERING_ON &&
		    *lp_state == FPE_STATE_ENTERING_ON) {
			stmmac_fpe_configure(priv, priv->ioaddr,
					     priv->plat->tx_queues_to_use,
					     priv->plat->rx_queues_to_use,
					     *enable);

			netdev_info(priv->dev, "configured FPE\n");

			*lo_state = FPE_STATE_ON;
			*lp_state = FPE_STATE_ON;
			netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
			break;
		}

		if ((*lo_state == FPE_STATE_CAPABLE ||
		     *lo_state == FPE_STATE_ENTERING_ON) &&
		    *lp_state != FPE_STATE_ON) {
			netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT,
				    *lo_state, *lp_state);
			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
						MPACKET_VERIFY);
		}
		/* Sleep then retry */
		msleep(500);
	}

	clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
}
void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
{
	if (priv->plat->fpe_cfg->hs_enable != enable) {
		if (enable) {
			stmmac_fpe_send_mpacket(priv, priv->ioaddr,
						MPACKET_VERIFY);
		} else {
			priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
			priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
		}

		priv->plat->fpe_cfg->hs_enable = enable;
	}
}
/**
 * stmmac_dvr_probe
 * @device: device pointer
 * @plat_dat: platform data pointer
 * @res: stmmac resource pointer
 * Description: this is the main probe function used to call
 * alloc_etherdev and to allocate and set up the private structure.
 * Return:
 * returns 0 on success, otherwise errno.
 */
int stmmac_dvr_probe(struct device *device,
		     struct plat_stmmacenet_data *plat_dat,
		     struct stmmac_resources *res)
{
	struct net_device *ndev = NULL;
	struct stmmac_priv *priv;
	u32 rxq;
	int i, ret = 0;

	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, device);

	priv = netdev_priv(ndev);
	priv->device = device;
	priv->dev = ndev;

	stmmac_set_ethtool_ops(ndev);
	priv->pause = pause;
	priv->plat = plat_dat;
	priv->ioaddr = res->addr;
	priv->dev->base_addr = (unsigned long)res->addr;
	priv->plat->dma_cfg->multi_msi_en = priv->plat->multi_msi_en;

	priv->dev->irq = res->irq;
	priv->wol_irq = res->wol_irq;
	priv->lpi_irq = res->lpi_irq;
	priv->sfty_ce_irq = res->sfty_ce_irq;
	priv->sfty_ue_irq = res->sfty_ue_irq;
	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
		priv->rx_irq[i] = res->rx_irq[i];
	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
		priv->tx_irq[i] = res->tx_irq[i];

	if (!is_zero_ether_addr(res->mac))
		memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);

	dev_set_drvdata(device, priv->dev);

	/* Verify driver arguments */
	stmmac_verify_args();

	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
	if (!priv->af_xdp_zc_qps)
		return -ENOMEM;

	/* Allocate workqueue */
	priv->wq = create_singlethread_workqueue("stmmac_wq");
	if (!priv->wq) {
		dev_err(priv->device, "failed to create workqueue\n");
		return -ENOMEM;
	}

	INIT_WORK(&priv->service_task, stmmac_service_task);

	/* Initialize Link Partner FPE workqueue */
	INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);

	/* Override with kernel parameters if supplied XXX CRS XXX
	 * this needs to have multiple instances
	 */
	if ((phyaddr >= 0) && (phyaddr <= 31))
		priv->plat->phy_addr = phyaddr;

	if (priv->plat->stmmac_rst) {
		ret = reset_control_assert(priv->plat->stmmac_rst);
		reset_control_deassert(priv->plat->stmmac_rst);
		/* Some reset controllers have only reset callback instead of
		 * assert + deassert callbacks pair.
		 */
		if (ret == -ENOTSUPP)
			reset_control_reset(priv->plat->stmmac_rst);
	}

	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
	if (ret == -ENOTSUPP)
		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
			ERR_PTR(ret));

	/* Init MAC and get the capabilities */
	ret = stmmac_hw_init(priv);
	if (ret)
		goto error_hw_init;

	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
	 */
	if (priv->synopsys_id < DWMAC_CORE_5_20)
		priv->plat->dma_cfg->dche = false;

	stmmac_check_ether_addr(priv);

	ndev->netdev_ops = &stmmac_netdev_ops;

	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_RXCSUM;

	ret = stmmac_tc_init(priv, priv);
	if (!ret) {
		ndev->hw_features |= NETIF_F_HW_TC;
	}

	if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
		if (priv->plat->has_gmac4)
			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
		priv->tso = true;
		dev_info(priv->device, "TSO feature enabled\n");
	}

	if (priv->dma_cap.sphen) {
		ndev->hw_features |= NETIF_F_GRO;
		priv->sph_cap = true;
		priv->sph = priv->sph_cap;
		dev_info(priv->device, "SPH feature enabled\n");
	}

	/* The current IP register MAC_HW_Feature1[ADDR64] only defines
	 * 32/40/64 bit widths, but some SoCs support other values: e.g.
	 * i.MX8MP supports 34 bits but maps it to 40 bits in
	 * MAC_HW_Feature1[ADDR64]. So overwrite dma_cap.addr64 according
	 * to the real HW design.
	 */
	if (priv->plat->addr64)
		priv->dma_cap.addr64 = priv->plat->addr64;

	if (priv->dma_cap.addr64) {
		ret = dma_set_mask_and_coherent(device,
				DMA_BIT_MASK(priv->dma_cap.addr64));
		if (!ret) {
			dev_info(priv->device, "Using %d bits DMA width\n",
				 priv->dma_cap.addr64);

			/*
			 * If more than 32 bits can be addressed, make sure to
			 * enable enhanced addressing mode.
			 */
			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
				priv->plat->dma_cfg->eame = true;
		} else {
			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
			if (ret) {
				dev_err(priv->device, "Failed to set DMA Mask\n");
				goto error_hw_init;
			}

			priv->dma_cap.addr64 = 32;
		}
	}

	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
	/* Both mac100 and gmac support receive VLAN tag detection */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
	if (priv->dma_cap.vlhash) {
		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
	}
	if (priv->dma_cap.vlins) {
		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
		if (priv->dma_cap.dvlan)
			ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
	}
#endif
	priv->msg_enable = netif_msg_init(debug, default_msg_level);

	/* Initialize RSS */
	rxq = priv->plat->rx_queues_to_use;
	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);

	if (priv->dma_cap.rssen && priv->plat->rss_en)
		ndev->features |= NETIF_F_RXHASH;

	/* MTU range: 46 - hw-specific max */
	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
	if (priv->plat->has_xgmac)
		ndev->max_mtu = XGMAC_JUMBO_LEN;
	else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
		ndev->max_mtu = JUMBO_LEN;
	else
		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
	/* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
	 * as well as plat->maxmtu < ndev->min_mtu, which is an invalid range.
	 */
	if ((priv->plat->maxmtu < ndev->max_mtu) &&
	    (priv->plat->maxmtu >= ndev->min_mtu))
		ndev->max_mtu = priv->plat->maxmtu;
	else if (priv->plat->maxmtu < ndev->min_mtu)
		dev_warn(priv->device,
			 "%s: warning: maxmtu having invalid value (%d)\n",
			 __func__, priv->plat->maxmtu);

	if (flow_ctrl)
		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */

	/* Setup channels NAPI */
	stmmac_napi_add(ndev);

	mutex_init(&priv->lock);

	/* If a specific clk_csr value is passed from the platform
	 * this means that the CSR Clock Range selection cannot be
	 * changed at run-time and it is fixed. Otherwise the driver
	 * will try to set the MDC clock dynamically according to the
	 * actual clock input.
	 */
	if (priv->plat->clk_csr >= 0)
		priv->clk_csr = priv->plat->clk_csr;
	else
		stmmac_clk_csr_set(priv);

	stmmac_check_pcs_mode(priv);

	pm_runtime_get_noresume(device);
	pm_runtime_set_active(device);
	pm_runtime_enable(device);

	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI) {
		/* MDIO bus Registration */
		ret = stmmac_mdio_register(ndev);
		if (ret < 0) {
			dev_err(priv->device,
				"%s: MDIO bus (id: %d) registration failed",
				__func__, priv->plat->bus_id);
			goto error_mdio_register;
		}
	}

	if (priv->plat->speed_mode_2500)
		priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);

	if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
		ret = stmmac_xpcs_setup(priv->mii);
		if (ret)
			goto error_xpcs_setup;
	}

	ret = stmmac_phy_setup(priv);
	if (ret) {
		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
		goto error_phy_setup;
	}

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(priv->device, "%s: ERROR %i registering the device\n",
			__func__, ret);
		goto error_netdev_register;
	}

	if (priv->plat->serdes_powerup) {
		ret = priv->plat->serdes_powerup(ndev,
						 priv->plat->bsp_priv);
		if (ret < 0)
			goto error_serdes_powerup;
	}

#ifdef CONFIG_DEBUG_FS
	stmmac_init_fs(ndev);
#endif

	if (priv->plat->dump_debug_regs)
		priv->plat->dump_debug_regs(priv->plat->bsp_priv);

	/* Let pm_runtime_put() disable the clocks.
	 * If CONFIG_PM is not enabled, the clocks will stay powered.
	 */
	pm_runtime_put(device);

	return ret;

error_serdes_powerup:
	unregister_netdev(ndev);
error_netdev_register:
	phylink_destroy(priv->phylink);
error_xpcs_setup:
error_phy_setup:
	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
error_mdio_register:
	stmmac_napi_del(ndev);
error_hw_init:
	destroy_workqueue(priv->wq);
	bitmap_free(priv->af_xdp_zc_qps);

	return ret;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
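/* A minimal sketch of how platform glue hands off to this probe (names
 * hypothetical; real glue drivers live in dwmac-*.c and usually go
 * through stmmac_probe_config_dt() and the stmmac_platform helpers):
 *
 *	static int foo_dwmac_probe(struct platform_device *pdev)
 *	{
 *		struct plat_stmmacenet_data *plat;
 *		struct stmmac_resources res;
 *		int ret;
 *
 *		ret = stmmac_get_platform_resources(pdev, &res);
 *		if (ret)
 *			return ret;
 *
 *		plat = stmmac_probe_config_dt(pdev, res.mac);
 *		if (IS_ERR(plat))
 *			return PTR_ERR(plat);
 *
 *		return stmmac_dvr_probe(&pdev->dev, plat, &res);
 *	}
 */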
/**
 * stmmac_dvr_remove
 * @dev: device pointer
 * Description: this function resets the TX/RX processes, disables the MAC
 * RX/TX, changes the link status, and releases the DMA descriptor rings.
 */
int stmmac_dvr_remove(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	netdev_info(priv->dev, "%s: removing driver", __func__);

	stmmac_stop_all_dma(priv);
	stmmac_mac_set(priv, priv->ioaddr, false);
	netif_carrier_off(ndev);
	unregister_netdev(ndev);

	/* Serdes power down needs to happen after VLAN filter
	 * is deleted that is triggered by unregister_netdev().
	 */
	if (priv->plat->serdes_powerdown)
		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);

#ifdef CONFIG_DEBUG_FS
	stmmac_exit_fs(ndev);
#endif
	phylink_destroy(priv->phylink);
	if (priv->plat->stmmac_rst)
		reset_control_assert(priv->plat->stmmac_rst);
	reset_control_assert(priv->plat->stmmac_ahb_rst);
	pm_runtime_put(dev);
	pm_runtime_disable(dev);
	if (priv->hw->pcs != STMMAC_PCS_TBI &&
	    priv->hw->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
	destroy_workqueue(priv->wq);
	mutex_destroy(&priv->lock);
	bitmap_free(priv->af_xdp_zc_qps);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
/**
 * stmmac_suspend - suspend callback
 * @dev: device pointer
 * Description: this is the function to suspend the device and it is called
 * by the platform driver to stop the network queue, release the resources,
 * program the PMT register (for WoL), clean and release driver resources.
 */
int stmmac_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	u32 chan;

	if (!ndev || !netif_running(ndev))
		return 0;

	mutex_lock(&priv->lock);

	netif_device_detach(ndev);

	stmmac_disable_all_queues(priv);

	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->tx_queue[chan].txtimer);

	if (priv->eee_enabled) {
		priv->tx_path_in_lpi_mode = false;
		del_timer_sync(&priv->eee_ctrl_timer);
	}

	/* Stop TX/RX DMA */
	stmmac_stop_all_dma(priv);

	if (priv->plat->serdes_powerdown)
		priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);

	/* Enable Power down mode by programming the PMT regs */
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		stmmac_pmt(priv, priv->hw, priv->wolopts);
		priv->irq_wake = 1;
	} else {
		stmmac_mac_set(priv, priv->ioaddr, false);
		pinctrl_pm_select_sleep_state(priv->device);
	}

	mutex_unlock(&priv->lock);

	rtnl_lock();
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		phylink_suspend(priv->phylink, true);
	} else {
		if (device_may_wakeup(priv->device))
			phylink_speed_down(priv->phylink, false);
		phylink_suspend(priv->phylink, false);
	}
	rtnl_unlock();

	if (priv->dma_cap.fpesel) {
		/* Disable FPE */
		stmmac_fpe_configure(priv, priv->ioaddr,
				     priv->plat->tx_queues_to_use,
				     priv->plat->rx_queues_to_use, false);

		stmmac_fpe_handshake(priv, false);
		stmmac_fpe_stop_wq(priv);
	}

	priv->speed = SPEED_UNKNOWN;
	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_suspend);
/**
 * stmmac_reset_queues_param - reset queue parameters
 * @priv: device pointer
 */
static void stmmac_reset_queues_param(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	for (queue = 0; queue < rx_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		rx_q->cur_rx = 0;
		rx_q->dirty_rx = 0;
	}

	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		tx_q->cur_tx = 0;
		tx_q->dirty_tx = 0;
		tx_q->mss = 0;

		netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
	}
}
/**
 * stmmac_resume - resume callback
 * @dev: device pointer
 * Description: called on resume to bring the DMA engine and the MAC core
 * back to a usable state.
 */
int stmmac_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	int ret;

	if (!netif_running(ndev))
		return 0;

	/* The Power Down bit in the PM register is cleared
	 * automatically as soon as a magic packet or a Wake-up frame
	 * is received. Anyway, it's better to manually clear
	 * this bit because it can generate problems while resuming
	 * from other devices (e.g. serial console).
	 */
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		mutex_lock(&priv->lock);
		stmmac_pmt(priv, priv->hw, 0);
		mutex_unlock(&priv->lock);
		priv->irq_wake = 0;
	} else {
		pinctrl_pm_select_default_state(priv->device);
		/* reset the phy so that it's ready */
		if (priv->mii)
			stmmac_mdio_reset(priv->mii);
	}

	if (priv->plat->serdes_powerup) {
		ret = priv->plat->serdes_powerup(ndev,
						 priv->plat->bsp_priv);
		if (ret < 0)
			return ret;
	}

	rtnl_lock();
	if (device_may_wakeup(priv->device) && priv->plat->pmt) {
		phylink_resume(priv->phylink);
	} else {
		phylink_resume(priv->phylink);
		if (device_may_wakeup(priv->device))
			phylink_speed_up(priv->phylink);
	}
	rtnl_unlock();

	rtnl_lock();
	mutex_lock(&priv->lock);

	stmmac_reset_queues_param(priv);

	stmmac_free_tx_skbufs(priv);
	stmmac_clear_descriptors(priv);

	stmmac_hw_setup(ndev, false);
	stmmac_init_coalesce(priv);
	stmmac_set_rx_mode(ndev);

	stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);

	stmmac_enable_all_queues(priv);

	mutex_unlock(&priv->lock);
	rtnl_unlock();

	netif_device_attach(ndev);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_resume);
static int __init stmmac_cmdline_opt(char *str)
{
	char *opt;

	if (!str || !*str)
		return 1;
	while ((opt = strsep(&str, ",")) != NULL) {
		if (!strncmp(opt, "debug:", 6)) {
			if (kstrtoint(opt + 6, 0, &debug))
				goto err;
		} else if (!strncmp(opt, "phyaddr:", 8)) {
			if (kstrtoint(opt + 8, 0, &phyaddr))
				goto err;
		} else if (!strncmp(opt, "buf_sz:", 7)) {
			if (kstrtoint(opt + 7, 0, &buf_sz))
				goto err;
		} else if (!strncmp(opt, "tc:", 3)) {
			if (kstrtoint(opt + 3, 0, &tc))
				goto err;
		} else if (!strncmp(opt, "watchdog:", 9)) {
			if (kstrtoint(opt + 9, 0, &watchdog))
				goto err;
		} else if (!strncmp(opt, "flow_ctrl:", 10)) {
			if (kstrtoint(opt + 10, 0, &flow_ctrl))
				goto err;
		} else if (!strncmp(opt, "pause:", 6)) {
			if (kstrtoint(opt + 6, 0, &pause))
				goto err;
		} else if (!strncmp(opt, "eee_timer:", 10)) {
			if (kstrtoint(opt + 10, 0, &eee_timer))
				goto err;
		} else if (!strncmp(opt, "chain_mode:", 11)) {
			if (kstrtoint(opt + 11, 0, &chain_mode))
				goto err;
		}
	}
	return 1;

err:
	pr_err("%s: ERROR broken module parameter conversion", __func__);
	return 1;
}

__setup("stmmaceth=", stmmac_cmdline_opt);
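/* Example built-in command line, matching the parser above (values are
 * illustrative only):
 *
 *	stmmaceth=debug:16,phyaddr:1,watchdog:5000,tc:4
 */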
static int __init stmmac_init(void)
{
#ifdef CONFIG_DEBUG_FS
	/* Create debugfs main directory if it doesn't exist yet */
	if (!stmmac_fs_dir)
		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
	register_netdevice_notifier(&stmmac_notifier);
#endif

	return 0;
}

static void __exit stmmac_exit(void)
{
#ifdef CONFIG_DEBUG_FS
	unregister_netdevice_notifier(&stmmac_notifier);
	debugfs_remove_recursive(stmmac_fs_dir);
#endif
}

module_init(stmmac_init)
module_exit(stmmac_exit)
MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");