/* Source: drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
 * (git.proxmox.com mirror of mirror_ubuntu-jammy-kernel.git)
 */
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
4 * DWC Ether MAC version 4.00 has been used for developing this code.
5 *
6 * This only implements the mac core functions for this chip.
7 *
8 * Copyright (C) 2015 STMicroelectronics Ltd
9 *
10 * Author: Alexandre Torgue <alexandre.torgue@st.com>
11 */
12
13 #include <linux/crc32.h>
14 #include <linux/slab.h>
15 #include <linux/ethtool.h>
16 #include <linux/io.h>
17 #include <net/dsa.h>
18 #include "stmmac.h"
19 #include "stmmac_pcs.h"
20 #include "dwmac4.h"
21 #include "dwmac5.h"
22
23 static void dwmac4_core_init(struct mac_device_info *hw,
24 struct net_device *dev)
25 {
26 void __iomem *ioaddr = hw->pcsr;
27 u32 value = readl(ioaddr + GMAC_CONFIG);
28 int mtu = dev->mtu;
29
30 value |= GMAC_CORE_INIT;
31
32 if (mtu > 1500)
33 value |= GMAC_CONFIG_2K;
34 if (mtu > 2000)
35 value |= GMAC_CONFIG_JE;
36
37 if (hw->ps) {
38 value |= GMAC_CONFIG_TE;
39
40 value &= hw->link.speed_mask;
41 switch (hw->ps) {
42 case SPEED_1000:
43 value |= hw->link.speed1000;
44 break;
45 case SPEED_100:
46 value |= hw->link.speed100;
47 break;
48 case SPEED_10:
49 value |= hw->link.speed10;
50 break;
51 }
52 }
53
54 writel(value, ioaddr + GMAC_CONFIG);
55
56 /* Enable GMAC interrupts */
57 value = GMAC_INT_DEFAULT_ENABLE;
58
59 if (hw->pcs)
60 value |= GMAC_PCS_IRQ_DEFAULT;
61
62 writel(value, ioaddr + GMAC_INT_EN);
63 }
64
65 static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
66 u8 mode, u32 queue)
67 {
68 void __iomem *ioaddr = hw->pcsr;
69 u32 value = readl(ioaddr + GMAC_RXQ_CTRL0);
70
71 value &= GMAC_RX_QUEUE_CLEAR(queue);
72 if (mode == MTL_QUEUE_AVB)
73 value |= GMAC_RX_AV_QUEUE_ENABLE(queue);
74 else if (mode == MTL_QUEUE_DCB)
75 value |= GMAC_RX_DCB_QUEUE_ENABLE(queue);
76
77 writel(value, ioaddr + GMAC_RXQ_CTRL0);
78 }
79
80 static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
81 u32 prio, u32 queue)
82 {
83 void __iomem *ioaddr = hw->pcsr;
84 u32 base_register;
85 u32 value;
86
87 base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3;
88
89 value = readl(ioaddr + base_register);
90
91 value &= ~GMAC_RXQCTRL_PSRQX_MASK(queue);
92 value |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
93 GMAC_RXQCTRL_PSRQX_MASK(queue);
94 writel(value, ioaddr + base_register);
95 }
96
97 static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
98 u32 prio, u32 queue)
99 {
100 void __iomem *ioaddr = hw->pcsr;
101 u32 base_register;
102 u32 value;
103
104 base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1;
105
106 value = readl(ioaddr + base_register);
107
108 value &= ~GMAC_TXQCTRL_PSTQX_MASK(queue);
109 value |= (prio << GMAC_TXQCTRL_PSTQX_SHIFT(queue)) &
110 GMAC_TXQCTRL_PSTQX_MASK(queue);
111
112 writel(value, ioaddr + base_register);
113 }
114
115 static void dwmac4_rx_queue_routing(struct mac_device_info *hw,
116 u8 packet, u32 queue)
117 {
118 void __iomem *ioaddr = hw->pcsr;
119 u32 value;
120
121 static const struct stmmac_rx_routing route_possibilities[] = {
122 { GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT },
123 { GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT },
124 { GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT },
125 { GMAC_RXQCTRL_UPQ_MASK, GMAC_RXQCTRL_UPQ_SHIFT },
126 { GMAC_RXQCTRL_MCBCQ_MASK, GMAC_RXQCTRL_MCBCQ_SHIFT },
127 };
128
129 value = readl(ioaddr + GMAC_RXQ_CTRL1);
130
131 /* routing configuration */
132 value &= ~route_possibilities[packet - 1].reg_mask;
133 value |= (queue << route_possibilities[packet-1].reg_shift) &
134 route_possibilities[packet - 1].reg_mask;
135
136 /* some packets require extra ops */
137 if (packet == PACKET_AVCPQ) {
138 value &= ~GMAC_RXQCTRL_TACPQE;
139 value |= 0x1 << GMAC_RXQCTRL_TACPQE_SHIFT;
140 } else if (packet == PACKET_MCBCQ) {
141 value &= ~GMAC_RXQCTRL_MCBCQEN;
142 value |= 0x1 << GMAC_RXQCTRL_MCBCQEN_SHIFT;
143 }
144
145 writel(value, ioaddr + GMAC_RXQ_CTRL1);
146 }
147
148 static void dwmac4_prog_mtl_rx_algorithms(struct mac_device_info *hw,
149 u32 rx_alg)
150 {
151 void __iomem *ioaddr = hw->pcsr;
152 u32 value = readl(ioaddr + MTL_OPERATION_MODE);
153
154 value &= ~MTL_OPERATION_RAA;
155 switch (rx_alg) {
156 case MTL_RX_ALGORITHM_SP:
157 value |= MTL_OPERATION_RAA_SP;
158 break;
159 case MTL_RX_ALGORITHM_WSP:
160 value |= MTL_OPERATION_RAA_WSP;
161 break;
162 default:
163 break;
164 }
165
166 writel(value, ioaddr + MTL_OPERATION_MODE);
167 }
168
169 static void dwmac4_prog_mtl_tx_algorithms(struct mac_device_info *hw,
170 u32 tx_alg)
171 {
172 void __iomem *ioaddr = hw->pcsr;
173 u32 value = readl(ioaddr + MTL_OPERATION_MODE);
174
175 value &= ~MTL_OPERATION_SCHALG_MASK;
176 switch (tx_alg) {
177 case MTL_TX_ALGORITHM_WRR:
178 value |= MTL_OPERATION_SCHALG_WRR;
179 break;
180 case MTL_TX_ALGORITHM_WFQ:
181 value |= MTL_OPERATION_SCHALG_WFQ;
182 break;
183 case MTL_TX_ALGORITHM_DWRR:
184 value |= MTL_OPERATION_SCHALG_DWRR;
185 break;
186 case MTL_TX_ALGORITHM_SP:
187 value |= MTL_OPERATION_SCHALG_SP;
188 break;
189 default:
190 break;
191 }
192
193 writel(value, ioaddr + MTL_OPERATION_MODE);
194 }
195
196 static void dwmac4_set_mtl_tx_queue_weight(struct mac_device_info *hw,
197 u32 weight, u32 queue)
198 {
199 void __iomem *ioaddr = hw->pcsr;
200 u32 value = readl(ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
201
202 value &= ~MTL_TXQ_WEIGHT_ISCQW_MASK;
203 value |= weight & MTL_TXQ_WEIGHT_ISCQW_MASK;
204 writel(value, ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
205 }
206
207 static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan)
208 {
209 void __iomem *ioaddr = hw->pcsr;
210 u32 value;
211
212 if (queue < 4)
213 value = readl(ioaddr + MTL_RXQ_DMA_MAP0);
214 else
215 value = readl(ioaddr + MTL_RXQ_DMA_MAP1);
216
217 if (queue == 0 || queue == 4) {
218 value &= ~MTL_RXQ_DMA_Q04MDMACH_MASK;
219 value |= MTL_RXQ_DMA_Q04MDMACH(chan);
220 } else {
221 value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue);
222 value |= MTL_RXQ_DMA_QXMDMACH(chan, queue);
223 }
224
225 if (queue < 4)
226 writel(value, ioaddr + MTL_RXQ_DMA_MAP0);
227 else
228 writel(value, ioaddr + MTL_RXQ_DMA_MAP1);
229 }
230
/* Configure Credit-Based Shaping (IEEE 802.1Qav) on AVB TX queue @queue:
 * enables the AV algorithm with credit control, then programs the send
 * slope, idle slope, high credit and low credit parameters.
 */
static void dwmac4_config_cbs(struct mac_device_info *hw,
			      u32 send_slope, u32 idle_slope,
			      u32 high_credit, u32 low_credit, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	pr_debug("Queue %d configured as AVB. Parameters:\n", queue);
	pr_debug("\tsend_slope: 0x%08x\n", send_slope);
	pr_debug("\tidle_slope: 0x%08x\n", idle_slope);
	pr_debug("\thigh_credit: 0x%08x\n", high_credit);
	pr_debug("\tlow_credit: 0x%08x\n", low_credit);

	/* enable AV algorithm (with credit control, CC) */
	value = readl(ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));
	value |= MTL_ETS_CTRL_AVALG;
	value |= MTL_ETS_CTRL_CC;
	writel(value, ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));

	/* configure send slope */
	value = readl(ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));
	value &= ~MTL_SEND_SLP_CRED_SSC_MASK;
	value |= send_slope & MTL_SEND_SLP_CRED_SSC_MASK;
	writel(value, ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));

	/* configure idle slope (same register as tx weight) */
	dwmac4_set_mtl_tx_queue_weight(hw, idle_slope, queue);

	/* configure high credit */
	value = readl(ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));
	value &= ~MTL_HIGH_CRED_HC_MASK;
	value |= high_credit & MTL_HIGH_CRED_HC_MASK;
	writel(value, ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));

	/* configure low credit */
	value = readl(ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
	value &= ~MTL_HIGH_CRED_LC_MASK;
	value |= low_credit & MTL_HIGH_CRED_LC_MASK;
	writel(value, ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
}
271
/* Snapshot the first GMAC_REG_NUM MAC registers into @reg_space
 * (ethtool register dump support).
 */
static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space)
{
	void __iomem *ioaddr = hw->pcsr;
	int i;

	for (i = 0; i < GMAC_REG_NUM; i++)
		reg_space[i] = readl(ioaddr + i * 4);
}
280
281 static int dwmac4_rx_ipc_enable(struct mac_device_info *hw)
282 {
283 void __iomem *ioaddr = hw->pcsr;
284 u32 value = readl(ioaddr + GMAC_CONFIG);
285
286 if (hw->rx_csum)
287 value |= GMAC_CONFIG_IPC;
288 else
289 value &= ~GMAC_CONFIG_IPC;
290
291 writel(value, ioaddr + GMAC_CONFIG);
292
293 value = readl(ioaddr + GMAC_CONFIG);
294
295 return !!(value & GMAC_CONFIG_IPC);
296 }
297
298 static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
299 {
300 void __iomem *ioaddr = hw->pcsr;
301 unsigned int pmt = 0;
302 u32 config;
303
304 if (mode & WAKE_MAGIC) {
305 pr_debug("GMAC: WOL Magic frame\n");
306 pmt |= power_down | magic_pkt_en;
307 }
308 if (mode & WAKE_UCAST) {
309 pr_debug("GMAC: WOL on global unicast\n");
310 pmt |= power_down | global_unicast | wake_up_frame_en;
311 }
312
313 if (pmt) {
314 /* The receiver must be enabled for WOL before powering down */
315 config = readl(ioaddr + GMAC_CONFIG);
316 config |= GMAC_CONFIG_RE;
317 writel(config, ioaddr + GMAC_CONFIG);
318 }
319 writel(pmt, ioaddr + GMAC_PMT);
320 }
321
322 static void dwmac4_set_umac_addr(struct mac_device_info *hw,
323 unsigned char *addr, unsigned int reg_n)
324 {
325 void __iomem *ioaddr = hw->pcsr;
326
327 stmmac_dwmac4_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
328 GMAC_ADDR_LOW(reg_n));
329 }
330
331 static void dwmac4_get_umac_addr(struct mac_device_info *hw,
332 unsigned char *addr, unsigned int reg_n)
333 {
334 void __iomem *ioaddr = hw->pcsr;
335
336 stmmac_dwmac4_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
337 GMAC_ADDR_LOW(reg_n));
338 }
339
340 static void dwmac4_set_eee_mode(struct mac_device_info *hw,
341 bool en_tx_lpi_clockgating)
342 {
343 void __iomem *ioaddr = hw->pcsr;
344 u32 value;
345
346 /* Enable the link status receive on RGMII, SGMII ore SMII
347 * receive path and instruct the transmit to enter in LPI
348 * state.
349 */
350 value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
351 value |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;
352
353 if (en_tx_lpi_clockgating)
354 value |= GMAC4_LPI_CTRL_STATUS_LPITCSE;
355
356 writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
357 }
358
359 static void dwmac4_reset_eee_mode(struct mac_device_info *hw)
360 {
361 void __iomem *ioaddr = hw->pcsr;
362 u32 value;
363
364 value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
365 value &= ~(GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA);
366 writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
367 }
368
369 static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link)
370 {
371 void __iomem *ioaddr = hw->pcsr;
372 u32 value;
373
374 value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
375
376 if (link)
377 value |= GMAC4_LPI_CTRL_STATUS_PLS;
378 else
379 value &= ~GMAC4_LPI_CTRL_STATUS_PLS;
380
381 writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
382 }
383
384 static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
385 {
386 void __iomem *ioaddr = hw->pcsr;
387 int value = ((tw & 0xffff)) | ((ls & 0x3ff) << 16);
388
389 /* Program the timers in the LPI timer control register:
390 * LS: minimum time (ms) for which the link
391 * status from PHY should be ok before transmitting
392 * the LPI pattern.
393 * TW: minimum time (us) for which the core waits
394 * after it has stopped transmitting the LPI pattern.
395 */
396 writel(value, ioaddr + GMAC4_LPI_TIMER_CTRL);
397 }
398
/* Program the MAC packet filter from the net_device flags and address
 * lists: promiscuous / all-multi / multicast hash filtering, plus the
 * unicast perfect filters.
 *
 * NOTE(review): perfect-filter slots are only (re)written, never
 * cleared, so entries removed from the unicast list may linger in
 * hardware until overwritten — confirm whether callers rely on this.
 */
static void dwmac4_set_filter(struct mac_device_info *hw,
			      struct net_device *dev)
{
	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
	unsigned int value = 0;

	if (dev->flags & IFF_PROMISC) {
		value = GMAC_PACKET_FILTER_PR;
	} else if ((dev->flags & IFF_ALLMULTI) ||
			(netdev_mc_count(dev) > HASH_TABLE_SIZE)) {
		/* Pass all multi */
		value = GMAC_PACKET_FILTER_PM;
		/* Set the 64 bits of the HASH tab. To be updated if taller
		 * hash table is used
		 */
		writel(0xffffffff, ioaddr + GMAC_HASH_TAB_0_31);
		writel(0xffffffff, ioaddr + GMAC_HASH_TAB_32_63);
	} else if (!netdev_mc_empty(dev)) {
		u32 mc_filter[2];
		struct netdev_hw_addr *ha;

		/* Hash filter for multicast */
		value = GMAC_PACKET_FILTER_HMC;

		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			/* The upper 6 bits of the calculated CRC are used to
			 * index the content of the Hash Table Reg 0 and 1.
			 */
			int bit_nr =
				(bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26);
			/* The most significant bit determines the register
			 * to use while the other 5 bits determines the bit
			 * within the selected register
			 */
			mc_filter[bit_nr >> 5] |= (1 << (bit_nr & 0x1F));
		}
		writel(mc_filter[0], ioaddr + GMAC_HASH_TAB_0_31);
		writel(mc_filter[1], ioaddr + GMAC_HASH_TAB_32_63);
	}

	/* Handle multiple unicast addresses */
	if (netdev_uc_count(dev) > GMAC_MAX_PERFECT_ADDRESSES) {
		/* Switch to promiscuous mode if more than 128 addrs
		 * are required
		 */
		value |= GMAC_PACKET_FILTER_PR;
	} else if (!netdev_uc_empty(dev)) {
		/* Slot 0 holds the device's own address, so extra unicast
		 * addresses start at slot 1.
		 */
		int reg = 1;
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, dev) {
			dwmac4_set_umac_addr(hw, ha->addr, reg);
			reg++;
		}
	}

	writel(value, ioaddr + GMAC_PACKET_FILTER);
}
458
459 static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
460 unsigned int fc, unsigned int pause_time,
461 u32 tx_cnt)
462 {
463 void __iomem *ioaddr = hw->pcsr;
464 unsigned int flow = 0;
465 u32 queue = 0;
466
467 pr_debug("GMAC Flow-Control:\n");
468 if (fc & FLOW_RX) {
469 pr_debug("\tReceive Flow-Control ON\n");
470 flow |= GMAC_RX_FLOW_CTRL_RFE;
471 writel(flow, ioaddr + GMAC_RX_FLOW_CTRL);
472 }
473 if (fc & FLOW_TX) {
474 pr_debug("\tTransmit Flow-Control ON\n");
475
476 if (duplex)
477 pr_debug("\tduplex mode: PAUSE %d\n", pause_time);
478
479 for (queue = 0; queue < tx_cnt; queue++) {
480 flow |= GMAC_TX_FLOW_CTRL_TFE;
481
482 if (duplex)
483 flow |=
484 (pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);
485
486 writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
487 }
488 }
489 }
490
/* PCS: configure auto-negotiation (delegates to the common PCS helper
 * at GMAC_PCS_BASE).
 */
static void dwmac4_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral,
			    bool loopback)
{
	dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
}
496
/* PCS: restart auto-negotiation (common PCS helper at GMAC_PCS_BASE). */
static void dwmac4_rane(void __iomem *ioaddr, bool restart)
{
	dwmac_rane(ioaddr, GMAC_PCS_BASE, restart);
}
501
/* PCS: read the link-partner advertisement into @adv (common helper). */
static void dwmac4_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
{
	dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
}
506
507 /* RGMII or SMII interface */
508 static void dwmac4_phystatus(void __iomem *ioaddr, struct stmmac_extra_stats *x)
509 {
510 u32 status;
511
512 status = readl(ioaddr + GMAC_PHYIF_CONTROL_STATUS);
513 x->irq_rgmii_n++;
514
515 /* Check the link status */
516 if (status & GMAC_PHYIF_CTRLSTATUS_LNKSTS) {
517 int speed_value;
518
519 x->pcs_link = 1;
520
521 speed_value = ((status & GMAC_PHYIF_CTRLSTATUS_SPEED) >>
522 GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT);
523 if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_125)
524 x->pcs_speed = SPEED_1000;
525 else if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_25)
526 x->pcs_speed = SPEED_100;
527 else
528 x->pcs_speed = SPEED_10;
529
530 x->pcs_duplex = (status & GMAC_PHYIF_CTRLSTATUS_LNKMOD_MASK);
531
532 pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed,
533 x->pcs_duplex ? "Full" : "Half");
534 } else {
535 x->pcs_link = 0;
536 pr_info("Link is Down\n");
537 }
538 }
539
540 static int dwmac4_irq_mtl_status(struct mac_device_info *hw, u32 chan)
541 {
542 void __iomem *ioaddr = hw->pcsr;
543 u32 mtl_int_qx_status;
544 int ret = 0;
545
546 mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);
547
548 /* Check MTL Interrupt */
549 if (mtl_int_qx_status & MTL_INT_QX(chan)) {
550 /* read Queue x Interrupt status */
551 u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(chan));
552
553 if (status & MTL_RX_OVERFLOW_INT) {
554 /* clear Interrupt */
555 writel(status | MTL_RX_OVERFLOW_INT,
556 ioaddr + MTL_CHAN_INT_CTRL(chan));
557 ret = CORE_IRQ_MTL_RX_OVERFLOW;
558 }
559 }
560
561 return ret;
562 }
563
564 static int dwmac4_irq_status(struct mac_device_info *hw,
565 struct stmmac_extra_stats *x)
566 {
567 void __iomem *ioaddr = hw->pcsr;
568 u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
569 u32 intr_enable = readl(ioaddr + GMAC_INT_EN);
570 int ret = 0;
571
572 /* Discard disabled bits */
573 intr_status &= intr_enable;
574
575 /* Not used events (e.g. MMC interrupts) are not handled. */
576 if ((intr_status & mmc_tx_irq))
577 x->mmc_tx_irq_n++;
578 if (unlikely(intr_status & mmc_rx_irq))
579 x->mmc_rx_irq_n++;
580 if (unlikely(intr_status & mmc_rx_csum_offload_irq))
581 x->mmc_rx_csum_offload_irq_n++;
582 /* Clear the PMT bits 5 and 6 by reading the PMT status reg */
583 if (unlikely(intr_status & pmt_irq)) {
584 readl(ioaddr + GMAC_PMT);
585 x->irq_receive_pmt_irq_n++;
586 }
587
588 /* MAC tx/rx EEE LPI entry/exit interrupts */
589 if (intr_status & lpi_irq) {
590 /* Clear LPI interrupt by reading MAC_LPI_Control_Status */
591 u32 status = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
592
593 if (status & GMAC4_LPI_CTRL_STATUS_TLPIEN) {
594 ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
595 x->irq_tx_path_in_lpi_mode_n++;
596 }
597 if (status & GMAC4_LPI_CTRL_STATUS_TLPIEX) {
598 ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
599 x->irq_tx_path_exit_lpi_mode_n++;
600 }
601 if (status & GMAC4_LPI_CTRL_STATUS_RLPIEN)
602 x->irq_rx_path_in_lpi_mode_n++;
603 if (status & GMAC4_LPI_CTRL_STATUS_RLPIEX)
604 x->irq_rx_path_exit_lpi_mode_n++;
605 }
606
607 dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
608 if (intr_status & PCS_RGSMIIIS_IRQ)
609 dwmac4_phystatus(ioaddr, x);
610
611 return ret;
612 }
613
/* Fill the ethtool debug counters in @x from the per-queue MTL TX/RX
 * debug registers and the global GMAC debug register.
 */
static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
			 u32 rx_queues, u32 tx_queues)
{
	u32 value;
	u32 queue;

	/* Per-TX-queue MTL debug status */
	for (queue = 0; queue < tx_queues; queue++) {
		value = readl(ioaddr + MTL_CHAN_TX_DEBUG(queue));

		if (value & MTL_DEBUG_TXSTSFSTS)
			x->mtl_tx_status_fifo_full++;
		if (value & MTL_DEBUG_TXFSTS)
			x->mtl_tx_fifo_not_empty++;
		if (value & MTL_DEBUG_TWCSTS)
			x->mmtl_fifo_ctrl++;
		if (value & MTL_DEBUG_TRCSTS_MASK) {
			/* TX read-controller state field */
			u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
				     >> MTL_DEBUG_TRCSTS_SHIFT;
			if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
				x->mtl_tx_fifo_read_ctrl_write++;
			else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
				x->mtl_tx_fifo_read_ctrl_wait++;
			else if (trcsts == MTL_DEBUG_TRCSTS_READ)
				x->mtl_tx_fifo_read_ctrl_read++;
			else
				x->mtl_tx_fifo_read_ctrl_idle++;
		}
		if (value & MTL_DEBUG_TXPAUSED)
			x->mac_tx_in_pause++;
	}

	/* Per-RX-queue MTL debug status */
	for (queue = 0; queue < rx_queues; queue++) {
		value = readl(ioaddr + MTL_CHAN_RX_DEBUG(queue));

		if (value & MTL_DEBUG_RXFSTS_MASK) {
			/* NOTE(review): the RXFSTS field is extracted with
			 * MTL_DEBUG_RRCSTS_SHIFT rather than a dedicated
			 * RXFSTS shift — verify against the field layout
			 * in dwmac4.h before changing.
			 */
			u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
				     >> MTL_DEBUG_RRCSTS_SHIFT;

			if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
				x->mtl_rx_fifo_fill_level_full++;
			else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
				x->mtl_rx_fifo_fill_above_thresh++;
			else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
				x->mtl_rx_fifo_fill_below_thresh++;
			else
				x->mtl_rx_fifo_fill_level_empty++;
		}
		if (value & MTL_DEBUG_RRCSTS_MASK) {
			/* RX read-controller state field */
			u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
				     MTL_DEBUG_RRCSTS_SHIFT;

			if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
				x->mtl_rx_fifo_read_ctrl_flush++;
			else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
				x->mtl_rx_fifo_read_ctrl_read_data++;
			else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
				x->mtl_rx_fifo_read_ctrl_status++;
			else
				x->mtl_rx_fifo_read_ctrl_idle++;
		}
		if (value & MTL_DEBUG_RWCSTS)
			x->mtl_rx_fifo_ctrl_active++;
	}

	/* GMAC debug */
	value = readl(ioaddr + GMAC_DEBUG);

	if (value & GMAC_DEBUG_TFCSTS_MASK) {
		/* TX flow-control state field */
		u32 tfcsts = (value & GMAC_DEBUG_TFCSTS_MASK)
			      >> GMAC_DEBUG_TFCSTS_SHIFT;

		if (tfcsts == GMAC_DEBUG_TFCSTS_XFER)
			x->mac_tx_frame_ctrl_xfer++;
		else if (tfcsts == GMAC_DEBUG_TFCSTS_GEN_PAUSE)
			x->mac_tx_frame_ctrl_pause++;
		else if (tfcsts == GMAC_DEBUG_TFCSTS_WAIT)
			x->mac_tx_frame_ctrl_wait++;
		else
			x->mac_tx_frame_ctrl_idle++;
	}
	if (value & GMAC_DEBUG_TPESTS)
		x->mac_gmii_tx_proto_engine++;
	if (value & GMAC_DEBUG_RFCFCSTS_MASK)
		x->mac_rx_frame_ctrl_fifo = (value & GMAC_DEBUG_RFCFCSTS_MASK)
					    >> GMAC_DEBUG_RFCFCSTS_SHIFT;
	if (value & GMAC_DEBUG_RPESTS)
		x->mac_gmii_rx_proto_engine++;
}
702
/* MAC callbacks for the original DWMAC 4.00 core (uses the plain
 * stmmac_set_mac helper).
 */
const struct stmmac_ops dwmac4_ops = {
	.core_init = dwmac4_core_init,
	.set_mac = stmmac_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_eee_mode = dwmac4_set_eee_mode,
	.reset_eee_mode = dwmac4_reset_eee_mode,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.pcs_rane = dwmac4_rane,
	.pcs_get_adv_lp = dwmac4_get_adv_lp,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
};
733
/* MAC callbacks for the DWMAC 4.10 core — identical to dwmac4_ops
 * except for the GMAC4-specific set_mac helper.
 */
const struct stmmac_ops dwmac410_ops = {
	.core_init = dwmac4_core_init,
	.set_mac = stmmac_dwmac4_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_eee_mode = dwmac4_set_eee_mode,
	.reset_eee_mode = dwmac4_reset_eee_mode,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.pcs_rane = dwmac4_rane,
	.pcs_get_adv_lp = dwmac4_get_adv_lp,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
};
764
/* MAC callbacks for the DWMAC 5.10 core — dwmac410_ops plus the
 * dwmac5 safety-feature, RX parser (rxp) and flexible-PPS callbacks.
 */
const struct stmmac_ops dwmac510_ops = {
	.core_init = dwmac4_core_init,
	.set_mac = stmmac_dwmac4_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_eee_mode = dwmac4_set_eee_mode,
	.reset_eee_mode = dwmac4_reset_eee_mode,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.pcs_rane = dwmac4_rane,
	.pcs_get_adv_lp = dwmac4_get_adv_lp,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
	.safety_feat_config = dwmac5_safety_feat_config,
	.safety_feat_irq_status = dwmac5_safety_feat_irq_status,
	.safety_feat_dump = dwmac5_safety_feat_dump,
	.rxp_config = dwmac5_rxp_config,
	.flex_pps_config = dwmac5_flex_pps_config,
};
800
801 int dwmac4_setup(struct stmmac_priv *priv)
802 {
803 struct mac_device_info *mac = priv->hw;
804
805 dev_info(priv->device, "\tDWMAC4/5\n");
806
807 priv->dev->priv_flags |= IFF_UNICAST_FLT;
808 mac->pcsr = priv->ioaddr;
809 mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
810 mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
811 mac->mcast_bits_log2 = 0;
812
813 if (mac->multicast_filter_bins)
814 mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
815
816 mac->link.duplex = GMAC_CONFIG_DM;
817 mac->link.speed10 = GMAC_CONFIG_PS;
818 mac->link.speed100 = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
819 mac->link.speed1000 = 0;
820 mac->link.speed_mask = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
821 mac->mii.addr = GMAC_MDIO_ADDR;
822 mac->mii.data = GMAC_MDIO_DATA;
823 mac->mii.addr_shift = 21;
824 mac->mii.addr_mask = GENMASK(25, 21);
825 mac->mii.reg_shift = 16;
826 mac->mii.reg_mask = GENMASK(20, 16);
827 mac->mii.clk_csr_shift = 8;
828 mac->mii.clk_csr_mask = GENMASK(11, 8);
829
830 return 0;
831 }