// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/phy/phy.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/regmap.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tso.h>

#include "mvpp2.h"
#include "mvpp2_prs.h"
#include "mvpp2_cls.h"

enum mvpp2_bm_pool_log_num {
	MVPP2_BM_SHORT,
	MVPP2_BM_LONG,
	MVPP2_BM_JUMBO,
	MVPP2_BM_POOLS_NUM
};

static struct {
	int pkt_size;
	int buf_num;
} mvpp2_pools[MVPP2_BM_POOLS_NUM];

/* The prototype is added here to be used in start_dev when using ACPI. This
 * will be removed once phylink is used for all modes (dt+ACPI).
 */
static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
			     const struct phylink_link_state *state);
static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode,
			      phy_interface_t interface, struct phy_device *phy);

/* Queue modes */
#define MVPP2_QDIST_SINGLE_MODE	0
#define MVPP2_QDIST_MULTI_MODE	1

static int queue_mode = MVPP2_QDIST_MULTI_MODE;

module_param(queue_mode, int, 0444);
MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");

/* Utility/helper methods */

void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->swth_base[0] + offset);
}

u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->swth_base[0] + offset);
}

static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
{
	return readl_relaxed(priv->swth_base[0] + offset);
}

static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu)
{
	return cpu % priv->nthreads;
}
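
/* Illustrative note (not part of the original driver): the mapping above is
 * a plain modulo, so if the hardware exposes nthreads == 4 register windows,
 * CPU 5 is served through window 5 % 4 == 1. The concrete nthreads value is
 * probed at runtime; 4 is only an assumed figure for this example.
 */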

/* These accessors should be used to access:
 *
 * - per-thread registers, where each thread has its own copy of the
 *   register.
 *
 *   MVPP2_BM_VIRT_ALLOC_REG
 *   MVPP2_BM_ADDR_HIGH_ALLOC
 *   MVPP22_BM_ADDR_HIGH_RLS_REG
 *   MVPP2_BM_VIRT_RLS_REG
 *   MVPP2_ISR_RX_TX_CAUSE_REG
 *   MVPP2_ISR_RX_TX_MASK_REG
 *   MVPP2_TXQ_SENT_REG
 *   MVPP2_AGGR_TXQ_UPDATE_REG
 *   MVPP2_TXQ_RSVD_REQ_REG
 *   MVPP2_TXQ_RSVD_RSLT_REG
 *   MVPP2_AGGR_TXQ_STATUS_REG
 *   MVPP2_AGGR_TXQ_INDEX_REG
 *
 * - global registers that must be accessed through a specific thread
 *   window, because they are related to an access to a per-thread
 *   register
 *
 *   MVPP2_BM_PHY_ALLOC_REG    (related to MVPP2_BM_VIRT_ALLOC_REG)
 *   MVPP2_BM_PHY_RLS_REG      (related to MVPP2_BM_VIRT_RLS_REG)
 *   MVPP2_RXQ_THRESH_REG      (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_ADDR_REG   (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_SIZE_REG   (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_INDEX_REG       (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_TXQ_PENDING_REG     (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_ADDR_REG   (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_SIZE_REG   (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_INDEX_REG       (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_PREF_BUF_REG    (related to MVPP2_TXQ_NUM_REG)
 */
static void mvpp2_thread_write(struct mvpp2 *priv, unsigned int thread,
			       u32 offset, u32 data)
{
	writel(data, priv->swth_base[thread] + offset);
}

static u32 mvpp2_thread_read(struct mvpp2 *priv, unsigned int thread,
			     u32 offset)
{
	return readl(priv->swth_base[thread] + offset);
}

static void mvpp2_thread_write_relaxed(struct mvpp2 *priv, unsigned int thread,
				       u32 offset, u32 data)
{
	writel_relaxed(data, priv->swth_base[thread] + offset);
}

static u32 mvpp2_thread_read_relaxed(struct mvpp2 *priv, unsigned int thread,
				     u32 offset)
{
	return readl_relaxed(priv->swth_base[thread] + offset);
}
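
/* Illustrative sketch (not upstream code) of the indirect access pattern the
 * comment above describes: a per-queue register is programmed by first
 * latching the queue number in the window register, then touching the
 * related register through the *same* thread window, e.g.:
 *
 *	thread = mvpp2_cpu_to_thread(priv, get_cpu());
 *	mvpp2_thread_write(priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
 *	mvpp2_thread_write(priv, thread, MVPP2_RXQ_THRESH_REG, pkts_coal);
 *	put_cpu();
 *
 * Interleaving another queue's MVPP2_RXQ_NUM_REG write through the same
 * thread window between the two writes would redirect the second write.
 */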

static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(tx_desc->pp21.buf_dma_addr);
	else
		return le64_to_cpu(tx_desc->pp22.buf_dma_addr_ptp) &
		       MVPP2_DESC_DMA_MASK;
}

static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{
	dma_addr_t addr, offset;

	addr = dma_addr & ~MVPP2_TX_DESC_ALIGN;
	offset = dma_addr & MVPP2_TX_DESC_ALIGN;

	if (port->priv->hw_version == MVPP21) {
		tx_desc->pp21.buf_dma_addr = cpu_to_le32(addr);
		tx_desc->pp21.packet_offset = offset;
	} else {
		__le64 val = cpu_to_le64(addr);

		tx_desc->pp22.buf_dma_addr_ptp &= ~cpu_to_le64(MVPP2_DESC_DMA_MASK);
		tx_desc->pp22.buf_dma_addr_ptp |= val;
		tx_desc->pp22.packet_offset = offset;
	}
}

static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le16_to_cpu(tx_desc->pp21.data_size);
	else
		return le16_to_cpu(tx_desc->pp22.data_size);
}

static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  size_t size)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.data_size = cpu_to_le16(size);
	else
		tx_desc->pp22.data_size = cpu_to_le16(size);
}

static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int txq)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.phys_txq = txq;
	else
		tx_desc->pp22.phys_txq = txq;
}

static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.command = cpu_to_le32(command);
	else
		tx_desc->pp22.command = cpu_to_le32(command);
}

static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.packet_offset;
	else
		return tx_desc->pp22.packet_offset;
}

static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.buf_dma_addr);
	else
		return le64_to_cpu(rx_desc->pp22.buf_dma_addr_key_hash) &
		       MVPP2_DESC_DMA_MASK;
}

static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.buf_cookie);
	else
		return le64_to_cpu(rx_desc->pp22.buf_cookie_misc) &
		       MVPP2_DESC_DMA_MASK;
}

static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le16_to_cpu(rx_desc->pp21.data_size);
	else
		return le16_to_cpu(rx_desc->pp22.data_size);
}

static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
				   struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.status);
	else
		return le32_to_cpu(rx_desc->pp22.status);
}
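
/* Illustrative sketch (not upstream code) of how the pp21/pp22 accessors
 * above are meant to be combined when filling a TX descriptor, so callers
 * never touch the version-specific unions directly; this is a simplified
 * excerpt of the usual Tx flow:
 *
 *	tx_desc = mvpp2_txq_next_desc_get(txq);
 *	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
 *	mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));
 *	mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
 *	mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
 */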

static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}

static void mvpp2_txq_inc_put(struct mvpp2_port *port,
			      struct mvpp2_txq_pcpu *txq_pcpu,
			      struct sk_buff *skb,
			      struct mvpp2_tx_desc *tx_desc)
{
	struct mvpp2_txq_pcpu_buf *tx_buf =
		txq_pcpu->buffs + txq_pcpu->txq_put_index;
	tx_buf->skb = skb;
	tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
	tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
		mvpp2_txdesc_offset_get(port, tx_desc);
	txq_pcpu->txq_put_index++;
	if (txq_pcpu->txq_put_index == txq_pcpu->size)
		txq_pcpu->txq_put_index = 0;
}

/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}
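
/* Worked example (illustrative, with values assumed from mvpp2.h:
 * MVPP2_MAX_TCONT == 6, MVPP2_MAX_TXQ == 8): port 1, logical txq 2 maps to
 * physical txq (6 + 1) * 8 + 2 == 58. Because each port gets a disjoint
 * stride of MVPP2_MAX_TXQ physical queues, per-port TXQ ranges never
 * overlap.
 */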

static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool)
{
	if (likely(pool->frag_size <= PAGE_SIZE))
		return netdev_alloc_frag(pool->frag_size);
	else
		return kmalloc(pool->frag_size, GFP_ATOMIC);
}

static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool, void *data)
{
	if (likely(pool->frag_size <= PAGE_SIZE))
		skb_free_frag(data);
	else
		kfree(data);
}

/* Buffer Manager configuration routines */

/* Create pool */
static int mvpp2_bm_pool_create(struct platform_device *pdev,
				struct mvpp2 *priv,
				struct mvpp2_bm_pool *bm_pool, int size)
{
	u32 val;

	/* Number of buffer pointers must be a multiple of 16, as per
	 * hardware constraints
	 */
	if (!IS_ALIGNED(size, 16))
		return -EINVAL;

	/* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16
	 * bytes per buffer pointer
	 */
	if (priv->hw_version == MVPP21)
		bm_pool->size_bytes = 2 * sizeof(u32) * size;
	else
		bm_pool->size_bytes = 2 * sizeof(u64) * size;

	bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, bm_pool->size_bytes,
						&bm_pool->dma_addr,
						GFP_KERNEL);
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
			MVPP2_BM_POOL_PTR_ALIGN)) {
		dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
				  bm_pool->virt_addr, bm_pool->dma_addr);
		dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
		    lower_32_bits(bm_pool->dma_addr));
	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_START_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	bm_pool->size = size;
	bm_pool->pkt_size = 0;
	bm_pool->buf_num = 0;

	return 0;
}

/* Set pool buffer size */
static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
				      struct mvpp2_bm_pool *bm_pool,
				      int buf_size)
{
	u32 val;

	bm_pool->buf_size = buf_size;

	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
}

static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
				    struct mvpp2_bm_pool *bm_pool,
				    dma_addr_t *dma_addr,
				    phys_addr_t *phys_addr)
{
	unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());

	*dma_addr = mvpp2_thread_read(priv, thread,
				      MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
	*phys_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_VIRT_ALLOC_REG);

	if (priv->hw_version == MVPP22) {
		u32 val;
		u32 dma_addr_highbits, phys_addr_highbits;

		val = mvpp2_thread_read(priv, thread, MVPP22_BM_ADDR_HIGH_ALLOC);
		dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
		phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
			MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;

		if (sizeof(dma_addr_t) == 8)
			*dma_addr |= (u64)dma_addr_highbits << 32;

		if (sizeof(phys_addr_t) == 8)
			*phys_addr |= (u64)phys_addr_highbits << 32;
	}

	put_cpu();
}

/* Free all buffers from the pool */
static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
			       struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	int i;

	if (buf_num > bm_pool->buf_num) {
		WARN(1, "Pool does not have so many bufs pool(%d) bufs(%d)\n",
		     bm_pool->id, buf_num);
		buf_num = bm_pool->buf_num;
	}

	for (i = 0; i < buf_num; i++) {
		dma_addr_t buf_dma_addr;
		phys_addr_t buf_phys_addr;
		void *data;

		mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
					&buf_dma_addr, &buf_phys_addr);

		dma_unmap_single(dev, buf_dma_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);

		data = (void *)phys_to_virt(buf_phys_addr);
		if (!data)
			break;

		mvpp2_frag_free(bm_pool, data);
	}

	/* Update BM driver with number of buffers removed from pool */
	bm_pool->buf_num -= i;
}

/* Check number of buffers in BM pool */
static int mvpp2_check_hw_buf_num(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool)
{
	int buf_num = 0;

	buf_num += mvpp2_read(priv, MVPP2_BM_POOL_PTRS_NUM_REG(bm_pool->id)) &
				    MVPP22_BM_POOL_PTRS_NUM_MASK;
	buf_num += mvpp2_read(priv, MVPP2_BM_BPPI_PTRS_NUM_REG(bm_pool->id)) &
				    MVPP2_BM_BPPI_PTR_NUM_MASK;

	/* HW has one buffer ready which is not reflected in the counters */
	if (buf_num)
		buf_num += 1;

	return buf_num;
}

/* Cleanup pool */
static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
				 struct mvpp2 *priv,
				 struct mvpp2_bm_pool *bm_pool)
{
	int buf_num;
	u32 val;

	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
	mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool, buf_num);

	/* Check buffer counters after free */
	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
	if (buf_num) {
		WARN(1, "cannot free all buffers in pool %d, buf_num left %d\n",
		     bm_pool->id, bm_pool->buf_num);
		return 0;
	}

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_STOP_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	dma_free_coherent(&pdev->dev, bm_pool->size_bytes,
			  bm_pool->virt_addr,
			  bm_pool->dma_addr);
	return 0;
}

static int mvpp2_bm_pools_init(struct platform_device *pdev,
			       struct mvpp2 *priv)
{
	int i, err, size;
	struct mvpp2_bm_pool *bm_pool;

	/* Create all pools with maximum size */
	size = MVPP2_BM_POOL_SIZE_MAX;
	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
		if (err)
			goto err_unroll_pools;
		mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
	}
	return 0;

err_unroll_pools:
	dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
	for (i = i - 1; i >= 0; i--)
		mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
	return err;
}

static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	int i, err;

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		/* Mask BM all interrupts */
		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
		/* Clear BM cause register */
		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
	}

	/* Allocate and initialize BM pools */
	priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
				      sizeof(*priv->bm_pools), GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	err = mvpp2_bm_pools_init(pdev, priv);
	if (err < 0)
		return err;
	return 0;
}

static void mvpp2_setup_bm_pool(void)
{
	/* Short pool */
	mvpp2_pools[MVPP2_BM_SHORT].buf_num = MVPP2_BM_SHORT_BUF_NUM;
	mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE;

	/* Long pool */
	mvpp2_pools[MVPP2_BM_LONG].buf_num = MVPP2_BM_LONG_BUF_NUM;
	mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE;

	/* Jumbo pool */
	mvpp2_pools[MVPP2_BM_JUMBO].buf_num = MVPP2_BM_JUMBO_BUF_NUM;
	mvpp2_pools[MVPP2_BM_JUMBO].pkt_size = MVPP2_BM_JUMBO_PKT_SIZE;
}

/* Attach long pool to rxq */
static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
				    int lrxq, int long_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_LONG_MASK;
	else
		mask = MVPP22_RXQ_POOL_LONG_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Attach short pool to rxq */
static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
				     int lrxq, int short_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_SHORT_MASK;
	else
		mask = MVPP22_RXQ_POOL_SHORT_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

static void *mvpp2_buf_alloc(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool,
			     dma_addr_t *buf_dma_addr,
			     phys_addr_t *buf_phys_addr,
			     gfp_t gfp_mask)
{
	dma_addr_t dma_addr;
	void *data;

	data = mvpp2_frag_alloc(bm_pool);
	if (!data)
		return NULL;

	dma_addr = dma_map_single(port->dev->dev.parent, data,
				  MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
				  DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
		mvpp2_frag_free(bm_pool, data);
		return NULL;
	}
	*buf_dma_addr = dma_addr;
	*buf_phys_addr = virt_to_phys(data);

	return data;
}

/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
				     dma_addr_t buf_dma_addr,
				     phys_addr_t buf_phys_addr)
{
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
	unsigned long flags = 0;

	if (test_bit(thread, &port->priv->lock_map))
		spin_lock_irqsave(&port->bm_lock[thread], flags);

	if (port->priv->hw_version == MVPP22) {
		u32 val = 0;

		if (sizeof(dma_addr_t) == 8)
			val |= upper_32_bits(buf_dma_addr) &
				MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;

		if (sizeof(phys_addr_t) == 8)
			val |= (upper_32_bits(buf_phys_addr)
				<< MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
				MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;

		mvpp2_thread_write_relaxed(port->priv, thread,
					   MVPP22_BM_ADDR_HIGH_RLS_REG, val);
	}

	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
	 * returned in the "cookie" field of the RX
	 * descriptor. Instead of storing the virtual address, we
	 * store the physical address
	 */
	mvpp2_thread_write_relaxed(port->priv, thread,
				   MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
	mvpp2_thread_write_relaxed(port->priv, thread,
				   MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);

	if (test_bit(thread, &port->priv->lock_map))
		spin_unlock_irqrestore(&port->bm_lock[thread], flags);

	put_cpu();
}
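
/* Illustrative note (an assumption, not upstream text): the write order in
 * mvpp2_bm_pool_put() matters. The high-bits and cookie writes only stage
 * state in the per-thread window; the final write to
 * MVPP2_BM_PHY_RLS_REG(pool) is what actually returns the buffer to the
 * pool, so it must come last.
 */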

/* Allocate buffers for the pool */
static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	int i, buf_size, total_size;
	dma_addr_t dma_addr;
	phys_addr_t phys_addr;
	void *buf;

	buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
	total_size = MVPP2_RX_TOTAL_SIZE(buf_size);

	if (buf_num < 0 ||
	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
		netdev_err(port->dev,
			   "cannot allocate %d buffers for pool %d\n",
			   buf_num, bm_pool->id);
		return 0;
	}

	for (i = 0; i < buf_num; i++) {
		buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr,
				      &phys_addr, GFP_KERNEL);
		if (!buf)
			break;

		mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
				  phys_addr);
	}

	/* Update BM driver with number of buffers added to pool */
	bm_pool->buf_num += i;

	netdev_dbg(port->dev,
		   "pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
		   bm_pool->id, bm_pool->pkt_size, buf_size, total_size);

	netdev_dbg(port->dev,
		   "pool %d: %d of %d buffers added\n",
		   bm_pool->id, i, buf_num);
	return i;
}

/* Notify the driver that BM pool is being used as specific type and return the
 * pool pointer on success
 */
static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if (pool >= MVPP2_BM_POOLS_NUM) {
		netdev_err(port->dev, "Invalid pool %d\n", pool);
		return NULL;
	}

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or BM pool hasn't been used yet
	 */
	if (new_pool->pkt_size == 0) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0)
			pkts_num = mvpp2_pools[pool].buf_num;
		else
			mvpp2_bm_bufs_free(port->dev->dev.parent,
					   port->priv, new_pool, pkts_num);

		new_pool->pkt_size = pkt_size;
		new_pool->frag_size =
			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
			MVPP2_SKB_SHINFO_SIZE;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	return new_pool;
}

/* Initialize pools for swf */
static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
	int rxq;
	enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool;

	/* If port pkt_size is higher than 1518B:
	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
	 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
	 */
	if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
		long_log_pool = MVPP2_BM_JUMBO;
		short_log_pool = MVPP2_BM_LONG;
	} else {
		long_log_pool = MVPP2_BM_LONG;
		short_log_pool = MVPP2_BM_SHORT;
	}

	if (!port->pool_long) {
		port->pool_long =
			mvpp2_bm_pool_use(port, long_log_pool,
					  mvpp2_pools[long_log_pool].pkt_size);
		if (!port->pool_long)
			return -ENOMEM;

		port->pool_long->port_map |= BIT(port->id);

		for (rxq = 0; rxq < port->nrxqs; rxq++)
			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
	}

	if (!port->pool_short) {
		port->pool_short =
			mvpp2_bm_pool_use(port, short_log_pool,
					  mvpp2_pools[short_log_pool].pkt_size);
		if (!port->pool_short)
			return -ENOMEM;

		port->pool_short->port_map |= BIT(port->id);

		for (rxq = 0; rxq < port->nrxqs; rxq++)
			mvpp2_rxq_short_pool_set(port, rxq,
						 port->pool_short->id);
	}

	return 0;
}
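
/* Worked example (illustrative): a port configured with MTU 9000 gives a
 * pkt_size above MVPP2_BM_LONG_PKT_SIZE (1518B per the comment above), so
 * the HW long pool is backed by the SW jumbo pool and the HW short pool by
 * the SW long pool; with the default MTU of 1500 the long/short pools are
 * used directly.
 */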

static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	enum mvpp2_bm_pool_log_num new_long_pool;
	int pkt_size = MVPP2_RX_PKT_SIZE(mtu);

	/* If port MTU is higher than 1518B:
	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
	 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
	 */
	if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
		new_long_pool = MVPP2_BM_JUMBO;
	else
		new_long_pool = MVPP2_BM_LONG;

	if (new_long_pool != port->pool_long->id) {
		/* Remove port from old short & long pool */
		port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id,
						    port->pool_long->pkt_size);
		port->pool_long->port_map &= ~BIT(port->id);
		port->pool_long = NULL;

		port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id,
						     port->pool_short->pkt_size);
		port->pool_short->port_map &= ~BIT(port->id);
		port->pool_short = NULL;

		port->pkt_size = pkt_size;

		/* Add port to new short & long pool */
		mvpp2_swf_bm_pool_init(port);

		/* Update L4 checksum when jumbo enable/disable on port */
		if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
			dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
			dev->hw_features &= ~(NETIF_F_IP_CSUM |
					      NETIF_F_IPV6_CSUM);
		} else {
			dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
			dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
		}
	}

	dev->mtu = mtu;
	dev->wanted_features = dev->features;

	netdev_update_features(dev);
	return 0;
}

static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
{
	int i, sw_thread_mask = 0;

	for (i = 0; i < port->nqvecs; i++)
		sw_thread_mask |= port->qvecs[i].sw_thread_mask;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask));
}

static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
{
	int i, sw_thread_mask = 0;

	for (i = 0; i < port->nqvecs; i++)
		sw_thread_mask |= port->qvecs[i].sw_thread_mask;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask));
}

static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec)
{
	struct mvpp2_port *port = qvec->port;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask));
}

static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
{
	struct mvpp2_port *port = qvec->port;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask));
}

/* Mask the current thread's Rx/Tx interrupts
 * Called by on_each_cpu(), guaranteed to run with migration disabled,
 * using smp_processor_id() is OK.
 */
static void mvpp2_interrupts_mask(void *arg)
{
	struct mvpp2_port *port = arg;

	/* If the thread isn't used, don't do anything */
	if (smp_processor_id() > port->priv->nthreads)
		return;

	mvpp2_thread_write(port->priv,
			   mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
}

/* Unmask the current thread's Rx/Tx interrupts.
 * Called by on_each_cpu(), guaranteed to run with migration disabled,
 * using smp_processor_id() is OK.
 */
static void mvpp2_interrupts_unmask(void *arg)
{
	struct mvpp2_port *port = arg;
	u32 val;

	/* If the thread isn't used, don't do anything */
	if (smp_processor_id() > port->priv->nthreads)
		return;

	val = MVPP2_CAUSE_MISC_SUM_MASK |
		MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
	if (port->has_tx_irqs)
		val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;

	mvpp2_thread_write(port->priv,
			   mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
}

static void
mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
{
	u32 val;
	int i;

	if (port->priv->hw_version != MVPP22)
		return;

	if (mask)
		val = 0;
	else
		val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(MVPP22);

	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *v = port->qvecs + i;

		if (v->type != MVPP2_QUEUE_VECTOR_SHARED)
			continue;

		mvpp2_thread_write(port->priv, v->sw_thread_id,
				   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
	}
}

/* Port configuration routines */

static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
	if (port->gop_id == 2)
		val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII;
	else if (port->gop_id == 3)
		val |= GENCONF_CTRL0_PORT1_RGMII_MII;
	regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
}

static void mvpp22_gop_init_sgmii(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT |
	       GENCONF_PORT_CTRL0_RX_DATA_SAMPLE;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	if (port->gop_id > 1) {
		regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
		if (port->gop_id == 2)
			val &= ~GENCONF_CTRL0_PORT0_RGMII;
		else if (port->gop_id == 3)
			val &= ~GENCONF_CTRL0_PORT1_RGMII_MII;
		regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
	}
}

static void mvpp22_gop_init_10gkr(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
	void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
	u32 val;

	/* XPCS */
	val = readl(xpcs + MVPP22_XPCS_CFG0);
	val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) |
		 MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3));
	val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2);
	writel(val, xpcs + MVPP22_XPCS_CFG0);

	/* MPCS */
	val = readl(mpcs + MVPP22_MPCS_CTRL);
	val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN;
	writel(val, mpcs + MVPP22_MPCS_CTRL);

	val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
	val &= ~(MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7) | MAC_CLK_RESET_MAC |
		 MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX);
	val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1);
	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);

	val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET;
	val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX;
	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
}

static int mvpp22_gop_init(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	if (!priv->sysctrl_base)
		return 0;

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		if (port->gop_id == 0)
			goto invalid_conf;
		mvpp22_gop_init_rgmii(port);
		break;
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		mvpp22_gop_init_sgmii(port);
		break;
	case PHY_INTERFACE_MODE_10GKR:
		if (port->gop_id != 0)
			goto invalid_conf;
		mvpp22_gop_init_10gkr(port);
		break;
	default:
		goto unsupported_conf;
	}

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val);
	val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) |
	       GENCONF_PORT_CTRL1_EN(port->gop_id);
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val);

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val);
	val |= GENCONF_SOFT_RESET1_GOP;
	regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val);

unsupported_conf:
	return 0;

invalid_conf:
	netdev_err(port->dev, "Invalid port configuration\n");
	return -EINVAL;
}

static void mvpp22_gop_unmask_irq(struct mvpp2_port *port)
{
	u32 val;

	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
	    port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
	    port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) {
		/* Enable the GMAC link status irq for this port */
		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
		val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
	}

	if (port->gop_id == 0) {
		/* Enable the XLG/GIG irqs for this port */
		val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
		if (port->phy_interface == PHY_INTERFACE_MODE_10GKR)
			val |= MVPP22_XLG_EXT_INT_MASK_XLG;
		else
			val |= MVPP22_XLG_EXT_INT_MASK_GIG;
		writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
	}
}

static void mvpp22_gop_mask_irq(struct mvpp2_port *port)
{
	u32 val;

	if (port->gop_id == 0) {
		val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
		val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG |
			 MVPP22_XLG_EXT_INT_MASK_GIG);
		writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
	}

	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
	    port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
	    port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) {
		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
		val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
	}
}

static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
{
	u32 val;

	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
	    port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
	    port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) {
		val = readl(port->base + MVPP22_GMAC_INT_MASK);
		val |= MVPP22_GMAC_INT_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_MASK);
	}

	if (port->gop_id == 0) {
		val = readl(port->base + MVPP22_XLG_INT_MASK);
		val |= MVPP22_XLG_INT_MASK_LINK;
		writel(val, port->base + MVPP22_XLG_INT_MASK);
	}

	mvpp22_gop_unmask_irq(port);
}

/* Sets the PHY mode of the COMPHY (which configures the serdes lanes).
 *
 * The PHY mode used by the PPv2 driver comes from the network subsystem, while
 * the one given to the COMPHY comes from the generic PHY subsystem. Hence they
 * differ.
 *
 * The COMPHY configures the serdes lanes regardless of the actual use of the
 * lanes by the physical layer. This is why configurations like
 * "PPv2 (2500BaseX) - COMPHY (2500SGMII)" are valid.
 */
static int mvpp22_comphy_init(struct mvpp2_port *port)
{
	int ret;

	if (!port->comphy)
		return 0;

	ret = phy_set_mode_ext(port->comphy, PHY_MODE_ETHERNET,
			       port->phy_interface);
	if (ret)
		return ret;

	return phy_power_on(port->comphy);
}

static void mvpp2_port_enable(struct mvpp2_port *port)
{
	u32 val;

	/* Only GOP port 0 has an XLG MAC */
	if (port->gop_id == 0 &&
	    (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
	     port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
		val |= MVPP22_XLG_CTRL0_PORT_EN |
		       MVPP22_XLG_CTRL0_MAC_RESET_DIS;
		val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	} else {
		val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
		val |= MVPP2_GMAC_PORT_EN_MASK;
		val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
		writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
	}
}

static void mvpp2_port_disable(struct mvpp2_port *port)
{
	u32 val;

	/* Only GOP port 0 has an XLG MAC */
	if (port->gop_id == 0 &&
	    (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
	     port->phy_interface == PHY_INTERFACE_MODE_10GKR)) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
		val &= ~MVPP22_XLG_CTRL0_PORT_EN;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);

		/* Disable & reset should be done separately */
		val &= ~MVPP22_XLG_CTRL0_MAC_RESET_DIS;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	} else {
		val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
		val &= ~(MVPP2_GMAC_PORT_EN_MASK);
		writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
	}
}

/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
		    ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

/* Configure loopback port */
static void mvpp2_port_loopback_set(struct mvpp2_port *port,
				    const struct phylink_link_state *state)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);

	if (state->speed == 1000)
		val |= MVPP2_GMAC_GMII_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;

	if (port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
	    port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
	    port->phy_interface == PHY_INTERFACE_MODE_2500BASEX)
		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;

	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

struct mvpp2_ethtool_counter {
	unsigned int offset;
	const char string[ETH_GSTRING_LEN];
	bool reg_is_64b;
};

static u64 mvpp2_read_count(struct mvpp2_port *port,
			    const struct mvpp2_ethtool_counter *counter)
{
	u64 val;

	val = readl(port->stats_base + counter->offset);
	if (counter->reg_is_64b)
		val += (u64)readl(port->stats_base + counter->offset + 4) << 32;

	return val;
}

/* Due to the fact that software statistics and hardware statistics are, by
 * design, incremented at different moments in the chain of packet processing,
 * it is very likely that incoming packets could have been dropped after being
 * counted by hardware but before reaching software statistics (most probably
 * multicast packets), and in the opposite way, during transmission, FCS bytes
 * are added in between as well as TSO skb will be split and header bytes added.
 * Hence, statistics gathered from userspace with ifconfig (software) and
 * ethtool (hardware) cannot be compared.
 */
static const struct mvpp2_ethtool_counter mvpp2_ethtool_regs[] = {
	{ MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true },
	{ MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" },
	{ MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" },
	{ MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" },
	{ MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" },
	{ MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" },
	{ MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" },
	{ MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" },
	{ MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" },
	{ MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" },
	{ MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" },
	{ MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" },
	{ MVPP2_MIB_GOOD_OCTETS_SENT, "good_octets_sent", true },
	{ MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" },
	{ MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" },
	{ MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" },
	{ MVPP2_MIB_FC_SENT, "fc_sent" },
	{ MVPP2_MIB_FC_RCVD, "fc_received" },
	{ MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" },
	{ MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" },
	{ MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" },
	{ MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" },
	{ MVPP2_MIB_JABBER_RCVD, "jabber_received" },
	{ MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" },
	{ MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" },
	{ MVPP2_MIB_COLLISION, "collision" },
	{ MVPP2_MIB_LATE_COLLISION, "late_collision" },
};

static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
				      u8 *data)
{
	if (sset == ETH_SS_STATS) {
		int i;

		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       &mvpp2_ethtool_regs[i].string, ETH_GSTRING_LEN);
	}
}

static void mvpp2_gather_hw_statistics(struct work_struct *work)
{
	struct delayed_work *del_work = to_delayed_work(work);
	struct mvpp2_port *port = container_of(del_work, struct mvpp2_port,
					       stats_work);
	u64 *pstats;
	int i;

	mutex_lock(&port->gather_stats_lock);

	pstats = port->ethtool_stats;
	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
		*pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_regs[i]);

	/* No need to read again the counters right after this function if it
	 * was called asynchronously by the user (i.e. use of ethtool).
	 */
	cancel_delayed_work(&port->stats_work);
	queue_delayed_work(port->priv->stats_queue, &port->stats_work,
			   MVPP2_MIB_COUNTERS_STATS_DELAY);

	mutex_unlock(&port->gather_stats_lock);
}

static void mvpp2_ethtool_get_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct mvpp2_port *port = netdev_priv(dev);

	/* Update statistics for the given port, then take the lock to avoid
	 * concurrent accesses on the ethtool_stats structure during its copy.
	 */
	mvpp2_gather_hw_statistics(&port->stats_work.work);

	mutex_lock(&port->gather_stats_lock);
	memcpy(data, port->ethtool_stats,
	       sizeof(u64) * ARRAY_SIZE(mvpp2_ethtool_regs));
	mutex_unlock(&port->gather_stats_lock);
}

static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(mvpp2_ethtool_regs);

	return -EOPNOTSUPP;
}

static void mvpp2_port_reset(struct mvpp2_port *port)
{
	u32 val;
	unsigned int i;

	/* Read the GOP statistics to reset the hardware counters */
	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
		mvpp2_read_count(port, &mvpp2_ethtool_regs[i]);

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
	      ~MVPP2_GMAC_PORT_RESET_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
	       MVPP2_GMAC_PORT_RESET_MASK)
		continue;
}

/* Change maximum receive size of the port */
static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
	val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
		    MVPP2_GMAC_MAX_RX_SIZE_OFFS);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* Change maximum receive size of the port */
static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP22_XLG_CTRL1_REG);
	val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK;
	val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
	       MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS;
	writel(val, port->base + MVPP22_XLG_CTRL1_REG);
}

/* Set defaults to the MVPP2 port */
static void mvpp2_defaults_set(struct mvpp2_port *port)
{
	int tx_port_num, val, queue, ptxq, lrxq;

	if (port->priv->hw_version == MVPP21) {
		/* Update TX FIFO MIN Threshold */
		val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
		val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
		/* Min. TX threshold must be less than minimal packet length */
		val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
		writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	}

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
		    tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);

	/* Set TXQ scheduling to Round-Robin */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_FIXED_PRIO_REG, 0);

	/* Close bandwidth for all queues */
	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
		ptxq = mvpp2_txq_phys(port->id, queue);
		mvpp2_write(port->priv,
			    MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
	}

	/* Set refill period to 1 usec, refill tokens
	 * and bucket size to maximum
	 */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
		    port->priv->tclk / USEC_PER_SEC);
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
	val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
	val = MVPP2_TXP_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);

	/* Set MaximumLowLatencyPacketSize value to 256 */
	mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
		    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
		    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));

	/* Enable Rx cache snoop */
	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_SNOOP_PKT_SIZE_MASK |
			   MVPP2_SNOOP_BUF_HDR_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}

	/* At default, mask all interrupts to all present cpus */
	mvpp2_interrupts_disable(port);
}

/* Enable/disable receiving packets */
static void mvpp2_ingress_enable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val &= ~MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

static void mvpp2_ingress_disable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

/* Enable transmit via physical egress queue
 * - HW starts taking descriptors from DRAM
 */
static void mvpp2_egress_enable(struct mvpp2_port *port)
{
	u32 qmap;
	int queue;
	int tx_port_num = mvpp2_egress_port(port);

	/* Enable all initialized TXs. */
	qmap = 0;
	for (queue = 0; queue < port->ntxqs; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		if (txq->descs)
			qmap |= (1 << queue);
	}

	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
}

/* Disable transmit via physical egress queue
 * - HW doesn't take descriptors from DRAM
 */
static void mvpp2_egress_disable(struct mvpp2_port *port)
{
	u32 reg_data;
	int delay;
	int tx_port_num = mvpp2_egress_port(port);

	/* Issue stop command for active channels only */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
		    MVPP2_TXP_SCHED_ENQ_MASK;
	if (reg_data != 0)
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
			    (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));

	/* Wait for all Tx activity to terminate. */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "Tx stop timed out, status=0x%08x\n",
				    reg_data);
			break;
		}
		mdelay(1);
		delay++;

		/* Check port TX Command register that all
		 * Tx queues are stopped
		 */
		reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
}

/* Rx descriptors helper methods */

/* Get number of Rx descriptors occupied by received packets */
static int
mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
{
	u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));

	return val & MVPP2_RXQ_OCCUPIED_MASK;
}

/* Update Rx queue status with the number of occupied and available
 * Rx descriptor slots.
 */
static void
mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
			int used_count, int free_count)
{
	/* Decrement the number of used descriptors and increment the
	 * number of free descriptors.
	 */
	u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);

	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
}

/* Get pointer to next RX descriptor to be processed by SW */
static inline struct mvpp2_rx_desc *
mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}
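
/* Illustrative note (not upstream code): MVPP2_QUEUE_NEXT_DESC() wraps the
 * ring index, so assuming a ring of 128 descriptors (last_desc == 127),
 * consuming descriptor 127 advances next_desc_to_proc back to 0, while the
 * prefetch above warms the cache line of the descriptor that will be read
 * next.
 */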

/* Set rx queue offset */
static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
				 int prxq, int offset)
{
	u32 val;

	/* Convert offset from bytes to units of 32 bytes */
	offset = offset >> 5;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;

	val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
		    MVPP2_RXQ_PACKET_OFFSET_MASK);

	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Tx descriptors helper methods */

/* Get pointer to next Tx descriptor to be processed (send) by HW */
static struct mvpp2_tx_desc *
mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Update HW with number of aggregated Tx descriptors to be sent
 *
 * Called only from mvpp2_tx(), so migration is disabled, using
 * smp_processor_id() is OK.
 */
static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
{
	/* aggregated access - relevant TXQ number is written in TX desc */
	mvpp2_thread_write(port->priv,
			   mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
			   MVPP2_AGGR_TXQ_UPDATE_REG, pending);
}

/* Check if there are enough free descriptors in aggregated txq.
 * If not, update the number of occupied descriptors and repeat the check.
 *
 * Called only from mvpp2_tx(), so migration is disabled, using
 * smp_processor_id() is OK.
 */
static int mvpp2_aggr_desc_num_check(struct mvpp2_port *port,
				     struct mvpp2_tx_queue *aggr_txq, int num)
{
	if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) {
		/* Update number of occupied aggregated Tx descriptors */
		unsigned int thread =
			mvpp2_cpu_to_thread(port->priv, smp_processor_id());
		u32 val = mvpp2_read_relaxed(port->priv,
					     MVPP2_AGGR_TXQ_STATUS_REG(thread));

		aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;

		if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE)
			return -ENOMEM;
	}
	return 0;
}

/* Reserved Tx descriptors allocation request
 *
 * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called
 * only by mvpp2_tx(), so migration is disabled, using
 * smp_processor_id() is OK.
 */
static int mvpp2_txq_alloc_reserved_desc(struct mvpp2_port *port,
					 struct mvpp2_tx_queue *txq, int num)
{
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
	struct mvpp2 *priv = port->priv;
	u32 val;

	val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
	mvpp2_thread_write_relaxed(priv, thread, MVPP2_TXQ_RSVD_REQ_REG, val);

	val = mvpp2_thread_read_relaxed(priv, thread, MVPP2_TXQ_RSVD_RSLT_REG);

	return val & MVPP2_TXQ_RSVD_RSLT_MASK;
}

/* Check if there are enough reserved descriptors for transmission.
 * If not, request chunk of reserved descriptors and check again.
 */
static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2_port *port,
					    struct mvpp2_tx_queue *txq,
					    struct mvpp2_txq_pcpu *txq_pcpu,
					    int num)
{
	int req, desc_count;
	unsigned int thread;

	if (txq_pcpu->reserved_num >= num)
		return 0;

	/* Not enough descriptors reserved! Update the reserved descriptor
	 * count and check again.
	 */

	desc_count = 0;
	/* Compute total of used descriptors */
	for (thread = 0; thread < port->priv->nthreads; thread++) {
		struct mvpp2_txq_pcpu *txq_pcpu_aux;

		txq_pcpu_aux = per_cpu_ptr(txq->pcpu, thread);
		desc_count += txq_pcpu_aux->count;
		desc_count += txq_pcpu_aux->reserved_num;
	}

	req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
	desc_count += req;

	if (desc_count >
	   (txq->size - (MVPP2_MAX_THREADS * MVPP2_CPU_DESC_CHUNK)))
		return -ENOMEM;

	txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(port, txq, req);

	/* OK, the descriptor could have been updated: check again. */
	if (txq_pcpu->reserved_num < num)
		return -ENOMEM;
	return 0;
}
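
/* Worked example (illustrative, MVPP2_CPU_DESC_CHUNK == 64 assumed from
 * mvpp2.h): a thread holding 10 reserved descriptors that needs 12 requests
 * max(64, 12 - 10) == 64 more, so reservations are always taken in whole
 * chunks. The check against
 * txq->size - MVPP2_MAX_THREADS * MVPP2_CPU_DESC_CHUNK keeps every thread
 * able to grab one more chunk without overcommitting the queue.
 */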

/* Release the last allocated Tx descriptor. Useful to handle DMA
 * mapping failures in the Tx path.
 */
static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
{
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
}

/* Set Tx descriptors fields relevant for CSUM calculation */
static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto,
			       int ip_hdr_len, int l4_proto)
{
	u32 command;

	/* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
	 * G_L4_chk, L4_type required only for checksum calculation
	 */
	command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
	command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
	command |= MVPP2_TXD_IP_CSUM_DISABLE;

	if (l3_proto == htons(ETH_P_IP)) {
		command &= ~MVPP2_TXD_IP_CSUM_DISABLE;	/* enable IPv4 csum */
		command &= ~MVPP2_TXD_L3_IP6;		/* enable IPv4 */
	} else {
		command |= MVPP2_TXD_L3_IP6;		/* enable IPv6 */
	}

	if (l4_proto == IPPROTO_TCP) {
		command &= ~MVPP2_TXD_L4_UDP;		/* enable TCP */
		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
	} else if (l4_proto == IPPROTO_UDP) {
		command |= MVPP2_TXD_L4_UDP;		/* enable UDP */
		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
	} else {
		command |= MVPP2_TXD_L4_CSUM_NOT;
	}

	return command;
}
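
/* Illustrative call (not upstream code): a TCP/IPv4 frame with a 14-byte
 * Ethernet header and a 20-byte IP header could be described as
 *
 *	command = mvpp2_txq_desc_csum(14, htons(ETH_P_IP),
 *				      ip_hdr(skb)->ihl, IPPROTO_TCP);
 *
 * where ihl == 5 (the IP header length in 32-bit words); passing the length
 * in words rather than bytes is an assumption based on how the Tx path
 * normally derives it from the IP header.
 */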

/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 * Per-thread access
 *
 * Called only from mvpp2_txq_done(), called from mvpp2_tx()
 * (migration disabled) and from the TX completion tasklet (migration
 * disabled) so using smp_processor_id() is OK.
 */
static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
					   struct mvpp2_tx_queue *txq)
{
	u32 val;

	/* Reading status reg resets transmitted descriptor counter */
	val = mvpp2_thread_read_relaxed(port->priv,
					mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
					MVPP2_TXQ_SENT_REG(txq->id));

	return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
		MVPP2_TRANSMITTED_COUNT_OFFSET;
}

/* Called through on_each_cpu(), so runs on all CPUs, with migration
 * disabled, therefore using smp_processor_id() is OK.
 */
static void mvpp2_txq_sent_counter_clear(void *arg)
{
	struct mvpp2_port *port = arg;
	int queue;

	/* If the thread isn't used, don't do anything */
	if (smp_processor_id() > port->priv->nthreads)
		return;

	for (queue = 0; queue < port->ntxqs; queue++) {
		int id = port->txqs[queue]->id;

		mvpp2_thread_read(port->priv,
				  mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
				  MVPP2_TXQ_SENT_REG(id));
	}
}

/* Set max sizes for Tx queues */
static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
{
	u32 val, size, mtu;
	int txq, tx_port_num;

	mtu = port->pkt_size * 8;
	if (mtu > MVPP2_TXP_MTU_MAX)
		mtu = MVPP2_TXP_MTU_MAX;

	/* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
	mtu = 3 * mtu;

	/* Indirect access to registers */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	/* Set MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
	val &= ~MVPP2_TXP_MTU_MAX;
	val |= mtu;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);

	/* TXP token size and all TXQs token size must be larger than MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
	size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
		val |= size;
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
	}

	for (txq = 0; txq < port->ntxqs; txq++) {
		val = mvpp2_read(port->priv,
				 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
		size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;

		if (size < mtu) {
			size = mtu;
			val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvpp2_write(port->priv,
				    MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
				    val);
		}
	}
}

/* Set the number of packets that will be received before Rx interrupt
 * will be generated by HW.
 */
static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
				   struct mvpp2_rx_queue *rxq)
{
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());

	if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
		rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;

	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_THRESH_REG,
			   rxq->pkts_coal);

	put_cpu();
}
1886 static void mvpp2_tx_pkts_coal_set(struct mvpp2_port
*port
,
1887 struct mvpp2_tx_queue
*txq
)
1889 unsigned int thread
= mvpp2_cpu_to_thread(port
->priv
, get_cpu());
1892 if (txq
->done_pkts_coal
> MVPP2_TXQ_THRESH_MASK
)
1893 txq
->done_pkts_coal
= MVPP2_TXQ_THRESH_MASK
;
1895 val
= (txq
->done_pkts_coal
<< MVPP2_TXQ_THRESH_OFFSET
);
1896 mvpp2_thread_write(port
->priv
, thread
, MVPP2_TXQ_NUM_REG
, txq
->id
);
1897 mvpp2_thread_write(port
->priv
, thread
, MVPP2_TXQ_THRESH_REG
, val
);

static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
{
	u64 tmp = (u64)clk_hz * usec;

	do_div(tmp, USEC_PER_SEC);

	return tmp > U32_MAX ? U32_MAX : tmp;
}

static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
{
	u64 tmp = (u64)cycles * USEC_PER_SEC;

	do_div(tmp, clk_hz);

	return tmp > U32_MAX ? U32_MAX : tmp;
}
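
/* Worked example (illustrative): with tclk == 250 MHz, a 100 usec
 * coalescing delay converts to 250000000 * 100 / 1000000 == 25000 clock
 * cycles, and converting 25000 cycles back yields 100 usec. The 250 MHz
 * figure is only an assumption for the example; tclk is read from the
 * clock framework at probe time.
 */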

/* Set the time delay in usec before Rx interrupt */
static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
				   struct mvpp2_rx_queue *rxq)
{
	unsigned long freq = port->priv->tclk;
	u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);

	if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
		rxq->time_coal =
			mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);

		/* re-evaluate to get actual register value */
		val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
	}

	mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
}

static void mvpp2_tx_time_coal_set(struct mvpp2_port *port)
{
	unsigned long freq = port->priv->tclk;
	u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);

	if (val > MVPP2_MAX_ISR_TX_THRESHOLD) {
		port->tx_time_coal =
			mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq);

		/* re-evaluate to get actual register value */
		val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
	}

	mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val);
}

/* Free Tx queue skbuffs */
static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
				struct mvpp2_tx_queue *txq,
				struct mvpp2_txq_pcpu *txq_pcpu, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		struct mvpp2_txq_pcpu_buf *tx_buf =
			txq_pcpu->buffs + txq_pcpu->txq_get_index;

		if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma))
			dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
					 tx_buf->size, DMA_TO_DEVICE);
		if (tx_buf->skb)
			dev_kfree_skb_any(tx_buf->skb);

		mvpp2_txq_inc_get(txq_pcpu);
	}
}

static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->rxqs[queue];
}

static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->txqs[queue];
}

/* Handle end of transmission */
static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
			   struct mvpp2_txq_pcpu *txq_pcpu)
{
	struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
	int tx_done;

	if (txq_pcpu->thread != mvpp2_cpu_to_thread(port->priv, smp_processor_id()))
		netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");

	tx_done = mvpp2_txq_sent_desc_proc(port, txq);
	if (!tx_done)
		return;
	mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);

	txq_pcpu->count -= tx_done;

	if (netif_tx_queue_stopped(nq))
		if (txq_pcpu->count <= txq_pcpu->wake_threshold)
			netif_tx_wake_queue(nq);
}
static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
				  unsigned int thread)
{
	struct mvpp2_tx_queue *txq;
	struct mvpp2_txq_pcpu *txq_pcpu;
	unsigned int tx_todo = 0;

	while (cause) {
		txq = mvpp2_get_tx_queue(port, cause);
		if (!txq)
			break;

		txq_pcpu = per_cpu_ptr(txq->pcpu, thread);

		if (txq_pcpu->count) {
			mvpp2_txq_done(port, txq, txq_pcpu);
			tx_todo += txq_pcpu->count;
		}

		cause &= ~(1 << txq->log_id);
	}
	return tx_todo;
}
/* Rx/Tx queue initialization/cleanup methods */

/* Allocate and initialize descriptors for aggr TXQ */
static int mvpp2_aggr_txq_init(struct platform_device *pdev,
			       struct mvpp2_tx_queue *aggr_txq,
			       unsigned int thread, struct mvpp2 *priv)
{
	u32 txq_dma;

	/* Allocate memory for TX descriptors */
	aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
					     MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
					     &aggr_txq->descs_dma, GFP_KERNEL);
	if (!aggr_txq->descs)
		return -ENOMEM;

	aggr_txq->last_desc = MVPP2_AGGR_TXQ_SIZE - 1;

	/* Aggr TXQ no reset WA */
	aggr_txq->next_desc_to_proc = mvpp2_read(priv,
						 MVPP2_AGGR_TXQ_INDEX_REG(thread));

	/* Set Tx descriptors queue starting address indirect
	 * access
	 */
	if (priv->hw_version == MVPP21)
		txq_dma = aggr_txq->descs_dma;
	else
		txq_dma = aggr_txq->descs_dma >>
			MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;

	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(thread), txq_dma);
	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(thread),
		    MVPP2_AGGR_TXQ_SIZE);

	return 0;
}
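/* Note: on PPv2.2 the ring can live in a 40-bit physical space while the
 * register is 32 bits wide, so only the address bits above
 * MVPP22_AGGR_TXQ_DESC_ADDR_OFFS are programmed; this assumes the
 * dma_alloc_coherent() buffer is aligned on that boundary, which holds
 * since coherent allocations are at least page aligned.
 */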
/* Create a specified Rx queue */
static int mvpp2_rxq_init(struct mvpp2_port *port,
			  struct mvpp2_rx_queue *rxq)
{
	unsigned int thread;
	u32 rxq_dma;

	rxq->size = port->rx_ring_size;

	/* Allocate memory for RX descriptors */
	rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
					rxq->size * MVPP2_DESC_ALIGNED_SIZE,
					&rxq->descs_dma, GFP_KERNEL);
	if (!rxq->descs)
		return -ENOMEM;

	rxq->last_desc = rxq->size - 1;

	/* Zero occupied and non-occupied counters - direct access */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);

	/* Set Rx descriptors queue starting address - indirect access */
	thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
	if (port->priv->hw_version == MVPP21)
		rxq_dma = rxq->descs_dma;
	else
		rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_INDEX_REG, 0);
	put_cpu();

	/* Set Offset */
	mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);

	/* Set coalescing pkts and time */
	mvpp2_rx_pkts_coal_set(port, rxq);
	mvpp2_rx_time_coal_set(port, rxq);

	/* Add number of descriptors ready for receiving packets */
	mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);

	return 0;
}
/* Push packets received by the RXQ to BM pool */
static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
				struct mvpp2_rx_queue *rxq)
{
	int rx_received, i;

	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (!rx_received)
		return;

	for (i = 0; i < rx_received; i++) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
		int pool;

		pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >>
			MVPP2_RXD_BM_POOL_ID_OFFS;

		mvpp2_bm_pool_put(port, pool,
				  mvpp2_rxdesc_dma_addr_get(port, rx_desc),
				  mvpp2_rxdesc_cookie_get(port, rx_desc));
	}
	mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
}
/* Cleanup Rx queue */
static void mvpp2_rxq_deinit(struct mvpp2_port *port,
			     struct mvpp2_rx_queue *rxq)
{
	unsigned int thread;

	mvpp2_rxq_drop_pkts(port, rxq);

	if (rxq->descs)
		dma_free_coherent(port->dev->dev.parent,
				  rxq->size * MVPP2_DESC_ALIGNED_SIZE,
				  rxq->descs, rxq->descs_dma);

	rxq->descs = NULL;
	rxq->last_desc = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_dma = 0;

	/* Clear Rx descriptors queue starting address and size;
	 * free descriptor number
	 */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
	thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, 0);
	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, 0);
	put_cpu();
}
/* Create and initialize a Tx queue */
static int mvpp2_txq_init(struct mvpp2_port *port,
			  struct mvpp2_tx_queue *txq)
{
	u32 val;
	unsigned int thread;
	int desc, desc_per_txq, tx_port_num;
	struct mvpp2_txq_pcpu *txq_pcpu;

	txq->size = port->tx_ring_size;

	/* Allocate memory for Tx descriptors */
	txq->descs = dma_alloc_coherent(port->dev->dev.parent,
					txq->size * MVPP2_DESC_ALIGNED_SIZE,
					&txq->descs_dma, GFP_KERNEL);
	if (!txq->descs)
		return -ENOMEM;

	txq->last_desc = txq->size - 1;

	/* Set Tx descriptors queue starting address - indirect access */
	thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG,
			   txq->descs_dma);
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG,
			   txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_INDEX_REG, 0);
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_RSVD_CLR_REG,
			   txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
	val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PENDING_REG);
	val &= ~MVPP2_TXQ_PENDING_MASK;
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PENDING_REG, val);

	/* Calculate base address in prefetch buffer. We reserve 16 descriptors
	 * for each existing TXQ.
	 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
	 * GBE ports assumed to be continuous from 0 to MVPP2_MAX_PORTS
	 */
	desc_per_txq = 16;
	desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
	       (txq->log_id * desc_per_txq);

	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG,
			   MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
			   MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
	put_cpu();
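	/* Example: with desc_per_txq == 16, port 1 / logical queue 2 gets
	 * prefetch base (1 * MVPP2_MAX_TXQ * 16) + (2 * 16), so every TXQ
	 * owns a disjoint 16-descriptor window of the prefetch buffer.
	 */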
	/* WRR / EJP configuration - indirect access */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
	val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);

	val = MVPP2_TXQ_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
		    val);

	for (thread = 0; thread < port->priv->nthreads; thread++) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
		txq_pcpu->size = txq->size;
		txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
						sizeof(*txq_pcpu->buffs),
						GFP_KERNEL);
		if (!txq_pcpu->buffs)
			return -ENOMEM;

		txq_pcpu->count = 0;
		txq_pcpu->reserved_num = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;
		txq_pcpu->tso_headers = NULL;

		txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS;
		txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2;

		txq_pcpu->tso_headers =
			dma_alloc_coherent(port->dev->dev.parent,
					   txq_pcpu->size * TSO_HEADER_SIZE,
					   &txq_pcpu->tso_headers_dma,
					   GFP_KERNEL);
		if (!txq_pcpu->tso_headers)
			return -ENOMEM;
	}

	return 0;
}
/* Free allocated TXQ resources */
static void mvpp2_txq_deinit(struct mvpp2_port *port,
			     struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	unsigned int thread;

	for (thread = 0; thread < port->priv->nthreads; thread++) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
		kfree(txq_pcpu->buffs);

		if (txq_pcpu->tso_headers)
			dma_free_coherent(port->dev->dev.parent,
					  txq_pcpu->size * TSO_HEADER_SIZE,
					  txq_pcpu->tso_headers,
					  txq_pcpu->tso_headers_dma);

		txq_pcpu->tso_headers = NULL;
	}

	if (txq->descs)
		dma_free_coherent(port->dev->dev.parent,
				  txq->size * MVPP2_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_dma);

	txq->descs = NULL;
	txq->last_desc = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_dma = 0;

	/* Set minimum bandwidth for disabled TXQs */
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);

	/* Set Tx descriptors queue starting address and size */
	thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, 0);
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, 0);
	put_cpu();
}
/* Cleanup Tx ports */
static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	int delay, pending;
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
	u32 val;

	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG);
	val |= MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);

	/* The napi queue has been stopped so wait for all packets
	 * to be transmitted.
	 */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "port %d: cleaning queue %d timed out\n",
				    port->id, txq->log_id);
			break;
		}
		mdelay(1);
		delay++;

		pending = mvpp2_thread_read(port->priv, thread,
					    MVPP2_TXQ_PENDING_REG);
		pending &= MVPP2_TXQ_PENDING_MASK;
	} while (pending);

	val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);
	put_cpu();

	for (thread = 0; thread < port->priv->nthreads; thread++) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, thread);

		/* Release all packets */
		mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);

		/* Reset queue */
		txq_pcpu->count = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;
	}
}
/* Cleanup all Tx queues */
static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue;
	u32 val;

	val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);

	/* Reset Tx ports and delete Tx queues */
	val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);

	for (queue = 0; queue < port->ntxqs; queue++) {
		txq = port->txqs[queue];
		mvpp2_txq_clean(port, txq);
		mvpp2_txq_deinit(port, txq);
	}

	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);

	val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
}
/* Cleanup all Rx queues */
static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
{
	int queue;

	for (queue = 0; queue < port->nrxqs; queue++)
		mvpp2_rxq_deinit(port, port->rxqs[queue]);
}
/* Init all Rx queues for port */
static int mvpp2_setup_rxqs(struct mvpp2_port *port)
{
	int queue, err;

	for (queue = 0; queue < port->nrxqs; queue++) {
		err = mvpp2_rxq_init(port, port->rxqs[queue]);
		if (err)
			goto err_cleanup;
	}
	return 0;

err_cleanup:
	mvpp2_cleanup_rxqs(port);
	return err;
}
/* Init all Tx queues for port */
static int mvpp2_setup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue, err, cpu;

	for (queue = 0; queue < port->ntxqs; queue++) {
		txq = port->txqs[queue];
		err = mvpp2_txq_init(port, txq);
		if (err)
			goto err_cleanup;

		/* Assign this queue to a CPU */
		cpu = queue % num_present_cpus();
		netif_set_xps_queue(port->dev, cpumask_of(cpu), queue);
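		/* Example: with 8 Tx queues on a 4-CPU system, queues 0-3
		 * map to CPUs 0-3 and queues 4-7 wrap around to CPUs 0-3
		 * again, so XPS steers each CPU's traffic to its own queue.
		 */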
	}

	if (port->has_tx_irqs) {
		mvpp2_tx_time_coal_set(port);
		for (queue = 0; queue < port->ntxqs; queue++) {
			txq = port->txqs[queue];
			mvpp2_tx_pkts_coal_set(port, txq);
		}
	}

	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
	return 0;

err_cleanup:
	mvpp2_cleanup_txqs(port);
	return err;
}
/* The callback for per-port interrupt */
static irqreturn_t mvpp2_isr(int irq, void *dev_id)
{
	struct mvpp2_queue_vector *qv = dev_id;

	mvpp2_qvec_interrupt_disable(qv);

	napi_schedule(&qv->napi);

	return IRQ_HANDLED;
}
/* Per-port interrupt for link status changes */
static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id)
{
	struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
	struct net_device *dev = port->dev;
	bool event = false, link = false;
	u32 val;

	mvpp22_gop_mask_irq(port);

	if (port->gop_id == 0 &&
	    port->phy_interface == PHY_INTERFACE_MODE_10GKR) {
		val = readl(port->base + MVPP22_XLG_INT_STAT);
		if (val & MVPP22_XLG_INT_STAT_LINK) {
			event = true;
			val = readl(port->base + MVPP22_XLG_STATUS);
			if (val & MVPP22_XLG_STATUS_LINK_UP)
				link = true;
		}
	} else if (phy_interface_mode_is_rgmii(port->phy_interface) ||
		   port->phy_interface == PHY_INTERFACE_MODE_SGMII ||
		   port->phy_interface == PHY_INTERFACE_MODE_1000BASEX ||
		   port->phy_interface == PHY_INTERFACE_MODE_2500BASEX) {
		val = readl(port->base + MVPP22_GMAC_INT_STAT);
		if (val & MVPP22_GMAC_INT_STAT_LINK) {
			event = true;
			val = readl(port->base + MVPP2_GMAC_STATUS0);
			if (val & MVPP2_GMAC_STATUS0_LINK_UP)
				link = true;
		}
	}

	if (port->phylink) {
		phylink_mac_change(port->phylink, link);
		goto handled;
	}

	if (!netif_running(dev) || !event)
		goto handled;

	if (link) {
		mvpp2_interrupts_enable(port);

		mvpp2_egress_enable(port);
		mvpp2_ingress_enable(port);
		netif_carrier_on(dev);
		netif_tx_wake_all_queues(dev);
	} else {
		netif_tx_stop_all_queues(dev);
		netif_carrier_off(dev);
		mvpp2_ingress_disable(port);
		mvpp2_egress_disable(port);

		mvpp2_interrupts_disable(port);
	}

handled:
	mvpp22_gop_unmask_irq(port);
	return IRQ_HANDLED;
}
static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
{
	ktime_t interval;

	if (!port_pcpu->timer_scheduled) {
		port_pcpu->timer_scheduled = true;
		interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS;
		hrtimer_start(&port_pcpu->tx_done_timer, interval,
			      HRTIMER_MODE_REL_PINNED);
	}
}
static void mvpp2_tx_proc_cb(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_port_pcpu *port_pcpu;
	unsigned int tx_todo, cause;

	port_pcpu = per_cpu_ptr(port->pcpu,
				mvpp2_cpu_to_thread(port->priv, smp_processor_id()));

	if (!netif_running(dev))
		return;
	port_pcpu->timer_scheduled = false;

	/* Process all the Tx queues */
	cause = (1 << port->ntxqs) - 1;
	tx_todo = mvpp2_tx_done(port, cause,
				mvpp2_cpu_to_thread(port->priv, smp_processor_id()));

	/* Set the timer in case not all the packets were processed */
	if (tx_todo)
		mvpp2_timer_set(port_pcpu);
}
static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
{
	struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
							 struct mvpp2_port_pcpu,
							 tx_done_timer);

	tasklet_schedule(&port_pcpu->tx_done_tasklet);

	return HRTIMER_NORESTART;
}
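/* On ports without per-thread Tx-done interrupts, completion is polled:
 * mvpp2_tx() arms the pinned hrtimer, whose callback only schedules the
 * tx_done tasklet, and the tasklet (mvpp2_tx_proc_cb) walks every Tx
 * queue and re-arms the timer via mvpp2_timer_set() while work remains.
 */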
/* Main RX/TX processing routines */

/* Display more error info */
static void mvpp2_rx_error(struct mvpp2_port *port,
			   struct mvpp2_rx_desc *rx_desc)
{
	u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
	size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
	char *err_str = NULL;

	switch (status & MVPP2_RXD_ERR_CODE_MASK) {
	case MVPP2_RXD_ERR_CRC:
		err_str = "crc";
		break;
	case MVPP2_RXD_ERR_OVERRUN:
		err_str = "overrun";
		break;
	case MVPP2_RXD_ERR_RESOURCE:
		err_str = "resource";
		break;
	}
	if (err_str && net_ratelimit())
		netdev_err(port->dev,
			   "bad rx status %08x (%s error), size=%zu\n",
			   status, err_str, sz);
}
/* Handle RX checksum offload */
static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
			  struct sk_buff *skb)
{
	if (((status & MVPP2_RXD_L3_IP4) &&
	     !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
	    (status & MVPP2_RXD_L3_IP6))
		if (((status & MVPP2_RXD_L4_UDP) ||
		     (status & MVPP2_RXD_L4_TCP)) &&
		     (status & MVPP2_RXD_L4_CSUM_OK)) {
			skb->csum = 0;
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			return;
		}

	skb->ip_summed = CHECKSUM_NONE;
}
/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
static int mvpp2_rx_refill(struct mvpp2_port *port,
			   struct mvpp2_bm_pool *bm_pool, int pool)
{
	dma_addr_t dma_addr;
	phys_addr_t phys_addr;
	void *buf;

	/* No recycle or too many buffers are in use, so allocate a new skb */
	buf = mvpp2_buf_alloc(port, bm_pool, &dma_addr, &phys_addr,
			      GFP_ATOMIC);
	if (!buf)
		return -ENOMEM;

	mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);

	return 0;
}
/* Handle tx checksum */
static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int ip_hdr_len = 0;
		u8 l4_proto;
		__be16 l3_proto = vlan_get_protocol(skb);

		if (l3_proto == htons(ETH_P_IP)) {
			struct iphdr *ip4h = ip_hdr(skb);

			/* Calculate IPv4 checksum and L4 checksum */
			ip_hdr_len = ip4h->ihl;
			l4_proto = ip4h->protocol;
		} else if (l3_proto == htons(ETH_P_IPV6)) {
			struct ipv6hdr *ip6h = ipv6_hdr(skb);

			/* Read l4_protocol from one of IPv6 extra headers */
			if (skb_network_header_len(skb) > 0)
				ip_hdr_len = (skb_network_header_len(skb) >> 2);
			l4_proto = ip6h->nexthdr;
		} else {
			return MVPP2_TXD_L4_CSUM_NOT;
		}

		return mvpp2_txq_desc_csum(skb_network_offset(skb),
					   l3_proto, ip_hdr_len, l4_proto);
	}

	return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
}
/* Main rx processing */
static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
		    int rx_todo, struct mvpp2_rx_queue *rxq)
{
	struct net_device *dev = port->dev;
	int rx_received;
	int rx_done = 0;
	u32 rcvd_pkts = 0;
	u32 rcvd_bytes = 0;

	/* Get number of received packets and clamp the to-do */
	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (rx_todo > rx_received)
		rx_todo = rx_received;

	while (rx_done < rx_todo) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		struct mvpp2_bm_pool *bm_pool;
		struct sk_buff *skb;
		unsigned int frag_size;
		dma_addr_t dma_addr;
		phys_addr_t phys_addr;
		u32 rx_status;
		int pool, rx_bytes, err;
		void *data;

		rx_done++;
		rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
		rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
		rx_bytes -= MVPP2_MH_SIZE;
		dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
		phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
		data = (void *)phys_to_virt(phys_addr);

		pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
			MVPP2_RXD_BM_POOL_ID_OFFS;
		bm_pool = &port->priv->bm_pools[pool];

		/* In case of an error, release the requested buffer pointer
		 * to the Buffer Manager. This request process is controlled
		 * by the hardware, and the information about the buffer is
		 * comprised by the RX descriptor.
		 */
		if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
err_drop_frame:
			dev->stats.rx_errors++;
			mvpp2_rx_error(port, rx_desc);
			/* Return the buffer to the pool */
			mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
			continue;
		}

		if (bm_pool->frag_size > PAGE_SIZE)
			frag_size = 0;
		else
			frag_size = bm_pool->frag_size;

		skb = build_skb(data, frag_size);
		if (!skb) {
			netdev_warn(port->dev, "skb build failed\n");
			goto err_drop_frame;
		}

		err = mvpp2_rx_refill(port, bm_pool, pool);
		if (err) {
			netdev_err(port->dev, "failed to refill BM pools\n");
			goto err_drop_frame;
		}

		dma_unmap_single(dev->dev.parent, dma_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);

		rcvd_pkts++;
		rcvd_bytes += rx_bytes;

		skb_reserve(skb, MVPP2_MH_SIZE + NET_SKB_PAD);
		skb_put(skb, rx_bytes);
		skb->protocol = eth_type_trans(skb, dev);
		mvpp2_rx_csum(port, rx_status, skb);

		napi_gro_receive(napi, skb);
	}

	if (rcvd_pkts) {
		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets += rcvd_pkts;
		stats->rx_bytes += rcvd_bytes;
		u64_stats_update_end(&stats->syncp);
	}

	/* Update Rx queue management counters */
	wmb();
	mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);

	return rx_done;
}
static void tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
			      struct mvpp2_tx_desc *desc)
{
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
	struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);

	dma_addr_t buf_dma_addr =
		mvpp2_txdesc_dma_addr_get(port, desc);
	size_t buf_sz =
		mvpp2_txdesc_size_get(port, desc);
	if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr))
		dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
				 buf_sz, DMA_TO_DEVICE);
	mvpp2_txq_desc_put(txq);
}
/* Handle tx fragmentation processing */
static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
				 struct mvpp2_tx_queue *aggr_txq,
				 struct mvpp2_tx_queue *txq)
{
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
	struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
	struct mvpp2_tx_desc *tx_desc;
	int i;
	dma_addr_t buf_dma_addr;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		void *addr = page_address(frag->page.p) + frag->page_offset;

		tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
		mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
		mvpp2_txdesc_size_set(port, tx_desc, frag->size);

		buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
					      frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
			mvpp2_txq_desc_put(txq);
			goto cleanup;
		}

		mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);

		if (i == (skb_shinfo(skb)->nr_frags - 1)) {
			/* Last descriptor */
			mvpp2_txdesc_cmd_set(port, tx_desc,
					     MVPP2_TXD_L_DESC);
			mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
		} else {
			/* Descriptor in the middle: Not First, Not Last */
			mvpp2_txdesc_cmd_set(port, tx_desc, 0);
			mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
		}
	}

	return 0;

cleanup:
	/* Release all descriptors that were used to map fragments of
	 * this packet, as well as the corresponding DMA mappings
	 */
	for (i = i - 1; i >= 0; i--) {
		tx_desc = txq->descs + i;
		tx_desc_unmap_put(port, txq, tx_desc);
	}

	return -ENOMEM;
}
static inline void mvpp2_tso_put_hdr(struct sk_buff *skb,
				     struct net_device *dev,
				     struct mvpp2_tx_queue *txq,
				     struct mvpp2_tx_queue *aggr_txq,
				     struct mvpp2_txq_pcpu *txq_pcpu,
				     int hdr_sz)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	dma_addr_t addr;

	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, hdr_sz);

	addr = txq_pcpu->tso_headers_dma +
	       txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
	mvpp2_txdesc_dma_addr_set(port, tx_desc, addr);

	mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) |
					    MVPP2_TXD_F_DESC |
					    MVPP2_TXD_PADDING_DISABLE);
	mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
}
static inline int mvpp2_tso_put_data(struct sk_buff *skb,
				     struct net_device *dev, struct tso_t *tso,
				     struct mvpp2_tx_queue *txq,
				     struct mvpp2_tx_queue *aggr_txq,
				     struct mvpp2_txq_pcpu *txq_pcpu,
				     int sz, bool left, bool last)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	dma_addr_t buf_dma_addr;

	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, sz);

	buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz,
				      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
		mvpp2_txq_desc_put(txq);
		return -ENOMEM;
	}

	mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);

	if (!left) {
		mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC);
		if (last) {
			/* last descriptor in the TCP packet */
			mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
			return 0;
		}
	} else {
		mvpp2_txdesc_cmd_set(port, tx_desc, 0);
	}

	mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);
	return 0;
}
static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev,
			struct mvpp2_tx_queue *txq,
			struct mvpp2_tx_queue *aggr_txq,
			struct mvpp2_txq_pcpu *txq_pcpu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct tso_t tso;
	int hdr_sz = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int i, len, descs = 0;

	/* Check number of available descriptors */
	if (mvpp2_aggr_desc_num_check(port, aggr_txq, tso_count_descs(skb)) ||
	    mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu,
					     tso_count_descs(skb)))
		return 0;

	tso_start(skb, &tso);
	len = skb->len - hdr_sz;
	while (len > 0) {
		int left = min_t(int, skb_shinfo(skb)->gso_size, len);
		char *hdr = txq_pcpu->tso_headers +
			    txq_pcpu->txq_put_index * TSO_HEADER_SIZE;

		len -= left;
		descs++;

		tso_build_hdr(skb, hdr, &tso, left, len == 0);
		mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz);

		while (left > 0) {
			int sz = min_t(int, tso.size, left);
			left -= sz;
			descs++;

			if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq,
					       txq_pcpu, sz, left, len == 0))
				goto release;
			tso_build_data(skb, &tso, sz);
		}
	}

	return descs;

release:
	for (i = descs - 1; i >= 0; i--) {
		struct mvpp2_tx_desc *tx_desc = txq->descs + i;
		tx_desc_unmap_put(port, txq, tx_desc);
	}
	return 0;
}
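/* With software TSO, each segment is emitted as one header descriptor
 * (built in the per-thread tso_headers DMA area) followed by one or more
 * payload descriptors. As a rough illustration, a 64 KiB send with a
 * 1448-byte MSS expands to about 45 header/payload descriptor groups on
 * the aggregated queue.
 */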
/* Main tx processing */
static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_tx_queue *txq, *aggr_txq;
	struct mvpp2_txq_pcpu *txq_pcpu;
	struct mvpp2_tx_desc *tx_desc;
	dma_addr_t buf_dma_addr;
	unsigned long flags = 0;
	unsigned int thread;
	u16 txq_id;
	u32 tx_cmd;
	int frags = 0;

	thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());

	txq_id = skb_get_queue_mapping(skb);
	txq = port->txqs[txq_id];
	txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
	aggr_txq = &port->priv->aggr_txqs[thread];

	if (test_bit(thread, &port->priv->lock_map))
		spin_lock_irqsave(&port->tx_lock[thread], flags);

	if (skb_is_gso(skb)) {
		frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu);
		goto out;
	}
	frags = skb_shinfo(skb)->nr_frags + 1;

	/* Check number of available descriptors */
	if (mvpp2_aggr_desc_num_check(port, aggr_txq, frags) ||
	    mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, frags)) {
		frags = 0;
		goto out;
	}

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));

	buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
				      skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
		mvpp2_txq_desc_put(txq);
		frags = 0;
		goto out;
	}

	mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);

	tx_cmd = mvpp2_skb_tx_csum(port, skb);

	if (frags == 1) {
		/* First and Last descriptor */
		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
		mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
		mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc);
	} else {
		/* First but not Last */
		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
		mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
		mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc);

		/* Continue with other skb fragments */
		if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
			tx_desc_unmap_put(port, txq, tx_desc);
			frags = 0;
		}
	}

out:
	if (frags > 0) {
		struct mvpp2_pcpu_stats *stats = per_cpu_ptr(port->stats, thread);
		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);

		txq_pcpu->reserved_num -= frags;
		txq_pcpu->count += frags;
		aggr_txq->count += frags;

		/* Enable transmit */
		wmb();
		mvpp2_aggr_txq_pend_desc_add(port, frags);

		if (txq_pcpu->count >= txq_pcpu->stop_threshold)
			netif_tx_stop_queue(nq);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += skb->len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	/* Finalize TX processing */
	if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
		mvpp2_txq_done(port, txq, txq_pcpu);

	/* Set the timer in case not all frags were processed */
	if (!port->has_tx_irqs && txq_pcpu->count <= frags &&
	    txq_pcpu->count > 0) {
		struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu, thread);

		mvpp2_timer_set(port_pcpu);
	}

	if (test_bit(thread, &port->priv->lock_map))
		spin_unlock_irqrestore(&port->tx_lock[thread], flags);

	return NETDEV_TX_OK;
}
static inline void mvpp2_cause_error(struct net_device *dev, int cause)
{
	if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
		netdev_err(dev, "FCS error\n");
	if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
		netdev_err(dev, "rx fifo overrun error\n");
	if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
		netdev_err(dev, "tx fifo underrun error\n");
}
static int mvpp2_poll(struct napi_struct *napi, int budget)
{
	u32 cause_rx_tx, cause_rx, cause_tx, cause_misc;
	int rx_done = 0;
	struct mvpp2_port *port = netdev_priv(napi->dev);
	struct mvpp2_queue_vector *qv;
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());

	qv = container_of(napi, struct mvpp2_queue_vector, napi);

	/* Rx/Tx cause register
	 *
	 * Bits 0-15: each bit indicates received packets on the Rx queue
	 * (bit 0 is for Rx queue 0).
	 *
	 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
	 * (bit 16 is for Tx queue 0).
	 *
	 * Each CPU has its own Rx/Tx cause register
	 */
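	/* Example: a cause value of 0x00030005 reports pending work on Rx
	 * queues 0 and 2 (bits 0 and 2) and on Tx queues 0 and 1 (bits 16
	 * and 17).
	 */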
	cause_rx_tx = mvpp2_thread_read_relaxed(port->priv, qv->sw_thread_id,
						MVPP2_ISR_RX_TX_CAUSE_REG(port->id));

	cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
	if (cause_misc) {
		mvpp2_cause_error(port->dev, cause_misc);

		/* Clear the cause register */
		mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
		mvpp2_thread_write(port->priv, thread,
				   MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
				   cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
	}

	if (port->has_tx_irqs) {
		cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
		if (cause_tx) {
			cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
			mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
		}
	}

	/* Process RX packets */
	cause_rx = cause_rx_tx &
		   MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
	cause_rx <<= qv->first_rxq;
	cause_rx |= qv->pending_cause_rx;
	while (cause_rx && budget > 0) {
		int count;
		struct mvpp2_rx_queue *rxq;

		rxq = mvpp2_get_rx_queue(port, cause_rx);
		if (!rxq)
			break;

		count = mvpp2_rx(port, napi, budget, rxq);
		rx_done += count;
		budget -= count;
		if (budget > 0) {
			/* Clear the bit associated to this Rx queue
			 * so that next iteration will continue from
			 * the next Rx queue.
			 */
			cause_rx &= ~(1 << rxq->logic_rxq);
		}
	}

	if (budget > 0) {
		cause_rx = 0;
		rx_done = budget;

		napi_complete_done(napi, rx_done);

		mvpp2_qvec_interrupt_enable(qv);
	}
	qv->pending_cause_rx = cause_rx;
	return rx_done;
}
static void mvpp22_mode_reconfigure(struct mvpp2_port *port)
{
	u32 ctrl3;

	/* comphy reconfiguration */
	mvpp22_comphy_init(port);

	/* gop reconfiguration */
	mvpp22_gop_init(port);

	/* Only GOP port 0 has an XLG MAC */
	if (port->gop_id == 0) {
		ctrl3 = readl(port->base + MVPP22_XLG_CTRL3_REG);
		ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;

		if (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
		    port->phy_interface == PHY_INTERFACE_MODE_10GKR)
			ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
		else
			ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;

		writel(ctrl3, port->base + MVPP22_XLG_CTRL3_REG);
	}

	if (port->gop_id == 0 &&
	    (port->phy_interface == PHY_INTERFACE_MODE_XAUI ||
	     port->phy_interface == PHY_INTERFACE_MODE_10GKR))
		mvpp2_xlg_max_rx_size_set(port);
	else
		mvpp2_gmac_max_rx_size_set(port);
}
/* Set hw internals when starting port */
static void mvpp2_start_dev(struct mvpp2_port *port)
{
	int i;

	mvpp2_txp_max_tx_size_set(port);

	for (i = 0; i < port->nqvecs; i++)
		napi_enable(&port->qvecs[i].napi);

	/* Enable interrupts on all threads */
	mvpp2_interrupts_enable(port);

	if (port->priv->hw_version == MVPP22)
		mvpp22_mode_reconfigure(port);

	if (port->phylink) {
		phylink_start(port->phylink);
	} else {
		/* Phylink isn't used as of now for ACPI, so the MAC has to be
		 * configured manually when the interface is started. This will
		 * be removed as soon as the phylink ACPI support lands in.
		 */
		struct phylink_link_state state = {
			.interface = port->phy_interface,
		};
		mvpp2_mac_config(port->dev, MLO_AN_INBAND, &state);
		mvpp2_mac_link_up(port->dev, MLO_AN_INBAND, port->phy_interface,
				  NULL);
	}

	netif_tx_start_all_queues(port->dev);
}
/* Set hw internals when stopping port */
static void mvpp2_stop_dev(struct mvpp2_port *port)
{
	int i;

	/* Disable interrupts on all threads */
	mvpp2_interrupts_disable(port);

	for (i = 0; i < port->nqvecs; i++)
		napi_disable(&port->qvecs[i].napi);

	if (port->phylink)
		phylink_stop(port->phylink);
	phy_power_off(port->comphy);
}
static int mvpp2_check_ringparam_valid(struct net_device *dev,
				       struct ethtool_ringparam *ring)
{
	u16 new_rx_pending = ring->rx_pending;
	u16 new_tx_pending = ring->tx_pending;

	if (ring->rx_pending == 0 || ring->tx_pending == 0)
		return -EINVAL;

	if (ring->rx_pending > MVPP2_MAX_RXD_MAX)
		new_rx_pending = MVPP2_MAX_RXD_MAX;
	else if (!IS_ALIGNED(ring->rx_pending, 16))
		new_rx_pending = ALIGN(ring->rx_pending, 16);

	if (ring->tx_pending > MVPP2_MAX_TXD_MAX)
		new_tx_pending = MVPP2_MAX_TXD_MAX;
	else if (!IS_ALIGNED(ring->tx_pending, 32))
		new_tx_pending = ALIGN(ring->tx_pending, 32);

	/* The Tx ring size cannot be smaller than the minimum number of
	 * descriptors needed for TSO.
	 */
	if (new_tx_pending < MVPP2_MAX_SKB_DESCS)
		new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32);

	if (ring->rx_pending != new_rx_pending) {
		netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
			    ring->rx_pending, new_rx_pending);
		ring->rx_pending = new_rx_pending;
	}

	if (ring->tx_pending != new_tx_pending) {
		netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
			    ring->tx_pending, new_tx_pending);
		ring->tx_pending = new_tx_pending;
	}

	return 0;
}
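/* Example: a requested Rx ring of 100 descriptors is rounded up to 112
 * (the next multiple of 16) and a Tx ring of 1000 to 1024 (the next
 * multiple of 32), before the TSO minimum is enforced on the Tx side.
 */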
static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
{
	u32 mac_addr_l, mac_addr_m, mac_addr_h;

	mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
	mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
	mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
	addr[0] = (mac_addr_h >> 24) & 0xFF;
	addr[1] = (mac_addr_h >> 16) & 0xFF;
	addr[2] = (mac_addr_h >> 8) & 0xFF;
	addr[3] = mac_addr_h & 0xFF;
	addr[4] = mac_addr_m & 0xFF;
	addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
}
static int mvpp2_irqs_init(struct mvpp2_port *port)
{
	int err, i;

	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *qv = port->qvecs + i;

		if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
			qv->mask = kzalloc(cpumask_size(), GFP_KERNEL);
			if (!qv->mask) {
				err = -ENOMEM;
				goto err;
			}

			irq_set_status_flags(qv->irq, IRQ_NO_BALANCING);
		}

		err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
		if (err)
			goto err;

		if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
			unsigned int cpu;

			for_each_present_cpu(cpu) {
				if (mvpp2_cpu_to_thread(port->priv, cpu) ==
				    qv->sw_thread_id)
					cpumask_set_cpu(cpu, qv->mask);
			}

			irq_set_affinity_hint(qv->irq, qv->mask);
		}
	}

	return 0;
err:
	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *qv = port->qvecs + i;

		irq_set_affinity_hint(qv->irq, NULL);
		kfree(qv->mask);
		qv->mask = NULL;
		free_irq(qv->irq, qv);
	}

	return err;
}
static void mvpp2_irqs_deinit(struct mvpp2_port *port)
{
	int i;

	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *qv = port->qvecs + i;

		irq_set_affinity_hint(qv->irq, NULL);
		kfree(qv->mask);
		qv->mask = NULL;
		irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING);
		free_irq(qv->irq, qv);
	}
}
static bool mvpp22_rss_is_supported(void)
{
	return queue_mode == MVPP2_QDIST_MULTI_MODE;
}
static int mvpp2_open(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2 *priv = port->priv;
	unsigned char mac_bcast[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	bool valid = false;
	int err;

	err = mvpp2_prs_mac_da_accept(port, mac_bcast, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
		return err;
	}
	err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n");
		return err;
	}
	err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
	if (err) {
		netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
		return err;
	}
	err = mvpp2_prs_def_flow(port);
	if (err) {
		netdev_err(dev, "mvpp2_prs_def_flow failed\n");
		return err;
	}

	/* Allocate the Rx/Tx queues */
	err = mvpp2_setup_rxqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Rx queues\n");
		return err;
	}

	err = mvpp2_setup_txqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Tx queues\n");
		goto err_cleanup_rxqs;
	}

	err = mvpp2_irqs_init(port);
	if (err) {
		netdev_err(port->dev, "cannot init IRQs\n");
		goto err_cleanup_txqs;
	}

	/* Phylink isn't supported yet in ACPI mode */
	if (port->of_node) {
		err = phylink_of_phy_connect(port->phylink, port->of_node, 0);
		if (err) {
			netdev_err(port->dev, "could not attach PHY (%d)\n",
				   err);
			goto err_free_irq;
		}

		valid = true;
	}

	if (priv->hw_version == MVPP22 && port->link_irq && !port->phylink) {
		err = request_irq(port->link_irq, mvpp2_link_status_isr, 0,
				  dev->name, port);
		if (err) {
			netdev_err(port->dev, "cannot request link IRQ %d\n",
				   port->link_irq);
			goto err_free_irq;
		}

		mvpp22_gop_setup_irq(port);

		/* In default link is down */
		netif_carrier_off(port->dev);

		valid = true;
	} else {
		port->link_irq = 0;
	}

	if (!valid) {
		netdev_err(port->dev,
			   "invalid configuration: no dt or link IRQ");
		goto err_free_irq;
	}

	/* Unmask interrupts on all CPUs */
	on_each_cpu(mvpp2_interrupts_unmask, port, 1);
	mvpp2_shared_interrupt_mask_unmask(port, false);

	mvpp2_start_dev(port);

	/* Start hardware statistics gathering */
	queue_delayed_work(priv->stats_queue, &port->stats_work,
			   MVPP2_MIB_COUNTERS_STATS_DELAY);

	return 0;

err_free_irq:
	mvpp2_irqs_deinit(port);
err_cleanup_txqs:
	mvpp2_cleanup_txqs(port);
err_cleanup_rxqs:
	mvpp2_cleanup_rxqs(port);
	return err;
}
static int mvpp2_stop(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	struct mvpp2_port_pcpu *port_pcpu;
	unsigned int thread;

	mvpp2_stop_dev(port);

	/* Mask interrupts on all threads */
	on_each_cpu(mvpp2_interrupts_mask, port, 1);
	mvpp2_shared_interrupt_mask_unmask(port, true);

	if (port->phylink)
		phylink_disconnect_phy(port->phylink);
	if (port->link_irq)
		free_irq(port->link_irq, port);

	mvpp2_irqs_deinit(port);
	if (!port->has_tx_irqs) {
		for (thread = 0; thread < port->priv->nthreads; thread++) {
			port_pcpu = per_cpu_ptr(port->pcpu, thread);

			hrtimer_cancel(&port_pcpu->tx_done_timer);
			port_pcpu->timer_scheduled = false;
			tasklet_kill(&port_pcpu->tx_done_tasklet);
		}
	}
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);

	cancel_delayed_work_sync(&port->stats_work);

	return 0;
}
static int mvpp2_prs_mac_da_accept_list(struct mvpp2_port *port,
					struct netdev_hw_addr_list *list)
{
	struct netdev_hw_addr *ha;
	int ret;

	netdev_hw_addr_list_for_each(ha, list) {
		ret = mvpp2_prs_mac_da_accept(port, ha->addr, true);
		if (ret)
			return ret;
	}

	return 0;
}
static void mvpp2_set_rx_promisc(struct mvpp2_port *port, bool enable)
{
	if (!enable && (port->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
		mvpp2_prs_vid_enable_filtering(port);
	else
		mvpp2_prs_vid_disable_filtering(port);

	mvpp2_prs_mac_promisc_set(port->priv, port->id,
				  MVPP2_PRS_L2_UNI_CAST, enable);

	mvpp2_prs_mac_promisc_set(port->priv, port->id,
				  MVPP2_PRS_L2_MULTI_CAST, enable);
}
static void mvpp2_set_rx_mode(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);

	/* Clear the whole UC and MC list */
	mvpp2_prs_mac_del_all(port);

	if (dev->flags & IFF_PROMISC) {
		mvpp2_set_rx_promisc(port, true);
		return;
	}

	mvpp2_set_rx_promisc(port, false);

	if (netdev_uc_count(dev) > MVPP2_PRS_MAC_UC_FILT_MAX ||
	    mvpp2_prs_mac_da_accept_list(port, &dev->uc))
		mvpp2_prs_mac_promisc_set(port->priv, port->id,
					  MVPP2_PRS_L2_UNI_CAST, true);

	if (dev->flags & IFF_ALLMULTI) {
		mvpp2_prs_mac_promisc_set(port->priv, port->id,
					  MVPP2_PRS_L2_MULTI_CAST, true);
		return;
	}

	if (netdev_mc_count(dev) > MVPP2_PRS_MAC_MC_FILT_MAX ||
	    mvpp2_prs_mac_da_accept_list(port, &dev->mc))
		mvpp2_prs_mac_promisc_set(port->priv, port->id,
					  MVPP2_PRS_L2_MULTI_CAST, true);
}
static int mvpp2_set_mac_address(struct net_device *dev, void *p)
{
	const struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
	if (err) {
		/* Reconfigure parser to accept the original MAC address */
		mvpp2_prs_update_mac_da(dev, dev->dev_addr);
		netdev_err(dev, "failed to change MAC address\n");
	}
	return err;
}
static int mvpp2_change_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int err;

	if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
		netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
			    ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
		mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
	}

	if (!netif_running(dev)) {
		err = mvpp2_bm_update_mtu(dev, mtu);
		if (!err) {
			port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
			return 0;
		}

		/* Reconfigure BM to the original MTU */
		err = mvpp2_bm_update_mtu(dev, dev->mtu);
		if (err)
			goto log_error;
	}

	mvpp2_stop_dev(port);

	err = mvpp2_bm_update_mtu(dev, mtu);
	if (!err) {
		port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
		goto out_start;
	}

	/* Reconfigure BM to the original MTU */
	err = mvpp2_bm_update_mtu(dev, dev->mtu);
	if (err)
		goto log_error;

out_start:
	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);

	return 0;
log_error:
	netdev_err(dev, "failed to change MTU\n");
	return err;
}
static void
mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mvpp2_port *port = netdev_priv(dev);
	unsigned int start;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct mvpp2_pcpu_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 tx_packets;
		u64 tx_bytes;

		cpu_stats = per_cpu_ptr(port->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rx_packets = cpu_stats->rx_packets;
			rx_bytes = cpu_stats->rx_bytes;
			tx_packets = cpu_stats->tx_packets;
			tx_bytes = cpu_stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
	}

	stats->rx_errors = dev->stats.rx_errors;
	stats->rx_dropped = dev->stats.rx_dropped;
	stats->tx_dropped = dev->stats.tx_dropped;
}
static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phylink)
		return -ENOTSUPP;

	return phylink_mii_ioctl(port->phylink, ifr, cmd);
}
static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int ret;

	ret = mvpp2_prs_vid_entry_add(port, vid);
	if (ret)
		netdev_err(dev, "rx-vlan-filter offloading cannot accept more than %d VIDs per port\n",
			   MVPP2_PRS_VLAN_FILT_MAX - 1);
	return ret;
}

static int mvpp2_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct mvpp2_port *port = netdev_priv(dev);

	mvpp2_prs_vid_entry_remove(port, vid);
	return 0;
}
static int mvpp2_set_features(struct net_device *dev,
			      netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;
	struct mvpp2_port *port = netdev_priv(dev);

	if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
		if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
			mvpp2_prs_vid_enable_filtering(port);
		} else {
			/* Invalidate all registered VID filters for this
			 * port
			 */
			mvpp2_prs_vid_remove_all(port);

			mvpp2_prs_vid_disable_filtering(port);
		}
	}

	if (changed & NETIF_F_RXHASH) {
		if (features & NETIF_F_RXHASH)
			mvpp22_rss_enable(port);
		else
			mvpp22_rss_disable(port);
	}

	return 0;
}
/* Ethtool methods */

static int mvpp2_ethtool_nway_reset(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phylink)
		return -ENOTSUPP;

	return phylink_ethtool_nway_reset(port->phylink);
}
/* Set interrupt coalescing for ethtools */
static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
				      struct ethtool_coalesce *c)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int queue;

	for (queue = 0; queue < port->nrxqs; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->time_coal = c->rx_coalesce_usecs;
		rxq->pkts_coal = c->rx_max_coalesced_frames;
		mvpp2_rx_pkts_coal_set(port, rxq);
		mvpp2_rx_time_coal_set(port, rxq);
	}

	if (port->has_tx_irqs) {
		port->tx_time_coal = c->tx_coalesce_usecs;
		mvpp2_tx_time_coal_set(port);
	}

	for (queue = 0; queue < port->ntxqs; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		txq->done_pkts_coal = c->tx_max_coalesced_frames;

		if (port->has_tx_irqs)
			mvpp2_tx_pkts_coal_set(port, txq);
	}

	return 0;
}
/* Get coalescing for ethtools */
static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
				      struct ethtool_coalesce *c)
{
	struct mvpp2_port *port = netdev_priv(dev);

	c->rx_coalesce_usecs = port->rxqs[0]->time_coal;
	c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
	c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
	c->tx_coalesce_usecs = port->tx_time_coal;
	return 0;
}
static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
		sizeof(drvinfo->bus_info));
}
static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
					struct ethtool_ringparam *ring)
{
	struct mvpp2_port *port = netdev_priv(dev);

	ring->rx_max_pending = MVPP2_MAX_RXD_MAX;
	ring->tx_max_pending = MVPP2_MAX_TXD_MAX;
	ring->rx_pending = port->rx_ring_size;
	ring->tx_pending = port->tx_ring_size;
}
static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
				       struct ethtool_ringparam *ring)
{
	struct mvpp2_port *port = netdev_priv(dev);
	u16 prev_rx_ring_size = port->rx_ring_size;
	u16 prev_tx_ring_size = port->tx_ring_size;
	int err;

	err = mvpp2_check_ringparam_valid(dev, ring);
	if (err)
		return err;

	if (!netif_running(dev)) {
		port->rx_ring_size = ring->rx_pending;
		port->tx_ring_size = ring->tx_pending;
		return 0;
	}

	/* The interface is running, so we have to force a
	 * reallocation of the queues
	 */
	mvpp2_stop_dev(port);
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);

	port->rx_ring_size = ring->rx_pending;
	port->tx_ring_size = ring->tx_pending;

	err = mvpp2_setup_rxqs(port);
	if (err) {
		/* Reallocate Rx queues with the original ring size */
		port->rx_ring_size = prev_rx_ring_size;
		ring->rx_pending = prev_rx_ring_size;
		err = mvpp2_setup_rxqs(port);
		if (err)
			goto err_out;
	}
	err = mvpp2_setup_txqs(port);
	if (err) {
		/* Reallocate Tx queues with the original ring size */
		port->tx_ring_size = prev_tx_ring_size;
		ring->tx_pending = prev_tx_ring_size;
		err = mvpp2_setup_txqs(port);
		if (err)
			goto err_clean_rxqs;
	}

	mvpp2_start_dev(port);
	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);

	return 0;

err_clean_rxqs:
	mvpp2_cleanup_rxqs(port);
err_out:
	netdev_err(dev, "failed to change ring parameters");
	return err;
}
static void mvpp2_ethtool_get_pause_param(struct net_device *dev,
					  struct ethtool_pauseparam *pause)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phylink)
		return;

	phylink_ethtool_get_pauseparam(port->phylink, pause);
}

static int mvpp2_ethtool_set_pause_param(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phylink)
		return -ENOTSUPP;

	return phylink_ethtool_set_pauseparam(port->phylink, pause);
}
static int mvpp2_ethtool_get_link_ksettings(struct net_device *dev,
					    struct ethtool_link_ksettings *cmd)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phylink)
		return -ENOTSUPP;

	return phylink_ethtool_ksettings_get(port->phylink, cmd);
}

static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev,
					    const struct ethtool_link_ksettings *cmd)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!port->phylink)
		return -ENOTSUPP;

	return phylink_ethtool_ksettings_set(port->phylink, cmd);
}
static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
				   struct ethtool_rxnfc *info, u32 *rules)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int ret = 0;

	if (!mvpp22_rss_is_supported())
		return -EOPNOTSUPP;

	switch (info->cmd) {
	case ETHTOOL_GRXFH:
		ret = mvpp2_ethtool_rxfh_get(port, info);
		break;
	case ETHTOOL_GRXRINGS:
		info->data = port->nrxqs;
		break;
	default:
		return -ENOTSUPP;
	}

	return ret;
}

static int mvpp2_ethtool_set_rxnfc(struct net_device *dev,
				   struct ethtool_rxnfc *info)
{
	struct mvpp2_port *port = netdev_priv(dev);
	int ret = 0;

	if (!mvpp22_rss_is_supported())
		return -EOPNOTSUPP;

	switch (info->cmd) {
	case ETHTOOL_SRXFH:
		ret = mvpp2_ethtool_rxfh_set(port, info);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}
static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev)
{
	return mvpp22_rss_is_supported() ? MVPP22_RSS_TABLE_ENTRIES : 0;
}

static int mvpp2_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
				  u8 *hfunc)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!mvpp22_rss_is_supported())
		return -EOPNOTSUPP;

	if (indir)
		memcpy(indir, port->indir,
		       ARRAY_SIZE(port->indir) * sizeof(port->indir[0]));

	if (hfunc)
		*hfunc = ETH_RSS_HASH_CRC32;

	return 0;
}

static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
				  const u8 *key, const u8 hfunc)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (!mvpp22_rss_is_supported())
		return -EOPNOTSUPP;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
		return -EOPNOTSUPP;

	if (key)
		return -EOPNOTSUPP;

	if (indir) {
		memcpy(port->indir, indir,
		       ARRAY_SIZE(port->indir) * sizeof(port->indir[0]));
		mvpp22_rss_fill_table(port, port->id);
	}

	return 0;
}
static const struct net_device_ops mvpp2_netdev_ops = {
	.ndo_open		= mvpp2_open,
	.ndo_stop		= mvpp2_stop,
	.ndo_start_xmit		= mvpp2_tx,
	.ndo_set_rx_mode	= mvpp2_set_rx_mode,
	.ndo_set_mac_address	= mvpp2_set_mac_address,
	.ndo_change_mtu		= mvpp2_change_mtu,
	.ndo_get_stats64	= mvpp2_get_stats64,
	.ndo_do_ioctl		= mvpp2_ioctl,
	.ndo_vlan_rx_add_vid	= mvpp2_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= mvpp2_vlan_rx_kill_vid,
	.ndo_set_features	= mvpp2_set_features,
};

static const struct ethtool_ops mvpp2_eth_tool_ops = {
	.nway_reset		= mvpp2_ethtool_nway_reset,
	.get_link		= ethtool_op_get_link,
	.set_coalesce		= mvpp2_ethtool_set_coalesce,
	.get_coalesce		= mvpp2_ethtool_get_coalesce,
	.get_drvinfo		= mvpp2_ethtool_get_drvinfo,
	.get_ringparam		= mvpp2_ethtool_get_ringparam,
	.set_ringparam		= mvpp2_ethtool_set_ringparam,
	.get_strings		= mvpp2_ethtool_get_strings,
	.get_ethtool_stats	= mvpp2_ethtool_get_stats,
	.get_sset_count		= mvpp2_ethtool_get_sset_count,
	.get_pauseparam		= mvpp2_ethtool_get_pause_param,
	.set_pauseparam		= mvpp2_ethtool_set_pause_param,
	.get_link_ksettings	= mvpp2_ethtool_get_link_ksettings,
	.set_link_ksettings	= mvpp2_ethtool_set_link_ksettings,
	.get_rxnfc		= mvpp2_ethtool_get_rxnfc,
	.set_rxnfc		= mvpp2_ethtool_set_rxnfc,
	.get_rxfh_indir_size	= mvpp2_ethtool_get_rxfh_indir_size,
	.get_rxfh		= mvpp2_ethtool_get_rxfh,
	.set_rxfh		= mvpp2_ethtool_set_rxfh,
};
/* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
 * had a single IRQ defined per-port.
 */
static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port,
					   struct device_node *port_node)
{
	struct mvpp2_queue_vector *v = &port->qvecs[0];

	v->first_rxq = 0;
	v->nrxqs = port->nrxqs;
	v->type = MVPP2_QUEUE_VECTOR_SHARED;
	v->sw_thread_id = 0;
	v->sw_thread_mask = *cpumask_bits(cpu_online_mask);
	v->port = port;
	v->irq = irq_of_parse_and_map(port_node, 0);
	if (v->irq <= 0)
		return -EINVAL;
	netif_napi_add(port->dev, &v->napi, mvpp2_poll,
		       NAPI_POLL_WEIGHT);

	port->nqvecs = 1;

	return 0;
}
static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
					  struct device_node *port_node)
{
	struct mvpp2 *priv = port->priv;
	struct mvpp2_queue_vector *v;
	int i, ret;

	switch (queue_mode) {
	case MVPP2_QDIST_SINGLE_MODE:
		port->nqvecs = priv->nthreads + 1;
		break;
	case MVPP2_QDIST_MULTI_MODE:
		port->nqvecs = priv->nthreads;
		break;
	}

	for (i = 0; i < port->nqvecs; i++) {
		char irqname[16];

		v = port->qvecs + i;

		v->port = port;
		v->type = MVPP2_QUEUE_VECTOR_PRIVATE;
		v->sw_thread_id = i;
		v->sw_thread_mask = BIT(i);

		if (port->flags & MVPP2_F_DT_COMPAT)
			snprintf(irqname, sizeof(irqname), "tx-cpu%d", i);
		else
			snprintf(irqname, sizeof(irqname), "hif%d", i);

		if (queue_mode == MVPP2_QDIST_MULTI_MODE) {
			v->first_rxq = i * MVPP2_DEFAULT_RXQ;
			v->nrxqs = MVPP2_DEFAULT_RXQ;
		} else if (queue_mode == MVPP2_QDIST_SINGLE_MODE &&
			   i == (port->nqvecs - 1)) {
			v->first_rxq = 0;
			v->nrxqs = port->nrxqs;
			v->type = MVPP2_QUEUE_VECTOR_SHARED;

			if (port->flags & MVPP2_F_DT_COMPAT)
				strncpy(irqname, "rx-shared", sizeof(irqname));
		}

		if (port_node)
			v->irq = of_irq_get_byname(port_node, irqname);
		else
			v->irq = fwnode_irq_get(port->fwnode, i);
		if (v->irq <= 0) {
			ret = -EINVAL;
			goto err;
		}

		netif_napi_add(port->dev, &v->napi, mvpp2_poll,
			       NAPI_POLL_WEIGHT);
	}

	return 0;

err:
	for (i = 0; i < port->nqvecs; i++)
		irq_dispose_mapping(port->qvecs[i].irq);
	return ret;
}
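/* Example: in MVPP2_QDIST_SINGLE_MODE with 4 s/w threads, nqvecs is 5;
 * vectors 0-3 stay private per-thread Tx vectors and the last vector
 * becomes the shared Rx vector covering all the port's Rx queues. In
 * MVPP2_QDIST_MULTI_MODE each of the 4 vectors owns its own
 * MVPP2_DEFAULT_RXQ-sized slice of Rx queues.
 */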
static int mvpp2_queue_vectors_init(struct mvpp2_port *port,
				    struct device_node *port_node)
{
	if (port->has_tx_irqs)
		return mvpp2_multi_queue_vectors_init(port, port_node);
	else
		return mvpp2_simple_queue_vectors_init(port, port_node);
}

static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port)
{
	int i;

	for (i = 0; i < port->nqvecs; i++)
		irq_dispose_mapping(port->qvecs[i].irq);
}
/* Configure Rx queue group interrupt for this port */
static void mvpp2_rx_irqs_setup(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;
	int i;

	if (priv->hw_version == MVPP21) {
		mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
			    port->nrxqs);
		return;
	}

	/* Handle the more complicated PPv2.2 case */
	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *qv = port->qvecs + i;

		if (!qv->nrxqs)
			continue;

		val = qv->sw_thread_id;
		val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET;
		mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);

		val = qv->first_rxq;
		val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET;
		mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
	}
}
/* Initialize port HW */
static int mvpp2_port_init(struct mvpp2_port *port)
{
	struct device *dev = port->dev->dev.parent;
	struct mvpp2 *priv = port->priv;
	struct mvpp2_txq_pcpu *txq_pcpu;
	unsigned int thread;
	int queue, err;

	/* Checks for hardware constraints */
	if (port->first_rxq + port->nrxqs >
	    MVPP2_MAX_PORTS * priv->max_port_rxqs)
		return -EINVAL;

	if (port->nrxqs % MVPP2_DEFAULT_RXQ ||
	    port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ)
		return -EINVAL;

	/* Disable port */
	mvpp2_egress_disable(port);
	mvpp2_port_disable(port);

	port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;

	port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
				  GFP_KERNEL);
	if (!port->txqs)
		return -ENOMEM;

	/* Associate physical Tx queues to this port and initialize.
	 * The mapping is predefined.
	 */
	for (queue = 0; queue < port->ntxqs; queue++) {
		int queue_phy_id = mvpp2_txq_phys(port->id, queue);
		struct mvpp2_tx_queue *txq;

		txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
		if (!txq) {
			err = -ENOMEM;
			goto err_free_percpu;
		}

		txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
		if (!txq->pcpu) {
			err = -ENOMEM;
			goto err_free_percpu;
		}

		txq->id = queue_phy_id;
		txq->log_id = queue;
		txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
		for (thread = 0; thread < priv->nthreads; thread++) {
			txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
			txq_pcpu->thread = thread;
		}

		port->txqs[queue] = txq;
	}

	port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs),
				  GFP_KERNEL);
	if (!port->rxqs) {
		err = -ENOMEM;
		goto err_free_percpu;
	}

	/* Allocate and initialize Rx queue for this port */
	for (queue = 0; queue < port->nrxqs; queue++) {
		struct mvpp2_rx_queue *rxq;

		/* Map physical Rx queue to port's logical Rx queue */
		rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
		if (!rxq) {
			err = -ENOMEM;
			goto err_free_percpu;
		}
		/* Map this Rx queue to a physical queue */
		rxq->id = port->first_rxq + queue;
		rxq->port = port->id;
		rxq->logic_rxq = queue;

		port->rxqs[queue] = rxq;
	}

	mvpp2_rx_irqs_setup(port);

	/* Create Rx descriptor rings */
	for (queue = 0; queue < port->nrxqs; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->size = port->rx_ring_size;
		rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
		rxq->time_coal = MVPP2_RX_COAL_USEC;
	}

	mvpp2_ingress_disable(port);

	/* Port default configuration */
	mvpp2_defaults_set(port);

	/* Port's classifier configuration */
	mvpp2_cls_oversize_rxq_set(port);
	mvpp2_cls_port_config(port);

	if (mvpp22_rss_is_supported())
		mvpp22_rss_port_init(port);

	/* Provide an initial Rx packet size */
	port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);

	/* Initialize pools for swf */
	err = mvpp2_swf_bm_pool_init(port);
	if (err)
		goto err_free_percpu;

	return 0;

err_free_percpu:
	for (queue = 0; queue < port->ntxqs; queue++) {
		if (!port->txqs[queue])
			continue;
		free_percpu(port->txqs[queue]->pcpu);
	}
	return err;
}
static bool mvpp22_port_has_legacy_tx_irqs(struct device_node *port_node,
					   unsigned long *flags)
{
	char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", "tx-cpu2",
			  "tx-cpu3" };
	int i;

	for (i = 0; i < 5; i++)
		if (of_property_match_string(port_node, "interrupt-names",
					     irqs[i]) < 0)
			return false;

	*flags |= MVPP2_F_DT_COMPAT;
	return true;
}
/* Checks if the port dt description has the required Tx interrupts:
 * - PPv2.1: there are no such interrupts.
 * - PPv2.2:
 *   - The old DTs have: "rx-shared", "tx-cpuX" with X in [0...3]
 *   - The new ones have: "hifX" with X in [0..8]
 *
 * All those variants are supported to keep backward compatibility.
 */
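/* For illustration, the two bindings look like (sketch):
 *   interrupt-names = "rx-shared", "tx-cpu0", "tx-cpu1",
 *                     "tx-cpu2", "tx-cpu3";        <- legacy
 *   interrupt-names = "hif0", "hif1", ..., "hif8"; <- current
 */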
static bool mvpp2_port_has_irqs(struct mvpp2 *priv,
				struct device_node *port_node,
				unsigned long *flags)
{
	char name[5];
	int i;

	/* ACPI */
	if (!port_node)
		return true;

	if (priv->hw_version == MVPP21)
		return false;

	if (mvpp22_port_has_legacy_tx_irqs(port_node, flags))
		return true;

	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
		snprintf(name, 5, "hif%d", i);
		if (of_property_match_string(port_node, "interrupt-names",
					     name) < 0)
			return false;
	}

	return true;
}
static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
				     struct fwnode_handle *fwnode,
				     char **mac_from)
{
	struct mvpp2_port *port = netdev_priv(dev);
	char hw_mac_addr[ETH_ALEN] = {0};
	char fw_mac_addr[ETH_ALEN];

	if (fwnode_get_mac_address(fwnode, fw_mac_addr, ETH_ALEN)) {
		*mac_from = "firmware node";
		ether_addr_copy(dev->dev_addr, fw_mac_addr);
		return;
	}

	if (priv->hw_version == MVPP21) {
		mvpp21_get_mac_address(port, hw_mac_addr);
		if (is_valid_ether_addr(hw_mac_addr)) {
			*mac_from = "hardware";
			ether_addr_copy(dev->dev_addr, hw_mac_addr);
			return;
		}
	}

	*mac_from = "random";
	eth_hw_addr_random(dev);
}
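/* Only GoP port 0 is wired to the XLG MAC, so the 10GBASE-KR/XAUI modes
 * are rejected on the other ports; conversely, the RGMII variants are
 * only available on ports other than 0. Unsupported combinations get an
 * empty mask.
 */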
static void mvpp2_phylink_validate(struct net_device *dev,
				   unsigned long *supported,
				   struct phylink_link_state *state)
{
	struct mvpp2_port *port = netdev_priv(dev);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	/* Invalid combinations */
	switch (state->interface) {
	case PHY_INTERFACE_MODE_10GKR:
	case PHY_INTERFACE_MODE_XAUI:
		if (port->gop_id != 0)
			goto empty_set;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		if (port->gop_id == 0)
			goto empty_set;
		break;
	default:
		break;
	}

	phylink_set(mask, Autoneg);
	phylink_set_port_modes(mask);
	phylink_set(mask, Pause);
	phylink_set(mask, Asym_Pause);

	switch (state->interface) {
	case PHY_INTERFACE_MODE_10GKR:
	case PHY_INTERFACE_MODE_XAUI:
	case PHY_INTERFACE_MODE_NA:
		if (port->gop_id == 0) {
			phylink_set(mask, 10000baseT_Full);
			phylink_set(mask, 10000baseCR_Full);
			phylink_set(mask, 10000baseSR_Full);
			phylink_set(mask, 10000baseLR_Full);
			phylink_set(mask, 10000baseLRM_Full);
			phylink_set(mask, 10000baseER_Full);
			phylink_set(mask, 10000baseKR_Full);
		}
		/* Fall-through */
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
	case PHY_INTERFACE_MODE_SGMII:
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 10baseT_Full);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 100baseT_Full);
		/* Fall-through */
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
		phylink_set(mask, 2500baseX_Full);
		break;
	default:
		goto empty_set;
	}

	bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	return;

empty_set:
	bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
}
static void mvpp22_xlg_link_state(struct mvpp2_port *port,
				  struct phylink_link_state *state)
{
	u32 val;

	state->speed = SPEED_10000;
	state->duplex = 1;
	state->an_complete = 1;

	val = readl(port->base + MVPP22_XLG_STATUS);
	state->link = !!(val & MVPP22_XLG_STATUS_LINK_UP);

	state->pause = 0;
	val = readl(port->base + MVPP22_XLG_CTRL0_REG);
	if (val & MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN)
		state->pause |= MLO_PAUSE_TX;
	if (val & MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN)
		state->pause |= MLO_PAUSE_RX;
}
static void mvpp2_gmac_link_state(struct mvpp2_port *port,
				  struct phylink_link_state *state)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_STATUS0);

	state->an_complete = !!(val & MVPP2_GMAC_STATUS0_AN_COMPLETE);
	state->link = !!(val & MVPP2_GMAC_STATUS0_LINK_UP);
	state->duplex = !!(val & MVPP2_GMAC_STATUS0_FULL_DUPLEX);

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_1000BASEX:
		state->speed = SPEED_1000;
		break;
	case PHY_INTERFACE_MODE_2500BASEX:
		state->speed = SPEED_2500;
		break;
	default:
		if (val & MVPP2_GMAC_STATUS0_GMII_SPEED)
			state->speed = SPEED_1000;
		else if (val & MVPP2_GMAC_STATUS0_MII_SPEED)
			state->speed = SPEED_100;
		else
			state->speed = SPEED_10;
	}

	state->pause = 0;
	if (val & MVPP2_GMAC_STATUS0_RX_PAUSE)
		state->pause |= MLO_PAUSE_RX;
	if (val & MVPP2_GMAC_STATUS0_TX_PAUSE)
		state->pause |= MLO_PAUSE_TX;
}
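/* A GoP port can be backed either by the XLG MAC (10G modes) or by the
 * GMAC; report the state of whichever one the MACMODESELECT field says
 * is currently active.
 */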
static int mvpp2_phylink_mac_link_state(struct net_device *dev,
					struct phylink_link_state *state)
{
	struct mvpp2_port *port = netdev_priv(dev);

	if (port->priv->hw_version == MVPP22 && port->gop_id == 0) {
		u32 mode = readl(port->base + MVPP22_XLG_CTRL3_REG);

		mode &= MVPP22_XLG_CTRL3_MACMODESELECT_MASK;

		if (mode == MVPP22_XLG_CTRL3_MACMODESELECT_10G) {
			mvpp22_xlg_link_state(port, state);
			return 1;
		}
	}

	mvpp2_gmac_link_state(port, state);
	return 1;
}
static void mvpp2_mac_an_restart(struct net_device *dev)
{
	struct mvpp2_port *port = netdev_priv(dev);
	u32 val;

	if (port->phy_interface != PHY_INTERFACE_MODE_SGMII)
		return;

	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	/* The RESTART_AN bit is cleared by the h/w after restarting the AN
	 * process.
	 */
	val |= MVPP2_GMAC_IN_BAND_RESTART_AN | MVPP2_GMAC_IN_BAND_AUTONEG;
	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}
static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
			     const struct phylink_link_state *state)
{
	u32 ctrl0, ctrl4;

	ctrl0 = readl(port->base + MVPP22_XLG_CTRL0_REG);
	ctrl4 = readl(port->base + MVPP22_XLG_CTRL4_REG);

	if (state->pause & MLO_PAUSE_TX)
		ctrl0 |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN;
	if (state->pause & MLO_PAUSE_RX)
		ctrl0 |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;

	ctrl4 &= ~MVPP22_XLG_CTRL4_MACMODSELECT_GMAC;
	ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC |
		 MVPP22_XLG_CTRL4_EN_IDLE_CHECK;

	writel(ctrl0, port->base + MVPP22_XLG_CTRL0_REG);
	writel(ctrl4, port->base + MVPP22_XLG_CTRL4_REG);
}
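/* Three families of link modes are handled below: 1000BASE-X/2500BASE-X
 * run at a fixed speed and duplex, SGMII and the 802.3z modes use the PCS
 * with in-band autonegotiation, and the RGMII variants bypass the PCS and
 * take speed and duplex from the resolved link state.
 */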
static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
			      const struct phylink_link_state *state)
{
	u32 an, ctrl0, ctrl2, ctrl4;

	an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG);

	/* Force link down */
	an &= ~MVPP2_GMAC_FORCE_LINK_PASS;
	an |= MVPP2_GMAC_FORCE_LINK_DOWN;
	writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);

	/* Set the GMAC in a reset state */
	ctrl2 |= MVPP2_GMAC_PORT_RESET_MASK;
	writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);

	an &= ~(MVPP2_GMAC_CONFIG_MII_SPEED | MVPP2_GMAC_CONFIG_GMII_SPEED |
		MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FC_ADV_EN |
		MVPP2_GMAC_FC_ADV_ASM_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG |
		MVPP2_GMAC_CONFIG_FULL_DUPLEX | MVPP2_GMAC_AN_DUPLEX_EN |
		MVPP2_GMAC_FORCE_LINK_DOWN);
	ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK;
	ctrl2 &= ~(MVPP2_GMAC_PORT_RESET_MASK | MVPP2_GMAC_PCS_ENABLE_MASK);

	if (state->interface == PHY_INTERFACE_MODE_1000BASEX ||
	    state->interface == PHY_INTERFACE_MODE_2500BASEX) {
		/* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can
		 * they negotiate duplex: they are always operating with a fixed
		 * speed of 1000/2500Mbps in full duplex, so force 1000/2500
		 * speed and full duplex here.
		 */
		ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK;
		an |= MVPP2_GMAC_CONFIG_GMII_SPEED |
		      MVPP2_GMAC_CONFIG_FULL_DUPLEX;
	} else if (!phy_interface_mode_is_rgmii(state->interface)) {
		an |= MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG;
	}

	if (state->duplex)
		an |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
	if (phylink_test(state->advertising, Pause))
		an |= MVPP2_GMAC_FC_ADV_EN;
	if (phylink_test(state->advertising, Asym_Pause))
		an |= MVPP2_GMAC_FC_ADV_ASM_EN;

	if (state->interface == PHY_INTERFACE_MODE_SGMII ||
	    state->interface == PHY_INTERFACE_MODE_1000BASEX ||
	    state->interface == PHY_INTERFACE_MODE_2500BASEX) {
		an |= MVPP2_GMAC_IN_BAND_AUTONEG;
		ctrl2 |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK;

		ctrl4 &= ~(MVPP22_CTRL4_EXT_PIN_GMII_SEL |
			   MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN);
		ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
			 MVPP22_CTRL4_DP_CLK_SEL |
			 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;

		if (state->pause & MLO_PAUSE_TX)
			ctrl4 |= MVPP22_CTRL4_TX_FC_EN;
		if (state->pause & MLO_PAUSE_RX)
			ctrl4 |= MVPP22_CTRL4_RX_FC_EN;
	} else if (phy_interface_mode_is_rgmii(state->interface)) {
		an |= MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS;

		if (state->speed == SPEED_1000)
			an |= MVPP2_GMAC_CONFIG_GMII_SPEED;
		else if (state->speed == SPEED_100)
			an |= MVPP2_GMAC_CONFIG_MII_SPEED;

		ctrl4 &= ~MVPP22_CTRL4_DP_CLK_SEL;
		ctrl4 |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
			 MVPP22_CTRL4_SYNC_BYPASS_DIS |
			 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
	}

	writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG);
	writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
	writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG);
	writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
}
static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
			     const struct phylink_link_state *state)
{
	struct mvpp2_port *port = netdev_priv(dev);

	/* Check for invalid configuration */
	if (state->interface == PHY_INTERFACE_MODE_10GKR && port->gop_id != 0) {
		netdev_err(dev, "Invalid mode on %s\n", dev->name);
		return;
	}

	/* Make sure the port is disabled when reconfiguring the mode */
	mvpp2_port_disable(port);

	if (port->priv->hw_version == MVPP22 &&
	    port->phy_interface != state->interface) {
		port->phy_interface = state->interface;

		/* Reconfigure the serdes lanes */
		phy_power_off(port->comphy);
		mvpp22_mode_reconfigure(port);
	}

	/* mac (re)configuration */
	if (state->interface == PHY_INTERFACE_MODE_10GKR)
		mvpp2_xlg_config(port, mode, state);
	else if (phy_interface_mode_is_rgmii(state->interface) ||
		 state->interface == PHY_INTERFACE_MODE_SGMII ||
		 state->interface == PHY_INTERFACE_MODE_1000BASEX ||
		 state->interface == PHY_INTERFACE_MODE_2500BASEX)
		mvpp2_gmac_config(port, mode, state);

	if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK)
		mvpp2_port_loopback_set(port, state);

	mvpp2_port_enable(port);
}
static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode,
			      phy_interface_t interface, struct phy_device *phy)
{
	struct mvpp2_port *port = netdev_priv(dev);
	u32 val;

	if (!phylink_autoneg_inband(mode) &&
	    interface != PHY_INTERFACE_MODE_10GKR) {
		val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
		val &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
		if (phy_interface_mode_is_rgmii(interface))
			val |= MVPP2_GMAC_FORCE_LINK_PASS;
		writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	}

	mvpp2_port_enable(port);

	mvpp2_egress_enable(port);
	mvpp2_ingress_enable(port);
	netif_tx_wake_all_queues(dev);
}
static void mvpp2_mac_link_down(struct net_device *dev, unsigned int mode,
				phy_interface_t interface)
{
	struct mvpp2_port *port = netdev_priv(dev);
	u32 val;

	if (!phylink_autoneg_inband(mode) &&
	    interface != PHY_INTERFACE_MODE_10GKR) {
		val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
		val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
		val |= MVPP2_GMAC_FORCE_LINK_DOWN;
		writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
	}

	netif_tx_stop_all_queues(dev);
	mvpp2_egress_disable(port);
	mvpp2_ingress_disable(port);

	/* When using link interrupts to notify phylink of a MAC state change,
	 * we do not want the port to be disabled (we want to receive further
	 * interrupts, to be notified when the port will have a link later).
	 */
	if (!port->has_phy)
		return;

	mvpp2_port_disable(port);
}
static const struct phylink_mac_ops mvpp2_phylink_ops = {
	.validate = mvpp2_phylink_validate,
	.mac_link_state = mvpp2_phylink_mac_link_state,
	.mac_an_restart = mvpp2_mac_an_restart,
	.mac_config = mvpp2_mac_config,
	.mac_link_up = mvpp2_mac_link_up,
	.mac_link_down = mvpp2_mac_link_down,
};
/* Ports initialization */
static int mvpp2_port_probe(struct platform_device *pdev,
			    struct fwnode_handle *port_fwnode,
			    struct mvpp2 *priv)
{
	struct phy *comphy = NULL;
	struct mvpp2_port *port;
	struct mvpp2_port_pcpu *port_pcpu;
	struct device_node *port_node = to_of_node(port_fwnode);
	struct net_device *dev;
	struct resource *res;
	struct phylink *phylink;
	char *mac_from = "";
	unsigned int ntxqs, nrxqs, thread;
	unsigned long flags = 0;
	bool has_tx_irqs;
	u32 id;
	int features;
	int phy_mode;
	int err, i;

	has_tx_irqs = mvpp2_port_has_irqs(priv, port_node, &flags);
	if (!has_tx_irqs && queue_mode == MVPP2_QDIST_MULTI_MODE) {
		dev_err(&pdev->dev,
			"not enough IRQs to support multi queue mode\n");
		return -EINVAL;
	}

	ntxqs = MVPP2_MAX_TXQ;
	if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_MULTI_MODE)
		nrxqs = MVPP2_DEFAULT_RXQ * num_possible_cpus();
	else
		nrxqs = MVPP2_DEFAULT_RXQ;

	dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs);
	if (!dev)
		return -ENOMEM;

	phy_mode = fwnode_get_phy_mode(port_fwnode);
	if (phy_mode < 0) {
		dev_err(&pdev->dev, "incorrect phy mode\n");
		err = phy_mode;
		goto err_free_netdev;
	}

	if (port_node) {
		comphy = devm_of_phy_get(&pdev->dev, port_node, NULL);
		if (IS_ERR(comphy)) {
			if (PTR_ERR(comphy) == -EPROBE_DEFER) {
				err = -EPROBE_DEFER;
				goto err_free_netdev;
			}
			comphy = NULL;
		}
	}

	if (fwnode_property_read_u32(port_fwnode, "port-id", &id)) {
		err = -EINVAL;
		dev_err(&pdev->dev, "missing port-id value\n");
		goto err_free_netdev;
	}

	dev->tx_queue_len = MVPP2_MAX_TXD_MAX;
	dev->watchdog_timeo = 5 * HZ;
	dev->netdev_ops = &mvpp2_netdev_ops;
	dev->ethtool_ops = &mvpp2_eth_tool_ops;

	port = netdev_priv(dev);
	port->dev = dev;
	port->fwnode = port_fwnode;
	port->has_phy = !!of_find_property(port_node, "phy", NULL);
	port->ntxqs = ntxqs;
	port->nrxqs = nrxqs;
	port->priv = priv;
	port->has_tx_irqs = has_tx_irqs;
	port->flags = flags;

	err = mvpp2_queue_vectors_init(port, port_node);
	if (err)
		goto err_free_netdev;

	if (port_node)
		port->link_irq = of_irq_get_byname(port_node, "link");
	else
		port->link_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1);
	if (port->link_irq == -EPROBE_DEFER) {
		err = -EPROBE_DEFER;
		goto err_deinit_qvecs;
	}
	if (port->link_irq <= 0)
		/* the link irq is optional */
		port->link_irq = 0;

	if (fwnode_property_read_bool(port_fwnode, "marvell,loopback"))
		port->flags |= MVPP2_F_LOOPBACK;

	port->id = id;
	if (priv->hw_version == MVPP21)
		port->first_rxq = port->id * port->nrxqs;
	else
		port->first_rxq = port->id * priv->max_port_rxqs;

	port->of_node = port_node;
	port->phy_interface = phy_mode;
	port->comphy = comphy;

	if (priv->hw_version == MVPP21) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 2 + id);
		port->base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(port->base)) {
			err = PTR_ERR(port->base);
			goto err_free_irq;
		}

		port->stats_base = port->priv->lms_base +
				   MVPP21_MIB_COUNTERS_OFFSET +
				   port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ;
	} else {
		if (fwnode_property_read_u32(port_fwnode, "gop-port-id",
					     &port->gop_id)) {
			err = -EINVAL;
			dev_err(&pdev->dev, "missing gop-port-id value\n");
			goto err_deinit_qvecs;
		}

		port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
		port->stats_base = port->priv->iface_base +
				   MVPP22_MIB_COUNTERS_OFFSET +
				   port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ;
	}

	/* Alloc per-cpu and ethtool stats */
	port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
	if (!port->stats) {
		err = -ENOMEM;
		goto err_free_irq;
	}

	port->ethtool_stats = devm_kcalloc(&pdev->dev,
					   ARRAY_SIZE(mvpp2_ethtool_regs),
					   sizeof(u64), GFP_KERNEL);
	if (!port->ethtool_stats) {
		err = -ENOMEM;
		goto err_free_stats;
	}

	mutex_init(&port->gather_stats_lock);
	INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics);

	mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from);

	port->tx_ring_size = MVPP2_MAX_TXD_DFLT;
	port->rx_ring_size = MVPP2_MAX_RXD_DFLT;
	SET_NETDEV_DEV(dev, &pdev->dev);

	err = mvpp2_port_init(port);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to init port %d\n", id);
		goto err_free_stats;
	}

	mvpp2_port_periodic_xon_disable(port);

	mvpp2_port_reset(port);

	port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
	if (!port->pcpu) {
		err = -ENOMEM;
		goto err_free_txq_pcpu;
	}
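	/* Without per-CPU Tx interrupts, Tx completion is emulated in
	 * software: a pinned per-thread hrtimer kicks a tasklet that reaps
	 * the done descriptors.
	 */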
	if (!port->has_tx_irqs) {
		for (thread = 0; thread < priv->nthreads; thread++) {
			port_pcpu = per_cpu_ptr(port->pcpu, thread);

			hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
				     HRTIMER_MODE_REL_PINNED);
			port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
			port_pcpu->timer_scheduled = false;

			tasklet_init(&port_pcpu->tx_done_tasklet,
				     mvpp2_tx_proc_cb,
				     (unsigned long)dev);
		}
	}

	features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		   NETIF_F_TSO;
	dev->features = features | NETIF_F_RXCSUM;
	dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
			    NETIF_F_HW_VLAN_CTAG_FILTER;

	if (mvpp22_rss_is_supported())
		dev->hw_features |= NETIF_F_RXHASH;
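	/* Tx checksum offload is kept on port 0 only: presumably only its
	 * larger Tx FIFO can hold a full jumbo frame for checksumming (cf.
	 * mvpp22_tx_fifo_init()).
	 */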
	if (port->pool_long->id == MVPP2_BM_JUMBO && port->id != 0) {
		dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
		dev->hw_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
	}

	dev->vlan_features |= features;
	dev->gso_max_segs = MVPP2_MAX_TSO_SEGS;
	dev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 68 - 9704 */
	dev->min_mtu = ETH_MIN_MTU;
	/* 9704 == 9728 - 20 and rounding to 8 */
	dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
	dev->dev.of_node = port_node;

	/* Phylink isn't used w/ ACPI as of now */
	if (port_node) {
		phylink = phylink_create(dev, port_fwnode, phy_mode,
					 &mvpp2_phylink_ops);
		if (IS_ERR(phylink)) {
			err = PTR_ERR(phylink);
			goto err_free_port_pcpu;
		}
		port->phylink = phylink;
	} else {
		port->phylink = NULL;
	}

	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register netdev\n");
		goto err_phylink;
	}
	netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);

	priv->port_list[priv->port_count++] = port;

	return 0;

err_phylink:
	if (port->phylink)
		phylink_destroy(port->phylink);
err_free_port_pcpu:
	free_percpu(port->pcpu);
err_free_txq_pcpu:
	for (i = 0; i < port->ntxqs; i++)
		free_percpu(port->txqs[i]->pcpu);
err_free_stats:
	free_percpu(port->stats);
err_free_irq:
	if (port->link_irq)
		irq_dispose_mapping(port->link_irq);
err_deinit_qvecs:
	mvpp2_queue_vectors_deinit(port);
err_free_netdev:
	free_netdev(dev);
	return err;
}
/* Ports removal routine */
static void mvpp2_port_remove(struct mvpp2_port *port)
{
	int i;

	unregister_netdev(port->dev);
	if (port->phylink)
		phylink_destroy(port->phylink);
	free_percpu(port->pcpu);
	free_percpu(port->stats);
	for (i = 0; i < port->ntxqs; i++)
		free_percpu(port->txqs[i]->pcpu);
	mvpp2_queue_vectors_deinit(port);
	if (port->link_irq)
		irq_dispose_mapping(port->link_irq);
	free_netdev(port->dev);
}
/* Initialize decoding windows */
static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
				    struct mvpp2 *priv)
{
	u32 win_enable;
	int i;

	for (i = 0; i < 6; i++) {
		mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
		mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);

		if (i < 4)
			mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
	}

	win_enable = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		mvpp2_write(priv, MVPP2_WIN_BASE(i),
			    (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
			    dram->mbus_dram_target_id);

		mvpp2_write(priv, MVPP2_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable |= (1 << i);
	}

	mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
}
/* Initialize Rx FIFO's */
static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
{
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
		mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
	}

	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
		    MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}
static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
{
	int port;

	/* The FIFO size parameters are set depending on the maximum speed a
	 * given port can handle:
	 * - Port 0: 10Gbps
	 * - Port 1: 2.5Gbps
	 * - Ports 2 and 3: 1Gbps
	 */

	mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(0),
		    MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
	mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(0),
		    MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB);

	mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(1),
		    MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
	mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(1),
		    MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB);

	for (port = 2; port < MVPP2_MAX_PORTS; port++) {
		mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
		mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
			    MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
	}

	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
		    MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}
/* Initialize Tx FIFO's: the total FIFO size is 19kB on PPv2.2 and 10G
 * interfaces must have a Tx FIFO size of 10kB. As only port 0 can do 10G,
 * configure its Tx FIFO size to 10kB and the other ports' Tx FIFO size to 3kB.
 */
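/* (That splits the budget as 10kB + 3 ports * 3kB = 19kB.) */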
static void mvpp22_tx_fifo_init(struct mvpp2 *priv)
{
	int port, size, thrs;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		if (port == 0) {
			size = MVPP22_TX_FIFO_DATA_SIZE_10KB;
			thrs = MVPP2_TX_FIFO_THRESHOLD_10KB;
		} else {
			size = MVPP22_TX_FIFO_DATA_SIZE_3KB;
			thrs = MVPP2_TX_FIFO_THRESHOLD_3KB;
		}

		mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size);
		mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), thrs);
	}
}
static void mvpp2_axi_init(struct mvpp2 *priv)
{
	u32 val, rdval, wrval;

	mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);

	/* AXI Bridge Configuration */

	rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	/* BM */
	mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);

	/* Descriptor */
	mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);

	/* Buffer Data */
	mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);

	val = MVPP22_AXI_CODE_CACHE_NON_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
	mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
	mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
}
/* Initialize network controller common part HW */
static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
{
	const struct mbus_dram_target_info *dram_target_info;
	int err, i;
	u32 val;

	/* MBUS windows configuration */
	dram_target_info = mv_mbus_dram_info();
	if (dram_target_info)
		mvpp2_conf_mbus_windows(dram_target_info, priv);

	if (priv->hw_version == MVPP22)
		mvpp2_axi_init(priv);

	/* Disable HW PHY polling */
	if (priv->hw_version == MVPP21) {
		val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
		val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
		writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
	} else {
		val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
		val &= ~MVPP22_SMI_POLLING_EN;
		writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
	}

	/* Allocate and initialize aggregated TXQs */
	priv->aggr_txqs = devm_kcalloc(&pdev->dev, MVPP2_MAX_THREADS,
				       sizeof(*priv->aggr_txqs),
				       GFP_KERNEL);
	if (!priv->aggr_txqs)
		return -ENOMEM;

	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
		priv->aggr_txqs[i].id = i;
		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
		err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv);
		if (err < 0)
			return err;
	}

	/* Rx Fifo Init */
	if (priv->hw_version == MVPP21) {
		mvpp2_rx_fifo_init(priv);
	} else {
		mvpp22_rx_fifo_init(priv);
		mvpp22_tx_fifo_init(priv);
	}

	if (priv->hw_version == MVPP21)
		writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
		       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);

	/* Allow cache snoop when transmitting packets */
	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);

	/* Buffer Manager initialization */
	err = mvpp2_bm_init(pdev, priv);
	if (err < 0)
		return err;

	/* Parser default initialization */
	err = mvpp2_prs_default_init(pdev, priv);
	if (err < 0)
		return err;

	/* Classifier default initialization */
	mvpp2_cls_init(priv);

	return 0;
}
static int mvpp2_probe(struct platform_device *pdev)
{
	const struct acpi_device_id *acpi_id;
	struct fwnode_handle *fwnode = pdev->dev.fwnode;
	struct fwnode_handle *port_fwnode;
	struct mvpp2 *priv;
	struct resource *res;
	void __iomem *base;
	int i, shared;
	int err;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	if (has_acpi_companion(&pdev->dev)) {
		acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
					    &pdev->dev);
		if (!acpi_id)
			return -EINVAL;
		priv->hw_version = (unsigned long)acpi_id->driver_data;
	} else {
		priv->hw_version =
			(unsigned long)of_device_get_match_data(&pdev->dev);
	}

	/* multi queue mode isn't supported on PPV2.1, fall back to single
	 * mode
	 */
	if (priv->hw_version == MVPP21)
		queue_mode = MVPP2_QDIST_SINGLE_MODE;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	if (priv->hw_version == MVPP21) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(priv->lms_base))
			return PTR_ERR(priv->lms_base);
	} else {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (has_acpi_companion(&pdev->dev)) {
			/* In case the MDIO memory region is declared in
			 * the ACPI, it can already appear as 'in-use'
			 * in the OS. Because it is overlapped by the second
			 * region of the network controller, make
			 * sure it is released, before requesting it again.
			 * The care is taken by mvpp2 driver to avoid
			 * concurrent access to this memory region.
			 */
			release_resource(res);
		}
		priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(priv->iface_base))
			return PTR_ERR(priv->iface_base);
	}

	if (priv->hw_version == MVPP22 && dev_of_node(&pdev->dev)) {
		priv->sysctrl_base =
			syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
							"marvell,system-controller");
		if (IS_ERR(priv->sysctrl_base))
			/* The system controller regmap is optional for dt
			 * compatibility reasons. When not provided, the
			 * configuration of the GoP relies on the
			 * firmware/bootloader.
			 */
			priv->sysctrl_base = NULL;
	}

	mvpp2_setup_bm_pool();

	priv->nthreads = min_t(unsigned int, num_present_cpus(),
			       MVPP2_MAX_THREADS);

	shared = num_present_cpus() - priv->nthreads;
	if (shared > 0)
		bitmap_fill(&priv->lock_map,
			    min_t(int, shared, MVPP2_MAX_THREADS));

	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
		u32 addr_space_sz;

		addr_space_sz = (priv->hw_version == MVPP21 ?
				 MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
		priv->swth_base[i] = base + i * addr_space_sz;
	}

	if (priv->hw_version == MVPP21)
		priv->max_port_rxqs = 8;
	else
		priv->max_port_rxqs = 32;

	if (dev_of_node(&pdev->dev)) {
		priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
		if (IS_ERR(priv->pp_clk))
			return PTR_ERR(priv->pp_clk);
		err = clk_prepare_enable(priv->pp_clk);
		if (err < 0)
			return err;

		priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
		if (IS_ERR(priv->gop_clk)) {
			err = PTR_ERR(priv->gop_clk);
			goto err_pp_clk;
		}
		err = clk_prepare_enable(priv->gop_clk);
		if (err < 0)
			goto err_pp_clk;

		if (priv->hw_version == MVPP22) {
			priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
			if (IS_ERR(priv->mg_clk)) {
				err = PTR_ERR(priv->mg_clk);
				goto err_gop_clk;
			}

			err = clk_prepare_enable(priv->mg_clk);
			if (err < 0)
				goto err_gop_clk;

			priv->mg_core_clk = devm_clk_get(&pdev->dev, "mg_core_clk");
			if (IS_ERR(priv->mg_core_clk)) {
				priv->mg_core_clk = NULL;
			} else {
				err = clk_prepare_enable(priv->mg_core_clk);
				if (err < 0)
					goto err_mg_clk;
			}
		}

		priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk");
		if (IS_ERR(priv->axi_clk)) {
			err = PTR_ERR(priv->axi_clk);
			if (err == -EPROBE_DEFER)
				goto err_mg_core_clk;
			priv->axi_clk = NULL;
		} else {
			err = clk_prepare_enable(priv->axi_clk);
			if (err < 0)
				goto err_mg_core_clk;
		}

		/* Get system's tclk rate */
		priv->tclk = clk_get_rate(priv->pp_clk);
	} else if (device_property_read_u32(&pdev->dev, "clock-frequency",
					    &priv->tclk)) {
		dev_err(&pdev->dev, "missing clock-frequency value\n");
		return -EINVAL;
	}

	if (priv->hw_version == MVPP22) {
		err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK);
		if (err)
			goto err_axi_clk;
		/* Sadly, the BM pools all share the same register to
		 * store the high 32 bits of their address. So they
		 * must all have the same high 32 bits, which forces
		 * us to restrict coherent memory to DMA_BIT_MASK(32).
		 */
		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			goto err_axi_clk;
	}

	/* Initialize network controller */
	err = mvpp2_init(pdev, priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to initialize controller\n");
		goto err_axi_clk;
	}

	/* Initialize ports */
	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
		err = mvpp2_port_probe(pdev, port_fwnode, priv);
		if (err < 0)
			goto err_port_probe;
	}

	if (priv->port_count == 0) {
		dev_err(&pdev->dev, "no ports enabled\n");
		err = -ENODEV;
		goto err_axi_clk;
	}

	/* Statistics must be gathered regularly because some of them (like
	 * packets counters) are 32-bit registers and could overflow quite
	 * quickly. For instance, a 10Gb link used at full bandwidth with the
	 * smallest packets (64B) will overflow a 32-bit counter in less than
	 * 30 seconds. Then, use a workqueue to fill 64-bit counters.
	 */
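	/* (At 10Gbps, 64B frames plus 20B of preamble and inter-frame gap
	 * give roughly 14.88 Mpps; the octet counters wrap even faster,
	 * since 2^32 bytes at 1.25 GB/s take about 3.4 seconds.)
	 */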
	snprintf(priv->queue_name, sizeof(priv->queue_name),
		 "stats-wq-%s%s", netdev_name(priv->port_list[0]->dev),
		 priv->port_count > 1 ? "+" : "");
	priv->stats_queue = create_singlethread_workqueue(priv->queue_name);
	if (!priv->stats_queue) {
		err = -ENOMEM;
		goto err_port_probe;
	}

	mvpp2_dbgfs_init(priv, pdev->name);

	platform_set_drvdata(pdev, priv);
	return 0;

err_port_probe:
	i = 0;
	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
		if (priv->port_list[i])
			mvpp2_port_remove(priv->port_list[i]);
		i++;
	}
err_axi_clk:
	clk_disable_unprepare(priv->axi_clk);

err_mg_core_clk:
	if (priv->hw_version == MVPP22)
		clk_disable_unprepare(priv->mg_core_clk);
err_mg_clk:
	if (priv->hw_version == MVPP22)
		clk_disable_unprepare(priv->mg_clk);
err_gop_clk:
	clk_disable_unprepare(priv->gop_clk);
err_pp_clk:
	clk_disable_unprepare(priv->pp_clk);
	return err;
}
static int mvpp2_remove(struct platform_device *pdev)
{
	struct mvpp2 *priv = platform_get_drvdata(pdev);
	struct fwnode_handle *fwnode = pdev->dev.fwnode;
	struct fwnode_handle *port_fwnode;
	int i = 0;

	mvpp2_dbgfs_cleanup(priv);

	flush_workqueue(priv->stats_queue);
	destroy_workqueue(priv->stats_queue);

	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
		if (priv->port_list[i]) {
			mutex_destroy(&priv->port_list[i]->gather_stats_lock);
			mvpp2_port_remove(priv->port_list[i]);
		}
		i++;
	}

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];

		mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
	}

	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
		struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];

		dma_free_coherent(&pdev->dev,
				  MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
				  aggr_txq->descs,
				  aggr_txq->descs_dma);
	}

	if (is_acpi_node(port_fwnode))
		return 0;

	clk_disable_unprepare(priv->axi_clk);
	clk_disable_unprepare(priv->mg_core_clk);
	clk_disable_unprepare(priv->mg_clk);
	clk_disable_unprepare(priv->pp_clk);
	clk_disable_unprepare(priv->gop_clk);

	return 0;
}
static const struct of_device_id mvpp2_match[] = {
	{
		.compatible = "marvell,armada-375-pp2",
		.data = (void *)MVPP21,
	},
	{
		.compatible = "marvell,armada-7k-pp22",
		.data = (void *)MVPP22,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, mvpp2_match);

static const struct acpi_device_id mvpp2_acpi_match[] = {
	{ "MRVL0110", MVPP22 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match);

static struct platform_driver mvpp2_driver = {
	.probe = mvpp2_probe,
	.remove = mvpp2_remove,
	.driver = {
		.name = MVPP2_DRIVER_NAME,
		.of_match_table = mvpp2_match,
		.acpi_match_table = ACPI_PTR(mvpp2_acpi_match),
	},
};

module_platform_driver(mvpp2_driver);

MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
MODULE_LICENSE("GPL v2");