// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2016-2020 NXP
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/kthread.h>
#include <linux/iommu.h>
#include <linux/fsl/mc.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/fsl/ptp_qoriq.h>
#include <linux/ptp_classify.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

#include "dpaa2-eth.h"

/* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa2-eth
 * files using these trace events only need to #include "dpaa2-eth-trace.h"
 */
#define CREATE_TRACE_POINTS
#include "dpaa2-eth-trace.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");

struct ptp_qoriq *dpaa2_ptp;
EXPORT_SYMBOL(dpaa2_ptp);

static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
				dma_addr_t iova_addr)
{
	phys_addr_t phys_addr;

	phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;

	return phys_to_virt(phys_addr);
}

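/* Note: every buffer handed to WRIOP is identified by its DMA address (an
 * IOVA when an SMMU is active), so helpers that receive an address back from
 * hardware first translate it with dpaa2_iova_to_virt() before touching the
 * buffer memory, e.g. (illustrative usage, mirroring the Rx paths below):
 *
 *	vaddr = dpaa2_iova_to_virt(priv->iommu_domain, dpaa2_fd_get_addr(fd));
 */
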
static void dpaa2_eth_validate_rx_csum(struct dpaa2_eth_priv *priv,
				       u32 fd_status,
				       struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* HW checksum validation is disabled, nothing to do here */
	if (!(priv->net_dev->features & NETIF_F_RXCSUM))
		return;

	/* Read checksum validation bits */
	if (!((fd_status & DPAA2_FAS_L3CV) &&
	      (fd_status & DPAA2_FAS_L4CV)))
		return;

	/* Inform the stack there's no need to compute L3/L4 csum anymore */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

/* Free a received FD.
 * Not to be used for Tx conf FDs or on any other paths.
 */
static void dpaa2_eth_free_rx_fd(struct dpaa2_eth_priv *priv,
				 const struct dpaa2_fd *fd,
				 void *vaddr)
{
	struct device *dev = priv->net_dev->dev.parent;
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	u8 fd_format = dpaa2_fd_get_format(fd);
	struct dpaa2_sg_entry *sgt;
	void *sg_vaddr;
	int i;

	/* If single buffer frame, just free the data buffer */
	if (fd_format == dpaa2_fd_single)
		goto free_buf;
	else if (fd_format != dpaa2_fd_sg)
		/* We don't support any other format */
		return;

	/* For S/G frames, we first need to free all SG entries
	 * except the first one, which was taken care of already
	 */
	sgt = vaddr + dpaa2_fd_get_offset(fd);
	for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
		addr = dpaa2_sg_get_addr(&sgt[i]);
		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
		dma_unmap_page(dev, addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);

		free_pages((unsigned long)sg_vaddr, 0);
		if (dpaa2_sg_is_final(&sgt[i]))
			break;
	}

free_buf:
	free_pages((unsigned long)vaddr, 0);
}

/* Build a linear skb based on a single-buffer frame descriptor */
static struct sk_buff *dpaa2_eth_build_linear_skb(struct dpaa2_eth_channel *ch,
						  const struct dpaa2_fd *fd,
						  void *fd_vaddr)
{
	struct sk_buff *skb = NULL;
	u16 fd_offset = dpaa2_fd_get_offset(fd);
	u32 fd_length = dpaa2_fd_get_len(fd);

	ch->buf_count--;

	skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, fd_offset);
	skb_put(skb, fd_length);

	return skb;
}

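/* The linear skb is built directly on top of the Rx buffer page
 * (build_skb() does not copy the frame), so the buffer is only returned
 * to the pool when the skb itself is freed. The hardware annotation area
 * and frame offset are skipped via skb_reserve() above.
 */
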
/* Build a non linear (fragmented) skb based on a S/G table */
static struct sk_buff *dpaa2_eth_build_frag_skb(struct dpaa2_eth_priv *priv,
						struct dpaa2_eth_channel *ch,
						struct dpaa2_sg_entry *sgt)
{
	struct sk_buff *skb = NULL;
	struct device *dev = priv->net_dev->dev.parent;
	void *sg_vaddr;
	dma_addr_t sg_addr;
	u16 sg_offset;
	u32 sg_length;
	struct page *page, *head_page;
	int page_offset;
	int i;

	for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
		struct dpaa2_sg_entry *sge = &sgt[i];

		/* NOTE: We only support SG entries in dpaa2_sg_single format,
		 * but this is the only format we may receive from HW anyway
		 */

		/* Get the address and length from the S/G entry */
		sg_addr = dpaa2_sg_get_addr(sge);
		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
		dma_unmap_page(dev, sg_addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);

		sg_length = dpaa2_sg_get_len(sge);

		if (i == 0) {
			/* We build the skb around the first data buffer */
			skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
			if (unlikely(!skb)) {
				/* Free the first SG entry now, since we already
				 * unmapped it and obtained the virtual address
				 */
				free_pages((unsigned long)sg_vaddr, 0);

				/* We still need to subtract the buffers used
				 * by this FD from our software counter
				 */
				while (!dpaa2_sg_is_final(&sgt[i]) &&
				       i < DPAA2_ETH_MAX_SG_ENTRIES)
					i++;
				break;
			}

			sg_offset = dpaa2_sg_get_offset(sge);
			skb_reserve(skb, sg_offset);
			skb_put(skb, sg_length);
		} else {
			/* Rest of the data buffers are stored as skb frags */
			page = virt_to_page(sg_vaddr);
			head_page = virt_to_head_page(sg_vaddr);

			/* Offset in page (which may be compound).
			 * Data in subsequent SG entries is stored from the
			 * beginning of the buffer, so we don't need to add the
			 * frame offset either.
			 */
			page_offset = ((unsigned long)sg_vaddr &
				(PAGE_SIZE - 1)) +
				(page_address(page) - page_address(head_page));

			skb_add_rx_frag(skb, i - 1, head_page, page_offset,
					sg_length, priv->rx_buf_size);
		}

		if (dpaa2_sg_is_final(sge))
			break;
	}

	WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");

	/* Count all data buffers + SG table buffer */
	ch->buf_count -= i + 2;

	return skb;
}

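/* Accounting note: the loop above exits with i holding the index of the
 * final SG entry, so the frame consumed i + 1 data buffers plus one buffer
 * for the SGT itself -- hence the "i + 2" subtracted from the channel's
 * software buffer counter.
 */
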
/* Free buffers acquired from the buffer pool or which were meant to
 * be released in the pool
 */
static void dpaa2_eth_free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array,
				int count)
{
	struct device *dev = priv->net_dev->dev.parent;
	void *vaddr;
	int i;

	for (i = 0; i < count; i++) {
		vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
		dma_unmap_page(dev, buf_array[i], priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);
		free_pages((unsigned long)vaddr, 0);
	}
}

static void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv,
				  struct dpaa2_eth_channel *ch,
				  dma_addr_t addr)
{
	int retries = 0;
	int err;

	ch->recycled_bufs[ch->recycled_bufs_cnt++] = addr;
	if (ch->recycled_bufs_cnt < DPAA2_ETH_BUFS_PER_CMD)
		return;

	while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
					       ch->recycled_bufs,
					       ch->recycled_bufs_cnt)) == -EBUSY) {
		if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
			break;
		cpu_relax();
	}

	if (err) {
		dpaa2_eth_free_bufs(priv, ch->recycled_bufs, ch->recycled_bufs_cnt);
		ch->buf_count -= ch->recycled_bufs_cnt;
	}

	ch->recycled_bufs_cnt = 0;
}

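/* Recycled buffers are batched: a single QBMan release command covers
 * DPAA2_ETH_BUFS_PER_CMD buffers (7 per command, per the definition in
 * dpaa2-eth.h). Only when the per-channel array fills up is
 * dpaa2_io_service_release() actually issued; on persistent -EBUSY the
 * buffers are freed back to the kernel instead of the hardware pool.
 */
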
static int dpaa2_eth_xdp_flush(struct dpaa2_eth_priv *priv,
			       struct dpaa2_eth_fq *fq,
			       struct dpaa2_eth_xdp_fds *xdp_fds)
{
	int total_enqueued = 0, retries = 0, enqueued;
	struct dpaa2_eth_drv_stats *percpu_extras;
	int num_fds, err, max_retries;
	struct dpaa2_fd *fds;

	percpu_extras = this_cpu_ptr(priv->percpu_extras);

	/* try to enqueue all the FDs until the max number of retries is hit */
	fds = xdp_fds->fds;
	num_fds = xdp_fds->num;
	max_retries = num_fds * DPAA2_ETH_ENQUEUE_RETRIES;
	while (total_enqueued < num_fds && retries < max_retries) {
		err = priv->enqueue(priv, fq, &fds[total_enqueued],
				    0, num_fds - total_enqueued, &enqueued);
		if (err == -EBUSY) {
			percpu_extras->tx_portal_busy += ++retries;
			continue;
		}
		total_enqueued += enqueued;
	}
	xdp_fds->num = 0;

	return total_enqueued;
}

static void dpaa2_eth_xdp_tx_flush(struct dpaa2_eth_priv *priv,
				   struct dpaa2_eth_channel *ch,
				   struct dpaa2_eth_fq *fq)
{
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_fd *fds;
	int enqueued, i;

	percpu_stats = this_cpu_ptr(priv->percpu_stats);

	// enqueue the array of XDP_TX frames
	enqueued = dpaa2_eth_xdp_flush(priv, fq, &fq->xdp_tx_fds);

	/* update statistics */
	percpu_stats->tx_packets += enqueued;
	fds = fq->xdp_tx_fds.fds;
	for (i = 0; i < enqueued; i++) {
		percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
		ch->stats.xdp_tx++;
	}
	for (i = enqueued; i < fq->xdp_tx_fds.num; i++) {
		dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(&fds[i]));
		percpu_stats->tx_errors++;
		ch->stats.xdp_tx_err++;
	}
	fq->xdp_tx_fds.num = 0;
}

static void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv,
				  struct dpaa2_eth_channel *ch,
				  struct dpaa2_fd *fd,
				  void *buf_start, u16 queue_id)
{
	struct dpaa2_faead *faead;
	struct dpaa2_fd *dest_fd;
	struct dpaa2_eth_fq *fq;
	u32 ctrl, frc;

	/* Mark the egress frame hardware annotation area as valid */
	frc = dpaa2_fd_get_frc(fd);
	dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
	dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL);

	/* Instruct hardware to release the FD buffer directly into
	 * the buffer pool once transmission is completed, instead of
	 * sending a Tx confirmation frame to us
	 */
	ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV;
	faead = dpaa2_get_faead(buf_start, false);
	faead->ctrl = cpu_to_le32(ctrl);
	faead->conf_fqid = 0;

	fq = &priv->fq[queue_id];
	dest_fd = &fq->xdp_tx_fds.fds[fq->xdp_tx_fds.num++];
	memcpy(dest_fd, fd, sizeof(*dest_fd));

	if (fq->xdp_tx_fds.num < DEV_MAP_BULK_SIZE)
		return;

	dpaa2_eth_xdp_tx_flush(priv, ch, fq);
}

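/* XDP_TX frames are not enqueued one by one: they accumulate in the Tx FQ's
 * xdp_tx_fds array and are flushed either when DEV_MAP_BULK_SIZE frames have
 * gathered (see above) or at the end of the NAPI poll cycle, where
 * dpaa2_eth_poll() calls dpaa2_eth_xdp_tx_flush() for the XDP_TX case.
 */
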
static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
			     struct dpaa2_eth_channel *ch,
			     struct dpaa2_eth_fq *rx_fq,
			     struct dpaa2_fd *fd, void *vaddr)
{
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	struct bpf_prog *xdp_prog;
	struct xdp_buff xdp;
	u32 xdp_act = XDP_PASS;
	int err, offset;

	xdp_prog = READ_ONCE(ch->xdp.prog);
	if (!xdp_prog)
		goto out;

	offset = dpaa2_fd_get_offset(fd) - XDP_PACKET_HEADROOM;
	xdp_init_buff(&xdp, DPAA2_ETH_RX_BUF_RAW_SIZE - offset, &ch->xdp_rxq);
	xdp_prepare_buff(&xdp, vaddr + offset, XDP_PACKET_HEADROOM,
			 dpaa2_fd_get_len(fd), false);

	xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);

	/* xdp.data pointer may have changed */
	dpaa2_fd_set_offset(fd, xdp.data - vaddr);
	dpaa2_fd_set_len(fd, xdp.data_end - xdp.data);

	switch (xdp_act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		dpaa2_eth_xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid);
		break;
	default:
		bpf_warn_invalid_xdp_action(xdp_act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
		fallthrough;
	case XDP_DROP:
		dpaa2_eth_recycle_buf(priv, ch, addr);
		ch->stats.xdp_drop++;
		break;
	case XDP_REDIRECT:
		dma_unmap_page(priv->net_dev->dev.parent, addr,
			       priv->rx_buf_size, DMA_BIDIRECTIONAL);
		ch->buf_count--;

		/* Allow redirect use of full headroom */
		xdp.data_hard_start = vaddr;
		xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE;

		err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
		if (unlikely(err)) {
			addr = dma_map_page(priv->net_dev->dev.parent,
					    virt_to_page(vaddr), 0,
					    priv->rx_buf_size, DMA_BIDIRECTIONAL);
			if (unlikely(dma_mapping_error(priv->net_dev->dev.parent, addr))) {
				free_pages((unsigned long)vaddr, 0);
			} else {
				ch->buf_count++;
				dpaa2_eth_recycle_buf(priv, ch, addr);
			}
			ch->stats.xdp_drop++;
		} else {
			ch->stats.xdp_redirect++;
		}
		break;
	}

	ch->xdp.res |= xdp_act;
out:
	return xdp_act;
}

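/* Verdict summary for dpaa2_eth_run_xdp():
 *   XDP_PASS     - fall through to normal skb processing
 *   XDP_TX       - FD is batched towards the Tx FQ of the same flow
 *   XDP_REDIRECT - buffer is unmapped and handed to xdp_do_redirect()
 *   XDP_DROP (and aborted/unknown verdicts) - buffer recycled to the pool
 * For all non-PASS verdicts the Rx caller only updates counters and returns.
 */
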
static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel *ch,
					   const struct dpaa2_fd *fd,
					   void *fd_vaddr)
{
	u16 fd_offset = dpaa2_fd_get_offset(fd);
	struct dpaa2_eth_priv *priv = ch->priv;
	u32 fd_length = dpaa2_fd_get_len(fd);
	struct sk_buff *skb = NULL;
	unsigned int skb_len;

	if (fd_length > priv->rx_copybreak)
		return NULL;

	skb_len = fd_length + dpaa2_eth_needed_headroom(NULL);

	skb = napi_alloc_skb(&ch->napi, skb_len);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, dpaa2_eth_needed_headroom(NULL));
	skb_put(skb, fd_length);

	memcpy(skb->data, fd_vaddr + fd_offset, fd_length);

	dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(fd));

	return skb;
}

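/* Copybreak trade-off: for frames up to priv->rx_copybreak bytes it is
 * cheaper to memcpy() into a freshly allocated skb and immediately recycle
 * the original buffer than to unmap the page and build the skb around it.
 * The threshold is a runtime tunable (the default is believed to be 512
 * bytes via a DPAA2_ETH_DEFAULT_COPYBREAK constant in dpaa2-eth.h; exact
 * name and value assumed here, not shown in this file).
 */
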
/* Main Rx frame processing routine */
static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
			 struct dpaa2_eth_channel *ch,
			 const struct dpaa2_fd *fd,
			 struct dpaa2_eth_fq *fq)
{
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	u8 fd_format = dpaa2_fd_get_format(fd);
	void *vaddr;
	struct sk_buff *skb;
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_drv_stats *percpu_extras;
	struct device *dev = priv->net_dev->dev.parent;
	struct dpaa2_fas *fas;
	void *buf_data;
	u32 status = 0;
	u32 xdp_act;

	/* Tracing point */
	trace_dpaa2_rx_fd(priv->net_dev, fd);

	vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
	dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
				DMA_BIDIRECTIONAL);

	fas = dpaa2_get_fas(vaddr, false);
	buf_data = vaddr + dpaa2_fd_get_offset(fd);

	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	percpu_extras = this_cpu_ptr(priv->percpu_extras);

	if (fd_format == dpaa2_fd_single) {
		xdp_act = dpaa2_eth_run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr);
		if (xdp_act != XDP_PASS) {
			percpu_stats->rx_packets++;
			percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
			return;
		}

		skb = dpaa2_eth_copybreak(ch, fd, vaddr);
		if (!skb) {
			dma_unmap_page(dev, addr, priv->rx_buf_size,
				       DMA_BIDIRECTIONAL);
			skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
		}
	} else if (fd_format == dpaa2_fd_sg) {
		WARN_ON(priv->xdp_prog);

		dma_unmap_page(dev, addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);
		skb = dpaa2_eth_build_frag_skb(priv, ch, buf_data);
		free_pages((unsigned long)vaddr, 0);
		percpu_extras->rx_sg_frames++;
		percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
	} else {
		/* We don't support any other format */
		goto err_frame_format;
	}

	if (unlikely(!skb))
		goto err_build_skb;

	/* Get the timestamp value */
	if (priv->rx_tstamp) {
		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
		__le64 *ts = dpaa2_get_ts(vaddr, false);
		u64 ns;

		memset(shhwtstamps, 0, sizeof(*shhwtstamps));

		ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
		shhwtstamps->hwtstamp = ns_to_ktime(ns);
	}

	/* Check if we need to validate the L4 csum */
	if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
		status = le32_to_cpu(fas->status);
		dpaa2_eth_validate_rx_csum(priv, status, skb);
	}

	skb->protocol = eth_type_trans(skb, priv->net_dev);
	skb_record_rx_queue(skb, fq->flowid);

	percpu_stats->rx_packets++;
	percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);

	list_add_tail(&skb->list, ch->rx_list);

	return;

err_build_skb:
	dpaa2_eth_free_rx_fd(priv, fd, vaddr);
err_frame_format:
	percpu_stats->rx_dropped++;
}

/* Processing of Rx frames received on the error FQ
 * We check and print the error bits and then free the frame
 */
static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv,
			     struct dpaa2_eth_channel *ch,
			     const struct dpaa2_fd *fd,
			     struct dpaa2_eth_fq *fq __always_unused)
{
	struct device *dev = priv->net_dev->dev.parent;
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	u8 fd_format = dpaa2_fd_get_format(fd);
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_trap_item *trap_item;
	struct dpaa2_fapr *fapr;
	struct sk_buff *skb;
	void *buf_data;
	void *vaddr;

	vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
	dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
				DMA_BIDIRECTIONAL);

	buf_data = vaddr + dpaa2_fd_get_offset(fd);

	if (fd_format == dpaa2_fd_single) {
		dma_unmap_page(dev, addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);
		skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
	} else if (fd_format == dpaa2_fd_sg) {
		dma_unmap_page(dev, addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);
		skb = dpaa2_eth_build_frag_skb(priv, ch, buf_data);
		free_pages((unsigned long)vaddr, 0);
	} else {
		/* We don't support any other format */
		dpaa2_eth_free_rx_fd(priv, fd, vaddr);
		goto err_frame_format;
	}

	fapr = dpaa2_get_fapr(vaddr, false);
	trap_item = dpaa2_eth_dl_get_trap(priv, fapr);
	if (trap_item)
		devlink_trap_report(priv->devlink, skb, trap_item->trap_ctx,
				    &priv->devlink_port, NULL);
	consume_skb(skb);

err_frame_format:
	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	percpu_stats->rx_errors++;
}

/* Consume all frames pull-dequeued into the store. This is the simplest way to
 * make sure we don't accidentally issue another volatile dequeue which would
 * overwrite (leak) frames already in the store.
 *
 * Observance of NAPI budget is not our concern, leaving that to the caller.
 */
static int dpaa2_eth_consume_frames(struct dpaa2_eth_channel *ch,
				    struct dpaa2_eth_fq **src)
{
	struct dpaa2_eth_priv *priv = ch->priv;
	struct dpaa2_eth_fq *fq = NULL;
	struct dpaa2_dq *dq;
	const struct dpaa2_fd *fd;
	int cleaned = 0, retries = 0;
	int is_last;

	do {
		dq = dpaa2_io_store_next(ch->store, &is_last);
		if (unlikely(!dq)) {
			/* If we're here, we *must* have placed a
			 * volatile dequeue command, so keep reading through
			 * the store until we get some sort of valid response
			 * token (either a valid frame or an "empty dequeue")
			 */
			if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) {
				netdev_err_once(priv->net_dev,
						"Unable to read a valid dequeue response\n");
				return -ETIMEDOUT;
			}
			continue;
		}

		fd = dpaa2_dq_fd(dq);
		fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);

		fq->consume(priv, ch, fd, fq);
		cleaned++;
		retries = 0;
	} while (!is_last);

	if (!cleaned)
		return 0;

	fq->stats.frames += cleaned;
	ch->stats.frames += cleaned;

	/* A dequeue operation only pulls frames from a single queue
	 * into the store. Return the frame queue as an out param.
	 */
	if (src)
		*src = fq;

	return cleaned;
}

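/* Store contract, spelled out: a volatile dequeue command pulls at most one
 * FQ's frames into ch->store. dpaa2_io_store_next() may return NULL both
 * while the hardware response is still in flight and at the terminal "empty
 * dequeue" token, which is why the loop above keeps polling until is_last is
 * set and only treats repeated NULLs as a timeout after
 * DPAA2_ETH_SWP_BUSY_RETRIES attempts.
 */
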
static int dpaa2_eth_ptp_parse(struct sk_buff *skb,
			       u8 *msgtype, u8 *twostep, u8 *udp,
			       u16 *correction_offset,
			       u16 *origintimestamp_offset)
{
	unsigned int ptp_class;
	struct ptp_header *hdr;
	unsigned int type;
	u8 *base;

	ptp_class = ptp_classify_raw(skb);
	if (ptp_class == PTP_CLASS_NONE)
		return -EINVAL;

	hdr = ptp_parse_header(skb, ptp_class);
	if (!hdr)
		return -EINVAL;

	*msgtype = ptp_get_msgtype(hdr, ptp_class);
	*twostep = hdr->flag_field[0] & 0x2;

	type = ptp_class & PTP_CLASS_PMASK;
	if (type == PTP_CLASS_IPV4 ||
	    type == PTP_CLASS_IPV6)
		*udp = 1;
	else
		*udp = 0;

	base = skb_mac_header(skb);
	*correction_offset = (u8 *)&hdr->correction - base;
	*origintimestamp_offset = (u8 *)hdr + sizeof(struct ptp_header) - base;

	return 0;
}

/* Configure the egress frame annotation for timestamp update */
static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_eth_priv *priv,
				       struct dpaa2_fd *fd,
				       void *buf_start,
				       struct sk_buff *skb)
{
	struct ptp_tstamp origin_timestamp;
	struct dpni_single_step_cfg cfg;
	u8 msgtype, twostep, udp;
	struct dpaa2_faead *faead;
	struct dpaa2_fas *fas;
	struct timespec64 ts;
	u16 offset1, offset2;
	u32 ctrl, frc;
	__le64 *ns;
	u8 *data;

	/* Mark the egress frame annotation area as valid */
	frc = dpaa2_fd_get_frc(fd);
	dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);

	/* Set hardware annotation size */
	ctrl = dpaa2_fd_get_ctrl(fd);
	dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL);

	/* enable UPD (update prepended data) bit in FAEAD field of
	 * hardware frame annotation area
	 */
	ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
	faead = dpaa2_get_faead(buf_start, true);
	faead->ctrl = cpu_to_le32(ctrl);

	if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
		if (dpaa2_eth_ptp_parse(skb, &msgtype, &twostep, &udp,
					&offset1, &offset2) ||
		    msgtype != PTP_MSGTYPE_SYNC || twostep) {
			WARN_ONCE(1, "Bad packet for one-step timestamping\n");
			return;
		}

		/* Mark the frame annotation status as valid */
		frc = dpaa2_fd_get_frc(fd);
		dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FASV);

		/* Mark the PTP flag for one step timestamping */
		fas = dpaa2_get_fas(buf_start, true);
		fas->status = cpu_to_le32(DPAA2_FAS_PTP);

		dpaa2_ptp->caps.gettime64(&dpaa2_ptp->caps, &ts);
		ns = dpaa2_get_ts(buf_start, true);
		*ns = cpu_to_le64(timespec64_to_ns(&ts) /
				  DPAA2_PTP_CLK_PERIOD_NS);

		/* Update current time to PTP message originTimestamp field */
		ns_to_ptp_tstamp(&origin_timestamp, le64_to_cpup(ns));
		data = skb_mac_header(skb);
		*(__be16 *)(data + offset2) = htons(origin_timestamp.sec_msb);
		*(__be32 *)(data + offset2 + 2) =
			htonl(origin_timestamp.sec_lsb);
		*(__be32 *)(data + offset2 + 6) = htonl(origin_timestamp.nsec);

		cfg.en = 1;
		cfg.ch_update = udp;
		cfg.offset = offset1;
		cfg.peer_delay = 0;

		if (dpni_set_single_step_cfg(priv->mc_io, 0, priv->mc_token,
					     &cfg))
			WARN_ONCE(1, "Failed to set single step register");
	}
}

/* Create a frame descriptor based on a fragmented skb */
static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
				 struct sk_buff *skb,
				 struct dpaa2_fd *fd,
				 void **swa_addr)
{
	struct device *dev = priv->net_dev->dev.parent;
	void *sgt_buf = NULL;
	dma_addr_t addr;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	struct dpaa2_sg_entry *sgt;
	int i, err;
	int sgt_buf_size;
	struct scatterlist *scl, *crt_scl;
	int num_sg;
	int num_dma_bufs;
	struct dpaa2_eth_swa *swa;

	/* Create and map scatterlist.
	 * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
	 * to go beyond nr_frags+1.
	 * Note: We don't support chained scatterlists
	 */
	if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
		return -EINVAL;

	scl = kmalloc_array(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
	if (unlikely(!scl))
		return -ENOMEM;

	sg_init_table(scl, nr_frags + 1);
	num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
	if (unlikely(num_sg < 0)) {
		err = -EINVAL;
		goto dma_map_sg_failed;
	}
	num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
	if (unlikely(!num_dma_bufs)) {
		err = -ENOMEM;
		goto dma_map_sg_failed;
	}

	/* Prepare the HW SGT structure */
	sgt_buf_size = priv->tx_data_offset +
		       sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
	sgt_buf = napi_alloc_frag_align(sgt_buf_size, DPAA2_ETH_TX_BUF_ALIGN);
	if (unlikely(!sgt_buf)) {
		err = -ENOMEM;
		goto sgt_buf_alloc_failed;
	}
	memset(sgt_buf, 0, sgt_buf_size);

	sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);

	/* Fill in the HW SGT structure.
	 *
	 * sgt_buf is zeroed out, so the following fields are implicit
	 * in all sgt entries:
	 *   - offset is 0
	 *   - format is 'dpaa2_sg_single'
	 */
	for_each_sg(scl, crt_scl, num_dma_bufs, i) {
		dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
		dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
	}
	dpaa2_sg_set_final(&sgt[i - 1], true);

	/* Store the skb backpointer in the SGT buffer.
	 * Fit the scatterlist and the number of buffers alongside the
	 * skb backpointer in the software annotation area. We'll need
	 * all of them on Tx Conf.
	 */
	*swa_addr = (void *)sgt_buf;
	swa = (struct dpaa2_eth_swa *)sgt_buf;
	swa->type = DPAA2_ETH_SWA_SG;
	swa->sg.skb = skb;
	swa->sg.scl = scl;
	swa->sg.num_sg = num_sg;
	swa->sg.sgt_size = sgt_buf_size;

	/* Separately map the SGT buffer */
	addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, addr))) {
		err = -ENOMEM;
		goto dma_map_single_failed;
	}
	dpaa2_fd_set_offset(fd, priv->tx_data_offset);
	dpaa2_fd_set_format(fd, dpaa2_fd_sg);
	dpaa2_fd_set_addr(fd, addr);
	dpaa2_fd_set_len(fd, skb->len);
	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);

	return 0;

dma_map_single_failed:
	skb_free_frag(sgt_buf);
sgt_buf_alloc_failed:
	dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
dma_map_sg_failed:
	kfree(scl);
	return err;
}

/* Create a SG frame descriptor based on a linear skb.
 *
 * This function is used on the Tx path when the skb headroom is not large
 * enough for the HW requirements, thus instead of realloc-ing the skb we
 * create a SG frame descriptor with only one entry.
 */
static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
					    struct sk_buff *skb,
					    struct dpaa2_fd *fd,
					    void **swa_addr)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpaa2_eth_sgt_cache *sgt_cache;
	struct dpaa2_sg_entry *sgt;
	struct dpaa2_eth_swa *swa;
	dma_addr_t addr, sgt_addr;
	void *sgt_buf = NULL;
	int sgt_buf_size;
	int err;

	/* Prepare the HW SGT structure */
	sgt_cache = this_cpu_ptr(priv->sgt_cache);
	sgt_buf_size = priv->tx_data_offset + sizeof(struct dpaa2_sg_entry);

	if (sgt_cache->count == 0)
		sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN,
				  GFP_ATOMIC);
	else
		sgt_buf = sgt_cache->buf[--sgt_cache->count];
	if (unlikely(!sgt_buf))
		return -ENOMEM;

	sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
	sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);

	addr = dma_map_single(dev, skb->data, skb->len, DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, addr))) {
		err = -ENOMEM;
		goto data_map_failed;
	}

	/* Fill in the HW SGT structure */
	dpaa2_sg_set_addr(sgt, addr);
	dpaa2_sg_set_len(sgt, skb->len);
	dpaa2_sg_set_final(sgt, true);

	/* Store the skb backpointer in the SGT buffer */
	*swa_addr = (void *)sgt_buf;
	swa = (struct dpaa2_eth_swa *)sgt_buf;
	swa->type = DPAA2_ETH_SWA_SINGLE;
	swa->single.skb = skb;
	swa->single.sgt_size = sgt_buf_size;

	/* Separately map the SGT buffer */
	sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, sgt_addr))) {
		err = -ENOMEM;
		goto sgt_map_failed;
	}

	dpaa2_fd_set_offset(fd, priv->tx_data_offset);
	dpaa2_fd_set_format(fd, dpaa2_fd_sg);
	dpaa2_fd_set_addr(fd, sgt_addr);
	dpaa2_fd_set_len(fd, skb->len);
	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);

	return 0;

sgt_map_failed:
	dma_unmap_single(dev, addr, skb->len, DMA_BIDIRECTIONAL);
data_map_failed:
	if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE)
		kfree(sgt_buf);
	else
		sgt_cache->buf[sgt_cache->count++] = sgt_buf;

	return err;
}

/* Create a frame descriptor based on a linear skb */
static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
				     struct sk_buff *skb,
				     struct dpaa2_fd *fd,
				     void **swa_addr)
{
	struct device *dev = priv->net_dev->dev.parent;
	u8 *buffer_start, *aligned_start;
	struct dpaa2_eth_swa *swa;
	dma_addr_t addr;

	buffer_start = skb->data - dpaa2_eth_needed_headroom(skb);

	/* If there's enough room to align the FD address, do it.
	 * It will help hardware optimize accesses.
	 */
	aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
				  DPAA2_ETH_TX_BUF_ALIGN);
	if (aligned_start >= skb->head)
		buffer_start = aligned_start;

	/* Store a backpointer to the skb at the beginning of the buffer
	 * (in the private data area) such that we can release it
	 * upon Tx confirmation
	 */
	*swa_addr = (void *)buffer_start;
	swa = (struct dpaa2_eth_swa *)buffer_start;
	swa->type = DPAA2_ETH_SWA_SINGLE;
	swa->single.skb = skb;

	addr = dma_map_single(dev, buffer_start,
			      skb_tail_pointer(skb) - buffer_start,
			      DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, addr)))
		return -ENOMEM;

	dpaa2_fd_set_addr(fd, addr);
	dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
	dpaa2_fd_set_len(fd, skb->len);
	dpaa2_fd_set_format(fd, dpaa2_fd_single);
	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);

	return 0;
}

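/* Tx FD construction summary (the three builders above):
 *   - nonlinear skb                  -> dpaa2_eth_build_sg_fd()
 *   - linear skb, headroom too small -> dpaa2_eth_build_sg_fd_single_buf()
 *   - linear skb, enough headroom    -> dpaa2_eth_build_single_fd()
 * All three leave a software annotation (SWA) backpointer at *swa_addr so
 * that dpaa2_eth_free_tx_fd() can undo the DMA mappings and release the skb.
 */
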
/* FD freeing routine on the Tx path
 *
 * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
 * back-pointed to is also freed.
 * This can be called either from dpaa2_eth_tx_conf() or on the error path of
 * dpaa2_eth_tx().
 */
static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
				 struct dpaa2_eth_fq *fq,
				 const struct dpaa2_fd *fd, bool in_napi)
{
	struct device *dev = priv->net_dev->dev.parent;
	dma_addr_t fd_addr, sg_addr;
	struct sk_buff *skb = NULL;
	unsigned char *buffer_start;
	struct dpaa2_eth_swa *swa;
	u8 fd_format = dpaa2_fd_get_format(fd);
	u32 fd_len = dpaa2_fd_get_len(fd);
	struct dpaa2_eth_sgt_cache *sgt_cache;
	struct dpaa2_sg_entry *sgt;

	fd_addr = dpaa2_fd_get_addr(fd);
	buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
	swa = (struct dpaa2_eth_swa *)buffer_start;

	if (fd_format == dpaa2_fd_single) {
		if (swa->type == DPAA2_ETH_SWA_SINGLE) {
			skb = swa->single.skb;
			/* Accessing the skb buffer is safe before dma unmap,
			 * because we didn't map the actual skb shell.
			 */
			dma_unmap_single(dev, fd_addr,
					 skb_tail_pointer(skb) - buffer_start,
					 DMA_BIDIRECTIONAL);
		} else {
			WARN_ONCE(swa->type != DPAA2_ETH_SWA_XDP, "Wrong SWA type");
			dma_unmap_single(dev, fd_addr, swa->xdp.dma_size,
					 DMA_BIDIRECTIONAL);
		}
	} else if (fd_format == dpaa2_fd_sg) {
		if (swa->type == DPAA2_ETH_SWA_SG) {
			skb = swa->sg.skb;

			/* Unmap the scatterlist */
			dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg,
				     DMA_BIDIRECTIONAL);
			kfree(swa->sg.scl);

			/* Unmap the SGT buffer */
			dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
					 DMA_BIDIRECTIONAL);
		} else {
			skb = swa->single.skb;

			/* Unmap the SGT Buffer */
			dma_unmap_single(dev, fd_addr, swa->single.sgt_size,
					 DMA_BIDIRECTIONAL);

			sgt = (struct dpaa2_sg_entry *)(buffer_start +
							priv->tx_data_offset);
			sg_addr = dpaa2_sg_get_addr(sgt);
			dma_unmap_single(dev, sg_addr, skb->len, DMA_BIDIRECTIONAL);
		}
	} else {
		netdev_dbg(priv->net_dev, "Invalid FD format\n");
		return;
	}

	if (swa->type != DPAA2_ETH_SWA_XDP && in_napi) {
		fq->dq_frames++;
		fq->dq_bytes += fd_len;
	}

	if (swa->type == DPAA2_ETH_SWA_XDP) {
		xdp_return_frame(swa->xdp.xdpf);
		return;
	}

	/* Get the timestamp value */
	if (skb->cb[0] == TX_TSTAMP) {
		struct skb_shared_hwtstamps shhwtstamps;
		__le64 *ts = dpaa2_get_ts(buffer_start, true);
		u64 ns;

		memset(&shhwtstamps, 0, sizeof(shhwtstamps));

		ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
		shhwtstamps.hwtstamp = ns_to_ktime(ns);
		skb_tstamp_tx(skb, &shhwtstamps);
	} else if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
		mutex_unlock(&priv->onestep_tstamp_lock);
	}

	/* Free SGT buffer allocated on tx */
	if (fd_format != dpaa2_fd_single) {
		sgt_cache = this_cpu_ptr(priv->sgt_cache);
		if (swa->type == DPAA2_ETH_SWA_SG) {
			skb_free_frag(buffer_start);
		} else {
			if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE)
				kfree(buffer_start);
			else
				sgt_cache->buf[sgt_cache->count++] = buffer_start;
		}
	}

	/* Move on with skb release */
	napi_consume_skb(skb, in_napi);
}

static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb,
				  struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpaa2_fd fd;
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_drv_stats *percpu_extras;
	struct dpaa2_eth_fq *fq;
	struct netdev_queue *nq;
	u16 queue_mapping;
	unsigned int needed_headroom;
	u32 fd_len;
	u8 prio = 0;
	int err, i;
	void *swa;

	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	percpu_extras = this_cpu_ptr(priv->percpu_extras);

	needed_headroom = dpaa2_eth_needed_headroom(skb);

	/* We'll be holding a back-reference to the skb until Tx Confirmation;
	 * we don't want that overwritten by a concurrent Tx with a cloned skb.
	 */
	skb = skb_unshare(skb, GFP_ATOMIC);
	if (unlikely(!skb)) {
		/* skb_unshare() has already freed the skb */
		percpu_stats->tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* Setup the FD fields */
	memset(&fd, 0, sizeof(fd));

	if (skb_is_nonlinear(skb)) {
		err = dpaa2_eth_build_sg_fd(priv, skb, &fd, &swa);
		percpu_extras->tx_sg_frames++;
		percpu_extras->tx_sg_bytes += skb->len;
	} else if (skb_headroom(skb) < needed_headroom) {
		err = dpaa2_eth_build_sg_fd_single_buf(priv, skb, &fd, &swa);
		percpu_extras->tx_sg_frames++;
		percpu_extras->tx_sg_bytes += skb->len;
		percpu_extras->tx_converted_sg_frames++;
		percpu_extras->tx_converted_sg_bytes += skb->len;
	} else {
		err = dpaa2_eth_build_single_fd(priv, skb, &fd, &swa);
	}

	if (unlikely(err)) {
		percpu_stats->tx_dropped++;
		goto err_build_fd;
	}

	if (skb->cb[0])
		dpaa2_eth_enable_tx_tstamp(priv, &fd, swa, skb);

	/* Tracing point */
	trace_dpaa2_tx_fd(net_dev, &fd);

	/* TxConf FQ selection relies on queue id from the stack.
	 * In case of a forwarded frame from another DPNI interface, we choose
	 * a queue affined to the same core that processed the Rx frame
	 */
	queue_mapping = skb_get_queue_mapping(skb);

	if (net_dev->num_tc) {
		prio = netdev_txq_to_tc(net_dev, queue_mapping);
		/* Hardware interprets priority level 0 as being the highest,
		 * so we need to do a reverse mapping to the netdev tc index
		 */
		prio = net_dev->num_tc - prio - 1;
		/* We have only one FQ array entry for all Tx hardware queues
		 * with the same flow id (but different priority levels)
		 */
		queue_mapping %= dpaa2_eth_queue_count(priv);
	}
	fq = &priv->fq[queue_mapping];

	fd_len = dpaa2_fd_get_len(&fd);
	nq = netdev_get_tx_queue(net_dev, queue_mapping);
	netdev_tx_sent_queue(nq, fd_len);

	/* Everything that happens after this enqueues might race with
	 * the Tx confirmation callback for this frame
	 */
	for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
		err = priv->enqueue(priv, fq, &fd, prio, 1, NULL);
		if (err != -EBUSY)
			break;
	}
	percpu_extras->tx_portal_busy += i;
	if (unlikely(err < 0)) {
		percpu_stats->tx_errors++;
		/* Clean up everything, including freeing the skb */
		dpaa2_eth_free_tx_fd(priv, fq, &fd, false);
		netdev_tx_completed_queue(nq, 1, fd_len);
	} else {
		percpu_stats->tx_packets++;
		percpu_stats->tx_bytes += fd_len;
	}

	return NETDEV_TX_OK;

err_build_fd:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

static void dpaa2_eth_tx_onestep_tstamp(struct work_struct *work)
{
	struct dpaa2_eth_priv *priv = container_of(work, struct dpaa2_eth_priv,
						   tx_onestep_tstamp);
	struct sk_buff *skb;

	while (true) {
		skb = skb_dequeue(&priv->tx_skbs);
		if (!skb)
			return;

		/* Lock just before transmitting a one-step timestamping
		 * packet; the lock is released in dpaa2_eth_free_tx_fd(),
		 * either once the packet has been confirmed sent by hardware
		 * or on cleanup after a transmit failure.
		 */
		mutex_lock(&priv->onestep_tstamp_lock);
		__dpaa2_eth_tx(skb, priv->net_dev);
	}
}

static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	u8 msgtype, twostep, udp;
	u16 offset1, offset2;

	/* Utilize skb->cb[0] for timestamping request per skb */
	skb->cb[0] = 0;

	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && dpaa2_ptp) {
		if (priv->tx_tstamp_type == HWTSTAMP_TX_ON)
			skb->cb[0] = TX_TSTAMP;
		else if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC)
			skb->cb[0] = TX_TSTAMP_ONESTEP_SYNC;
	}

	/* TX for one-step timestamping PTP Sync packet */
	if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
		if (!dpaa2_eth_ptp_parse(skb, &msgtype, &twostep, &udp,
					 &offset1, &offset2))
			if (msgtype == PTP_MSGTYPE_SYNC && twostep == 0) {
				skb_queue_tail(&priv->tx_skbs, skb);
				queue_work(priv->dpaa2_ptp_wq,
					   &priv->tx_onestep_tstamp);
				return NETDEV_TX_OK;
			}
		/* Use two-step timestamping if not one-step timestamping
		 * PTP Sync packet
		 */
		skb->cb[0] = TX_TSTAMP;
	}

	/* TX for other packets */
	return __dpaa2_eth_tx(skb, net_dev);
}

/* Tx confirmation frame processing routine */
static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
			      struct dpaa2_eth_channel *ch __always_unused,
			      const struct dpaa2_fd *fd,
			      struct dpaa2_eth_fq *fq)
{
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_drv_stats *percpu_extras;
	u32 fd_len = dpaa2_fd_get_len(fd);
	u32 fd_errors;

	/* Tracing point */
	trace_dpaa2_tx_conf_fd(priv->net_dev, fd);

	percpu_extras = this_cpu_ptr(priv->percpu_extras);
	percpu_extras->tx_conf_frames++;
	percpu_extras->tx_conf_bytes += fd_len;

	/* Check frame errors in the FD field */
	fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
	dpaa2_eth_free_tx_fd(priv, fq, fd, true);

	if (likely(!fd_errors))
		return;

	if (net_ratelimit())
		netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
			   fd_errors);

	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	/* Tx-conf logically pertains to the egress path. */
	percpu_stats->tx_errors++;
}

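/* Note on byte-queue-limit pairing: netdev_tx_sent_queue() is called at
 * enqueue time in __dpaa2_eth_tx(), while the matching
 * netdev_tx_completed_queue() is deferred to NAPI context (see
 * dpaa2_eth_poll()), using the dq_frames/dq_bytes counters accumulated in
 * dpaa2_eth_free_tx_fd() for confirmed frames.
 */
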
static int dpaa2_eth_set_rx_vlan_filtering(struct dpaa2_eth_priv *priv,
					   bool enable)
{
	int err;

	err = dpni_enable_vlan_filter(priv->mc_io, 0, priv->mc_token, enable);
	if (err) {
		netdev_err(priv->net_dev,
			   "dpni_enable_vlan_filter failed\n");
		return err;
	}

	return 0;
}

static int dpaa2_eth_set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
{
	int err;

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_RX_L3_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev,
			   "dpni_set_offload(RX_L3_CSUM) failed\n");
		return err;
	}

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_RX_L4_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev,
			   "dpni_set_offload(RX_L4_CSUM) failed\n");
		return err;
	}

	return 0;
}

static int dpaa2_eth_set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
{
	int err;

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_TX_L3_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n");
		return err;
	}

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_TX_L4_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n");
		return err;
	}

	return 0;
}

/* Perform a single release command to add buffers
 * to the specified buffer pool
 */
static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv,
			      struct dpaa2_eth_channel *ch, u16 bpid)
{
	struct device *dev = priv->net_dev->dev.parent;
	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
	struct page *page;
	dma_addr_t addr;
	int retries = 0;
	int i, err;

	for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
		/* Allocate one page for each Rx buffer. WRIOP sees
		 * the entire page except for a tailroom reserved for
		 * skb shared info
		 */
		page = dev_alloc_pages(0);
		if (!page)
			goto err_alloc;

		addr = dma_map_page(dev, page, 0, priv->rx_buf_size,
				    DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(dev, addr)))
			goto err_map;

		buf_array[i] = addr;

		/* tracing point */
		trace_dpaa2_eth_buf_seed(priv->net_dev,
					 page, DPAA2_ETH_RX_BUF_RAW_SIZE,
					 addr, priv->rx_buf_size,
					 bpid);
	}

release_bufs:
	/* In case the portal is busy, retry until successful */
	while ((err = dpaa2_io_service_release(ch->dpio, bpid,
					       buf_array, i)) == -EBUSY) {
		if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
			break;
		cpu_relax();
	}

	/* If release command failed, clean up and bail out;
	 * not much else we can do about it
	 */
	if (err) {
		dpaa2_eth_free_bufs(priv, buf_array, i);
		return 0;
	}

	return i;

err_map:
	__free_pages(page, 0);
err_alloc:
	/* If we managed to allocate at least some buffers,
	 * release them to hardware
	 */
	if (i)
		goto release_bufs;

	return 0;
}

static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
{
	int i, j;
	int new_count;

	for (j = 0; j < priv->num_channels; j++) {
		for (i = 0; i < DPAA2_ETH_NUM_BUFS;
		     i += DPAA2_ETH_BUFS_PER_CMD) {
			new_count = dpaa2_eth_add_bufs(priv, priv->channel[j], bpid);
			priv->channel[j]->buf_count += new_count;

			if (new_count < DPAA2_ETH_BUFS_PER_CMD)
				return -ENOMEM;
		}
	}

	return 0;
}

/*
 * Drain the specified number of buffers from the DPNI's private buffer pool.
 * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
 */
static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int count)
{
	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
	int retries = 0;
	int ret;

	do {
		ret = dpaa2_io_service_acquire(NULL, priv->bpid,
					       buf_array, count);
		if (ret < 0) {
			if (ret == -EBUSY &&
			    retries++ < DPAA2_ETH_SWP_BUSY_RETRIES)
				continue;
			netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
			return;
		}
		dpaa2_eth_free_bufs(priv, buf_array, ret);
		retries = 0;
	} while (ret);
}

static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv)
{
	int i;

	dpaa2_eth_drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
	dpaa2_eth_drain_bufs(priv, 1);

	for (i = 0; i < priv->num_channels; i++)
		priv->channel[i]->buf_count = 0;
}

/* Function is called from softirq context only, so we don't need to guard
 * the access to percpu count
 */
static int dpaa2_eth_refill_pool(struct dpaa2_eth_priv *priv,
				 struct dpaa2_eth_channel *ch,
				 u16 bpid)
{
	int new_count;

	if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
		return 0;

	do {
		new_count = dpaa2_eth_add_bufs(priv, ch, bpid);
		if (unlikely(!new_count)) {
			/* Out of memory; abort for now, we'll try later on */
			break;
		}
		ch->buf_count += new_count;
	} while (ch->buf_count < DPAA2_ETH_NUM_BUFS);

	if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
		return -ENOMEM;

	return 0;
}

static void dpaa2_eth_sgt_cache_drain(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_sgt_cache *sgt_cache;
	u16 count;
	int k, i;

	for_each_possible_cpu(k) {
		sgt_cache = per_cpu_ptr(priv->sgt_cache, k);
		count = sgt_cache->count;

		for (i = 0; i < count; i++)
			kfree(sgt_cache->buf[i]);
		sgt_cache->count = 0;
	}
}

static int dpaa2_eth_pull_channel(struct dpaa2_eth_channel *ch)
{
	int err;
	int dequeues = -1;

	/* Retry while portal is busy */
	do {
		err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id,
						    ch->store);
		dequeues++;
		cpu_relax();
	} while (err == -EBUSY && dequeues < DPAA2_ETH_SWP_BUSY_RETRIES);

	ch->stats.dequeue_portal_busy += dequeues;
	if (unlikely(err))
		ch->stats.pull_err++;

	return err;
}

/* NAPI poll routine
 *
 * Frames are dequeued from the QMan channel associated with this NAPI context.
 * Rx, Tx confirmation and (if configured) Rx error frames all count
 * towards the NAPI budget.
 */
static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
{
	struct dpaa2_eth_channel *ch;
	struct dpaa2_eth_priv *priv;
	int rx_cleaned = 0, txconf_cleaned = 0;
	struct dpaa2_eth_fq *fq, *txc_fq = NULL;
	struct netdev_queue *nq;
	int store_cleaned, work_done;
	struct list_head rx_list;
	int retries = 0;
	u16 flowid;
	int err;

	ch = container_of(napi, struct dpaa2_eth_channel, napi);
	ch->xdp.res = 0;
	priv = ch->priv;

	INIT_LIST_HEAD(&rx_list);
	ch->rx_list = &rx_list;

	do {
		err = dpaa2_eth_pull_channel(ch);
		if (unlikely(err))
			break;

		/* Refill pool if appropriate */
		dpaa2_eth_refill_pool(priv, ch, priv->bpid);

		store_cleaned = dpaa2_eth_consume_frames(ch, &fq);
		if (store_cleaned <= 0)
			break;
		if (fq->type == DPAA2_RX_FQ) {
			rx_cleaned += store_cleaned;
			flowid = fq->flowid;
		} else {
			txconf_cleaned += store_cleaned;
			/* We have a single Tx conf FQ on this channel */
			txc_fq = fq;
		}

		/* If we either consumed the whole NAPI budget with Rx frames
		 * or we reached the Tx confirmations threshold, we're done.
		 */
		if (rx_cleaned >= budget ||
		    txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) {
			work_done = budget;
			goto out;
		}
	} while (store_cleaned);

	/* We didn't consume the entire budget, so finish napi and
	 * re-enable data availability notifications
	 */
	napi_complete_done(napi, rx_cleaned);
	do {
		err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
		cpu_relax();
	} while (err == -EBUSY && retries++ < DPAA2_ETH_SWP_BUSY_RETRIES);
	WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
		  ch->nctx.desired_cpu);

	work_done = max(rx_cleaned, 1);

out:
	netif_receive_skb_list(ch->rx_list);

	if (txc_fq && txc_fq->dq_frames) {
		nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
		netdev_tx_completed_queue(nq, txc_fq->dq_frames,
					  txc_fq->dq_bytes);
		txc_fq->dq_frames = 0;
		txc_fq->dq_bytes = 0;
	}

	if (ch->xdp.res & XDP_REDIRECT)
		xdp_do_flush_map();
	else if (rx_cleaned && ch->xdp.res & XDP_TX)
		dpaa2_eth_xdp_tx_flush(priv, ch, &priv->fq[flowid]);

	return work_done;
}

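/* Budget accounting subtlety: only Rx frames are weighed against the NAPI
 * budget; Tx confirmations are bounded separately by
 * DPAA2_ETH_TXCONF_PER_NAPI. When neither limit is hit, work_done is reported
 * as max(rx_cleaned, 1), so a poll that only drained Tx confirmations still
 * counts as progress.
 */
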
static void dpaa2_eth_enable_ch_napi(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_channel *ch;
	int i;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		napi_enable(&ch->napi);
	}
}

static void dpaa2_eth_disable_ch_napi(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_channel *ch;
	int i;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		napi_disable(&ch->napi);
	}
}

void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv,
			       bool tx_pause, bool pfc)
{
	struct dpni_taildrop td = {0};
	struct dpaa2_eth_fq *fq;
	int i, err;

	/* FQ taildrop: threshold is in bytes, per frame queue. Enabled if
	 * flow control is disabled (as it might interfere with either the
	 * buffer pool depletion trigger for pause frames or with the group
	 * congestion trigger for PFC frames)
	 */
	td.enable = !tx_pause;
	if (priv->rx_fqtd_enabled == td.enable)
		goto set_cgtd;

	td.threshold = DPAA2_ETH_FQ_TAILDROP_THRESH;
	td.units = DPNI_CONGESTION_UNIT_BYTES;

	for (i = 0; i < priv->num_fqs; i++) {
		fq = &priv->fq[i];
		if (fq->type != DPAA2_RX_FQ)
			continue;
		err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
					DPNI_CP_QUEUE, DPNI_QUEUE_RX,
					fq->tc, fq->flowid, &td);
		if (err) {
			netdev_err(priv->net_dev,
				   "dpni_set_taildrop(FQ) failed\n");
			return;
		}
	}

	priv->rx_fqtd_enabled = td.enable;

set_cgtd:
	/* Congestion group taildrop: threshold is in frames, per group
	 * of FQs belonging to the same traffic class
	 * Enabled if general Tx pause disabled or if PFCs are enabled
	 * (congestion group threshold for PFC generation is lower than the
	 * CG taildrop threshold, so it won't interfere with it; we also
	 * want frames in non-PFC enabled traffic classes to be kept in check)
	 */
	td.enable = !tx_pause || pfc;
	if (priv->rx_cgtd_enabled == td.enable)
		return;

	td.threshold = DPAA2_ETH_CG_TAILDROP_THRESH(priv);
	td.units = DPNI_CONGESTION_UNIT_FRAMES;
	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
		err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
					DPNI_CP_GROUP, DPNI_QUEUE_RX,
					i, 0, &td);
		if (err) {
			netdev_err(priv->net_dev,
				   "dpni_set_taildrop(CG) failed\n");
			return;
		}
	}

	priv->rx_cgtd_enabled = td.enable;
}

static int dpaa2_eth_link_state_update(struct dpaa2_eth_priv *priv)
{
	struct dpni_link_state state = {0};
	bool tx_pause;
	int err;

	err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
	if (unlikely(err)) {
		netdev_err(priv->net_dev,
			   "dpni_get_link_state() failed\n");
		return err;
	}

	/* If Tx pause frame settings have changed, we need to update
	 * Rx FQ taildrop configuration as well. We configure taildrop
	 * only when pause frame generation is disabled.
	 */
	tx_pause = dpaa2_eth_tx_pause_enabled(state.options);
	dpaa2_eth_set_rx_taildrop(priv, tx_pause, priv->pfc_enabled);

	/* When we manage the MAC/PHY using phylink there is no need
	 * to manually update the netif_carrier.
	 */
	if (dpaa2_eth_is_type_phy(priv))
		goto out;

	/* Check link state; speed / duplex changes are not treated yet */
	if (priv->link_state.up == state.up)
		goto out;

	if (state.up) {
		netif_carrier_on(priv->net_dev);
		netif_tx_start_all_queues(priv->net_dev);
	} else {
		netif_tx_stop_all_queues(priv->net_dev);
		netif_carrier_off(priv->net_dev);
	}

	netdev_info(priv->net_dev, "Link Event: state %s\n",
		    state.up ? "up" : "down");

out:
	priv->link_state = state;

	return 0;
}

static int dpaa2_eth_open(struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int err;

	err = dpaa2_eth_seed_pool(priv, priv->bpid);
	if (err) {
		/* Not much to do; the buffer pool, though not filled up,
		 * may still contain some buffers which would enable us
		 * to limp on.
		 */
		netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
			   priv->dpbp_dev->obj_desc.id, priv->bpid);
	}

	if (!dpaa2_eth_is_type_phy(priv)) {
		/* We'll only start the txqs when the link is actually ready;
		 * make sure we don't race against the link up notification,
		 * which may come immediately after dpni_enable();
		 */
		netif_tx_stop_all_queues(net_dev);

		/* Also, explicitly set carrier off, otherwise
		 * netif_carrier_ok() will return true and cause 'ip link show'
		 * to report the LOWER_UP flag, even though the link
		 * notification wasn't even received.
		 */
		netif_carrier_off(net_dev);
	}
	dpaa2_eth_enable_ch_napi(priv);

	err = dpni_enable(priv->mc_io, 0, priv->mc_token);
	if (err < 0) {
		netdev_err(net_dev, "dpni_enable() failed\n");
		goto enable_err;
	}

	if (dpaa2_eth_is_type_phy(priv))
		phylink_start(priv->mac->phylink);

	return 0;

enable_err:
	dpaa2_eth_disable_ch_napi(priv);
	dpaa2_eth_drain_pool(priv);
	return err;
}

/* Total number of in-flight frames on ingress queues */
static u32 dpaa2_eth_ingress_fq_count(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_fq *fq;
	u32 fcnt = 0, bcnt = 0, total = 0;
	int i, err;

	for (i = 0; i < priv->num_fqs; i++) {
		fq = &priv->fq[i];
		err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
		if (err) {
			netdev_warn(priv->net_dev, "query_fq_count failed");
			break;
		}
		total += fcnt;
	}

	return total;
}

static void dpaa2_eth_wait_for_ingress_fq_empty(struct dpaa2_eth_priv *priv)
{
	int retries = 10;
	u32 pending;

	do {
		pending = dpaa2_eth_ingress_fq_count(priv);
		if (pending)
			msleep(100);
	} while (pending && --retries);
}

#define DPNI_TX_PENDING_VER_MAJOR	7
#define DPNI_TX_PENDING_VER_MINOR	13
static void dpaa2_eth_wait_for_egress_fq_empty(struct dpaa2_eth_priv *priv)
{
	union dpni_statistics stats;
	int retries = 10;
	int err;

	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_TX_PENDING_VER_MAJOR,
				   DPNI_TX_PENDING_VER_MINOR) < 0)
		goto out;

	do {
		err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token, 6,
					  &stats);
		if (err)
			goto out;
		if (stats.page_6.tx_pending_frames == 0)
			return;
	} while (--retries);

out:
	msleep(500);
}

static int dpaa2_eth_stop(struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int dpni_enabled = 0;
	int retries = 10;

	if (dpaa2_eth_is_type_phy(priv)) {
		phylink_stop(priv->mac->phylink);
	} else {
		netif_tx_stop_all_queues(net_dev);
		netif_carrier_off(net_dev);
	}

	/* On dpni_disable(), the MC firmware will:
	 * - stop MAC Rx and wait for all Rx frames to be enqueued to software
	 * - cut off WRIOP dequeues from egress FQs and wait until transmission
	 * of all in flight Tx frames is finished (and corresponding Tx conf
	 * frames are enqueued back to software)
	 *
	 * Before calling dpni_disable(), we wait for all Tx frames to arrive
	 * on WRIOP. After it finishes, wait until all remaining frames on Rx
	 * and Tx conf queues are consumed on NAPI poll.
	 */
	dpaa2_eth_wait_for_egress_fq_empty(priv);

	do {
		dpni_disable(priv->mc_io, 0, priv->mc_token);
		dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
		if (dpni_enabled)
			/* Allow the hardware some slack */
			msleep(100);
	} while (dpni_enabled && --retries);
	if (!retries) {
		netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
		/* Must go on and disable NAPI nonetheless, so we don't crash at
		 * the next "ifconfig up"
		 */
	}

	dpaa2_eth_wait_for_ingress_fq_empty(priv);
	dpaa2_eth_disable_ch_napi(priv);

	/* Empty the buffer pool */
	dpaa2_eth_drain_pool(priv);

	/* Empty the Scatter-Gather Buffer cache */
	dpaa2_eth_sgt_cache_drain(priv);

	return 0;
}

static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct device *dev = net_dev->dev.parent;
	int err;

	err = eth_mac_addr(net_dev, addr);
	if (err < 0) {
		dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
		return err;
	}

	err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
					net_dev->dev_addr);
	if (err) {
		dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
		return err;
	}

	return 0;
}

/* Fill in counters maintained by the GPP driver. These may be different from
 * the hardware counters obtained by ethtool.
 */
static void dpaa2_eth_get_stats(struct net_device *net_dev,
				struct rtnl_link_stats64 *stats)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct rtnl_link_stats64 *percpu_stats;
	u64 *cpustats;
	u64 *netstats = (u64 *)stats;
	int i, j;
	int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);

	for_each_possible_cpu(i) {
		percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
		cpustats = (u64 *)percpu_stats;
		for (j = 0; j < num; j++)
			netstats[j] += cpustats[j];
	}
}

/* Copy mac unicast addresses from @net_dev to @priv.
 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
 */
static void dpaa2_eth_add_uc_hw_addr(const struct net_device *net_dev,
				     struct dpaa2_eth_priv *priv)
{
	struct netdev_hw_addr *ha;
	int err;

	netdev_for_each_uc_addr(ha, net_dev) {
		err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
					ha->addr);
		if (err)
			netdev_warn(priv->net_dev,
				    "Could not add ucast MAC %pM to the filtering table (err %d)\n",
				    ha->addr, err);
	}
}

/* Copy mac multicast addresses from @net_dev to @priv
 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
 */
static void dpaa2_eth_add_mc_hw_addr(const struct net_device *net_dev,
				     struct dpaa2_eth_priv *priv)
{
	struct netdev_hw_addr *ha;
	int err;

	netdev_for_each_mc_addr(ha, net_dev) {
		err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
					ha->addr);
		if (err)
			netdev_warn(priv->net_dev,
				    "Could not add mcast MAC %pM to the filtering table (err %d)\n",
				    ha->addr, err);
	}
}

static int dpaa2_eth_rx_add_vid(struct net_device *net_dev,
				__be16 vlan_proto, u16 vid)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int err;

	err = dpni_add_vlan_id(priv->mc_io, 0, priv->mc_token,
			       vid, 0, 0, 0);

	if (err) {
		netdev_warn(priv->net_dev,
			    "Could not add the vlan id %u\n",
			    vid);
		return err;
	}

	return 0;
}

static int dpaa2_eth_rx_kill_vid(struct net_device *net_dev,
				 __be16 vlan_proto, u16 vid)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int err;

	err = dpni_remove_vlan_id(priv->mc_io, 0, priv->mc_token, vid);

	if (err) {
		netdev_warn(priv->net_dev,
			    "Could not remove the vlan id %u\n",
			    vid);
		return err;
	}

	return 0;
}

static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int uc_count = netdev_uc_count(net_dev);
	int mc_count = netdev_mc_count(net_dev);
	u8 max_mac = priv->dpni_attrs.mac_filter_entries;
	u32 options = priv->dpni_attrs.options;
	u16 mc_token = priv->mc_token;
	struct fsl_mc_io *mc_io = priv->mc_io;
	int err;

	/* Basic sanity checks; these probably indicate a misconfiguration */
	if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0)
		netdev_info(net_dev,
			    "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
			    max_mac);

	/* Force promiscuous if the uc or mc counts exceed our capabilities. */
	if (uc_count > max_mac) {
		netdev_info(net_dev,
			    "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
			    uc_count, max_mac);
		goto force_promisc;
	}
	if (mc_count + uc_count > max_mac) {
		netdev_info(net_dev,
			    "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n",
			    uc_count + mc_count, max_mac);
		goto force_mc_promisc;
	}

	/* Adjust promisc settings due to flag combinations */
	if (net_dev->flags & IFF_PROMISC)
		goto force_promisc;
	if (net_dev->flags & IFF_ALLMULTI) {
		/* First, rebuild unicast filtering table. This should be done
		 * in promisc mode, in order to avoid frame loss while we
		 * progressively add entries to the table.
		 * We don't know whether we had been in promisc already, and
		 * making an MC call to find out is expensive; so set uc promisc
		 * nonetheless.
		 */
		err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
		if (err)
			netdev_warn(net_dev, "Can't set uc promisc\n");

		/* Actual uc table reconstruction. */
		err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
		if (err)
			netdev_warn(net_dev, "Can't clear uc filters\n");
		dpaa2_eth_add_uc_hw_addr(net_dev, priv);

		/* Finally, clear uc promisc and set mc promisc as requested. */
		err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
		if (err)
			netdev_warn(net_dev, "Can't clear uc promisc\n");
		goto force_mc_promisc;
	}

	/* Neither unicast, nor multicast promisc will be on... eventually.
	 * For now, rebuild mac filtering tables while forcing both of them on.
	 */
	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
	if (err)
		netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
	if (err)
		netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);

	/* Actual mac filtering tables reconstruction */
	err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
	if (err)
		netdev_warn(net_dev, "Can't clear mac filters\n");
	dpaa2_eth_add_mc_hw_addr(net_dev, priv);
	dpaa2_eth_add_uc_hw_addr(net_dev, priv);

	/* Now we can clear both ucast and mcast promisc, without risking
	 * to drop legitimate frames anymore.
	 */
	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
	if (err)
		netdev_warn(net_dev, "Can't clear ucast promisc\n");
	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
	if (err)
		netdev_warn(net_dev, "Can't clear mcast promisc\n");

	return;

force_promisc:
	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
	if (err)
		netdev_warn(net_dev, "Can't set ucast promisc\n");
force_mc_promisc:
	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
	if (err)
		netdev_warn(net_dev, "Can't set mcast promisc\n");
}

static int dpaa2_eth_set_features(struct net_device *net_dev,
				  netdev_features_t features)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	netdev_features_t changed = features ^ net_dev->features;
	bool enable;
	int err;

	if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
		enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER);
		err = dpaa2_eth_set_rx_vlan_filtering(priv, enable);
		if (err)
			return err;
	}

	if (changed & NETIF_F_RXCSUM) {
		enable = !!(features & NETIF_F_RXCSUM);
		err = dpaa2_eth_set_rx_csum(priv, enable);
		if (err)
			return err;
	}

	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
		enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
		err = dpaa2_eth_set_tx_csum(priv, enable);
		if (err)
			return err;
	}

	return 0;
}

*dev
, struct ifreq
*rq
, int cmd
)
2174 struct dpaa2_eth_priv
*priv
= netdev_priv(dev
);
2175 struct hwtstamp_config config
;
2180 if (copy_from_user(&config
, rq
->ifr_data
, sizeof(config
)))
2183 switch (config
.tx_type
) {
2184 case HWTSTAMP_TX_OFF
:
2185 case HWTSTAMP_TX_ON
:
2186 case HWTSTAMP_TX_ONESTEP_SYNC
:
2187 priv
->tx_tstamp_type
= config
.tx_type
;
2193 if (config
.rx_filter
== HWTSTAMP_FILTER_NONE
) {
2194 priv
->rx_tstamp
= false;
2196 priv
->rx_tstamp
= true;
2197 /* TS is set for all frame types, not only those requested */
2198 config
.rx_filter
= HWTSTAMP_FILTER_ALL
;
2201 return copy_to_user(rq
->ifr_data
, &config
, sizeof(config
)) ?
2205 static int dpaa2_eth_ioctl(struct net_device
*dev
, struct ifreq
*rq
, int cmd
)
2207 struct dpaa2_eth_priv
*priv
= netdev_priv(dev
);
2209 if (cmd
== SIOCSHWTSTAMP
)
2210 return dpaa2_eth_ts_ioctl(dev
, rq
, cmd
);
2212 if (dpaa2_eth_is_type_phy(priv
))
2213 return phylink_mii_ioctl(priv
->mac
->phylink
, rq
, cmd
);
static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu)
{
	int mfl, linear_mfl;

	mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
	linear_mfl = priv->rx_buf_size - DPAA2_ETH_RX_HWA_SIZE -
		     dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM;

	if (mfl > linear_mfl) {
		netdev_warn(priv->net_dev, "Maximum MTU for XDP is %d\n",
			    linear_mfl - VLAN_ETH_HLEN);
		return false;
	}

	return true;
}

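/* Worked example (illustrative numbers only): XDP_PACKET_HEADROOM is 256, so
 * with a single-page Rx buffer the largest frame that still fits linearly is
 *   linear_mfl = rx_buf_size - DPAA2_ETH_RX_HWA_SIZE (hardware annotation)
 *                - dpaa2_eth_rx_head_room(priv) - 256.
 * Any MTU whose L2 frame size (DPAA2_ETH_L2_MAX_FRM) exceeds that would force
 * scatter/gather Rx, which XDP cannot handle, so it is rejected.
 */
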
static int dpaa2_eth_set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp)
{
	int mfl, err;

	/* We enforce a maximum Rx frame length based on MTU only if we have
	 * an XDP program attached (in order to avoid Rx S/G frames).
	 * Otherwise, we accept all incoming frames as long as they are not
	 * larger than maximum size supported in hardware
	 */
	if (has_xdp)
		mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
	else
		mfl = DPAA2_ETH_MFL;

	err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, mfl);
	if (err) {
		netdev_err(priv->net_dev, "dpni_set_max_frame_length failed\n");
		return err;
	}

	return 0;
}
static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct dpaa2_eth_priv *priv = netdev_priv(dev);
	int err;

	if (!priv->xdp_prog)
		goto out;

	if (!xdp_mtu_valid(priv, new_mtu))
		return -EINVAL;

	err = dpaa2_eth_set_rx_mfl(priv, new_mtu, true);
	if (err)
		return err;

out:
	dev->mtu = new_mtu;
	return 0;
}
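
/* When an XDP program is attached, XDP_PACKET_HEADROOM extra bytes are
 * reserved in front of the frame data so bpf_xdp_adjust_head() can grow
 * headers in place; the reservation is dropped again when the program is
 * removed.
 */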
static int dpaa2_eth_update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp)
{
	struct dpni_buffer_layout buf_layout = {0};
	int err;

	err = dpni_get_buffer_layout(priv->mc_io, 0, priv->mc_token,
				     DPNI_QUEUE_RX, &buf_layout);
	if (err) {
		netdev_err(priv->net_dev, "dpni_get_buffer_layout failed\n");
		return err;
	}

	/* Reserve extra headroom for XDP header size changes */
	buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv) +
				    (has_xdp ? XDP_PACKET_HEADROOM : 0);
	buf_layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
				     DPNI_QUEUE_RX, &buf_layout);
	if (err) {
		netdev_err(priv->net_dev, "dpni_set_buffer_layout failed\n");
		return err;
	}

	return 0;
}
static int dpaa2_eth_setup_xdp(struct net_device *dev, struct bpf_prog *prog)
{
	struct dpaa2_eth_priv *priv = netdev_priv(dev);
	struct dpaa2_eth_channel *ch;
	struct bpf_prog *old;
	bool up, need_update;
	int i, err;

	if (prog && !xdp_mtu_valid(priv, dev->mtu))
		return -EINVAL;

	if (prog)
		bpf_prog_add(prog, priv->num_channels);

	up = netif_running(dev);
	need_update = (!!priv->xdp_prog != !!prog);

	if (up)
		dpaa2_eth_stop(dev);

	/* While in xdp mode, enforce a maximum Rx frame size based on MTU.
	 * Also, when switching between xdp/non-xdp modes we need to reconfigure
	 * our Rx buffer layout. Buffer pool was drained on dpaa2_eth_stop,
	 * so we are sure no old format buffers will be used from now on.
	 */
	if (need_update) {
		err = dpaa2_eth_set_rx_mfl(priv, dev->mtu, !!prog);
		if (err)
			goto out_err;
		err = dpaa2_eth_update_rx_buffer_headroom(priv, !!prog);
		if (err)
			goto out_err;
	}

	old = xchg(&priv->xdp_prog, prog);
	if (old)
		bpf_prog_put(old);

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		old = xchg(&ch->xdp.prog, prog);
		if (old)
			bpf_prog_put(old);
	}

	if (up) {
		err = dpaa2_eth_open(dev);
		if (err)
			return err;
	}

	return 0;

out_err:
	if (prog)
		bpf_prog_sub(prog, priv->num_channels);
	if (up)
		dpaa2_eth_open(dev);

	return err;
}
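
/* Example (illustrative only): program attach/detach arrives here through
 * the .ndo_bpf callback, e.g. from user space via
 *   ip link set dev <iface> xdp obj <prog.o> sec xdp
 * where <iface> and <prog.o> are placeholders.
 */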
static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return dpaa2_eth_setup_xdp(dev, xdp->prog);
	default:
		return -EINVAL;
	}

	return 0;
}
static int dpaa2_eth_xdp_create_fd(struct net_device *net_dev,
				   struct xdp_frame *xdpf,
				   struct dpaa2_fd *fd)
{
	struct device *dev = net_dev->dev.parent;
	unsigned int needed_headroom;
	struct dpaa2_eth_swa *swa;
	void *buffer_start, *aligned_start;
	dma_addr_t addr;

	/* We require a minimum headroom to be able to transmit the frame.
	 * Otherwise return an error and let the original net_device handle it
	 */
	needed_headroom = dpaa2_eth_needed_headroom(NULL);
	if (xdpf->headroom < needed_headroom)
		return -EINVAL;

	/* Setup the FD fields */
	memset(fd, 0, sizeof(*fd));

	/* Align FD address, if possible */
	buffer_start = xdpf->data - needed_headroom;
	aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
				  DPAA2_ETH_TX_BUF_ALIGN);
	if (aligned_start >= xdpf->data - xdpf->headroom)
		buffer_start = aligned_start;

	swa = (struct dpaa2_eth_swa *)buffer_start;
	/* fill in necessary fields here */
	swa->type = DPAA2_ETH_SWA_XDP;
	swa->xdp.dma_size = xdpf->data + xdpf->len - buffer_start;
	swa->xdp.xdpf = xdpf;

	addr = dma_map_single(dev, buffer_start,
			      swa->xdp.dma_size,
			      DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, addr)))
		return -ENOMEM;

	dpaa2_fd_set_addr(fd, addr);
	dpaa2_fd_set_offset(fd, xdpf->data - buffer_start);
	dpaa2_fd_set_len(fd, xdpf->len);
	dpaa2_fd_set_format(fd, dpaa2_fd_single);
	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);

	return 0;
}
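
/* ndo_xdp_xmit callback: builds one frame descriptor per redirected
 * xdp_frame and enqueues the whole batch on the Tx queue associated with
 * the current CPU; the return value is the number of frames actually sent.
 */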
static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
			      struct xdp_frame **frames, u32 flags)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpaa2_eth_xdp_fds *xdp_redirect_fds;
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_fq *fq;
	struct dpaa2_fd *fds;
	int enqueued, i, err;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	if (!netif_running(net_dev))
		return -ENETDOWN;

	fq = &priv->fq[smp_processor_id()];
	xdp_redirect_fds = &fq->xdp_redirect_fds;
	fds = xdp_redirect_fds->fds;

	percpu_stats = this_cpu_ptr(priv->percpu_stats);

	/* create a FD for each xdp_frame in the list received */
	for (i = 0; i < n; i++) {
		err = dpaa2_eth_xdp_create_fd(net_dev, frames[i], &fds[i]);
		if (err)
			break;
	}
	xdp_redirect_fds->num = i;

	/* enqueue all the frame descriptors */
	enqueued = dpaa2_eth_xdp_flush(priv, fq, xdp_redirect_fds);

	/* update statistics */
	percpu_stats->tx_packets += enqueued;
	for (i = 0; i < enqueued; i++)
		percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);

	return enqueued;
}
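
/* XPS (Transmit Packet Steering): each Tx queue is pinned to the CPU that
 * services its Tx confirmation queue, so completions are processed on the
 * same core that transmitted the frame.
 */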
static int update_xps(struct dpaa2_eth_priv *priv)
{
	struct net_device *net_dev = priv->net_dev;
	struct cpumask xps_mask;
	struct dpaa2_eth_fq *fq;
	int i, num_queues, netdev_queues;
	int err = 0;

	num_queues = dpaa2_eth_queue_count(priv);
	netdev_queues = (net_dev->num_tc ? : 1) * num_queues;

	/* The first <num_queues> entries in priv->fq array are Tx/Tx conf
	 * queues, so only process those
	 */
	for (i = 0; i < netdev_queues; i++) {
		fq = &priv->fq[i % num_queues];

		cpumask_clear(&xps_mask);
		cpumask_set_cpu(fq->target_cpu, &xps_mask);

		err = netif_set_xps_queue(net_dev, &xps_mask, i);
		if (err) {
			netdev_warn_once(net_dev, "Error setting XPS queue\n");
			break;
		}
	}

	return err;
}
static int dpaa2_eth_setup_mqprio(struct net_device *net_dev,
				  struct tc_mqprio_qopt *mqprio)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	u8 num_tc, num_queues;
	int i;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
	num_queues = dpaa2_eth_queue_count(priv);
	num_tc = mqprio->num_tc;

	if (num_tc == net_dev->num_tc)
		return 0;

	if (num_tc > dpaa2_eth_tc_count(priv)) {
		netdev_err(net_dev, "Max %d traffic classes supported\n",
			   dpaa2_eth_tc_count(priv));
		return -EOPNOTSUPP;
	}

	if (!num_tc) {
		netdev_reset_tc(net_dev);
		netif_set_real_num_tx_queues(net_dev, num_queues);
		goto out;
	}

	netdev_set_num_tc(net_dev, num_tc);
	netif_set_real_num_tx_queues(net_dev, num_tc * num_queues);

	for (i = 0; i < num_tc; i++)
		netdev_set_tc_queue(net_dev, i, num_queues, i * num_queues);

out:
	update_xps(priv);

	return 0;
}

#define bps_to_mbits(rate) (div_u64((rate), 1000000) * 8)

static int dpaa2_eth_setup_tbf(struct net_device *net_dev, struct tc_tbf_qopt_offload *p)
{
	struct tc_tbf_qopt_offload_replace_params *cfg = &p->replace_params;
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpni_tx_shaping_cfg tx_cr_shaper = { 0 };
	struct dpni_tx_shaping_cfg tx_er_shaper = { 0 };
	int err;

	if (p->command == TC_TBF_STATS)
		return -EOPNOTSUPP;

	/* Only per port Tx shaping */
	if (p->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (p->command == TC_TBF_REPLACE) {
		if (cfg->max_size > DPAA2_ETH_MAX_BURST_SIZE) {
			netdev_err(net_dev, "burst size cannot be greater than %d\n",
				   DPAA2_ETH_MAX_BURST_SIZE);
			return -EINVAL;
		}

		tx_cr_shaper.max_burst_size = cfg->max_size;
		/* The TBF interface is in bytes/s, whereas DPAA2 expects the
		 * rate in Mbits/s
		 */
		tx_cr_shaper.rate_limit = bps_to_mbits(cfg->rate.rate_bytes_ps);
	}

	err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &tx_cr_shaper,
				  &tx_er_shaper, 0);
	if (err) {
		netdev_err(net_dev, "dpni_set_tx_shaping() = %d\n", err);
		return err;
	}

	return 0;
}

static int dpaa2_eth_setup_tc(struct net_device *net_dev,
			      enum tc_setup_type type, void *type_data)
{
	switch (type) {
	case TC_SETUP_QDISC_MQPRIO:
		return dpaa2_eth_setup_mqprio(net_dev, type_data);
	case TC_SETUP_QDISC_TBF:
		return dpaa2_eth_setup_tbf(net_dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
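
/* Illustrative only: bps_to_mbits() above truncates to whole MB/s before
 * converting, e.g. a TBF rate of 12500000 bytes/s (100 Mbit/s) is programmed
 * as div_u64(12500000, 1000000) * 8 = 96 Mbit/s.
 */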
static const struct net_device_ops dpaa2_eth_ops = {
	.ndo_open = dpaa2_eth_open,
	.ndo_start_xmit = dpaa2_eth_tx,
	.ndo_stop = dpaa2_eth_stop,
	.ndo_set_mac_address = dpaa2_eth_set_addr,
	.ndo_get_stats64 = dpaa2_eth_get_stats,
	.ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
	.ndo_set_features = dpaa2_eth_set_features,
	.ndo_eth_ioctl = dpaa2_eth_ioctl,
	.ndo_change_mtu = dpaa2_eth_change_mtu,
	.ndo_bpf = dpaa2_eth_xdp,
	.ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
	.ndo_setup_tc = dpaa2_eth_setup_tc,
	.ndo_vlan_rx_add_vid = dpaa2_eth_rx_add_vid,
	.ndo_vlan_rx_kill_vid = dpaa2_eth_rx_kill_vid
};
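
/* CDAN (Channel Data Availability Notification) callback, run from the DPIO
 * service when frames become available on a channel; it only accounts the
 * event and schedules NAPI, all dequeueing happens in the poll routine.
 */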
static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx)
{
	struct dpaa2_eth_channel *ch;

	ch = container_of(ctx, struct dpaa2_eth_channel, nctx);

	/* Update NAPI statistics */
	ch->stats.cdan++;

	napi_schedule(&ch->napi);
}
/* Allocate and configure a DPCON object */
static struct fsl_mc_device *dpaa2_eth_setup_dpcon(struct dpaa2_eth_priv *priv)
{
	struct fsl_mc_device *dpcon;
	struct device *dev = priv->net_dev->dev.parent;
	int err;

	err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
				     FSL_MC_POOL_DPCON, &dpcon);
	if (err) {
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_info(dev, "Not enough DPCONs, will go on as-is\n");
		return ERR_PTR(err);
	}

	err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
	if (err) {
		dev_err(dev, "dpcon_open() failed\n");
		goto free;
	}

	err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
	if (err) {
		dev_err(dev, "dpcon_reset() failed\n");
		goto close;
	}

	err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
	if (err) {
		dev_err(dev, "dpcon_enable() failed\n");
		goto close;
	}

	return dpcon;

close:
	dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
free:
	fsl_mc_object_free(dpcon);

	return ERR_PTR(err);
}
static void dpaa2_eth_free_dpcon(struct dpaa2_eth_priv *priv,
				 struct fsl_mc_device *dpcon)
{
	dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
	dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
	fsl_mc_object_free(dpcon);
}
static struct dpaa2_eth_channel *dpaa2_eth_alloc_channel(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_channel *channel;
	struct dpcon_attr attr;
	struct device *dev = priv->net_dev->dev.parent;
	int err;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	channel->dpcon = dpaa2_eth_setup_dpcon(priv);
	if (IS_ERR(channel->dpcon)) {
		err = PTR_ERR(channel->dpcon);
		goto err_setup;
	}

	err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
				   &attr);
	if (err) {
		dev_err(dev, "dpcon_get_attributes() failed\n");
		goto err_get_attr;
	}

	channel->dpcon_id = attr.id;
	channel->ch_id = attr.qbman_ch_id;
	channel->priv = priv;

	return channel;

err_get_attr:
	dpaa2_eth_free_dpcon(priv, channel->dpcon);
err_setup:
	kfree(channel);
	return ERR_PTR(err);
}
static void dpaa2_eth_free_channel(struct dpaa2_eth_priv *priv,
				   struct dpaa2_eth_channel *channel)
{
	dpaa2_eth_free_dpcon(priv, channel->dpcon);
	kfree(channel);
}
/* DPIO setup: allocate and configure QBMan channels, setup core affinity
 * and register data availability notifications
 */
static int dpaa2_eth_setup_dpio(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_io_notification_ctx *nctx;
	struct dpaa2_eth_channel *channel;
	struct dpcon_notification_cfg dpcon_notif_cfg;
	struct device *dev = priv->net_dev->dev.parent;
	int i, err;

	/* We want the ability to spread ingress traffic (RX, TX conf) to as
	 * many cores as possible, so we need one channel for each core
	 * (unless there's fewer queues than cores, in which case the extra
	 * channels would be wasted).
	 * Allocate one channel per core and register it to the core's
	 * affine DPIO. If not enough channels are available for all cores
	 * or if some cores don't have an affine DPIO, there will be no
	 * ingress frame processing on those cores.
	 */
	cpumask_clear(&priv->dpio_cpumask);
	for_each_online_cpu(i) {
		/* Try to allocate a channel */
		channel = dpaa2_eth_alloc_channel(priv);
		if (IS_ERR_OR_NULL(channel)) {
			err = PTR_ERR_OR_ZERO(channel);
			if (err != -EPROBE_DEFER)
				dev_info(dev,
					 "No affine channel for cpu %d and above\n", i);
			goto err_alloc_ch;
		}

		priv->channel[priv->num_channels] = channel;

		nctx = &channel->nctx;
		nctx->is_cdan = 1;
		nctx->cb = dpaa2_eth_cdan_cb;
		nctx->id = channel->ch_id;
		nctx->desired_cpu = i;

		/* Register the new context */
		channel->dpio = dpaa2_io_service_select(i);
		err = dpaa2_io_service_register(channel->dpio, nctx, dev);
		if (err) {
			dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
			/* If no affine DPIO for this core, there's probably
			 * none available for next cores either. Signal we want
			 * to retry later, in case the DPIO devices weren't
			 * probed yet.
			 */
			err = -EPROBE_DEFER;
			goto err_service_reg;
		}

		/* Register DPCON notification with MC */
		dpcon_notif_cfg.dpio_id = nctx->dpio_id;
		dpcon_notif_cfg.priority = 0;
		dpcon_notif_cfg.user_ctx = nctx->qman64;
		err = dpcon_set_notification(priv->mc_io, 0,
					     channel->dpcon->mc_handle,
					     &dpcon_notif_cfg);
		if (err) {
			dev_err(dev, "dpcon_set_notification failed()\n");
			goto err_set_cdan;
		}

		/* If we managed to allocate a channel and also found an affine
		 * DPIO for this core, add it to the final mask
		 */
		cpumask_set_cpu(i, &priv->dpio_cpumask);
		priv->num_channels++;

		/* Stop if we already have enough channels to accommodate all
		 * RX and TX conf queues
		 */
		if (priv->num_channels == priv->dpni_attrs.num_queues)
			break;
	}

	return 0;

err_set_cdan:
	dpaa2_io_service_deregister(channel->dpio, nctx, dev);
err_service_reg:
	dpaa2_eth_free_channel(priv, channel);
err_alloc_ch:
	if (err == -EPROBE_DEFER) {
		for (i = 0; i < priv->num_channels; i++) {
			channel = priv->channel[i];
			nctx = &channel->nctx;
			dpaa2_io_service_deregister(channel->dpio, nctx, dev);
			dpaa2_eth_free_channel(priv, channel);
		}
		priv->num_channels = 0;
		return err;
	}

	if (cpumask_empty(&priv->dpio_cpumask)) {
		dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
		return -ENODEV;
	}

	dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
		 cpumask_pr_args(&priv->dpio_cpumask));

	return 0;
}
static void dpaa2_eth_free_dpio(struct dpaa2_eth_priv *priv)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpaa2_eth_channel *ch;
	int i;

	/* deregister CDAN notifications and free channels */
	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev);
		dpaa2_eth_free_channel(priv, ch);
	}
}
2837 static struct dpaa2_eth_channel
*dpaa2_eth_get_affine_channel(struct dpaa2_eth_priv
*priv
,
2840 struct device
*dev
= priv
->net_dev
->dev
.parent
;
2843 for (i
= 0; i
< priv
->num_channels
; i
++)
2844 if (priv
->channel
[i
]->nctx
.desired_cpu
== cpu
)
2845 return priv
->channel
[i
];
2847 /* We should never get here. Issue a warning and return
2848 * the first channel, because it's still better than nothing
2850 dev_warn(dev
, "No affine channel found for cpu %d\n", cpu
);
2852 return priv
->channel
[0];
2855 static void dpaa2_eth_set_fq_affinity(struct dpaa2_eth_priv
*priv
)
2857 struct device
*dev
= priv
->net_dev
->dev
.parent
;
2858 struct dpaa2_eth_fq
*fq
;
2859 int rx_cpu
, txc_cpu
;
2862 /* For each FQ, pick one channel/CPU to deliver frames to.
2863 * This may well change at runtime, either through irqbalance or
2864 * through direct user intervention.
2866 rx_cpu
= txc_cpu
= cpumask_first(&priv
->dpio_cpumask
);
2868 for (i
= 0; i
< priv
->num_fqs
; i
++) {
2872 case DPAA2_RX_ERR_FQ
:
2873 fq
->target_cpu
= rx_cpu
;
2874 rx_cpu
= cpumask_next(rx_cpu
, &priv
->dpio_cpumask
);
2875 if (rx_cpu
>= nr_cpu_ids
)
2876 rx_cpu
= cpumask_first(&priv
->dpio_cpumask
);
2878 case DPAA2_TX_CONF_FQ
:
2879 fq
->target_cpu
= txc_cpu
;
2880 txc_cpu
= cpumask_next(txc_cpu
, &priv
->dpio_cpumask
);
2881 if (txc_cpu
>= nr_cpu_ids
)
2882 txc_cpu
= cpumask_first(&priv
->dpio_cpumask
);
2885 dev_err(dev
, "Unknown FQ type: %d\n", fq
->type
);
2887 fq
->channel
= dpaa2_eth_get_affine_channel(priv
, fq
->target_cpu
);
2893 static void dpaa2_eth_setup_fqs(struct dpaa2_eth_priv
*priv
)
2897 /* We have one TxConf FQ per Tx flow.
2898 * The number of Tx and Rx queues is the same.
2899 * Tx queues come first in the fq array.
2901 for (i
= 0; i
< dpaa2_eth_queue_count(priv
); i
++) {
2902 priv
->fq
[priv
->num_fqs
].type
= DPAA2_TX_CONF_FQ
;
2903 priv
->fq
[priv
->num_fqs
].consume
= dpaa2_eth_tx_conf
;
2904 priv
->fq
[priv
->num_fqs
++].flowid
= (u16
)i
;
2907 for (j
= 0; j
< dpaa2_eth_tc_count(priv
); j
++) {
2908 for (i
= 0; i
< dpaa2_eth_queue_count(priv
); i
++) {
2909 priv
->fq
[priv
->num_fqs
].type
= DPAA2_RX_FQ
;
2910 priv
->fq
[priv
->num_fqs
].consume
= dpaa2_eth_rx
;
2911 priv
->fq
[priv
->num_fqs
].tc
= (u8
)j
;
2912 priv
->fq
[priv
->num_fqs
++].flowid
= (u16
)i
;
2916 /* We have exactly one Rx error queue per DPNI */
2917 priv
->fq
[priv
->num_fqs
].type
= DPAA2_RX_ERR_FQ
;
2918 priv
->fq
[priv
->num_fqs
++].consume
= dpaa2_eth_rx_err
;
2920 /* For each FQ, decide on which core to process incoming frames */
2921 dpaa2_eth_set_fq_affinity(priv
);
2924 /* Allocate and configure one buffer pool for each interface */
2925 static int dpaa2_eth_setup_dpbp(struct dpaa2_eth_priv
*priv
)
2928 struct fsl_mc_device
*dpbp_dev
;
2929 struct device
*dev
= priv
->net_dev
->dev
.parent
;
2930 struct dpbp_attr dpbp_attrs
;
2932 err
= fsl_mc_object_allocate(to_fsl_mc_device(dev
), FSL_MC_POOL_DPBP
,
2936 err
= -EPROBE_DEFER
;
2938 dev_err(dev
, "DPBP device allocation failed\n");
2942 priv
->dpbp_dev
= dpbp_dev
;
2944 err
= dpbp_open(priv
->mc_io
, 0, priv
->dpbp_dev
->obj_desc
.id
,
2945 &dpbp_dev
->mc_handle
);
2947 dev_err(dev
, "dpbp_open() failed\n");
2951 err
= dpbp_reset(priv
->mc_io
, 0, dpbp_dev
->mc_handle
);
2953 dev_err(dev
, "dpbp_reset() failed\n");
2957 err
= dpbp_enable(priv
->mc_io
, 0, dpbp_dev
->mc_handle
);
2959 dev_err(dev
, "dpbp_enable() failed\n");
2963 err
= dpbp_get_attributes(priv
->mc_io
, 0, dpbp_dev
->mc_handle
,
2966 dev_err(dev
, "dpbp_get_attributes() failed\n");
2969 priv
->bpid
= dpbp_attrs
.bpid
;
2974 dpbp_disable(priv
->mc_io
, 0, dpbp_dev
->mc_handle
);
2977 dpbp_close(priv
->mc_io
, 0, dpbp_dev
->mc_handle
);
2979 fsl_mc_object_free(dpbp_dev
);
2984 static void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv
*priv
)
2986 dpaa2_eth_drain_pool(priv
);
2987 dpbp_disable(priv
->mc_io
, 0, priv
->dpbp_dev
->mc_handle
);
2988 dpbp_close(priv
->mc_io
, 0, priv
->dpbp_dev
->mc_handle
);
2989 fsl_mc_object_free(priv
->dpbp_dev
);
2992 static int dpaa2_eth_set_buffer_layout(struct dpaa2_eth_priv
*priv
)
2994 struct device
*dev
= priv
->net_dev
->dev
.parent
;
2995 struct dpni_buffer_layout buf_layout
= {0};
2999 /* We need to check for WRIOP version 1.0.0, but depending on the MC
3000 * version, this number is not always provided correctly on rev1.
3001 * We need to check for both alternatives in this situation.
3003 if (priv
->dpni_attrs
.wriop_version
== DPAA2_WRIOP_VERSION(0, 0, 0) ||
3004 priv
->dpni_attrs
.wriop_version
== DPAA2_WRIOP_VERSION(1, 0, 0))
3005 rx_buf_align
= DPAA2_ETH_RX_BUF_ALIGN_REV1
;
3007 rx_buf_align
= DPAA2_ETH_RX_BUF_ALIGN
;
3009 /* We need to ensure that the buffer size seen by WRIOP is a multiple
3010 * of 64 or 256 bytes depending on the WRIOP version.
3012 priv
->rx_buf_size
= ALIGN_DOWN(DPAA2_ETH_RX_BUF_SIZE
, rx_buf_align
);
3015 buf_layout
.private_data_size
= DPAA2_ETH_SWA_SIZE
;
3016 buf_layout
.pass_timestamp
= true;
3017 buf_layout
.pass_frame_status
= true;
3018 buf_layout
.options
= DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE
|
3019 DPNI_BUF_LAYOUT_OPT_TIMESTAMP
|
3020 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS
;
3021 err
= dpni_set_buffer_layout(priv
->mc_io
, 0, priv
->mc_token
,
3022 DPNI_QUEUE_TX
, &buf_layout
);
3024 dev_err(dev
, "dpni_set_buffer_layout(TX) failed\n");
3028 /* tx-confirm buffer */
3029 buf_layout
.options
= DPNI_BUF_LAYOUT_OPT_TIMESTAMP
|
3030 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS
;
3031 err
= dpni_set_buffer_layout(priv
->mc_io
, 0, priv
->mc_token
,
3032 DPNI_QUEUE_TX_CONFIRM
, &buf_layout
);
3034 dev_err(dev
, "dpni_set_buffer_layout(TX_CONF) failed\n");
3038 /* Now that we've set our tx buffer layout, retrieve the minimum
3039 * required tx data offset.
3041 err
= dpni_get_tx_data_offset(priv
->mc_io
, 0, priv
->mc_token
,
3042 &priv
->tx_data_offset
);
3044 dev_err(dev
, "dpni_get_tx_data_offset() failed\n");
3048 if ((priv
->tx_data_offset
% 64) != 0)
3049 dev_warn(dev
, "Tx data offset (%d) not a multiple of 64B\n",
3050 priv
->tx_data_offset
);
3053 buf_layout
.pass_frame_status
= true;
3054 buf_layout
.pass_parser_result
= true;
3055 buf_layout
.data_align
= rx_buf_align
;
3056 buf_layout
.data_head_room
= dpaa2_eth_rx_head_room(priv
);
3057 buf_layout
.private_data_size
= 0;
3058 buf_layout
.options
= DPNI_BUF_LAYOUT_OPT_PARSER_RESULT
|
3059 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS
|
3060 DPNI_BUF_LAYOUT_OPT_DATA_ALIGN
|
3061 DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM
|
3062 DPNI_BUF_LAYOUT_OPT_TIMESTAMP
;
3063 err
= dpni_set_buffer_layout(priv
->mc_io
, 0, priv
->mc_token
,
3064 DPNI_QUEUE_RX
, &buf_layout
);
3066 dev_err(dev
, "dpni_set_buffer_layout(RX) failed\n");
3073 #define DPNI_ENQUEUE_FQID_VER_MAJOR 7
3074 #define DPNI_ENQUEUE_FQID_VER_MINOR 9
3076 static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv
*priv
,
3077 struct dpaa2_eth_fq
*fq
,
3078 struct dpaa2_fd
*fd
, u8 prio
,
3079 u32 num_frames __always_unused
,
3080 int *frames_enqueued
)
3084 err
= dpaa2_io_service_enqueue_qd(fq
->channel
->dpio
,
3085 priv
->tx_qdid
, prio
,
3087 if (!err
&& frames_enqueued
)
3088 *frames_enqueued
= 1;
3092 static inline int dpaa2_eth_enqueue_fq_multiple(struct dpaa2_eth_priv
*priv
,
3093 struct dpaa2_eth_fq
*fq
,
3094 struct dpaa2_fd
*fd
,
3095 u8 prio
, u32 num_frames
,
3096 int *frames_enqueued
)
3100 err
= dpaa2_io_service_enqueue_multiple_fq(fq
->channel
->dpio
,
3107 if (frames_enqueued
)
3108 *frames_enqueued
= err
;
3112 static void dpaa2_eth_set_enqueue_mode(struct dpaa2_eth_priv
*priv
)
3114 if (dpaa2_eth_cmp_dpni_ver(priv
, DPNI_ENQUEUE_FQID_VER_MAJOR
,
3115 DPNI_ENQUEUE_FQID_VER_MINOR
) < 0)
3116 priv
->enqueue
= dpaa2_eth_enqueue_qd
;
3118 priv
->enqueue
= dpaa2_eth_enqueue_fq_multiple
;
3121 static int dpaa2_eth_set_pause(struct dpaa2_eth_priv
*priv
)
3123 struct device
*dev
= priv
->net_dev
->dev
.parent
;
3124 struct dpni_link_cfg link_cfg
= {0};
3127 /* Get the default link options so we don't override other flags */
3128 err
= dpni_get_link_cfg(priv
->mc_io
, 0, priv
->mc_token
, &link_cfg
);
3130 dev_err(dev
, "dpni_get_link_cfg() failed\n");
3134 /* By default, enable both Rx and Tx pause frames */
3135 link_cfg
.options
|= DPNI_LINK_OPT_PAUSE
;
3136 link_cfg
.options
&= ~DPNI_LINK_OPT_ASYM_PAUSE
;
3137 err
= dpni_set_link_cfg(priv
->mc_io
, 0, priv
->mc_token
, &link_cfg
);
3139 dev_err(dev
, "dpni_set_link_cfg() failed\n");
3143 priv
->link_state
.options
= link_cfg
.options
;
3148 static void dpaa2_eth_update_tx_fqids(struct dpaa2_eth_priv
*priv
)
3150 struct dpni_queue_id qid
= {0};
3151 struct dpaa2_eth_fq
*fq
;
3152 struct dpni_queue queue
;
3155 /* We only use Tx FQIDs for FQID-based enqueue, so check
3156 * if DPNI version supports it before updating FQIDs
3158 if (dpaa2_eth_cmp_dpni_ver(priv
, DPNI_ENQUEUE_FQID_VER_MAJOR
,
3159 DPNI_ENQUEUE_FQID_VER_MINOR
) < 0)
3162 for (i
= 0; i
< priv
->num_fqs
; i
++) {
3164 if (fq
->type
!= DPAA2_TX_CONF_FQ
)
3166 for (j
= 0; j
< dpaa2_eth_tc_count(priv
); j
++) {
3167 err
= dpni_get_queue(priv
->mc_io
, 0, priv
->mc_token
,
3168 DPNI_QUEUE_TX
, j
, fq
->flowid
,
3173 fq
->tx_fqid
[j
] = qid
.fqid
;
3174 if (fq
->tx_fqid
[j
] == 0)
3179 priv
->enqueue
= dpaa2_eth_enqueue_fq_multiple
;
3184 netdev_info(priv
->net_dev
,
3185 "Error reading Tx FQID, fallback to QDID-based enqueue\n");
3186 priv
->enqueue
= dpaa2_eth_enqueue_qd
;
3189 /* Configure ingress classification based on VLAN PCP */
3190 static int dpaa2_eth_set_vlan_qos(struct dpaa2_eth_priv
*priv
)
3192 struct device
*dev
= priv
->net_dev
->dev
.parent
;
3193 struct dpkg_profile_cfg kg_cfg
= {0};
3194 struct dpni_qos_tbl_cfg qos_cfg
= {0};
3195 struct dpni_rule_cfg key_params
;
3196 void *dma_mem
, *key
, *mask
;
3197 u8 key_size
= 2; /* VLAN TCI field */
3200 /* VLAN-based classification only makes sense if we have multiple
3202 * Also, we need to extract just the 3-bit PCP field from the VLAN
3203 * header and we can only do that by using a mask
3205 if (dpaa2_eth_tc_count(priv
) == 1 || !dpaa2_eth_fs_mask_enabled(priv
)) {
3206 dev_dbg(dev
, "VLAN-based QoS classification not supported\n");
3210 dma_mem
= kzalloc(DPAA2_CLASSIFIER_DMA_SIZE
, GFP_KERNEL
);
3214 kg_cfg
.num_extracts
= 1;
3215 kg_cfg
.extracts
[0].type
= DPKG_EXTRACT_FROM_HDR
;
3216 kg_cfg
.extracts
[0].extract
.from_hdr
.prot
= NET_PROT_VLAN
;
3217 kg_cfg
.extracts
[0].extract
.from_hdr
.type
= DPKG_FULL_FIELD
;
3218 kg_cfg
.extracts
[0].extract
.from_hdr
.field
= NH_FLD_VLAN_TCI
;
3220 err
= dpni_prepare_key_cfg(&kg_cfg
, dma_mem
);
3222 dev_err(dev
, "dpni_prepare_key_cfg failed\n");
3227 qos_cfg
.default_tc
= 0;
3228 qos_cfg
.discard_on_miss
= 0;
3229 qos_cfg
.key_cfg_iova
= dma_map_single(dev
, dma_mem
,
3230 DPAA2_CLASSIFIER_DMA_SIZE
,
3232 if (dma_mapping_error(dev
, qos_cfg
.key_cfg_iova
)) {
3233 dev_err(dev
, "QoS table DMA mapping failed\n");
3238 err
= dpni_set_qos_table(priv
->mc_io
, 0, priv
->mc_token
, &qos_cfg
);
3240 dev_err(dev
, "dpni_set_qos_table failed\n");
3244 /* Add QoS table entries */
3245 key
= kzalloc(key_size
* 2, GFP_KERNEL
);
3250 mask
= key
+ key_size
;
3251 *(__be16
*)mask
= cpu_to_be16(VLAN_PRIO_MASK
);
3253 key_params
.key_iova
= dma_map_single(dev
, key
, key_size
* 2,
3255 if (dma_mapping_error(dev
, key_params
.key_iova
)) {
3256 dev_err(dev
, "Qos table entry DMA mapping failed\n");
3261 key_params
.mask_iova
= key_params
.key_iova
+ key_size
;
3262 key_params
.key_size
= key_size
;
3264 /* We add rules for PCP-based distribution starting with highest
3265 * priority (VLAN PCP = 7). If this DPNI doesn't have enough traffic
3266 * classes to accommodate all priority levels, the lowest ones end up
3267 * on TC 0 which was configured as default
3269 for (i
= dpaa2_eth_tc_count(priv
) - 1, pcp
= 7; i
>= 0; i
--, pcp
--) {
3270 *(__be16
*)key
= cpu_to_be16(pcp
<< VLAN_PRIO_SHIFT
);
3271 dma_sync_single_for_device(dev
, key_params
.key_iova
,
3272 key_size
* 2, DMA_TO_DEVICE
);
3274 err
= dpni_add_qos_entry(priv
->mc_io
, 0, priv
->mc_token
,
3277 dev_err(dev
, "dpni_add_qos_entry failed\n");
3278 dpni_clear_qos_table(priv
->mc_io
, 0, priv
->mc_token
);
3283 priv
->vlan_cls_enabled
= true;
3285 /* Table and key memory is not persistent, clean everything up after
3286 * configuration is finished
3289 dma_unmap_single(dev
, key_params
.key_iova
, key_size
* 2, DMA_TO_DEVICE
);
3293 dma_unmap_single(dev
, qos_cfg
.key_cfg_iova
, DPAA2_CLASSIFIER_DMA_SIZE
,
3301 /* Configure the DPNI object this interface is associated with */
3302 static int dpaa2_eth_setup_dpni(struct fsl_mc_device
*ls_dev
)
3304 struct device
*dev
= &ls_dev
->dev
;
3305 struct dpaa2_eth_priv
*priv
;
3306 struct net_device
*net_dev
;
3309 net_dev
= dev_get_drvdata(dev
);
3310 priv
= netdev_priv(net_dev
);
3312 /* get a handle for the DPNI object */
3313 err
= dpni_open(priv
->mc_io
, 0, ls_dev
->obj_desc
.id
, &priv
->mc_token
);
3315 dev_err(dev
, "dpni_open() failed\n");
3319 /* Check if we can work with this DPNI object */
3320 err
= dpni_get_api_version(priv
->mc_io
, 0, &priv
->dpni_ver_major
,
3321 &priv
->dpni_ver_minor
);
3323 dev_err(dev
, "dpni_get_api_version() failed\n");
3326 if (dpaa2_eth_cmp_dpni_ver(priv
, DPNI_VER_MAJOR
, DPNI_VER_MINOR
) < 0) {
3327 dev_err(dev
, "DPNI version %u.%u not supported, need >= %u.%u\n",
3328 priv
->dpni_ver_major
, priv
->dpni_ver_minor
,
3329 DPNI_VER_MAJOR
, DPNI_VER_MINOR
);
3334 ls_dev
->mc_io
= priv
->mc_io
;
3335 ls_dev
->mc_handle
= priv
->mc_token
;
3337 err
= dpni_reset(priv
->mc_io
, 0, priv
->mc_token
);
3339 dev_err(dev
, "dpni_reset() failed\n");
3343 err
= dpni_get_attributes(priv
->mc_io
, 0, priv
->mc_token
,
3346 dev_err(dev
, "dpni_get_attributes() failed (err=%d)\n", err
);
3350 err
= dpaa2_eth_set_buffer_layout(priv
);
3354 dpaa2_eth_set_enqueue_mode(priv
);
3356 /* Enable pause frame support */
3357 if (dpaa2_eth_has_pause_support(priv
)) {
3358 err
= dpaa2_eth_set_pause(priv
);
3363 err
= dpaa2_eth_set_vlan_qos(priv
);
3364 if (err
&& err
!= -EOPNOTSUPP
)
3367 priv
->cls_rules
= devm_kcalloc(dev
, dpaa2_eth_fs_count(priv
),
3368 sizeof(struct dpaa2_eth_cls_rule
),
3370 if (!priv
->cls_rules
) {
3378 dpni_close(priv
->mc_io
, 0, priv
->mc_token
);
3383 static void dpaa2_eth_free_dpni(struct dpaa2_eth_priv
*priv
)
3387 err
= dpni_reset(priv
->mc_io
, 0, priv
->mc_token
);
3389 netdev_warn(priv
->net_dev
, "dpni_reset() failed (err %d)\n",
3392 dpni_close(priv
->mc_io
, 0, priv
->mc_token
);
3395 static int dpaa2_eth_setup_rx_flow(struct dpaa2_eth_priv
*priv
,
3396 struct dpaa2_eth_fq
*fq
)
3398 struct device
*dev
= priv
->net_dev
->dev
.parent
;
3399 struct dpni_queue queue
;
3400 struct dpni_queue_id qid
;
3403 err
= dpni_get_queue(priv
->mc_io
, 0, priv
->mc_token
,
3404 DPNI_QUEUE_RX
, fq
->tc
, fq
->flowid
, &queue
, &qid
);
3406 dev_err(dev
, "dpni_get_queue(RX) failed\n");
3410 fq
->fqid
= qid
.fqid
;
3412 queue
.destination
.id
= fq
->channel
->dpcon_id
;
3413 queue
.destination
.type
= DPNI_DEST_DPCON
;
3414 queue
.destination
.priority
= 1;
3415 queue
.user_context
= (u64
)(uintptr_t)fq
;
3416 err
= dpni_set_queue(priv
->mc_io
, 0, priv
->mc_token
,
3417 DPNI_QUEUE_RX
, fq
->tc
, fq
->flowid
,
3418 DPNI_QUEUE_OPT_USER_CTX
| DPNI_QUEUE_OPT_DEST
,
3421 dev_err(dev
, "dpni_set_queue(RX) failed\n");
3426 /* only once for each channel */
3430 err
= xdp_rxq_info_reg(&fq
->channel
->xdp_rxq
, priv
->net_dev
,
3433 dev_err(dev
, "xdp_rxq_info_reg failed\n");
3437 err
= xdp_rxq_info_reg_mem_model(&fq
->channel
->xdp_rxq
,
3438 MEM_TYPE_PAGE_ORDER0
, NULL
);
3440 dev_err(dev
, "xdp_rxq_info_reg_mem_model failed\n");
3447 static int dpaa2_eth_setup_tx_flow(struct dpaa2_eth_priv
*priv
,
3448 struct dpaa2_eth_fq
*fq
)
3450 struct device
*dev
= priv
->net_dev
->dev
.parent
;
3451 struct dpni_queue queue
;
3452 struct dpni_queue_id qid
;
3455 for (i
= 0; i
< dpaa2_eth_tc_count(priv
); i
++) {
3456 err
= dpni_get_queue(priv
->mc_io
, 0, priv
->mc_token
,
3457 DPNI_QUEUE_TX
, i
, fq
->flowid
,
3460 dev_err(dev
, "dpni_get_queue(TX) failed\n");
3463 fq
->tx_fqid
[i
] = qid
.fqid
;
3466 /* All Tx queues belonging to the same flowid have the same qdbin */
3467 fq
->tx_qdbin
= qid
.qdbin
;
3469 err
= dpni_get_queue(priv
->mc_io
, 0, priv
->mc_token
,
3470 DPNI_QUEUE_TX_CONFIRM
, 0, fq
->flowid
,
3473 dev_err(dev
, "dpni_get_queue(TX_CONF) failed\n");
3477 fq
->fqid
= qid
.fqid
;
3479 queue
.destination
.id
= fq
->channel
->dpcon_id
;
3480 queue
.destination
.type
= DPNI_DEST_DPCON
;
3481 queue
.destination
.priority
= 0;
3482 queue
.user_context
= (u64
)(uintptr_t)fq
;
3483 err
= dpni_set_queue(priv
->mc_io
, 0, priv
->mc_token
,
3484 DPNI_QUEUE_TX_CONFIRM
, 0, fq
->flowid
,
3485 DPNI_QUEUE_OPT_USER_CTX
| DPNI_QUEUE_OPT_DEST
,
3488 dev_err(dev
, "dpni_set_queue(TX_CONF) failed\n");
3495 static int setup_rx_err_flow(struct dpaa2_eth_priv
*priv
,
3496 struct dpaa2_eth_fq
*fq
)
3498 struct device
*dev
= priv
->net_dev
->dev
.parent
;
3499 struct dpni_queue q
= { { 0 } };
3500 struct dpni_queue_id qid
;
3501 u8 q_opt
= DPNI_QUEUE_OPT_USER_CTX
| DPNI_QUEUE_OPT_DEST
;
3504 err
= dpni_get_queue(priv
->mc_io
, 0, priv
->mc_token
,
3505 DPNI_QUEUE_RX_ERR
, 0, 0, &q
, &qid
);
3507 dev_err(dev
, "dpni_get_queue() failed (%d)\n", err
);
3511 fq
->fqid
= qid
.fqid
;
3513 q
.destination
.id
= fq
->channel
->dpcon_id
;
3514 q
.destination
.type
= DPNI_DEST_DPCON
;
3515 q
.destination
.priority
= 1;
3516 q
.user_context
= (u64
)(uintptr_t)fq
;
3517 err
= dpni_set_queue(priv
->mc_io
, 0, priv
->mc_token
,
3518 DPNI_QUEUE_RX_ERR
, 0, 0, q_opt
, &q
);
3520 dev_err(dev
, "dpni_set_queue() failed (%d)\n", err
);
3527 /* Supported header fields for Rx hash distribution key */
3528 static const struct dpaa2_eth_dist_fields dist_fields
[] = {
3531 .rxnfc_field
= RXH_L2DA
,
3532 .cls_prot
= NET_PROT_ETH
,
3533 .cls_field
= NH_FLD_ETH_DA
,
3534 .id
= DPAA2_ETH_DIST_ETHDST
,
3537 .cls_prot
= NET_PROT_ETH
,
3538 .cls_field
= NH_FLD_ETH_SA
,
3539 .id
= DPAA2_ETH_DIST_ETHSRC
,
3542 /* This is the last ethertype field parsed:
3543 * depending on frame format, it can be the MAC ethertype
3544 * or the VLAN etype.
3546 .cls_prot
= NET_PROT_ETH
,
3547 .cls_field
= NH_FLD_ETH_TYPE
,
3548 .id
= DPAA2_ETH_DIST_ETHTYPE
,
3552 .rxnfc_field
= RXH_VLAN
,
3553 .cls_prot
= NET_PROT_VLAN
,
3554 .cls_field
= NH_FLD_VLAN_TCI
,
3555 .id
= DPAA2_ETH_DIST_VLAN
,
3559 .rxnfc_field
= RXH_IP_SRC
,
3560 .cls_prot
= NET_PROT_IP
,
3561 .cls_field
= NH_FLD_IP_SRC
,
3562 .id
= DPAA2_ETH_DIST_IPSRC
,
3565 .rxnfc_field
= RXH_IP_DST
,
3566 .cls_prot
= NET_PROT_IP
,
3567 .cls_field
= NH_FLD_IP_DST
,
3568 .id
= DPAA2_ETH_DIST_IPDST
,
3571 .rxnfc_field
= RXH_L3_PROTO
,
3572 .cls_prot
= NET_PROT_IP
,
3573 .cls_field
= NH_FLD_IP_PROTO
,
3574 .id
= DPAA2_ETH_DIST_IPPROTO
,
3577 /* Using UDP ports, this is functionally equivalent to raw
3578 * byte pairs from L4 header.
3580 .rxnfc_field
= RXH_L4_B_0_1
,
3581 .cls_prot
= NET_PROT_UDP
,
3582 .cls_field
= NH_FLD_UDP_PORT_SRC
,
3583 .id
= DPAA2_ETH_DIST_L4SRC
,
3586 .rxnfc_field
= RXH_L4_B_2_3
,
3587 .cls_prot
= NET_PROT_UDP
,
3588 .cls_field
= NH_FLD_UDP_PORT_DST
,
3589 .id
= DPAA2_ETH_DIST_L4DST
,
3594 /* Configure the Rx hash key using the legacy API */
3595 static int dpaa2_eth_config_legacy_hash_key(struct dpaa2_eth_priv
*priv
, dma_addr_t key
)
3597 struct device
*dev
= priv
->net_dev
->dev
.parent
;
3598 struct dpni_rx_tc_dist_cfg dist_cfg
;
3601 memset(&dist_cfg
, 0, sizeof(dist_cfg
));
3603 dist_cfg
.key_cfg_iova
= key
;
3604 dist_cfg
.dist_size
= dpaa2_eth_queue_count(priv
);
3605 dist_cfg
.dist_mode
= DPNI_DIST_MODE_HASH
;
3607 for (i
= 0; i
< dpaa2_eth_tc_count(priv
); i
++) {
3608 err
= dpni_set_rx_tc_dist(priv
->mc_io
, 0, priv
->mc_token
,
3611 dev_err(dev
, "dpni_set_rx_tc_dist failed\n");
3619 /* Configure the Rx hash key using the new API */
3620 static int dpaa2_eth_config_hash_key(struct dpaa2_eth_priv
*priv
, dma_addr_t key
)
3622 struct device
*dev
= priv
->net_dev
->dev
.parent
;
3623 struct dpni_rx_dist_cfg dist_cfg
;
3626 memset(&dist_cfg
, 0, sizeof(dist_cfg
));
3628 dist_cfg
.key_cfg_iova
= key
;
3629 dist_cfg
.dist_size
= dpaa2_eth_queue_count(priv
);
3630 dist_cfg
.enable
= 1;
3632 for (i
= 0; i
< dpaa2_eth_tc_count(priv
); i
++) {
3634 err
= dpni_set_rx_hash_dist(priv
->mc_io
, 0, priv
->mc_token
,
3637 dev_err(dev
, "dpni_set_rx_hash_dist failed\n");
3641 /* If the flow steering / hashing key is shared between all
3642 * traffic classes, install it just once
3644 if (priv
->dpni_attrs
.options
& DPNI_OPT_SHARED_FS
)
3651 /* Configure the Rx flow classification key */
3652 static int dpaa2_eth_config_cls_key(struct dpaa2_eth_priv
*priv
, dma_addr_t key
)
3654 struct device
*dev
= priv
->net_dev
->dev
.parent
;
3655 struct dpni_rx_dist_cfg dist_cfg
;
3658 memset(&dist_cfg
, 0, sizeof(dist_cfg
));
3660 dist_cfg
.key_cfg_iova
= key
;
3661 dist_cfg
.dist_size
= dpaa2_eth_queue_count(priv
);
3662 dist_cfg
.enable
= 1;
3664 for (i
= 0; i
< dpaa2_eth_tc_count(priv
); i
++) {
3666 err
= dpni_set_rx_fs_dist(priv
->mc_io
, 0, priv
->mc_token
,
3669 dev_err(dev
, "dpni_set_rx_fs_dist failed\n");
3673 /* If the flow steering / hashing key is shared between all
3674 * traffic classes, install it just once
3676 if (priv
->dpni_attrs
.options
& DPNI_OPT_SHARED_FS
)
3683 /* Size of the Rx flow classification key */
3684 int dpaa2_eth_cls_key_size(u64 fields
)
3688 for (i
= 0; i
< ARRAY_SIZE(dist_fields
); i
++) {
3689 if (!(fields
& dist_fields
[i
].id
))
3691 size
+= dist_fields
[i
].size
;
3697 /* Offset of header field in Rx classification key */
3698 int dpaa2_eth_cls_fld_off(int prot
, int field
)
3702 for (i
= 0; i
< ARRAY_SIZE(dist_fields
); i
++) {
3703 if (dist_fields
[i
].cls_prot
== prot
&&
3704 dist_fields
[i
].cls_field
== field
)
3706 off
+= dist_fields
[i
].size
;
3709 WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n");
3713 /* Prune unused fields from the classification rule.
3714 * Used when masking is not supported
3716 void dpaa2_eth_cls_trim_rule(void *key_mem
, u64 fields
)
3718 int off
= 0, new_off
= 0;
3721 for (i
= 0; i
< ARRAY_SIZE(dist_fields
); i
++) {
3722 size
= dist_fields
[i
].size
;
3723 if (dist_fields
[i
].id
& fields
) {
3724 memcpy(key_mem
+ new_off
, key_mem
+ off
, size
);
3731 /* Set Rx distribution (hash or flow classification) key
3732 * flags is a combination of RXH_ bits
3734 static int dpaa2_eth_set_dist_key(struct net_device
*net_dev
,
3735 enum dpaa2_eth_rx_dist type
, u64 flags
)
3737 struct device
*dev
= net_dev
->dev
.parent
;
3738 struct dpaa2_eth_priv
*priv
= netdev_priv(net_dev
);
3739 struct dpkg_profile_cfg cls_cfg
;
3740 u32 rx_hash_fields
= 0;
3741 dma_addr_t key_iova
;
3746 memset(&cls_cfg
, 0, sizeof(cls_cfg
));
3748 for (i
= 0; i
< ARRAY_SIZE(dist_fields
); i
++) {
3749 struct dpkg_extract
*key
=
3750 &cls_cfg
.extracts
[cls_cfg
.num_extracts
];
3752 /* For both Rx hashing and classification keys
3753 * we set only the selected fields.
3755 if (!(flags
& dist_fields
[i
].id
))
3757 if (type
== DPAA2_ETH_RX_DIST_HASH
)
3758 rx_hash_fields
|= dist_fields
[i
].rxnfc_field
;
3760 if (cls_cfg
.num_extracts
>= DPKG_MAX_NUM_OF_EXTRACTS
) {
3761 dev_err(dev
, "error adding key extraction rule, too many rules?\n");
3765 key
->type
= DPKG_EXTRACT_FROM_HDR
;
3766 key
->extract
.from_hdr
.prot
= dist_fields
[i
].cls_prot
;
3767 key
->extract
.from_hdr
.type
= DPKG_FULL_FIELD
;
3768 key
->extract
.from_hdr
.field
= dist_fields
[i
].cls_field
;
3769 cls_cfg
.num_extracts
++;
3772 dma_mem
= kzalloc(DPAA2_CLASSIFIER_DMA_SIZE
, GFP_KERNEL
);
3776 err
= dpni_prepare_key_cfg(&cls_cfg
, dma_mem
);
3778 dev_err(dev
, "dpni_prepare_key_cfg error %d\n", err
);
3782 /* Prepare for setting the rx dist */
3783 key_iova
= dma_map_single(dev
, dma_mem
, DPAA2_CLASSIFIER_DMA_SIZE
,
3785 if (dma_mapping_error(dev
, key_iova
)) {
3786 dev_err(dev
, "DMA mapping failed\n");
3791 if (type
== DPAA2_ETH_RX_DIST_HASH
) {
3792 if (dpaa2_eth_has_legacy_dist(priv
))
3793 err
= dpaa2_eth_config_legacy_hash_key(priv
, key_iova
);
3795 err
= dpaa2_eth_config_hash_key(priv
, key_iova
);
3797 err
= dpaa2_eth_config_cls_key(priv
, key_iova
);
3800 dma_unmap_single(dev
, key_iova
, DPAA2_CLASSIFIER_DMA_SIZE
,
3802 if (!err
&& type
== DPAA2_ETH_RX_DIST_HASH
)
3803 priv
->rx_hash_fields
= rx_hash_fields
;
3810 int dpaa2_eth_set_hash(struct net_device
*net_dev
, u64 flags
)
3812 struct dpaa2_eth_priv
*priv
= netdev_priv(net_dev
);
3816 if (!dpaa2_eth_hash_enabled(priv
))
3819 for (i
= 0; i
< ARRAY_SIZE(dist_fields
); i
++)
3820 if (dist_fields
[i
].rxnfc_field
& flags
)
3821 key
|= dist_fields
[i
].id
;
3823 return dpaa2_eth_set_dist_key(net_dev
, DPAA2_ETH_RX_DIST_HASH
, key
);
3826 int dpaa2_eth_set_cls(struct net_device
*net_dev
, u64 flags
)
3828 return dpaa2_eth_set_dist_key(net_dev
, DPAA2_ETH_RX_DIST_CLS
, flags
);
3831 static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv
*priv
)
3833 struct device
*dev
= priv
->net_dev
->dev
.parent
;
3836 /* Check if we actually support Rx flow classification */
3837 if (dpaa2_eth_has_legacy_dist(priv
)) {
3838 dev_dbg(dev
, "Rx cls not supported by current MC version\n");
3842 if (!dpaa2_eth_fs_enabled(priv
)) {
3843 dev_dbg(dev
, "Rx cls disabled in DPNI options\n");
3847 if (!dpaa2_eth_hash_enabled(priv
)) {
3848 dev_dbg(dev
, "Rx cls disabled for single queue DPNIs\n");
3852 /* If there is no support for masking in the classification table,
3853 * we don't set a default key, as it will depend on the rules
3854 * added by the user at runtime.
3856 if (!dpaa2_eth_fs_mask_enabled(priv
))
3859 err
= dpaa2_eth_set_cls(priv
->net_dev
, DPAA2_ETH_DIST_ALL
);
3864 priv
->rx_cls_enabled
= 1;
3869 /* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
3870 * frame queues and channels
3872 static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv
*priv
)
3874 struct net_device
*net_dev
= priv
->net_dev
;
3875 struct device
*dev
= net_dev
->dev
.parent
;
3876 struct dpni_pools_cfg pools_params
;
3877 struct dpni_error_cfg err_cfg
;
3881 pools_params
.num_dpbp
= 1;
3882 pools_params
.pools
[0].dpbp_id
= priv
->dpbp_dev
->obj_desc
.id
;
3883 pools_params
.pools
[0].backup_pool
= 0;
3884 pools_params
.pools
[0].buffer_size
= priv
->rx_buf_size
;
3885 err
= dpni_set_pools(priv
->mc_io
, 0, priv
->mc_token
, &pools_params
);
3887 dev_err(dev
, "dpni_set_pools() failed\n");
3891 /* have the interface implicitly distribute traffic based on
3892 * the default hash key
3894 err
= dpaa2_eth_set_hash(net_dev
, DPAA2_RXH_DEFAULT
);
3895 if (err
&& err
!= -EOPNOTSUPP
)
3896 dev_err(dev
, "Failed to configure hashing\n");
3898 /* Configure the flow classification key; it includes all
3899 * supported header fields and cannot be modified at runtime
3901 err
= dpaa2_eth_set_default_cls(priv
);
3902 if (err
&& err
!= -EOPNOTSUPP
)
3903 dev_err(dev
, "Failed to configure Rx classification key\n");
3905 /* Configure handling of error frames */
3906 err_cfg
.errors
= DPAA2_FAS_RX_ERR_MASK
;
3907 err_cfg
.set_frame_annotation
= 1;
3908 err_cfg
.error_action
= DPNI_ERROR_ACTION_DISCARD
;
3909 err
= dpni_set_errors_behavior(priv
->mc_io
, 0, priv
->mc_token
,
3912 dev_err(dev
, "dpni_set_errors_behavior failed\n");
3916 /* Configure Rx and Tx conf queues to generate CDANs */
3917 for (i
= 0; i
< priv
->num_fqs
; i
++) {
3918 switch (priv
->fq
[i
].type
) {
3920 err
= dpaa2_eth_setup_rx_flow(priv
, &priv
->fq
[i
]);
3922 case DPAA2_TX_CONF_FQ
:
3923 err
= dpaa2_eth_setup_tx_flow(priv
, &priv
->fq
[i
]);
3925 case DPAA2_RX_ERR_FQ
:
3926 err
= setup_rx_err_flow(priv
, &priv
->fq
[i
]);
3929 dev_err(dev
, "Invalid FQ type %d\n", priv
->fq
[i
].type
);
3936 err
= dpni_get_qdid(priv
->mc_io
, 0, priv
->mc_token
,
3937 DPNI_QUEUE_TX
, &priv
->tx_qdid
);
3939 dev_err(dev
, "dpni_get_qdid() failed\n");
3946 /* Allocate rings for storing incoming frame descriptors */
3947 static int dpaa2_eth_alloc_rings(struct dpaa2_eth_priv
*priv
)
3949 struct net_device
*net_dev
= priv
->net_dev
;
3950 struct device
*dev
= net_dev
->dev
.parent
;
3953 for (i
= 0; i
< priv
->num_channels
; i
++) {
3954 priv
->channel
[i
]->store
=
3955 dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE
, dev
);
3956 if (!priv
->channel
[i
]->store
) {
3957 netdev_err(net_dev
, "dpaa2_io_store_create() failed\n");
3965 for (i
= 0; i
< priv
->num_channels
; i
++) {
3966 if (!priv
->channel
[i
]->store
)
3968 dpaa2_io_store_destroy(priv
->channel
[i
]->store
);
3974 static void dpaa2_eth_free_rings(struct dpaa2_eth_priv
*priv
)
3978 for (i
= 0; i
< priv
->num_channels
; i
++)
3979 dpaa2_io_store_destroy(priv
->channel
[i
]->store
);
3982 static int dpaa2_eth_set_mac_addr(struct dpaa2_eth_priv
*priv
)
3984 struct net_device
*net_dev
= priv
->net_dev
;
3985 struct device
*dev
= net_dev
->dev
.parent
;
3986 u8 mac_addr
[ETH_ALEN
], dpni_mac_addr
[ETH_ALEN
];
3989 /* Get firmware address, if any */
3990 err
= dpni_get_port_mac_addr(priv
->mc_io
, 0, priv
->mc_token
, mac_addr
);
3992 dev_err(dev
, "dpni_get_port_mac_addr() failed\n");
3996 /* Get DPNI attributes address, if any */
3997 err
= dpni_get_primary_mac_addr(priv
->mc_io
, 0, priv
->mc_token
,
4000 dev_err(dev
, "dpni_get_primary_mac_addr() failed\n");
4004 /* First check if firmware has any address configured by bootloader */
4005 if (!is_zero_ether_addr(mac_addr
)) {
4006 /* If the DPMAC addr != DPNI addr, update it */
4007 if (!ether_addr_equal(mac_addr
, dpni_mac_addr
)) {
4008 err
= dpni_set_primary_mac_addr(priv
->mc_io
, 0,
4012 dev_err(dev
, "dpni_set_primary_mac_addr() failed\n");
4016 memcpy(net_dev
->dev_addr
, mac_addr
, net_dev
->addr_len
);
4017 } else if (is_zero_ether_addr(dpni_mac_addr
)) {
4018 /* No MAC address configured, fill in net_dev->dev_addr
4021 eth_hw_addr_random(net_dev
);
4022 dev_dbg_once(dev
, "device(s) have all-zero hwaddr, replaced with random\n");
4024 err
= dpni_set_primary_mac_addr(priv
->mc_io
, 0, priv
->mc_token
,
4027 dev_err(dev
, "dpni_set_primary_mac_addr() failed\n");
4031 /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
4032 * practical purposes, this will be our "permanent" mac address,
4033 * at least until the next reboot. This move will also permit
4034 * register_netdevice() to properly fill up net_dev->perm_addr.
4036 net_dev
->addr_assign_type
= NET_ADDR_PERM
;
4038 /* NET_ADDR_PERM is default, all we have to do is
4039 * fill in the device addr.
4041 memcpy(net_dev
->dev_addr
, dpni_mac_addr
, net_dev
->addr_len
);
4047 static int dpaa2_eth_netdev_init(struct net_device
*net_dev
)
4049 struct device
*dev
= net_dev
->dev
.parent
;
4050 struct dpaa2_eth_priv
*priv
= netdev_priv(net_dev
);
4051 u32 options
= priv
->dpni_attrs
.options
;
4052 u64 supported
= 0, not_supported
= 0;
4053 u8 bcast_addr
[ETH_ALEN
];
4057 net_dev
->netdev_ops
= &dpaa2_eth_ops
;
4058 net_dev
->ethtool_ops
= &dpaa2_ethtool_ops
;
4060 err
= dpaa2_eth_set_mac_addr(priv
);
4064 /* Explicitly add the broadcast address to the MAC filtering table */
4065 eth_broadcast_addr(bcast_addr
);
4066 err
= dpni_add_mac_addr(priv
->mc_io
, 0, priv
->mc_token
, bcast_addr
);
4068 dev_err(dev
, "dpni_add_mac_addr() failed\n");
4072 /* Set MTU upper limit; lower limit is 68B (default value) */
4073 net_dev
->max_mtu
= DPAA2_ETH_MAX_MTU
;
4074 err
= dpni_set_max_frame_length(priv
->mc_io
, 0, priv
->mc_token
,
4077 dev_err(dev
, "dpni_set_max_frame_length() failed\n");
4081 /* Set actual number of queues in the net device */
4082 num_queues
= dpaa2_eth_queue_count(priv
);
4083 err
= netif_set_real_num_tx_queues(net_dev
, num_queues
);
4085 dev_err(dev
, "netif_set_real_num_tx_queues() failed\n");
4088 err
= netif_set_real_num_rx_queues(net_dev
, num_queues
);
4090 dev_err(dev
, "netif_set_real_num_rx_queues() failed\n");
4094 /* Capabilities listing */
4095 supported
|= IFF_LIVE_ADDR_CHANGE
;
4097 if (options
& DPNI_OPT_NO_MAC_FILTER
)
4098 not_supported
|= IFF_UNICAST_FLT
;
4100 supported
|= IFF_UNICAST_FLT
;
4102 net_dev
->priv_flags
|= supported
;
4103 net_dev
->priv_flags
&= ~not_supported
;
4106 net_dev
->features
= NETIF_F_RXCSUM
|
4107 NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
|
4108 NETIF_F_SG
| NETIF_F_HIGHDMA
|
4109 NETIF_F_LLTX
| NETIF_F_HW_TC
;
4110 net_dev
->hw_features
= net_dev
->features
;
4112 if (priv
->dpni_attrs
.vlan_filter_entries
)
4113 net_dev
->hw_features
|= NETIF_F_HW_VLAN_CTAG_FILTER
;
4118 static int dpaa2_eth_poll_link_state(void *arg
)
4120 struct dpaa2_eth_priv
*priv
= (struct dpaa2_eth_priv
*)arg
;
4123 while (!kthread_should_stop()) {
4124 err
= dpaa2_eth_link_state_update(priv
);
4128 msleep(DPAA2_ETH_LINK_STATE_REFRESH
);
4134 static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv
*priv
)
4136 struct fsl_mc_device
*dpni_dev
, *dpmac_dev
;
4137 struct dpaa2_mac
*mac
;
4140 dpni_dev
= to_fsl_mc_device(priv
->net_dev
->dev
.parent
);
4141 dpmac_dev
= fsl_mc_get_endpoint(dpni_dev
, 0);
4143 if (PTR_ERR(dpmac_dev
) == -EPROBE_DEFER
)
4144 return PTR_ERR(dpmac_dev
);
4146 if (IS_ERR(dpmac_dev
) || dpmac_dev
->dev
.type
!= &fsl_mc_bus_dpmac_type
)
4149 mac
= kzalloc(sizeof(struct dpaa2_mac
), GFP_KERNEL
);
4153 mac
->mc_dev
= dpmac_dev
;
4154 mac
->mc_io
= priv
->mc_io
;
4155 mac
->net_dev
= priv
->net_dev
;
4157 err
= dpaa2_mac_open(mac
);
4162 if (dpaa2_eth_is_type_phy(priv
)) {
4163 err
= dpaa2_mac_connect(mac
);
4164 if (err
&& err
!= -EPROBE_DEFER
)
4165 netdev_err(priv
->net_dev
, "Error connecting to the MAC endpoint: %pe",
4174 dpaa2_mac_close(mac
);
4181 static void dpaa2_eth_disconnect_mac(struct dpaa2_eth_priv
*priv
)
4183 if (dpaa2_eth_is_type_phy(priv
))
4184 dpaa2_mac_disconnect(priv
->mac
);
4186 if (!dpaa2_eth_has_mac(priv
))
4189 dpaa2_mac_close(priv
->mac
);
4194 static irqreturn_t
dpni_irq0_handler_thread(int irq_num
, void *arg
)
4197 struct device
*dev
= (struct device
*)arg
;
4198 struct fsl_mc_device
*dpni_dev
= to_fsl_mc_device(dev
);
4199 struct net_device
*net_dev
= dev_get_drvdata(dev
);
4200 struct dpaa2_eth_priv
*priv
= netdev_priv(net_dev
);
4203 err
= dpni_get_irq_status(dpni_dev
->mc_io
, 0, dpni_dev
->mc_handle
,
4204 DPNI_IRQ_INDEX
, &status
);
4205 if (unlikely(err
)) {
4206 netdev_err(net_dev
, "Can't get irq status (err %d)\n", err
);
4210 if (status
& DPNI_IRQ_EVENT_LINK_CHANGED
)
4211 dpaa2_eth_link_state_update(netdev_priv(net_dev
));
4213 if (status
& DPNI_IRQ_EVENT_ENDPOINT_CHANGED
) {
4214 dpaa2_eth_set_mac_addr(netdev_priv(net_dev
));
4215 dpaa2_eth_update_tx_fqids(priv
);
4218 if (dpaa2_eth_has_mac(priv
))
4219 dpaa2_eth_disconnect_mac(priv
);
4221 dpaa2_eth_connect_mac(priv
);
4228 static int dpaa2_eth_setup_irqs(struct fsl_mc_device
*ls_dev
)
4231 struct fsl_mc_device_irq
*irq
;
4233 err
= fsl_mc_allocate_irqs(ls_dev
);
4235 dev_err(&ls_dev
->dev
, "MC irqs allocation failed\n");
4239 irq
= ls_dev
->irqs
[0];
4240 err
= devm_request_threaded_irq(&ls_dev
->dev
, irq
->msi_desc
->irq
,
4241 NULL
, dpni_irq0_handler_thread
,
4242 IRQF_NO_SUSPEND
| IRQF_ONESHOT
,
4243 dev_name(&ls_dev
->dev
), &ls_dev
->dev
);
4245 dev_err(&ls_dev
->dev
, "devm_request_threaded_irq(): %d\n", err
);
4249 err
= dpni_set_irq_mask(ls_dev
->mc_io
, 0, ls_dev
->mc_handle
,
4250 DPNI_IRQ_INDEX
, DPNI_IRQ_EVENT_LINK_CHANGED
|
4251 DPNI_IRQ_EVENT_ENDPOINT_CHANGED
);
4253 dev_err(&ls_dev
->dev
, "dpni_set_irq_mask(): %d\n", err
);
4257 err
= dpni_set_irq_enable(ls_dev
->mc_io
, 0, ls_dev
->mc_handle
,
4260 dev_err(&ls_dev
->dev
, "dpni_set_irq_enable(): %d\n", err
);
4267 devm_free_irq(&ls_dev
->dev
, irq
->msi_desc
->irq
, &ls_dev
->dev
);
4269 fsl_mc_free_irqs(ls_dev
);
4274 static void dpaa2_eth_add_ch_napi(struct dpaa2_eth_priv
*priv
)
4277 struct dpaa2_eth_channel
*ch
;
4279 for (i
= 0; i
< priv
->num_channels
; i
++) {
4280 ch
= priv
->channel
[i
];
4281 /* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
4282 netif_napi_add(priv
->net_dev
, &ch
->napi
, dpaa2_eth_poll
,
4287 static void dpaa2_eth_del_ch_napi(struct dpaa2_eth_priv
*priv
)
4290 struct dpaa2_eth_channel
*ch
;
4292 for (i
= 0; i
< priv
->num_channels
; i
++) {
4293 ch
= priv
->channel
[i
];
4294 netif_napi_del(&ch
->napi
);
4298 static int dpaa2_eth_probe(struct fsl_mc_device
*dpni_dev
)
4301 struct net_device
*net_dev
= NULL
;
4302 struct dpaa2_eth_priv
*priv
= NULL
;
4305 dev
= &dpni_dev
->dev
;
4308 net_dev
= alloc_etherdev_mq(sizeof(*priv
), DPAA2_ETH_MAX_NETDEV_QUEUES
);
4310 dev_err(dev
, "alloc_etherdev_mq() failed\n");
4314 SET_NETDEV_DEV(net_dev
, dev
);
4315 dev_set_drvdata(dev
, net_dev
);
4317 priv
= netdev_priv(net_dev
);
4318 priv
->net_dev
= net_dev
;
4320 priv
->iommu_domain
= iommu_get_domain_for_dev(dev
);
4322 priv
->tx_tstamp_type
= HWTSTAMP_TX_OFF
;
4323 priv
->rx_tstamp
= false;
4325 priv
->dpaa2_ptp_wq
= alloc_workqueue("dpaa2_ptp_wq", 0, 0);
4326 if (!priv
->dpaa2_ptp_wq
) {
4331 INIT_WORK(&priv
->tx_onestep_tstamp
, dpaa2_eth_tx_onestep_tstamp
);
4333 skb_queue_head_init(&priv
->tx_skbs
);
4335 priv
->rx_copybreak
= DPAA2_ETH_DEFAULT_COPYBREAK
;
4337 /* Obtain a MC portal */
4338 err
= fsl_mc_portal_allocate(dpni_dev
, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL
,
4342 err
= -EPROBE_DEFER
;
4344 dev_err(dev
, "MC portal allocation failed\n");
4345 goto err_portal_alloc
;
4348 /* MC objects initialization and configuration */
4349 err
= dpaa2_eth_setup_dpni(dpni_dev
);
4351 goto err_dpni_setup
;
4353 err
= dpaa2_eth_setup_dpio(priv
);
4355 goto err_dpio_setup
;
4357 dpaa2_eth_setup_fqs(priv
);
4359 err
= dpaa2_eth_setup_dpbp(priv
);
4361 goto err_dpbp_setup
;
4363 err
= dpaa2_eth_bind_dpni(priv
);
4367 /* Add a NAPI context for each channel */
4368 dpaa2_eth_add_ch_napi(priv
);
4370 /* Percpu statistics */
4371 priv
->percpu_stats
= alloc_percpu(*priv
->percpu_stats
);
4372 if (!priv
->percpu_stats
) {
4373 dev_err(dev
, "alloc_percpu(percpu_stats) failed\n");
4375 goto err_alloc_percpu_stats
;
4377 priv
->percpu_extras
= alloc_percpu(*priv
->percpu_extras
);
4378 if (!priv
->percpu_extras
) {
4379 dev_err(dev
, "alloc_percpu(percpu_extras) failed\n");
4381 goto err_alloc_percpu_extras
;
4384 priv
->sgt_cache
= alloc_percpu(*priv
->sgt_cache
);
4385 if (!priv
->sgt_cache
) {
4386 dev_err(dev
, "alloc_percpu(sgt_cache) failed\n");
4388 goto err_alloc_sgt_cache
;
4391 err
= dpaa2_eth_netdev_init(net_dev
);
4393 goto err_netdev_init
;
4395 /* Configure checksum offload based on current interface flags */
4396 err
= dpaa2_eth_set_rx_csum(priv
, !!(net_dev
->features
& NETIF_F_RXCSUM
));
4400 err
= dpaa2_eth_set_tx_csum(priv
,
4401 !!(net_dev
->features
& (NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
)));
4405 err
= dpaa2_eth_alloc_rings(priv
);
4407 goto err_alloc_rings
;
4409 #ifdef CONFIG_FSL_DPAA2_ETH_DCB
4410 if (dpaa2_eth_has_pause_support(priv
) && priv
->vlan_cls_enabled
) {
4411 priv
->dcbx_mode
= DCB_CAP_DCBX_HOST
| DCB_CAP_DCBX_VER_IEEE
;
4412 net_dev
->dcbnl_ops
= &dpaa2_eth_dcbnl_ops
;
4414 dev_dbg(dev
, "PFC not supported\n");
4418 err
= dpaa2_eth_setup_irqs(dpni_dev
);
4420 netdev_warn(net_dev
, "Failed to set link interrupt, fall back to polling\n");
4421 priv
->poll_thread
= kthread_run(dpaa2_eth_poll_link_state
, priv
,
4422 "%s_poll_link", net_dev
->name
);
4423 if (IS_ERR(priv
->poll_thread
)) {
4424 dev_err(dev
, "Error starting polling thread\n");
4425 goto err_poll_thread
;
4427 priv
->do_link_poll
= true;
4430 err
= dpaa2_eth_connect_mac(priv
);
4432 goto err_connect_mac
;
4434 err
= dpaa2_eth_dl_register(priv
);
4436 goto err_dl_register
;
4438 err
= dpaa2_eth_dl_traps_register(priv
);
4440 goto err_dl_trap_register
;
4442 err
= dpaa2_eth_dl_port_add(priv
);
4444 goto err_dl_port_add
;
4446 err
= register_netdev(net_dev
);
4448 dev_err(dev
, "register_netdev() failed\n");
4449 goto err_netdev_reg
;
4452 #ifdef CONFIG_DEBUG_FS
4453 dpaa2_dbg_add(priv
);
4456 dev_info(dev
, "Probed interface %s\n", net_dev
->name
);
4460 dpaa2_eth_dl_port_del(priv
);
4462 dpaa2_eth_dl_traps_unregister(priv
);
4463 err_dl_trap_register
:
4464 dpaa2_eth_dl_unregister(priv
);
4466 dpaa2_eth_disconnect_mac(priv
);
4468 if (priv
->do_link_poll
)
4469 kthread_stop(priv
->poll_thread
);
4471 fsl_mc_free_irqs(dpni_dev
);
4473 dpaa2_eth_free_rings(priv
);
4477 free_percpu(priv
->sgt_cache
);
4478 err_alloc_sgt_cache
:
4479 free_percpu(priv
->percpu_extras
);
4480 err_alloc_percpu_extras
:
4481 free_percpu(priv
->percpu_stats
);
4482 err_alloc_percpu_stats
:
4483 dpaa2_eth_del_ch_napi(priv
);
4485 dpaa2_eth_free_dpbp(priv
);
4487 dpaa2_eth_free_dpio(priv
);
4489 dpaa2_eth_free_dpni(priv
);
4491 fsl_mc_portal_free(priv
->mc_io
);
4493 destroy_workqueue(priv
->dpaa2_ptp_wq
);
4495 dev_set_drvdata(dev
, NULL
);
4496 free_netdev(net_dev
);
4501 static int dpaa2_eth_remove(struct fsl_mc_device
*ls_dev
)
4504 struct net_device
*net_dev
;
4505 struct dpaa2_eth_priv
*priv
;
4508 net_dev
= dev_get_drvdata(dev
);
4509 priv
= netdev_priv(net_dev
);
4511 #ifdef CONFIG_DEBUG_FS
4512 dpaa2_dbg_remove(priv
);
4515 dpaa2_eth_disconnect_mac(priv
);
4518 unregister_netdev(net_dev
);
4520 dpaa2_eth_dl_port_del(priv
);
4521 dpaa2_eth_dl_traps_unregister(priv
);
4522 dpaa2_eth_dl_unregister(priv
);
4524 if (priv
->do_link_poll
)
4525 kthread_stop(priv
->poll_thread
);
4527 fsl_mc_free_irqs(ls_dev
);
4529 dpaa2_eth_free_rings(priv
);
4530 free_percpu(priv
->sgt_cache
);
4531 free_percpu(priv
->percpu_stats
);
4532 free_percpu(priv
->percpu_extras
);
4534 dpaa2_eth_del_ch_napi(priv
);
4535 dpaa2_eth_free_dpbp(priv
);
4536 dpaa2_eth_free_dpio(priv
);
4537 dpaa2_eth_free_dpni(priv
);
4539 fsl_mc_portal_free(priv
->mc_io
);
4541 destroy_workqueue(priv
->dpaa2_ptp_wq
);
4543 dev_dbg(net_dev
->dev
.parent
, "Removed interface %s\n", net_dev
->name
);
4545 free_netdev(net_dev
);
4550 static const struct fsl_mc_device_id dpaa2_eth_match_id_table
[] = {
4552 .vendor
= FSL_MC_VENDOR_FREESCALE
,
4557 MODULE_DEVICE_TABLE(fslmc
, dpaa2_eth_match_id_table
);
4559 static struct fsl_mc_driver dpaa2_eth_driver
= {
4561 .name
= KBUILD_MODNAME
,
4562 .owner
= THIS_MODULE
,
4564 .probe
= dpaa2_eth_probe
,
4565 .remove
= dpaa2_eth_remove
,
4566 .match_id_table
= dpaa2_eth_match_id_table
4569 static int __init
dpaa2_eth_driver_init(void)
4573 dpaa2_eth_dbg_init();
4574 err
= fsl_mc_driver_register(&dpaa2_eth_driver
);
4576 dpaa2_eth_dbg_exit();
4583 static void __exit
dpaa2_eth_driver_exit(void)
4585 dpaa2_eth_dbg_exit();
4586 fsl_mc_driver_unregister(&dpaa2_eth_driver
);
4589 module_init(dpaa2_eth_driver_init
);
4590 module_exit(dpaa2_eth_driver_exit
);