// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2016-2020 NXP
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/kthread.h>
#include <linux/iommu.h>
#include <linux/fsl/mc.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/fsl/ptp_qoriq.h>
#include <linux/ptp_classify.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

#include "dpaa2-eth.h"

/* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
 * using trace events only need to #include <trace/events/sched.h>
 */
#define CREATE_TRACE_POINTS
#include "dpaa2-eth-trace.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");

struct ptp_qoriq *dpaa2_ptp;
EXPORT_SYMBOL(dpaa2_ptp);

static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
				dma_addr_t iova_addr)
{
	phys_addr_t phys_addr;

	phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;

	return phys_to_virt(phys_addr);
}
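
/* Frame descriptors and S/G entries carry DMA (IOVA) addresses. When the
 * device sits behind an IOMMU, the helper above translates IOVA to physical
 * before phys_to_virt(); without an IOMMU domain the IOVA already is the
 * physical address. All Rx/Tx buffer cleanup paths below funnel through it.
 */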

static void dpaa2_eth_validate_rx_csum(struct dpaa2_eth_priv *priv,
				       u32 fd_status,
				       struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* HW checksum validation is disabled, nothing to do here */
	if (!(priv->net_dev->features & NETIF_F_RXCSUM))
		return;

	/* Read checksum validation bits */
	if (!((fd_status & DPAA2_FAS_L3CV) &&
	      (fd_status & DPAA2_FAS_L4CV)))
		return;

	/* Inform the stack there's no need to compute L3/L4 csum anymore */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

/* Free a received FD.
 * Not to be used for Tx conf FDs or on any other paths.
 */
static void dpaa2_eth_free_rx_fd(struct dpaa2_eth_priv *priv,
				 const struct dpaa2_fd *fd,
				 void *vaddr)
{
	struct device *dev = priv->net_dev->dev.parent;
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	u8 fd_format = dpaa2_fd_get_format(fd);
	struct dpaa2_sg_entry *sgt;
	void *sg_vaddr;
	int i;

	/* If single buffer frame, just free the data buffer */
	if (fd_format == dpaa2_fd_single)
		goto free_buf;
	else if (fd_format != dpaa2_fd_sg)
		/* We don't support any other format */
		return;

	/* For S/G frames, we first need to free all SG entries
	 * except the first one, which was taken care of already
	 */
	sgt = vaddr + dpaa2_fd_get_offset(fd);
	for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
		addr = dpaa2_sg_get_addr(&sgt[i]);
		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
		dma_unmap_page(dev, addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);

		free_pages((unsigned long)sg_vaddr, 0);
		if (dpaa2_sg_is_final(&sgt[i]))
			break;
	}

free_buf:
	free_pages((unsigned long)vaddr, 0);
}

/* Build a linear skb based on a single-buffer frame descriptor */
static struct sk_buff *dpaa2_eth_build_linear_skb(struct dpaa2_eth_channel *ch,
						  const struct dpaa2_fd *fd,
						  void *fd_vaddr)
{
	struct sk_buff *skb = NULL;
	u16 fd_offset = dpaa2_fd_get_offset(fd);
	u32 fd_length = dpaa2_fd_get_len(fd);

	ch->buf_count--;

	skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, fd_offset);
	skb_put(skb, fd_length);

	return skb;
}
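
/* Rx buffer layout for the linear case, as implied by the FD fields:
 *
 *   page start                                          page end
 *   +----------------------+---------------+----------------------+
 *   | HW annotation +      |  frame data   | tailroom reserved    |
 *   | headroom (fd_offset) |  (fd_length)  | for skb shared info  |
 *   +----------------------+---------------+----------------------+
 *
 * build_skb() wraps the very page WRIOP wrote into, so the linear Rx
 * fast path performs no data copy.
 */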

/* Build a non-linear (fragmented) skb based on a S/G table */
static struct sk_buff *dpaa2_eth_build_frag_skb(struct dpaa2_eth_priv *priv,
						struct dpaa2_eth_channel *ch,
						struct dpaa2_sg_entry *sgt)
{
	struct sk_buff *skb = NULL;
	struct device *dev = priv->net_dev->dev.parent;
	void *sg_vaddr;
	dma_addr_t sg_addr;
	u16 sg_offset;
	u32 sg_length;
	struct page *page, *head_page;
	int page_offset;
	int i;

	for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
		struct dpaa2_sg_entry *sge = &sgt[i];

		/* NOTE: We only support SG entries in dpaa2_sg_single format,
		 * but this is the only format we may receive from HW anyway
		 */

		/* Get the address and length from the S/G entry */
		sg_addr = dpaa2_sg_get_addr(sge);
		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
		dma_unmap_page(dev, sg_addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);

		sg_length = dpaa2_sg_get_len(sge);

		if (i == 0) {
			/* We build the skb around the first data buffer */
			skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
			if (unlikely(!skb)) {
				/* Free the first SG entry now, since we already
				 * unmapped it and obtained the virtual address
				 */
				free_pages((unsigned long)sg_vaddr, 0);

				/* We still need to subtract the buffers used
				 * by this FD from our software counter
				 */
				while (!dpaa2_sg_is_final(&sgt[i]) &&
				       i < DPAA2_ETH_MAX_SG_ENTRIES)
					i++;
				break;
			}

			sg_offset = dpaa2_sg_get_offset(sge);
			skb_reserve(skb, sg_offset);
			skb_put(skb, sg_length);
		} else {
			/* Rest of the data buffers are stored as skb frags */
			page = virt_to_page(sg_vaddr);
			head_page = virt_to_head_page(sg_vaddr);

			/* Offset in page (which may be compound).
			 * Data in subsequent SG entries is stored from the
			 * beginning of the buffer, so we don't need to add the
			 * sg_offset.
			 */
			page_offset = ((unsigned long)sg_vaddr &
				       (PAGE_SIZE - 1)) +
				(page_address(page) - page_address(head_page));

			skb_add_rx_frag(skb, i - 1, head_page, page_offset,
					sg_length, priv->rx_buf_size);
		}

		if (dpaa2_sg_is_final(sge))
			break;
	}

	WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");

	/* Count all data buffers + SG table buffer */
	ch->buf_count -= i + 2;

	return skb;
}
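
/* For S/G frames the channel's buffer accounting must cover every entry
 * walked above plus the SGT buffer itself, hence "i + 2": data buffers
 * 0..i and the table. The first buffer becomes the skb head; the rest
 * are attached as page frags.
 */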

/* Free buffers acquired from the buffer pool or which were meant to
 * be released in the pool
 */
static void dpaa2_eth_free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array,
				int count)
{
	struct device *dev = priv->net_dev->dev.parent;
	void *vaddr;
	int i;

	for (i = 0; i < count; i++) {
		vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
		dma_unmap_page(dev, buf_array[i], priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);
		free_pages((unsigned long)vaddr, 0);
	}
}

static void dpaa2_eth_xdp_release_buf(struct dpaa2_eth_priv *priv,
				      struct dpaa2_eth_channel *ch,
				      dma_addr_t addr)
{
	int retries = 0;
	int err;

	ch->xdp.drop_bufs[ch->xdp.drop_cnt++] = addr;
	if (ch->xdp.drop_cnt < DPAA2_ETH_BUFS_PER_CMD)
		return;

	while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
					       ch->xdp.drop_bufs,
					       ch->xdp.drop_cnt)) == -EBUSY) {
		if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
			break;
		cpu_relax();
	}

	if (err) {
		dpaa2_eth_free_bufs(priv, ch->xdp.drop_bufs, ch->xdp.drop_cnt);
		ch->buf_count -= ch->xdp.drop_cnt;
	}

	ch->xdp.drop_cnt = 0;
}
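
/* Buffers dropped by XDP are recycled to the hardware pool in batches:
 * addresses accumulate in ch->xdp.drop_bufs until a full release command
 * (DPAA2_ETH_BUFS_PER_CMD buffers) can be issued, keeping software
 * portal traffic low on drop-heavy workloads.
 */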

static int dpaa2_eth_xdp_flush(struct dpaa2_eth_priv *priv,
			       struct dpaa2_eth_fq *fq,
			       struct dpaa2_eth_xdp_fds *xdp_fds)
{
	int total_enqueued = 0, retries = 0, enqueued;
	struct dpaa2_eth_drv_stats *percpu_extras;
	int num_fds, err, max_retries;
	struct dpaa2_fd *fds;

	percpu_extras = this_cpu_ptr(priv->percpu_extras);

	/* try to enqueue all the FDs until the max number of retries is hit */
	fds = xdp_fds->fds;
	num_fds = xdp_fds->num;
	max_retries = num_fds * DPAA2_ETH_ENQUEUE_RETRIES;
	while (total_enqueued < num_fds && retries < max_retries) {
		err = priv->enqueue(priv, fq, &fds[total_enqueued],
				    0, num_fds - total_enqueued, &enqueued);
		if (err == -EBUSY) {
			percpu_extras->tx_portal_busy += ++retries;
			continue;
		}
		total_enqueued += enqueued;
	}
	xdp_fds->num = 0;

	return total_enqueued;
}

static void dpaa2_eth_xdp_tx_flush(struct dpaa2_eth_priv *priv,
				   struct dpaa2_eth_channel *ch,
				   struct dpaa2_eth_fq *fq)
{
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_fd *fds;
	int enqueued, i;

	percpu_stats = this_cpu_ptr(priv->percpu_stats);

	/* enqueue the array of XDP_TX frames */
	enqueued = dpaa2_eth_xdp_flush(priv, fq, &fq->xdp_tx_fds);

	/* update statistics */
	percpu_stats->tx_packets += enqueued;
	fds = fq->xdp_tx_fds.fds;
	for (i = 0; i < enqueued; i++) {
		percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
		ch->stats.xdp_tx++;
	}
	for (i = enqueued; i < fq->xdp_tx_fds.num; i++) {
		dpaa2_eth_xdp_release_buf(priv, ch, dpaa2_fd_get_addr(&fds[i]));
		percpu_stats->tx_errors++;
		ch->stats.xdp_tx_err++;
	}
	fq->xdp_tx_fds.num = 0;
}

static void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv,
				  struct dpaa2_eth_channel *ch,
				  struct dpaa2_fd *fd,
				  void *buf_start, u16 queue_id)
{
	struct dpaa2_faead *faead;
	struct dpaa2_fd *dest_fd;
	struct dpaa2_eth_fq *fq;
	u32 ctrl, frc;

	/* Mark the egress frame hardware annotation area as valid */
	frc = dpaa2_fd_get_frc(fd);
	dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
	dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL);

	/* Instruct hardware to release the FD buffer directly into
	 * the buffer pool once transmission is completed, instead of
	 * sending a Tx confirmation frame to us
	 */
	ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV;
	faead = dpaa2_get_faead(buf_start, false);
	faead->ctrl = cpu_to_le32(ctrl);
	faead->conf_fqid = 0;

	fq = &priv->fq[queue_id];
	dest_fd = &fq->xdp_tx_fds.fds[fq->xdp_tx_fds.num++];
	memcpy(dest_fd, fd, sizeof(*dest_fd));

	if (fq->xdp_tx_fds.num < DEV_MAP_BULK_SIZE)
		return;

	dpaa2_eth_xdp_tx_flush(priv, ch, fq);
}
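
/* XDP_TX frames set the EBDDV bit in the frame annotation, instructing
 * WRIOP to return the buffer straight to the pool after transmission;
 * no Tx confirmation is generated and the driver never touches these
 * buffers again. Frames are batched per Tx queue, up to
 * DEV_MAP_BULK_SIZE, before being flushed.
 */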

static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
			     struct dpaa2_eth_channel *ch,
			     struct dpaa2_eth_fq *rx_fq,
			     struct dpaa2_fd *fd, void *vaddr)
{
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	struct bpf_prog *xdp_prog;
	struct xdp_buff xdp;
	u32 xdp_act = XDP_PASS;
	int err;

	rcu_read_lock();

	xdp_prog = READ_ONCE(ch->xdp.prog);
	if (!xdp_prog)
		goto out;

	xdp.data = vaddr + dpaa2_fd_get_offset(fd);
	xdp.data_end = xdp.data + dpaa2_fd_get_len(fd);
	xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
	xdp_set_data_meta_invalid(&xdp);
	xdp.rxq = &ch->xdp_rxq;

	xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE -
		(dpaa2_fd_get_offset(fd) - XDP_PACKET_HEADROOM);

	xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);

	/* xdp.data pointer may have changed */
	dpaa2_fd_set_offset(fd, xdp.data - vaddr);
	dpaa2_fd_set_len(fd, xdp.data_end - xdp.data);

	switch (xdp_act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		dpaa2_eth_xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid);
		break;
	default:
		bpf_warn_invalid_xdp_action(xdp_act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
		fallthrough;
	case XDP_DROP:
		dpaa2_eth_xdp_release_buf(priv, ch, addr);
		ch->stats.xdp_drop++;
		break;
	case XDP_REDIRECT:
		dma_unmap_page(priv->net_dev->dev.parent, addr,
			       priv->rx_buf_size, DMA_BIDIRECTIONAL);
		ch->buf_count--;

		/* Allow redirect use of full headroom */
		xdp.data_hard_start = vaddr;
		xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE;

		err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
		if (unlikely(err))
			ch->stats.xdp_drop++;
		else
			ch->stats.xdp_redirect++;
		break;
	}

	ch->xdp.res |= xdp_act;
out:
	rcu_read_unlock();
	return xdp_act;
}
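
/* Verdict handling above follows the usual XDP contract: PASS falls
 * through to the normal skb path, TX re-enqueues on the Tx queue paired
 * with the Rx flow, REDIRECT hands the buffer (with its full headroom)
 * to xdp_do_redirect(), and any unknown action is treated as ABORTED
 * and then dropped.
 */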

/* Main Rx frame processing routine */
static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
			 struct dpaa2_eth_channel *ch,
			 const struct dpaa2_fd *fd,
			 struct dpaa2_eth_fq *fq)
{
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	u8 fd_format = dpaa2_fd_get_format(fd);
	void *vaddr;
	struct sk_buff *skb;
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_drv_stats *percpu_extras;
	struct device *dev = priv->net_dev->dev.parent;
	struct dpaa2_fas *fas;
	void *buf_data;
	u32 status = 0;
	u32 xdp_act;

	/* Tracing point */
	trace_dpaa2_rx_fd(priv->net_dev, fd);

	vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
	dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
				DMA_BIDIRECTIONAL);

	fas = dpaa2_get_fas(vaddr, false);
	prefetch(fas);
	buf_data = vaddr + dpaa2_fd_get_offset(fd);
	prefetch(buf_data);

	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	percpu_extras = this_cpu_ptr(priv->percpu_extras);

	if (fd_format == dpaa2_fd_single) {
		xdp_act = dpaa2_eth_run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr);
		if (xdp_act != XDP_PASS) {
			percpu_stats->rx_packets++;
			percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
			return;
		}

		dma_unmap_page(dev, addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);
		skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
	} else if (fd_format == dpaa2_fd_sg) {
		WARN_ON(priv->xdp_prog);

		dma_unmap_page(dev, addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);
		skb = dpaa2_eth_build_frag_skb(priv, ch, buf_data);
		free_pages((unsigned long)vaddr, 0);
		percpu_extras->rx_sg_frames++;
		percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
	} else {
		/* We don't support any other format */
		goto err_frame_format;
	}

	if (unlikely(!skb))
		goto err_build_skb;

	prefetch(skb->data);

	/* Get the timestamp value */
	if (priv->rx_tstamp) {
		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
		__le64 *ts = dpaa2_get_ts(vaddr, false);
		u64 ns;

		memset(shhwtstamps, 0, sizeof(*shhwtstamps));

		ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
		shhwtstamps->hwtstamp = ns_to_ktime(ns);
	}

	/* Check if we need to validate the L4 csum */
	if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
		status = le32_to_cpu(fas->status);
		dpaa2_eth_validate_rx_csum(priv, status, skb);
	}

	skb->protocol = eth_type_trans(skb, priv->net_dev);
	skb_record_rx_queue(skb, fq->flowid);

	percpu_stats->rx_packets++;
	percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);

	list_add_tail(&skb->list, ch->rx_list);

	return;

err_build_skb:
	dpaa2_eth_free_rx_fd(priv, fd, vaddr);
err_frame_format:
	percpu_stats->rx_dropped++;
}
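
/* Note that the Rx buffer is only synced, not unmapped, before XDP runs:
 * on XDP_DROP/XDP_TX the page can go back to the hardware pool still
 * mapped, saving a map/unmap cycle per recycled buffer. The unmap only
 * happens once the frame is committed to the skb path or redirected.
 */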

/* Processing of Rx frames received on the error FQ
 * We check and print the error bits and then free the frame
 */
static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv,
			     struct dpaa2_eth_channel *ch,
			     const struct dpaa2_fd *fd,
			     struct dpaa2_eth_fq *fq __always_unused)
{
	struct device *dev = priv->net_dev->dev.parent;
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	u8 fd_format = dpaa2_fd_get_format(fd);
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_trap_item *trap_item;
	struct dpaa2_fapr *fapr;
	struct sk_buff *skb;
	void *buf_data;
	void *vaddr;

	vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
	dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
				DMA_BIDIRECTIONAL);

	buf_data = vaddr + dpaa2_fd_get_offset(fd);

	if (fd_format == dpaa2_fd_single) {
		dma_unmap_page(dev, addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);
		skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
	} else if (fd_format == dpaa2_fd_sg) {
		dma_unmap_page(dev, addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);
		skb = dpaa2_eth_build_frag_skb(priv, ch, buf_data);
		free_pages((unsigned long)vaddr, 0);
	} else {
		/* We don't support any other format */
		dpaa2_eth_free_rx_fd(priv, fd, vaddr);
		goto err_frame_format;
	}

	fapr = dpaa2_get_fapr(vaddr, false);
	trap_item = dpaa2_eth_dl_get_trap(priv, fapr);
	if (trap_item)
		devlink_trap_report(priv->devlink, skb, trap_item->trap_ctx,
				    &priv->devlink_port, NULL);
	consume_skb(skb);

err_frame_format:
	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	percpu_stats->rx_errors++;
	ch->buf_count--;
}

/* Consume all frames pull-dequeued into the store. This is the simplest way to
 * make sure we don't accidentally issue another volatile dequeue which would
 * overwrite (leak) frames already in the store.
 *
 * Observance of NAPI budget is not our concern, leaving that to the caller.
 */
static int dpaa2_eth_consume_frames(struct dpaa2_eth_channel *ch,
				    struct dpaa2_eth_fq **src)
{
	struct dpaa2_eth_priv *priv = ch->priv;
	struct dpaa2_eth_fq *fq = NULL;
	struct dpaa2_dq *dq;
	const struct dpaa2_fd *fd;
	int cleaned = 0, retries = 0;
	int is_last;

	do {
		dq = dpaa2_io_store_next(ch->store, &is_last);
		if (unlikely(!dq)) {
			/* If we're here, we *must* have placed a
			 * volatile dequeue command, so keep reading through
			 * the store until we get some sort of valid response
			 * token (either a valid frame or an "empty dequeue")
			 */
			if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) {
				netdev_err_once(priv->net_dev,
						"Unable to read a valid dequeue response\n");
				return -ETIMEDOUT;
			}
			continue;
		}

		fd = dpaa2_dq_fd(dq);
		fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);

		fq->consume(priv, ch, fd, fq);
		cleaned++;
		retries = 0;
	} while (!is_last);

	if (!cleaned)
		return 0;

	fq->stats.frames += cleaned;
	ch->stats.frames += cleaned;

	/* A dequeue operation only pulls frames from a single queue
	 * into the store. Return the frame queue as an out param.
	 */
	if (src)
		*src = fq;

	return cleaned;
}
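
/* A volatile dequeue pulls a burst of frames from a single FQ into the
 * channel's software store; the loop above must drain the store fully
 * (until is_last) before another pull may be issued. Since each pull
 * targets one queue, the caller learns through @src which FQ the frames
 * came from and can attribute the work to Rx vs Tx confirmations.
 */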

static int dpaa2_eth_ptp_parse(struct sk_buff *skb,
			       u8 *msgtype, u8 *twostep, u8 *udp,
			       u16 *correction_offset,
			       u16 *origintimestamp_offset)
{
	unsigned int ptp_class;
	struct ptp_header *hdr;
	unsigned int type;
	u8 *base;

	ptp_class = ptp_classify_raw(skb);
	if (ptp_class == PTP_CLASS_NONE)
		return -EINVAL;

	hdr = ptp_parse_header(skb, ptp_class);
	if (!hdr)
		return -EINVAL;

	*msgtype = ptp_get_msgtype(hdr, ptp_class);
	*twostep = hdr->flag_field[0] & 0x2;

	type = ptp_class & PTP_CLASS_PMASK;
	if (type == PTP_CLASS_IPV4 ||
	    type == PTP_CLASS_IPV6)
		*udp = 1;
	else
		*udp = 0;

	base = skb_mac_header(skb);
	*correction_offset = (u8 *)&hdr->correction - base;
	*origintimestamp_offset = (u8 *)hdr + sizeof(struct ptp_header) - base;

	return 0;
}
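
/* Both offsets computed above are relative to the MAC header:
 * correction_offset locates the PTP correctionField (updated by hardware
 * for one-step operation) and origintimestamp_offset the first byte past
 * the common PTP header, i.e. the originTimestamp of a Sync message.
 */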

/* Configure the egress frame annotation for timestamp update */
static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_eth_priv *priv,
				       struct dpaa2_fd *fd,
				       void *buf_start,
				       struct sk_buff *skb)
{
	struct ptp_tstamp origin_timestamp;
	struct dpni_single_step_cfg cfg;
	u8 msgtype, twostep, udp;
	struct dpaa2_faead *faead;
	struct dpaa2_fas *fas;
	struct timespec64 ts;
	u16 offset1, offset2;
	u32 ctrl, frc;
	__le64 *ns;
	u8 *data;

	/* Mark the egress frame annotation area as valid */
	frc = dpaa2_fd_get_frc(fd);
	dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);

	/* Set hardware annotation size */
	ctrl = dpaa2_fd_get_ctrl(fd);
	dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL);

	/* enable UPD (update prepended data) bit in FAEAD field of
	 * hardware frame annotation area
	 */
	ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
	faead = dpaa2_get_faead(buf_start, true);
	faead->ctrl = cpu_to_le32(ctrl);

	if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
		if (dpaa2_eth_ptp_parse(skb, &msgtype, &twostep, &udp,
					&offset1, &offset2) ||
		    msgtype != PTP_MSGTYPE_SYNC || twostep) {
			WARN_ONCE(1, "Bad packet for one-step timestamping\n");
			return;
		}

		/* Mark the frame annotation status as valid */
		frc = dpaa2_fd_get_frc(fd);
		dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FASV);

		/* Mark the PTP flag for one step timestamping */
		fas = dpaa2_get_fas(buf_start, true);
		fas->status = cpu_to_le32(DPAA2_FAS_PTP);

		dpaa2_ptp->caps.gettime64(&dpaa2_ptp->caps, &ts);
		ns = dpaa2_get_ts(buf_start, true);
		*ns = cpu_to_le64(timespec64_to_ns(&ts) /
				  DPAA2_PTP_CLK_PERIOD_NS);

		/* Update current time to PTP message originTimestamp field */
		ns_to_ptp_tstamp(&origin_timestamp, le64_to_cpup(ns));
		data = skb_mac_header(skb);
		*(__be16 *)(data + offset2) = htons(origin_timestamp.sec_msb);
		*(__be32 *)(data + offset2 + 2) =
			htonl(origin_timestamp.sec_lsb);
		*(__be32 *)(data + offset2 + 6) = htonl(origin_timestamp.nsec);

		cfg.en = 1;
		cfg.ch_update = udp;
		cfg.offset = offset1;
		cfg.peer_delay = 0;

		if (dpni_set_single_step_cfg(priv->mc_io, 0, priv->mc_token,
					     &cfg))
			WARN_ONCE(1, "Failed to set single step register");
	}
}

/* Create a frame descriptor based on a fragmented skb */
static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
				 struct sk_buff *skb,
				 struct dpaa2_fd *fd,
				 void **swa_addr)
{
	struct device *dev = priv->net_dev->dev.parent;
	void *sgt_buf = NULL;
	dma_addr_t addr;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	struct dpaa2_sg_entry *sgt;
	int i, err;
	int sgt_buf_size;
	struct scatterlist *scl, *crt_scl;
	int num_sg;
	int num_dma_bufs;
	struct dpaa2_eth_swa *swa;

	/* Create and map scatterlist.
	 * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
	 * to go beyond nr_frags+1.
	 * Note: We don't support chained scatterlists
	 */
	if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
		return -EINVAL;

	scl = kmalloc_array(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
	if (unlikely(!scl))
		return -ENOMEM;

	sg_init_table(scl, nr_frags + 1);
	num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
	if (unlikely(num_sg < 0)) {
		err = -ENOMEM;
		goto dma_map_sg_failed;
	}
	num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
	if (unlikely(!num_dma_bufs)) {
		err = -ENOMEM;
		goto dma_map_sg_failed;
	}

	/* Prepare the HW SGT structure */
	sgt_buf_size = priv->tx_data_offset +
		       sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
	sgt_buf = napi_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN);
	if (unlikely(!sgt_buf)) {
		err = -ENOMEM;
		goto sgt_buf_alloc_failed;
	}
	sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
	memset(sgt_buf, 0, sgt_buf_size);

	sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);

	/* Fill in the HW SGT structure.
	 *
	 * sgt_buf is zeroed out, so the following fields are implicit
	 * in all sgt entries:
	 *   - offset is 0
	 *   - format is 'dpaa2_sg_single'
	 */
	for_each_sg(scl, crt_scl, num_dma_bufs, i) {
		dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
		dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
	}
	dpaa2_sg_set_final(&sgt[i - 1], true);

	/* Store the skb backpointer in the SGT buffer.
	 * Fit the scatterlist and the number of buffers alongside the
	 * skb backpointer in the software annotation area. We'll need
	 * all of them on Tx Conf.
	 */
	*swa_addr = (void *)sgt_buf;
	swa = (struct dpaa2_eth_swa *)sgt_buf;
	swa->type = DPAA2_ETH_SWA_SG;
	swa->sg.skb = skb;
	swa->sg.scl = scl;
	swa->sg.num_sg = num_sg;
	swa->sg.sgt_size = sgt_buf_size;

	/* Separately map the SGT buffer */
	addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, addr))) {
		err = -ENOMEM;
		goto dma_map_single_failed;
	}
	dpaa2_fd_set_offset(fd, priv->tx_data_offset);
	dpaa2_fd_set_format(fd, dpaa2_fd_sg);
	dpaa2_fd_set_addr(fd, addr);
	dpaa2_fd_set_len(fd, skb->len);
	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);

	return 0;

dma_map_single_failed:
	skb_free_frag(sgt_buf);
sgt_buf_alloc_failed:
	dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
dma_map_sg_failed:
	kfree(scl);
	return err;
}
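
/* Resulting SGT buffer layout: software annotation first, then the
 * hardware-visible table.
 *
 *   sgt_buf
 *   +------------------------------+ <- swa (skb/scl backpointers)
 *   |  priv->tx_data_offset bytes  |
 *   +------------------------------+ <- sgt[0]..sgt[num_dma_bufs - 1]
 *   |  hardware S/G entries        |
 *   +------------------------------+
 *
 * On error, the goto ladder above unwinds in strict reverse order of
 * the allocations.
 */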

/* Create a SG frame descriptor based on a linear skb.
 *
 * This function is used on the Tx path when the skb headroom is not large
 * enough for the HW requirements, thus instead of realloc-ing the skb we
 * create a SG frame descriptor with only one entry.
 */
static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
					    struct sk_buff *skb,
					    struct dpaa2_fd *fd,
					    void **swa_addr)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpaa2_eth_sgt_cache *sgt_cache;
	struct dpaa2_sg_entry *sgt;
	struct dpaa2_eth_swa *swa;
	dma_addr_t addr, sgt_addr;
	void *sgt_buf = NULL;
	int sgt_buf_size;
	int err;

	/* Prepare the HW SGT structure */
	sgt_cache = this_cpu_ptr(priv->sgt_cache);
	sgt_buf_size = priv->tx_data_offset + sizeof(struct dpaa2_sg_entry);

	if (sgt_cache->count == 0)
		sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN,
				  GFP_ATOMIC);
	else
		sgt_buf = sgt_cache->buf[--sgt_cache->count];
	if (unlikely(!sgt_buf))
		return -ENOMEM;

	sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
	sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);

	addr = dma_map_single(dev, skb->data, skb->len, DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, addr))) {
		err = -ENOMEM;
		goto data_map_failed;
	}

	/* Fill in the HW SGT structure */
	dpaa2_sg_set_addr(sgt, addr);
	dpaa2_sg_set_len(sgt, skb->len);
	dpaa2_sg_set_final(sgt, true);

	/* Store the skb backpointer in the SGT buffer */
	*swa_addr = (void *)sgt_buf;
	swa = (struct dpaa2_eth_swa *)sgt_buf;
	swa->type = DPAA2_ETH_SWA_SINGLE;
	swa->single.skb = skb;
	swa->single.sgt_size = sgt_buf_size;

	/* Separately map the SGT buffer */
	sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, sgt_addr))) {
		err = -ENOMEM;
		goto sgt_map_failed;
	}

	dpaa2_fd_set_offset(fd, priv->tx_data_offset);
	dpaa2_fd_set_format(fd, dpaa2_fd_sg);
	dpaa2_fd_set_addr(fd, sgt_addr);
	dpaa2_fd_set_len(fd, skb->len);
	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);

	return 0;

sgt_map_failed:
	dma_unmap_single(dev, addr, skb->len, DMA_BIDIRECTIONAL);
data_map_failed:
	if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE)
		kfree(sgt_buf);
	else
		sgt_cache->buf[sgt_cache->count++] = sgt_buf;

	return err;
}
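
/* The per-cpu SGT cache amortizes kzalloc()/kfree() of the one-entry SGT
 * buffers used above. Since frames may complete on a different core than
 * they were sent from, a buffer can be returned to a different CPU's
 * cache than it was taken from; that is presumably harmless, as every
 * buffer has the same size and fits any cache slot.
 */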

/* Create a frame descriptor based on a linear skb */
static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
				     struct sk_buff *skb,
				     struct dpaa2_fd *fd,
				     void **swa_addr)
{
	struct device *dev = priv->net_dev->dev.parent;
	u8 *buffer_start, *aligned_start;
	struct dpaa2_eth_swa *swa;
	dma_addr_t addr;

	buffer_start = skb->data - dpaa2_eth_needed_headroom(skb);

	/* If there's enough room to align the FD address, do it.
	 * It will help hardware optimize accesses.
	 */
	aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
				  DPAA2_ETH_TX_BUF_ALIGN);
	if (aligned_start >= skb->head)
		buffer_start = aligned_start;

	/* Store a backpointer to the skb at the beginning of the buffer
	 * (in the private data area) such that we can release it
	 * on Tx confirm
	 */
	*swa_addr = (void *)buffer_start;
	swa = (struct dpaa2_eth_swa *)buffer_start;
	swa->type = DPAA2_ETH_SWA_SINGLE;
	swa->single.skb = skb;

	addr = dma_map_single(dev, buffer_start,
			      skb_tail_pointer(skb) - buffer_start,
			      DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, addr)))
		return -ENOMEM;

	dpaa2_fd_set_addr(fd, addr);
	dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
	dpaa2_fd_set_len(fd, skb->len);
	dpaa2_fd_set_format(fd, dpaa2_fd_single);
	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);

	return 0;
}
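
/* Taken together, the three builders above define the Tx fast path:
 * nonlinear skbs get a full S/G table, linear skbs with insufficient
 * headroom get a one-entry S/G table (cheaper than reallocating the
 * skb), and well-formed linear skbs are mapped directly as
 * single-buffer FDs.
 */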

/* FD freeing routine on the Tx path
 *
 * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
 * back-pointed to is also freed.
 * This can be called either from dpaa2_eth_tx_conf() or on the error path of
 * dpaa2_eth_tx().
 */
static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
				 struct dpaa2_eth_fq *fq,
				 const struct dpaa2_fd *fd, bool in_napi)
{
	struct device *dev = priv->net_dev->dev.parent;
	dma_addr_t fd_addr, sg_addr;
	struct sk_buff *skb = NULL;
	unsigned char *buffer_start;
	struct dpaa2_eth_swa *swa;
	u8 fd_format = dpaa2_fd_get_format(fd);
	u32 fd_len = dpaa2_fd_get_len(fd);

	struct dpaa2_eth_sgt_cache *sgt_cache;
	struct dpaa2_sg_entry *sgt;

	fd_addr = dpaa2_fd_get_addr(fd);
	buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
	swa = (struct dpaa2_eth_swa *)buffer_start;

	if (fd_format == dpaa2_fd_single) {
		if (swa->type == DPAA2_ETH_SWA_SINGLE) {
			skb = swa->single.skb;
			/* Accessing the skb buffer is safe before dma unmap,
			 * because we didn't map the actual skb shell.
			 */
			dma_unmap_single(dev, fd_addr,
					 skb_tail_pointer(skb) - buffer_start,
					 DMA_BIDIRECTIONAL);
		} else {
			WARN_ONCE(swa->type != DPAA2_ETH_SWA_XDP, "Wrong SWA type");
			dma_unmap_single(dev, fd_addr, swa->xdp.dma_size,
					 DMA_BIDIRECTIONAL);
		}
	} else if (fd_format == dpaa2_fd_sg) {
		if (swa->type == DPAA2_ETH_SWA_SG) {
			skb = swa->sg.skb;

			/* Unmap the scatterlist */
			dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg,
				     DMA_BIDIRECTIONAL);
			kfree(swa->sg.scl);

			/* Unmap the SGT buffer */
			dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
					 DMA_BIDIRECTIONAL);
		} else {
			skb = swa->single.skb;

			/* Unmap the SGT Buffer */
			dma_unmap_single(dev, fd_addr, swa->single.sgt_size,
					 DMA_BIDIRECTIONAL);

			sgt = (struct dpaa2_sg_entry *)(buffer_start +
							priv->tx_data_offset);
			sg_addr = dpaa2_sg_get_addr(sgt);
			dma_unmap_single(dev, sg_addr, skb->len, DMA_BIDIRECTIONAL);
		}
	} else {
		netdev_dbg(priv->net_dev, "Invalid FD format\n");
		return;
	}

	if (swa->type != DPAA2_ETH_SWA_XDP && in_napi) {
		fq->dq_frames++;
		fq->dq_bytes += fd_len;
	}

	if (swa->type == DPAA2_ETH_SWA_XDP) {
		xdp_return_frame(swa->xdp.xdpf);
		return;
	}

	/* Get the timestamp value */
	if (skb->cb[0] == TX_TSTAMP) {
		struct skb_shared_hwtstamps shhwtstamps;
		__le64 *ts = dpaa2_get_ts(buffer_start, true);
		u64 ns;

		memset(&shhwtstamps, 0, sizeof(shhwtstamps));

		ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
		shhwtstamps.hwtstamp = ns_to_ktime(ns);
		skb_tstamp_tx(skb, &shhwtstamps);
	} else if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
		mutex_unlock(&priv->onestep_tstamp_lock);
	}

	/* Free SGT buffer allocated on tx */
	if (fd_format != dpaa2_fd_single) {
		sgt_cache = this_cpu_ptr(priv->sgt_cache);
		if (swa->type == DPAA2_ETH_SWA_SG) {
			skb_free_frag(buffer_start);
		} else {
			if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE)
				kfree(buffer_start);
			else
				sgt_cache->buf[sgt_cache->count++] = buffer_start;
		}
	}

	/* Move on with skb release */
	napi_consume_skb(skb, in_napi);
}

static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb,
				  struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpaa2_fd fd;
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_drv_stats *percpu_extras;
	struct dpaa2_eth_fq *fq;
	struct netdev_queue *nq;
	u16 queue_mapping;
	unsigned int needed_headroom;
	u32 fd_len;
	u8 prio = 0;
	int err, i;
	void *swa;

	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	percpu_extras = this_cpu_ptr(priv->percpu_extras);

	needed_headroom = dpaa2_eth_needed_headroom(skb);

	/* We'll be holding a back-reference to the skb until Tx Confirmation;
	 * we don't want that overwritten by a concurrent Tx with a cloned skb.
	 */
	skb = skb_unshare(skb, GFP_ATOMIC);
	if (unlikely(!skb)) {
		/* skb_unshare() has already freed the skb */
		percpu_stats->tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* Setup the FD fields */
	memset(&fd, 0, sizeof(fd));

	if (skb_is_nonlinear(skb)) {
		err = dpaa2_eth_build_sg_fd(priv, skb, &fd, &swa);
		percpu_extras->tx_sg_frames++;
		percpu_extras->tx_sg_bytes += skb->len;
	} else if (skb_headroom(skb) < needed_headroom) {
		err = dpaa2_eth_build_sg_fd_single_buf(priv, skb, &fd, &swa);
		percpu_extras->tx_sg_frames++;
		percpu_extras->tx_sg_bytes += skb->len;
		percpu_extras->tx_converted_sg_frames++;
		percpu_extras->tx_converted_sg_bytes += skb->len;
	} else {
		err = dpaa2_eth_build_single_fd(priv, skb, &fd, &swa);
	}

	if (unlikely(err)) {
		percpu_stats->tx_dropped++;
		goto err_build_fd;
	}

	if (skb->cb[0])
		dpaa2_eth_enable_tx_tstamp(priv, &fd, swa, skb);

	/* Tracing point */
	trace_dpaa2_tx_fd(net_dev, &fd);

	/* TxConf FQ selection relies on queue id from the stack.
	 * In case of a forwarded frame from another DPNI interface, we choose
	 * a queue affined to the same core that processed the Rx frame
	 */
	queue_mapping = skb_get_queue_mapping(skb);

	if (net_dev->num_tc) {
		prio = netdev_txq_to_tc(net_dev, queue_mapping);
		/* Hardware interprets priority level 0 as being the highest,
		 * so we need to do a reverse mapping to the netdev tc index
		 */
		prio = net_dev->num_tc - prio - 1;
		/* We have only one FQ array entry for all Tx hardware queues
		 * with the same flow id (but different priority levels)
		 */
		queue_mapping %= dpaa2_eth_queue_count(priv);
	}
	fq = &priv->fq[queue_mapping];

	fd_len = dpaa2_fd_get_len(&fd);
	nq = netdev_get_tx_queue(net_dev, queue_mapping);
	netdev_tx_sent_queue(nq, fd_len);

	/* Everything that happens after this enqueue might race with
	 * the Tx confirmation callback for this frame
	 */
	for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
		err = priv->enqueue(priv, fq, &fd, prio, 1, NULL);
		if (err != -EBUSY)
			break;
	}
	percpu_extras->tx_portal_busy += i;
	if (unlikely(err < 0)) {
		percpu_stats->tx_errors++;
		/* Clean up everything, including freeing the skb */
		dpaa2_eth_free_tx_fd(priv, fq, &fd, false);
		netdev_tx_completed_queue(nq, 1, fd_len);
	} else {
		percpu_stats->tx_packets++;
		percpu_stats->tx_bytes += fd_len;
	}

	return NETDEV_TX_OK;

err_build_fd:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

static void dpaa2_eth_tx_onestep_tstamp(struct work_struct *work)
{
	struct dpaa2_eth_priv *priv = container_of(work, struct dpaa2_eth_priv,
						   tx_onestep_tstamp);
	struct sk_buff *skb;

	while (true) {
		skb = skb_dequeue(&priv->tx_skbs);
		if (!skb)
			return;

		/* Lock just before transmitting a one-step timestamping
		 * packet, and release the lock in dpaa2_eth_free_tx_fd once
		 * we confirm the packet has been sent on hardware, or when
		 * cleaning up after a transmit failure.
		 */
		mutex_lock(&priv->onestep_tstamp_lock);
		__dpaa2_eth_tx(skb, priv->net_dev);
	}
}

static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	u8 msgtype, twostep, udp;
	u16 offset1, offset2;

	/* Utilize skb->cb[0] for timestamping request per skb */
	skb->cb[0] = 0;

	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && dpaa2_ptp) {
		if (priv->tx_tstamp_type == HWTSTAMP_TX_ON)
			skb->cb[0] = TX_TSTAMP;
		else if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC)
			skb->cb[0] = TX_TSTAMP_ONESTEP_SYNC;
	}

	/* TX for one-step timestamping PTP Sync packet */
	if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
		if (!dpaa2_eth_ptp_parse(skb, &msgtype, &twostep, &udp,
					 &offset1, &offset2))
			if (msgtype == PTP_MSGTYPE_SYNC && twostep == 0) {
				skb_queue_tail(&priv->tx_skbs, skb);
				queue_work(priv->dpaa2_ptp_wq,
					   &priv->tx_onestep_tstamp);
				return NETDEV_TX_OK;
			}
		/* Use two-step timestamping if not one-step timestamping
		 * PTP Sync packet
		 */
		skb->cb[0] = TX_TSTAMP;
	}

	/* TX for other packets */
	return __dpaa2_eth_tx(skb, net_dev);
}
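
/* One-step PTP Sync frames are funneled through a dedicated workqueue
 * and serialized by onestep_tstamp_lock: the single-step configuration
 * written by dpni_set_single_step_cfg() is per-port hardware state, so
 * presumably only one such frame may be in flight at a time. All other
 * traffic keeps using the lockless fast path via __dpaa2_eth_tx().
 */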

/* Tx confirmation frame processing routine */
static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
			      struct dpaa2_eth_channel *ch __always_unused,
			      const struct dpaa2_fd *fd,
			      struct dpaa2_eth_fq *fq)
{
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_drv_stats *percpu_extras;
	u32 fd_len = dpaa2_fd_get_len(fd);
	u32 fd_errors;

	/* Tracing point */
	trace_dpaa2_tx_conf_fd(priv->net_dev, fd);

	percpu_extras = this_cpu_ptr(priv->percpu_extras);
	percpu_extras->tx_conf_frames++;
	percpu_extras->tx_conf_bytes += fd_len;

	/* Check frame errors in the FD field */
	fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
	dpaa2_eth_free_tx_fd(priv, fq, fd, true);

	if (likely(!fd_errors))
		return;

	if (net_ratelimit())
		netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
			   fd_errors);

	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	/* Tx-conf logically pertains to the egress path. */
	percpu_stats->tx_errors++;
}

static int dpaa2_eth_set_rx_vlan_filtering(struct dpaa2_eth_priv *priv,
					   bool enable)
{
	int err;

	err = dpni_enable_vlan_filter(priv->mc_io, 0, priv->mc_token, enable);

	if (err) {
		netdev_err(priv->net_dev,
			   "dpni_enable_vlan_filter failed\n");
		return err;
	}

	return 0;
}

static int dpaa2_eth_set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
{
	int err;

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_RX_L3_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev,
			   "dpni_set_offload(RX_L3_CSUM) failed\n");
		return err;
	}

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_RX_L4_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev,
			   "dpni_set_offload(RX_L4_CSUM) failed\n");
		return err;
	}

	return 0;
}

static int dpaa2_eth_set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
{
	int err;

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_TX_L3_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n");
		return err;
	}

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_TX_L4_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n");
		return err;
	}

	return 0;
}

/* Perform a single release command to add buffers
 * to the specified buffer pool
 */
static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv,
			      struct dpaa2_eth_channel *ch, u16 bpid)
{
	struct device *dev = priv->net_dev->dev.parent;
	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
	struct page *page;
	dma_addr_t addr;
	int retries = 0;
	int i, err;

	for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
		/* Allocate one page for each Rx buffer. WRIOP sees
		 * the entire page except for a tailroom reserved for
		 * skb shared info
		 */
		page = dev_alloc_pages(0);
		if (!page)
			goto err_alloc;

		addr = dma_map_page(dev, page, 0, priv->rx_buf_size,
				    DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(dev, addr)))
			goto err_map;

		buf_array[i] = addr;

		/* tracing point */
		trace_dpaa2_eth_buf_seed(priv->net_dev,
					 page, DPAA2_ETH_RX_BUF_RAW_SIZE,
					 addr, priv->rx_buf_size,
					 bpid);
	}

release_bufs:
	/* In case the portal is busy, retry until successful */
	while ((err = dpaa2_io_service_release(ch->dpio, bpid,
					       buf_array, i)) == -EBUSY) {
		if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
			break;
		cpu_relax();
	}

	/* If release command failed, clean up and bail out;
	 * not much else we can do about it
	 */
	if (err) {
		dpaa2_eth_free_bufs(priv, buf_array, i);
		return 0;
	}

	return i;

err_map:
	__free_pages(page, 0);
err_alloc:
	/* If we managed to allocate at least some buffers,
	 * release them to hardware
	 */
	if (i)
		goto release_bufs;

	return 0;
}
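
/* Seeding works in portal-sized batches: up to DPAA2_ETH_BUFS_PER_CMD
 * page-backed buffers are mapped and released to the pool per command.
 * A short return count (possibly zero) is not fatal here; callers decide
 * whether a partially seeded pool is acceptable.
 */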

static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
{
	int i, j;
	int new_count;

	for (j = 0; j < priv->num_channels; j++) {
		for (i = 0; i < DPAA2_ETH_NUM_BUFS;
		     i += DPAA2_ETH_BUFS_PER_CMD) {
			new_count = dpaa2_eth_add_bufs(priv, priv->channel[j], bpid);
			priv->channel[j]->buf_count += new_count;

			if (new_count < DPAA2_ETH_BUFS_PER_CMD)
				return -ENOMEM;
		}
	}

	return 0;
}

/*
 * Drain the specified number of buffers from the DPNI's private buffer pool.
 * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
 */
static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int count)
{
	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
	int retries = 0;
	int ret;

	do {
		ret = dpaa2_io_service_acquire(NULL, priv->bpid,
					       buf_array, count);
		if (ret < 0) {
			if (ret == -EBUSY &&
			    retries++ < DPAA2_ETH_SWP_BUSY_RETRIES)
				continue;
			netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
			return;
		}
		dpaa2_eth_free_bufs(priv, buf_array, ret);
		retries = 0;
	} while (ret);
}

static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv)
{
	int i;

	dpaa2_eth_drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
	dpaa2_eth_drain_bufs(priv, 1);

	for (i = 0; i < priv->num_channels; i++)
		priv->channel[i]->buf_count = 0;
}

/* Function is called from softirq context only, so we don't need to guard
 * the access to percpu count
 */
static int dpaa2_eth_refill_pool(struct dpaa2_eth_priv *priv,
				 struct dpaa2_eth_channel *ch,
				 u16 bpid)
{
	int new_count;

	if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
		return 0;

	do {
		new_count = dpaa2_eth_add_bufs(priv, ch, bpid);
		if (unlikely(!new_count)) {
			/* Out of memory; abort for now, we'll try later on */
			break;
		}
		ch->buf_count += new_count;
	} while (ch->buf_count < DPAA2_ETH_NUM_BUFS);

	if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
		return -ENOMEM;

	return 0;
}
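
/* Refill uses hysteresis: nothing happens until the channel's buffer
 * count drops below DPAA2_ETH_REFILL_THRESH, at which point it is topped
 * back up to DPAA2_ETH_NUM_BUFS. This keeps refill work off the common
 * case where the pool is healthy.
 */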

static void dpaa2_eth_sgt_cache_drain(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_sgt_cache *sgt_cache;
	u16 count;
	int k, i;

	for_each_possible_cpu(k) {
		sgt_cache = per_cpu_ptr(priv->sgt_cache, k);
		count = sgt_cache->count;

		for (i = 0; i < count; i++)
			kfree(sgt_cache->buf[i]);
		sgt_cache->count = 0;
	}
}

static int dpaa2_eth_pull_channel(struct dpaa2_eth_channel *ch)
{
	int err;
	int dequeues = -1;

	/* Retry while portal is busy */
	do {
		err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id,
						    ch->store);
		dequeues++;
		cpu_relax();
	} while (err == -EBUSY && dequeues < DPAA2_ETH_SWP_BUSY_RETRIES);

	ch->stats.dequeue_portal_busy += dequeues;
	if (unlikely(err))
		ch->stats.pull_err++;

	return err;
}

/* NAPI poll routine
 *
 * Frames are dequeued from the QMan channel associated with this NAPI context.
 * Rx, Tx confirmation and (if configured) Rx error frames all count
 * towards the NAPI budget.
 */
static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
{
	struct dpaa2_eth_channel *ch;
	struct dpaa2_eth_priv *priv;
	int rx_cleaned = 0, txconf_cleaned = 0;
	struct dpaa2_eth_fq *fq, *txc_fq = NULL;
	struct netdev_queue *nq;
	int store_cleaned, work_done;
	struct list_head rx_list;
	int retries = 0;
	u16 flowid;
	int err;

	ch = container_of(napi, struct dpaa2_eth_channel, napi);
	ch->xdp.res = 0;
	priv = ch->priv;

	INIT_LIST_HEAD(&rx_list);
	ch->rx_list = &rx_list;

	do {
		err = dpaa2_eth_pull_channel(ch);
		if (unlikely(err))
			break;

		/* Refill pool if appropriate */
		dpaa2_eth_refill_pool(priv, ch, priv->bpid);

		store_cleaned = dpaa2_eth_consume_frames(ch, &fq);
		if (store_cleaned <= 0)
			break;
		if (fq->type == DPAA2_RX_FQ) {
			rx_cleaned += store_cleaned;
			flowid = fq->flowid;
		} else {
			txconf_cleaned += store_cleaned;
			/* We have a single Tx conf FQ on this channel */
			txc_fq = fq;
		}

		/* If we either consumed the whole NAPI budget with Rx frames
		 * or we reached the Tx confirmations threshold, we're done.
		 */
		if (rx_cleaned >= budget ||
		    txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) {
			work_done = budget;
			goto out;
		}
	} while (store_cleaned);

	/* We didn't consume the entire budget, so finish napi and
	 * re-enable data availability notifications
	 */
	napi_complete_done(napi, rx_cleaned);
	do {
		err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
		cpu_relax();
	} while (err == -EBUSY && retries++ < DPAA2_ETH_SWP_BUSY_RETRIES);
	WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
		  ch->nctx.desired_cpu);

	work_done = max(rx_cleaned, 1);

out:
	netif_receive_skb_list(ch->rx_list);

	if (txc_fq && txc_fq->dq_frames) {
		nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
		netdev_tx_completed_queue(nq, txc_fq->dq_frames,
					  txc_fq->dq_bytes);
		txc_fq->dq_frames = 0;
		txc_fq->dq_bytes = 0;
	}

	if (ch->xdp.res & XDP_REDIRECT)
		xdp_do_flush_map();
	else if (rx_cleaned && ch->xdp.res & XDP_TX)
		dpaa2_eth_xdp_tx_flush(priv, ch, &priv->fq[flowid]);

	return work_done;
}
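
/* Note the ordering at the end of the poll routine: deferred XDP work
 * (the redirect flush or the per-flow XDP_TX batch) is only flushed
 * after the Rx skbs have been handed to the stack and BQL has been
 * credited for the completed Tx bytes.
 */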

static void dpaa2_eth_enable_ch_napi(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_channel *ch;
	int i;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		napi_enable(&ch->napi);
	}
}

static void dpaa2_eth_disable_ch_napi(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_channel *ch;
	int i;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		napi_disable(&ch->napi);
	}
}

void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv,
			       bool tx_pause, bool pfc)
{
	struct dpni_taildrop td = {0};
	struct dpaa2_eth_fq *fq;
	int i, err;

	/* FQ taildrop: threshold is in bytes, per frame queue. Enabled if
	 * flow control is disabled (as it might interfere with either the
	 * buffer pool depletion trigger for pause frames or with the group
	 * congestion trigger for PFC frames)
	 */
	td.enable = !tx_pause;
	if (priv->rx_fqtd_enabled == td.enable)
		goto set_cgtd;

	td.threshold = DPAA2_ETH_FQ_TAILDROP_THRESH;
	td.units = DPNI_CONGESTION_UNIT_BYTES;

	for (i = 0; i < priv->num_fqs; i++) {
		fq = &priv->fq[i];
		if (fq->type != DPAA2_RX_FQ)
			continue;
		err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
					DPNI_CP_QUEUE, DPNI_QUEUE_RX,
					fq->tc, fq->flowid, &td);
		if (err) {
			netdev_err(priv->net_dev,
				   "dpni_set_taildrop(FQ) failed\n");
			return;
		}
	}

	priv->rx_fqtd_enabled = td.enable;

set_cgtd:
	/* Congestion group taildrop: threshold is in frames, per group
	 * of FQs belonging to the same traffic class
	 * Enabled if general Tx pause disabled or if PFCs are enabled
	 * (congestion group threshold for PFC generation is lower than the
	 * CG taildrop threshold, so it won't interfere with it; we also
	 * want frames in non-PFC enabled traffic classes to be kept in check)
	 */
	td.enable = !tx_pause || (tx_pause && pfc);
	if (priv->rx_cgtd_enabled == td.enable)
		return;

	td.threshold = DPAA2_ETH_CG_TAILDROP_THRESH(priv);
	td.units = DPNI_CONGESTION_UNIT_FRAMES;
	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
		err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
					DPNI_CP_GROUP, DPNI_QUEUE_RX,
					i, 0, &td);
		if (err) {
			netdev_err(priv->net_dev,
				   "dpni_set_taildrop(CG) failed\n");
			return;
		}
	}

	priv->rx_cgtd_enabled = td.enable;
}

static int dpaa2_eth_link_state_update(struct dpaa2_eth_priv *priv)
{
	struct dpni_link_state state = {0};
	bool tx_pause;
	int err;

	err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
	if (unlikely(err)) {
		netdev_err(priv->net_dev,
			   "dpni_get_link_state() failed\n");
		return err;
	}

	/* If Tx pause frame settings have changed, we need to update
	 * Rx FQ taildrop configuration as well. We configure taildrop
	 * only when pause frame generation is disabled.
	 */
	tx_pause = dpaa2_eth_tx_pause_enabled(state.options);
	dpaa2_eth_set_rx_taildrop(priv, tx_pause, priv->pfc_enabled);

	/* When we manage the MAC/PHY using phylink there is no need
	 * to manually update the netif_carrier.
	 */
	if (dpaa2_eth_is_type_phy(priv))
		goto out;

	/* Check link state; speed / duplex changes are not treated yet */
	if (priv->link_state.up == state.up)
		goto out;

	if (state.up) {
		netif_carrier_on(priv->net_dev);
		netif_tx_start_all_queues(priv->net_dev);
	} else {
		netif_tx_stop_all_queues(priv->net_dev);
		netif_carrier_off(priv->net_dev);
	}

	netdev_info(priv->net_dev, "Link Event: state %s\n",
		    state.up ? "up" : "down");

out:
	priv->link_state = state;

	return 0;
}
1733
1734static int dpaa2_eth_open(struct net_device *net_dev)
1735{
1736 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1737 int err;
1738
5d8dccf8 1739 err = dpaa2_eth_seed_pool(priv, priv->bpid);
6e2387e8
IR
1740 if (err) {
1741 /* Not much to do; the buffer pool, though not filled up,
1742 * may still contain some buffers which would enable us
1743 * to limp on.
1744 */
1745 netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
05fa39c6 1746 priv->dpbp_dev->obj_desc.id, priv->bpid);
6e2387e8
IR
1747 }
1748
d87e6063 1749 if (!dpaa2_eth_is_type_phy(priv)) {
71947923
IC
1750 /* We'll only start the txqs when the link is actually ready;
1751 * make sure we don't race against the link up notification,
1752 * which may come immediately after dpni_enable();
1753 */
1754 netif_tx_stop_all_queues(net_dev);
1755
1756 /* Also, explicitly set carrier off, otherwise
1757 * netif_carrier_ok() will return true and cause 'ip link show'
1758 * to report the LOWER_UP flag, even though the link
1759 * notification wasn't even received.
1760 */
1761 netif_carrier_off(net_dev);
1762 }
5d8dccf8 1763 dpaa2_eth_enable_ch_napi(priv);
6e2387e8
IR
1764
1765 err = dpni_enable(priv->mc_io, 0, priv->mc_token);
1766 if (err < 0) {
1767 netdev_err(net_dev, "dpni_enable() failed\n");
1768 goto enable_err;
1769 }
1770
d87e6063 1771 if (dpaa2_eth_is_type_phy(priv))
71947923 1772 phylink_start(priv->mac->phylink);
6e2387e8
IR
1773
1774 return 0;
1775
6e2387e8 1776enable_err:
5d8dccf8
IC
1777 dpaa2_eth_disable_ch_napi(priv);
1778 dpaa2_eth_drain_pool(priv);
6e2387e8
IR
1779 return err;
1780}
1781
68d74315 1782/* Total number of in-flight frames on ingress queues */
5d8dccf8 1783static u32 dpaa2_eth_ingress_fq_count(struct dpaa2_eth_priv *priv)
6e2387e8 1784{
68d74315
ICR
1785 struct dpaa2_eth_fq *fq;
1786 u32 fcnt = 0, bcnt = 0, total = 0;
1787 int i, err;
6e2387e8 1788
68d74315
ICR
1789 for (i = 0; i < priv->num_fqs; i++) {
1790 fq = &priv->fq[i];
1791 err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
1792 if (err) {
1793 netdev_warn(priv->net_dev, "query_fq_count failed\n");
1794 break;
1795 }
1796 total += fcnt;
1797 }
6e2387e8
IR
1798
1799 return total;
1800}
1801
5d8dccf8 1802static void dpaa2_eth_wait_for_ingress_fq_empty(struct dpaa2_eth_priv *priv)
6e2387e8 1803{
68d74315
ICR
1804 int retries = 10;
1805 u32 pending;
6e2387e8 1806
68d74315 1807 do {
5d8dccf8 1808 pending = dpaa2_eth_ingress_fq_count(priv);
68d74315
ICR
1809 if (pending)
1810 msleep(100);
1811 } while (pending && --retries);
6e2387e8
IR
1812}
1813
52b6a4ff
IR
1814#define DPNI_TX_PENDING_VER_MAJOR 7
1815#define DPNI_TX_PENDING_VER_MINOR 13
5d8dccf8 1816static void dpaa2_eth_wait_for_egress_fq_empty(struct dpaa2_eth_priv *priv)
52b6a4ff
IR
1817{
1818 union dpni_statistics stats;
1819 int retries = 10;
1820 int err;
1821
1822 if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_TX_PENDING_VER_MAJOR,
1823 DPNI_TX_PENDING_VER_MINOR) < 0)
1824 goto out;
1825
1826 do {
1827 err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token, 6,
1828 &stats);
1829 if (err)
1830 goto out;
1831 if (stats.page_6.tx_pending_frames == 0)
1832 return;
1833 } while (--retries);
1834
1835out:
1836 msleep(500);
1837}
1838
6e2387e8
IR
1839static int dpaa2_eth_stop(struct net_device *net_dev)
1840{
1841 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
85b7a342 1842 int dpni_enabled = 0;
6e2387e8 1843 int retries = 10;
6e2387e8 1844
d87e6063
IC
1845 if (dpaa2_eth_is_type_phy(priv)) {
1846 phylink_stop(priv->mac->phylink);
1847 } else {
71947923
IC
1848 netif_tx_stop_all_queues(net_dev);
1849 netif_carrier_off(net_dev);
71947923 1850 }
6e2387e8 1851
68d74315
ICR
1852 /* On dpni_disable(), the MC firmware will:
1853 * - stop MAC Rx and wait for all Rx frames to be enqueued to software
1854 * - cut off WRIOP dequeues from egress FQs and wait until transmission
1855 * of all in flight Tx frames is finished (and corresponding Tx conf
1856 * frames are enqueued back to software)
1857 *
1858 * Before calling dpni_disable(), we wait for all Tx frames to arrive
1859 * on WRIOP. After it finishes, wait until all remaining frames on Rx
1860 * and Tx conf queues are consumed on NAPI poll.
6e2387e8 1861 */
5d8dccf8 1862 dpaa2_eth_wait_for_egress_fq_empty(priv);
68d74315 1863
6e2387e8
IR
1864 do {
1865 dpni_disable(priv->mc_io, 0, priv->mc_token);
1866 dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
1867 if (dpni_enabled)
1868 /* Allow the hardware some slack */
1869 msleep(100);
1870 } while (dpni_enabled && --retries);
1871 if (!retries) {
1872 netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
1873 /* Must go on and disable NAPI nonetheless, so we don't crash at
1874 * the next "ifconfig up"
1875 */
1876 }
1877
5d8dccf8
IC
1878 dpaa2_eth_wait_for_ingress_fq_empty(priv);
1879 dpaa2_eth_disable_ch_napi(priv);
6e2387e8 1880
6e2387e8 1881 /* Empty the buffer pool */
5d8dccf8 1882 dpaa2_eth_drain_pool(priv);
6e2387e8 1883
d70446ee
IC
1884 /* Empty the Scatter-Gather Buffer cache */
1885 dpaa2_eth_sgt_cache_drain(priv);
1886
6e2387e8
IR
1887 return 0;
1888}
1889
6e2387e8
IR
1890static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
1891{
1892 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1893 struct device *dev = net_dev->dev.parent;
1894 int err;
1895
1896 err = eth_mac_addr(net_dev, addr);
1897 if (err < 0) {
1898 dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
1899 return err;
1900 }
1901
1902 err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
1903 net_dev->dev_addr);
1904 if (err) {
1905 dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
1906 return err;
1907 }
1908
1909 return 0;
1910}
1911
1912/** Fill in counters maintained by the GPP driver. These may be different from
1913 * the hardware counters obtained by ethtool.
1914 */
acbff8e3
IR
1915static void dpaa2_eth_get_stats(struct net_device *net_dev,
1916 struct rtnl_link_stats64 *stats)
6e2387e8
IR
1917{
1918 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1919 struct rtnl_link_stats64 *percpu_stats;
1920 u64 *cpustats;
1921 u64 *netstats = (u64 *)stats;
1922 int i, j;
1923 int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);
1924
1925 for_each_possible_cpu(i) {
1926 percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
1927 cpustats = (u64 *)percpu_stats;
1928 for (j = 0; j < num; j++)
1929 netstats[j] += cpustats[j];
1930 }
1931}
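/* Editor's note: the u64-by-u64 accumulation above relies on struct
 * rtnl_link_stats64 consisting solely of u64 fields. A compile-time guard
 * along these lines could make that assumption explicit (illustrative
 * sketch, not in the driver):
 *
 *	BUILD_BUG_ON(sizeof(struct rtnl_link_stats64) % sizeof(u64) != 0);
 */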
1932
6e2387e8
IR
1933/* Copy mac unicast addresses from @net_dev to @priv.
1934 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
1935 */
5d8dccf8
IC
1936static void dpaa2_eth_add_uc_hw_addr(const struct net_device *net_dev,
1937 struct dpaa2_eth_priv *priv)
6e2387e8
IR
1938{
1939 struct netdev_hw_addr *ha;
1940 int err;
1941
1942 netdev_for_each_uc_addr(ha, net_dev) {
1943 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
1944 ha->addr);
1945 if (err)
1946 netdev_warn(priv->net_dev,
1947 "Could not add ucast MAC %pM to the filtering table (err %d)\n",
1948 ha->addr, err);
1949 }
1950}
1951
1952/* Copy mac multicast addresses from @net_dev to @priv
1953 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
1954 */
5d8dccf8
IC
1955static void dpaa2_eth_add_mc_hw_addr(const struct net_device *net_dev,
1956 struct dpaa2_eth_priv *priv)
6e2387e8
IR
1957{
1958 struct netdev_hw_addr *ha;
1959 int err;
1960
1961 netdev_for_each_mc_addr(ha, net_dev) {
1962 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
1963 ha->addr);
1964 if (err)
1965 netdev_warn(priv->net_dev,
1966 "Could not add mcast MAC %pM to the filtering table (err %d)\n",
1967 ha->addr, err);
1968 }
1969}
1970
70b32d82
IA
1971static int dpaa2_eth_rx_add_vid(struct net_device *net_dev,
1972 __be16 vlan_proto, u16 vid)
1973{
1974 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1975 int err;
1976
1977 err = dpni_add_vlan_id(priv->mc_io, 0, priv->mc_token,
1978 vid, 0, 0, 0);
1979
1980 if (err) {
1981 netdev_warn(priv->net_dev,
1982 "Could not add the vlan id %u\n",
1983 vid);
1984 return err;
1985 }
1986
1987 return 0;
1988}
1989
1990static int dpaa2_eth_rx_kill_vid(struct net_device *net_dev,
1991 __be16 vlan_proto, u16 vid)
1992{
1993 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1994 int err;
1995
1996 err = dpni_remove_vlan_id(priv->mc_io, 0, priv->mc_token, vid);
1997
1998 if (err) {
1999 netdev_warn(priv->net_dev,
2000 "Could not remove the vlan id %u\n",
2001 vid);
2002 return err;
2003 }
2004
2005 return 0;
2006}
2007
6e2387e8
IR
2008static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
2009{
2010 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2011 int uc_count = netdev_uc_count(net_dev);
2012 int mc_count = netdev_mc_count(net_dev);
2013 u8 max_mac = priv->dpni_attrs.mac_filter_entries;
2014 u32 options = priv->dpni_attrs.options;
2015 u16 mc_token = priv->mc_token;
2016 struct fsl_mc_io *mc_io = priv->mc_io;
2017 int err;
2018
2019 /* Basic sanity checks; these probably indicate a misconfiguration */
2020 if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0)
2021 netdev_info(net_dev,
2022 "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
2023 max_mac);
2024
2025 /* Force promiscuous if the uc or mc counts exceed our capabilities. */
2026 if (uc_count > max_mac) {
2027 netdev_info(net_dev,
2028 "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
2029 uc_count, max_mac);
2030 goto force_promisc;
2031 }
2032 if (mc_count + uc_count > max_mac) {
2033 netdev_info(net_dev,
2034 "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n",
2035 uc_count + mc_count, max_mac);
2036 goto force_mc_promisc;
2037 }
2038
2039 /* Adjust promisc settings due to flag combinations */
2040 if (net_dev->flags & IFF_PROMISC)
2041 goto force_promisc;
2042 if (net_dev->flags & IFF_ALLMULTI) {
2043 /* First, rebuild unicast filtering table. This should be done
2044 * in promisc mode, in order to avoid frame loss while we
2045 * progressively add entries to the table.
2046 * We don't know whether we had been in promisc already, and
2047 * making an MC call to find out is expensive; so set uc promisc
2048 * nonetheless.
2049 */
2050 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
2051 if (err)
2052 netdev_warn(net_dev, "Can't set uc promisc\n");
2053
2054 /* Actual uc table reconstruction. */
2055 err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
2056 if (err)
2057 netdev_warn(net_dev, "Can't clear uc filters\n");
5d8dccf8 2058 dpaa2_eth_add_uc_hw_addr(net_dev, priv);
6e2387e8
IR
2059
2060 /* Finally, clear uc promisc and set mc promisc as requested. */
2061 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
2062 if (err)
2063 netdev_warn(net_dev, "Can't clear uc promisc\n");
2064 goto force_mc_promisc;
2065 }
2066
2067 /* Neither unicast, nor multicast promisc will be on... eventually.
2068 * For now, rebuild mac filtering tables while forcing both of them on.
2069 */
2070 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
2071 if (err)
2072 netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
2073 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
2074 if (err)
2075 netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);
2076
2077 /* Actual mac filtering tables reconstruction */
2078 err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
2079 if (err)
2080 netdev_warn(net_dev, "Can't clear mac filters\n");
5d8dccf8
IC
2081 dpaa2_eth_add_mc_hw_addr(net_dev, priv);
2082 dpaa2_eth_add_uc_hw_addr(net_dev, priv);
6e2387e8
IR
2083
2084 /* Now we can clear both ucast and mcast promisc, without risking
2085 * to drop legitimate frames anymore.
2086 */
2087 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
2088 if (err)
2089 netdev_warn(net_dev, "Can't clear ucast promisc\n");
2090 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
2091 if (err)
2092 netdev_warn(net_dev, "Can't clear mcast promisc\n");
2093
2094 return;
2095
2096force_promisc:
2097 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
2098 if (err)
2099 netdev_warn(net_dev, "Can't set ucast promisc\n");
2100force_mc_promisc:
2101 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
2102 if (err)
2103 netdev_warn(net_dev, "Can't set mcast promisc\n");
2104}
2105
2106static int dpaa2_eth_set_features(struct net_device *net_dev,
2107 netdev_features_t features)
2108{
2109 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2110 netdev_features_t changed = features ^ net_dev->features;
2111 bool enable;
2112 int err;
2113
70b32d82
IA
2114 if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
2115 enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER);
2116 err = dpaa2_eth_set_rx_vlan_filtering(priv, enable);
2117 if (err)
2118 return err;
2119 }
2120
6e2387e8
IR
2121 if (changed & NETIF_F_RXCSUM) {
2122 enable = !!(features & NETIF_F_RXCSUM);
5d8dccf8 2123 err = dpaa2_eth_set_rx_csum(priv, enable);
6e2387e8
IR
2124 if (err)
2125 return err;
2126 }
2127
2128 if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
2129 enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
5d8dccf8 2130 err = dpaa2_eth_set_tx_csum(priv, enable);
6e2387e8
IR
2131 if (err)
2132 return err;
2133 }
2134
2135 return 0;
2136}
2137
859f998e
IR
2138static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2139{
2140 struct dpaa2_eth_priv *priv = netdev_priv(dev);
2141 struct hwtstamp_config config;
2142
c5521189
YL
2143 if (!dpaa2_ptp)
2144 return -EINVAL;
2145
859f998e
IR
2146 if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
2147 return -EFAULT;
2148
2149 switch (config.tx_type) {
2150 case HWTSTAMP_TX_OFF:
859f998e 2151 case HWTSTAMP_TX_ON:
c5521189 2152 case HWTSTAMP_TX_ONESTEP_SYNC:
1cf773bd 2153 priv->tx_tstamp_type = config.tx_type;
859f998e
IR
2154 break;
2155 default:
2156 return -ERANGE;
2157 }
2158
2159 if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
2160 priv->rx_tstamp = false;
2161 } else {
2162 priv->rx_tstamp = true;
2163 /* TS is set for all frame types, not only those requested */
2164 config.rx_filter = HWTSTAMP_FILTER_ALL;
2165 }
2166
2167 return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
2168 -EFAULT : 0;
2169}
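/* Editor's illustrative sketch (not part of the driver): a minimal userspace
 * caller for the SIOCSHWTSTAMP path handled above. The helper name and the
 * interface name are assumptions for the example; sock is any open socket fd.
 */
#if 0 /* userspace example */
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int enable_hw_timestamping(int sock, const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	/* On success, cfg.rx_filter holds the filter actually applied;
	 * this driver upgrades any Rx request to HWTSTAMP_FILTER_ALL.
	 */
	return ioctl(sock, SIOCSHWTSTAMP, &ifr);
}
#endif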
2170
2171static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2172{
4a84182a
RK
2173 struct dpaa2_eth_priv *priv = netdev_priv(dev);
2174
859f998e
IR
2175 if (cmd == SIOCSHWTSTAMP)
2176 return dpaa2_eth_ts_ioctl(dev, rq, cmd);
2177
d87e6063 2178 if (dpaa2_eth_is_type_phy(priv))
4a84182a
RK
2179 return phylink_mii_ioctl(priv->mac->phylink, rq, cmd);
2180
2181 return -EOPNOTSUPP;
859f998e
IR
2182}
2183
7e273a8e
ICR
2184static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu)
2185{
2186 int mfl, linear_mfl;
2187
2188 mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
efa6a7d0 2189 linear_mfl = priv->rx_buf_size - DPAA2_ETH_RX_HWA_SIZE -
7b1eea1a 2190 dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM;
7e273a8e
ICR
2191
2192 if (mfl > linear_mfl) {
2193 netdev_warn(priv->net_dev, "Maximum MTU for XDP is %d\n",
2194 linear_mfl - VLAN_ETH_HLEN);
2195 return false;
2196 }
2197
2198 return true;
2199}
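/* Editor's note (derived from the check above, not part of the driver): XDP
 * requires the whole frame to fit in a single Rx buffer, since S/G frames
 * cannot be handled by an XDP program here. linear_mfl is therefore the Rx
 * buffer size minus the hardware annotation area, the reserved headroom and
 * the XDP headroom.
 */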
2200
5d8dccf8 2201static int dpaa2_eth_set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp)
7e273a8e
ICR
2202{
2203 int mfl, err;
2204
2205 /* We enforce a maximum Rx frame length based on MTU only if we have
2206 * an XDP program attached (in order to avoid Rx S/G frames).
2207 * Otherwise, we accept all incoming frames as long as they are not
2208 * larger than the maximum size supported in hardware
2209 */
2210 if (has_xdp)
2211 mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
2212 else
2213 mfl = DPAA2_ETH_MFL;
2214
2215 err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, mfl);
2216 if (err) {
2217 netdev_err(priv->net_dev, "dpni_set_max_frame_length failed\n");
2218 return err;
2219 }
2220
2221 return 0;
2222}
2223
2224static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu)
2225{
2226 struct dpaa2_eth_priv *priv = netdev_priv(dev);
2227 int err;
2228
2229 if (!priv->xdp_prog)
2230 goto out;
2231
2232 if (!xdp_mtu_valid(priv, new_mtu))
2233 return -EINVAL;
2234
5d8dccf8 2235 err = dpaa2_eth_set_rx_mfl(priv, new_mtu, true);
7e273a8e
ICR
2236 if (err)
2237 return err;
2238
2239out:
2240 dev->mtu = new_mtu;
2241 return 0;
2242}
2243
5d8dccf8 2244static int dpaa2_eth_update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp)
7b1eea1a
ICR
2245{
2246 struct dpni_buffer_layout buf_layout = {0};
2247 int err;
2248
2249 err = dpni_get_buffer_layout(priv->mc_io, 0, priv->mc_token,
2250 DPNI_QUEUE_RX, &buf_layout);
2251 if (err) {
2252 netdev_err(priv->net_dev, "dpni_get_buffer_layout failed\n");
2253 return err;
2254 }
2255
2256 /* Reserve extra headroom for XDP header size changes */
2257 buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv) +
2258 (has_xdp ? XDP_PACKET_HEADROOM : 0);
2259 buf_layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
2260 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
2261 DPNI_QUEUE_RX, &buf_layout);
2262 if (err) {
2263 netdev_err(priv->net_dev, "dpni_set_buffer_layout failed\n");
2264 return err;
2265 }
2266
2267 return 0;
2268}
2269
5d8dccf8 2270static int dpaa2_eth_setup_xdp(struct net_device *dev, struct bpf_prog *prog)
7e273a8e
ICR
2271{
2272 struct dpaa2_eth_priv *priv = netdev_priv(dev);
2273 struct dpaa2_eth_channel *ch;
2274 struct bpf_prog *old;
2275 bool up, need_update;
2276 int i, err;
2277
2278 if (prog && !xdp_mtu_valid(priv, dev->mtu))
2279 return -EINVAL;
2280
85192dbf
AN
2281 if (prog)
2282 bpf_prog_add(prog, priv->num_channels);
7e273a8e
ICR
2283
2284 up = netif_running(dev);
2285 need_update = (!!priv->xdp_prog != !!prog);
2286
2287 if (up)
2288 dpaa2_eth_stop(dev);
2289
7b1eea1a
ICR
2290 /* While in xdp mode, enforce a maximum Rx frame size based on MTU.
2291 * Also, when switching between xdp/non-xdp modes we need to reconfigure
2292 * our Rx buffer layout. The buffer pool was drained in dpaa2_eth_stop(),
2293 * so we are sure no old-format buffers will be used from now on.
2294 */
7e273a8e 2295 if (need_update) {
5d8dccf8 2296 err = dpaa2_eth_set_rx_mfl(priv, dev->mtu, !!prog);
7e273a8e
ICR
2297 if (err)
2298 goto out_err;
5d8dccf8 2299 err = dpaa2_eth_update_rx_buffer_headroom(priv, !!prog);
7b1eea1a
ICR
2300 if (err)
2301 goto out_err;
7e273a8e
ICR
2302 }
2303
2304 old = xchg(&priv->xdp_prog, prog);
2305 if (old)
2306 bpf_prog_put(old);
2307
2308 for (i = 0; i < priv->num_channels; i++) {
2309 ch = priv->channel[i];
2310 old = xchg(&ch->xdp.prog, prog);
2311 if (old)
2312 bpf_prog_put(old);
2313 }
2314
2315 if (up) {
2316 err = dpaa2_eth_open(dev);
2317 if (err)
2318 return err;
2319 }
2320
2321 return 0;
2322
2323out_err:
2324 if (prog)
2325 bpf_prog_sub(prog, priv->num_channels);
2326 if (up)
2327 dpaa2_eth_open(dev);
2328
2329 return err;
2330}
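/* Editor's note (illustrative, not part of the driver): the handler above is
 * reached when a program is attached or detached from userspace, e.g. with
 * iproute2 ("eth0" and "xdp_prog.o" are placeholders):
 *
 *	ip link set dev eth0 xdp obj xdp_prog.o sec xdp
 *	ip link set dev eth0 xdp off
 */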
2331
2332static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2333{
7e273a8e
ICR
2334 switch (xdp->command) {
2335 case XDP_SETUP_PROG:
5d8dccf8 2336 return dpaa2_eth_setup_xdp(dev, xdp->prog);
7e273a8e
ICR
2337 default:
2338 return -EINVAL;
2339 }
2340
2341 return 0;
2342}
2343
6aa40b9e
IC
2344static int dpaa2_eth_xdp_create_fd(struct net_device *net_dev,
2345 struct xdp_frame *xdpf,
2346 struct dpaa2_fd *fd)
d678be1d 2347{
d678be1d 2348 struct device *dev = net_dev->dev.parent;
d678be1d
IR
2349 unsigned int needed_headroom;
2350 struct dpaa2_eth_swa *swa;
d678be1d
IR
2351 void *buffer_start, *aligned_start;
2352 dma_addr_t addr;
d678be1d
IR
2353
2354 /* We require a minimum headroom to be able to transmit the frame.
2355 * Otherwise return an error and let the original net_device handle it
2356 */
1cf773bd 2357 needed_headroom = dpaa2_eth_needed_headroom(NULL);
d678be1d
IR
2358 if (xdpf->headroom < needed_headroom)
2359 return -EINVAL;
2360
d678be1d 2361 /* Setup the FD fields */
6aa40b9e 2362 memset(fd, 0, sizeof(*fd));
d678be1d
IR
2363
2364 /* Align FD address, if possible */
2365 buffer_start = xdpf->data - needed_headroom;
2366 aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
2367 DPAA2_ETH_TX_BUF_ALIGN);
2368 if (aligned_start >= xdpf->data - xdpf->headroom)
2369 buffer_start = aligned_start;
2370
2371 swa = (struct dpaa2_eth_swa *)buffer_start;
2372 /* fill in necessary fields here */
2373 swa->type = DPAA2_ETH_SWA_XDP;
2374 swa->xdp.dma_size = xdpf->data + xdpf->len - buffer_start;
2375 swa->xdp.xdpf = xdpf;
2376
2377 addr = dma_map_single(dev, buffer_start,
2378 swa->xdp.dma_size,
2379 DMA_BIDIRECTIONAL);
6aa40b9e 2380 if (unlikely(dma_mapping_error(dev, addr)))
d678be1d 2381 return -ENOMEM;
d678be1d 2382
6aa40b9e
IC
2383 dpaa2_fd_set_addr(fd, addr);
2384 dpaa2_fd_set_offset(fd, xdpf->data - buffer_start);
2385 dpaa2_fd_set_len(fd, xdpf->len);
2386 dpaa2_fd_set_format(fd, dpaa2_fd_single);
2387 dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
d678be1d
IR
2388
2389 return 0;
2390}
2391
2392static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
2393 struct xdp_frame **frames, u32 flags)
2394{
6aa40b9e 2395 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
38c440b2 2396 struct dpaa2_eth_xdp_fds *xdp_redirect_fds;
6aa40b9e
IC
2397 struct rtnl_link_stats64 *percpu_stats;
2398 struct dpaa2_eth_fq *fq;
8665d978 2399 struct dpaa2_fd *fds;
38c440b2 2400 int enqueued, i, err;
d678be1d
IR
2401
2402 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
2403 return -EINVAL;
2404
2405 if (!netif_running(net_dev))
2406 return -ENETDOWN;
2407
8665d978 2408 fq = &priv->fq[smp_processor_id()];
38c440b2
IC
2409 xdp_redirect_fds = &fq->xdp_redirect_fds;
2410 fds = xdp_redirect_fds->fds;
8665d978 2411
6aa40b9e 2412 percpu_stats = this_cpu_ptr(priv->percpu_stats);
6aa40b9e 2413
8665d978 2414 /* create a FD for each xdp_frame in the list received */
d678be1d 2415 for (i = 0; i < n; i++) {
8665d978
IC
2416 err = dpaa2_eth_xdp_create_fd(net_dev, frames[i], &fds[i]);
2417 if (err)
2418 break;
2419 }
38c440b2 2420 xdp_redirect_fds->num = i;
6aa40b9e 2421
38c440b2
IC
2422 /* enqueue all the frame descriptors */
2423 enqueued = dpaa2_eth_xdp_flush(priv, fq, xdp_redirect_fds);
d678be1d 2424
8665d978 2425 /* update statistics */
38c440b2
IC
2426 percpu_stats->tx_packets += enqueued;
2427 for (i = 0; i < enqueued; i++)
8665d978 2428 percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
38c440b2 2429 for (i = enqueued; i < n; i++)
8665d978
IC
2430 xdp_return_frame_rx_napi(frames[i]);
2431
38c440b2 2432 return enqueued;
d678be1d
IR
2433}
2434
06d5b179
IR
2435static int update_xps(struct dpaa2_eth_priv *priv)
2436{
2437 struct net_device *net_dev = priv->net_dev;
2438 struct cpumask xps_mask;
2439 struct dpaa2_eth_fq *fq;
ab1e6de2 2440 int i, num_queues, netdev_queues;
06d5b179
IR
2441 int err = 0;
2442
2443 num_queues = dpaa2_eth_queue_count(priv);
ab1e6de2 2444 netdev_queues = (net_dev->num_tc ? : 1) * num_queues;
06d5b179
IR
2445
2446 /* The first <num_queues> entries in priv->fq array are Tx/Tx conf
2447 * queues, so only process those
2448 */
ab1e6de2
IR
2449 for (i = 0; i < netdev_queues; i++) {
2450 fq = &priv->fq[i % num_queues];
06d5b179
IR
2451
2452 cpumask_clear(&xps_mask);
2453 cpumask_set_cpu(fq->target_cpu, &xps_mask);
2454
2455 err = netif_set_xps_queue(net_dev, &xps_mask, i);
2456 if (err) {
2457 netdev_warn_once(net_dev, "Error setting XPS queue\n");
2458 break;
2459 }
2460 }
2461
2462 return err;
2463}
2464
e3ec13be
IC
2465static int dpaa2_eth_setup_mqprio(struct net_device *net_dev,
2466 struct tc_mqprio_qopt *mqprio)
ab1e6de2
IR
2467{
2468 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
ab1e6de2
IR
2469 u8 num_tc, num_queues;
2470 int i;
2471
ab1e6de2
IR
2472 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
2473 num_queues = dpaa2_eth_queue_count(priv);
2474 num_tc = mqprio->num_tc;
2475
2476 if (num_tc == net_dev->num_tc)
2477 return 0;
2478
2479 if (num_tc > dpaa2_eth_tc_count(priv)) {
2480 netdev_err(net_dev, "Max %d traffic classes supported\n",
2481 dpaa2_eth_tc_count(priv));
b89c1e6b 2482 return -EOPNOTSUPP;
ab1e6de2
IR
2483 }
2484
2485 if (!num_tc) {
2486 netdev_reset_tc(net_dev);
2487 netif_set_real_num_tx_queues(net_dev, num_queues);
2488 goto out;
2489 }
2490
2491 netdev_set_num_tc(net_dev, num_tc);
2492 netif_set_real_num_tx_queues(net_dev, num_tc * num_queues);
2493
2494 for (i = 0; i < num_tc; i++)
2495 netdev_set_tc_queue(net_dev, i, num_queues, i * num_queues);
2496
2497out:
2498 update_xps(priv);
2499
2500 return 0;
2501}
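/* Editor's note (illustrative, not part of the driver): this handler is
 * exercised by an offloaded mqprio qdisc, e.g. mapping eight skb priorities
 * to two hardware traffic classes on a DPNI with 8 queues per class
 * ("eth0" and the exact layout are placeholders for the example):
 *
 *	tc qdisc add dev eth0 root mqprio num_tc 2 \
 *		map 0 0 0 0 1 1 1 1 queues 8@0 8@8 hw 1
 */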
2502
3657cdaf
IC
2503#define bps_to_mbits(rate) (div_u64((rate), 1000000) * 8)
2504
2505static int dpaa2_eth_setup_tbf(struct net_device *net_dev, struct tc_tbf_qopt_offload *p)
2506{
2507 struct tc_tbf_qopt_offload_replace_params *cfg = &p->replace_params;
2508 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2509 struct dpni_tx_shaping_cfg tx_cr_shaper = { 0 };
2510 struct dpni_tx_shaping_cfg tx_er_shaper = { 0 };
2511 int err;
2512
2513 if (p->command == TC_TBF_STATS)
2514 return -EOPNOTSUPP;
2515
2516 /* Only per port Tx shaping */
2517 if (p->parent != TC_H_ROOT)
2518 return -EOPNOTSUPP;
2519
2520 if (p->command == TC_TBF_REPLACE) {
2521 if (cfg->max_size > DPAA2_ETH_MAX_BURST_SIZE) {
2522 netdev_err(net_dev, "burst size cannot be greater than %d\n",
2523 DPAA2_ETH_MAX_BURST_SIZE);
2524 return -EINVAL;
2525 }
2526
2527 tx_cr_shaper.max_burst_size = cfg->max_size;
2528 /* The TBF interface is in bytes/s, whereas DPAA2 expects the
2529 * rate in Mbits/s
2530 */
2531 tx_cr_shaper.rate_limit = bps_to_mbits(cfg->rate.rate_bytes_ps);
2532 }
2533
2534 err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &tx_cr_shaper,
2535 &tx_er_shaper, 0);
2536 if (err) {
2537 netdev_err(net_dev, "dpni_set_tx_shaping() = %d\n", err);
2538 return err;
2539 }
2540
2541 return 0;
2542}
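/* Editor's note (illustrative, not part of the driver): the TBF offload above
 * is reached through a root tbf qdisc, e.g. ("eth0" and the rates are
 * placeholders):
 *
 *	tc qdisc replace dev eth0 root tbf rate 500mbit burst 64k latency 1ms
 *
 * Note the unit conversion in bps_to_mbits(): TBF passes the rate in
 * bytes/s, while dpni_set_tx_shaping() expects Mbits/s.
 */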
2543
e3ec13be
IC
2544static int dpaa2_eth_setup_tc(struct net_device *net_dev,
2545 enum tc_setup_type type, void *type_data)
2546{
2547 switch (type) {
2548 case TC_SETUP_QDISC_MQPRIO:
2549 return dpaa2_eth_setup_mqprio(net_dev, type_data);
3657cdaf
IC
2550 case TC_SETUP_QDISC_TBF:
2551 return dpaa2_eth_setup_tbf(net_dev, type_data);
e3ec13be
IC
2552 default:
2553 return -EOPNOTSUPP;
2554 }
2555}
2556
6e2387e8
IR
2557static const struct net_device_ops dpaa2_eth_ops = {
2558 .ndo_open = dpaa2_eth_open,
2559 .ndo_start_xmit = dpaa2_eth_tx,
2560 .ndo_stop = dpaa2_eth_stop,
6e2387e8
IR
2561 .ndo_set_mac_address = dpaa2_eth_set_addr,
2562 .ndo_get_stats64 = dpaa2_eth_get_stats,
6e2387e8
IR
2563 .ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
2564 .ndo_set_features = dpaa2_eth_set_features,
859f998e 2565 .ndo_do_ioctl = dpaa2_eth_ioctl,
7e273a8e
ICR
2566 .ndo_change_mtu = dpaa2_eth_change_mtu,
2567 .ndo_bpf = dpaa2_eth_xdp,
d678be1d 2568 .ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
ab1e6de2 2569 .ndo_setup_tc = dpaa2_eth_setup_tc,
70b32d82
IA
2570 .ndo_vlan_rx_add_vid = dpaa2_eth_rx_add_vid,
2571 .ndo_vlan_rx_kill_vid = dpaa2_eth_rx_kill_vid
6e2387e8
IR
2572};
2573
5d8dccf8 2574static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx)
6e2387e8
IR
2575{
2576 struct dpaa2_eth_channel *ch;
2577
2578 ch = container_of(ctx, struct dpaa2_eth_channel, nctx);
85047abd
IR
2579
2580 /* Update NAPI statistics */
2581 ch->stats.cdan++;
2582
6c33ae1a 2583 napi_schedule(&ch->napi);
6e2387e8
IR
2584}
2585
2586/* Allocate and configure a DPCON object */
5d8dccf8 2587static struct fsl_mc_device *dpaa2_eth_setup_dpcon(struct dpaa2_eth_priv *priv)
6e2387e8
IR
2588{
2589 struct fsl_mc_device *dpcon;
2590 struct device *dev = priv->net_dev->dev.parent;
6e2387e8
IR
2591 int err;
2592
2593 err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
2594 FSL_MC_POOL_DPCON, &dpcon);
2595 if (err) {
d7f5a9d8
IC
2596 if (err == -ENXIO)
2597 err = -EPROBE_DEFER;
2598 else
2599 dev_info(dev, "Not enough DPCONs, will go on as-is\n");
2600 return ERR_PTR(err);
6e2387e8
IR
2601 }
2602
2603 err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
2604 if (err) {
2605 dev_err(dev, "dpcon_open() failed\n");
f6dda809 2606 goto free;
6e2387e8
IR
2607 }
2608
2609 err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
2610 if (err) {
2611 dev_err(dev, "dpcon_reset() failed\n");
f6dda809 2612 goto close;
6e2387e8
IR
2613 }
2614
6e2387e8
IR
2615 err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
2616 if (err) {
2617 dev_err(dev, "dpcon_enable() failed\n");
f6dda809 2618 goto close;
6e2387e8
IR
2619 }
2620
2621 return dpcon;
2622
f6dda809 2623close:
6e2387e8 2624 dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
f6dda809 2625free:
6e2387e8
IR
2626 fsl_mc_object_free(dpcon);
2627
02afa9c6 2628 return ERR_PTR(err);
6e2387e8
IR
2629}
2630
5d8dccf8
IC
2631static void dpaa2_eth_free_dpcon(struct dpaa2_eth_priv *priv,
2632 struct fsl_mc_device *dpcon)
6e2387e8
IR
2633{
2634 dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
2635 dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
2636 fsl_mc_object_free(dpcon);
2637}
2638
5d8dccf8 2639static struct dpaa2_eth_channel *dpaa2_eth_alloc_channel(struct dpaa2_eth_priv *priv)
6e2387e8
IR
2640{
2641 struct dpaa2_eth_channel *channel;
2642 struct dpcon_attr attr;
2643 struct device *dev = priv->net_dev->dev.parent;
2644 int err;
2645
2646 channel = kzalloc(sizeof(*channel), GFP_KERNEL);
2647 if (!channel)
2648 return NULL;
2649
5d8dccf8 2650 channel->dpcon = dpaa2_eth_setup_dpcon(priv);
02afa9c6
Y
2651 if (IS_ERR(channel->dpcon)) {
2652 err = PTR_ERR(channel->dpcon);
6e2387e8 2653 goto err_setup;
d7f5a9d8 2654 }
6e2387e8
IR
2655
2656 err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
2657 &attr);
2658 if (err) {
2659 dev_err(dev, "dpcon_get_attributes() failed\n");
2660 goto err_get_attr;
2661 }
2662
2663 channel->dpcon_id = attr.id;
2664 channel->ch_id = attr.qbman_ch_id;
2665 channel->priv = priv;
2666
2667 return channel;
2668
2669err_get_attr:
5d8dccf8 2670 dpaa2_eth_free_dpcon(priv, channel->dpcon);
6e2387e8
IR
2671err_setup:
2672 kfree(channel);
d7f5a9d8 2673 return ERR_PTR(err);
6e2387e8
IR
2674}
2675
5d8dccf8
IC
2676static void dpaa2_eth_free_channel(struct dpaa2_eth_priv *priv,
2677 struct dpaa2_eth_channel *channel)
6e2387e8 2678{
5d8dccf8 2679 dpaa2_eth_free_dpcon(priv, channel->dpcon);
6e2387e8
IR
2680 kfree(channel);
2681}
2682
2683/* DPIO setup: allocate and configure QBMan channels, setup core affinity
2684 * and register data availability notifications
2685 */
5d8dccf8 2686static int dpaa2_eth_setup_dpio(struct dpaa2_eth_priv *priv)
6e2387e8
IR
2687{
2688 struct dpaa2_io_notification_ctx *nctx;
2689 struct dpaa2_eth_channel *channel;
2690 struct dpcon_notification_cfg dpcon_notif_cfg;
2691 struct device *dev = priv->net_dev->dev.parent;
2692 int i, err;
2693
2694 /* We want the ability to spread ingress traffic (RX, TX conf) to as
2695 * many cores as possible, so we need one channel for each core
2696 * (unless there are fewer queues than cores, in which case the extra
2697 * channels would be wasted).
2698 * Allocate one channel per core and register it to the core's
2699 * affine DPIO. If not enough channels are available for all cores
2700 * or if some cores don't have an affine DPIO, there will be no
2701 * ingress frame processing on those cores.
2702 */
2703 cpumask_clear(&priv->dpio_cpumask);
2704 for_each_online_cpu(i) {
2705 /* Try to allocate a channel */
5d8dccf8 2706 channel = dpaa2_eth_alloc_channel(priv);
d7f5a9d8 2707 if (IS_ERR_OR_NULL(channel)) {
bd8460fa 2708 err = PTR_ERR_OR_ZERO(channel);
d7f5a9d8
IC
2709 if (err != -EPROBE_DEFER)
2710 dev_info(dev,
2711 "No affine channel for cpu %d and above\n", i);
6e2387e8
IR
2712 goto err_alloc_ch;
2713 }
2714
2715 priv->channel[priv->num_channels] = channel;
2716
2717 nctx = &channel->nctx;
2718 nctx->is_cdan = 1;
5d8dccf8 2719 nctx->cb = dpaa2_eth_cdan_cb;
6e2387e8
IR
2720 nctx->id = channel->ch_id;
2721 nctx->desired_cpu = i;
2722
2723 /* Register the new context */
7ec0596f 2724 channel->dpio = dpaa2_io_service_select(i);
47441f7f 2725 err = dpaa2_io_service_register(channel->dpio, nctx, dev);
6e2387e8 2726 if (err) {
5206d8d1 2727 dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
6e2387e8 2728 /* If no affine DPIO for this core, there's probably
5206d8d1
IR
2729 * none available for next cores either. Signal we want
2730 * to retry later, in case the DPIO devices weren't
2731 * probed yet.
6e2387e8 2732 */
5206d8d1 2733 err = -EPROBE_DEFER;
6e2387e8
IR
2734 goto err_service_reg;
2735 }
2736
2737 /* Register DPCON notification with MC */
2738 dpcon_notif_cfg.dpio_id = nctx->dpio_id;
2739 dpcon_notif_cfg.priority = 0;
2740 dpcon_notif_cfg.user_ctx = nctx->qman64;
2741 err = dpcon_set_notification(priv->mc_io, 0,
2742 channel->dpcon->mc_handle,
2743 &dpcon_notif_cfg);
2744 if (err) {
2745 dev_err(dev, "dpcon_set_notification() failed\n");
2746 goto err_set_cdan;
2747 }
2748
2749 /* If we managed to allocate a channel and also found an affine
2750 * DPIO for this core, add it to the final mask
2751 */
2752 cpumask_set_cpu(i, &priv->dpio_cpumask);
2753 priv->num_channels++;
2754
2755 /* Stop if we already have enough channels to accommodate all
2756 * RX and TX conf queues
2757 */
b0e4f37b 2758 if (priv->num_channels == priv->dpni_attrs.num_queues)
6e2387e8
IR
2759 break;
2760 }
2761
2762 return 0;
2763
2764err_set_cdan:
47441f7f 2765 dpaa2_io_service_deregister(channel->dpio, nctx, dev);
6e2387e8 2766err_service_reg:
5d8dccf8 2767 dpaa2_eth_free_channel(priv, channel);
6e2387e8 2768err_alloc_ch:
5aa4277d
IC
2769 if (err == -EPROBE_DEFER) {
2770 for (i = 0; i < priv->num_channels; i++) {
2771 channel = priv->channel[i];
2772 nctx = &channel->nctx;
2773 dpaa2_io_service_deregister(channel->dpio, nctx, dev);
5d8dccf8 2774 dpaa2_eth_free_channel(priv, channel);
5aa4277d
IC
2775 }
2776 priv->num_channels = 0;
d7f5a9d8 2777 return err;
5aa4277d 2778 }
d7f5a9d8 2779
6e2387e8
IR
2780 if (cpumask_empty(&priv->dpio_cpumask)) {
2781 dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
d7f5a9d8 2782 return -ENODEV;
6e2387e8
IR
2783 }
2784
2785 dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
2786 cpumask_pr_args(&priv->dpio_cpumask));
2787
2788 return 0;
2789}
2790
5d8dccf8 2791static void dpaa2_eth_free_dpio(struct dpaa2_eth_priv *priv)
6e2387e8 2792{
47441f7f 2793 struct device *dev = priv->net_dev->dev.parent;
6e2387e8 2794 struct dpaa2_eth_channel *ch;
47441f7f 2795 int i;
6e2387e8
IR
2796
2797 /* deregister CDAN notifications and free channels */
2798 for (i = 0; i < priv->num_channels; i++) {
2799 ch = priv->channel[i];
47441f7f 2800 dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev);
5d8dccf8 2801 dpaa2_eth_free_channel(priv, ch);
6e2387e8
IR
2802 }
2803}
2804
5d8dccf8
IC
2805static struct dpaa2_eth_channel *dpaa2_eth_get_affine_channel(struct dpaa2_eth_priv *priv,
2806 int cpu)
6e2387e8
IR
2807{
2808 struct device *dev = priv->net_dev->dev.parent;
2809 int i;
2810
2811 for (i = 0; i < priv->num_channels; i++)
2812 if (priv->channel[i]->nctx.desired_cpu == cpu)
2813 return priv->channel[i];
2814
2815 /* We should never get here. Issue a warning and return
2816 * the first channel, because it's still better than nothing
2817 */
2818 dev_warn(dev, "No affine channel found for cpu %d\n", cpu);
2819
2820 return priv->channel[0];
2821}
2822
5d8dccf8 2823static void dpaa2_eth_set_fq_affinity(struct dpaa2_eth_priv *priv)
6e2387e8
IR
2824{
2825 struct device *dev = priv->net_dev->dev.parent;
2826 struct dpaa2_eth_fq *fq;
2827 int rx_cpu, txc_cpu;
06d5b179 2828 int i;
6e2387e8
IR
2829
2830 /* For each FQ, pick one channel/CPU to deliver frames to.
2831 * This may well change at runtime, either through irqbalance or
2832 * through direct user intervention.
2833 */
2834 rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask);
2835
2836 for (i = 0; i < priv->num_fqs; i++) {
2837 fq = &priv->fq[i];
2838 switch (fq->type) {
2839 case DPAA2_RX_FQ:
061d631f 2840 case DPAA2_RX_ERR_FQ:
6e2387e8
IR
2841 fq->target_cpu = rx_cpu;
2842 rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
2843 if (rx_cpu >= nr_cpu_ids)
2844 rx_cpu = cpumask_first(&priv->dpio_cpumask);
2845 break;
2846 case DPAA2_TX_CONF_FQ:
2847 fq->target_cpu = txc_cpu;
2848 txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
2849 if (txc_cpu >= nr_cpu_ids)
2850 txc_cpu = cpumask_first(&priv->dpio_cpumask);
2851 break;
2852 default:
2853 dev_err(dev, "Unknown FQ type: %d\n", fq->type);
2854 }
5d8dccf8 2855 fq->channel = dpaa2_eth_get_affine_channel(priv, fq->target_cpu);
6e2387e8 2856 }
06d5b179
IR
2857
2858 update_xps(priv);
6e2387e8
IR
2859}
2860
5d8dccf8 2861static void dpaa2_eth_setup_fqs(struct dpaa2_eth_priv *priv)
6e2387e8 2862{
685e39ea 2863 int i, j;
6e2387e8
IR
2864
2865 /* We have one TxConf FQ per Tx flow.
2866 * The number of Tx and Rx queues is the same.
2867 * Tx queues come first in the fq array.
2868 */
2869 for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
2870 priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
2871 priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
2872 priv->fq[priv->num_fqs++].flowid = (u16)i;
2873 }
2874
685e39ea
IR
2875 for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
2876 for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
2877 priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
2878 priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
2879 priv->fq[priv->num_fqs].tc = (u8)j;
2880 priv->fq[priv->num_fqs++].flowid = (u16)i;
2881 }
6e2387e8
IR
2882 }
2883
061d631f
IC
2884 /* We have exactly one Rx error queue per DPNI */
2885 priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ;
2886 priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err;
2887
6e2387e8 2888 /* For each FQ, decide on which core to process incoming frames */
5d8dccf8 2889 dpaa2_eth_set_fq_affinity(priv);
6e2387e8
IR
2890}
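/* Editor's sketch (derived from the code above, not part of the driver):
 * with Q = dpaa2_eth_queue_count() and T = dpaa2_eth_tc_count(), the
 * resulting priv->fq[] layout is:
 *
 *	fq[0] .. fq[Q-1]		Tx conf, flowid 0..Q-1
 *	fq[Q] .. fq[Q + T*Q - 1]	Rx, grouped by traffic class
 *	fq[Q + T*Q]			Rx error (one per DPNI)
 */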
2891
2892/* Allocate and configure one buffer pool for each interface */
5d8dccf8 2893static int dpaa2_eth_setup_dpbp(struct dpaa2_eth_priv *priv)
6e2387e8
IR
2894{
2895 int err;
2896 struct fsl_mc_device *dpbp_dev;
2897 struct device *dev = priv->net_dev->dev.parent;
05fa39c6 2898 struct dpbp_attr dpbp_attrs;
6e2387e8
IR
2899
2900 err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
2901 &dpbp_dev);
2902 if (err) {
d7f5a9d8
IC
2903 if (err == -ENXIO)
2904 err = -EPROBE_DEFER;
2905 else
2906 dev_err(dev, "DPBP device allocation failed\n");
6e2387e8
IR
2907 return err;
2908 }
2909
2910 priv->dpbp_dev = dpbp_dev;
2911
2912 err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
2913 &dpbp_dev->mc_handle);
2914 if (err) {
2915 dev_err(dev, "dpbp_open() failed\n");
2916 goto err_open;
2917 }
2918
d00defe3
IR
2919 err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle);
2920 if (err) {
2921 dev_err(dev, "dpbp_reset() failed\n");
2922 goto err_reset;
2923 }
2924
6e2387e8
IR
2925 err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
2926 if (err) {
2927 dev_err(dev, "dpbp_enable() failed\n");
2928 goto err_enable;
2929 }
2930
2931 err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
05fa39c6 2932 &dpbp_attrs);
6e2387e8
IR
2933 if (err) {
2934 dev_err(dev, "dpbp_get_attributes() failed\n");
2935 goto err_get_attr;
2936 }
05fa39c6 2937 priv->bpid = dpbp_attrs.bpid;
6e2387e8
IR
2938
2939 return 0;
2940
2941err_get_attr:
2942 dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
2943err_enable:
d00defe3 2944err_reset:
6e2387e8
IR
2945 dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
2946err_open:
2947 fsl_mc_object_free(dpbp_dev);
2948
2949 return err;
2950}
2951
5d8dccf8 2952static void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv)
6e2387e8 2953{
5d8dccf8 2954 dpaa2_eth_drain_pool(priv);
6e2387e8
IR
2955 dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
2956 dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
2957 fsl_mc_object_free(priv->dpbp_dev);
2958}
2959
5d8dccf8 2960static int dpaa2_eth_set_buffer_layout(struct dpaa2_eth_priv *priv)
6e2387e8 2961{
308f64e7 2962 struct device *dev = priv->net_dev->dev.parent;
50eacbc8 2963 struct dpni_buffer_layout buf_layout = {0};
27c87486 2964 u16 rx_buf_align;
6e2387e8
IR
2965 int err;
2966
8a4fd877
BP
2967 /* We need to check for WRIOP version 1.0.0, but depending on the MC
2968 * version, this number is not always provided correctly on rev1.
2969 * We need to check for both alternatives in this situation.
2970 */
2971 if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
2972 priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
27c87486 2973 rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
8a4fd877 2974 else
27c87486 2975 rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
8a4fd877 2976
efa6a7d0
IC
2977 /* We need to ensure that the buffer size seen by WRIOP is a multiple
2978 * of 64 or 256 bytes depending on the WRIOP version.
2979 */
2980 priv->rx_buf_size = ALIGN_DOWN(DPAA2_ETH_RX_BUF_SIZE, rx_buf_align);
2981
4b2d9fe8 2982 /* tx buffer */
50eacbc8 2983 buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
859f998e 2984 buf_layout.pass_timestamp = true;
c5521189 2985 buf_layout.pass_frame_status = true;
859f998e 2986 buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
c5521189
YL
2987 DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
2988 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
6e2387e8 2989 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
50eacbc8 2990 DPNI_QUEUE_TX, &buf_layout);
6e2387e8
IR
2991 if (err) {
2992 dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
308f64e7 2993 return err;
6e2387e8
IR
2994 }
2995
2996 /* tx-confirm buffer */
c5521189
YL
2997 buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
2998 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
6e2387e8 2999 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
50eacbc8 3000 DPNI_QUEUE_TX_CONFIRM, &buf_layout);
6e2387e8
IR
3001 if (err) {
3002 dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
308f64e7
IR
3003 return err;
3004 }
3005
4b2d9fe8
BP
3006 /* Now that we've set our tx buffer layout, retrieve the minimum
3007 * required tx data offset.
3008 */
3009 err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
3010 &priv->tx_data_offset);
3011 if (err) {
3012 dev_err(dev, "dpni_get_tx_data_offset() failed\n");
3013 return err;
3014 }
3015
3016 if ((priv->tx_data_offset % 64) != 0)
3017 dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
3018 priv->tx_data_offset);
3019
3020 /* rx buffer */
2b7c86eb 3021 buf_layout.pass_frame_status = true;
4b2d9fe8 3022 buf_layout.pass_parser_result = true;
27c87486 3023 buf_layout.data_align = rx_buf_align;
4b2d9fe8
BP
3024 buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv);
3025 buf_layout.private_data_size = 0;
3026 buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
3027 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
3028 DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
859f998e
IR
3029 DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
3030 DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
4b2d9fe8
BP
3031 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3032 DPNI_QUEUE_RX, &buf_layout);
3033 if (err) {
3034 dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
3035 return err;
3036 }
3037
308f64e7
IR
3038 return 0;
3039}
3040
1fa0f68c
ICR
3041#define DPNI_ENQUEUE_FQID_VER_MAJOR 7
3042#define DPNI_ENQUEUE_FQID_VER_MINOR 9
3043
3044static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv,
3045 struct dpaa2_eth_fq *fq,
48c0481e 3046 struct dpaa2_fd *fd, u8 prio,
6ff80447 3047 u32 num_frames __always_unused,
48c0481e 3048 int *frames_enqueued)
1fa0f68c 3049{
48c0481e
IC
3050 int err;
3051
3052 err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
3053 priv->tx_qdid, prio,
3054 fq->tx_qdbin, fd);
3055 if (!err && frames_enqueued)
3056 *frames_enqueued = 1;
3057 return err;
1fa0f68c
ICR
3058}
3059
6ff80447
IC
3060static inline int dpaa2_eth_enqueue_fq_multiple(struct dpaa2_eth_priv *priv,
3061 struct dpaa2_eth_fq *fq,
3062 struct dpaa2_fd *fd,
3063 u8 prio, u32 num_frames,
3064 int *frames_enqueued)
1fa0f68c 3065{
48c0481e
IC
3066 int err;
3067
6ff80447
IC
3068 err = dpaa2_io_service_enqueue_multiple_fq(fq->channel->dpio,
3069 fq->tx_fqid[prio],
3070 fd, num_frames);
3071
3072 if (err == 0)
3073 return -EBUSY;
3074
3075 if (frames_enqueued)
3076 *frames_enqueued = err;
3077 return 0;
1fa0f68c
ICR
3078}
3079
5d8dccf8 3080static void dpaa2_eth_set_enqueue_mode(struct dpaa2_eth_priv *priv)
1fa0f68c
ICR
3081{
3082 if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
3083 DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
3084 priv->enqueue = dpaa2_eth_enqueue_qd;
3085 else
6ff80447 3086 priv->enqueue = dpaa2_eth_enqueue_fq_multiple;
1fa0f68c
ICR
3087}
3088
5d8dccf8 3089static int dpaa2_eth_set_pause(struct dpaa2_eth_priv *priv)
8eb3cef8
IR
3090{
3091 struct device *dev = priv->net_dev->dev.parent;
3092 struct dpni_link_cfg link_cfg = {0};
3093 int err;
3094
3095 /* Get the default link options so we don't override other flags */
3096 err = dpni_get_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
3097 if (err) {
3098 dev_err(dev, "dpni_get_link_cfg() failed\n");
3099 return err;
3100 }
3101
3102 /* By default, enable both Rx and Tx pause frames */
3103 link_cfg.options |= DPNI_LINK_OPT_PAUSE;
3104 link_cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
3105 err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
3106 if (err) {
3107 dev_err(dev, "dpni_set_link_cfg() failed\n");
3108 return err;
3109 }
3110
3111 priv->link_state.options = link_cfg.options;
3112
3113 return 0;
3114}
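/* Editor's note (illustrative, not part of the driver): the defaults set
 * above (symmetric Rx/Tx pause) can later be changed from userspace via the
 * standard pause interface, e.g. ("eth0" is a placeholder):
 *
 *	ethtool -A eth0 rx on tx off
 *
 * The corresponding get/set handlers live in the dpaa2-eth ethtool code,
 * not in this file.
 */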
3115
5d8dccf8 3116static void dpaa2_eth_update_tx_fqids(struct dpaa2_eth_priv *priv)
a690af4f
IR
3117{
3118 struct dpni_queue_id qid = {0};
3119 struct dpaa2_eth_fq *fq;
3120 struct dpni_queue queue;
3121 int i, j, err;
3122
3123 /* We only use Tx FQIDs for FQID-based enqueue, so check
3124 * if DPNI version supports it before updating FQIDs
3125 */
3126 if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
3127 DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
3128 return;
3129
3130 for (i = 0; i < priv->num_fqs; i++) {
3131 fq = &priv->fq[i];
3132 if (fq->type != DPAA2_TX_CONF_FQ)
3133 continue;
3134 for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
3135 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3136 DPNI_QUEUE_TX, j, fq->flowid,
3137 &queue, &qid);
3138 if (err)
3139 goto out_err;
3140
3141 fq->tx_fqid[j] = qid.fqid;
3142 if (fq->tx_fqid[j] == 0)
3143 goto out_err;
3144 }
3145 }
3146
6ff80447 3147 priv->enqueue = dpaa2_eth_enqueue_fq_multiple;
a690af4f
IR
3148
3149 return;
3150
3151out_err:
3152 netdev_info(priv->net_dev,
3153 "Error reading Tx FQID, fallback to QDID-based enqueue\n");
3154 priv->enqueue = dpaa2_eth_enqueue_qd;
3155}
3156
6aa90fe2 3157/* Configure ingress classification based on VLAN PCP */
5d8dccf8 3158static int dpaa2_eth_set_vlan_qos(struct dpaa2_eth_priv *priv)
6aa90fe2
IR
3159{
3160 struct device *dev = priv->net_dev->dev.parent;
3161 struct dpkg_profile_cfg kg_cfg = {0};
3162 struct dpni_qos_tbl_cfg qos_cfg = {0};
3163 struct dpni_rule_cfg key_params;
3164 void *dma_mem, *key, *mask;
3165 u8 key_size = 2; /* VLAN TCI field */
3166 int i, pcp, err;
3167
3168 /* VLAN-based classification only makes sense if we have multiple
3169 * traffic classes.
3170 * Also, we need to extract just the 3-bit PCP field from the VLAN
3171 * header and we can only do that by using a mask
3172 */
3173 if (dpaa2_eth_tc_count(priv) == 1 || !dpaa2_eth_fs_mask_enabled(priv)) {
3174 dev_dbg(dev, "VLAN-based QoS classification not supported\n");
3175 return -EOPNOTSUPP;
3176 }
3177
3178 dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
3179 if (!dma_mem)
3180 return -ENOMEM;
3181
3182 kg_cfg.num_extracts = 1;
3183 kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
3184 kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_VLAN;
3185 kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
3186 kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_VLAN_TCI;
3187
3188 err = dpni_prepare_key_cfg(&kg_cfg, dma_mem);
3189 if (err) {
3190 dev_err(dev, "dpni_prepare_key_cfg failed\n");
3191 goto out_free_tbl;
3192 }
3193
3194 /* set QoS table */
3195 qos_cfg.default_tc = 0;
3196 qos_cfg.discard_on_miss = 0;
3197 qos_cfg.key_cfg_iova = dma_map_single(dev, dma_mem,
3198 DPAA2_CLASSIFIER_DMA_SIZE,
3199 DMA_TO_DEVICE);
3200 if (dma_mapping_error(dev, qos_cfg.key_cfg_iova)) {
3201 dev_err(dev, "QoS table DMA mapping failed\n");
3202 err = -ENOMEM;
3203 goto out_free_tbl;
3204 }
3205
3206 err = dpni_set_qos_table(priv->mc_io, 0, priv->mc_token, &qos_cfg);
3207 if (err) {
3208 dev_err(dev, "dpni_set_qos_table failed\n");
3209 goto out_unmap_tbl;
3210 }
3211
3212 /* Add QoS table entries */
3213 key = kzalloc(key_size * 2, GFP_KERNEL);
3214 if (!key) {
3215 err = -ENOMEM;
3216 goto out_unmap_tbl;
3217 }
3218 mask = key + key_size;
3219 *(__be16 *)mask = cpu_to_be16(VLAN_PRIO_MASK);
3220
3221 key_params.key_iova = dma_map_single(dev, key, key_size * 2,
3222 DMA_TO_DEVICE);
3223 if (dma_mapping_error(dev, key_params.key_iova)) {
3224 dev_err(dev, "QoS table entry DMA mapping failed\n");
3225 err = -ENOMEM;
3226 goto out_free_key;
3227 }
3228
3229 key_params.mask_iova = key_params.key_iova + key_size;
3230 key_params.key_size = key_size;
3231
3232 /* We add rules for PCP-based distribution starting with highest
3233 * priority (VLAN PCP = 7). If this DPNI doesn't have enough traffic
3234 * classes to accommodate all priority levels, the lowest ones end up
3235 * on TC 0 which was configured as default
3236 */
3237 for (i = dpaa2_eth_tc_count(priv) - 1, pcp = 7; i >= 0; i--, pcp--) {
3238 *(__be16 *)key = cpu_to_be16(pcp << VLAN_PRIO_SHIFT);
3239 dma_sync_single_for_device(dev, key_params.key_iova,
3240 key_size * 2, DMA_TO_DEVICE);
3241
3242 err = dpni_add_qos_entry(priv->mc_io, 0, priv->mc_token,
3243 &key_params, i, i);
3244 if (err) {
3245 dev_err(dev, "dpni_add_qos_entry failed\n");
3246 dpni_clear_qos_table(priv->mc_io, 0, priv->mc_token);
3247 goto out_unmap_key;
3248 }
3249 }
3250
3251 priv->vlan_cls_enabled = true;
3252
3253 /* Table and key memory is not persistent, clean everything up after
3254 * configuration is finished
3255 */
3256out_unmap_key:
3257 dma_unmap_single(dev, key_params.key_iova, key_size * 2, DMA_TO_DEVICE);
3258out_free_key:
3259 kfree(key);
3260out_unmap_tbl:
3261 dma_unmap_single(dev, qos_cfg.key_cfg_iova, DPAA2_CLASSIFIER_DMA_SIZE,
3262 DMA_TO_DEVICE);
3263out_free_tbl:
3264 kfree(dma_mem);
3265
3266 return err;
3267}
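/* Editor's sketch (derived from the loop above, not part of the driver):
 * with e.g. four traffic classes, the resulting PCP-to-TC mapping is
 *
 *	PCP 7 -> TC 3,  PCP 6 -> TC 2,  PCP 5 -> TC 1,  PCP 4 -> TC 0,
 *
 * while PCP 0-3 miss the QoS table and land on the default TC 0.
 */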
3268
308f64e7 3269/* Configure the DPNI object this interface is associated with */
5d8dccf8 3270static int dpaa2_eth_setup_dpni(struct fsl_mc_device *ls_dev)
308f64e7
IR
3271{
3272 struct device *dev = &ls_dev->dev;
3273 struct dpaa2_eth_priv *priv;
3274 struct net_device *net_dev;
3275 int err;
3276
3277 net_dev = dev_get_drvdata(dev);
3278 priv = netdev_priv(net_dev);
3279
3280 /* get a handle for the DPNI object */
3281 err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token);
3282 if (err) {
3283 dev_err(dev, "dpni_open() failed\n");
3284 return err;
3285 }
3286
311cffa5
IR
3287 /* Check if we can work with this DPNI object */
3288 err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major,
3289 &priv->dpni_ver_minor);
3290 if (err) {
3291 dev_err(dev, "dpni_get_api_version() failed\n");
3292 goto close;
3293 }
3294 if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
3295 dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n",
3296 priv->dpni_ver_major, priv->dpni_ver_minor,
3297 DPNI_VER_MAJOR, DPNI_VER_MINOR);
3298 err = -ENOTSUPP;
3299 goto close;
3300 }
3301
308f64e7
IR
3302 ls_dev->mc_io = priv->mc_io;
3303 ls_dev->mc_handle = priv->mc_token;
3304
3305 err = dpni_reset(priv->mc_io, 0, priv->mc_token);
3306 if (err) {
3307 dev_err(dev, "dpni_reset() failed\n");
f6dda809 3308 goto close;
6e2387e8
IR
3309 }
3310
308f64e7
IR
3311 err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
3312 &priv->dpni_attrs);
3313 if (err) {
3314 dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
3315 goto close;
3316 }
3317
5d8dccf8 3318 err = dpaa2_eth_set_buffer_layout(priv);
308f64e7
IR
3319 if (err)
3320 goto close;
3321
5d8dccf8 3322 dpaa2_eth_set_enqueue_mode(priv);
1fa0f68c 3323
8eb3cef8
IR
3324 /* Enable pause frame support */
3325 if (dpaa2_eth_has_pause_support(priv)) {
5d8dccf8 3326 err = dpaa2_eth_set_pause(priv);
8eb3cef8
IR
3327 if (err)
3328 goto close;
3329 }
3330
5d8dccf8 3331 err = dpaa2_eth_set_vlan_qos(priv);
6aa90fe2
IR
3332 if (err && err != -EOPNOTSUPP)
3333 goto close;
3334
9334d5ba
XW
3335 priv->cls_rules = devm_kcalloc(dev, dpaa2_eth_fs_count(priv),
3336 sizeof(struct dpaa2_eth_cls_rule),
3337 GFP_KERNEL);
97fff7c8
WY
3338 if (!priv->cls_rules) {
3339 err = -ENOMEM;
afb90dbb 3340 goto close;
97fff7c8 3341 }
afb90dbb 3342
6e2387e8
IR
3343 return 0;
3344
f6dda809 3345close:
6e2387e8 3346 dpni_close(priv->mc_io, 0, priv->mc_token);
f6dda809 3347
6e2387e8
IR
3348 return err;
3349}
3350
5d8dccf8 3351static void dpaa2_eth_free_dpni(struct dpaa2_eth_priv *priv)
6e2387e8
IR
3352{
3353 int err;
3354
3355 err = dpni_reset(priv->mc_io, 0, priv->mc_token);
3356 if (err)
3357 netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
3358 err);
3359
3360 dpni_close(priv->mc_io, 0, priv->mc_token);
3361}
3362
5d8dccf8
IC
3363static int dpaa2_eth_setup_rx_flow(struct dpaa2_eth_priv *priv,
3364 struct dpaa2_eth_fq *fq)
6e2387e8
IR
3365{
3366 struct device *dev = priv->net_dev->dev.parent;
3367 struct dpni_queue queue;
3368 struct dpni_queue_id qid;
6e2387e8
IR
3369 int err;
3370
3371 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
685e39ea 3372 DPNI_QUEUE_RX, fq->tc, fq->flowid, &queue, &qid);
6e2387e8
IR
3373 if (err) {
3374 dev_err(dev, "dpni_get_queue(RX) failed\n");
3375 return err;
3376 }
3377
3378 fq->fqid = qid.fqid;
3379
3380 queue.destination.id = fq->channel->dpcon_id;
3381 queue.destination.type = DPNI_DEST_DPCON;
3382 queue.destination.priority = 1;
75c583ab 3383 queue.user_context = (u64)(uintptr_t)fq;
6e2387e8 3384 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
685e39ea 3385 DPNI_QUEUE_RX, fq->tc, fq->flowid,
16fa1cf1 3386 DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
6e2387e8
IR
3387 &queue);
3388 if (err) {
3389 dev_err(dev, "dpni_set_queue(RX) failed\n");
3390 return err;
3391 }
3392
d678be1d 3393 /* xdp_rxq setup */
685e39ea
IR
3394 /* only once for each channel */
3395 if (fq->tc > 0)
3396 return 0;
3397
d678be1d 3398 err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev,
b02e5a0e 3399 fq->flowid, 0);
d678be1d
IR
3400 if (err) {
3401 dev_err(dev, "xdp_rxq_info_reg failed\n");
3402 return err;
3403 }
3404
3405 err = xdp_rxq_info_reg_mem_model(&fq->channel->xdp_rxq,
3406 MEM_TYPE_PAGE_ORDER0, NULL);
3407 if (err) {
3408 dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n");
3409 return err;
3410 }
3411
6e2387e8
IR
3412 return 0;
3413}
3414
5d8dccf8
IC
3415static int dpaa2_eth_setup_tx_flow(struct dpaa2_eth_priv *priv,
3416 struct dpaa2_eth_fq *fq)
6e2387e8
IR
3417{
3418 struct device *dev = priv->net_dev->dev.parent;
3419 struct dpni_queue queue;
3420 struct dpni_queue_id qid;
15c87f6b 3421 int i, err;
6e2387e8 3422
15c87f6b
IR
3423 for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
3424 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3425 DPNI_QUEUE_TX, i, fq->flowid,
3426 &queue, &qid);
3427 if (err) {
3428 dev_err(dev, "dpni_get_queue(TX) failed\n");
3429 return err;
3430 }
3431 fq->tx_fqid[i] = qid.fqid;
6e2387e8
IR
3432 }
3433
15c87f6b 3434 /* All Tx queues belonging to the same flowid have the same qdbin */
6e2387e8
IR
3435 fq->tx_qdbin = qid.qdbin;
3436
3437 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3438 DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
3439 &queue, &qid);
3440 if (err) {
3441 dev_err(dev, "dpni_get_queue(TX_CONF) failed\n");
3442 return err;
3443 }
3444
3445 fq->fqid = qid.fqid;
3446
3447 queue.destination.id = fq->channel->dpcon_id;
3448 queue.destination.type = DPNI_DEST_DPCON;
3449 queue.destination.priority = 0;
75c583ab 3450 queue.user_context = (u64)(uintptr_t)fq;
6e2387e8
IR
3451 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
3452 DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
3453 DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
3454 &queue);
3455 if (err) {
3456 dev_err(dev, "dpni_set_queue(TX_CONF) failed\n");
3457 return err;
3458 }
3459
3460 return 0;
3461}
3462
061d631f
IC
3463static int setup_rx_err_flow(struct dpaa2_eth_priv *priv,
3464 struct dpaa2_eth_fq *fq)
3465{
3466 struct device *dev = priv->net_dev->dev.parent;
3467 struct dpni_queue q = { { 0 } };
3468 struct dpni_queue_id qid;
3469 u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
3470 int err;
3471
3472 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3473 DPNI_QUEUE_RX_ERR, 0, 0, &q, &qid);
3474 if (err) {
3475 dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
3476 return err;
3477 }
3478
3479 fq->fqid = qid.fqid;
3480
3481 q.destination.id = fq->channel->dpcon_id;
3482 q.destination.type = DPNI_DEST_DPCON;
3483 q.destination.priority = 1;
3484 q.user_context = (u64)(uintptr_t)fq;
3485 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
3486 DPNI_QUEUE_RX_ERR, 0, 0, q_opt, &q);
3487 if (err) {
3488 dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
3489 return err;
3490 }
3491
3492 return 0;
3493}
3494
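/* Note on the destination priorities used in this file: Rx and Rx
 * error queues are bound to DPCON priority 1 while Tx confirmation
 * queues use priority 0; assuming the usual QBMan convention that
 * lower work-queue numbers are served first, Tx completions take
 * precedence over new ingress traffic on the same channel.
 */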
edad8d26 3495/* Supported header fields for Rx hash distribution key */
f76c483a 3496static const struct dpaa2_eth_dist_fields dist_fields[] = {
6e2387e8 3497 {
edad8d26
ICR
3498 /* L2 header */
3499 .rxnfc_field = RXH_L2DA,
3500 .cls_prot = NET_PROT_ETH,
3501 .cls_field = NH_FLD_ETH_DA,
3a1e6b84 3502 .id = DPAA2_ETH_DIST_ETHDST,
edad8d26 3503 .size = 6,
afb90dbb
IR
3504 }, {
3505 .cls_prot = NET_PROT_ETH,
3506 .cls_field = NH_FLD_ETH_SA,
3a1e6b84 3507 .id = DPAA2_ETH_DIST_ETHSRC,
afb90dbb
IR
3508 .size = 6,
3509 }, {
3510 /* This is the last ethertype field parsed:
3511 * depending on frame format, it can be the MAC ethertype
3512 * or the VLAN etype.
3513 */
3514 .cls_prot = NET_PROT_ETH,
3515 .cls_field = NH_FLD_ETH_TYPE,
3a1e6b84 3516 .id = DPAA2_ETH_DIST_ETHTYPE,
afb90dbb 3517 .size = 2,
edad8d26
ICR
3518 }, {
3519 /* VLAN header */
3520 .rxnfc_field = RXH_VLAN,
3521 .cls_prot = NET_PROT_VLAN,
3522 .cls_field = NH_FLD_VLAN_TCI,
3a1e6b84 3523 .id = DPAA2_ETH_DIST_VLAN,
edad8d26
ICR
3524 .size = 2,
3525 }, {
6e2387e8
IR
3526 /* IP header */
3527 .rxnfc_field = RXH_IP_SRC,
3528 .cls_prot = NET_PROT_IP,
3529 .cls_field = NH_FLD_IP_SRC,
3a1e6b84 3530 .id = DPAA2_ETH_DIST_IPSRC,
6e2387e8
IR
3531 .size = 4,
3532 }, {
3533 .rxnfc_field = RXH_IP_DST,
3534 .cls_prot = NET_PROT_IP,
3535 .cls_field = NH_FLD_IP_DST,
3a1e6b84 3536 .id = DPAA2_ETH_DIST_IPDST,
6e2387e8
IR
3537 .size = 4,
3538 }, {
3539 .rxnfc_field = RXH_L3_PROTO,
3540 .cls_prot = NET_PROT_IP,
3541 .cls_field = NH_FLD_IP_PROTO,
3a1e6b84 3542 .id = DPAA2_ETH_DIST_IPPROTO,
6e2387e8
IR
3543 .size = 1,
3544 }, {
3545 /* Using UDP ports, this is functionally equivalent to raw
3546 * byte pairs from L4 header.
3547 */
3548 .rxnfc_field = RXH_L4_B_0_1,
3549 .cls_prot = NET_PROT_UDP,
3550 .cls_field = NH_FLD_UDP_PORT_SRC,
3a1e6b84 3551 .id = DPAA2_ETH_DIST_L4SRC,
6e2387e8
IR
3552 .size = 2,
3553 }, {
3554 .rxnfc_field = RXH_L4_B_2_3,
3555 .cls_prot = NET_PROT_UDP,
3556 .cls_field = NH_FLD_UDP_PORT_DST,
3a1e6b84 3557 .id = DPAA2_ETH_DIST_L4DST,
6e2387e8
IR
3558 .size = 2,
3559 },
3560};
3561
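/* For reference, a key built from all the fields in this table (as
 * done for the default flow classification key) is
 * 6 + 6 + 2 + 2 + 4 + 4 + 1 + 2 + 2 = 29 bytes long, which is what
 * dpaa2_eth_cls_key_size(DPAA2_ETH_DIST_ALL) evaluates to below.
 */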
df85aeb9 3562/* Configure the Rx hash key using the legacy API */
5d8dccf8 3563static int dpaa2_eth_config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
df85aeb9
IR
3564{
3565 struct device *dev = priv->net_dev->dev.parent;
3566 struct dpni_rx_tc_dist_cfg dist_cfg;
685e39ea 3567 int i, err = 0;
df85aeb9
IR
3568
3569 memset(&dist_cfg, 0, sizeof(dist_cfg));
3570
3571 dist_cfg.key_cfg_iova = key;
3572 dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
3573 dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
3574
685e39ea
IR
3575 for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
3576 err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token,
3577 i, &dist_cfg);
3578 if (err) {
3579 dev_err(dev, "dpni_set_rx_tc_dist failed\n");
3580 break;
3581 }
3582 }
df85aeb9
IR
3583
3584 return err;
3585}
3586
3587/* Configure the Rx hash key using the new API */
5d8dccf8 3588static int dpaa2_eth_config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
df85aeb9
IR
3589{
3590 struct device *dev = priv->net_dev->dev.parent;
3591 struct dpni_rx_dist_cfg dist_cfg;
685e39ea 3592 int i, err = 0;
df85aeb9
IR
3593
3594 memset(&dist_cfg, 0, sizeof(dist_cfg));
3595
3596 dist_cfg.key_cfg_iova = key;
3597 dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
3598 dist_cfg.enable = 1;
3599
685e39ea
IR
3600 for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
3601 dist_cfg.tc = i;
3602 err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token,
3603 &dist_cfg);
3604 if (err) {
3605 dev_err(dev, "dpni_set_rx_hash_dist failed\n");
3606 break;
3607 }
5e29c16f
IA
3608
3609 /* If the flow steering / hashing key is shared between all
3610 * traffic classes, install it just once
3611 */
3612 if (priv->dpni_attrs.options & DPNI_OPT_SHARED_FS)
3613 break;
685e39ea 3614 }
df85aeb9
IR
3615
3616 return err;
3617}
3618
4aaaf9b9 3619/* Configure the Rx flow classification key */
5d8dccf8 3620static int dpaa2_eth_config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
4aaaf9b9
IR
3621{
3622 struct device *dev = priv->net_dev->dev.parent;
3623 struct dpni_rx_dist_cfg dist_cfg;
685e39ea 3624 int i, err = 0;
4aaaf9b9
IR
3625
3626 memset(&dist_cfg, 0, sizeof(dist_cfg));
3627
3628 dist_cfg.key_cfg_iova = key;
3629 dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
3630 dist_cfg.enable = 1;
3631
685e39ea
IR
3632 for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
3633 dist_cfg.tc = i;
3634 err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token,
3635 &dist_cfg);
3636 if (err) {
3637 dev_err(dev, "dpni_set_rx_fs_dist failed\n");
3638 break;
3639 }
5e29c16f
IA
3640
3641 /* If the flow steering / hashing key is shared between all
3642 * traffic classes, install it just once
3643 */
3644 if (priv->dpni_attrs.options & DPNI_OPT_SHARED_FS)
3645 break;
685e39ea 3646 }
4aaaf9b9
IR
3647
3648 return err;
3649}
3650
afb90dbb 3651/* Size of the Rx flow classification key */
2d680237 3652int dpaa2_eth_cls_key_size(u64 fields)
afb90dbb
IR
3653{
3654 int i, size = 0;
3655
2d680237
ICR
3656 for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
3657 if (!(fields & dist_fields[i].id))
3658 continue;
afb90dbb 3659 size += dist_fields[i].size;
2d680237 3660 }
afb90dbb
IR
3661
3662 return size;
3663}
3664
3665/* Offset of header field in Rx classification key */
3666int dpaa2_eth_cls_fld_off(int prot, int field)
3667{
3668 int i, off = 0;
3669
3670 for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
3671 if (dist_fields[i].cls_prot == prot &&
3672 dist_fields[i].cls_field == field)
3673 return off;
3674 off += dist_fields[i].size;
3675 }
3676
3677 WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n");
3678 return 0;
3679}
3680
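/* Worked example for the lookup above: the offset of the IP
 * destination field is the sum of the sizes of all dist_fields[]
 * entries that precede it:
 *
 *	dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST)
 *		= 6 (ETH_DA) + 6 (ETH_SA) + 2 (ETH_TYPE)
 *		+ 2 (VLAN_TCI) + 4 (IP_SRC) = 20
 */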
2d680237
ICR
3681/* Prune unused fields from the classification rule.
3682 * Used when masking is not supported
3683 */
3684void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields)
3685{
3686 int off = 0, new_off = 0;
3687 int i, size;
3688
3689 for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
3690 size = dist_fields[i].size;
3691 if (dist_fields[i].id & fields) {
3692 memcpy(key_mem + new_off, key_mem + off, size);
3693 new_off += size;
3694 }
3695 off += size;
3696 }
3697}
3698
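/* Worked example: with fields == (DPAA2_ETH_DIST_IPSRC |
 * DPAA2_ETH_DIST_IPDST), the 4-byte IP source at offset 16 moves to
 * offset 0 and the 4-byte IP destination at offset 20 moves to
 * offset 4; the remaining bytes of the 29-byte full key are dropped,
 * leaving the 8-byte rule that dpaa2_eth_cls_key_size(fields)
 * reports.
 */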
4aaaf9b9 3699/* Set Rx distribution (hash or flow classification) key
6e2387e8
IR
3700 * flags is a combination of RXH_ bits
3701 */
3233c151
IC
3702static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
3703 enum dpaa2_eth_rx_dist type, u64 flags)
6e2387e8
IR
3704{
3705 struct device *dev = net_dev->dev.parent;
3706 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
3707 struct dpkg_profile_cfg cls_cfg;
edad8d26 3708 u32 rx_hash_fields = 0;
df85aeb9 3709 dma_addr_t key_iova;
6e2387e8
IR
3710 u8 *dma_mem;
3711 int i;
3712 int err = 0;
3713
6e2387e8
IR
3714 memset(&cls_cfg, 0, sizeof(cls_cfg));
3715
f76c483a 3716 for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
6e2387e8
IR
3717 struct dpkg_extract *key =
3718 &cls_cfg.extracts[cls_cfg.num_extracts];
3719
2d680237
ICR
3720 /* For both Rx hashing and classification keys
3721 * we set only the selected fields.
4aaaf9b9 3722 */
2d680237
ICR
3723 if (!(flags & dist_fields[i].id))
3724 continue;
3725 if (type == DPAA2_ETH_RX_DIST_HASH)
4aaaf9b9 3726 rx_hash_fields |= dist_fields[i].rxnfc_field;
6e2387e8
IR
3727
3728 if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
3729 dev_err(dev, "error adding key extraction rule, too many rules?\n");
3730 return -E2BIG;
3731 }
3732
3733 key->type = DPKG_EXTRACT_FROM_HDR;
f76c483a 3734 key->extract.from_hdr.prot = dist_fields[i].cls_prot;
6e2387e8 3735 key->extract.from_hdr.type = DPKG_FULL_FIELD;
f76c483a 3736 key->extract.from_hdr.field = dist_fields[i].cls_field;
6e2387e8
IR
3737 cls_cfg.num_extracts++;
3738 }
3739
e40ef9e4 3740 dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
6e2387e8
IR
3741 if (!dma_mem)
3742 return -ENOMEM;
3743
3744 err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
3745 if (err) {
77160af3 3746 dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
df85aeb9 3747 goto free_key;
6e2387e8
IR
3748 }
3749
6e2387e8 3750 /* Prepare for setting the rx dist */
df85aeb9
IR
3751 key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE,
3752 DMA_TO_DEVICE);
3753 if (dma_mapping_error(dev, key_iova)) {
6e2387e8
IR
3754 dev_err(dev, "DMA mapping failed\n");
3755 err = -ENOMEM;
df85aeb9 3756 goto free_key;
6e2387e8
IR
3757 }
3758
4aaaf9b9
IR
3759 if (type == DPAA2_ETH_RX_DIST_HASH) {
3760 if (dpaa2_eth_has_legacy_dist(priv))
5d8dccf8 3761 err = dpaa2_eth_config_legacy_hash_key(priv, key_iova);
4aaaf9b9 3762 else
5d8dccf8 3763 err = dpaa2_eth_config_hash_key(priv, key_iova);
4aaaf9b9 3764 } else {
5d8dccf8 3765 err = dpaa2_eth_config_cls_key(priv, key_iova);
4aaaf9b9 3766 }
df85aeb9
IR
3767
3768 dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
3769 DMA_TO_DEVICE);
4aaaf9b9 3770 if (!err && type == DPAA2_ETH_RX_DIST_HASH)
edad8d26 3771 priv->rx_hash_fields = rx_hash_fields;
6e2387e8 3772
df85aeb9 3773free_key:
6e2387e8
IR
3774 kfree(dma_mem);
3775 return err;
3776}
3777
4aaaf9b9
IR
3778int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
3779{
3780 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
3a1e6b84
ICR
3781 u64 key = 0;
3782 int i;
4aaaf9b9
IR
3783
3784 if (!dpaa2_eth_hash_enabled(priv))
3785 return -EOPNOTSUPP;
3786
3a1e6b84
ICR
3787 for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
3788 if (dist_fields[i].rxnfc_field & flags)
3789 key |= dist_fields[i].id;
3790
3791 return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, key);
4aaaf9b9
IR
3792}
3793
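/* Example of the RXH -> field id translation done above: a request
 * such as dpaa2_eth_set_hash(net_dev, RXH_IP_SRC | RXH_IP_DST),
 * e.g. coming from the ethtool rxnfc path, selects
 * DPAA2_ETH_DIST_IPSRC | DPAA2_ETH_DIST_IPDST, so only those two
 * header fields end up in the hash key.
 */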
2d680237
ICR
3794int dpaa2_eth_set_cls(struct net_device *net_dev, u64 flags)
3795{
3796 return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_CLS, flags);
3797}
3798
3799static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv *priv)
4aaaf9b9
IR
3800{
3801 struct device *dev = priv->net_dev->dev.parent;
df8e249b 3802 int err;
4aaaf9b9
IR
3803
3804 /* Check if we actually support Rx flow classification */
3805 if (dpaa2_eth_has_legacy_dist(priv)) {
3806 dev_dbg(dev, "Rx cls not supported by current MC version\n");
3807 return -EOPNOTSUPP;
3808 }
3809
2d680237 3810 if (!dpaa2_eth_fs_enabled(priv)) {
4aaaf9b9
IR
3811 dev_dbg(dev, "Rx cls disabled in DPNI options\n");
3812 return -EOPNOTSUPP;
3813 }
3814
3815 if (!dpaa2_eth_hash_enabled(priv)) {
3816 dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n");
3817 return -EOPNOTSUPP;
3818 }
3819
2d680237
ICR
3820 /* If there is no support for masking in the classification table,
3821 * we don't set a default key, as it will depend on the rules
3822 * added by the user at runtime.
3823 */
3824 if (!dpaa2_eth_fs_mask_enabled(priv))
3825 goto out;
3826
3827 err = dpaa2_eth_set_cls(priv->net_dev, DPAA2_ETH_DIST_ALL);
df8e249b
ICR
3828 if (err)
3829 return err;
3830
2d680237 3831out:
4aaaf9b9
IR
3832 priv->rx_cls_enabled = 1;
3833
df8e249b 3834 return 0;
4aaaf9b9
IR
3835}
3836
6e2387e8
IR
3837/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
3838 * frame queues and channels
3839 */
5d8dccf8 3840static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv)
6e2387e8
IR
3841{
3842 struct net_device *net_dev = priv->net_dev;
3843 struct device *dev = net_dev->dev.parent;
3844 struct dpni_pools_cfg pools_params;
3845 struct dpni_error_cfg err_cfg;
3846 int err = 0;
3847 int i;
3848
3849 pools_params.num_dpbp = 1;
3850 pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
3851 pools_params.pools[0].backup_pool = 0;
efa6a7d0 3852 pools_params.pools[0].buffer_size = priv->rx_buf_size;
6e2387e8
IR
3853 err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
3854 if (err) {
3855 dev_err(dev, "dpni_set_pools() failed\n");
3856 return err;
3857 }
3858
227686b6
IR
3859 /* have the interface implicitly distribute traffic based on
3860 * the default hash key
6e2387e8 3861 */
227686b6 3862 err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT);
edad8d26 3863 if (err && err != -EOPNOTSUPP)
0f4c295f 3864 dev_err(dev, "Failed to configure hashing\n");
6e2387e8 3865
4aaaf9b9
IR
3866 /* Configure the flow classification key; it includes all
3867 * supported header fields and cannot be modified at runtime
3868 */
2d680237 3869 err = dpaa2_eth_set_default_cls(priv);
4aaaf9b9
IR
3870 if (err && err != -EOPNOTSUPP)
3871 dev_err(dev, "Failed to configure Rx classification key\n");
3872
6e2387e8 3873 /* Configure handling of error frames */
39163c0c 3874 err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
6e2387e8
IR
3875 err_cfg.set_frame_annotation = 1;
3876 err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
3877 err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
3878 &err_cfg);
3879 if (err) {
3880 dev_err(dev, "dpni_set_errors_behavior failed\n");
3881 return err;
3882 }
3883
3884 /* Configure Rx and Tx conf queues to generate CDANs */
3885 for (i = 0; i < priv->num_fqs; i++) {
3886 switch (priv->fq[i].type) {
3887 case DPAA2_RX_FQ:
5d8dccf8 3888 err = dpaa2_eth_setup_rx_flow(priv, &priv->fq[i]);
6e2387e8
IR
3889 break;
3890 case DPAA2_TX_CONF_FQ:
5d8dccf8 3891 err = dpaa2_eth_setup_tx_flow(priv, &priv->fq[i]);
6e2387e8 3892 break;
061d631f
IC
3893 case DPAA2_RX_ERR_FQ:
 3894 err = dpaa2_eth_setup_rx_err_flow(priv, &priv->fq[i]);
3895 break;
6e2387e8
IR
3896 default:
3897 dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
3898 return -EINVAL;
3899 }
3900 if (err)
3901 return err;
3902 }
3903
3904 err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
3905 DPNI_QUEUE_TX, &priv->tx_qdid);
3906 if (err) {
3907 dev_err(dev, "dpni_get_qdid() failed\n");
3908 return err;
3909 }
3910
3911 return 0;
3912}
3913
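/* Binding order used above, for reference: buffer pool attach ->
 * default hash and classification keys -> error frame policy ->
 * per-queue DPCON destinations -> Tx qdid lookup. Each step depends
 * only on objects already created earlier in the probe path, so any
 * failure is simply propagated to the caller.
 */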
3914/* Allocate rings for storing incoming frame descriptors */
5d8dccf8 3915static int dpaa2_eth_alloc_rings(struct dpaa2_eth_priv *priv)
6e2387e8
IR
3916{
3917 struct net_device *net_dev = priv->net_dev;
3918 struct device *dev = net_dev->dev.parent;
3919 int i;
3920
3921 for (i = 0; i < priv->num_channels; i++) {
3922 priv->channel[i]->store =
3923 dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
3924 if (!priv->channel[i]->store) {
3925 netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
3926 goto err_ring;
3927 }
3928 }
3929
3930 return 0;
3931
3932err_ring:
3933 for (i = 0; i < priv->num_channels; i++) {
3934 if (!priv->channel[i]->store)
3935 break;
3936 dpaa2_io_store_destroy(priv->channel[i]->store);
3937 }
3938
3939 return -ENOMEM;
3940}
3941
5d8dccf8 3942static void dpaa2_eth_free_rings(struct dpaa2_eth_priv *priv)
6e2387e8
IR
3943{
3944 int i;
3945
3946 for (i = 0; i < priv->num_channels; i++)
3947 dpaa2_io_store_destroy(priv->channel[i]->store);
3948}
3949
5d8dccf8 3950static int dpaa2_eth_set_mac_addr(struct dpaa2_eth_priv *priv)
6e2387e8 3951{
6ab00868 3952 struct net_device *net_dev = priv->net_dev;
6e2387e8 3953 struct device *dev = net_dev->dev.parent;
6e2387e8 3954 u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
6ab00868 3955 int err;
6e2387e8
IR
3956
3957 /* Get firmware address, if any */
3958 err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
3959 if (err) {
3960 dev_err(dev, "dpni_get_port_mac_addr() failed\n");
3961 return err;
3962 }
3963
3964 /* Get DPNI attributes address, if any */
3965 err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
3966 dpni_mac_addr);
3967 if (err) {
6ab00868 3968 dev_err(dev, "dpni_get_primary_mac_addr() failed\n");
6e2387e8
IR
3969 return err;
3970 }
3971
3972 /* First check if firmware has any address configured by bootloader */
3973 if (!is_zero_ether_addr(mac_addr)) {
3974 /* If the DPMAC addr != DPNI addr, update it */
3975 if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
3976 err = dpni_set_primary_mac_addr(priv->mc_io, 0,
3977 priv->mc_token,
3978 mac_addr);
3979 if (err) {
3980 dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
3981 return err;
3982 }
3983 }
3984 memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
3985 } else if (is_zero_ether_addr(dpni_mac_addr)) {
6ab00868
IR
3986 /* No MAC address configured, fill in net_dev->dev_addr
3987 * with a random one
6e2387e8
IR
3988 */
3989 eth_hw_addr_random(net_dev);
6ab00868
IR
3990 dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");
3991
6e2387e8
IR
3992 err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
3993 net_dev->dev_addr);
3994 if (err) {
6ab00868 3995 dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
6e2387e8
IR
3996 return err;
3997 }
6ab00868 3998
6e2387e8
IR
3999 /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
4000 * practical purposes, this will be our "permanent" mac address,
4001 * at least until the next reboot. This move will also permit
 4002 * register_netdevice() to properly fill in net_dev->perm_addr.
4003 */
4004 net_dev->addr_assign_type = NET_ADDR_PERM;
4005 } else {
4006 /* NET_ADDR_PERM is default, all we have to do is
4007 * fill in the device addr.
4008 */
4009 memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
4010 }
4011
6ab00868
IR
4012 return 0;
4013}
4014
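/* Summary of the address selection implemented above:
 *
 *	firmware addr	DPNI addr	result
 *	non-zero	any		firmware addr (DPNI synced if needed)
 *	zero		zero		random addr, marked NET_ADDR_PERM
 *	zero		non-zero	DPNI addr
 */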
5d8dccf8 4015static int dpaa2_eth_netdev_init(struct net_device *net_dev)
6ab00868
IR
4016{
4017 struct device *dev = net_dev->dev.parent;
4018 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
7f12c8a3
IR
4019 u32 options = priv->dpni_attrs.options;
4020 u64 supported = 0, not_supported = 0;
6ab00868 4021 u8 bcast_addr[ETH_ALEN];
bb5b42c0 4022 u8 num_queues;
6ab00868
IR
4023 int err;
4024
4025 net_dev->netdev_ops = &dpaa2_eth_ops;
7f12c8a3 4026 net_dev->ethtool_ops = &dpaa2_ethtool_ops;
6ab00868 4027
5d8dccf8 4028 err = dpaa2_eth_set_mac_addr(priv);
6ab00868
IR
4029 if (err)
4030 return err;
4031
4032 /* Explicitly add the broadcast address to the MAC filtering table */
6e2387e8
IR
4033 eth_broadcast_addr(bcast_addr);
4034 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
4035 if (err) {
6ab00868
IR
4036 dev_err(dev, "dpni_add_mac_addr() failed\n");
4037 return err;
6e2387e8
IR
4038 }
4039
3ccc8d47 4040 /* Set MTU upper limit; lower limit is 68B (default value) */
6e2387e8 4041 net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
00fee002 4042 err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
81f34e96 4043 DPAA2_ETH_MFL);
00fee002
IR
4044 if (err) {
4045 dev_err(dev, "dpni_set_max_frame_length() failed\n");
4046 return err;
4047 }
6e2387e8 4048
bb5b42c0
IR
4049 /* Set actual number of queues in the net device */
4050 num_queues = dpaa2_eth_queue_count(priv);
4051 err = netif_set_real_num_tx_queues(net_dev, num_queues);
4052 if (err) {
4053 dev_err(dev, "netif_set_real_num_tx_queues() failed\n");
4054 return err;
4055 }
4056 err = netif_set_real_num_rx_queues(net_dev, num_queues);
4057 if (err) {
4058 dev_err(dev, "netif_set_real_num_rx_queues() failed\n");
4059 return err;
4060 }
4061
7f12c8a3
IR
4062 /* Capabilities listing */
4063 supported |= IFF_LIVE_ADDR_CHANGE;
4064
4065 if (options & DPNI_OPT_NO_MAC_FILTER)
4066 not_supported |= IFF_UNICAST_FLT;
4067 else
4068 supported |= IFF_UNICAST_FLT;
4069
4070 net_dev->priv_flags |= supported;
4071 net_dev->priv_flags &= ~not_supported;
4072
4073 /* Features */
4074 net_dev->features = NETIF_F_RXCSUM |
4075 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4076 NETIF_F_SG | NETIF_F_HIGHDMA |
3657cdaf 4077 NETIF_F_LLTX | NETIF_F_HW_TC;
7f12c8a3 4078 net_dev->hw_features = net_dev->features;
6e2387e8 4079
70b32d82
IA
4080 if (priv->dpni_attrs.vlan_filter_entries)
4081 net_dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4082
6e2387e8
IR
4083 return 0;
4084}
4085
5d8dccf8 4086static int dpaa2_eth_poll_link_state(void *arg)
6e2387e8
IR
4087{
4088 struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
4089 int err;
4090
4091 while (!kthread_should_stop()) {
5d8dccf8 4092 err = dpaa2_eth_link_state_update(priv);
6e2387e8
IR
4093 if (unlikely(err))
4094 return err;
4095
4096 msleep(DPAA2_ETH_LINK_STATE_REFRESH);
4097 }
4098
4099 return 0;
4100}
4101
71947923
IC
4102static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
4103{
4104 struct fsl_mc_device *dpni_dev, *dpmac_dev;
4105 struct dpaa2_mac *mac;
4106 int err;
4107
4108 dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent);
4109 dpmac_dev = fsl_mc_get_endpoint(dpni_dev);
47325da2
IC
4110
4111 if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER)
4112 return PTR_ERR(dpmac_dev);
4113
4114 if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
71947923
IC
4115 return 0;
4116
71947923
IC
4117 mac = kzalloc(sizeof(struct dpaa2_mac), GFP_KERNEL);
4118 if (!mac)
4119 return -ENOMEM;
4120
4121 mac->mc_dev = dpmac_dev;
4122 mac->mc_io = priv->mc_io;
4123 mac->net_dev = priv->net_dev;
4124
095dca16
IC
4125 err = dpaa2_mac_open(mac);
4126 if (err)
4127 goto err_free_mac;
d87e6063 4128 priv->mac = mac;
095dca16 4129
d87e6063
IC
4130 if (dpaa2_eth_is_type_phy(priv)) {
4131 err = dpaa2_mac_connect(mac);
4132 if (err) {
4133 netdev_err(priv->net_dev, "Error connecting to the MAC endpoint\n");
4134 goto err_close_mac;
4135 }
71947923 4136 }
71947923
IC
4137
4138 return 0;
095dca16
IC
4139
4140err_close_mac:
4141 dpaa2_mac_close(mac);
d87e6063 4142 priv->mac = NULL;
095dca16
IC
4143err_free_mac:
4144 kfree(mac);
4145 return err;
71947923
IC
4146}
4147
4148static void dpaa2_eth_disconnect_mac(struct dpaa2_eth_priv *priv)
4149{
d87e6063
IC
4150 if (dpaa2_eth_is_type_phy(priv))
4151 dpaa2_mac_disconnect(priv->mac);
71947923 4152
848c1903
IC
4153 if (!dpaa2_eth_has_mac(priv))
4154 return;
4155
095dca16 4156 dpaa2_mac_close(priv->mac);
71947923
IC
4157 kfree(priv->mac);
4158 priv->mac = NULL;
4159}
4160
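/* Note: dpaa2_eth_is_type_phy() implies dpaa2_eth_has_mac(), since
 * priv->mac is set for every connected MAC endpoint, but only
 * PHY-backed links also went through dpaa2_mac_connect() and need
 * the matching disconnect above.
 */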
6e2387e8
IR
4161static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
4162{
112197de 4163 u32 status = ~0;
6e2387e8
IR
4164 struct device *dev = (struct device *)arg;
4165 struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
4166 struct net_device *net_dev = dev_get_drvdata(dev);
71947923 4167 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
6e2387e8
IR
4168 int err;
4169
4170 err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
4171 DPNI_IRQ_INDEX, &status);
4172 if (unlikely(err)) {
77160af3 4173 netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
112197de 4174 return IRQ_HANDLED;
6e2387e8
IR
4175 }
4176
112197de 4177 if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
5d8dccf8 4178 dpaa2_eth_link_state_update(priv);
6e2387e8 4179
f5c3fffa 4180 if (status & DPNI_IRQ_EVENT_ENDPOINT_CHANGED) {
5d8dccf8
IC
 4181 dpaa2_eth_set_mac_addr(priv);
4182 dpaa2_eth_update_tx_fqids(priv);
71947923
IC
4183
4184 rtnl_lock();
d87e6063 4185 if (dpaa2_eth_has_mac(priv))
71947923
IC
4186 dpaa2_eth_disconnect_mac(priv);
4187 else
4188 dpaa2_eth_connect_mac(priv);
4189 rtnl_unlock();
f5c3fffa 4190 }
8398b375 4191
6e2387e8
IR
4192 return IRQ_HANDLED;
4193}
4194
5d8dccf8 4195static int dpaa2_eth_setup_irqs(struct fsl_mc_device *ls_dev)
6e2387e8
IR
4196{
4197 int err = 0;
4198 struct fsl_mc_device_irq *irq;
4199
4200 err = fsl_mc_allocate_irqs(ls_dev);
4201 if (err) {
4202 dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
4203 return err;
4204 }
4205
4206 irq = ls_dev->irqs[0];
4207 err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
fdc9b532 4208 NULL, dpni_irq0_handler_thread,
6e2387e8
IR
4209 IRQF_NO_SUSPEND | IRQF_ONESHOT,
4210 dev_name(&ls_dev->dev), &ls_dev->dev);
4211 if (err < 0) {
77160af3 4212 dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err);
6e2387e8
IR
4213 goto free_mc_irq;
4214 }
4215
4216 err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
8398b375
FC
4217 DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED |
4218 DPNI_IRQ_EVENT_ENDPOINT_CHANGED);
6e2387e8 4219 if (err < 0) {
77160af3 4220 dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
6e2387e8
IR
4221 goto free_irq;
4222 }
4223
4224 err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
4225 DPNI_IRQ_INDEX, 1);
4226 if (err < 0) {
77160af3 4227 dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err);
6e2387e8
IR
4228 goto free_irq;
4229 }
4230
4231 return 0;
4232
4233free_irq:
4234 devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
4235free_mc_irq:
4236 fsl_mc_free_irqs(ls_dev);
4237
4238 return err;
4239}
4240
5d8dccf8 4241static void dpaa2_eth_add_ch_napi(struct dpaa2_eth_priv *priv)
6e2387e8
IR
4242{
4243 int i;
4244 struct dpaa2_eth_channel *ch;
4245
4246 for (i = 0; i < priv->num_channels; i++) {
4247 ch = priv->channel[i];
4248 /* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
4249 netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
4250 NAPI_POLL_WEIGHT);
4251 }
4252}
4253
5d8dccf8 4254static void dpaa2_eth_del_ch_napi(struct dpaa2_eth_priv *priv)
6e2387e8
IR
4255{
4256 int i;
4257 struct dpaa2_eth_channel *ch;
4258
4259 for (i = 0; i < priv->num_channels; i++) {
4260 ch = priv->channel[i];
4261 netif_napi_del(&ch->napi);
4262 }
4263}
4264
4265static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
4266{
4267 struct device *dev;
4268 struct net_device *net_dev = NULL;
4269 struct dpaa2_eth_priv *priv = NULL;
4270 int err = 0;
4271
4272 dev = &dpni_dev->dev;
4273
4274 /* Net device */
ab1e6de2 4275 net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_NETDEV_QUEUES);
6e2387e8
IR
4276 if (!net_dev) {
4277 dev_err(dev, "alloc_etherdev_mq() failed\n");
4278 return -ENOMEM;
4279 }
4280
4281 SET_NETDEV_DEV(net_dev, dev);
4282 dev_set_drvdata(dev, net_dev);
4283
4284 priv = netdev_priv(net_dev);
4285 priv->net_dev = net_dev;
4286
08eb2397
IR
4287 priv->iommu_domain = iommu_get_domain_for_dev(dev);
4288
1cf773bd
YL
4289 priv->tx_tstamp_type = HWTSTAMP_TX_OFF;
4290 priv->rx_tstamp = false;
4291
c5521189
YL
4292 priv->dpaa2_ptp_wq = alloc_workqueue("dpaa2_ptp_wq", 0, 0);
4293 if (!priv->dpaa2_ptp_wq) {
4294 err = -ENOMEM;
4295 goto err_wq_alloc;
4296 }
4297
4298 INIT_WORK(&priv->tx_onestep_tstamp, dpaa2_eth_tx_onestep_tstamp);
4299
4300 skb_queue_head_init(&priv->tx_skbs);
4301
6e2387e8
IR
4302 /* Obtain a MC portal */
4303 err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
4304 &priv->mc_io);
4305 if (err) {
8c369610
IR
4306 if (err == -ENXIO)
4307 err = -EPROBE_DEFER;
4308 else
4309 dev_err(dev, "MC portal allocation failed\n");
6e2387e8
IR
4310 goto err_portal_alloc;
4311 }
4312
4313 /* MC objects initialization and configuration */
5d8dccf8 4314 err = dpaa2_eth_setup_dpni(dpni_dev);
6e2387e8
IR
4315 if (err)
4316 goto err_dpni_setup;
4317
5d8dccf8 4318 err = dpaa2_eth_setup_dpio(priv);
6e2387e8
IR
4319 if (err)
4320 goto err_dpio_setup;
4321
5d8dccf8 4322 dpaa2_eth_setup_fqs(priv);
6e2387e8 4323
5d8dccf8 4324 err = dpaa2_eth_setup_dpbp(priv);
6e2387e8
IR
4325 if (err)
4326 goto err_dpbp_setup;
4327
5d8dccf8 4328 err = dpaa2_eth_bind_dpni(priv);
6e2387e8
IR
4329 if (err)
4330 goto err_bind;
4331
4332 /* Add a NAPI context for each channel */
5d8dccf8 4333 dpaa2_eth_add_ch_napi(priv);
6e2387e8
IR
4334
4335 /* Percpu statistics */
4336 priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
4337 if (!priv->percpu_stats) {
4338 dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
4339 err = -ENOMEM;
4340 goto err_alloc_percpu_stats;
4341 }
85047abd
IR
4342 priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
4343 if (!priv->percpu_extras) {
4344 dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
4345 err = -ENOMEM;
4346 goto err_alloc_percpu_extras;
4347 }
6e2387e8 4348
d70446ee
IC
4349 priv->sgt_cache = alloc_percpu(*priv->sgt_cache);
4350 if (!priv->sgt_cache) {
4351 dev_err(dev, "alloc_percpu(sgt_cache) failed\n");
4352 err = -ENOMEM;
4353 goto err_alloc_sgt_cache;
4354 }
4355
5d8dccf8 4356 err = dpaa2_eth_netdev_init(net_dev);
6e2387e8
IR
4357 if (err)
4358 goto err_netdev_init;
4359
4360 /* Configure checksum offload based on current interface flags */
5d8dccf8 4361 err = dpaa2_eth_set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
6e2387e8
IR
4362 if (err)
4363 goto err_csum;
4364
5d8dccf8
IC
4365 err = dpaa2_eth_set_tx_csum(priv,
4366 !!(net_dev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
6e2387e8
IR
4367 if (err)
4368 goto err_csum;
4369
5d8dccf8 4370 err = dpaa2_eth_alloc_rings(priv);
6e2387e8
IR
4371 if (err)
4372 goto err_alloc_rings;
4373
f395b69f
IC
4374#ifdef CONFIG_FSL_DPAA2_ETH_DCB
4375 if (dpaa2_eth_has_pause_support(priv) && priv->vlan_cls_enabled) {
4376 priv->dcbx_mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
4377 net_dev->dcbnl_ops = &dpaa2_eth_dcbnl_ops;
4378 } else {
4379 dev_dbg(dev, "PFC not supported\n");
4380 }
4381#endif
4382
5d8dccf8 4383 err = dpaa2_eth_setup_irqs(dpni_dev);
6e2387e8
IR
4384 if (err) {
4385 netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
5d8dccf8 4386 priv->poll_thread = kthread_run(dpaa2_eth_poll_link_state, priv,
6e2387e8
IR
4387 "%s_poll_link", net_dev->name);
4388 if (IS_ERR(priv->poll_thread)) {
7f12c8a3 4389 dev_err(dev, "Error starting polling thread\n");
6e2387e8
IR
4390 goto err_poll_thread;
4391 }
4392 priv->do_link_poll = true;
4393 }
4394
71947923
IC
4395 err = dpaa2_eth_connect_mac(priv);
4396 if (err)
4397 goto err_connect_mac;
4398
ceeb03ad
IC
4399 err = dpaa2_eth_dl_register(priv);
4400 if (err)
4401 goto err_dl_register;
4402
061d631f
IC
4403 err = dpaa2_eth_dl_traps_register(priv);
4404 if (err)
4405 goto err_dl_trap_register;
4406
ceeb03ad
IC
4407 err = dpaa2_eth_dl_port_add(priv);
4408 if (err)
4409 goto err_dl_port_add;
4410
7f12c8a3
IR
4411 err = register_netdev(net_dev);
4412 if (err < 0) {
4413 dev_err(dev, "register_netdev() failed\n");
4414 goto err_netdev_reg;
4415 }
4416
091a19ea
IR
4417#ifdef CONFIG_DEBUG_FS
4418 dpaa2_dbg_add(priv);
4419#endif
4420
6e2387e8
IR
4421 dev_info(dev, "Probed interface %s\n", net_dev->name);
4422 return 0;
4423
7f12c8a3 4424err_netdev_reg:
ceeb03ad
IC
4425 dpaa2_eth_dl_port_del(priv);
4426err_dl_port_add:
061d631f
IC
4427 dpaa2_eth_dl_traps_unregister(priv);
4428err_dl_trap_register:
ceeb03ad
IC
4429 dpaa2_eth_dl_unregister(priv);
4430err_dl_register:
71947923
IC
4431 dpaa2_eth_disconnect_mac(priv);
4432err_connect_mac:
7f12c8a3
IR
4433 if (priv->do_link_poll)
4434 kthread_stop(priv->poll_thread);
4435 else
4436 fsl_mc_free_irqs(dpni_dev);
6e2387e8 4437err_poll_thread:
5d8dccf8 4438 dpaa2_eth_free_rings(priv);
6e2387e8
IR
4439err_alloc_rings:
4440err_csum:
6e2387e8 4441err_netdev_init:
d70446ee
IC
4442 free_percpu(priv->sgt_cache);
4443err_alloc_sgt_cache:
85047abd
IR
4444 free_percpu(priv->percpu_extras);
4445err_alloc_percpu_extras:
6e2387e8
IR
4446 free_percpu(priv->percpu_stats);
4447err_alloc_percpu_stats:
5d8dccf8 4448 dpaa2_eth_del_ch_napi(priv);
6e2387e8 4449err_bind:
5d8dccf8 4450 dpaa2_eth_free_dpbp(priv);
6e2387e8 4451err_dpbp_setup:
5d8dccf8 4452 dpaa2_eth_free_dpio(priv);
6e2387e8 4453err_dpio_setup:
5d8dccf8 4454 dpaa2_eth_free_dpni(priv);
6e2387e8
IR
4455err_dpni_setup:
4456 fsl_mc_portal_free(priv->mc_io);
4457err_portal_alloc:
c5521189
YL
4458 destroy_workqueue(priv->dpaa2_ptp_wq);
4459err_wq_alloc:
6e2387e8
IR
4460 dev_set_drvdata(dev, NULL);
4461 free_netdev(net_dev);
4462
4463 return err;
4464}
4465
4466static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
4467{
4468 struct device *dev;
4469 struct net_device *net_dev;
4470 struct dpaa2_eth_priv *priv;
4471
4472 dev = &ls_dev->dev;
4473 net_dev = dev_get_drvdata(dev);
4474 priv = netdev_priv(net_dev);
4475
091a19ea
IR
4476#ifdef CONFIG_DEBUG_FS
4477 dpaa2_dbg_remove(priv);
4478#endif
71947923
IC
4479 rtnl_lock();
4480 dpaa2_eth_disconnect_mac(priv);
4481 rtnl_unlock();
4482
6e2387e8 4483 unregister_netdev(net_dev);
6e2387e8 4484
ceeb03ad 4485 dpaa2_eth_dl_port_del(priv);
061d631f 4486 dpaa2_eth_dl_traps_unregister(priv);
ceeb03ad
IC
4487 dpaa2_eth_dl_unregister(priv);
4488
6e2387e8
IR
4489 if (priv->do_link_poll)
4490 kthread_stop(priv->poll_thread);
4491 else
4492 fsl_mc_free_irqs(ls_dev);
4493
5d8dccf8 4494 dpaa2_eth_free_rings(priv);
d70446ee 4495 free_percpu(priv->sgt_cache);
6e2387e8 4496 free_percpu(priv->percpu_stats);
85047abd 4497 free_percpu(priv->percpu_extras);
6e2387e8 4498
5d8dccf8
IC
4499 dpaa2_eth_del_ch_napi(priv);
4500 dpaa2_eth_free_dpbp(priv);
4501 dpaa2_eth_free_dpio(priv);
4502 dpaa2_eth_free_dpni(priv);
6e2387e8
IR
4503
4504 fsl_mc_portal_free(priv->mc_io);
4505
6e2387e8
IR
4bc07aa4 4506 /* Log while net_dev is still valid; it must not be used after free */
 4507 dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
 4508
 4509 free_netdev(net_dev);
7472dd9f 4509
6e2387e8
IR
4510 return 0;
4511}
4512
4513static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
4514 {
4515 .vendor = FSL_MC_VENDOR_FREESCALE,
4516 .obj_type = "dpni",
4517 },
4518 { .vendor = 0x0 }
4519};
4520MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);
4521
4522static struct fsl_mc_driver dpaa2_eth_driver = {
4523 .driver = {
4524 .name = KBUILD_MODNAME,
4525 .owner = THIS_MODULE,
4526 },
4527 .probe = dpaa2_eth_probe,
4528 .remove = dpaa2_eth_remove,
4529 .match_id_table = dpaa2_eth_match_id_table
4530};
4531
091a19ea
IR
4532static int __init dpaa2_eth_driver_init(void)
4533{
4534 int err;
4535
4536 dpaa2_eth_dbg_init();
4537 err = fsl_mc_driver_register(&dpaa2_eth_driver);
4538 if (err) {
4539 dpaa2_eth_dbg_exit();
4540 return err;
4541 }
4542
4543 return 0;
4544}
4545
4546static void __exit dpaa2_eth_driver_exit(void)
4547{
4548 dpaa2_eth_dbg_exit();
4549 fsl_mc_driver_unregister(&dpaa2_eth_driver);
4550}
4551
4552module_init(dpaa2_eth_driver_init);
4553module_exit(dpaa2_eth_driver_exit);