// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2016-2020 NXP
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/kthread.h>
#include <linux/iommu.h>
#include <linux/fsl/mc.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/fsl/ptp_qoriq.h>
#include <linux/ptp_classify.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

#include "dpaa2-eth.h"

/* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
 * using trace events only need to #include <trace/events/sched.h>
 */
#define CREATE_TRACE_POINTS
#include "dpaa2-eth-trace.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");

struct ptp_qoriq *dpaa2_ptp;
EXPORT_SYMBOL(dpaa2_ptp);

static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
				dma_addr_t iova_addr)
{
	phys_addr_t phys_addr;

	phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;

	return phys_to_virt(phys_addr);
}

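/* Editor's note (illustrative, not upstream code): on the data path, frame
 * descriptor addresses are IOVAs, so callers pair this translation with a
 * DMA sync before touching the buffer, as dpaa2_eth_rx() does below:
 *
 *	dma_addr_t addr = dpaa2_fd_get_addr(fd);
 *	void *vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
 *
 *	dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
 *				DMA_BIDIRECTIONAL);
 *
 * When no IOMMU domain is attached, the IOVA is already a physical address
 * and the translation degenerates to phys_to_virt().
 */
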
static void dpaa2_eth_validate_rx_csum(struct dpaa2_eth_priv *priv,
				       u32 fd_status,
				       struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* HW checksum validation is disabled, nothing to do here */
	if (!(priv->net_dev->features & NETIF_F_RXCSUM))
		return;

	/* Read checksum validation bits */
	if (!((fd_status & DPAA2_FAS_L3CV) &&
	      (fd_status & DPAA2_FAS_L4CV)))
		return;

	/* Inform the stack there's no need to compute L3/L4 csum anymore */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

/* Free a received FD.
 * Not to be used for Tx conf FDs or on any other paths.
 */
static void dpaa2_eth_free_rx_fd(struct dpaa2_eth_priv *priv,
				 const struct dpaa2_fd *fd,
				 void *vaddr)
{
	struct device *dev = priv->net_dev->dev.parent;
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	u8 fd_format = dpaa2_fd_get_format(fd);
	struct dpaa2_sg_entry *sgt;
	void *sg_vaddr;
	int i;

	/* If single buffer frame, just free the data buffer */
	if (fd_format == dpaa2_fd_single)
		goto free_buf;
	else if (fd_format != dpaa2_fd_sg)
		/* We don't support any other format */
		return;

	/* For S/G frames, we first need to free all SG entries
	 * except the first one, which was taken care of already
	 */
	sgt = vaddr + dpaa2_fd_get_offset(fd);
	for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
		addr = dpaa2_sg_get_addr(&sgt[i]);
		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
		dma_unmap_page(dev, addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);

		free_pages((unsigned long)sg_vaddr, 0);
		if (dpaa2_sg_is_final(&sgt[i]))
			break;
	}

free_buf:
	free_pages((unsigned long)vaddr, 0);
}

/* Build a linear skb based on a single-buffer frame descriptor */
static struct sk_buff *dpaa2_eth_build_linear_skb(struct dpaa2_eth_channel *ch,
						  const struct dpaa2_fd *fd,
						  void *fd_vaddr)
{
	struct sk_buff *skb = NULL;
	u16 fd_offset = dpaa2_fd_get_offset(fd);
	u32 fd_length = dpaa2_fd_get_len(fd);

	ch->buf_count--;

	skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, fd_offset);
	skb_put(skb, fd_length);

	return skb;
}

/* Build a non linear (fragmented) skb based on a S/G table */
static struct sk_buff *dpaa2_eth_build_frag_skb(struct dpaa2_eth_priv *priv,
						struct dpaa2_eth_channel *ch,
						struct dpaa2_sg_entry *sgt)
{
	struct sk_buff *skb = NULL;
	struct device *dev = priv->net_dev->dev.parent;
	void *sg_vaddr;
	dma_addr_t sg_addr;
	u16 sg_offset;
	u32 sg_length;
	struct page *page, *head_page;
	int page_offset;
	int i;

	for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
		struct dpaa2_sg_entry *sge = &sgt[i];

		/* NOTE: We only support SG entries in dpaa2_sg_single format,
		 * but this is the only format we may receive from HW anyway
		 */

		/* Get the address and length from the S/G entry */
		sg_addr = dpaa2_sg_get_addr(sge);
		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
		dma_unmap_page(dev, sg_addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);

		sg_length = dpaa2_sg_get_len(sge);

		if (i == 0) {
			/* We build the skb around the first data buffer */
			skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
			if (unlikely(!skb)) {
				/* Free the first SG entry now, since we already
				 * unmapped it and obtained the virtual address
				 */
				free_pages((unsigned long)sg_vaddr, 0);

				/* We still need to subtract the buffers used
				 * by this FD from our software counter
				 */
				while (!dpaa2_sg_is_final(&sgt[i]) &&
				       i < DPAA2_ETH_MAX_SG_ENTRIES)
					i++;
				break;
			}

			sg_offset = dpaa2_sg_get_offset(sge);
			skb_reserve(skb, sg_offset);
			skb_put(skb, sg_length);
		} else {
			/* Rest of the data buffers are stored as skb frags */
			page = virt_to_page(sg_vaddr);
			head_page = virt_to_head_page(sg_vaddr);

			/* Offset in page (which may be compound).
			 * Data in subsequent SG entries is stored from the
			 * beginning of the buffer, so we don't need to add the
			 * sg_offset.
			 */
			page_offset = ((unsigned long)sg_vaddr &
				       (PAGE_SIZE - 1)) +
				      (page_address(page) - page_address(head_page));

			skb_add_rx_frag(skb, i - 1, head_page, page_offset,
					sg_length, priv->rx_buf_size);
		}

		if (dpaa2_sg_is_final(sge))
			break;
	}

	WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");

	/* Count all data buffers + SG table buffer */
	ch->buf_count -= i + 2;

	return skb;
}

/* Free buffers acquired from the buffer pool or which were meant to
 * be released in the pool
 */
static void dpaa2_eth_free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array,
				int count)
{
	struct device *dev = priv->net_dev->dev.parent;
	void *vaddr;
	int i;

	for (i = 0; i < count; i++) {
		vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
		dma_unmap_page(dev, buf_array[i], priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);
		free_pages((unsigned long)vaddr, 0);
	}
}

static void dpaa2_eth_xdp_release_buf(struct dpaa2_eth_priv *priv,
				      struct dpaa2_eth_channel *ch,
				      dma_addr_t addr)
{
	int retries = 0;
	int err;

	ch->xdp.drop_bufs[ch->xdp.drop_cnt++] = addr;
	if (ch->xdp.drop_cnt < DPAA2_ETH_BUFS_PER_CMD)
		return;

	while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
					       ch->xdp.drop_bufs,
					       ch->xdp.drop_cnt)) == -EBUSY) {
		if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
			break;
		cpu_relax();
	}

	if (err) {
		dpaa2_eth_free_bufs(priv, ch->xdp.drop_bufs, ch->xdp.drop_cnt);
		ch->buf_count -= ch->xdp.drop_cnt;
	}

	ch->xdp.drop_cnt = 0;
}

static int dpaa2_eth_xdp_flush(struct dpaa2_eth_priv *priv,
			       struct dpaa2_eth_fq *fq,
			       struct dpaa2_eth_xdp_fds *xdp_fds)
{
	int total_enqueued = 0, retries = 0, enqueued;
	struct dpaa2_eth_drv_stats *percpu_extras;
	int num_fds, err, max_retries;
	struct dpaa2_fd *fds;

	percpu_extras = this_cpu_ptr(priv->percpu_extras);

	/* try to enqueue all the FDs until the max number of retries is hit */
	fds = xdp_fds->fds;
	num_fds = xdp_fds->num;
	max_retries = num_fds * DPAA2_ETH_ENQUEUE_RETRIES;
	while (total_enqueued < num_fds && retries < max_retries) {
		err = priv->enqueue(priv, fq, &fds[total_enqueued],
				    0, num_fds - total_enqueued, &enqueued);
		if (err == -EBUSY) {
			percpu_extras->tx_portal_busy += ++retries;
			continue;
		}
		total_enqueued += enqueued;
	}
	xdp_fds->num = 0;

	return total_enqueued;
}

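/* Editor's note (illustrative, not upstream code): the retry budget above
 * scales with the batch size, so flushing num_fds frames tolerates at most
 * num_fds * DPAA2_ETH_ENQUEUE_RETRIES portal-busy responses. The return
 * value may be smaller than the batch; callers such as
 * dpaa2_eth_xdp_tx_flush() below must recycle whatever was not accepted:
 *
 *	enqueued = dpaa2_eth_xdp_flush(priv, fq, &fq->xdp_tx_fds);
 *	for (i = enqueued; i < num; i++)
 *		dpaa2_eth_xdp_release_buf(priv, ch, dpaa2_fd_get_addr(&fds[i]));
 */
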
static void dpaa2_eth_xdp_tx_flush(struct dpaa2_eth_priv *priv,
				   struct dpaa2_eth_channel *ch,
				   struct dpaa2_eth_fq *fq)
{
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_fd *fds;
	int enqueued, i;

	percpu_stats = this_cpu_ptr(priv->percpu_stats);

	/* Enqueue the array of XDP_TX frames */
	enqueued = dpaa2_eth_xdp_flush(priv, fq, &fq->xdp_tx_fds);

	/* Update statistics */
	percpu_stats->tx_packets += enqueued;
	fds = fq->xdp_tx_fds.fds;
	for (i = 0; i < enqueued; i++) {
		percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
		ch->stats.xdp_tx++;
	}
	for (i = enqueued; i < fq->xdp_tx_fds.num; i++) {
		dpaa2_eth_xdp_release_buf(priv, ch, dpaa2_fd_get_addr(&fds[i]));
		percpu_stats->tx_errors++;
		ch->stats.xdp_tx_err++;
	}
	fq->xdp_tx_fds.num = 0;
}

static void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv,
				  struct dpaa2_eth_channel *ch,
				  struct dpaa2_fd *fd,
				  void *buf_start, u16 queue_id)
{
	struct dpaa2_faead *faead;
	struct dpaa2_fd *dest_fd;
	struct dpaa2_eth_fq *fq;
	u32 ctrl, frc;

	/* Mark the egress frame hardware annotation area as valid */
	frc = dpaa2_fd_get_frc(fd);
	dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
	dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL);

	/* Instruct hardware to release the FD buffer directly into
	 * the buffer pool once transmission is completed, instead of
	 * sending a Tx confirmation frame to us
	 */
	ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV;
	faead = dpaa2_get_faead(buf_start, false);
	faead->ctrl = cpu_to_le32(ctrl);
	faead->conf_fqid = 0;

	fq = &priv->fq[queue_id];
	dest_fd = &fq->xdp_tx_fds.fds[fq->xdp_tx_fds.num++];
	memcpy(dest_fd, fd, sizeof(*dest_fd));

	if (fq->xdp_tx_fds.num < DEV_MAP_BULK_SIZE)
		return;

	dpaa2_eth_xdp_tx_flush(priv, ch, fq);
}

static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
			     struct dpaa2_eth_channel *ch,
			     struct dpaa2_eth_fq *rx_fq,
			     struct dpaa2_fd *fd, void *vaddr)
{
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	struct bpf_prog *xdp_prog;
	struct xdp_buff xdp;
	u32 xdp_act = XDP_PASS;
	int err;

	rcu_read_lock();

	xdp_prog = READ_ONCE(ch->xdp.prog);
	if (!xdp_prog)
		goto out;

	xdp.data = vaddr + dpaa2_fd_get_offset(fd);
	xdp.data_end = xdp.data + dpaa2_fd_get_len(fd);
	xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
	xdp_set_data_meta_invalid(&xdp);
	xdp.rxq = &ch->xdp_rxq;

	xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE -
		       (dpaa2_fd_get_offset(fd) - XDP_PACKET_HEADROOM);

	xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);

	/* xdp.data pointer may have changed */
	dpaa2_fd_set_offset(fd, xdp.data - vaddr);
	dpaa2_fd_set_len(fd, xdp.data_end - xdp.data);

	switch (xdp_act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		dpaa2_eth_xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid);
		break;
	default:
		bpf_warn_invalid_xdp_action(xdp_act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
		fallthrough;
	case XDP_DROP:
		dpaa2_eth_xdp_release_buf(priv, ch, addr);
		ch->stats.xdp_drop++;
		break;
	case XDP_REDIRECT:
		dma_unmap_page(priv->net_dev->dev.parent, addr,
			       priv->rx_buf_size, DMA_BIDIRECTIONAL);
		ch->buf_count--;

		/* Allow redirect use of full headroom */
		xdp.data_hard_start = vaddr;
		xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE;

		err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
		if (unlikely(err))
			ch->stats.xdp_drop++;
		else
			ch->stats.xdp_redirect++;
		break;
	}

	ch->xdp.res |= xdp_act;
out:
	rcu_read_unlock();
	return xdp_act;
}

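/* Editor's note (illustrative, not upstream code): the return value of
 * dpaa2_eth_run_xdp() tells the Rx routine whether it still owns the
 * buffer. Only XDP_PASS leaves the frame to the regular stack path; for
 * XDP_TX, XDP_REDIRECT and XDP_DROP the buffer has already been enqueued,
 * redirected or recycled here, so dpaa2_eth_rx() below just updates the
 * packet counters and returns.
 */
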
/* Main Rx frame processing routine */
static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
			 struct dpaa2_eth_channel *ch,
			 const struct dpaa2_fd *fd,
			 struct dpaa2_eth_fq *fq)
{
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	u8 fd_format = dpaa2_fd_get_format(fd);
	void *vaddr;
	struct sk_buff *skb;
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_drv_stats *percpu_extras;
	struct device *dev = priv->net_dev->dev.parent;
	struct dpaa2_fas *fas;
	void *buf_data;
	u32 status = 0;
	u32 xdp_act;

	/* Tracing point */
	trace_dpaa2_rx_fd(priv->net_dev, fd);

	vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
	dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
				DMA_BIDIRECTIONAL);

	fas = dpaa2_get_fas(vaddr, false);
	prefetch(fas);
	buf_data = vaddr + dpaa2_fd_get_offset(fd);
	prefetch(buf_data);

	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	percpu_extras = this_cpu_ptr(priv->percpu_extras);

	if (fd_format == dpaa2_fd_single) {
		xdp_act = dpaa2_eth_run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr);
		if (xdp_act != XDP_PASS) {
			percpu_stats->rx_packets++;
			percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
			return;
		}

		dma_unmap_page(dev, addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);
		skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
	} else if (fd_format == dpaa2_fd_sg) {
		WARN_ON(priv->xdp_prog);

		dma_unmap_page(dev, addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);
		skb = dpaa2_eth_build_frag_skb(priv, ch, buf_data);
		free_pages((unsigned long)vaddr, 0);
		percpu_extras->rx_sg_frames++;
		percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
	} else {
		/* We don't support any other format */
		goto err_frame_format;
	}

	if (unlikely(!skb))
		goto err_build_skb;

	prefetch(skb->data);

	/* Get the timestamp value */
	if (priv->rx_tstamp) {
		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
		__le64 *ts = dpaa2_get_ts(vaddr, false);
		u64 ns;

		memset(shhwtstamps, 0, sizeof(*shhwtstamps));

		ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
		shhwtstamps->hwtstamp = ns_to_ktime(ns);
	}

	/* Check if we need to validate the L4 csum */
	if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
		status = le32_to_cpu(fas->status);
		dpaa2_eth_validate_rx_csum(priv, status, skb);
	}

	skb->protocol = eth_type_trans(skb, priv->net_dev);
	skb_record_rx_queue(skb, fq->flowid);

	percpu_stats->rx_packets++;
	percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);

	list_add_tail(&skb->list, ch->rx_list);

	return;

err_build_skb:
	dpaa2_eth_free_rx_fd(priv, fd, vaddr);
err_frame_format:
	percpu_stats->rx_dropped++;
}

/* Processing of Rx frames received on the error FQ
 * We check and print the error bits and then free the frame
 */
static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv,
			     struct dpaa2_eth_channel *ch,
			     const struct dpaa2_fd *fd,
			     struct dpaa2_eth_fq *fq __always_unused)
{
	struct device *dev = priv->net_dev->dev.parent;
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	u8 fd_format = dpaa2_fd_get_format(fd);
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_trap_item *trap_item;
	struct dpaa2_fapr *fapr;
	struct sk_buff *skb;
	void *buf_data;
	void *vaddr;

	vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
	dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
				DMA_BIDIRECTIONAL);

	buf_data = vaddr + dpaa2_fd_get_offset(fd);

	if (fd_format == dpaa2_fd_single) {
		dma_unmap_page(dev, addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);
		skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
	} else if (fd_format == dpaa2_fd_sg) {
		dma_unmap_page(dev, addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);
		skb = dpaa2_eth_build_frag_skb(priv, ch, buf_data);
		free_pages((unsigned long)vaddr, 0);
	} else {
		/* We don't support any other format */
		dpaa2_eth_free_rx_fd(priv, fd, vaddr);
		goto err_frame_format;
	}

	fapr = dpaa2_get_fapr(vaddr, false);
	trap_item = dpaa2_eth_dl_get_trap(priv, fapr);
	if (trap_item)
		devlink_trap_report(priv->devlink, skb, trap_item->trap_ctx,
				    &priv->devlink_port, NULL);
	consume_skb(skb);

err_frame_format:
	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	percpu_stats->rx_errors++;
	ch->buf_count--;
}

/* Consume all frames pull-dequeued into the store. This is the simplest way to
 * make sure we don't accidentally issue another volatile dequeue which would
 * overwrite (leak) frames already in the store.
 *
 * Observance of NAPI budget is not our concern, leaving that to the caller.
 */
static int dpaa2_eth_consume_frames(struct dpaa2_eth_channel *ch,
				    struct dpaa2_eth_fq **src)
{
	struct dpaa2_eth_priv *priv = ch->priv;
	struct dpaa2_eth_fq *fq = NULL;
	struct dpaa2_dq *dq;
	const struct dpaa2_fd *fd;
	int cleaned = 0, retries = 0;
	int is_last;

	do {
		dq = dpaa2_io_store_next(ch->store, &is_last);
		if (unlikely(!dq)) {
			/* If we're here, we *must* have placed a
			 * volatile dequeue command, so keep reading through
			 * the store until we get some sort of valid response
			 * token (either a valid frame or an "empty dequeue")
			 */
			if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) {
				netdev_err_once(priv->net_dev,
						"Unable to read a valid dequeue response\n");
				return -ETIMEDOUT;
			}
			continue;
		}

		fd = dpaa2_dq_fd(dq);
		fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);

		fq->consume(priv, ch, fd, fq);
		cleaned++;
		retries = 0;
	} while (!is_last);

	if (!cleaned)
		return 0;

	fq->stats.frames += cleaned;
	ch->stats.frames += cleaned;

	/* A dequeue operation only pulls frames from a single queue
	 * into the store. Return the frame queue as an out param.
	 */
	if (src)
		*src = fq;

	return cleaned;
}

static int dpaa2_eth_ptp_parse(struct sk_buff *skb,
			       u8 *msgtype, u8 *twostep, u8 *udp,
			       u16 *correction_offset,
			       u16 *origintimestamp_offset)
{
	unsigned int ptp_class;
	struct ptp_header *hdr;
	unsigned int type;
	u8 *base;

	ptp_class = ptp_classify_raw(skb);
	if (ptp_class == PTP_CLASS_NONE)
		return -EINVAL;

	hdr = ptp_parse_header(skb, ptp_class);
	if (!hdr)
		return -EINVAL;

	*msgtype = ptp_get_msgtype(hdr, ptp_class);
	*twostep = hdr->flag_field[0] & 0x2;

	type = ptp_class & PTP_CLASS_PMASK;
	if (type == PTP_CLASS_IPV4 ||
	    type == PTP_CLASS_IPV6)
		*udp = 1;
	else
		*udp = 0;

	base = skb_mac_header(skb);
	*correction_offset = (u8 *)&hdr->correction - base;
	*origintimestamp_offset = (u8 *)hdr + sizeof(struct ptp_header) - base;

	return 0;
}

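/* Editor's note (illustrative, not upstream code): both offsets returned
 * above are byte offsets relative to skb_mac_header(). The caller uses
 * origintimestamp_offset to patch the PTP originTimestamp field directly
 * in the frame, and hands correction_offset to firmware through
 * dpni_set_single_step_cfg() so the hardware can update correctionField
 * on the fly; see dpaa2_eth_enable_tx_tstamp() below.
 */
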
/* Configure the egress frame annotation for timestamp update */
static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_eth_priv *priv,
				       struct dpaa2_fd *fd,
				       void *buf_start,
				       struct sk_buff *skb)
{
	struct ptp_tstamp origin_timestamp;
	struct dpni_single_step_cfg cfg;
	u8 msgtype, twostep, udp;
	struct dpaa2_faead *faead;
	struct dpaa2_fas *fas;
	struct timespec64 ts;
	u16 offset1, offset2;
	u32 ctrl, frc;
	__le64 *ns;
	u8 *data;

	/* Mark the egress frame annotation area as valid */
	frc = dpaa2_fd_get_frc(fd);
	dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);

	/* Set hardware annotation size */
	ctrl = dpaa2_fd_get_ctrl(fd);
	dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL);

	/* enable UPD (update prepended data) bit in FAEAD field of
	 * hardware frame annotation area
	 */
	ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
	faead = dpaa2_get_faead(buf_start, true);
	faead->ctrl = cpu_to_le32(ctrl);

	if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
		if (dpaa2_eth_ptp_parse(skb, &msgtype, &twostep, &udp,
					&offset1, &offset2) ||
		    msgtype != PTP_MSGTYPE_SYNC || twostep) {
			WARN_ONCE(1, "Bad packet for one-step timestamping\n");
			return;
		}

		/* Mark the frame annotation status as valid */
		frc = dpaa2_fd_get_frc(fd);
		dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FASV);

		/* Mark the PTP flag for one step timestamping */
		fas = dpaa2_get_fas(buf_start, true);
		fas->status = cpu_to_le32(DPAA2_FAS_PTP);

		dpaa2_ptp->caps.gettime64(&dpaa2_ptp->caps, &ts);
		ns = dpaa2_get_ts(buf_start, true);
		*ns = cpu_to_le64(timespec64_to_ns(&ts) /
				  DPAA2_PTP_CLK_PERIOD_NS);

		/* Update current time to PTP message originTimestamp field */
		ns_to_ptp_tstamp(&origin_timestamp, le64_to_cpup(ns));
		data = skb_mac_header(skb);
		*(__be16 *)(data + offset2) = htons(origin_timestamp.sec_msb);
		*(__be32 *)(data + offset2 + 2) =
			htonl(origin_timestamp.sec_lsb);
		*(__be32 *)(data + offset2 + 6) = htonl(origin_timestamp.nsec);

		cfg.en = 1;
		cfg.ch_update = udp;
		cfg.offset = offset1;
		cfg.peer_delay = 0;

		if (dpni_set_single_step_cfg(priv->mc_io, 0, priv->mc_token,
					     &cfg))
			WARN_ONCE(1, "Failed to set single step register");
	}
}

/* Create a frame descriptor based on a fragmented skb */
static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
				 struct sk_buff *skb,
				 struct dpaa2_fd *fd,
				 void **swa_addr)
{
	struct device *dev = priv->net_dev->dev.parent;
	void *sgt_buf = NULL;
	dma_addr_t addr;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	struct dpaa2_sg_entry *sgt;
	int i, err;
	int sgt_buf_size;
	struct scatterlist *scl, *crt_scl;
	int num_sg;
	int num_dma_bufs;
	struct dpaa2_eth_swa *swa;

	/* Create and map scatterlist.
	 * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
	 * to go beyond nr_frags+1.
	 * Note: We don't support chained scatterlists
	 */
	if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
		return -EINVAL;

	scl = kmalloc_array(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
	if (unlikely(!scl))
		return -ENOMEM;

	sg_init_table(scl, nr_frags + 1);
	num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
	if (unlikely(num_sg < 0)) {
		err = -ENOMEM;
		goto dma_map_sg_failed;
	}
	num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
	if (unlikely(!num_dma_bufs)) {
		err = -ENOMEM;
		goto dma_map_sg_failed;
	}

	/* Prepare the HW SGT structure */
	sgt_buf_size = priv->tx_data_offset +
		       sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
	sgt_buf = napi_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN);
	if (unlikely(!sgt_buf)) {
		err = -ENOMEM;
		goto sgt_buf_alloc_failed;
	}
	sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
	memset(sgt_buf, 0, sgt_buf_size);

	sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);

	/* Fill in the HW SGT structure.
	 *
	 * sgt_buf is zeroed out, so the following fields are implicit
	 * in all sgt entries:
	 *   - offset is 0
	 *   - format is 'dpaa2_sg_single'
	 */
	for_each_sg(scl, crt_scl, num_dma_bufs, i) {
		dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
		dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
	}
	dpaa2_sg_set_final(&sgt[i - 1], true);

	/* Store the skb backpointer in the SGT buffer.
	 * Fit the scatterlist and the number of buffers alongside the
	 * skb backpointer in the software annotation area. We'll need
	 * all of them on Tx Conf.
	 */
	*swa_addr = (void *)sgt_buf;
	swa = (struct dpaa2_eth_swa *)sgt_buf;
	swa->type = DPAA2_ETH_SWA_SG;
	swa->sg.skb = skb;
	swa->sg.scl = scl;
	swa->sg.num_sg = num_sg;
	swa->sg.sgt_size = sgt_buf_size;

	/* Separately map the SGT buffer */
	addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, addr))) {
		err = -ENOMEM;
		goto dma_map_single_failed;
	}
	dpaa2_fd_set_offset(fd, priv->tx_data_offset);
	dpaa2_fd_set_format(fd, dpaa2_fd_sg);
	dpaa2_fd_set_addr(fd, addr);
	dpaa2_fd_set_len(fd, skb->len);
	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);

	return 0;

dma_map_single_failed:
	skb_free_frag(sgt_buf);
sgt_buf_alloc_failed:
	dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
dma_map_sg_failed:
	kfree(scl);
	return err;
}

/* Create a SG frame descriptor based on a linear skb.
 *
 * This function is used on the Tx path when the skb headroom is not large
 * enough for the HW requirements, thus instead of realloc-ing the skb we
 * create a SG frame descriptor with only one entry.
 */
static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
					    struct sk_buff *skb,
					    struct dpaa2_fd *fd,
					    void **swa_addr)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpaa2_eth_sgt_cache *sgt_cache;
	struct dpaa2_sg_entry *sgt;
	struct dpaa2_eth_swa *swa;
	dma_addr_t addr, sgt_addr;
	void *sgt_buf = NULL;
	int sgt_buf_size;
	int err;

	/* Prepare the HW SGT structure */
	sgt_cache = this_cpu_ptr(priv->sgt_cache);
	sgt_buf_size = priv->tx_data_offset + sizeof(struct dpaa2_sg_entry);

	if (sgt_cache->count == 0)
		sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN,
				  GFP_ATOMIC);
	else
		sgt_buf = sgt_cache->buf[--sgt_cache->count];
	if (unlikely(!sgt_buf))
		return -ENOMEM;

	sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
	sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);

	addr = dma_map_single(dev, skb->data, skb->len, DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, addr))) {
		err = -ENOMEM;
		goto data_map_failed;
	}

	/* Fill in the HW SGT structure */
	dpaa2_sg_set_addr(sgt, addr);
	dpaa2_sg_set_len(sgt, skb->len);
	dpaa2_sg_set_final(sgt, true);

	/* Store the skb backpointer in the SGT buffer */
	*swa_addr = (void *)sgt_buf;
	swa = (struct dpaa2_eth_swa *)sgt_buf;
	swa->type = DPAA2_ETH_SWA_SINGLE;
	swa->single.skb = skb;
	swa->single.sgt_size = sgt_buf_size;

	/* Separately map the SGT buffer */
	sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, sgt_addr))) {
		err = -ENOMEM;
		goto sgt_map_failed;
	}

	dpaa2_fd_set_offset(fd, priv->tx_data_offset);
	dpaa2_fd_set_format(fd, dpaa2_fd_sg);
	dpaa2_fd_set_addr(fd, sgt_addr);
	dpaa2_fd_set_len(fd, skb->len);
	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);

	return 0;

sgt_map_failed:
	dma_unmap_single(dev, addr, skb->len, DMA_BIDIRECTIONAL);
data_map_failed:
	if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE)
		kfree(sgt_buf);
	else
		sgt_cache->buf[sgt_cache->count++] = sgt_buf;

	return err;
}

/* Create a frame descriptor based on a linear skb */
static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
				     struct sk_buff *skb,
				     struct dpaa2_fd *fd,
				     void **swa_addr)
{
	struct device *dev = priv->net_dev->dev.parent;
	u8 *buffer_start, *aligned_start;
	struct dpaa2_eth_swa *swa;
	dma_addr_t addr;

	buffer_start = skb->data - dpaa2_eth_needed_headroom(skb);

	/* If there's enough room to align the FD address, do it.
	 * It will help hardware optimize accesses.
	 */
	aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
				  DPAA2_ETH_TX_BUF_ALIGN);
	if (aligned_start >= skb->head)
		buffer_start = aligned_start;

	/* Store a backpointer to the skb at the beginning of the buffer
	 * (in the private data area) such that we can release it
	 * on Tx confirm
	 */
	*swa_addr = (void *)buffer_start;
	swa = (struct dpaa2_eth_swa *)buffer_start;
	swa->type = DPAA2_ETH_SWA_SINGLE;
	swa->single.skb = skb;

	addr = dma_map_single(dev, buffer_start,
			      skb_tail_pointer(skb) - buffer_start,
			      DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, addr)))
		return -ENOMEM;

	dpaa2_fd_set_addr(fd, addr);
	dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
	dpaa2_fd_set_len(fd, skb->len);
	dpaa2_fd_set_format(fd, dpaa2_fd_single);
	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);

	return 0;
}

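/* Editor's note (illustrative, not upstream code): for the single-buffer
 * case above, the Tx buffer handed to hardware is laid out roughly as:
 *
 *	buffer_start                              skb->data
 *	|  SWA (skb backpointer)  |  HW annotations  |  frame data  |
 *
 * The FD offset field records skb->data - buffer_start, so the hardware
 * skips the software and hardware annotation areas when reading the frame.
 */
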
/* FD freeing routine on the Tx path
 *
 * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
 * back-pointed to is also freed.
 * This can be called either from dpaa2_eth_tx_conf() or on the error path of
 * dpaa2_eth_tx().
 */
static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
				 struct dpaa2_eth_fq *fq,
				 const struct dpaa2_fd *fd, bool in_napi)
{
	struct device *dev = priv->net_dev->dev.parent;
	dma_addr_t fd_addr, sg_addr;
	struct sk_buff *skb = NULL;
	unsigned char *buffer_start;
	struct dpaa2_eth_swa *swa;
	u8 fd_format = dpaa2_fd_get_format(fd);
	u32 fd_len = dpaa2_fd_get_len(fd);

	struct dpaa2_eth_sgt_cache *sgt_cache;
	struct dpaa2_sg_entry *sgt;

	fd_addr = dpaa2_fd_get_addr(fd);
	buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
	swa = (struct dpaa2_eth_swa *)buffer_start;

	if (fd_format == dpaa2_fd_single) {
		if (swa->type == DPAA2_ETH_SWA_SINGLE) {
			skb = swa->single.skb;
			/* Accessing the skb buffer is safe before dma unmap,
			 * because we didn't map the actual skb shell.
			 */
			dma_unmap_single(dev, fd_addr,
					 skb_tail_pointer(skb) - buffer_start,
					 DMA_BIDIRECTIONAL);
		} else {
			WARN_ONCE(swa->type != DPAA2_ETH_SWA_XDP, "Wrong SWA type");
			dma_unmap_single(dev, fd_addr, swa->xdp.dma_size,
					 DMA_BIDIRECTIONAL);
		}
	} else if (fd_format == dpaa2_fd_sg) {
		if (swa->type == DPAA2_ETH_SWA_SG) {
			skb = swa->sg.skb;

			/* Unmap the scatterlist */
			dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg,
				     DMA_BIDIRECTIONAL);
			kfree(swa->sg.scl);

			/* Unmap the SGT buffer */
			dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
					 DMA_BIDIRECTIONAL);
		} else {
			skb = swa->single.skb;

			/* Unmap the SGT Buffer */
			dma_unmap_single(dev, fd_addr, swa->single.sgt_size,
					 DMA_BIDIRECTIONAL);

			sgt = (struct dpaa2_sg_entry *)(buffer_start +
							priv->tx_data_offset);
			sg_addr = dpaa2_sg_get_addr(sgt);
			dma_unmap_single(dev, sg_addr, skb->len, DMA_BIDIRECTIONAL);
		}
	} else {
		netdev_dbg(priv->net_dev, "Invalid FD format\n");
		return;
	}

	if (swa->type != DPAA2_ETH_SWA_XDP && in_napi) {
		fq->dq_frames++;
		fq->dq_bytes += fd_len;
	}

	if (swa->type == DPAA2_ETH_SWA_XDP) {
		xdp_return_frame(swa->xdp.xdpf);
		return;
	}

	/* Get the timestamp value */
	if (skb->cb[0] == TX_TSTAMP) {
		struct skb_shared_hwtstamps shhwtstamps;
		__le64 *ts = dpaa2_get_ts(buffer_start, true);
		u64 ns;

		memset(&shhwtstamps, 0, sizeof(shhwtstamps));

		ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
		shhwtstamps.hwtstamp = ns_to_ktime(ns);
		skb_tstamp_tx(skb, &shhwtstamps);
	} else if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
		mutex_unlock(&priv->onestep_tstamp_lock);
	}

	/* Free SGT buffer allocated on tx */
	if (fd_format != dpaa2_fd_single) {
		sgt_cache = this_cpu_ptr(priv->sgt_cache);
		if (swa->type == DPAA2_ETH_SWA_SG) {
			skb_free_frag(buffer_start);
		} else {
			if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE)
				kfree(buffer_start);
			else
				sgt_cache->buf[sgt_cache->count++] = buffer_start;
		}
	}

	/* Move on with skb release */
	napi_consume_skb(skb, in_napi);
}

static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb,
				  struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpaa2_fd fd;
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_drv_stats *percpu_extras;
	struct dpaa2_eth_fq *fq;
	struct netdev_queue *nq;
	u16 queue_mapping;
	unsigned int needed_headroom;
	u32 fd_len;
	u8 prio = 0;
	int err, i;
	void *swa;

	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	percpu_extras = this_cpu_ptr(priv->percpu_extras);

	needed_headroom = dpaa2_eth_needed_headroom(skb);

	/* We'll be holding a back-reference to the skb until Tx Confirmation;
	 * we don't want that overwritten by a concurrent Tx with a cloned skb.
	 */
	skb = skb_unshare(skb, GFP_ATOMIC);
	if (unlikely(!skb)) {
		/* skb_unshare() has already freed the skb */
		percpu_stats->tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* Setup the FD fields */
	memset(&fd, 0, sizeof(fd));

	if (skb_is_nonlinear(skb)) {
		err = dpaa2_eth_build_sg_fd(priv, skb, &fd, &swa);
		percpu_extras->tx_sg_frames++;
		percpu_extras->tx_sg_bytes += skb->len;
	} else if (skb_headroom(skb) < needed_headroom) {
		err = dpaa2_eth_build_sg_fd_single_buf(priv, skb, &fd, &swa);
		percpu_extras->tx_sg_frames++;
		percpu_extras->tx_sg_bytes += skb->len;
		percpu_extras->tx_converted_sg_frames++;
		percpu_extras->tx_converted_sg_bytes += skb->len;
	} else {
		err = dpaa2_eth_build_single_fd(priv, skb, &fd, &swa);
	}

	if (unlikely(err)) {
		percpu_stats->tx_dropped++;
		goto err_build_fd;
	}

	if (skb->cb[0])
		dpaa2_eth_enable_tx_tstamp(priv, &fd, swa, skb);

	/* Tracing point */
	trace_dpaa2_tx_fd(net_dev, &fd);

	/* TxConf FQ selection relies on queue id from the stack.
	 * In case of a forwarded frame from another DPNI interface, we choose
	 * a queue affined to the same core that processed the Rx frame
	 */
	queue_mapping = skb_get_queue_mapping(skb);

	if (net_dev->num_tc) {
		prio = netdev_txq_to_tc(net_dev, queue_mapping);
		/* Hardware interprets priority level 0 as being the highest,
		 * so we need to do a reverse mapping to the netdev tc index
		 */
		prio = net_dev->num_tc - prio - 1;
		/* We have only one FQ array entry for all Tx hardware queues
		 * with the same flow id (but different priority levels)
		 */
		queue_mapping %= dpaa2_eth_queue_count(priv);
	}
	fq = &priv->fq[queue_mapping];

	fd_len = dpaa2_fd_get_len(&fd);
	nq = netdev_get_tx_queue(net_dev, queue_mapping);
	netdev_tx_sent_queue(nq, fd_len);

	/* Everything that happens after this enqueues might race with
	 * the Tx confirmation callback for this frame
	 */
	for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
		err = priv->enqueue(priv, fq, &fd, prio, 1, NULL);
		if (err != -EBUSY)
			break;
	}
	percpu_extras->tx_portal_busy += i;
	if (unlikely(err < 0)) {
		percpu_stats->tx_errors++;
		/* Clean up everything, including freeing the skb */
		dpaa2_eth_free_tx_fd(priv, fq, &fd, false);
		netdev_tx_completed_queue(nq, 1, fd_len);
	} else {
		percpu_stats->tx_packets++;
		percpu_stats->tx_bytes += fd_len;
	}

	return NETDEV_TX_OK;

err_build_fd:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

static void dpaa2_eth_tx_onestep_tstamp(struct work_struct *work)
{
	struct dpaa2_eth_priv *priv = container_of(work, struct dpaa2_eth_priv,
						   tx_onestep_tstamp);
	struct sk_buff *skb;

	while (true) {
		skb = skb_dequeue(&priv->tx_skbs);
		if (!skb)
			return;

		/* Lock just before transmitting a one-step timestamping
		 * packet; the lock is released in dpaa2_eth_free_tx_fd()
		 * once we confirm the packet has been sent on hardware,
		 * or when cleaning up after a transmit failure.
		 */
		mutex_lock(&priv->onestep_tstamp_lock);
		__dpaa2_eth_tx(skb, priv->net_dev);
	}
}

static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	u8 msgtype, twostep, udp;
	u16 offset1, offset2;

	/* Utilize skb->cb[0] for timestamping request per skb */
	skb->cb[0] = 0;

	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && dpaa2_ptp) {
		if (priv->tx_tstamp_type == HWTSTAMP_TX_ON)
			skb->cb[0] = TX_TSTAMP;
		else if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC)
			skb->cb[0] = TX_TSTAMP_ONESTEP_SYNC;
	}

	/* TX for one-step timestamping PTP Sync packet */
	if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
		if (!dpaa2_eth_ptp_parse(skb, &msgtype, &twostep, &udp,
					 &offset1, &offset2))
			if (msgtype == PTP_MSGTYPE_SYNC && twostep == 0) {
				skb_queue_tail(&priv->tx_skbs, skb);
				queue_work(priv->dpaa2_ptp_wq,
					   &priv->tx_onestep_tstamp);
				return NETDEV_TX_OK;
			}
		/* Use two-step timestamping if not one-step timestamping
		 * PTP Sync packet
		 */
		skb->cb[0] = TX_TSTAMP;
	}

	/* TX for other packets */
	return __dpaa2_eth_tx(skb, net_dev);
}

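/* Editor's note (illustrative, not upstream code): the dispatch logic in
 * dpaa2_eth_tx() above means only one-step PTP Sync frames take the
 * deferred workqueue path; everything else, including two-step timestamped
 * frames, is transmitted inline via __dpaa2_eth_tx(). The workqueue
 * serializes one-step frames because the single-step register is a shared
 * resource guarded by priv->onestep_tstamp_lock.
 */
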
/* Tx confirmation frame processing routine */
static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
			      struct dpaa2_eth_channel *ch __always_unused,
			      const struct dpaa2_fd *fd,
			      struct dpaa2_eth_fq *fq)
{
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_drv_stats *percpu_extras;
	u32 fd_len = dpaa2_fd_get_len(fd);
	u32 fd_errors;

	/* Tracing point */
	trace_dpaa2_tx_conf_fd(priv->net_dev, fd);

	percpu_extras = this_cpu_ptr(priv->percpu_extras);
	percpu_extras->tx_conf_frames++;
	percpu_extras->tx_conf_bytes += fd_len;

	/* Check frame errors in the FD field */
	fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
	dpaa2_eth_free_tx_fd(priv, fq, fd, true);

	if (likely(!fd_errors))
		return;

	if (net_ratelimit())
		netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
			   fd_errors);

	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	/* Tx-conf logically pertains to the egress path. */
	percpu_stats->tx_errors++;
}

static int dpaa2_eth_set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
{
	int err;

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_RX_L3_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev,
			   "dpni_set_offload(RX_L3_CSUM) failed\n");
		return err;
	}

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_RX_L4_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev,
			   "dpni_set_offload(RX_L4_CSUM) failed\n");
		return err;
	}

	return 0;
}

static int dpaa2_eth_set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
{
	int err;

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_TX_L3_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n");
		return err;
	}

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_TX_L4_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n");
		return err;
	}

	return 0;
}

/* Perform a single release command to add buffers
 * to the specified buffer pool
 */
static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv,
			      struct dpaa2_eth_channel *ch, u16 bpid)
{
	struct device *dev = priv->net_dev->dev.parent;
	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
	struct page *page;
	dma_addr_t addr;
	int retries = 0;
	int i, err;

	for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
		/* Allocate one page for each Rx buffer. WRIOP sees
		 * the entire page except for a tailroom reserved for
		 * skb shared info
		 */
		page = dev_alloc_pages(0);
		if (!page)
			goto err_alloc;

		addr = dma_map_page(dev, page, 0, priv->rx_buf_size,
				    DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(dev, addr)))
			goto err_map;

		buf_array[i] = addr;

		/* tracing point */
		trace_dpaa2_eth_buf_seed(priv->net_dev,
					 page, DPAA2_ETH_RX_BUF_RAW_SIZE,
					 addr, priv->rx_buf_size,
					 bpid);
	}

release_bufs:
	/* In case the portal is busy, retry until successful */
	while ((err = dpaa2_io_service_release(ch->dpio, bpid,
					       buf_array, i)) == -EBUSY) {
		if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
			break;
		cpu_relax();
	}

	/* If release command failed, clean up and bail out;
	 * not much else we can do about it
	 */
	if (err) {
		dpaa2_eth_free_bufs(priv, buf_array, i);
		return 0;
	}

	return i;

err_map:
	__free_pages(page, 0);
err_alloc:
	/* If we managed to allocate at least some buffers,
	 * release them to hardware
	 */
	if (i)
		goto release_bufs;

	return 0;
}

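/* Editor's note (illustrative, not upstream code): buffers are released to
 * the pool in fixed-size batches of DPAA2_ETH_BUFS_PER_CMD, so seeding
 * DPAA2_ETH_NUM_BUFS buffers takes DPAA2_ETH_NUM_BUFS / DPAA2_ETH_BUFS_PER_CMD
 * release commands per channel, which is exactly how dpaa2_eth_seed_pool()
 * below iterates.
 */
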
static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
{
	int i, j;
	int new_count;

	for (j = 0; j < priv->num_channels; j++) {
		for (i = 0; i < DPAA2_ETH_NUM_BUFS;
		     i += DPAA2_ETH_BUFS_PER_CMD) {
			new_count = dpaa2_eth_add_bufs(priv, priv->channel[j], bpid);
			priv->channel[j]->buf_count += new_count;

			if (new_count < DPAA2_ETH_BUFS_PER_CMD)
				return -ENOMEM;
		}
	}

	return 0;
}

/*
 * Drain the specified number of buffers from the DPNI's private buffer pool.
 * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
 */
static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int count)
{
	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
	int retries = 0;
	int ret;

	do {
		ret = dpaa2_io_service_acquire(NULL, priv->bpid,
					       buf_array, count);
		if (ret < 0) {
			if (ret == -EBUSY &&
			    retries++ < DPAA2_ETH_SWP_BUSY_RETRIES)
				continue;
			netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
			return;
		}
		dpaa2_eth_free_bufs(priv, buf_array, ret);
		retries = 0;
	} while (ret);
}

static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv)
{
	int i;

	dpaa2_eth_drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
	dpaa2_eth_drain_bufs(priv, 1);

	for (i = 0; i < priv->num_channels; i++)
		priv->channel[i]->buf_count = 0;
}

/* Function is called from softirq context only, so we don't need to guard
 * the access to percpu count
 */
static int dpaa2_eth_refill_pool(struct dpaa2_eth_priv *priv,
				 struct dpaa2_eth_channel *ch,
				 u16 bpid)
{
	int new_count;

	if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
		return 0;

	do {
		new_count = dpaa2_eth_add_bufs(priv, ch, bpid);
		if (unlikely(!new_count)) {
			/* Out of memory; abort for now, we'll try later on */
			break;
		}
		ch->buf_count += new_count;
	} while (ch->buf_count < DPAA2_ETH_NUM_BUFS);

	if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
		return -ENOMEM;

	return 0;
}

static void dpaa2_eth_sgt_cache_drain(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_sgt_cache *sgt_cache;
	u16 count;
	int k, i;

	for_each_possible_cpu(k) {
		sgt_cache = per_cpu_ptr(priv->sgt_cache, k);
		count = sgt_cache->count;

		for (i = 0; i < count; i++)
			kfree(sgt_cache->buf[i]);
		sgt_cache->count = 0;
	}
}

static int dpaa2_eth_pull_channel(struct dpaa2_eth_channel *ch)
{
	int err;
	int dequeues = -1;

	/* Retry while portal is busy */
	do {
		err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id,
						    ch->store);
		dequeues++;
		cpu_relax();
	} while (err == -EBUSY && dequeues < DPAA2_ETH_SWP_BUSY_RETRIES);

	ch->stats.dequeue_portal_busy += dequeues;
	if (unlikely(err))
		ch->stats.pull_err++;

	return err;
}

/* NAPI poll routine
 *
 * Frames are dequeued from the QMan channel associated with this NAPI context.
 * Rx, Tx confirmation and (if configured) Rx error frames all count
 * towards the NAPI budget.
 */
static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
{
	struct dpaa2_eth_channel *ch;
	struct dpaa2_eth_priv *priv;
	int rx_cleaned = 0, txconf_cleaned = 0;
	struct dpaa2_eth_fq *fq, *txc_fq = NULL;
	struct netdev_queue *nq;
	int store_cleaned, work_done;
	struct list_head rx_list;
	int retries = 0;
	u16 flowid;
	int err;

	ch = container_of(napi, struct dpaa2_eth_channel, napi);
	ch->xdp.res = 0;
	priv = ch->priv;

	INIT_LIST_HEAD(&rx_list);
	ch->rx_list = &rx_list;

	do {
		err = dpaa2_eth_pull_channel(ch);
		if (unlikely(err))
			break;

		/* Refill pool if appropriate */
		dpaa2_eth_refill_pool(priv, ch, priv->bpid);

		store_cleaned = dpaa2_eth_consume_frames(ch, &fq);
		if (store_cleaned <= 0)
			break;
		if (fq->type == DPAA2_RX_FQ) {
			rx_cleaned += store_cleaned;
			flowid = fq->flowid;
		} else {
			txconf_cleaned += store_cleaned;
			/* We have a single Tx conf FQ on this channel */
			txc_fq = fq;
		}

		/* If we either consumed the whole NAPI budget with Rx frames
		 * or we reached the Tx confirmations threshold, we're done.
		 */
		if (rx_cleaned >= budget ||
		    txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) {
			work_done = budget;
			goto out;
		}
	} while (store_cleaned);

	/* We didn't consume the entire budget, so finish napi and
	 * re-enable data availability notifications
	 */
	napi_complete_done(napi, rx_cleaned);
	do {
		err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
		cpu_relax();
	} while (err == -EBUSY && retries++ < DPAA2_ETH_SWP_BUSY_RETRIES);
	WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
		  ch->nctx.desired_cpu);

	work_done = max(rx_cleaned, 1);

out:
	netif_receive_skb_list(ch->rx_list);

	if (txc_fq && txc_fq->dq_frames) {
		nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
		netdev_tx_completed_queue(nq, txc_fq->dq_frames,
					  txc_fq->dq_bytes);
		txc_fq->dq_frames = 0;
		txc_fq->dq_bytes = 0;
	}

	if (ch->xdp.res & XDP_REDIRECT)
		xdp_do_flush_map();
	else if (rx_cleaned && ch->xdp.res & XDP_TX)
		dpaa2_eth_xdp_tx_flush(priv, ch, &priv->fq[flowid]);

	return work_done;
}

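/* Editor's note (illustrative, not upstream code): the NAPI loop above
 * keeps pulling from the channel until either the Rx budget is consumed,
 * DPAA2_ETH_TXCONF_PER_NAPI Tx confirmations have been processed, or a
 * pull returns an empty store. Only in the ran-out-of-work case does it
 * call napi_complete_done() and rearm the channel data availability
 * notifications (CDANs).
 */
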
5d8dccf8 1586static void dpaa2_eth_enable_ch_napi(struct dpaa2_eth_priv *priv)
6e2387e8
IR
1587{
1588 struct dpaa2_eth_channel *ch;
1589 int i;
1590
1591 for (i = 0; i < priv->num_channels; i++) {
1592 ch = priv->channel[i];
1593 napi_enable(&ch->napi);
1594 }
1595}
1596
5d8dccf8 1597static void dpaa2_eth_disable_ch_napi(struct dpaa2_eth_priv *priv)
6e2387e8
IR
1598{
1599 struct dpaa2_eth_channel *ch;
1600 int i;
1601
1602 for (i = 0; i < priv->num_channels; i++) {
1603 ch = priv->channel[i];
1604 napi_disable(&ch->napi);
1605 }
1606}
1607
07beb165
IC
1608void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv,
1609 bool tx_pause, bool pfc)
8eb3cef8
IR
1610{
1611 struct dpni_taildrop td = {0};
685e39ea 1612 struct dpaa2_eth_fq *fq;
8eb3cef8
IR
1613 int i, err;
1614
07beb165
IC
1615 /* FQ taildrop: threshold is in bytes, per frame queue. Enabled if
1616 * flow control is disabled (as it might interfere with either the
1617 * buffer pool depletion trigger for pause frames or with the group
1618 * congestion trigger for PFC frames)
1619 */
2c8d1c8d 1620 td.enable = !tx_pause;
07beb165
IC
1621 if (priv->rx_fqtd_enabled == td.enable)
1622 goto set_cgtd;
8eb3cef8 1623
2c8d1c8d
IR
1624 td.threshold = DPAA2_ETH_FQ_TAILDROP_THRESH;
1625 td.units = DPNI_CONGESTION_UNIT_BYTES;
8eb3cef8
IR
1626
1627 for (i = 0; i < priv->num_fqs; i++) {
685e39ea
IR
1628 fq = &priv->fq[i];
1629 if (fq->type != DPAA2_RX_FQ)
8eb3cef8
IR
1630 continue;
1631 err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
685e39ea
IR
1632 DPNI_CP_QUEUE, DPNI_QUEUE_RX,
1633 fq->tc, fq->flowid, &td);
8eb3cef8
IR
1634 if (err) {
1635 netdev_err(priv->net_dev,
2c8d1c8d
IR
1636 "dpni_set_taildrop(FQ) failed\n");
1637 return;
1638 }
1639 }
1640
07beb165
IC
1641 priv->rx_fqtd_enabled = td.enable;
1642
1643set_cgtd:
2c8d1c8d
IR
1644 /* Congestion group taildrop: threshold is in frames, per group
1645 * of FQs belonging to the same traffic class
07beb165
IC
1646 * Enabled if general Tx pause disabled or if PFCs are enabled
1647 * (congestion group threhsold for PFC generation is lower than the
1648 * CG taildrop threshold, so it won't interfere with it; we also
1649 * want frames in non-PFC enabled traffic classes to be kept in check)
2c8d1c8d 1650 */
07beb165
IC
1651 td.enable = !tx_pause || (tx_pause && pfc);
1652 if (priv->rx_cgtd_enabled == td.enable)
1653 return;
1654
2c8d1c8d
IR
1655 td.threshold = DPAA2_ETH_CG_TAILDROP_THRESH(priv);
1656 td.units = DPNI_CONGESTION_UNIT_FRAMES;
1657 for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
1658 err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
1659 DPNI_CP_GROUP, DPNI_QUEUE_RX,
1660 i, 0, &td);
1661 if (err) {
1662 netdev_err(priv->net_dev,
1663 "dpni_set_taildrop(CG) failed\n");
1664 return;
8eb3cef8
IR
1665 }
1666 }
1667
07beb165 1668 priv->rx_cgtd_enabled = td.enable;
8eb3cef8
IR
1669}
1670
5d8dccf8 1671static int dpaa2_eth_link_state_update(struct dpaa2_eth_priv *priv)
6e2387e8 1672{
85b7a342 1673 struct dpni_link_state state = {0};
8eb3cef8 1674 bool tx_pause;
6e2387e8
IR
1675 int err;
1676
1677 err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
1678 if (unlikely(err)) {
1679 netdev_err(priv->net_dev,
1680 "dpni_get_link_state() failed\n");
1681 return err;
1682 }
1683
8eb3cef8
IR
1684 /* If Tx pause frame settings have changed, we need to update
1685 * Rx FQ taildrop configuration as well. We configure taildrop
1686 * only when pause frame generation is disabled.
1687 */
ad054f26 1688 tx_pause = dpaa2_eth_tx_pause_enabled(state.options);
07beb165 1689 dpaa2_eth_set_rx_taildrop(priv, tx_pause, priv->pfc_enabled);
8eb3cef8 1690
71947923
IC
1691 /* When we manage the MAC/PHY using phylink there is no need
1692 * to manually update the netif_carrier.
1693 */
d87e6063 1694 if (dpaa2_eth_is_type_phy(priv))
71947923
IC
1695 goto out;
1696
6e2387e8
IR
1697 /* Chech link state; speed / duplex changes are not treated yet */
1698 if (priv->link_state.up == state.up)
cce62943 1699 goto out;
6e2387e8 1700
6e2387e8
IR
1701 if (state.up) {
1702 netif_carrier_on(priv->net_dev);
1703 netif_tx_start_all_queues(priv->net_dev);
1704 } else {
1705 netif_tx_stop_all_queues(priv->net_dev);
1706 netif_carrier_off(priv->net_dev);
1707 }
1708
77160af3 1709 netdev_info(priv->net_dev, "Link Event: state %s\n",
6e2387e8
IR
1710 state.up ? "up" : "down");
1711
cce62943
IR
1712out:
1713 priv->link_state = state;
1714
6e2387e8
IR
1715 return 0;
1716}
1717
1718static int dpaa2_eth_open(struct net_device *net_dev)
1719{
1720 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1721 int err;
1722
5d8dccf8 1723 err = dpaa2_eth_seed_pool(priv, priv->bpid);
6e2387e8
IR
1724 if (err) {
1725 /* Not much to do; the buffer pool, though not filled up,
1726 * may still contain some buffers which would enable us
1727 * to limp on.
1728 */
1729 netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
05fa39c6 1730 priv->dpbp_dev->obj_desc.id, priv->bpid);
6e2387e8
IR
1731 }
1732
d87e6063 1733 if (!dpaa2_eth_is_type_phy(priv)) {
71947923
IC
1734 /* We'll only start the txqs when the link is actually ready;
1735 * make sure we don't race against the link up notification,
 1736 * which may come immediately after dpni_enable().
1737 */
1738 netif_tx_stop_all_queues(net_dev);
1739
1740 /* Also, explicitly set carrier off, otherwise
1741 * netif_carrier_ok() will return true and cause 'ip link show'
1742 * to report the LOWER_UP flag, even though the link
1743 * notification wasn't even received.
1744 */
1745 netif_carrier_off(net_dev);
1746 }
5d8dccf8 1747 dpaa2_eth_enable_ch_napi(priv);
6e2387e8
IR
1748
1749 err = dpni_enable(priv->mc_io, 0, priv->mc_token);
1750 if (err < 0) {
1751 netdev_err(net_dev, "dpni_enable() failed\n");
1752 goto enable_err;
1753 }
1754
d87e6063 1755 if (dpaa2_eth_is_type_phy(priv))
71947923 1756 phylink_start(priv->mac->phylink);
6e2387e8
IR
1757
1758 return 0;
1759
6e2387e8 1760enable_err:
5d8dccf8
IC
1761 dpaa2_eth_disable_ch_napi(priv);
1762 dpaa2_eth_drain_pool(priv);
6e2387e8
IR
1763 return err;
1764}
1765
68d74315 1766/* Total number of in-flight frames on ingress queues */
5d8dccf8 1767static u32 dpaa2_eth_ingress_fq_count(struct dpaa2_eth_priv *priv)
6e2387e8 1768{
68d74315
ICR
1769 struct dpaa2_eth_fq *fq;
1770 u32 fcnt = 0, bcnt = 0, total = 0;
1771 int i, err;
6e2387e8 1772
68d74315
ICR
1773 for (i = 0; i < priv->num_fqs; i++) {
1774 fq = &priv->fq[i];
1775 err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
1776 if (err) {
 1777 netdev_warn(priv->net_dev, "query_fq_count failed\n");
1778 break;
1779 }
1780 total += fcnt;
1781 }
6e2387e8
IR
1782
1783 return total;
1784}
1785
5d8dccf8 1786static void dpaa2_eth_wait_for_ingress_fq_empty(struct dpaa2_eth_priv *priv)
6e2387e8 1787{
68d74315
ICR
1788 int retries = 10;
1789 u32 pending;
6e2387e8 1790
68d74315 1791 do {
5d8dccf8 1792 pending = dpaa2_eth_ingress_fq_count(priv);
68d74315
ICR
1793 if (pending)
1794 msleep(100);
1795 } while (pending && --retries);
6e2387e8
IR
1796}
1797
52b6a4ff
IR
1798#define DPNI_TX_PENDING_VER_MAJOR 7
1799#define DPNI_TX_PENDING_VER_MINOR 13
5d8dccf8 1800static void dpaa2_eth_wait_for_egress_fq_empty(struct dpaa2_eth_priv *priv)
52b6a4ff
IR
1801{
1802 union dpni_statistics stats;
1803 int retries = 10;
1804 int err;
1805
1806 if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_TX_PENDING_VER_MAJOR,
1807 DPNI_TX_PENDING_VER_MINOR) < 0)
1808 goto out;
1809
1810 do {
1811 err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token, 6,
1812 &stats);
1813 if (err)
1814 goto out;
1815 if (stats.page_6.tx_pending_frames == 0)
1816 return;
1817 } while (--retries);
1818
1819out:
1820 msleep(500);
1821}
1822
6e2387e8
IR
1823static int dpaa2_eth_stop(struct net_device *net_dev)
1824{
1825 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
85b7a342 1826 int dpni_enabled = 0;
6e2387e8 1827 int retries = 10;
6e2387e8 1828
d87e6063
IC
1829 if (dpaa2_eth_is_type_phy(priv)) {
1830 phylink_stop(priv->mac->phylink);
1831 } else {
71947923
IC
1832 netif_tx_stop_all_queues(net_dev);
1833 netif_carrier_off(net_dev);
71947923 1834 }
6e2387e8 1835
68d74315
ICR
1836 /* On dpni_disable(), the MC firmware will:
1837 * - stop MAC Rx and wait for all Rx frames to be enqueued to software
1838 * - cut off WRIOP dequeues from egress FQs and wait until transmission
1839 * of all in flight Tx frames is finished (and corresponding Tx conf
1840 * frames are enqueued back to software)
1841 *
1842 * Before calling dpni_disable(), we wait for all Tx frames to arrive
1843 * on WRIOP. After it finishes, wait until all remaining frames on Rx
1844 * and Tx conf queues are consumed on NAPI poll.
6e2387e8 1845 */
5d8dccf8 1846 dpaa2_eth_wait_for_egress_fq_empty(priv);
68d74315 1847
6e2387e8
IR
1848 do {
1849 dpni_disable(priv->mc_io, 0, priv->mc_token);
1850 dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
1851 if (dpni_enabled)
1852 /* Allow the hardware some slack */
1853 msleep(100);
1854 } while (dpni_enabled && --retries);
1855 if (!retries) {
1856 netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
1857 /* Must go on and disable NAPI nonetheless, so we don't crash at
1858 * the next "ifconfig up"
1859 */
1860 }
1861
5d8dccf8
IC
1862 dpaa2_eth_wait_for_ingress_fq_empty(priv);
1863 dpaa2_eth_disable_ch_napi(priv);
6e2387e8 1864
6e2387e8 1865 /* Empty the buffer pool */
5d8dccf8 1866 dpaa2_eth_drain_pool(priv);
6e2387e8 1867
d70446ee
IC
1868 /* Empty the Scatter-Gather Buffer cache */
1869 dpaa2_eth_sgt_cache_drain(priv);
1870
6e2387e8
IR
1871 return 0;
1872}
1873
6e2387e8
IR
1874static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
1875{
1876 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1877 struct device *dev = net_dev->dev.parent;
1878 int err;
1879
1880 err = eth_mac_addr(net_dev, addr);
1881 if (err < 0) {
1882 dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
1883 return err;
1884 }
1885
1886 err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
1887 net_dev->dev_addr);
1888 if (err) {
1889 dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
1890 return err;
1891 }
1892
1893 return 0;
1894}
1895
1896/** Fill in counters maintained by the GPP driver. These may be different from
1897 * the hardware counters obtained by ethtool.
1898 */
acbff8e3
IR
1899static void dpaa2_eth_get_stats(struct net_device *net_dev,
1900 struct rtnl_link_stats64 *stats)
6e2387e8
IR
1901{
1902 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1903 struct rtnl_link_stats64 *percpu_stats;
1904 u64 *cpustats;
1905 u64 *netstats = (u64 *)stats;
1906 int i, j;
1907 int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);
1908
1909 for_each_possible_cpu(i) {
1910 percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
1911 cpustats = (u64 *)percpu_stats;
1912 for (j = 0; j < num; j++)
1913 netstats[j] += cpustats[j];
1914 }
1915}
1916
6e2387e8
IR
1917/* Copy mac unicast addresses from @net_dev to @priv.
1918 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
1919 */
5d8dccf8
IC
1920static void dpaa2_eth_add_uc_hw_addr(const struct net_device *net_dev,
1921 struct dpaa2_eth_priv *priv)
6e2387e8
IR
1922{
1923 struct netdev_hw_addr *ha;
1924 int err;
1925
1926 netdev_for_each_uc_addr(ha, net_dev) {
1927 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
1928 ha->addr);
1929 if (err)
1930 netdev_warn(priv->net_dev,
1931 "Could not add ucast MAC %pM to the filtering table (err %d)\n",
1932 ha->addr, err);
1933 }
1934}
1935
1936/* Copy mac multicast addresses from @net_dev to @priv
1937 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
1938 */
5d8dccf8
IC
1939static void dpaa2_eth_add_mc_hw_addr(const struct net_device *net_dev,
1940 struct dpaa2_eth_priv *priv)
6e2387e8
IR
1941{
1942 struct netdev_hw_addr *ha;
1943 int err;
1944
1945 netdev_for_each_mc_addr(ha, net_dev) {
1946 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
1947 ha->addr);
1948 if (err)
1949 netdev_warn(priv->net_dev,
1950 "Could not add mcast MAC %pM to the filtering table (err %d)\n",
1951 ha->addr, err);
1952 }
1953}
1954
1955static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
1956{
1957 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1958 int uc_count = netdev_uc_count(net_dev);
1959 int mc_count = netdev_mc_count(net_dev);
1960 u8 max_mac = priv->dpni_attrs.mac_filter_entries;
1961 u32 options = priv->dpni_attrs.options;
1962 u16 mc_token = priv->mc_token;
1963 struct fsl_mc_io *mc_io = priv->mc_io;
1964 int err;
1965
1966 /* Basic sanity checks; these probably indicate a misconfiguration */
1967 if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0)
1968 netdev_info(net_dev,
1969 "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
1970 max_mac);
1971
1972 /* Force promiscuous if the uc or mc counts exceed our capabilities. */
1973 if (uc_count > max_mac) {
1974 netdev_info(net_dev,
1975 "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
1976 uc_count, max_mac);
1977 goto force_promisc;
1978 }
1979 if (mc_count + uc_count > max_mac) {
1980 netdev_info(net_dev,
1981 "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n",
1982 uc_count + mc_count, max_mac);
1983 goto force_mc_promisc;
1984 }
1985
1986 /* Adjust promisc settings due to flag combinations */
1987 if (net_dev->flags & IFF_PROMISC)
1988 goto force_promisc;
1989 if (net_dev->flags & IFF_ALLMULTI) {
1990 /* First, rebuild unicast filtering table. This should be done
1991 * in promisc mode, in order to avoid frame loss while we
1992 * progressively add entries to the table.
1993 * We don't know whether we had been in promisc already, and
1994 * making an MC call to find out is expensive; so set uc promisc
1995 * nonetheless.
1996 */
1997 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
1998 if (err)
1999 netdev_warn(net_dev, "Can't set uc promisc\n");
2000
2001 /* Actual uc table reconstruction. */
2002 err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
2003 if (err)
2004 netdev_warn(net_dev, "Can't clear uc filters\n");
5d8dccf8 2005 dpaa2_eth_add_uc_hw_addr(net_dev, priv);
6e2387e8
IR
2006
2007 /* Finally, clear uc promisc and set mc promisc as requested. */
2008 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
2009 if (err)
2010 netdev_warn(net_dev, "Can't clear uc promisc\n");
2011 goto force_mc_promisc;
2012 }
2013
 2014 /* Neither unicast nor multicast promisc will be on... eventually.
2015 * For now, rebuild mac filtering tables while forcing both of them on.
2016 */
2017 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
2018 if (err)
2019 netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
2020 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
2021 if (err)
2022 netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);
2023
2024 /* Actual mac filtering tables reconstruction */
2025 err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
2026 if (err)
2027 netdev_warn(net_dev, "Can't clear mac filters\n");
5d8dccf8
IC
2028 dpaa2_eth_add_mc_hw_addr(net_dev, priv);
2029 dpaa2_eth_add_uc_hw_addr(net_dev, priv);
6e2387e8
IR
2030
2031 /* Now we can clear both ucast and mcast promisc, without risking
2032 * to drop legitimate frames anymore.
2033 */
2034 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
2035 if (err)
2036 netdev_warn(net_dev, "Can't clear ucast promisc\n");
2037 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
2038 if (err)
2039 netdev_warn(net_dev, "Can't clear mcast promisc\n");
2040
2041 return;
2042
2043force_promisc:
2044 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
2045 if (err)
2046 netdev_warn(net_dev, "Can't set ucast promisc\n");
2047force_mc_promisc:
2048 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
2049 if (err)
2050 netdev_warn(net_dev, "Can't set mcast promisc\n");
2051}
2052
2053static int dpaa2_eth_set_features(struct net_device *net_dev,
2054 netdev_features_t features)
2055{
2056 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2057 netdev_features_t changed = features ^ net_dev->features;
2058 bool enable;
2059 int err;
2060
2061 if (changed & NETIF_F_RXCSUM) {
2062 enable = !!(features & NETIF_F_RXCSUM);
5d8dccf8 2063 err = dpaa2_eth_set_rx_csum(priv, enable);
6e2387e8
IR
2064 if (err)
2065 return err;
2066 }
2067
2068 if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
2069 enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
5d8dccf8 2070 err = dpaa2_eth_set_tx_csum(priv, enable);
6e2387e8
IR
2071 if (err)
2072 return err;
2073 }
2074
2075 return 0;
2076}
2077
859f998e
IR
2078static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2079{
2080 struct dpaa2_eth_priv *priv = netdev_priv(dev);
2081 struct hwtstamp_config config;
2082
c5521189
YL
2083 if (!dpaa2_ptp)
2084 return -EINVAL;
2085
859f998e
IR
2086 if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
2087 return -EFAULT;
2088
2089 switch (config.tx_type) {
2090 case HWTSTAMP_TX_OFF:
859f998e 2091 case HWTSTAMP_TX_ON:
c5521189 2092 case HWTSTAMP_TX_ONESTEP_SYNC:
1cf773bd 2093 priv->tx_tstamp_type = config.tx_type;
859f998e
IR
2094 break;
2095 default:
2096 return -ERANGE;
2097 }
2098
2099 if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
2100 priv->rx_tstamp = false;
2101 } else {
2102 priv->rx_tstamp = true;
2103 /* TS is set for all frame types, not only those requested */
2104 config.rx_filter = HWTSTAMP_FILTER_ALL;
2105 }
2106
2107 return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
2108 -EFAULT : 0;
2109}
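/* Userspace sketch (illustrative only; the socket fd, the "eth0" name and
 * the missing error handling are placeholders): requesting one-step sync
 * Tx timestamps plus all-frame Rx timestamps via the standard
 * SIOCSHWTSTAMP ioctl, which is handled by dpaa2_eth_ts_ioctl() above:
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ONESTEP_SYNC,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr = { 0 };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * Note the driver upgrades any non-NONE rx_filter to HWTSTAMP_FILTER_ALL
 * and reports that back to the caller.
 */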
2110
2111static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2112{
4a84182a
RK
2113 struct dpaa2_eth_priv *priv = netdev_priv(dev);
2114
859f998e
IR
2115 if (cmd == SIOCSHWTSTAMP)
2116 return dpaa2_eth_ts_ioctl(dev, rq, cmd);
2117
d87e6063 2118 if (dpaa2_eth_is_type_phy(priv))
4a84182a
RK
2119 return phylink_mii_ioctl(priv->mac->phylink, rq, cmd);
2120
2121 return -EOPNOTSUPP;
859f998e
IR
2122}
2123
7e273a8e
ICR
2124static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu)
2125{
2126 int mfl, linear_mfl;
2127
2128 mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
efa6a7d0 2129 linear_mfl = priv->rx_buf_size - DPAA2_ETH_RX_HWA_SIZE -
7b1eea1a 2130 dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM;
7e273a8e
ICR
2131
2132 if (mfl > linear_mfl) {
2133 netdev_warn(priv->net_dev, "Maximum MTU for XDP is %d\n",
2134 linear_mfl - VLAN_ETH_HLEN);
2135 return false;
2136 }
2137
2138 return true;
2139}
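/* Worked example with illustrative numbers (the real values depend on
 * the WRIOP revision and buffer layout): assuming a 2048-byte Rx buffer,
 * a 64-byte hardware annotation area (DPAA2_ETH_RX_HWA_SIZE), 256 bytes
 * of driver headroom and XDP_PACKET_HEADROOM of 256, then
 * linear_mfl = 2048 - 64 - 256 - 256 = 1472, making the largest
 * XDP-compatible MTU 1472 - VLAN_ETH_HLEN = 1454.
 */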
2140
5d8dccf8 2141static int dpaa2_eth_set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp)
7e273a8e
ICR
2142{
2143 int mfl, err;
2144
2145 /* We enforce a maximum Rx frame length based on MTU only if we have
2146 * an XDP program attached (in order to avoid Rx S/G frames).
2147 * Otherwise, we accept all incoming frames as long as they are not
 2148 * larger than the maximum size supported in hardware
2149 */
2150 if (has_xdp)
2151 mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
2152 else
2153 mfl = DPAA2_ETH_MFL;
2154
2155 err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, mfl);
2156 if (err) {
2157 netdev_err(priv->net_dev, "dpni_set_max_frame_length failed\n");
2158 return err;
2159 }
2160
2161 return 0;
2162}
2163
2164static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu)
2165{
2166 struct dpaa2_eth_priv *priv = netdev_priv(dev);
2167 int err;
2168
2169 if (!priv->xdp_prog)
2170 goto out;
2171
2172 if (!xdp_mtu_valid(priv, new_mtu))
2173 return -EINVAL;
2174
5d8dccf8 2175 err = dpaa2_eth_set_rx_mfl(priv, new_mtu, true);
7e273a8e
ICR
2176 if (err)
2177 return err;
2178
2179out:
2180 dev->mtu = new_mtu;
2181 return 0;
2182}
2183
5d8dccf8 2184static int dpaa2_eth_update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp)
7b1eea1a
ICR
2185{
2186 struct dpni_buffer_layout buf_layout = {0};
2187 int err;
2188
2189 err = dpni_get_buffer_layout(priv->mc_io, 0, priv->mc_token,
2190 DPNI_QUEUE_RX, &buf_layout);
2191 if (err) {
2192 netdev_err(priv->net_dev, "dpni_get_buffer_layout failed\n");
2193 return err;
2194 }
2195
2196 /* Reserve extra headroom for XDP header size changes */
2197 buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv) +
2198 (has_xdp ? XDP_PACKET_HEADROOM : 0);
2199 buf_layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
2200 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
2201 DPNI_QUEUE_RX, &buf_layout);
2202 if (err) {
2203 netdev_err(priv->net_dev, "dpni_set_buffer_layout failed\n");
2204 return err;
2205 }
2206
2207 return 0;
2208}
2209
5d8dccf8 2210static int dpaa2_eth_setup_xdp(struct net_device *dev, struct bpf_prog *prog)
7e273a8e
ICR
2211{
2212 struct dpaa2_eth_priv *priv = netdev_priv(dev);
2213 struct dpaa2_eth_channel *ch;
2214 struct bpf_prog *old;
2215 bool up, need_update;
2216 int i, err;
2217
2218 if (prog && !xdp_mtu_valid(priv, dev->mtu))
2219 return -EINVAL;
2220
85192dbf
AN
2221 if (prog)
2222 bpf_prog_add(prog, priv->num_channels);
7e273a8e
ICR
2223
2224 up = netif_running(dev);
2225 need_update = (!!priv->xdp_prog != !!prog);
2226
2227 if (up)
2228 dpaa2_eth_stop(dev);
2229
7b1eea1a
ICR
2230 /* While in xdp mode, enforce a maximum Rx frame size based on MTU.
2231 * Also, when switching between xdp/non-xdp modes we need to reconfigure
2232 * our Rx buffer layout. Buffer pool was drained on dpaa2_eth_stop,
2233 * so we are sure no old format buffers will be used from now on.
2234 */
7e273a8e 2235 if (need_update) {
5d8dccf8 2236 err = dpaa2_eth_set_rx_mfl(priv, dev->mtu, !!prog);
7e273a8e
ICR
2237 if (err)
2238 goto out_err;
5d8dccf8 2239 err = dpaa2_eth_update_rx_buffer_headroom(priv, !!prog);
7b1eea1a
ICR
2240 if (err)
2241 goto out_err;
7e273a8e
ICR
2242 }
2243
2244 old = xchg(&priv->xdp_prog, prog);
2245 if (old)
2246 bpf_prog_put(old);
2247
2248 for (i = 0; i < priv->num_channels; i++) {
2249 ch = priv->channel[i];
2250 old = xchg(&ch->xdp.prog, prog);
2251 if (old)
2252 bpf_prog_put(old);
2253 }
2254
2255 if (up) {
2256 err = dpaa2_eth_open(dev);
2257 if (err)
2258 return err;
2259 }
2260
2261 return 0;
2262
2263out_err:
2264 if (prog)
2265 bpf_prog_sub(prog, priv->num_channels);
2266 if (up)
2267 dpaa2_eth_open(dev);
2268
2269 return err;
2270}
2271
2272static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2273{
7e273a8e
ICR
2274 switch (xdp->command) {
2275 case XDP_SETUP_PROG:
5d8dccf8 2276 return dpaa2_eth_setup_xdp(dev, xdp->prog);
7e273a8e
ICR
2277 default:
2278 return -EINVAL;
2279 }
2280
2281 return 0;
2282}
2283
6aa40b9e
IC
2284static int dpaa2_eth_xdp_create_fd(struct net_device *net_dev,
2285 struct xdp_frame *xdpf,
2286 struct dpaa2_fd *fd)
d678be1d 2287{
d678be1d 2288 struct device *dev = net_dev->dev.parent;
d678be1d
IR
2289 unsigned int needed_headroom;
2290 struct dpaa2_eth_swa *swa;
d678be1d
IR
2291 void *buffer_start, *aligned_start;
2292 dma_addr_t addr;
d678be1d
IR
2293
2294 /* We require a minimum headroom to be able to transmit the frame.
2295 * Otherwise return an error and let the original net_device handle it
2296 */
1cf773bd 2297 needed_headroom = dpaa2_eth_needed_headroom(NULL);
d678be1d
IR
2298 if (xdpf->headroom < needed_headroom)
2299 return -EINVAL;
2300
d678be1d 2301 /* Setup the FD fields */
6aa40b9e 2302 memset(fd, 0, sizeof(*fd));
d678be1d
IR
2303
2304 /* Align FD address, if possible */
2305 buffer_start = xdpf->data - needed_headroom;
2306 aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
2307 DPAA2_ETH_TX_BUF_ALIGN);
2308 if (aligned_start >= xdpf->data - xdpf->headroom)
2309 buffer_start = aligned_start;
2310
2311 swa = (struct dpaa2_eth_swa *)buffer_start;
2312 /* fill in necessary fields here */
2313 swa->type = DPAA2_ETH_SWA_XDP;
2314 swa->xdp.dma_size = xdpf->data + xdpf->len - buffer_start;
2315 swa->xdp.xdpf = xdpf;
2316
2317 addr = dma_map_single(dev, buffer_start,
2318 swa->xdp.dma_size,
2319 DMA_BIDIRECTIONAL);
6aa40b9e 2320 if (unlikely(dma_mapping_error(dev, addr)))
d678be1d 2321 return -ENOMEM;
d678be1d 2322
6aa40b9e
IC
2323 dpaa2_fd_set_addr(fd, addr);
2324 dpaa2_fd_set_offset(fd, xdpf->data - buffer_start);
2325 dpaa2_fd_set_len(fd, xdpf->len);
2326 dpaa2_fd_set_format(fd, dpaa2_fd_single);
2327 dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
d678be1d
IR
2328
2329 return 0;
2330}
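/* Illustrative walk-through of the alignment logic above, with made-up
 * addresses and an assumed 64-byte DPAA2_ETH_TX_BUF_ALIGN: if xdpf->data
 * is 0x1050 and needed_headroom is 0x40, buffer_start is 0x1010 and
 * PTR_ALIGN(0x1010 - 0x40, 0x40) rounds 0xfd0 up to 0x1000. If 0x1000
 * still lies within the frame's headroom, the FD address becomes 0x1000
 * with a data offset of 0x50; otherwise the unaligned 0x1010 is kept.
 */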
2331
2332static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
2333 struct xdp_frame **frames, u32 flags)
2334{
6aa40b9e 2335 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
38c440b2 2336 struct dpaa2_eth_xdp_fds *xdp_redirect_fds;
6aa40b9e
IC
2337 struct rtnl_link_stats64 *percpu_stats;
2338 struct dpaa2_eth_fq *fq;
8665d978 2339 struct dpaa2_fd *fds;
38c440b2 2340 int enqueued, i, err;
d678be1d
IR
2341
2342 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
2343 return -EINVAL;
2344
2345 if (!netif_running(net_dev))
2346 return -ENETDOWN;
2347
8665d978 2348 fq = &priv->fq[smp_processor_id()];
38c440b2
IC
2349 xdp_redirect_fds = &fq->xdp_redirect_fds;
2350 fds = xdp_redirect_fds->fds;
8665d978 2351
6aa40b9e 2352 percpu_stats = this_cpu_ptr(priv->percpu_stats);
6aa40b9e 2353
8665d978 2354 /* create a FD for each xdp_frame in the list received */
d678be1d 2355 for (i = 0; i < n; i++) {
8665d978
IC
2356 err = dpaa2_eth_xdp_create_fd(net_dev, frames[i], &fds[i]);
2357 if (err)
2358 break;
2359 }
38c440b2 2360 xdp_redirect_fds->num = i;
6aa40b9e 2361
38c440b2
IC
2362 /* enqueue all the frame descriptors */
2363 enqueued = dpaa2_eth_xdp_flush(priv, fq, xdp_redirect_fds);
d678be1d 2364
8665d978 2365 /* update statistics */
38c440b2
IC
2366 percpu_stats->tx_packets += enqueued;
2367 for (i = 0; i < enqueued; i++)
8665d978 2368 percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
38c440b2 2369 for (i = enqueued; i < n; i++)
8665d978
IC
2370 xdp_return_frame_rx_napi(frames[i]);
2371
38c440b2 2372 return enqueued;
d678be1d
IR
2373}
2374
06d5b179
IR
2375static int update_xps(struct dpaa2_eth_priv *priv)
2376{
2377 struct net_device *net_dev = priv->net_dev;
2378 struct cpumask xps_mask;
2379 struct dpaa2_eth_fq *fq;
ab1e6de2 2380 int i, num_queues, netdev_queues;
06d5b179
IR
2381 int err = 0;
2382
2383 num_queues = dpaa2_eth_queue_count(priv);
ab1e6de2 2384 netdev_queues = (net_dev->num_tc ? : 1) * num_queues;
06d5b179
IR
2385
2386 /* The first <num_queues> entries in priv->fq array are Tx/Tx conf
2387 * queues, so only process those
2388 */
ab1e6de2
IR
2389 for (i = 0; i < netdev_queues; i++) {
2390 fq = &priv->fq[i % num_queues];
06d5b179
IR
2391
2392 cpumask_clear(&xps_mask);
2393 cpumask_set_cpu(fq->target_cpu, &xps_mask);
2394
2395 err = netif_set_xps_queue(net_dev, &xps_mask, i);
2396 if (err) {
2397 netdev_warn_once(net_dev, "Error setting XPS queue\n");
2398 break;
2399 }
2400 }
2401
2402 return err;
2403}
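/* Example: with 8 Tx queues and 2 traffic classes, netdev_queues = 16
 * and netdev queue i is pinned to the CPU affine to priv->fq[i % 8], so
 * queues 0 and 8 (flow 0 in TC 0 and TC 1) share one XPS CPU mask,
 * queues 1 and 9 the next, and so on.
 */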
2404
e3ec13be
IC
2405static int dpaa2_eth_setup_mqprio(struct net_device *net_dev,
2406 struct tc_mqprio_qopt *mqprio)
ab1e6de2
IR
2407{
2408 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
ab1e6de2
IR
2409 u8 num_tc, num_queues;
2410 int i;
2411
ab1e6de2
IR
2412 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
2413 num_queues = dpaa2_eth_queue_count(priv);
2414 num_tc = mqprio->num_tc;
2415
2416 if (num_tc == net_dev->num_tc)
2417 return 0;
2418
2419 if (num_tc > dpaa2_eth_tc_count(priv)) {
2420 netdev_err(net_dev, "Max %d traffic classes supported\n",
2421 dpaa2_eth_tc_count(priv));
b89c1e6b 2422 return -EOPNOTSUPP;
ab1e6de2
IR
2423 }
2424
2425 if (!num_tc) {
2426 netdev_reset_tc(net_dev);
2427 netif_set_real_num_tx_queues(net_dev, num_queues);
2428 goto out;
2429 }
2430
2431 netdev_set_num_tc(net_dev, num_tc);
2432 netif_set_real_num_tx_queues(net_dev, num_tc * num_queues);
2433
2434 for (i = 0; i < num_tc; i++)
2435 netdev_set_tc_queue(net_dev, i, num_queues, i * num_queues);
2436
2437out:
2438 update_xps(priv);
2439
2440 return 0;
2441}
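/* Example usage (hypothetical interface name, shown for illustration):
 * offloading 4 traffic classes with a repeating priority-to-TC map,
 * which is what lands in dpaa2_eth_setup_mqprio() above:
 *
 *	tc qdisc add dev eth0 root handle 1: mqprio \
 *		num_tc 4 map 0 1 2 3 0 1 2 3 0 1 2 3 0 1 2 3 hw 1
 *
 * netdev_set_num_tc()/netdev_set_tc_queue() then expose
 * num_tc * num_queues real Tx queues to the stack.
 */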
2442
3657cdaf
IC
2443#define bps_to_mbits(rate) (div_u64((rate), 1000000) * 8)
2444
2445static int dpaa2_eth_setup_tbf(struct net_device *net_dev, struct tc_tbf_qopt_offload *p)
2446{
2447 struct tc_tbf_qopt_offload_replace_params *cfg = &p->replace_params;
2448 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2449 struct dpni_tx_shaping_cfg tx_cr_shaper = { 0 };
2450 struct dpni_tx_shaping_cfg tx_er_shaper = { 0 };
2451 int err;
2452
2453 if (p->command == TC_TBF_STATS)
2454 return -EOPNOTSUPP;
2455
2456 /* Only per port Tx shaping */
2457 if (p->parent != TC_H_ROOT)
2458 return -EOPNOTSUPP;
2459
2460 if (p->command == TC_TBF_REPLACE) {
2461 if (cfg->max_size > DPAA2_ETH_MAX_BURST_SIZE) {
2462 netdev_err(net_dev, "burst size cannot be greater than %d\n",
2463 DPAA2_ETH_MAX_BURST_SIZE);
2464 return -EINVAL;
2465 }
2466
2467 tx_cr_shaper.max_burst_size = cfg->max_size;
2468 /* The TBF interface is in bytes/s, whereas DPAA2 expects the
2469 * rate in Mbits/s
2470 */
2471 tx_cr_shaper.rate_limit = bps_to_mbits(cfg->rate.rate_bytes_ps);
2472 }
2473
2474 err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &tx_cr_shaper,
2475 &tx_er_shaper, 0);
2476 if (err) {
2477 netdev_err(net_dev, "dpni_set_tx_shaping() = %d\n", err);
2478 return err;
2479 }
2480
2481 return 0;
2482}
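/* Worked example for the unit conversion above: TBF hands the driver a
 * rate in bytes/s while dpni_set_tx_shaping() expects Mbit/s, so a
 * request of 125,000,000 B/s becomes (125000000 / 1000000) * 8 =
 * 1000 Mbit/s. A matching (hypothetical) command line could be:
 *
 *	tc qdisc replace dev eth0 root tbf rate 1gbit \
 *		burst 4k latency 1ms
 *
 * where the burst value must not exceed DPAA2_ETH_MAX_BURST_SIZE.
 */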
2483
e3ec13be
IC
2484static int dpaa2_eth_setup_tc(struct net_device *net_dev,
2485 enum tc_setup_type type, void *type_data)
2486{
2487 switch (type) {
2488 case TC_SETUP_QDISC_MQPRIO:
2489 return dpaa2_eth_setup_mqprio(net_dev, type_data);
3657cdaf
IC
2490 case TC_SETUP_QDISC_TBF:
2491 return dpaa2_eth_setup_tbf(net_dev, type_data);
e3ec13be
IC
2492 default:
2493 return -EOPNOTSUPP;
2494 }
2495}
2496
6e2387e8
IR
2497static const struct net_device_ops dpaa2_eth_ops = {
2498 .ndo_open = dpaa2_eth_open,
2499 .ndo_start_xmit = dpaa2_eth_tx,
2500 .ndo_stop = dpaa2_eth_stop,
6e2387e8
IR
2501 .ndo_set_mac_address = dpaa2_eth_set_addr,
2502 .ndo_get_stats64 = dpaa2_eth_get_stats,
6e2387e8
IR
2503 .ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
2504 .ndo_set_features = dpaa2_eth_set_features,
859f998e 2505 .ndo_do_ioctl = dpaa2_eth_ioctl,
7e273a8e
ICR
2506 .ndo_change_mtu = dpaa2_eth_change_mtu,
2507 .ndo_bpf = dpaa2_eth_xdp,
d678be1d 2508 .ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
ab1e6de2 2509 .ndo_setup_tc = dpaa2_eth_setup_tc,
6e2387e8
IR
2510};
2511
5d8dccf8 2512static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx)
6e2387e8
IR
2513{
2514 struct dpaa2_eth_channel *ch;
2515
2516 ch = container_of(ctx, struct dpaa2_eth_channel, nctx);
85047abd
IR
2517
2518 /* Update NAPI statistics */
2519 ch->stats.cdan++;
2520
6c33ae1a 2521 napi_schedule(&ch->napi);
6e2387e8
IR
2522}
2523
2524/* Allocate and configure a DPCON object */
5d8dccf8 2525static struct fsl_mc_device *dpaa2_eth_setup_dpcon(struct dpaa2_eth_priv *priv)
6e2387e8
IR
2526{
2527 struct fsl_mc_device *dpcon;
2528 struct device *dev = priv->net_dev->dev.parent;
6e2387e8
IR
2529 int err;
2530
2531 err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
2532 FSL_MC_POOL_DPCON, &dpcon);
2533 if (err) {
d7f5a9d8
IC
2534 if (err == -ENXIO)
2535 err = -EPROBE_DEFER;
2536 else
2537 dev_info(dev, "Not enough DPCONs, will go on as-is\n");
2538 return ERR_PTR(err);
6e2387e8
IR
2539 }
2540
2541 err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
2542 if (err) {
2543 dev_err(dev, "dpcon_open() failed\n");
f6dda809 2544 goto free;
6e2387e8
IR
2545 }
2546
2547 err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
2548 if (err) {
2549 dev_err(dev, "dpcon_reset() failed\n");
f6dda809 2550 goto close;
6e2387e8
IR
2551 }
2552
6e2387e8
IR
2553 err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
2554 if (err) {
2555 dev_err(dev, "dpcon_enable() failed\n");
f6dda809 2556 goto close;
6e2387e8
IR
2557 }
2558
2559 return dpcon;
2560
f6dda809 2561close:
6e2387e8 2562 dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
f6dda809 2563free:
6e2387e8
IR
2564 fsl_mc_object_free(dpcon);
2565
02afa9c6 2566 return ERR_PTR(err);
6e2387e8
IR
2567}
2568
5d8dccf8
IC
2569static void dpaa2_eth_free_dpcon(struct dpaa2_eth_priv *priv,
2570 struct fsl_mc_device *dpcon)
6e2387e8
IR
2571{
2572 dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
2573 dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
2574 fsl_mc_object_free(dpcon);
2575}
2576
5d8dccf8 2577static struct dpaa2_eth_channel *dpaa2_eth_alloc_channel(struct dpaa2_eth_priv *priv)
6e2387e8
IR
2578{
2579 struct dpaa2_eth_channel *channel;
2580 struct dpcon_attr attr;
2581 struct device *dev = priv->net_dev->dev.parent;
2582 int err;
2583
2584 channel = kzalloc(sizeof(*channel), GFP_KERNEL);
2585 if (!channel)
2586 return NULL;
2587
5d8dccf8 2588 channel->dpcon = dpaa2_eth_setup_dpcon(priv);
02afa9c6
Y
2589 if (IS_ERR(channel->dpcon)) {
2590 err = PTR_ERR(channel->dpcon);
6e2387e8 2591 goto err_setup;
d7f5a9d8 2592 }
6e2387e8
IR
2593
2594 err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
2595 &attr);
2596 if (err) {
2597 dev_err(dev, "dpcon_get_attributes() failed\n");
2598 goto err_get_attr;
2599 }
2600
2601 channel->dpcon_id = attr.id;
2602 channel->ch_id = attr.qbman_ch_id;
2603 channel->priv = priv;
2604
2605 return channel;
2606
2607err_get_attr:
5d8dccf8 2608 dpaa2_eth_free_dpcon(priv, channel->dpcon);
6e2387e8
IR
2609err_setup:
2610 kfree(channel);
d7f5a9d8 2611 return ERR_PTR(err);
6e2387e8
IR
2612}
2613
5d8dccf8
IC
2614static void dpaa2_eth_free_channel(struct dpaa2_eth_priv *priv,
2615 struct dpaa2_eth_channel *channel)
6e2387e8 2616{
5d8dccf8 2617 dpaa2_eth_free_dpcon(priv, channel->dpcon);
6e2387e8
IR
2618 kfree(channel);
2619}
2620
2621/* DPIO setup: allocate and configure QBMan channels, setup core affinity
2622 * and register data availability notifications
2623 */
5d8dccf8 2624static int dpaa2_eth_setup_dpio(struct dpaa2_eth_priv *priv)
6e2387e8
IR
2625{
2626 struct dpaa2_io_notification_ctx *nctx;
2627 struct dpaa2_eth_channel *channel;
2628 struct dpcon_notification_cfg dpcon_notif_cfg;
2629 struct device *dev = priv->net_dev->dev.parent;
2630 int i, err;
2631
2632 /* We want the ability to spread ingress traffic (RX, TX conf) to as
2633 * many cores as possible, so we need one channel for each core
 2634 * (unless there are fewer queues than cores, in which case the extra
2635 * channels would be wasted).
2636 * Allocate one channel per core and register it to the core's
2637 * affine DPIO. If not enough channels are available for all cores
2638 * or if some cores don't have an affine DPIO, there will be no
2639 * ingress frame processing on those cores.
2640 */
2641 cpumask_clear(&priv->dpio_cpumask);
2642 for_each_online_cpu(i) {
2643 /* Try to allocate a channel */
5d8dccf8 2644 channel = dpaa2_eth_alloc_channel(priv);
d7f5a9d8 2645 if (IS_ERR_OR_NULL(channel)) {
bd8460fa 2646 err = PTR_ERR_OR_ZERO(channel);
d7f5a9d8
IC
2647 if (err != -EPROBE_DEFER)
2648 dev_info(dev,
2649 "No affine channel for cpu %d and above\n", i);
6e2387e8
IR
2650 goto err_alloc_ch;
2651 }
2652
2653 priv->channel[priv->num_channels] = channel;
2654
2655 nctx = &channel->nctx;
2656 nctx->is_cdan = 1;
5d8dccf8 2657 nctx->cb = dpaa2_eth_cdan_cb;
6e2387e8
IR
2658 nctx->id = channel->ch_id;
2659 nctx->desired_cpu = i;
2660
2661 /* Register the new context */
7ec0596f 2662 channel->dpio = dpaa2_io_service_select(i);
47441f7f 2663 err = dpaa2_io_service_register(channel->dpio, nctx, dev);
6e2387e8 2664 if (err) {
5206d8d1 2665 dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
6e2387e8 2666 /* If no affine DPIO for this core, there's probably
5206d8d1
IR
2667 * none available for next cores either. Signal we want
2668 * to retry later, in case the DPIO devices weren't
2669 * probed yet.
6e2387e8 2670 */
5206d8d1 2671 err = -EPROBE_DEFER;
6e2387e8
IR
2672 goto err_service_reg;
2673 }
2674
2675 /* Register DPCON notification with MC */
2676 dpcon_notif_cfg.dpio_id = nctx->dpio_id;
2677 dpcon_notif_cfg.priority = 0;
2678 dpcon_notif_cfg.user_ctx = nctx->qman64;
2679 err = dpcon_set_notification(priv->mc_io, 0,
2680 channel->dpcon->mc_handle,
2681 &dpcon_notif_cfg);
2682 if (err) {
 2683 dev_err(dev, "dpcon_set_notification() failed\n");
2684 goto err_set_cdan;
2685 }
2686
2687 /* If we managed to allocate a channel and also found an affine
2688 * DPIO for this core, add it to the final mask
2689 */
2690 cpumask_set_cpu(i, &priv->dpio_cpumask);
2691 priv->num_channels++;
2692
2693 /* Stop if we already have enough channels to accommodate all
2694 * RX and TX conf queues
2695 */
b0e4f37b 2696 if (priv->num_channels == priv->dpni_attrs.num_queues)
6e2387e8
IR
2697 break;
2698 }
2699
2700 return 0;
2701
2702err_set_cdan:
47441f7f 2703 dpaa2_io_service_deregister(channel->dpio, nctx, dev);
6e2387e8 2704err_service_reg:
5d8dccf8 2705 dpaa2_eth_free_channel(priv, channel);
6e2387e8 2706err_alloc_ch:
5aa4277d
IC
2707 if (err == -EPROBE_DEFER) {
2708 for (i = 0; i < priv->num_channels; i++) {
2709 channel = priv->channel[i];
2710 nctx = &channel->nctx;
2711 dpaa2_io_service_deregister(channel->dpio, nctx, dev);
5d8dccf8 2712 dpaa2_eth_free_channel(priv, channel);
5aa4277d
IC
2713 }
2714 priv->num_channels = 0;
d7f5a9d8 2715 return err;
5aa4277d 2716 }
d7f5a9d8 2717
6e2387e8
IR
2718 if (cpumask_empty(&priv->dpio_cpumask)) {
2719 dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
d7f5a9d8 2720 return -ENODEV;
6e2387e8
IR
2721 }
2722
2723 dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
2724 cpumask_pr_args(&priv->dpio_cpumask));
2725
2726 return 0;
2727}
2728
5d8dccf8 2729static void dpaa2_eth_free_dpio(struct dpaa2_eth_priv *priv)
6e2387e8 2730{
47441f7f 2731 struct device *dev = priv->net_dev->dev.parent;
6e2387e8 2732 struct dpaa2_eth_channel *ch;
47441f7f 2733 int i;
6e2387e8
IR
2734
2735 /* deregister CDAN notifications and free channels */
2736 for (i = 0; i < priv->num_channels; i++) {
2737 ch = priv->channel[i];
47441f7f 2738 dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev);
5d8dccf8 2739 dpaa2_eth_free_channel(priv, ch);
6e2387e8
IR
2740 }
2741}
2742
5d8dccf8
IC
2743static struct dpaa2_eth_channel *dpaa2_eth_get_affine_channel(struct dpaa2_eth_priv *priv,
2744 int cpu)
6e2387e8
IR
2745{
2746 struct device *dev = priv->net_dev->dev.parent;
2747 int i;
2748
2749 for (i = 0; i < priv->num_channels; i++)
2750 if (priv->channel[i]->nctx.desired_cpu == cpu)
2751 return priv->channel[i];
2752
2753 /* We should never get here. Issue a warning and return
2754 * the first channel, because it's still better than nothing
2755 */
2756 dev_warn(dev, "No affine channel found for cpu %d\n", cpu);
2757
2758 return priv->channel[0];
2759}
2760
5d8dccf8 2761static void dpaa2_eth_set_fq_affinity(struct dpaa2_eth_priv *priv)
6e2387e8
IR
2762{
2763 struct device *dev = priv->net_dev->dev.parent;
2764 struct dpaa2_eth_fq *fq;
2765 int rx_cpu, txc_cpu;
06d5b179 2766 int i;
6e2387e8
IR
2767
2768 /* For each FQ, pick one channel/CPU to deliver frames to.
2769 * This may well change at runtime, either through irqbalance or
2770 * through direct user intervention.
2771 */
2772 rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask);
2773
2774 for (i = 0; i < priv->num_fqs; i++) {
2775 fq = &priv->fq[i];
2776 switch (fq->type) {
2777 case DPAA2_RX_FQ:
061d631f 2778 case DPAA2_RX_ERR_FQ:
6e2387e8
IR
2779 fq->target_cpu = rx_cpu;
2780 rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
2781 if (rx_cpu >= nr_cpu_ids)
2782 rx_cpu = cpumask_first(&priv->dpio_cpumask);
2783 break;
2784 case DPAA2_TX_CONF_FQ:
2785 fq->target_cpu = txc_cpu;
2786 txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
2787 if (txc_cpu >= nr_cpu_ids)
2788 txc_cpu = cpumask_first(&priv->dpio_cpumask);
2789 break;
2790 default:
2791 dev_err(dev, "Unknown FQ type: %d\n", fq->type);
2792 }
5d8dccf8 2793 fq->channel = dpaa2_eth_get_affine_channel(priv, fq->target_cpu);
6e2387e8 2794 }
06d5b179
IR
2795
2796 update_xps(priv);
6e2387e8
IR
2797}
2798
5d8dccf8 2799static void dpaa2_eth_setup_fqs(struct dpaa2_eth_priv *priv)
6e2387e8 2800{
685e39ea 2801 int i, j;
6e2387e8
IR
2802
2803 /* We have one TxConf FQ per Tx flow.
2804 * The number of Tx and Rx queues is the same.
2805 * Tx queues come first in the fq array.
2806 */
2807 for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
2808 priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
2809 priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
2810 priv->fq[priv->num_fqs++].flowid = (u16)i;
2811 }
2812
685e39ea
IR
2813 for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
2814 for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
2815 priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
2816 priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
2817 priv->fq[priv->num_fqs].tc = (u8)j;
2818 priv->fq[priv->num_fqs++].flowid = (u16)i;
2819 }
6e2387e8
IR
2820 }
2821
061d631f
IC
2822 /* We have exactly one Rx error queue per DPNI */
2823 priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ;
2824 priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err;
2825
6e2387e8 2826 /* For each FQ, decide on which core to process incoming frames */
5d8dccf8 2827 dpaa2_eth_set_fq_affinity(priv);
6e2387e8
IR
2828}
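/* Example layout (illustrative): on a DPNI with 8 queues and 2 traffic
 * classes, priv->fq[] is filled as entries 0-7 = Tx conf (flows 0-7),
 * entries 8-15 = Rx TC 0, entries 16-23 = Rx TC 1 and entry 24 = the
 * single Rx error queue.
 */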
2829
2830/* Allocate and configure one buffer pool for each interface */
5d8dccf8 2831static int dpaa2_eth_setup_dpbp(struct dpaa2_eth_priv *priv)
6e2387e8
IR
2832{
2833 int err;
2834 struct fsl_mc_device *dpbp_dev;
2835 struct device *dev = priv->net_dev->dev.parent;
05fa39c6 2836 struct dpbp_attr dpbp_attrs;
6e2387e8
IR
2837
2838 err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
2839 &dpbp_dev);
2840 if (err) {
d7f5a9d8
IC
2841 if (err == -ENXIO)
2842 err = -EPROBE_DEFER;
2843 else
2844 dev_err(dev, "DPBP device allocation failed\n");
6e2387e8
IR
2845 return err;
2846 }
2847
2848 priv->dpbp_dev = dpbp_dev;
2849
2850 err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
2851 &dpbp_dev->mc_handle);
2852 if (err) {
2853 dev_err(dev, "dpbp_open() failed\n");
2854 goto err_open;
2855 }
2856
d00defe3
IR
2857 err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle);
2858 if (err) {
2859 dev_err(dev, "dpbp_reset() failed\n");
2860 goto err_reset;
2861 }
2862
6e2387e8
IR
2863 err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
2864 if (err) {
2865 dev_err(dev, "dpbp_enable() failed\n");
2866 goto err_enable;
2867 }
2868
2869 err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
05fa39c6 2870 &dpbp_attrs);
6e2387e8
IR
2871 if (err) {
2872 dev_err(dev, "dpbp_get_attributes() failed\n");
2873 goto err_get_attr;
2874 }
05fa39c6 2875 priv->bpid = dpbp_attrs.bpid;
6e2387e8
IR
2876
2877 return 0;
2878
2879err_get_attr:
2880 dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
2881err_enable:
d00defe3 2882err_reset:
6e2387e8
IR
2883 dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
2884err_open:
2885 fsl_mc_object_free(dpbp_dev);
2886
2887 return err;
2888}
2889
5d8dccf8 2890static void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv)
6e2387e8 2891{
5d8dccf8 2892 dpaa2_eth_drain_pool(priv);
6e2387e8
IR
2893 dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
2894 dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
2895 fsl_mc_object_free(priv->dpbp_dev);
2896}
2897
5d8dccf8 2898static int dpaa2_eth_set_buffer_layout(struct dpaa2_eth_priv *priv)
6e2387e8 2899{
308f64e7 2900 struct device *dev = priv->net_dev->dev.parent;
50eacbc8 2901 struct dpni_buffer_layout buf_layout = {0};
27c87486 2902 u16 rx_buf_align;
6e2387e8
IR
2903 int err;
2904
8a4fd877
BP
2905 /* We need to check for WRIOP version 1.0.0, but depending on the MC
2906 * version, this number is not always provided correctly on rev1.
2907 * We need to check for both alternatives in this situation.
2908 */
2909 if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
2910 priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
27c87486 2911 rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
8a4fd877 2912 else
27c87486 2913 rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
8a4fd877 2914
efa6a7d0
IC
2915 /* We need to ensure that the buffer size seen by WRIOP is a multiple
2916 * of 64 or 256 bytes depending on the WRIOP version.
2917 */
2918 priv->rx_buf_size = ALIGN_DOWN(DPAA2_ETH_RX_BUF_SIZE, rx_buf_align);
2919
4b2d9fe8 2920 /* tx buffer */
50eacbc8 2921 buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
859f998e 2922 buf_layout.pass_timestamp = true;
c5521189 2923 buf_layout.pass_frame_status = true;
859f998e 2924 buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
c5521189
YL
2925 DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
2926 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
6e2387e8 2927 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
50eacbc8 2928 DPNI_QUEUE_TX, &buf_layout);
6e2387e8
IR
2929 if (err) {
2930 dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
308f64e7 2931 return err;
6e2387e8
IR
2932 }
2933
2934 /* tx-confirm buffer */
c5521189
YL
2935 buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
2936 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
6e2387e8 2937 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
50eacbc8 2938 DPNI_QUEUE_TX_CONFIRM, &buf_layout);
6e2387e8
IR
2939 if (err) {
2940 dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
308f64e7
IR
2941 return err;
2942 }
2943
4b2d9fe8
BP
2944 /* Now that we've set our tx buffer layout, retrieve the minimum
2945 * required tx data offset.
2946 */
2947 err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
2948 &priv->tx_data_offset);
2949 if (err) {
2950 dev_err(dev, "dpni_get_tx_data_offset() failed\n");
2951 return err;
2952 }
2953
2954 if ((priv->tx_data_offset % 64) != 0)
2955 dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
2956 priv->tx_data_offset);
2957
2958 /* rx buffer */
2b7c86eb 2959 buf_layout.pass_frame_status = true;
4b2d9fe8 2960 buf_layout.pass_parser_result = true;
27c87486 2961 buf_layout.data_align = rx_buf_align;
4b2d9fe8
BP
2962 buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv);
2963 buf_layout.private_data_size = 0;
2964 buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
2965 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
2966 DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
859f998e
IR
2967 DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
2968 DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
4b2d9fe8
BP
2969 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
2970 DPNI_QUEUE_RX, &buf_layout);
2971 if (err) {
2972 dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
2973 return err;
2974 }
2975
308f64e7
IR
2976 return 0;
2977}
2978
1fa0f68c
ICR
2979#define DPNI_ENQUEUE_FQID_VER_MAJOR 7
2980#define DPNI_ENQUEUE_FQID_VER_MINOR 9
2981
2982static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv,
2983 struct dpaa2_eth_fq *fq,
48c0481e 2984 struct dpaa2_fd *fd, u8 prio,
6ff80447 2985 u32 num_frames __always_unused,
48c0481e 2986 int *frames_enqueued)
1fa0f68c 2987{
48c0481e
IC
2988 int err;
2989
2990 err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
2991 priv->tx_qdid, prio,
2992 fq->tx_qdbin, fd);
2993 if (!err && frames_enqueued)
2994 *frames_enqueued = 1;
2995 return err;
1fa0f68c
ICR
2996}
2997
6ff80447
IC
2998static inline int dpaa2_eth_enqueue_fq_multiple(struct dpaa2_eth_priv *priv,
2999 struct dpaa2_eth_fq *fq,
3000 struct dpaa2_fd *fd,
3001 u8 prio, u32 num_frames,
3002 int *frames_enqueued)
1fa0f68c 3003{
48c0481e
IC
3004 int err;
3005
6ff80447
IC
3006 err = dpaa2_io_service_enqueue_multiple_fq(fq->channel->dpio,
3007 fq->tx_fqid[prio],
3008 fd, num_frames);
3009
3010 if (err == 0)
3011 return -EBUSY;
3012
3013 if (frames_enqueued)
3014 *frames_enqueued = err;
3015 return 0;
1fa0f68c
ICR
3016}
3017
5d8dccf8 3018static void dpaa2_eth_set_enqueue_mode(struct dpaa2_eth_priv *priv)
1fa0f68c
ICR
3019{
3020 if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
3021 DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
3022 priv->enqueue = dpaa2_eth_enqueue_qd;
3023 else
6ff80447 3024 priv->enqueue = dpaa2_eth_enqueue_fq_multiple;
1fa0f68c
ICR
3025}
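/* In short: DPNI firmware older than 7.9 can only enqueue one frame at
 * a time through the QDID (dpaa2_eth_enqueue_qd), while newer versions
 * allow enqueueing batches straight to the Tx FQID
 * (dpaa2_eth_enqueue_fq_multiple), which the XDP_REDIRECT path above can
 * use for bulk transmission.
 */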
3026
5d8dccf8 3027static int dpaa2_eth_set_pause(struct dpaa2_eth_priv *priv)
8eb3cef8
IR
3028{
3029 struct device *dev = priv->net_dev->dev.parent;
3030 struct dpni_link_cfg link_cfg = {0};
3031 int err;
3032
3033 /* Get the default link options so we don't override other flags */
3034 err = dpni_get_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
3035 if (err) {
3036 dev_err(dev, "dpni_get_link_cfg() failed\n");
3037 return err;
3038 }
3039
3040 /* By default, enable both Rx and Tx pause frames */
3041 link_cfg.options |= DPNI_LINK_OPT_PAUSE;
3042 link_cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
3043 err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
3044 if (err) {
3045 dev_err(dev, "dpni_set_link_cfg() failed\n");
3046 return err;
3047 }
3048
3049 priv->link_state.options = link_cfg.options;
3050
3051 return 0;
3052}
3053
5d8dccf8 3054static void dpaa2_eth_update_tx_fqids(struct dpaa2_eth_priv *priv)
a690af4f
IR
3055{
3056 struct dpni_queue_id qid = {0};
3057 struct dpaa2_eth_fq *fq;
3058 struct dpni_queue queue;
3059 int i, j, err;
3060
3061 /* We only use Tx FQIDs for FQID-based enqueue, so check
 3062 * if the DPNI version supports it before updating FQIDs
3063 */
3064 if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
3065 DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
3066 return;
3067
3068 for (i = 0; i < priv->num_fqs; i++) {
3069 fq = &priv->fq[i];
3070 if (fq->type != DPAA2_TX_CONF_FQ)
3071 continue;
3072 for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
3073 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3074 DPNI_QUEUE_TX, j, fq->flowid,
3075 &queue, &qid);
3076 if (err)
3077 goto out_err;
3078
3079 fq->tx_fqid[j] = qid.fqid;
3080 if (fq->tx_fqid[j] == 0)
3081 goto out_err;
3082 }
3083 }
3084
6ff80447 3085 priv->enqueue = dpaa2_eth_enqueue_fq_multiple;
a690af4f
IR
3086
3087 return;
3088
3089out_err:
3090 netdev_info(priv->net_dev,
3091 "Error reading Tx FQID, fallback to QDID-based enqueue\n");
3092 priv->enqueue = dpaa2_eth_enqueue_qd;
3093}
3094
6aa90fe2 3095/* Configure ingress classification based on VLAN PCP */
5d8dccf8 3096static int dpaa2_eth_set_vlan_qos(struct dpaa2_eth_priv *priv)
6aa90fe2
IR
3097{
3098 struct device *dev = priv->net_dev->dev.parent;
3099 struct dpkg_profile_cfg kg_cfg = {0};
3100 struct dpni_qos_tbl_cfg qos_cfg = {0};
3101 struct dpni_rule_cfg key_params;
3102 void *dma_mem, *key, *mask;
3103 u8 key_size = 2; /* VLAN TCI field */
3104 int i, pcp, err;
3105
3106 /* VLAN-based classification only makes sense if we have multiple
3107 * traffic classes.
3108 * Also, we need to extract just the 3-bit PCP field from the VLAN
3109 * header and we can only do that by using a mask
3110 */
3111 if (dpaa2_eth_tc_count(priv) == 1 || !dpaa2_eth_fs_mask_enabled(priv)) {
3112 dev_dbg(dev, "VLAN-based QoS classification not supported\n");
3113 return -EOPNOTSUPP;
3114 }
3115
3116 dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
3117 if (!dma_mem)
3118 return -ENOMEM;
3119
3120 kg_cfg.num_extracts = 1;
3121 kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
3122 kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_VLAN;
3123 kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
3124 kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_VLAN_TCI;
3125
3126 err = dpni_prepare_key_cfg(&kg_cfg, dma_mem);
3127 if (err) {
3128 dev_err(dev, "dpni_prepare_key_cfg failed\n");
3129 goto out_free_tbl;
3130 }
3131
3132 /* set QoS table */
3133 qos_cfg.default_tc = 0;
3134 qos_cfg.discard_on_miss = 0;
3135 qos_cfg.key_cfg_iova = dma_map_single(dev, dma_mem,
3136 DPAA2_CLASSIFIER_DMA_SIZE,
3137 DMA_TO_DEVICE);
3138 if (dma_mapping_error(dev, qos_cfg.key_cfg_iova)) {
3139 dev_err(dev, "QoS table DMA mapping failed\n");
3140 err = -ENOMEM;
3141 goto out_free_tbl;
3142 }
3143
3144 err = dpni_set_qos_table(priv->mc_io, 0, priv->mc_token, &qos_cfg);
3145 if (err) {
3146 dev_err(dev, "dpni_set_qos_table failed\n");
3147 goto out_unmap_tbl;
3148 }
3149
3150 /* Add QoS table entries */
3151 key = kzalloc(key_size * 2, GFP_KERNEL);
3152 if (!key) {
3153 err = -ENOMEM;
3154 goto out_unmap_tbl;
3155 }
3156 mask = key + key_size;
3157 *(__be16 *)mask = cpu_to_be16(VLAN_PRIO_MASK);
3158
3159 key_params.key_iova = dma_map_single(dev, key, key_size * 2,
3160 DMA_TO_DEVICE);
3161 if (dma_mapping_error(dev, key_params.key_iova)) {
3162 dev_err(dev, "Qos table entry DMA mapping failed\n");
3163 err = -ENOMEM;
3164 goto out_free_key;
3165 }
3166
3167 key_params.mask_iova = key_params.key_iova + key_size;
3168 key_params.key_size = key_size;
3169
3170 /* We add rules for PCP-based distribution starting with highest
3171 * priority (VLAN PCP = 7). If this DPNI doesn't have enough traffic
3172 * classes to accommodate all priority levels, the lowest ones end up
3173 * on TC 0 which was configured as default
3174 */
3175 for (i = dpaa2_eth_tc_count(priv) - 1, pcp = 7; i >= 0; i--, pcp--) {
3176 *(__be16 *)key = cpu_to_be16(pcp << VLAN_PRIO_SHIFT);
3177 dma_sync_single_for_device(dev, key_params.key_iova,
3178 key_size * 2, DMA_TO_DEVICE);
3179
3180 err = dpni_add_qos_entry(priv->mc_io, 0, priv->mc_token,
3181 &key_params, i, i);
3182 if (err) {
3183 dev_err(dev, "dpni_add_qos_entry failed\n");
3184 dpni_clear_qos_table(priv->mc_io, 0, priv->mc_token);
3185 goto out_unmap_key;
3186 }
3187 }
3188
3189 priv->vlan_cls_enabled = true;
3190
3191 /* Table and key memory is not persistent, clean everything up after
3192 * configuration is finished
3193 */
3194out_unmap_key:
3195 dma_unmap_single(dev, key_params.key_iova, key_size * 2, DMA_TO_DEVICE);
3196out_free_key:
3197 kfree(key);
3198out_unmap_tbl:
3199 dma_unmap_single(dev, qos_cfg.key_cfg_iova, DPAA2_CLASSIFIER_DMA_SIZE,
3200 DMA_TO_DEVICE);
3201out_free_tbl:
3202 kfree(dma_mem);
3203
3204 return err;
3205}
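/* Worked example of the key/mask pair built above: the rule for PCP 7
 * uses key = 7 << VLAN_PRIO_SHIFT = 0xe000 and mask = VLAN_PRIO_MASK =
 * 0xe000, so only the 3 PCP bits of the TCI take part in the lookup.
 * On a DPNI with 8 traffic classes PCP 7 maps to TC 7, down to PCP 0 on
 * TC 0; with fewer TCs the lowest priorities fall through to the
 * default TC 0.
 */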
3206
308f64e7 3207/* Configure the DPNI object this interface is associated with */
5d8dccf8 3208static int dpaa2_eth_setup_dpni(struct fsl_mc_device *ls_dev)
308f64e7
IR
3209{
3210 struct device *dev = &ls_dev->dev;
3211 struct dpaa2_eth_priv *priv;
3212 struct net_device *net_dev;
3213 int err;
3214
3215 net_dev = dev_get_drvdata(dev);
3216 priv = netdev_priv(net_dev);
3217
3218 /* get a handle for the DPNI object */
3219 err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token);
3220 if (err) {
3221 dev_err(dev, "dpni_open() failed\n");
3222 return err;
3223 }
3224
311cffa5
IR
3225 /* Check if we can work with this DPNI object */
3226 err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major,
3227 &priv->dpni_ver_minor);
3228 if (err) {
3229 dev_err(dev, "dpni_get_api_version() failed\n");
3230 goto close;
3231 }
3232 if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
3233 dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n",
3234 priv->dpni_ver_major, priv->dpni_ver_minor,
3235 DPNI_VER_MAJOR, DPNI_VER_MINOR);
3236 err = -ENOTSUPP;
3237 goto close;
3238 }
3239
308f64e7
IR
3240 ls_dev->mc_io = priv->mc_io;
3241 ls_dev->mc_handle = priv->mc_token;
3242
3243 err = dpni_reset(priv->mc_io, 0, priv->mc_token);
3244 if (err) {
3245 dev_err(dev, "dpni_reset() failed\n");
f6dda809 3246 goto close;
6e2387e8
IR
3247 }
3248
308f64e7
IR
3249 err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
3250 &priv->dpni_attrs);
3251 if (err) {
3252 dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
3253 goto close;
3254 }
3255
5d8dccf8 3256 err = dpaa2_eth_set_buffer_layout(priv);
308f64e7
IR
3257 if (err)
3258 goto close;
3259
5d8dccf8 3260 dpaa2_eth_set_enqueue_mode(priv);
1fa0f68c 3261
8eb3cef8
IR
3262 /* Enable pause frame support */
3263 if (dpaa2_eth_has_pause_support(priv)) {
5d8dccf8 3264 err = dpaa2_eth_set_pause(priv);
8eb3cef8
IR
3265 if (err)
3266 goto close;
3267 }
3268
5d8dccf8 3269 err = dpaa2_eth_set_vlan_qos(priv);
6aa90fe2
IR
3270 if (err && err != -EOPNOTSUPP)
3271 goto close;
3272
9334d5ba
XW
3273 priv->cls_rules = devm_kcalloc(dev, dpaa2_eth_fs_count(priv),
3274 sizeof(struct dpaa2_eth_cls_rule),
3275 GFP_KERNEL);
97fff7c8
WY
3276 if (!priv->cls_rules) {
3277 err = -ENOMEM;
afb90dbb 3278 goto close;
97fff7c8 3279 }
afb90dbb 3280
6e2387e8
IR
3281 return 0;
3282
f6dda809 3283close:
6e2387e8 3284 dpni_close(priv->mc_io, 0, priv->mc_token);
f6dda809 3285
6e2387e8
IR
3286 return err;
3287}
3288
5d8dccf8 3289static void dpaa2_eth_free_dpni(struct dpaa2_eth_priv *priv)
6e2387e8
IR
3290{
3291 int err;
3292
3293 err = dpni_reset(priv->mc_io, 0, priv->mc_token);
3294 if (err)
3295 netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
3296 err);
3297
3298 dpni_close(priv->mc_io, 0, priv->mc_token);
3299}
3300
5d8dccf8
IC
3301static int dpaa2_eth_setup_rx_flow(struct dpaa2_eth_priv *priv,
3302 struct dpaa2_eth_fq *fq)
6e2387e8
IR
3303{
3304 struct device *dev = priv->net_dev->dev.parent;
3305 struct dpni_queue queue;
3306 struct dpni_queue_id qid;
6e2387e8
IR
3307 int err;
3308
3309 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
685e39ea 3310 DPNI_QUEUE_RX, fq->tc, fq->flowid, &queue, &qid);
6e2387e8
IR
3311 if (err) {
3312 dev_err(dev, "dpni_get_queue(RX) failed\n");
3313 return err;
3314 }
3315
3316 fq->fqid = qid.fqid;
3317
3318 queue.destination.id = fq->channel->dpcon_id;
3319 queue.destination.type = DPNI_DEST_DPCON;
3320 queue.destination.priority = 1;
75c583ab 3321 queue.user_context = (u64)(uintptr_t)fq;
6e2387e8 3322 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
685e39ea 3323 DPNI_QUEUE_RX, fq->tc, fq->flowid,
16fa1cf1 3324 DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
6e2387e8
IR
3325 &queue);
3326 if (err) {
3327 dev_err(dev, "dpni_set_queue(RX) failed\n");
3328 return err;
3329 }
3330
d678be1d 3331 /* xdp_rxq setup */
685e39ea
IR
3332 /* only once for each channel */
3333 if (fq->tc > 0)
3334 return 0;
3335
d678be1d 3336 err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev,
b02e5a0e 3337 fq->flowid, 0);
d678be1d
IR
3338 if (err) {
3339 dev_err(dev, "xdp_rxq_info_reg failed\n");
3340 return err;
3341 }
3342
3343 err = xdp_rxq_info_reg_mem_model(&fq->channel->xdp_rxq,
3344 MEM_TYPE_PAGE_ORDER0, NULL);
3345 if (err) {
3346 dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n");
3347 return err;
3348 }
3349
6e2387e8
IR
3350 return 0;
3351}
3352
5d8dccf8
IC
3353static int dpaa2_eth_setup_tx_flow(struct dpaa2_eth_priv *priv,
3354 struct dpaa2_eth_fq *fq)
6e2387e8
IR
3355{
3356 struct device *dev = priv->net_dev->dev.parent;
3357 struct dpni_queue queue;
3358 struct dpni_queue_id qid;
15c87f6b 3359 int i, err;
6e2387e8 3360
15c87f6b
IR
3361 for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
3362 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3363 DPNI_QUEUE_TX, i, fq->flowid,
3364 &queue, &qid);
3365 if (err) {
3366 dev_err(dev, "dpni_get_queue(TX) failed\n");
3367 return err;
3368 }
3369 fq->tx_fqid[i] = qid.fqid;
6e2387e8
IR
3370 }
3371
15c87f6b 3372 /* All Tx queues belonging to the same flowid have the same qdbin */
6e2387e8
IR
3373 fq->tx_qdbin = qid.qdbin;
3374
3375 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3376 DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
3377 &queue, &qid);
3378 if (err) {
3379 dev_err(dev, "dpni_get_queue(TX_CONF) failed\n");
3380 return err;
3381 }
3382
3383 fq->fqid = qid.fqid;
3384
3385 queue.destination.id = fq->channel->dpcon_id;
3386 queue.destination.type = DPNI_DEST_DPCON;
3387 queue.destination.priority = 0;
75c583ab 3388 queue.user_context = (u64)(uintptr_t)fq;
6e2387e8
IR
3389 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
3390 DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
3391 DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
3392 &queue);
3393 if (err) {
3394 dev_err(dev, "dpni_set_queue(TX_CONF) failed\n");
3395 return err;
3396 }
3397
3398 return 0;
3399}
3400
061d631f
IC
3401static int setup_rx_err_flow(struct dpaa2_eth_priv *priv,
3402 struct dpaa2_eth_fq *fq)
3403{
3404 struct device *dev = priv->net_dev->dev.parent;
3405 struct dpni_queue q = { { 0 } };
3406 struct dpni_queue_id qid;
3407 u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
3408 int err;
3409
3410 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3411 DPNI_QUEUE_RX_ERR, 0, 0, &q, &qid);
3412 if (err) {
3413 dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
3414 return err;
3415 }
3416
3417 fq->fqid = qid.fqid;
3418
3419 q.destination.id = fq->channel->dpcon_id;
3420 q.destination.type = DPNI_DEST_DPCON;
3421 q.destination.priority = 1;
3422 q.user_context = (u64)(uintptr_t)fq;
3423 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
3424 DPNI_QUEUE_RX_ERR, 0, 0, q_opt, &q);
3425 if (err) {
3426 dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
3427 return err;
3428 }
3429
3430 return 0;
3431}
3432
/* Supported header fields for Rx hash distribution key */
static const struct dpaa2_eth_dist_fields dist_fields[] = {
	{
		/* L2 header */
		.rxnfc_field = RXH_L2DA,
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_DA,
		.id = DPAA2_ETH_DIST_ETHDST,
		.size = 6,
	}, {
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_SA,
		.id = DPAA2_ETH_DIST_ETHSRC,
		.size = 6,
	}, {
		/* This is the last ethertype field parsed:
		 * depending on frame format, it can be the MAC ethertype
		 * or the VLAN etype.
		 */
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_TYPE,
		.id = DPAA2_ETH_DIST_ETHTYPE,
		.size = 2,
	}, {
		/* VLAN header */
		.rxnfc_field = RXH_VLAN,
		.cls_prot = NET_PROT_VLAN,
		.cls_field = NH_FLD_VLAN_TCI,
		.id = DPAA2_ETH_DIST_VLAN,
		.size = 2,
	}, {
		/* IP header */
		.rxnfc_field = RXH_IP_SRC,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_SRC,
		.id = DPAA2_ETH_DIST_IPSRC,
		.size = 4,
	}, {
		.rxnfc_field = RXH_IP_DST,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_DST,
		.id = DPAA2_ETH_DIST_IPDST,
		.size = 4,
	}, {
		.rxnfc_field = RXH_L3_PROTO,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_PROTO,
		.id = DPAA2_ETH_DIST_IPPROTO,
		.size = 1,
	}, {
		/* Using UDP ports, this is functionally equivalent to raw
		 * byte pairs from L4 header.
		 */
		.rxnfc_field = RXH_L4_B_0_1,
		.cls_prot = NET_PROT_UDP,
		.cls_field = NH_FLD_UDP_PORT_SRC,
		.id = DPAA2_ETH_DIST_L4SRC,
		.size = 2,
	}, {
		.rxnfc_field = RXH_L4_B_2_3,
		.cls_prot = NET_PROT_UDP,
		.cls_field = NH_FLD_UDP_PORT_DST,
		.id = DPAA2_ETH_DIST_L4DST,
		.size = 2,
	},
};

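/* The fields above are concatenated, in table order, to build the Rx
 * distribution key. Worked example (derived from the table): selecting
 * every supported field, as dpaa2_eth_set_default_cls() does below,
 * yields a key laid out as
 *   ETH_DA(6) | ETH_SA(6) | ETH_TYPE(2) | VLAN_TCI(2) |
 *   IP_SRC(4) | IP_DST(4) | IP_PROTO(1) | UDP_SRC(2) | UDP_DST(2)
 * for a total of 29 bytes.
 */
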
/* Configure the Rx hash key using the legacy API */
static int dpaa2_eth_config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_rx_tc_dist_cfg dist_cfg;
	int i, err = 0;

	memset(&dist_cfg, 0, sizeof(dist_cfg));

	dist_cfg.key_cfg_iova = key;
	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
	dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;

	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
		err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token,
					  i, &dist_cfg);
		if (err) {
			dev_err(dev, "dpni_set_rx_tc_dist failed\n");
			break;
		}
	}

	return err;
}

/* Configure the Rx hash key using the new API */
static int dpaa2_eth_config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_rx_dist_cfg dist_cfg;
	int i, err = 0;

	memset(&dist_cfg, 0, sizeof(dist_cfg));

	dist_cfg.key_cfg_iova = key;
	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
	dist_cfg.enable = 1;

	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
		dist_cfg.tc = i;
		err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token,
					    &dist_cfg);
		if (err) {
			dev_err(dev, "dpni_set_rx_hash_dist failed\n");
			break;
		}

		/* If the flow steering / hashing key is shared between all
		 * traffic classes, install it just once
		 */
		if (priv->dpni_attrs.options & DPNI_OPT_SHARED_FS)
			break;
	}

	return err;
}

/* Configure the Rx flow classification key */
static int dpaa2_eth_config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_rx_dist_cfg dist_cfg;
	int i, err = 0;

	memset(&dist_cfg, 0, sizeof(dist_cfg));

	dist_cfg.key_cfg_iova = key;
	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
	dist_cfg.enable = 1;

	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
		dist_cfg.tc = i;
		err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token,
					  &dist_cfg);
		if (err) {
			dev_err(dev, "dpni_set_rx_fs_dist failed\n");
			break;
		}

		/* If the flow steering / hashing key is shared between all
		 * traffic classes, install it just once
		 */
		if (priv->dpni_attrs.options & DPNI_OPT_SHARED_FS)
			break;
	}

	return err;
}

/* Size of the Rx flow classification key */
int dpaa2_eth_cls_key_size(u64 fields)
{
	int i, size = 0;

	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
		if (!(fields & dist_fields[i].id))
			continue;
		size += dist_fields[i].size;
	}

	return size;
}

/* Offset of header field in Rx classification key */
int dpaa2_eth_cls_fld_off(int prot, int field)
{
	int i, off = 0;

	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
		if (dist_fields[i].cls_prot == prot &&
		    dist_fields[i].cls_field == field)
			return off;
		off += dist_fields[i].size;
	}

	WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n");
	return 0;
}

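/* Worked example for dpaa2_eth_cls_fld_off(): with the dist_fields table
 * above, NH_FLD_IP_SRC is preceded by ETH_DA(6) + ETH_SA(6) +
 * ETH_TYPE(2) + VLAN_TCI(2), so it lives at offset 16 in the full key.
 */
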
/* Prune unused fields from the classification rule.
 * Used when masking is not supported
 */
void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields)
{
	int off = 0, new_off = 0;
	int i, size;

	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
		size = dist_fields[i].size;
		if (dist_fields[i].id & fields) {
			memcpy(key_mem + new_off, key_mem + off, size);
			new_off += size;
		}
		off += size;
	}
}

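/* Example: with fields == DPAA2_ETH_DIST_IPSRC | DPAA2_ETH_DIST_IPDST,
 * dpaa2_eth_cls_trim_rule() moves the two 4-byte IP addresses from
 * offsets 16 and 20 in the full-sized key down to offsets 0 and 4,
 * discarding the unused fields around them.
 */
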
/* Set Rx distribution (hash or flow classification) key
 * flags is a combination of RXH_ bits
 */
static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
				  enum dpaa2_eth_rx_dist type, u64 flags)
{
	struct device *dev = net_dev->dev.parent;
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpkg_profile_cfg cls_cfg;
	u32 rx_hash_fields = 0;
	dma_addr_t key_iova;
	u8 *dma_mem;
	int i;
	int err = 0;

	memset(&cls_cfg, 0, sizeof(cls_cfg));

	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
		struct dpkg_extract *key =
			&cls_cfg.extracts[cls_cfg.num_extracts];

		/* For both Rx hashing and classification keys
		 * we set only the selected fields.
		 */
		if (!(flags & dist_fields[i].id))
			continue;
		if (type == DPAA2_ETH_RX_DIST_HASH)
			rx_hash_fields |= dist_fields[i].rxnfc_field;

		if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
			dev_err(dev, "error adding key extraction rule, too many rules?\n");
			return -E2BIG;
		}

		key->type = DPKG_EXTRACT_FROM_HDR;
		key->extract.from_hdr.prot = dist_fields[i].cls_prot;
		key->extract.from_hdr.type = DPKG_FULL_FIELD;
		key->extract.from_hdr.field = dist_fields[i].cls_field;
		cls_cfg.num_extracts++;
	}

	dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
	if (!dma_mem)
		return -ENOMEM;

	err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
	if (err) {
		dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
		goto free_key;
	}

	/* Prepare for setting the rx dist */
	key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, key_iova)) {
		dev_err(dev, "DMA mapping failed\n");
		err = -ENOMEM;
		goto free_key;
	}

	if (type == DPAA2_ETH_RX_DIST_HASH) {
		if (dpaa2_eth_has_legacy_dist(priv))
			err = dpaa2_eth_config_legacy_hash_key(priv, key_iova);
		else
			err = dpaa2_eth_config_hash_key(priv, key_iova);
	} else {
		err = dpaa2_eth_config_cls_key(priv, key_iova);
	}

	dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
			 DMA_TO_DEVICE);
	if (!err && type == DPAA2_ETH_RX_DIST_HASH)
		priv->rx_hash_fields = rx_hash_fields;

free_key:
	kfree(dma_mem);
	return err;
}

int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	u64 key = 0;
	int i;

	if (!dpaa2_eth_hash_enabled(priv))
		return -EOPNOTSUPP;

	for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
		if (dist_fields[i].rxnfc_field & flags)
			key |= dist_fields[i].id;

	return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, key);
}

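/* Usage sketch (assumes the caller is the ethtool rxnfc path in
 * dpaa2-ethtool.c): dpaa2_eth_set_hash() takes a mask of RXH_* bits,
 * e.g. RXH_IP_SRC | RXH_IP_DST is translated above into
 * DPAA2_ETH_DIST_IPSRC | DPAA2_ETH_DIST_IPDST before the key is
 * rebuilt by dpaa2_eth_set_dist_key().
 */
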
int dpaa2_eth_set_cls(struct net_device *net_dev, u64 flags)
{
	return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_CLS, flags);
}

static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv *priv)
{
	struct device *dev = priv->net_dev->dev.parent;
	int err;

	/* Check if we actually support Rx flow classification */
	if (dpaa2_eth_has_legacy_dist(priv)) {
		dev_dbg(dev, "Rx cls not supported by current MC version\n");
		return -EOPNOTSUPP;
	}

	if (!dpaa2_eth_fs_enabled(priv)) {
		dev_dbg(dev, "Rx cls disabled in DPNI options\n");
		return -EOPNOTSUPP;
	}

	if (!dpaa2_eth_hash_enabled(priv)) {
		dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n");
		return -EOPNOTSUPP;
	}

	/* If there is no support for masking in the classification table,
	 * we don't set a default key, as it will depend on the rules
	 * added by the user at runtime.
	 */
	if (!dpaa2_eth_fs_mask_enabled(priv))
		goto out;

	err = dpaa2_eth_set_cls(priv->net_dev, DPAA2_ETH_DIST_ALL);
	if (err)
		return err;

out:
	priv->rx_cls_enabled = 1;

	return 0;
}

/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
 * frame queues and channels
 */
static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv)
{
	struct net_device *net_dev = priv->net_dev;
	struct device *dev = net_dev->dev.parent;
	struct dpni_pools_cfg pools_params;
	struct dpni_error_cfg err_cfg;
	int err = 0;
	int i;

	pools_params.num_dpbp = 1;
	pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
	pools_params.pools[0].backup_pool = 0;
	pools_params.pools[0].buffer_size = priv->rx_buf_size;
	err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
	if (err) {
		dev_err(dev, "dpni_set_pools() failed\n");
		return err;
	}

	/* Have the interface implicitly distribute traffic based on
	 * the default hash key
	 */
	err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT);
	if (err && err != -EOPNOTSUPP)
		dev_err(dev, "Failed to configure hashing\n");

	/* Configure the flow classification key; it includes all
	 * supported header fields and cannot be modified at runtime
	 */
	err = dpaa2_eth_set_default_cls(priv);
	if (err && err != -EOPNOTSUPP)
		dev_err(dev, "Failed to configure Rx classification key\n");

	/* Configure handling of error frames */
	err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
	err_cfg.set_frame_annotation = 1;
	err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
	err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
				       &err_cfg);
	if (err) {
		dev_err(dev, "dpni_set_errors_behavior failed\n");
		return err;
	}

	/* Configure Rx and Tx conf queues to generate CDANs */
	for (i = 0; i < priv->num_fqs; i++) {
		switch (priv->fq[i].type) {
		case DPAA2_RX_FQ:
			err = dpaa2_eth_setup_rx_flow(priv, &priv->fq[i]);
			break;
		case DPAA2_TX_CONF_FQ:
			err = dpaa2_eth_setup_tx_flow(priv, &priv->fq[i]);
			break;
		case DPAA2_RX_ERR_FQ:
			err = setup_rx_err_flow(priv, &priv->fq[i]);
			break;
		default:
			dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
			return -EINVAL;
		}
		if (err)
			return err;
	}

	err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
			    DPNI_QUEUE_TX, &priv->tx_qdid);
	if (err) {
		dev_err(dev, "dpni_get_qdid() failed\n");
		return err;
	}

	return 0;
}

/* Allocate rings for storing incoming frame descriptors */
static int dpaa2_eth_alloc_rings(struct dpaa2_eth_priv *priv)
{
	struct net_device *net_dev = priv->net_dev;
	struct device *dev = net_dev->dev.parent;
	int i;

	for (i = 0; i < priv->num_channels; i++) {
		priv->channel[i]->store =
			dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
		if (!priv->channel[i]->store) {
			netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
			goto err_ring;
		}
	}

	return 0;

err_ring:
	for (i = 0; i < priv->num_channels; i++) {
		if (!priv->channel[i]->store)
			break;
		dpaa2_io_store_destroy(priv->channel[i]->store);
	}

	return -ENOMEM;
}

static void dpaa2_eth_free_rings(struct dpaa2_eth_priv *priv)
{
	int i;

	for (i = 0; i < priv->num_channels; i++)
		dpaa2_io_store_destroy(priv->channel[i]->store);
}

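/* Pick the MAC address for the net device, in decreasing order of
 * precedence:
 * 1. the port address left behind by firmware/bootloader (DPMAC);
 * 2. the address already configured on the DPNI object;
 * 3. a randomly generated address, written back to the DPNI so it
 *    stays stable until the next reset.
 */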
static int dpaa2_eth_set_mac_addr(struct dpaa2_eth_priv *priv)
{
	struct net_device *net_dev = priv->net_dev;
	struct device *dev = net_dev->dev.parent;
	u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
	int err;

	/* Get firmware address, if any */
	err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
	if (err) {
		dev_err(dev, "dpni_get_port_mac_addr() failed\n");
		return err;
	}

	/* Get DPNI attributes address, if any */
	err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
					dpni_mac_addr);
	if (err) {
		dev_err(dev, "dpni_get_primary_mac_addr() failed\n");
		return err;
	}

	/* First check if firmware has any address configured by bootloader */
	if (!is_zero_ether_addr(mac_addr)) {
		/* If the DPMAC addr != DPNI addr, update it */
		if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
			err = dpni_set_primary_mac_addr(priv->mc_io, 0,
							priv->mc_token,
							mac_addr);
			if (err) {
				dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
				return err;
			}
		}
		memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
	} else if (is_zero_ether_addr(dpni_mac_addr)) {
		/* No MAC address configured, fill in net_dev->dev_addr
		 * with a random one
		 */
		eth_hw_addr_random(net_dev);
		dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");

		err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
						net_dev->dev_addr);
		if (err) {
			dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
			return err;
		}

		/* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
		 * practical purposes, this will be our "permanent" mac address,
		 * at least until the next reboot. This move will also permit
		 * register_netdevice() to properly fill in net_dev->perm_addr.
		 */
		net_dev->addr_assign_type = NET_ADDR_PERM;
	} else {
		/* NET_ADDR_PERM is default, all we have to do is
		 * fill in the device addr.
		 */
		memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
	}

	return 0;
}

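/* One-time net device setup: ndo/ethtool ops, MAC and broadcast filter
 * entries, MTU ceiling, real Rx/Tx queue counts and feature flags.
 * Called once from probe, before register_netdev().
 */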
static int dpaa2_eth_netdev_init(struct net_device *net_dev)
{
	struct device *dev = net_dev->dev.parent;
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	u32 options = priv->dpni_attrs.options;
	u64 supported = 0, not_supported = 0;
	u8 bcast_addr[ETH_ALEN];
	u8 num_queues;
	int err;

	net_dev->netdev_ops = &dpaa2_eth_ops;
	net_dev->ethtool_ops = &dpaa2_ethtool_ops;

	err = dpaa2_eth_set_mac_addr(priv);
	if (err)
		return err;

	/* Explicitly add the broadcast address to the MAC filtering table */
	eth_broadcast_addr(bcast_addr);
	err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
	if (err) {
		dev_err(dev, "dpni_add_mac_addr() failed\n");
		return err;
	}

	/* Set MTU upper limit; lower limit is 68B (default value) */
	net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
	err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
					DPAA2_ETH_MFL);
	if (err) {
		dev_err(dev, "dpni_set_max_frame_length() failed\n");
		return err;
	}

	/* Set actual number of queues in the net device */
	num_queues = dpaa2_eth_queue_count(priv);
	err = netif_set_real_num_tx_queues(net_dev, num_queues);
	if (err) {
		dev_err(dev, "netif_set_real_num_tx_queues() failed\n");
		return err;
	}
	err = netif_set_real_num_rx_queues(net_dev, num_queues);
	if (err) {
		dev_err(dev, "netif_set_real_num_rx_queues() failed\n");
		return err;
	}

	/* Capabilities listing */
	supported |= IFF_LIVE_ADDR_CHANGE;

	if (options & DPNI_OPT_NO_MAC_FILTER)
		not_supported |= IFF_UNICAST_FLT;
	else
		supported |= IFF_UNICAST_FLT;

	net_dev->priv_flags |= supported;
	net_dev->priv_flags &= ~not_supported;

	/* Features */
	net_dev->features = NETIF_F_RXCSUM |
			    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_SG | NETIF_F_HIGHDMA |
			    NETIF_F_LLTX | NETIF_F_HW_TC;
	net_dev->hw_features = net_dev->features;

	return 0;
}

static int dpaa2_eth_poll_link_state(void *arg)
{
	struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
	int err;

	while (!kthread_should_stop()) {
		err = dpaa2_eth_link_state_update(priv);
		if (unlikely(err))
			return err;

		msleep(DPAA2_ETH_LINK_STATE_REFRESH);
	}

	return 0;
}

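/* Find the DPMAC object this DPNI is wired to on the fsl-mc bus and
 * connect to it. If the MAC has not been discovered on the bus yet,
 * fsl_mc_get_endpoint() returns -EPROBE_DEFER, which propagates out of
 * dpaa2_eth_probe() so the whole probe is retried later.
 */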
static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
{
	struct fsl_mc_device *dpni_dev, *dpmac_dev;
	struct dpaa2_mac *mac;
	int err;

	dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent);
	dpmac_dev = fsl_mc_get_endpoint(dpni_dev);

	if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER)
		return PTR_ERR(dpmac_dev);

	if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
		return 0;

	mac = kzalloc(sizeof(struct dpaa2_mac), GFP_KERNEL);
	if (!mac)
		return -ENOMEM;

	mac->mc_dev = dpmac_dev;
	mac->mc_io = priv->mc_io;
	mac->net_dev = priv->net_dev;

	err = dpaa2_mac_open(mac);
	if (err)
		goto err_free_mac;
	priv->mac = mac;

	if (dpaa2_eth_is_type_phy(priv)) {
		err = dpaa2_mac_connect(mac);
		if (err) {
			netdev_err(priv->net_dev, "Error connecting to the MAC endpoint\n");
			goto err_close_mac;
		}
	}

	return 0;

err_close_mac:
	dpaa2_mac_close(mac);
	priv->mac = NULL;
err_free_mac:
	kfree(mac);
	return err;
}

static void dpaa2_eth_disconnect_mac(struct dpaa2_eth_priv *priv)
{
	if (dpaa2_eth_is_type_phy(priv))
		dpaa2_mac_disconnect(priv->mac);

	dpaa2_mac_close(priv->mac);
	kfree(priv->mac);
	priv->mac = NULL;
}

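/* Threaded DPNI interrupt handler. Link state changes trigger a link
 * update; endpoint changes (a DPMAC appearing on or vanishing from the
 * other end of this DPNI) refresh the MAC address and Tx FQIDs, then
 * connect or disconnect the MAC under the rtnl lock.
 */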
static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
{
	u32 status = ~0;
	struct device *dev = (struct device *)arg;
	struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
	struct net_device *net_dev = dev_get_drvdata(dev);
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int err;

	err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
				  DPNI_IRQ_INDEX, &status);
	if (unlikely(err)) {
		netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
		return IRQ_HANDLED;
	}

	if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
		dpaa2_eth_link_state_update(netdev_priv(net_dev));

	if (status & DPNI_IRQ_EVENT_ENDPOINT_CHANGED) {
		dpaa2_eth_set_mac_addr(netdev_priv(net_dev));
		dpaa2_eth_update_tx_fqids(priv);

		rtnl_lock();
		if (dpaa2_eth_has_mac(priv))
			dpaa2_eth_disconnect_mac(priv);
		else
			dpaa2_eth_connect_mac(priv);
		rtnl_unlock();
	}

	return IRQ_HANDLED;
}

static int dpaa2_eth_setup_irqs(struct fsl_mc_device *ls_dev)
{
	int err = 0;
	struct fsl_mc_device_irq *irq;

	err = fsl_mc_allocate_irqs(ls_dev);
	if (err) {
		dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
		return err;
	}

	irq = ls_dev->irqs[0];
	err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
					NULL, dpni_irq0_handler_thread,
					IRQF_NO_SUSPEND | IRQF_ONESHOT,
					dev_name(&ls_dev->dev), &ls_dev->dev);
	if (err < 0) {
		dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err);
		goto free_mc_irq;
	}

	err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
				DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED |
				DPNI_IRQ_EVENT_ENDPOINT_CHANGED);
	if (err < 0) {
		dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
		goto free_irq;
	}

	err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
				  DPNI_IRQ_INDEX, 1);
	if (err < 0) {
		dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err);
		goto free_irq;
	}

	return 0;

free_irq:
	devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
free_mc_irq:
	fsl_mc_free_irqs(ls_dev);

	return err;
}

static void dpaa2_eth_add_ch_napi(struct dpaa2_eth_priv *priv)
{
	int i;
	struct dpaa2_eth_channel *ch;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		/* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
		netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
			       NAPI_POLL_WEIGHT);
	}
}

static void dpaa2_eth_del_ch_napi(struct dpaa2_eth_priv *priv)
{
	int i;
	struct dpaa2_eth_channel *ch;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		netif_napi_del(&ch->napi);
	}
}

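/* Probe sequence: MC portal, DPNI/DPIO/DPBP setup, queue binding, NAPI
 * and per-cpu state, netdev init, checksum offload, rings, IRQs (with a
 * polling-thread fallback), MAC connect, devlink registration, and
 * finally register_netdev(). The error labels unwind in reverse order.
 */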
static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
{
	struct device *dev;
	struct net_device *net_dev = NULL;
	struct dpaa2_eth_priv *priv = NULL;
	int err = 0;

	dev = &dpni_dev->dev;

	/* Net device */
	net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_NETDEV_QUEUES);
	if (!net_dev) {
		dev_err(dev, "alloc_etherdev_mq() failed\n");
		return -ENOMEM;
	}

	SET_NETDEV_DEV(net_dev, dev);
	dev_set_drvdata(dev, net_dev);

	priv = netdev_priv(net_dev);
	priv->net_dev = net_dev;

	priv->iommu_domain = iommu_get_domain_for_dev(dev);

	priv->tx_tstamp_type = HWTSTAMP_TX_OFF;
	priv->rx_tstamp = false;

	priv->dpaa2_ptp_wq = alloc_workqueue("dpaa2_ptp_wq", 0, 0);
	if (!priv->dpaa2_ptp_wq) {
		err = -ENOMEM;
		goto err_wq_alloc;
	}

	INIT_WORK(&priv->tx_onestep_tstamp, dpaa2_eth_tx_onestep_tstamp);

	skb_queue_head_init(&priv->tx_skbs);

	/* Obtain a MC portal */
	err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
				     &priv->mc_io);
	if (err) {
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_err(dev, "MC portal allocation failed\n");
		goto err_portal_alloc;
	}

	/* MC objects initialization and configuration */
	err = dpaa2_eth_setup_dpni(dpni_dev);
	if (err)
		goto err_dpni_setup;

	err = dpaa2_eth_setup_dpio(priv);
	if (err)
		goto err_dpio_setup;

	dpaa2_eth_setup_fqs(priv);

	err = dpaa2_eth_setup_dpbp(priv);
	if (err)
		goto err_dpbp_setup;

	err = dpaa2_eth_bind_dpni(priv);
	if (err)
		goto err_bind;

	/* Add a NAPI context for each channel */
	dpaa2_eth_add_ch_napi(priv);

	/* Percpu statistics */
	priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
	if (!priv->percpu_stats) {
		dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
		err = -ENOMEM;
		goto err_alloc_percpu_stats;
	}
	priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
	if (!priv->percpu_extras) {
		dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
		err = -ENOMEM;
		goto err_alloc_percpu_extras;
	}

	priv->sgt_cache = alloc_percpu(*priv->sgt_cache);
	if (!priv->sgt_cache) {
		dev_err(dev, "alloc_percpu(sgt_cache) failed\n");
		err = -ENOMEM;
		goto err_alloc_sgt_cache;
	}

	err = dpaa2_eth_netdev_init(net_dev);
	if (err)
		goto err_netdev_init;

	/* Configure checksum offload based on current interface flags */
	err = dpaa2_eth_set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
	if (err)
		goto err_csum;

	err = dpaa2_eth_set_tx_csum(priv,
				    !!(net_dev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
	if (err)
		goto err_csum;

	err = dpaa2_eth_alloc_rings(priv);
	if (err)
		goto err_alloc_rings;

#ifdef CONFIG_FSL_DPAA2_ETH_DCB
	if (dpaa2_eth_has_pause_support(priv) && priv->vlan_cls_enabled) {
		priv->dcbx_mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
		net_dev->dcbnl_ops = &dpaa2_eth_dcbnl_ops;
	} else {
		dev_dbg(dev, "PFC not supported\n");
	}
#endif

	err = dpaa2_eth_setup_irqs(dpni_dev);
	if (err) {
		netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
		priv->poll_thread = kthread_run(dpaa2_eth_poll_link_state, priv,
						"%s_poll_link", net_dev->name);
		if (IS_ERR(priv->poll_thread)) {
			dev_err(dev, "Error starting polling thread\n");
			goto err_poll_thread;
		}
		priv->do_link_poll = true;
	}

	err = dpaa2_eth_connect_mac(priv);
	if (err)
		goto err_connect_mac;

	err = dpaa2_eth_dl_register(priv);
	if (err)
		goto err_dl_register;

	err = dpaa2_eth_dl_traps_register(priv);
	if (err)
		goto err_dl_trap_register;

	err = dpaa2_eth_dl_port_add(priv);
	if (err)
		goto err_dl_port_add;

	err = register_netdev(net_dev);
	if (err < 0) {
		dev_err(dev, "register_netdev() failed\n");
		goto err_netdev_reg;
	}

#ifdef CONFIG_DEBUG_FS
	dpaa2_dbg_add(priv);
#endif

	dev_info(dev, "Probed interface %s\n", net_dev->name);
	return 0;

err_netdev_reg:
	dpaa2_eth_dl_port_del(priv);
err_dl_port_add:
	dpaa2_eth_dl_traps_unregister(priv);
err_dl_trap_register:
	dpaa2_eth_dl_unregister(priv);
err_dl_register:
	dpaa2_eth_disconnect_mac(priv);
err_connect_mac:
	if (priv->do_link_poll)
		kthread_stop(priv->poll_thread);
	else
		fsl_mc_free_irqs(dpni_dev);
err_poll_thread:
	dpaa2_eth_free_rings(priv);
err_alloc_rings:
err_csum:
err_netdev_init:
	free_percpu(priv->sgt_cache);
err_alloc_sgt_cache:
	free_percpu(priv->percpu_extras);
err_alloc_percpu_extras:
	free_percpu(priv->percpu_stats);
err_alloc_percpu_stats:
	dpaa2_eth_del_ch_napi(priv);
err_bind:
	dpaa2_eth_free_dpbp(priv);
err_dpbp_setup:
	dpaa2_eth_free_dpio(priv);
err_dpio_setup:
	dpaa2_eth_free_dpni(priv);
err_dpni_setup:
	fsl_mc_portal_free(priv->mc_io);
err_portal_alloc:
	destroy_workqueue(priv->dpaa2_ptp_wq);
err_wq_alloc:
	dev_set_drvdata(dev, NULL);
	free_netdev(net_dev);

	return err;
}

static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
{
	struct device *dev;
	struct net_device *net_dev;
	struct dpaa2_eth_priv *priv;

	dev = &ls_dev->dev;
	net_dev = dev_get_drvdata(dev);
	priv = netdev_priv(net_dev);

#ifdef CONFIG_DEBUG_FS
	dpaa2_dbg_remove(priv);
#endif
	rtnl_lock();
	dpaa2_eth_disconnect_mac(priv);
	rtnl_unlock();

	unregister_netdev(net_dev);

	dpaa2_eth_dl_port_del(priv);
	dpaa2_eth_dl_traps_unregister(priv);
	dpaa2_eth_dl_unregister(priv);

	if (priv->do_link_poll)
		kthread_stop(priv->poll_thread);
	else
		fsl_mc_free_irqs(ls_dev);

	dpaa2_eth_free_rings(priv);
	free_percpu(priv->sgt_cache);
	free_percpu(priv->percpu_stats);
	free_percpu(priv->percpu_extras);

	dpaa2_eth_del_ch_napi(priv);
	dpaa2_eth_free_dpbp(priv);
	dpaa2_eth_free_dpio(priv);
	dpaa2_eth_free_dpni(priv);

	fsl_mc_portal_free(priv->mc_io);

	/* Destroy the PTP workqueue allocated at probe time, mirroring the
	 * probe error path; also log before free_netdev() so net_dev is not
	 * dereferenced after it has been freed.
	 */
	destroy_workqueue(priv->dpaa2_ptp_wq);

	dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);

	free_netdev(net_dev);

	return 0;
}

static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpni",
	},
	{ .vendor = 0x0 }
};
MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);

static struct fsl_mc_driver dpaa2_eth_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.owner = THIS_MODULE,
	},
	.probe = dpaa2_eth_probe,
	.remove = dpaa2_eth_remove,
	.match_id_table = dpaa2_eth_match_id_table
};

static int __init dpaa2_eth_driver_init(void)
{
	int err;

	dpaa2_eth_dbg_init();
	err = fsl_mc_driver_register(&dpaa2_eth_driver);
	if (err) {
		dpaa2_eth_dbg_exit();
		return err;
	}

	return 0;
}

static void __exit dpaa2_eth_driver_exit(void)
{
	dpaa2_eth_dbg_exit();
	fsl_mc_driver_unregister(&dpaa2_eth_driver);
}

module_init(dpaa2_eth_driver_init);
module_exit(dpaa2_eth_driver_exit);