/* Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2016-2017 NXP
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/kthread.h>
#include <linux/iommu.h>

#include "../../fsl-mc/include/mc.h"
#include "dpaa2-eth.h"

/* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa2 files
 * using trace events only need to #include <trace/events/sched.h>
 */
#define CREATE_TRACE_POINTS
#include "dpaa2-eth-trace.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");

const char dpaa2_eth_drv_version[] = "0.1";

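/* Buffer addresses found in frame descriptors are IOVAs; when an IOMMU
 * domain is attached, translate them to physical addresses first, otherwise
 * the IOVA is already the physical address.
 */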
static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
				dma_addr_t iova_addr)
{
	phys_addr_t phys_addr;

	phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;

	return phys_to_virt(phys_addr);
}

static void validate_rx_csum(struct dpaa2_eth_priv *priv,
			     u32 fd_status,
			     struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* HW checksum validation is disabled, nothing to do here */
	if (!(priv->net_dev->features & NETIF_F_RXCSUM))
		return;

	/* Read checksum validation bits */
	if (!((fd_status & DPAA2_FAS_L3CV) &&
	      (fd_status & DPAA2_FAS_L4CV)))
		return;

	/* Inform the stack there's no need to compute L3/L4 csum anymore */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

/* Free a received FD.
 * Not to be used for Tx conf FDs or on any other paths.
 */
static void free_rx_fd(struct dpaa2_eth_priv *priv,
		       const struct dpaa2_fd *fd,
		       void *vaddr)
{
	struct device *dev = priv->net_dev->dev.parent;
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	u8 fd_format = dpaa2_fd_get_format(fd);
	struct dpaa2_sg_entry *sgt;
	void *sg_vaddr;
	int i;

	/* If single buffer frame, just free the data buffer */
	if (fd_format == dpaa2_fd_single)
		goto free_buf;
	else if (fd_format != dpaa2_fd_sg)
		/* We don't support any other format */
		return;

	/* For S/G frames, we first need to free all SG entries */
	sgt = vaddr + dpaa2_fd_get_offset(fd);
	for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
		addr = dpaa2_sg_get_addr(&sgt[i]);
		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
		dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
				 DMA_FROM_DEVICE);

		skb_free_frag(sg_vaddr);
		if (dpaa2_sg_is_final(&sgt[i]))
			break;
	}

free_buf:
	skb_free_frag(vaddr);
}

/* Build a linear skb based on a single-buffer frame descriptor */
static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv,
					struct dpaa2_eth_channel *ch,
					const struct dpaa2_fd *fd,
					void *fd_vaddr)
{
	struct sk_buff *skb = NULL;
	u16 fd_offset = dpaa2_fd_get_offset(fd);
	u32 fd_length = dpaa2_fd_get_len(fd);

	skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_SIZE +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, fd_offset);
	skb_put(skb, fd_length);

	ch->buf_count--;

	return skb;
}

/* Build a non-linear (fragmented) skb based on a S/G table */
static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
				      struct dpaa2_eth_channel *ch,
				      struct dpaa2_sg_entry *sgt)
{
	struct sk_buff *skb = NULL;
	struct device *dev = priv->net_dev->dev.parent;
	void *sg_vaddr;
	dma_addr_t sg_addr;
	u16 sg_offset;
	u32 sg_length;
	struct page *page, *head_page;
	int page_offset;
	int i;

	for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
		struct dpaa2_sg_entry *sge = &sgt[i];

		/* NOTE: We only support SG entries in dpaa2_sg_single format,
		 * but this is the only format we may receive from HW anyway
		 */

		/* Get the address and length from the S/G entry */
		sg_addr = dpaa2_sg_get_addr(sge);
		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
		dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
				 DMA_FROM_DEVICE);

		sg_length = dpaa2_sg_get_len(sge);

		if (i == 0) {
			/* We build the skb around the first data buffer */
			skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_SIZE +
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
			if (unlikely(!skb))
				return NULL;

			sg_offset = dpaa2_sg_get_offset(sge);
			skb_reserve(skb, sg_offset);
			skb_put(skb, sg_length);
		} else {
			/* Rest of the data buffers are stored as skb frags */
			page = virt_to_page(sg_vaddr);
			head_page = virt_to_head_page(sg_vaddr);

			/* Offset in page (which may be compound).
			 * Data in subsequent SG entries is stored from the
			 * beginning of the buffer, so we don't need to add the
			 * sg_offset.
			 */
			page_offset = ((unsigned long)sg_vaddr &
				(PAGE_SIZE - 1)) +
				(page_address(page) - page_address(head_page));

			skb_add_rx_frag(skb, i - 1, head_page, page_offset,
					sg_length, DPAA2_ETH_RX_BUF_SIZE);
		}

		if (dpaa2_sg_is_final(sge))
			break;
	}

	/* Count all data buffers + SG table buffer */
	ch->buf_count -= i + 2;

	return skb;
}

/* Main Rx frame processing routine */
static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
			 struct dpaa2_eth_channel *ch,
			 const struct dpaa2_fd *fd,
			 struct napi_struct *napi)
{
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	u8 fd_format = dpaa2_fd_get_format(fd);
	void *vaddr;
	struct sk_buff *skb;
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_drv_stats *percpu_extras;
	struct device *dev = priv->net_dev->dev.parent;
	struct dpaa2_fas *fas;
	void *buf_data;
	u32 status = 0;

	/* Tracing point */
	trace_dpaa2_rx_fd(priv->net_dev, fd);

	vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
	dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE);

	fas = dpaa2_get_fas(vaddr);
	prefetch(fas);
	buf_data = vaddr + dpaa2_fd_get_offset(fd);
	prefetch(buf_data);

	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	percpu_extras = this_cpu_ptr(priv->percpu_extras);

	if (fd_format == dpaa2_fd_single) {
		skb = build_linear_skb(priv, ch, fd, vaddr);
	} else if (fd_format == dpaa2_fd_sg) {
		skb = build_frag_skb(priv, ch, buf_data);
		skb_free_frag(vaddr);
		percpu_extras->rx_sg_frames++;
		percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
	} else {
		/* We don't support any other format */
		goto err_frame_format;
	}

	if (unlikely(!skb))
		goto err_build_skb;

	prefetch(skb->data);

	/* Check if we need to validate the L4 csum */
	if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
		status = le32_to_cpu(fas->status);
		validate_rx_csum(priv, status, skb);
	}

	skb->protocol = eth_type_trans(skb, priv->net_dev);

	percpu_stats->rx_packets++;
	percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);

	napi_gro_receive(napi, skb);

	return;

err_build_skb:
	free_rx_fd(priv, fd, vaddr);
err_frame_format:
	percpu_stats->rx_dropped++;
}

/* Consume all frames pull-dequeued into the store. This is the simplest way to
 * make sure we don't accidentally issue another volatile dequeue which would
 * overwrite (leak) frames already in the store.
 *
 * Observance of NAPI budget is not our concern, leaving that to the caller.
 */
static int consume_frames(struct dpaa2_eth_channel *ch)
{
	struct dpaa2_eth_priv *priv = ch->priv;
	struct dpaa2_eth_fq *fq;
	struct dpaa2_dq *dq;
	const struct dpaa2_fd *fd;
	int cleaned = 0;
	int is_last;

	do {
		dq = dpaa2_io_store_next(ch->store, &is_last);
		if (unlikely(!dq)) {
			/* If we're here, we *must* have placed a
			 * volatile dequeue command, so keep reading through
			 * the store until we get some sort of valid response
			 * token (either a valid frame or an "empty dequeue")
			 */
			continue;
		}

		fd = dpaa2_dq_fd(dq);
		fq = (struct dpaa2_eth_fq *)dpaa2_dq_fqd_ctx(dq);
		fq->stats.frames++;

		fq->consume(priv, ch, fd, &ch->napi);
		cleaned++;
	} while (!is_last);

	return cleaned;
}

/* Create a frame descriptor based on a fragmented skb */
static int build_sg_fd(struct dpaa2_eth_priv *priv,
		       struct sk_buff *skb,
		       struct dpaa2_fd *fd)
{
	struct device *dev = priv->net_dev->dev.parent;
	void *sgt_buf = NULL;
	dma_addr_t addr;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	struct dpaa2_sg_entry *sgt;
	int i, err;
	int sgt_buf_size;
	struct scatterlist *scl, *crt_scl;
	int num_sg;
	int num_dma_bufs;
	struct dpaa2_eth_swa *swa;
	struct dpaa2_fas *fas;

	/* Create and map scatterlist.
	 * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
	 * to go beyond nr_frags+1.
	 * Note: We don't support chained scatterlists
	 */
	if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
		return -EINVAL;

	scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
	if (unlikely(!scl))
		return -ENOMEM;

	sg_init_table(scl, nr_frags + 1);
	num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
	num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
	if (unlikely(!num_dma_bufs)) {
		err = -ENOMEM;
		goto dma_map_sg_failed;
	}

	/* Prepare the HW SGT structure */
	sgt_buf_size = priv->tx_data_offset +
		       sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
	sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN, GFP_ATOMIC);
	if (unlikely(!sgt_buf)) {
		err = -ENOMEM;
		goto sgt_buf_alloc_failed;
	}
	sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);

	/* PTA from egress side is passed as is to the confirmation side so
	 * we need to clear some fields here in order to find consistent values
	 * on TX confirmation. We are clearing FAS (Frame Annotation Status)
	 * field from the hardware annotation area
	 */
	fas = dpaa2_get_fas(sgt_buf);
	memset(fas, 0, DPAA2_FAS_SIZE);

	sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);

	/* Fill in the HW SGT structure.
	 *
	 * sgt_buf is zeroed out, so the following fields are implicit
	 * in all sgt entries:
	 *   - offset is 0
	 *   - format is 'dpaa2_sg_single'
	 */
	for_each_sg(scl, crt_scl, num_dma_bufs, i) {
		dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
		dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
	}
	dpaa2_sg_set_final(&sgt[i - 1], true);

	/* Store the skb backpointer in the SGT buffer.
	 * Fit the scatterlist and the number of buffers alongside the
	 * skb backpointer in the software annotation area. We'll need
	 * all of them on Tx Conf.
	 */
	swa = (struct dpaa2_eth_swa *)sgt_buf;
	swa->skb = skb;
	swa->scl = scl;
	swa->num_sg = num_sg;
	swa->num_dma_bufs = num_dma_bufs;

	/* Separately map the SGT buffer */
	addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, addr))) {
		err = -ENOMEM;
		goto dma_map_single_failed;
	}
	dpaa2_fd_set_offset(fd, priv->tx_data_offset);
	dpaa2_fd_set_format(fd, dpaa2_fd_sg);
	dpaa2_fd_set_addr(fd, addr);
	dpaa2_fd_set_len(fd, skb->len);
	dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA |
			  DPAA2_FD_CTRL_PTV1);

	return 0;

dma_map_single_failed:
	kfree(sgt_buf);
sgt_buf_alloc_failed:
	dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
dma_map_sg_failed:
	kfree(scl);
	return err;
}

/* Create a frame descriptor based on a linear skb */
static int build_single_fd(struct dpaa2_eth_priv *priv,
			   struct sk_buff *skb,
			   struct dpaa2_fd *fd)
{
	struct device *dev = priv->net_dev->dev.parent;
	u8 *buffer_start;
	struct dpaa2_fas *fas;
	struct sk_buff **skbh;
	dma_addr_t addr;

	buffer_start = PTR_ALIGN(skb->data - priv->tx_data_offset -
				 DPAA2_ETH_TX_BUF_ALIGN,
				 DPAA2_ETH_TX_BUF_ALIGN);

	/* PTA from egress side is passed as is to the confirmation side so
	 * we need to clear some fields here in order to find consistent values
	 * on TX confirmation. We are clearing FAS (Frame Annotation Status)
	 * field from the hardware annotation area
	 */
	fas = dpaa2_get_fas(buffer_start);
	memset(fas, 0, DPAA2_FAS_SIZE);

	/* Store a backpointer to the skb at the beginning of the buffer
	 * (in the private data area) such that we can release it
	 * on Tx confirm
	 */
	skbh = (struct sk_buff **)buffer_start;
	*skbh = skb;

	addr = dma_map_single(dev, buffer_start,
			      skb_tail_pointer(skb) - buffer_start,
			      DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, addr)))
		return -ENOMEM;

	dpaa2_fd_set_addr(fd, addr);
	dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
	dpaa2_fd_set_len(fd, skb->len);
	dpaa2_fd_set_format(fd, dpaa2_fd_single);
	dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA |
			  DPAA2_FD_CTRL_PTV1);

	return 0;
}

/* FD freeing routine on the Tx path
 *
 * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
 * back-pointed to is also freed.
 * This can be called either from dpaa2_eth_tx_conf() or on the error path of
 * dpaa2_eth_tx().
 * Optionally, return the frame annotation status word (FAS), which needs
 * to be checked if we're on the confirmation path.
 */
static void free_tx_fd(const struct dpaa2_eth_priv *priv,
		       const struct dpaa2_fd *fd,
		       u32 *status)
{
	struct device *dev = priv->net_dev->dev.parent;
	dma_addr_t fd_addr;
	struct sk_buff **skbh, *skb;
	unsigned char *buffer_start;
	int unmap_size;
	struct scatterlist *scl;
	int num_sg, num_dma_bufs;
	struct dpaa2_eth_swa *swa;
	u8 fd_format = dpaa2_fd_get_format(fd);
	struct dpaa2_fas *fas;

	fd_addr = dpaa2_fd_get_addr(fd);
	skbh = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
	fas = dpaa2_get_fas(skbh);

	if (fd_format == dpaa2_fd_single) {
		skb = *skbh;
		buffer_start = (unsigned char *)skbh;
		/* Accessing the skb buffer is safe before dma unmap, because
		 * we didn't map the actual skb shell.
		 */
		dma_unmap_single(dev, fd_addr,
				 skb_tail_pointer(skb) - buffer_start,
				 DMA_BIDIRECTIONAL);
	} else if (fd_format == dpaa2_fd_sg) {
		swa = (struct dpaa2_eth_swa *)skbh;
		skb = swa->skb;
		scl = swa->scl;
		num_sg = swa->num_sg;
		num_dma_bufs = swa->num_dma_bufs;

		/* Unmap the scatterlist */
		dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
		kfree(scl);

		/* Unmap the SGT buffer */
		unmap_size = priv->tx_data_offset +
			     sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs);
		dma_unmap_single(dev, fd_addr, unmap_size, DMA_BIDIRECTIONAL);
	} else {
		/* Unsupported format, mark it as errored and give up */
		if (status)
			*status = ~0;
		return;
	}

	/* Read the status from the Frame Annotation after we unmap the first
	 * buffer but before we free it. The caller function is responsible
	 * for checking the status value.
	 */
	if (status)
		*status = le32_to_cpu(fas->status);

	/* Free SGT buffer kmalloc'ed on tx */
	if (fd_format != dpaa2_fd_single)
		kfree(skbh);

	/* Move on with skb release */
	dev_kfree_skb(skb);
}

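/* ndo_start_xmit: build a single or S/G frame descriptor from the skb and
 * enqueue it on the Tx queue affine to the current cpu, retrying a bounded
 * number of times if the QBMan portal is busy. On enqueue error the FD and
 * skb are freed here; otherwise they are released on Tx confirmation.
 */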
static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpaa2_fd fd;
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_drv_stats *percpu_extras;
	struct dpaa2_eth_fq *fq;
	u16 queue_mapping;
	int err, i;

	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	percpu_extras = this_cpu_ptr(priv->percpu_extras);

	if (unlikely(skb_headroom(skb) < DPAA2_ETH_NEEDED_HEADROOM(priv))) {
		struct sk_buff *ns;

		ns = skb_realloc_headroom(skb, DPAA2_ETH_NEEDED_HEADROOM(priv));
		if (unlikely(!ns)) {
			percpu_stats->tx_dropped++;
			goto err_alloc_headroom;
		}
		dev_kfree_skb(skb);
		skb = ns;
	}

	/* We'll be holding a back-reference to the skb until Tx Confirmation;
	 * we don't want that overwritten by a concurrent Tx with a cloned skb.
	 */
	skb = skb_unshare(skb, GFP_ATOMIC);
	if (unlikely(!skb)) {
		/* skb_unshare() has already freed the skb */
		percpu_stats->tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* Setup the FD fields */
	memset(&fd, 0, sizeof(fd));

	if (skb_is_nonlinear(skb)) {
		err = build_sg_fd(priv, skb, &fd);
		percpu_extras->tx_sg_frames++;
		percpu_extras->tx_sg_bytes += skb->len;
	} else {
		err = build_single_fd(priv, skb, &fd);
	}

	if (unlikely(err)) {
		percpu_stats->tx_dropped++;
		goto err_build_fd;
	}

	/* Tracing point */
	trace_dpaa2_tx_fd(net_dev, &fd);

	/* TxConf FQ selection primarily based on cpu affinity; this is
	 * non-migratable context, so it's safe to call smp_processor_id().
	 */
	queue_mapping = smp_processor_id() % dpaa2_eth_queue_count(priv);
	fq = &priv->fq[queue_mapping];
	for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
		err = dpaa2_io_service_enqueue_qd(NULL, priv->tx_qdid, 0,
						  fq->tx_qdbin, &fd);
		if (err != -EBUSY)
			break;
	}
	percpu_extras->tx_portal_busy += i;
	if (unlikely(err < 0)) {
		percpu_stats->tx_errors++;
		/* Clean up everything, including freeing the skb */
		free_tx_fd(priv, &fd, NULL);
	} else {
		percpu_stats->tx_packets++;
		percpu_stats->tx_bytes += skb->len;
	}

	return NETDEV_TX_OK;

err_build_fd:
err_alloc_headroom:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

/* Tx confirmation frame processing routine */
static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
			      struct dpaa2_eth_channel *ch,
			      const struct dpaa2_fd *fd,
			      struct napi_struct *napi __always_unused)
{
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_drv_stats *percpu_extras;
	u32 status = 0;
	u32 fd_errors;
	bool has_fas_errors = false;

	/* Tracing point */
	trace_dpaa2_tx_conf_fd(priv->net_dev, fd);

	percpu_extras = this_cpu_ptr(priv->percpu_extras);
	percpu_extras->tx_conf_frames++;
	percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd);

	/* Check frame errors in the FD field */
	fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
	if (unlikely(fd_errors)) {
		/* We only check error bits in the FAS field if corresponding
		 * FAERR bit is set in FD and the FAS field is marked as valid
		 */
		has_fas_errors = (fd_errors & DPAA2_FD_CTRL_FAERR) &&
				 !!(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV);
		if (net_ratelimit())
			netdev_dbg(priv->net_dev, "TX frame FD error: %08x\n",
				   fd_errors);
	}

	free_tx_fd(priv, fd, has_fas_errors ? &status : NULL);

	if (likely(!fd_errors))
		return;

	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	/* Tx-conf logically pertains to the egress path. */
	percpu_stats->tx_errors++;

	if (has_fas_errors && net_ratelimit())
		netdev_dbg(priv->net_dev, "TX frame FAS error: %08x\n",
			   status & DPAA2_FAS_TX_ERR_MASK);
}

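/* Toggle Rx L3/L4 checksum validation in hardware through MC commands */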
static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
{
	int err;

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_RX_L3_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev,
			   "dpni_set_offload(RX_L3_CSUM) failed\n");
		return err;
	}

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_RX_L4_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev,
			   "dpni_set_offload(RX_L4_CSUM) failed\n");
		return err;
	}

	return 0;
}

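/* Toggle Tx L3/L4 checksum generation in hardware through MC commands */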
static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
{
	int err;

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_TX_L3_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n");
		return err;
	}

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_TX_L4_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n");
		return err;
	}

	return 0;
}

/* Perform a single release command to add buffers
 * to the specified buffer pool
 */
static int add_bufs(struct dpaa2_eth_priv *priv, u16 bpid)
{
	struct device *dev = priv->net_dev->dev.parent;
	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
	void *buf;
	dma_addr_t addr;
	int i;

	for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
		/* Allocate buffer visible to WRIOP + skb shared info +
		 * alignment padding
		 */
		buf = napi_alloc_frag(DPAA2_ETH_BUF_RAW_SIZE);
		if (unlikely(!buf))
			goto err_alloc;

		buf = PTR_ALIGN(buf, DPAA2_ETH_RX_BUF_ALIGN);

		addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
				      DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dev, addr)))
			goto err_map;

		buf_array[i] = addr;

		/* tracing point */
		trace_dpaa2_eth_buf_seed(priv->net_dev,
					 buf, DPAA2_ETH_BUF_RAW_SIZE,
					 addr, DPAA2_ETH_RX_BUF_SIZE,
					 bpid);
	}

release_bufs:
	/* In case the portal is busy, retry until successful.
	 * The buffer release function would only fail if the QBMan portal
	 * was busy, which implies portal contention (i.e. more CPUs than
	 * portals, i.e. GPPs w/o affine DPIOs). For all practical purposes,
	 * there is little we can realistically do, short of giving up -
	 * in which case we'd risk depleting the buffer pool and never again
	 * receiving the Rx interrupt which would kick-start the refill logic.
	 * So just keep retrying, at the risk of being moved to ksoftirqd.
	 */
	while (dpaa2_io_service_release(NULL, bpid, buf_array, i))
		cpu_relax();
	return i;

err_map:
	skb_free_frag(buf);
err_alloc:
	if (i)
		goto release_bufs;

	return 0;
}

static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
{
	int i, j;
	int new_count;

	/* This is the lazy seeding of Rx buffer pools.
	 * add_bufs() is also used on the Rx hotpath and calls
	 * napi_alloc_frag(). The trouble with that is that it in turn ends up
	 * calling this_cpu_ptr(), which mandates execution in atomic context.
	 * Rather than splitting up the code, do a one-off preempt disable.
	 */
	preempt_disable();
	for (j = 0; j < priv->num_channels; j++) {
		for (i = 0; i < DPAA2_ETH_NUM_BUFS;
		     i += DPAA2_ETH_BUFS_PER_CMD) {
			new_count = add_bufs(priv, bpid);
			priv->channel[j]->buf_count += new_count;

			if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
				preempt_enable();
				return -ENOMEM;
			}
		}
	}
	preempt_enable();

	return 0;
}

/* Drain the specified number of buffers from the DPNI's private buffer pool.
 * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
 */
static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
{
	struct device *dev = priv->net_dev->dev.parent;
	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
	void *vaddr;
	int ret, i;

	do {
		ret = dpaa2_io_service_acquire(NULL, priv->bpid,
					       buf_array, count);
		if (ret < 0) {
			netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
			return;
		}
		for (i = 0; i < ret; i++) {
			/* Same logic as on regular Rx path */
			vaddr = dpaa2_iova_to_virt(priv->iommu_domain,
						   buf_array[i]);
			dma_unmap_single(dev, buf_array[i],
					 DPAA2_ETH_RX_BUF_SIZE,
					 DMA_FROM_DEVICE);
			skb_free_frag(vaddr);
		}
	} while (ret);
}

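/* Empty the buffer pool: drain in full batches first, then one buffer at a
 * time for the remainder, and reset the per-channel buffer counts.
 */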
static void drain_pool(struct dpaa2_eth_priv *priv)
{
	int i;

	drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
	drain_bufs(priv, 1);

	for (i = 0; i < priv->num_channels; i++)
		priv->channel[i]->buf_count = 0;
}

/* Function is called from softirq context only, so we don't need to guard
 * the access to percpu count
 */
static int refill_pool(struct dpaa2_eth_priv *priv,
		       struct dpaa2_eth_channel *ch,
		       u16 bpid)
{
	int new_count;

	if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
		return 0;

	do {
		new_count = add_bufs(priv, bpid);
		if (unlikely(!new_count)) {
			/* Out of memory; abort for now, we'll try later on */
			break;
		}
		ch->buf_count += new_count;
	} while (ch->buf_count < DPAA2_ETH_NUM_BUFS);

	if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
		return -ENOMEM;

	return 0;
}

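/* Issue a volatile (pull) dequeue on the channel, retrying while the QBMan
 * portal is busy; dequeued frames land in the channel's store.
 */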
static int pull_channel(struct dpaa2_eth_channel *ch)
{
	int err;
	int dequeues = -1;

	/* Retry while portal is busy */
	do {
		err = dpaa2_io_service_pull_channel(NULL, ch->ch_id, ch->store);
		dequeues++;
		cpu_relax();
	} while (err == -EBUSY);

	ch->stats.dequeue_portal_busy += dequeues;
	if (unlikely(err))
		ch->stats.pull_err++;

	return err;
}

/* NAPI poll routine
 *
 * Frames are dequeued from the QMan channel associated with this NAPI context.
 * Rx, Tx confirmation and (if configured) Rx error frames all count
 * towards the NAPI budget.
 */
static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
{
	struct dpaa2_eth_channel *ch;
	int cleaned = 0, store_cleaned;
	struct dpaa2_eth_priv *priv;
	int err;

	ch = container_of(napi, struct dpaa2_eth_channel, napi);
	priv = ch->priv;

	while (cleaned < budget) {
		err = pull_channel(ch);
		if (unlikely(err))
			break;

		/* Refill pool if appropriate */
		refill_pool(priv, ch, priv->bpid);

		store_cleaned = consume_frames(ch);
		cleaned += store_cleaned;

		/* If we have enough budget left for a full store,
		 * try a new pull dequeue, otherwise we're done here
		 */
		if (store_cleaned == 0 ||
		    cleaned > budget - DPAA2_ETH_STORE_SIZE)
			break;
	}

	if (cleaned < budget) {
		napi_complete_done(napi, cleaned);
		/* Re-enable data available notifications */
		do {
			err = dpaa2_io_service_rearm(NULL, &ch->nctx);
			cpu_relax();
		} while (err == -EBUSY);
	}

	ch->stats.frames += cleaned;

	return cleaned;
}

static void enable_ch_napi(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_channel *ch;
	int i;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		napi_enable(&ch->napi);
	}
}

static void disable_ch_napi(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_channel *ch;
	int i;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		napi_disable(&ch->napi);
	}
}

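/* Query the link state from firmware and, if it changed, update the netdev
 * carrier status and start/stop the Tx queues accordingly.
 */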
static int link_state_update(struct dpaa2_eth_priv *priv)
{
	struct dpni_link_state state;
	int err;

	err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
	if (unlikely(err)) {
		netdev_err(priv->net_dev,
			   "dpni_get_link_state() failed\n");
		return err;
	}

	/* Check link state; speed / duplex changes are not treated yet */
	if (priv->link_state.up == state.up)
		return 0;

	priv->link_state = state;
	if (state.up) {
		netif_carrier_on(priv->net_dev);
		netif_tx_start_all_queues(priv->net_dev);
	} else {
		netif_tx_stop_all_queues(priv->net_dev);
		netif_carrier_off(priv->net_dev);
	}

	netdev_info(priv->net_dev, "Link Event: state %s\n",
		    state.up ? "up" : "down");

	return 0;
}

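/* ndo_open: seed the buffer pool, enable NAPI and the DPNI, then sync up
 * with the current link state.
 */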
static int dpaa2_eth_open(struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int err;

	err = seed_pool(priv, priv->bpid);
	if (err) {
		/* Not much to do; the buffer pool, though not filled up,
		 * may still contain some buffers which would enable us
		 * to limp on.
		 */
		netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
			   priv->dpbp_dev->obj_desc.id, priv->bpid);
	}

	/* We'll only start the txqs when the link is actually ready; make sure
	 * we don't race against the link up notification, which may come
	 * immediately after dpni_enable();
	 */
	netif_tx_stop_all_queues(net_dev);
	enable_ch_napi(priv);
	/* Also, explicitly set carrier off, otherwise netif_carrier_ok() will
	 * return true and cause 'ip link show' to report the LOWER_UP flag,
	 * even though the link notification wasn't even received.
	 */
	netif_carrier_off(net_dev);

	err = dpni_enable(priv->mc_io, 0, priv->mc_token);
	if (err < 0) {
		netdev_err(net_dev, "dpni_enable() failed\n");
		goto enable_err;
	}

	/* If the DPMAC object has already processed the link up interrupt,
	 * we have to learn the link state ourselves.
	 */
	err = link_state_update(priv);
	if (err < 0) {
		netdev_err(net_dev, "Can't update link state\n");
		goto link_state_err;
	}

	return 0;

link_state_err:
enable_err:
	disable_ch_napi(priv);
	drain_pool(priv);
	return err;
}

/* The DPIO store must be empty when we call this,
 * at the end of every NAPI cycle.
 */
static u32 drain_channel(struct dpaa2_eth_priv *priv,
			 struct dpaa2_eth_channel *ch)
{
	u32 drained = 0, total = 0;

	do {
		pull_channel(ch);
		drained = consume_frames(ch);
		total += drained;
	} while (drained);

	return total;
}

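/* Drain the stores of all channels until no more frames are pending */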
static u32 drain_ingress_frames(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_channel *ch;
	int i;
	u32 drained = 0;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		drained += drain_channel(priv, ch);
	}

	return drained;
}

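/* ndo_stop: stop the Tx queues, disable the DPNI while its egress frames
 * drain, quiesce NAPI, then manually empty the remaining ingress queues
 * and the buffer pool.
 */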
static int dpaa2_eth_stop(struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int dpni_enabled;
	int retries = 10;
	u32 drained;

	netif_tx_stop_all_queues(net_dev);
	netif_carrier_off(net_dev);

	/* Loop while dpni_disable() attempts to drain the egress FQs
	 * and confirm them back to us.
	 */
	do {
		dpni_disable(priv->mc_io, 0, priv->mc_token);
		dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
		if (dpni_enabled)
			/* Allow the hardware some slack */
			msleep(100);
	} while (dpni_enabled && --retries);
	if (!retries) {
		netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
		/* Must go on and disable NAPI nonetheless, so we don't crash at
		 * the next "ifconfig up"
		 */
	}

	/* Wait for NAPI to complete on every core and disable it.
	 * In particular, this will also prevent NAPI from being rescheduled if
	 * a new CDAN is serviced, effectively discarding the CDAN. We therefore
	 * don't even need to disarm the channels, except perhaps for the case
	 * of a huge coalescing value.
	 */
	disable_ch_napi(priv);

	/* Manually drain the Rx and TxConf queues */
	drained = drain_ingress_frames(priv);
	if (drained)
		netdev_dbg(net_dev, "Drained %d frames.\n", drained);

	/* Empty the buffer pool */
	drain_pool(priv);

	return 0;
}

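/* ndo_init: advertise the driver's capabilities and offload features,
 * based on the options of the underlying DPNI object.
 */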
static int dpaa2_eth_init(struct net_device *net_dev)
{
	u64 supported = 0;
	u64 not_supported = 0;
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	u32 options = priv->dpni_attrs.options;

	/* Capabilities listing */
	supported |= IFF_LIVE_ADDR_CHANGE;

	if (options & DPNI_OPT_NO_MAC_FILTER)
		not_supported |= IFF_UNICAST_FLT;
	else
		supported |= IFF_UNICAST_FLT;

	net_dev->priv_flags |= supported;
	net_dev->priv_flags &= ~not_supported;

	/* Features */
	net_dev->features = NETIF_F_RXCSUM |
			    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_SG | NETIF_F_HIGHDMA |
			    NETIF_F_LLTX;
	net_dev->hw_features = net_dev->features;

	return 0;
}

static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct device *dev = net_dev->dev.parent;
	int err;

	err = eth_mac_addr(net_dev, addr);
	if (err < 0) {
		dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
		return err;
	}

	err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
					net_dev->dev_addr);
	if (err) {
		dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
		return err;
	}

	return 0;
}

/* Fill in counters maintained by the GPP driver. These may be different from
 * the hardware counters obtained by ethtool.
 */
static void dpaa2_eth_get_stats(struct net_device *net_dev,
				struct rtnl_link_stats64 *stats)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct rtnl_link_stats64 *percpu_stats;
	u64 *cpustats;
	u64 *netstats = (u64 *)stats;
	int i, j;
	int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);

	for_each_possible_cpu(i) {
		percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
		cpustats = (u64 *)percpu_stats;
		for (j = 0; j < num; j++)
			netstats[j] += cpustats[j];
	}
}

static int dpaa2_eth_change_mtu(struct net_device *net_dev, int mtu)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int err;

	/* Set the maximum Rx frame length to match the transmit side;
	 * account for L2 headers when computing the MFL
	 */
	err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
					(u16)DPAA2_ETH_L2_MAX_FRM(mtu));
	if (err) {
		netdev_err(net_dev, "dpni_set_max_frame_length() failed\n");
		return err;
	}

	net_dev->mtu = mtu;
	return 0;
}

/* Copy mac unicast addresses from @net_dev to @priv.
 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
 */
static void add_uc_hw_addr(const struct net_device *net_dev,
			   struct dpaa2_eth_priv *priv)
{
	struct netdev_hw_addr *ha;
	int err;

	netdev_for_each_uc_addr(ha, net_dev) {
		err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
					ha->addr);
		if (err)
			netdev_warn(priv->net_dev,
				    "Could not add ucast MAC %pM to the filtering table (err %d)\n",
				    ha->addr, err);
	}
}

/* Copy mac multicast addresses from @net_dev to @priv.
 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
 */
static void add_mc_hw_addr(const struct net_device *net_dev,
			   struct dpaa2_eth_priv *priv)
{
	struct netdev_hw_addr *ha;
	int err;

	netdev_for_each_mc_addr(ha, net_dev) {
		err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
					ha->addr);
		if (err)
			netdev_warn(priv->net_dev,
				    "Could not add mcast MAC %pM to the filtering table (err %d)\n",
				    ha->addr, err);
	}
}

static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int uc_count = netdev_uc_count(net_dev);
	int mc_count = netdev_mc_count(net_dev);
	u8 max_mac = priv->dpni_attrs.mac_filter_entries;
	u32 options = priv->dpni_attrs.options;
	u16 mc_token = priv->mc_token;
	struct fsl_mc_io *mc_io = priv->mc_io;
	int err;

	/* Basic sanity checks; these probably indicate a misconfiguration */
	if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0)
		netdev_info(net_dev,
			    "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
			    max_mac);

	/* Force promiscuous if the uc or mc counts exceed our capabilities. */
	if (uc_count > max_mac) {
		netdev_info(net_dev,
			    "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
			    uc_count, max_mac);
		goto force_promisc;
	}
	if (mc_count + uc_count > max_mac) {
		netdev_info(net_dev,
			    "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n",
			    uc_count + mc_count, max_mac);
		goto force_mc_promisc;
	}

	/* Adjust promisc settings due to flag combinations */
	if (net_dev->flags & IFF_PROMISC)
		goto force_promisc;
	if (net_dev->flags & IFF_ALLMULTI) {
		/* First, rebuild unicast filtering table. This should be done
		 * in promisc mode, in order to avoid frame loss while we
		 * progressively add entries to the table.
		 * We don't know whether we had been in promisc already, and
		 * making an MC call to find out is expensive; so set uc promisc
		 * nonetheless.
		 */
		err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
		if (err)
			netdev_warn(net_dev, "Can't set uc promisc\n");

		/* Actual uc table reconstruction. */
		err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
		if (err)
			netdev_warn(net_dev, "Can't clear uc filters\n");
		add_uc_hw_addr(net_dev, priv);

		/* Finally, clear uc promisc and set mc promisc as requested. */
		err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
		if (err)
			netdev_warn(net_dev, "Can't clear uc promisc\n");
		goto force_mc_promisc;
	}

	/* Neither unicast, nor multicast promisc will be on... eventually.
	 * For now, rebuild mac filtering tables while forcing both of them on.
	 */
	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
	if (err)
		netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
	if (err)
		netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);

	/* Actual mac filtering tables reconstruction */
	err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
	if (err)
		netdev_warn(net_dev, "Can't clear mac filters\n");
	add_mc_hw_addr(net_dev, priv);
	add_uc_hw_addr(net_dev, priv);

	/* Now we can clear both ucast and mcast promisc, without risking
	 * to drop legitimate frames anymore.
	 */
	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
	if (err)
		netdev_warn(net_dev, "Can't clear ucast promisc\n");
	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
	if (err)
		netdev_warn(net_dev, "Can't clear mcast promisc\n");

	return;

force_promisc:
	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
	if (err)
		netdev_warn(net_dev, "Can't set ucast promisc\n");
force_mc_promisc:
	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
	if (err)
		netdev_warn(net_dev, "Can't set mcast promisc\n");
}

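/* ndo_set_features: propagate Rx/Tx checksum offload changes to hardware */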
static int dpaa2_eth_set_features(struct net_device *net_dev,
				  netdev_features_t features)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	netdev_features_t changed = features ^ net_dev->features;
	bool enable;
	int err;

	if (changed & NETIF_F_RXCSUM) {
		enable = !!(features & NETIF_F_RXCSUM);
		err = set_rx_csum(priv, enable);
		if (err)
			return err;
	}

	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
		enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
		err = set_tx_csum(priv, enable);
		if (err)
			return err;
	}

	return 0;
}

static const struct net_device_ops dpaa2_eth_ops = {
	.ndo_open = dpaa2_eth_open,
	.ndo_start_xmit = dpaa2_eth_tx,
	.ndo_stop = dpaa2_eth_stop,
	.ndo_init = dpaa2_eth_init,
	.ndo_set_mac_address = dpaa2_eth_set_addr,
	.ndo_get_stats64 = dpaa2_eth_get_stats,
	.ndo_change_mtu = dpaa2_eth_change_mtu,
	.ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
	.ndo_set_features = dpaa2_eth_set_features,
};

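/* Channel Data Availability Notification (CDAN) callback, invoked by the
 * DPIO service when frames become available on a channel: account the event
 * and hand processing over to NAPI.
 */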
static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
{
	struct dpaa2_eth_channel *ch;

	ch = container_of(ctx, struct dpaa2_eth_channel, nctx);

	/* Update NAPI statistics */
	ch->stats.cdan++;

	napi_schedule_irqoff(&ch->napi);
}

/* Allocate and configure a DPCON object */
static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
{
	struct fsl_mc_device *dpcon;
	struct device *dev = priv->net_dev->dev.parent;
	struct dpcon_attr attrs;
	int err;

	err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
				     FSL_MC_POOL_DPCON, &dpcon);
	if (err) {
		dev_info(dev, "Not enough DPCONs, will go on as-is\n");
		return NULL;
	}

	err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
	if (err) {
		dev_err(dev, "dpcon_open() failed\n");
		goto err_open;
	}

	err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
	if (err) {
		dev_err(dev, "dpcon_reset() failed\n");
		goto err_reset;
	}

	err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs);
	if (err) {
		dev_err(dev, "dpcon_get_attributes() failed\n");
		goto err_get_attr;
	}

	err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
	if (err) {
		dev_err(dev, "dpcon_enable() failed\n");
		goto err_enable;
	}

	return dpcon;

err_enable:
err_get_attr:
err_reset:
	dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
err_open:
	fsl_mc_object_free(dpcon);

	return NULL;
}

static void free_dpcon(struct dpaa2_eth_priv *priv,
		       struct fsl_mc_device *dpcon)
{
	dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
	dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
	fsl_mc_object_free(dpcon);
}

static struct dpaa2_eth_channel *
alloc_channel(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_channel *channel;
	struct dpcon_attr attr;
	struct device *dev = priv->net_dev->dev.parent;
	int err;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	channel->dpcon = setup_dpcon(priv);
	if (!channel->dpcon)
		goto err_setup;

	err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
				   &attr);
	if (err) {
		dev_err(dev, "dpcon_get_attributes() failed\n");
		goto err_get_attr;
	}

	channel->dpcon_id = attr.id;
	channel->ch_id = attr.qbman_ch_id;
	channel->priv = priv;

	return channel;

err_get_attr:
	free_dpcon(priv, channel->dpcon);
err_setup:
	kfree(channel);
	return NULL;
}

static void free_channel(struct dpaa2_eth_priv *priv,
			 struct dpaa2_eth_channel *channel)
{
	free_dpcon(priv, channel->dpcon);
	kfree(channel);
}

1502 | /* DPIO setup: allocate and configure QBMan channels, setup core affinity | |
1503 | * and register data availability notifications | |
1504 | */ | |
1505 | static int setup_dpio(struct dpaa2_eth_priv *priv) | |
1506 | { | |
1507 | struct dpaa2_io_notification_ctx *nctx; | |
1508 | struct dpaa2_eth_channel *channel; | |
1509 | struct dpcon_notification_cfg dpcon_notif_cfg; | |
1510 | struct device *dev = priv->net_dev->dev.parent; | |
1511 | int i, err; | |
1512 | ||
1513 | /* We want the ability to spread ingress traffic (RX, TX conf) to as | |
1514 | * many cores as possible, so we need one channel for each core | |
1515 | * (unless there's fewer queues than cores, in which case the extra | |
1516 | * channels would be wasted). | |
1517 | * Allocate one channel per core and register it to the core's | |
1518 | * affine DPIO. If not enough channels are available for all cores | |
1519 | * or if some cores don't have an affine DPIO, there will be no | |
1520 | * ingress frame processing on those cores. | |
1521 | */ | |
1522 | cpumask_clear(&priv->dpio_cpumask); | |
1523 | for_each_online_cpu(i) { | |
1524 | /* Try to allocate a channel */ | |
1525 | channel = alloc_channel(priv); | |
1526 | if (!channel) { | |
1527 | dev_info(dev, | |
1528 | "No affine channel for cpu %d and above\n", i); | |
5206d8d1 | 1529 | err = -ENODEV; |
6e2387e8 IR |
1530 | goto err_alloc_ch; |
1531 | } | |
1532 | ||
1533 | priv->channel[priv->num_channels] = channel; | |
1534 | ||
1535 | nctx = &channel->nctx; | |
1536 | nctx->is_cdan = 1; | |
1537 | nctx->cb = cdan_cb; | |
1538 | nctx->id = channel->ch_id; | |
1539 | nctx->desired_cpu = i; | |
1540 | ||
1541 | /* Register the new context */ | |
1542 | err = dpaa2_io_service_register(NULL, nctx); | |
1543 | if (err) { | |
5206d8d1 | 1544 | dev_dbg(dev, "No affine DPIO for cpu %d\n", i); |
6e2387e8 | 1545 | /* If no affine DPIO for this core, there's probably |
5206d8d1 IR |
1546 | * none available for next cores either. Signal we want |
1547 | * to retry later, in case the DPIO devices weren't | |
1548 | * probed yet. | |
6e2387e8 | 1549 | */ |
5206d8d1 | 1550 | err = -EPROBE_DEFER; |
6e2387e8 IR |
1551 | goto err_service_reg; |
1552 | } | |
1553 | ||
1554 | /* Register DPCON notification with MC */ | |
1555 | dpcon_notif_cfg.dpio_id = nctx->dpio_id; | |
1556 | dpcon_notif_cfg.priority = 0; | |
1557 | dpcon_notif_cfg.user_ctx = nctx->qman64; | |
1558 | err = dpcon_set_notification(priv->mc_io, 0, | |
1559 | channel->dpcon->mc_handle, | |
1560 | &dpcon_notif_cfg); | |
1561 | if (err) { | |
1562 | dev_err(dev, "dpcon_set_notification failed()\n"); | |
1563 | goto err_set_cdan; | |
1564 | } | |
1565 | ||
1566 | /* If we managed to allocate a channel and also found an affine | |
1567 | * DPIO for this core, add it to the final mask | |
1568 | */ | |
1569 | cpumask_set_cpu(i, &priv->dpio_cpumask); | |
1570 | priv->num_channels++; | |
1571 | ||
1572 | /* Stop if we already have enough channels to accommodate all | |
1573 | * RX and TX conf queues | |
1574 | */ | |
1575 | if (priv->num_channels == dpaa2_eth_queue_count(priv)) | |
1576 | break; | |
1577 | } | |
1578 | ||
1579 | return 0; | |
1580 | ||
1581 | err_set_cdan: | |
1582 | dpaa2_io_service_deregister(NULL, nctx); | |
1583 | err_service_reg: | |
1584 | free_channel(priv, channel); | |
1585 | err_alloc_ch: | |
1586 | if (cpumask_empty(&priv->dpio_cpumask)) { | |
1587 | dev_err(dev, "No cpu with an affine DPIO/DPCON\n"); | |
5206d8d1 | 1588 | return err; |
6e2387e8 IR |
1589 | } |
1590 | ||
1591 | dev_info(dev, "Cores %*pbl available for processing ingress traffic\n", | |
1592 | cpumask_pr_args(&priv->dpio_cpumask)); | |
1593 | ||
1594 | return 0; | |
1595 | } | |
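
/* For illustration (hypothetical topology): on an 8-core system where the
 * DPNI was created with 4 queue pairs, the loop above stops after the 4th
 * successful allocation, so dpio_cpumask covers cpus 0-3 only and the
 * remaining cores see no ingress processing for this interface.
 */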

static void free_dpio(struct dpaa2_eth_priv *priv)
{
        int i;
        struct dpaa2_eth_channel *ch;

        /* deregister CDAN notifications and free channels */
        for (i = 0; i < priv->num_channels; i++) {
                ch = priv->channel[i];
                dpaa2_io_service_deregister(NULL, &ch->nctx);
                free_channel(priv, ch);
        }
}

static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv,
                                                    int cpu)
{
        struct device *dev = priv->net_dev->dev.parent;
        int i;

        for (i = 0; i < priv->num_channels; i++)
                if (priv->channel[i]->nctx.desired_cpu == cpu)
                        return priv->channel[i];

        /* We should never get here. Issue a warning and return
         * the first channel, because it's still better than nothing
         */
        dev_warn(dev, "No affine channel found for cpu %d\n", cpu);

        return priv->channel[0];
}

static void set_fq_affinity(struct dpaa2_eth_priv *priv)
{
        struct device *dev = priv->net_dev->dev.parent;
        struct dpaa2_eth_fq *fq;
        int rx_cpu, txc_cpu;
        int i;

        /* For each FQ, pick one channel/CPU to deliver frames to.
         * This may well change at runtime, either through irqbalance or
         * through direct user intervention.
         */
        rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask);

        for (i = 0; i < priv->num_fqs; i++) {
                fq = &priv->fq[i];
                switch (fq->type) {
                case DPAA2_RX_FQ:
                        fq->target_cpu = rx_cpu;
                        rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
                        if (rx_cpu >= nr_cpu_ids)
                                rx_cpu = cpumask_first(&priv->dpio_cpumask);
                        break;
                case DPAA2_TX_CONF_FQ:
                        fq->target_cpu = txc_cpu;
                        txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
                        if (txc_cpu >= nr_cpu_ids)
                                txc_cpu = cpumask_first(&priv->dpio_cpumask);
                        break;
                default:
                        dev_err(dev, "Unknown FQ type: %d\n", fq->type);
                }
                fq->channel = get_affine_channel(priv, fq->target_cpu);
        }
}
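
/* Worked example (hypothetical mask): with dpio_cpumask = {0,2} and four
 * Rx FQs, the round-robin above assigns fq0->cpu0, fq1->cpu2, fq2->cpu0,
 * fq3->cpu2; once cpumask_next() runs past the last set bit (returning
 * >= nr_cpu_ids), we wrap around via cpumask_first().
 */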

static void setup_fqs(struct dpaa2_eth_priv *priv)
{
        int i;

        /* We have one TxConf FQ per Tx flow.
         * The number of Tx and Rx queues is the same.
         * Tx queues come first in the fq array.
         */
        for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
                priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
                priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
                priv->fq[priv->num_fqs++].flowid = (u16)i;
        }

        for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
                priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
                priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
                priv->fq[priv->num_fqs++].flowid = (u16)i;
        }

        /* For each FQ, decide on which core to process incoming frames */
        set_fq_affinity(priv);
}
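
/* Resulting fq[] layout, assuming dpaa2_eth_queue_count() == N:
 *   fq[0]   .. fq[N-1]    TxConf FQs, flowid 0..N-1
 *   fq[N]   .. fq[2N-1]   Rx FQs,     flowid 0..N-1
 * so num_fqs ends up at 2 * N.
 */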

/* Allocate and configure one buffer pool for each interface */
static int setup_dpbp(struct dpaa2_eth_priv *priv)
{
        int err;
        struct fsl_mc_device *dpbp_dev;
        struct device *dev = priv->net_dev->dev.parent;
        struct dpbp_attr dpbp_attrs;

        err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
                                     &dpbp_dev);
        if (err) {
                dev_err(dev, "DPBP device allocation failed\n");
                return err;
        }

        priv->dpbp_dev = dpbp_dev;

        err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
                        &dpbp_dev->mc_handle);
        if (err) {
                dev_err(dev, "dpbp_open() failed\n");
                goto err_open;
        }

        err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle);
        if (err) {
                dev_err(dev, "dpbp_reset() failed\n");
                goto err_reset;
        }

        err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
        if (err) {
                dev_err(dev, "dpbp_enable() failed\n");
                goto err_enable;
        }

        err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
                                  &dpbp_attrs);
        if (err) {
                dev_err(dev, "dpbp_get_attributes() failed\n");
                goto err_get_attr;
        }
        priv->bpid = dpbp_attrs.bpid;

        return 0;

err_get_attr:
        dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
err_enable:
err_reset:
        dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
err_open:
        fsl_mc_object_free(dpbp_dev);

        return err;
}

static void free_dpbp(struct dpaa2_eth_priv *priv)
{
        drain_pool(priv);
        dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
        dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
        fsl_mc_object_free(priv->dpbp_dev);
}

/* Configure the DPNI object this interface is associated with */
static int setup_dpni(struct fsl_mc_device *ls_dev)
{
        struct device *dev = &ls_dev->dev;
        struct dpaa2_eth_priv *priv;
        struct net_device *net_dev;
        struct dpni_buffer_layout buf_layout = {0};
        int err;

        net_dev = dev_get_drvdata(dev);
        priv = netdev_priv(net_dev);

        /* get a handle for the DPNI object */
        err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token);
        if (err) {
                dev_err(dev, "dpni_open() failed\n");
                goto err_open;
        }

        ls_dev->mc_io = priv->mc_io;
        ls_dev->mc_handle = priv->mc_token;

        err = dpni_reset(priv->mc_io, 0, priv->mc_token);
        if (err) {
                dev_err(dev, "dpni_reset() failed\n");
                goto err_reset;
        }

        err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
                                  &priv->dpni_attrs);
        if (err) {
                dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
                goto err_get_attr;
        }

        /* Configure buffer layouts */
        /* rx buffer */
        buf_layout.pass_parser_result = true;
        buf_layout.pass_frame_status = true;
        buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
        buf_layout.data_align = DPAA2_ETH_RX_BUF_ALIGN;
        buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
                             DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
                             DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
                             DPNI_BUF_LAYOUT_OPT_DATA_ALIGN;
        err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
                                     DPNI_QUEUE_RX, &buf_layout);
        if (err) {
                dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
                goto err_buf_layout;
        }

        /* tx buffer */
        buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
                             DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
        err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
                                     DPNI_QUEUE_TX, &buf_layout);
        if (err) {
                dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
                goto err_buf_layout;
        }

        /* tx-confirm buffer */
        buf_layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
        err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
                                     DPNI_QUEUE_TX_CONFIRM, &buf_layout);
        if (err) {
                dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
                goto err_buf_layout;
        }

        /* Now that we've set our tx buffer layout, retrieve the minimum
         * required tx data offset.
         */
        err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
                                      &priv->tx_data_offset);
        if (err) {
                dev_err(dev, "dpni_get_tx_data_offset() failed\n");
                goto err_data_offset;
        }

        if ((priv->tx_data_offset % 64) != 0)
                dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
                         priv->tx_data_offset);

        /* Accommodate software annotation space (SWA) */
        priv->tx_data_offset += DPAA2_ETH_SWA_SIZE;

        return 0;

err_data_offset:
err_buf_layout:
err_get_attr:
err_reset:
        dpni_close(priv->mc_io, 0, priv->mc_token);
err_open:
        return err;
}

static void free_dpni(struct dpaa2_eth_priv *priv)
{
        int err;

        err = dpni_reset(priv->mc_io, 0, priv->mc_token);
        if (err)
                netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
                            err);

        dpni_close(priv->mc_io, 0, priv->mc_token);
}

static int setup_rx_flow(struct dpaa2_eth_priv *priv,
                         struct dpaa2_eth_fq *fq)
{
        struct device *dev = priv->net_dev->dev.parent;
        struct dpni_queue queue;
        struct dpni_queue_id qid;
        struct dpni_taildrop td;
        int err;

        err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
                             DPNI_QUEUE_RX, 0, fq->flowid, &queue, &qid);
        if (err) {
                dev_err(dev, "dpni_get_queue(RX) failed\n");
                return err;
        }

        fq->fqid = qid.fqid;

        queue.destination.id = fq->channel->dpcon_id;
        queue.destination.type = DPNI_DEST_DPCON;
        queue.destination.priority = 1;
        queue.user_context = (u64)fq;
        err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
                             DPNI_QUEUE_RX, 0, fq->flowid,
                             DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
                             &queue);
        if (err) {
                dev_err(dev, "dpni_set_queue(RX) failed\n");
                return err;
        }

        td.enable = 1;
        td.threshold = DPAA2_ETH_TAILDROP_THRESH;
        err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, DPNI_CP_QUEUE,
                                DPNI_QUEUE_RX, 0, fq->flowid, &td);
        if (err) {
                dev_err(dev, "dpni_set_taildrop() failed\n");
                return err;
        }

        return 0;
}
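
/* Note on the taildrop above: DPNI_CP_QUEUE makes the congestion point a
 * single frame queue, so once one Rx FQ's backlog crosses
 * DPAA2_ETH_TAILDROP_THRESH, hardware drops further frames for that queue
 * only, bounding buffer buildup when one flow outruns its CPU.
 */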

static int setup_tx_flow(struct dpaa2_eth_priv *priv,
                         struct dpaa2_eth_fq *fq)
{
        struct device *dev = priv->net_dev->dev.parent;
        struct dpni_queue queue;
        struct dpni_queue_id qid;
        int err;

        err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
                             DPNI_QUEUE_TX, 0, fq->flowid, &queue, &qid);
        if (err) {
                dev_err(dev, "dpni_get_queue(TX) failed\n");
                return err;
        }

        fq->tx_qdbin = qid.qdbin;

        err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
                             DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
                             &queue, &qid);
        if (err) {
                dev_err(dev, "dpni_get_queue(TX_CONF) failed\n");
                return err;
        }

        fq->fqid = qid.fqid;

        queue.destination.id = fq->channel->dpcon_id;
        queue.destination.type = DPNI_DEST_DPCON;
        queue.destination.priority = 0;
        queue.user_context = (u64)fq;
        err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
                             DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
                             DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
                             &queue);
        if (err) {
                dev_err(dev, "dpni_set_queue(TX_CONF) failed\n");
                return err;
        }

        return 0;
}
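
/* Tx data path note: outgoing frames are enqueued through the queuing
 * destination bin saved above (fq->tx_qdbin, used together with the qdid
 * fetched in bind_dpni()), while hardware delivers Tx confirmations back
 * to us on the TX_CONFIRM frame queue (fq->fqid) configured here.
 */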

/* Hash key is a 5-tuple: IPsrc, IPdst, IPnextproto, L4src, L4dst */
static const struct dpaa2_eth_hash_fields hash_fields[] = {
        {
                /* IP header */
                .rxnfc_field = RXH_IP_SRC,
                .cls_prot = NET_PROT_IP,
                .cls_field = NH_FLD_IP_SRC,
                .size = 4,
        }, {
                .rxnfc_field = RXH_IP_DST,
                .cls_prot = NET_PROT_IP,
                .cls_field = NH_FLD_IP_DST,
                .size = 4,
        }, {
                .rxnfc_field = RXH_L3_PROTO,
                .cls_prot = NET_PROT_IP,
                .cls_field = NH_FLD_IP_PROTO,
                .size = 1,
        }, {
                /* Using UDP ports, this is functionally equivalent to raw
                 * byte pairs from L4 header.
                 */
                .rxnfc_field = RXH_L4_B_0_1,
                .cls_prot = NET_PROT_UDP,
                .cls_field = NH_FLD_UDP_PORT_SRC,
                .size = 2,
        }, {
                .rxnfc_field = RXH_L4_B_2_3,
                .cls_prot = NET_PROT_UDP,
                .cls_field = NH_FLD_UDP_PORT_DST,
                .size = 2,
        },
};
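
/* With all five extracts enabled, the generated hash key is 13 bytes:
 * 4 (IP src) + 4 (IP dst) + 1 (IP proto) + 2 (L4 src port) + 2 (L4 dst port).
 */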

/* Set RX hash options
 * flags is a combination of RXH_ bits
 */
static int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
{
        struct device *dev = net_dev->dev.parent;
        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
        struct dpkg_profile_cfg cls_cfg;
        struct dpni_rx_tc_dist_cfg dist_cfg;
        u8 *dma_mem;
        int i;
        int err = 0;

        if (!dpaa2_eth_hash_enabled(priv)) {
                dev_dbg(dev, "Hashing support is not enabled\n");
                return 0;
        }

        memset(&cls_cfg, 0, sizeof(cls_cfg));

        for (i = 0; i < ARRAY_SIZE(hash_fields); i++) {
                struct dpkg_extract *key =
                        &cls_cfg.extracts[cls_cfg.num_extracts];

                if (!(flags & hash_fields[i].rxnfc_field))
                        continue;

                if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
                        dev_err(dev, "error adding key extraction rule, too many rules?\n");
                        return -E2BIG;
                }

                key->type = DPKG_EXTRACT_FROM_HDR;
                key->extract.from_hdr.prot = hash_fields[i].cls_prot;
                key->extract.from_hdr.type = DPKG_FULL_FIELD;
                key->extract.from_hdr.field = hash_fields[i].cls_field;
                cls_cfg.num_extracts++;

                priv->rx_hash_fields |= hash_fields[i].rxnfc_field;
        }

        dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
        if (!dma_mem)
                return -ENOMEM;

        err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
        if (err) {
                dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
                goto err_prep_key;
        }

        memset(&dist_cfg, 0, sizeof(dist_cfg));

        /* Prepare for setting the rx dist */
        dist_cfg.key_cfg_iova = dma_map_single(dev, dma_mem,
                                               DPAA2_CLASSIFIER_DMA_SIZE,
                                               DMA_TO_DEVICE);
        if (dma_mapping_error(dev, dist_cfg.key_cfg_iova)) {
                dev_err(dev, "DMA mapping failed\n");
                err = -ENOMEM;
                goto err_dma_map;
        }

        dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
        dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;

        err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
        dma_unmap_single(dev, dist_cfg.key_cfg_iova,
                         DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE);
        if (err)
                dev_err(dev, "dpni_set_rx_tc_dist() error %d\n", err);

err_dma_map:
err_prep_key:
        kfree(dma_mem);
        return err;
}
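
/* Example (hypothetical caller): hashing on the IP address pair only,
 * ignoring L4 ports, would be requested as
 *
 *     dpaa2_eth_set_hash(net_dev, RXH_IP_SRC | RXH_IP_DST);
 *
 * bind_dpni() below passes DPAA2_RXH_SUPPORTED to enable every field in
 * hash_fields[].
 */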

/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
 * frame queues and channels
 */
static int bind_dpni(struct dpaa2_eth_priv *priv)
{
        struct net_device *net_dev = priv->net_dev;
        struct device *dev = net_dev->dev.parent;
        struct dpni_pools_cfg pools_params;
        struct dpni_error_cfg err_cfg;
        int err = 0;
        int i;

        pools_params.num_dpbp = 1;
        pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
        pools_params.pools[0].backup_pool = 0;
        pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
        err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
        if (err) {
                dev_err(dev, "dpni_set_pools() failed\n");
                return err;
        }

        /* have the interface implicitly distribute traffic based on
         * supported header fields
         */
        err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_SUPPORTED);
        if (err)
                netdev_err(net_dev, "Failed to configure hashing\n");

        /* Configure handling of error frames */
        err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
        err_cfg.set_frame_annotation = 1;
        err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
        err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
                                       &err_cfg);
        if (err) {
                dev_err(dev, "dpni_set_errors_behavior() failed\n");
                return err;
        }

        /* Configure Rx and Tx conf queues to generate CDANs */
        for (i = 0; i < priv->num_fqs; i++) {
                switch (priv->fq[i].type) {
                case DPAA2_RX_FQ:
                        err = setup_rx_flow(priv, &priv->fq[i]);
                        break;
                case DPAA2_TX_CONF_FQ:
                        err = setup_tx_flow(priv, &priv->fq[i]);
                        break;
                default:
                        dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
                        return -EINVAL;
                }
                if (err)
                        return err;
        }

        err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
                            DPNI_QUEUE_TX, &priv->tx_qdid);
        if (err) {
                dev_err(dev, "dpni_get_qdid() failed\n");
                return err;
        }

        return 0;
}

/* Allocate rings for storing incoming frame descriptors */
static int alloc_rings(struct dpaa2_eth_priv *priv)
{
        struct net_device *net_dev = priv->net_dev;
        struct device *dev = net_dev->dev.parent;
        int i;

        for (i = 0; i < priv->num_channels; i++) {
                priv->channel[i]->store =
                        dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
                if (!priv->channel[i]->store) {
                        netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
                        goto err_ring;
                }
        }

        return 0;

err_ring:
        for (i = 0; i < priv->num_channels; i++) {
                if (!priv->channel[i]->store)
                        break;
                dpaa2_io_store_destroy(priv->channel[i]->store);
        }

        return -ENOMEM;
}

static void free_rings(struct dpaa2_eth_priv *priv)
{
        int i;

        for (i = 0; i < priv->num_channels; i++)
                dpaa2_io_store_destroy(priv->channel[i]->store);
}

static int set_mac_addr(struct dpaa2_eth_priv *priv)
{
        struct net_device *net_dev = priv->net_dev;
        struct device *dev = net_dev->dev.parent;
        u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
        int err;

        /* Get firmware address, if any */
        err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
        if (err) {
                dev_err(dev, "dpni_get_port_mac_addr() failed\n");
                return err;
        }

        /* Get the DPNI's currently configured primary MAC address, if any */
        err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
                                        dpni_mac_addr);
        if (err) {
                dev_err(dev, "dpni_get_primary_mac_addr() failed\n");
                return err;
        }

        /* First check if firmware has any address configured by bootloader */
        if (!is_zero_ether_addr(mac_addr)) {
                /* If the DPMAC addr != DPNI addr, update it */
                if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
                        err = dpni_set_primary_mac_addr(priv->mc_io, 0,
                                                        priv->mc_token,
                                                        mac_addr);
                        if (err) {
                                dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
                                return err;
                        }
                }
                memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
        } else if (is_zero_ether_addr(dpni_mac_addr)) {
                /* No MAC address configured, fill in net_dev->dev_addr
                 * with a random one
                 */
                eth_hw_addr_random(net_dev);
                dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");

                err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
                                                net_dev->dev_addr);
                if (err) {
                        dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
                        return err;
                }

                /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
                 * practical purposes, this will be our "permanent" mac address,
                 * at least until the next reboot. This move will also permit
                 * register_netdevice() to properly fill up net_dev->perm_addr.
                 */
                net_dev->addr_assign_type = NET_ADDR_PERM;
        } else {
                /* NET_ADDR_PERM is default, all we have to do is
                 * fill in the device addr.
                 */
                memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
        }

        return 0;
}
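
/* MAC address selection above, summarized:
 *   port (DPMAC) addr set      -> use it, syncing the DPNI to it if needed
 *   port unset, DPNI addr set  -> use the DPNI address as-is
 *   both all-zero              -> generate a random address and persist it
 *                                 in the DPNI as our "permanent" one
 */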

static int netdev_init(struct net_device *net_dev)
{
        struct device *dev = net_dev->dev.parent;
        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
        u8 bcast_addr[ETH_ALEN];
        u8 num_queues;
        int err;

        net_dev->netdev_ops = &dpaa2_eth_ops;

        err = set_mac_addr(priv);
        if (err)
                return err;

        /* Explicitly add the broadcast address to the MAC filtering table */
        eth_broadcast_addr(bcast_addr);
        err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
        if (err) {
                dev_err(dev, "dpni_add_mac_addr() failed\n");
                return err;
        }

        /* Reserve enough space to align buffer as per hardware requirement;
         * NOTE: priv->tx_data_offset MUST be initialized at this point.
         */
        net_dev->needed_headroom = DPAA2_ETH_NEEDED_HEADROOM(priv);

        /* Set MTU limits */
        net_dev->min_mtu = 68;
        net_dev->max_mtu = DPAA2_ETH_MAX_MTU;

        /* Set actual number of queues in the net device */
        num_queues = dpaa2_eth_queue_count(priv);
        err = netif_set_real_num_tx_queues(net_dev, num_queues);
        if (err) {
                dev_err(dev, "netif_set_real_num_tx_queues() failed\n");
                return err;
        }
        err = netif_set_real_num_rx_queues(net_dev, num_queues);
        if (err) {
                dev_err(dev, "netif_set_real_num_rx_queues() failed\n");
                return err;
        }

        /* Our .ndo_init will be called here */
        err = register_netdev(net_dev);
        if (err < 0) {
                dev_err(dev, "register_netdev() failed\n");
                return err;
        }

        return 0;
}

static int poll_link_state(void *arg)
{
        struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
        int err;

        while (!kthread_should_stop()) {
                err = link_state_update(priv);
                if (unlikely(err))
                        return err;

                msleep(DPAA2_ETH_LINK_STATE_REFRESH);
        }

        return 0;
}

static irqreturn_t dpni_irq0_handler(int irq_num, void *arg)
{
        return IRQ_WAKE_THREAD;
}

static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
{
        u32 status = 0, clear = 0;
        struct device *dev = (struct device *)arg;
        struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
        struct net_device *net_dev = dev_get_drvdata(dev);
        int err;

        err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
                                  DPNI_IRQ_INDEX, &status);
        if (unlikely(err)) {
                netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
                clear = 0xffffffff;
                goto out;
        }

        if (status & DPNI_IRQ_EVENT_LINK_CHANGED) {
                clear |= DPNI_IRQ_EVENT_LINK_CHANGED;
                link_state_update(netdev_priv(net_dev));
        }

out:
        dpni_clear_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
                              DPNI_IRQ_INDEX, clear);
        return IRQ_HANDLED;
}

static int setup_irqs(struct fsl_mc_device *ls_dev)
{
        int err = 0;
        struct fsl_mc_device_irq *irq;

        err = fsl_mc_allocate_irqs(ls_dev);
        if (err) {
                dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
                return err;
        }

        irq = ls_dev->irqs[0];
        err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
                                        dpni_irq0_handler,
                                        dpni_irq0_handler_thread,
                                        IRQF_NO_SUSPEND | IRQF_ONESHOT,
                                        dev_name(&ls_dev->dev), &ls_dev->dev);
        if (err < 0) {
                dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err);
                goto free_mc_irq;
        }

        err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
                                DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED);
        if (err < 0) {
                dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
                goto free_irq;
        }

        err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
                                  DPNI_IRQ_INDEX, 1);
        if (err < 0) {
                dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err);
                goto free_irq;
        }

        return 0;

free_irq:
        devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
free_mc_irq:
        fsl_mc_free_irqs(ls_dev);

        return err;
}

static void add_ch_napi(struct dpaa2_eth_priv *priv)
{
        int i;
        struct dpaa2_eth_channel *ch;

        for (i = 0; i < priv->num_channels; i++) {
                ch = priv->channel[i];
                /* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
                netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
                               NAPI_POLL_WEIGHT);
        }
}

static void del_ch_napi(struct dpaa2_eth_priv *priv)
{
        int i;
        struct dpaa2_eth_channel *ch;

        for (i = 0; i < priv->num_channels; i++) {
                ch = priv->channel[i];
                netif_napi_del(&ch->napi);
        }
}

static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
{
        struct device *dev;
        struct net_device *net_dev = NULL;
        struct dpaa2_eth_priv *priv = NULL;
        int err = 0;

        dev = &dpni_dev->dev;

        /* Net device */
        net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES);
        if (!net_dev) {
                dev_err(dev, "alloc_etherdev_mq() failed\n");
                return -ENOMEM;
        }

        SET_NETDEV_DEV(net_dev, dev);
        dev_set_drvdata(dev, net_dev);

        priv = netdev_priv(net_dev);
        priv->net_dev = net_dev;

        priv->iommu_domain = iommu_get_domain_for_dev(dev);

        /* Obtain a MC portal */
        err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
                                     &priv->mc_io);
        if (err) {
                dev_err(dev, "MC portal allocation failed\n");
                goto err_portal_alloc;
        }

        /* MC objects initialization and configuration */
        err = setup_dpni(dpni_dev);
        if (err)
                goto err_dpni_setup;

        err = setup_dpio(priv);
        if (err)
                goto err_dpio_setup;

        setup_fqs(priv);

        err = setup_dpbp(priv);
        if (err)
                goto err_dpbp_setup;

        err = bind_dpni(priv);
        if (err)
                goto err_bind;

        /* Add a NAPI context for each channel */
        add_ch_napi(priv);

        /* Percpu statistics */
        priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
        if (!priv->percpu_stats) {
                dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
                err = -ENOMEM;
                goto err_alloc_percpu_stats;
        }
        priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
        if (!priv->percpu_extras) {
                dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
                err = -ENOMEM;
                goto err_alloc_percpu_extras;
        }

        err = netdev_init(net_dev);
        if (err)
                goto err_netdev_init;

        /* Configure checksum offload based on current interface flags */
        err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
        if (err)
                goto err_csum;

        err = set_tx_csum(priv, !!(net_dev->features &
                                   (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
        if (err)
                goto err_csum;

        err = alloc_rings(priv);
        if (err)
                goto err_alloc_rings;

        net_dev->ethtool_ops = &dpaa2_ethtool_ops;

        err = setup_irqs(dpni_dev);
        if (err) {
                netdev_warn(net_dev, "Failed to set link interrupt, falling back to polling\n");
                priv->poll_thread = kthread_run(poll_link_state, priv,
                                                "%s_poll_link", net_dev->name);
                if (IS_ERR(priv->poll_thread)) {
                        netdev_err(net_dev, "Error starting polling thread\n");
                        goto err_poll_thread;
                }
                priv->do_link_poll = true;
        }

        dev_info(dev, "Probed interface %s\n", net_dev->name);
        return 0;

err_poll_thread:
        free_rings(priv);
err_alloc_rings:
err_csum:
        unregister_netdev(net_dev);
err_netdev_init:
        free_percpu(priv->percpu_extras);
err_alloc_percpu_extras:
        free_percpu(priv->percpu_stats);
err_alloc_percpu_stats:
        del_ch_napi(priv);
err_bind:
        free_dpbp(priv);
err_dpbp_setup:
        free_dpio(priv);
err_dpio_setup:
        free_dpni(priv);
err_dpni_setup:
        fsl_mc_portal_free(priv->mc_io);
err_portal_alloc:
        dev_set_drvdata(dev, NULL);
        free_netdev(net_dev);

        return err;
}

static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
{
        struct device *dev;
        struct net_device *net_dev;
        struct dpaa2_eth_priv *priv;

        dev = &ls_dev->dev;
        net_dev = dev_get_drvdata(dev);
        priv = netdev_priv(net_dev);

        unregister_netdev(net_dev);
        dev_info(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);

        if (priv->do_link_poll)
                kthread_stop(priv->poll_thread);
        else
                fsl_mc_free_irqs(ls_dev);

        free_rings(priv);
        free_percpu(priv->percpu_stats);
        free_percpu(priv->percpu_extras);

        del_ch_napi(priv);
        free_dpbp(priv);
        free_dpio(priv);
        free_dpni(priv);

        fsl_mc_portal_free(priv->mc_io);

        dev_set_drvdata(dev, NULL);
        free_netdev(net_dev);

        return 0;
}

static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
        {
                .vendor = FSL_MC_VENDOR_FREESCALE,
                .obj_type = "dpni",
        },
        { .vendor = 0x0 }
};
MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);

static struct fsl_mc_driver dpaa2_eth_driver = {
        .driver = {
                .name = KBUILD_MODNAME,
                .owner = THIS_MODULE,
        },
        .probe = dpaa2_eth_probe,
        .remove = dpaa2_eth_remove,
        .match_id_table = dpaa2_eth_match_id_table
};

module_fsl_mc_driver(dpaa2_eth_driver);