/*
 * AMD 10Gb Ethernet driver
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/spinlock.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <net/busy_poll.h>
#include <linux/clk.h>
#include <linux/if_ether.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>

#include "xgbe.h"
#include "xgbe-common.h"
static int xgbe_one_poll(struct napi_struct *, int);
static int xgbe_all_poll(struct napi_struct *, int);
static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel_mem, *channel;
	struct xgbe_ring *tx_ring, *rx_ring;
	unsigned int count, i;
	int ret = -ENOMEM;

	count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);

	channel_mem = kcalloc(count, sizeof(struct xgbe_channel), GFP_KERNEL);
	if (!channel_mem)
		goto err_channel;

	tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct xgbe_ring),
			  GFP_KERNEL);
	if (!tx_ring)
		goto err_tx_ring;

	rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct xgbe_ring),
			  GFP_KERNEL);
	if (!rx_ring)
		goto err_rx_ring;

	for (i = 0, channel = channel_mem; i < count; i++, channel++) {
		snprintf(channel->name, sizeof(channel->name), "channel-%u", i);
		channel->pdata = pdata;
		channel->queue_index = i;
		channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
				    (DMA_CH_INC * i);

		if (pdata->per_channel_irq)
			channel->dma_irq = pdata->channel_irq[i];

		if (i < pdata->tx_ring_count) {
			spin_lock_init(&tx_ring->lock);
			channel->tx_ring = tx_ring++;
		}

		if (i < pdata->rx_ring_count) {
			spin_lock_init(&rx_ring->lock);
			channel->rx_ring = rx_ring++;
		}

		netif_dbg(pdata, drv, pdata->netdev,
			  "%s: dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
			  channel->name, channel->dma_regs, channel->dma_irq,
			  channel->tx_ring, channel->rx_ring);
	}

	pdata->channel = channel_mem;
	pdata->channel_count = count;

	return 0;

err_rx_ring:
	kfree(tx_ring);

err_tx_ring:
	kfree(channel_mem);

err_channel:
	return ret;
}
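/* Note: the channels and their Tx/Rx rings are carved out of three flat
 * kcalloc'd arrays that are stitched together above. xgbe_free_channels()
 * below relies on this layout: freeing the first channel's ring pointers
 * and the channel array releases everything.
 */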
static void xgbe_free_channels(struct xgbe_prv_data *pdata)
{
	if (!pdata->channel)
		return;

	kfree(pdata->channel->rx_ring);
	kfree(pdata->channel->tx_ring);
	kfree(pdata->channel);

	pdata->channel = NULL;
	pdata->channel_count = 0;
}
static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
{
	return (ring->rdesc_count - (ring->cur - ring->dirty));
}

static inline unsigned int xgbe_rx_dirty_desc(struct xgbe_ring *ring)
{
	return (ring->cur - ring->dirty);
}
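/* ring->cur and ring->dirty are free-running unsigned counters (they are
 * masked down to a ring index by XGBE_GET_DESC_DATA() on access), so the
 * unsigned subtractions above remain correct across wrap-around.
 */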
static int xgbe_maybe_stop_tx_queue(struct xgbe_channel *channel,
				    struct xgbe_ring *ring, unsigned int count)
{
	struct xgbe_prv_data *pdata = channel->pdata;

	if (count > xgbe_tx_avail_desc(ring)) {
		netif_info(pdata, drv, pdata->netdev,
			   "Tx queue stopped, not enough descriptors available\n");
		netif_stop_subqueue(pdata->netdev, channel->queue_index);
		ring->tx.queue_stopped = 1;

		/* If we haven't notified the hardware because of xmit_more
		 * support, tell it now
		 */
		if (ring->tx.xmit_more)
			pdata->hw_if.tx_start_xmit(channel, ring);

		return NETDEV_TX_BUSY;
	}

	return 0;
}
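/* xgbe_xmit() calls this twice: once with the packet's descriptor count
 * before mapping the skb, and again after queuing with XGBE_TX_MAX_DESCS
 * so the queue is stopped ahead of time rather than on a failed transmit.
 */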
static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
{
	unsigned int rx_buf_size;

	rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	rx_buf_size = clamp_val(rx_buf_size, XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE);

	rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) &
		      ~(XGBE_RX_BUF_ALIGN - 1);

	return rx_buf_size;
}
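/* Worked example: for the default MTU of 1500, rx_buf_size starts as
 * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522, which the
 * mask step then rounds up to the next XGBE_RX_BUF_ALIGN boundary
 * (1536 with the typical 64-byte alignment).
 */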
static void xgbe_enable_rx_tx_int(struct xgbe_prv_data *pdata,
				  struct xgbe_channel *channel)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	enum xgbe_int int_id;

	if (channel->tx_ring && channel->rx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
	else if (channel->tx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_TI;
	else if (channel->rx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_RI;
	else
		return;

	hw_if->enable_int(channel, int_id);
}
static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++)
		xgbe_enable_rx_tx_int(pdata, channel);
}
static void xgbe_disable_rx_tx_int(struct xgbe_prv_data *pdata,
				   struct xgbe_channel *channel)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	enum xgbe_int int_id;

	if (channel->tx_ring && channel->rx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
	else if (channel->tx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_TI;
	else if (channel->rx_ring)
		int_id = XGMAC_INT_DMA_CH_SR_RI;
	else
		return;

	hw_if->disable_int(channel, int_id);
}
static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++)
		xgbe_disable_rx_tx_int(pdata, channel);
}
static irqreturn_t xgbe_isr(int irq, void *data)
{
	struct xgbe_prv_data *pdata = data;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_channel *channel;
	unsigned int dma_isr, dma_ch_isr;
	unsigned int mac_isr, mac_tssr;
	unsigned int i;

	/* The DMA interrupt status register also reports MAC and MTL
	 * interrupts. So for polling mode, we just need to check for
	 * this register to be non-zero
	 */
	dma_isr = XGMAC_IOREAD(pdata, DMA_ISR);
	if (!dma_isr)
		goto isr_done;

	netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);

	for (i = 0; i < pdata->channel_count; i++) {
		if (!(dma_isr & (1 << i)))
			continue;

		channel = pdata->channel + i;

		dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
		netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
			  i, dma_ch_isr);

		/* The TI or RI interrupt bits may still be set even if using
		 * per channel DMA interrupts. Check to be sure those are not
		 * enabled before using the private data napi structure.
		 */
		if (!pdata->per_channel_irq &&
		    (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
		     XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI))) {
			if (napi_schedule_prep(&pdata->napi)) {
				/* Disable Tx and Rx interrupts */
				xgbe_disable_rx_tx_ints(pdata);

				/* Turn on polling */
				__napi_schedule_irqoff(&pdata->napi);
			}
		} else {
			/* Don't clear Rx/Tx status if doing per channel DMA
			 * interrupts, these will be cleared by the ISR for
			 * per channel DMA interrupts.
			 */
			XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, TI, 0);
			XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, RI, 0);
		}

		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU))
			pdata->ext_stats.rx_buffer_unavailable++;

		/* Restart the device on a Fatal Bus Error */
		if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
			schedule_work(&pdata->restart_work);

		/* Clear interrupt signals */
		XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
	}

	if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) {
		mac_isr = XGMAC_IOREAD(pdata, MAC_ISR);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS))
			hw_if->tx_mmc_int(pdata);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS))
			hw_if->rx_mmc_int(pdata);

		if (XGMAC_GET_BITS(mac_isr, MAC_ISR, TSIS)) {
			mac_tssr = XGMAC_IOREAD(pdata, MAC_TSSR);

			if (XGMAC_GET_BITS(mac_tssr, MAC_TSSR, TXTSC)) {
				/* Read Tx Timestamp to clear interrupt */
				pdata->tx_tstamp =
					hw_if->get_tx_tstamp(pdata);
				queue_work(pdata->dev_workqueue,
					   &pdata->tx_tstamp_work);
			}
		}
	}

isr_done:
	/* If there is not a separate AN irq, handle it here */
	if (pdata->dev_irq == pdata->an_irq)
		pdata->phy_if.an_isr(irq, pdata);

	return IRQ_HANDLED;
}
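/* The handler returns IRQ_HANDLED unconditionally: DMA_ISR is read once up
 * front, and a zero value simply skips ahead to the shared AN interrupt
 * check at isr_done.
 */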
static irqreturn_t xgbe_dma_isr(int irq, void *data)
{
	struct xgbe_channel *channel = data;
	struct xgbe_prv_data *pdata = channel->pdata;
	unsigned int dma_status;

	/* Per channel DMA interrupts are enabled, so we use the per
	 * channel napi structure and not the private data napi structure
	 */
	if (napi_schedule_prep(&channel->napi)) {
		/* Disable Tx and Rx interrupts */
		if (pdata->channel_irq_mode)
			xgbe_disable_rx_tx_int(pdata, channel);
		else
			disable_irq_nosync(channel->dma_irq);

		/* Turn on polling */
		__napi_schedule_irqoff(&channel->napi);
	}

	/* Clear Tx/Rx signals */
	dma_status = 0;
	XGMAC_SET_BITS(dma_status, DMA_CH_SR, TI, 1);
	XGMAC_SET_BITS(dma_status, DMA_CH_SR, RI, 1);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_status);

	return IRQ_HANDLED;
}
static void xgbe_tx_timer(unsigned long data)
{
	struct xgbe_channel *channel = (struct xgbe_channel *)data;
	struct xgbe_prv_data *pdata = channel->pdata;
	struct napi_struct *napi;

	DBGPR("-->xgbe_tx_timer\n");

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	if (napi_schedule_prep(napi)) {
		/* Disable Tx and Rx interrupts */
		if (pdata->per_channel_irq)
			if (pdata->channel_irq_mode)
				xgbe_disable_rx_tx_int(pdata, channel);
			else
				disable_irq_nosync(channel->dma_irq);
		else
			xgbe_disable_rx_tx_ints(pdata);

		/* Turn on polling */
		__napi_schedule(napi);
	}

	channel->tx_timer_active = 0;

	DBGPR("<--xgbe_tx_timer\n");
}
static void xgbe_service(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   service_work);

	pdata->phy_if.phy_status(pdata);
}
static void xgbe_service_timer(unsigned long data)
{
	struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;

	queue_work(pdata->dev_workqueue, &pdata->service_work);

	mod_timer(&pdata->service_timer, jiffies + HZ);
}
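/* The service timer re-arms itself every second; the actual PHY status
 * polling runs from xgbe_service() on the device workqueue so the timer
 * callback itself stays short.
 */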
static void xgbe_init_timers(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	setup_timer(&pdata->service_timer, xgbe_service_timer,
		    (unsigned long)pdata);

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		setup_timer(&channel->tx_timer, xgbe_tx_timer,
			    (unsigned long)channel);
	}
}
static void xgbe_start_timers(struct xgbe_prv_data *pdata)
{
	mod_timer(&pdata->service_timer, jiffies + HZ);
}
static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	del_timer_sync(&pdata->service_timer);

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			break;

		del_timer_sync(&channel->tx_timer);
	}
}
void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
{
	unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
	struct xgbe_hw_features *hw_feat = &pdata->hw_feat;

	DBGPR("-->xgbe_get_all_hw_features\n");

	mac_hfr0 = XGMAC_IOREAD(pdata, MAC_HWF0R);
	mac_hfr1 = XGMAC_IOREAD(pdata, MAC_HWF1R);
	mac_hfr2 = XGMAC_IOREAD(pdata, MAC_HWF2R);

	memset(hw_feat, 0, sizeof(*hw_feat));

	hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR);

	/* Hardware feature register 0 */
	hw_feat->gmii        = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
	hw_feat->vlhash      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
	hw_feat->sma         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
	hw_feat->rwk         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
	hw_feat->mgk         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
	hw_feat->mmc         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
	hw_feat->aoe         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
	hw_feat->ts          = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
	hw_feat->eee         = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
	hw_feat->tx_coe      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
	hw_feat->rx_coe      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
	hw_feat->addn_mac    = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
					      ADDMACADRSEL);
	hw_feat->ts_src      = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
	hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);

	/* Hardware feature register 1 */
	hw_feat->rx_fifo_size  = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						RXFIFOSIZE);
	hw_feat->tx_fifo_size  = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						TXFIFOSIZE);
	hw_feat->adv_ts_hi     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADVTHWORD);
	hw_feat->dma_width     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
	hw_feat->dcb           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
	hw_feat->sph           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
	hw_feat->tso           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
	hw_feat->dma_debug     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
	hw_feat->rss           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
	hw_feat->tc_cnt        = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
	hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  HASHTBLSZ);
	hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  L3L4FNUM);

	/* Hardware feature register 2 */
	hw_feat->rx_q_cnt     = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
	hw_feat->tx_q_cnt     = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
	hw_feat->rx_ch_cnt    = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
	hw_feat->tx_ch_cnt    = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
	hw_feat->pps_out_num  = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
	hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM);

	/* Translate the Hash Table size into actual number */
	switch (hw_feat->hash_table_size) {
	case 0:
		break;
	case 1:
		hw_feat->hash_table_size = 64;
		break;
	case 2:
		hw_feat->hash_table_size = 128;
		break;
	case 3:
		hw_feat->hash_table_size = 256;
		break;
	}

	/* Translate the address width setting into actual number */
	switch (hw_feat->dma_width) {
	case 0:
		hw_feat->dma_width = 32;
		break;
	case 1:
		hw_feat->dma_width = 40;
		break;
	case 2:
		hw_feat->dma_width = 48;
		break;
	default:
		hw_feat->dma_width = 32;
	}

	/* The Queue, Channel and TC counts are zero based so increment them
	 * to get the actual number
	 */
	hw_feat->rx_q_cnt++;
	hw_feat->tx_q_cnt++;
	hw_feat->rx_ch_cnt++;
	hw_feat->tx_ch_cnt++;
	hw_feat->tc_cnt++;

	/* Translate the fifo sizes into actual numbers */
	hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
	hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);

	DBGPR("<--xgbe_get_all_hw_features\n");
}
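/* Worked example of the fifo translation: an encoded RXFIFOSIZE of 7
 * becomes 1 << (7 + 7) = 16384 bytes.
 */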
static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
{
	struct xgbe_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		channel = pdata->channel;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			if (add)
				netif_napi_add(pdata->netdev, &channel->napi,
					       xgbe_one_poll,
					       NAPI_POLL_WEIGHT);

			napi_enable(&channel->napi);
		}
	} else {
		if (add)
			netif_napi_add(pdata->netdev, &pdata->napi,
				       xgbe_all_poll, NAPI_POLL_WEIGHT);

		napi_enable(&pdata->napi);
	}
}
static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
{
	struct xgbe_channel *channel;
	unsigned int i;

	if (pdata->per_channel_irq) {
		channel = pdata->channel;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			napi_disable(&channel->napi);

			if (del)
				netif_napi_del(&channel->napi);
		}
	} else {
		napi_disable(&pdata->napi);

		if (del)
			netif_napi_del(&pdata->napi);
	}
}
static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	struct net_device *netdev = pdata->netdev;
	unsigned int i;
	int ret;

	ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
			       netdev->name, pdata);
	if (ret) {
		netdev_alert(netdev, "error requesting irq %d\n",
			     pdata->dev_irq);
		return ret;
	}

	if (!pdata->per_channel_irq)
		return 0;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		snprintf(channel->dma_irq_name,
			 sizeof(channel->dma_irq_name) - 1,
			 "%s-TxRx-%u", netdev_name(netdev),
			 channel->queue_index);

		ret = devm_request_irq(pdata->dev, channel->dma_irq,
				       xgbe_dma_isr, 0,
				       channel->dma_irq_name, channel);
		if (ret) {
			netdev_alert(netdev, "error requesting irq %d\n",
				     channel->dma_irq);
			goto err_irq;
		}
	}

	return 0;

err_irq:
	/* Using an unsigned int, 'i' will go to UINT_MAX and exit */
	for (i--, channel--; i < pdata->channel_count; i--, channel--)
		devm_free_irq(pdata->dev, channel->dma_irq, channel);

	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

	return ret;
}
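/* The unwind loop counts 'i' down as an unsigned int: after freeing the irq
 * for channel 0 it wraps to UINT_MAX, at which point the
 * 'i < pdata->channel_count' test terminates the loop.
 */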
static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i;

	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

	if (!pdata->per_channel_irq)
		return;

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++)
		devm_free_irq(pdata->dev, channel->dma_irq, channel);
}
void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_init_tx_coalesce\n");

	pdata->tx_usecs = XGMAC_INIT_DMA_TX_USECS;
	pdata->tx_frames = XGMAC_INIT_DMA_TX_FRAMES;

	hw_if->config_tx_coalesce(pdata);

	DBGPR("<--xgbe_init_tx_coalesce\n");
}
void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_init_rx_coalesce\n");

	pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS);
	pdata->rx_usecs = XGMAC_INIT_DMA_RX_USECS;
	pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES;

	hw_if->config_rx_coalesce(pdata);

	DBGPR("<--xgbe_init_rx_coalesce\n");
}
static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	unsigned int i, j;

	DBGPR("-->xgbe_free_tx_data\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->tx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);
			desc_if->unmap_rdata(pdata, rdata);
		}
	}

	DBGPR("<--xgbe_free_tx_data\n");
}
static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_ring_data *rdata;
	unsigned int i, j;

	DBGPR("-->xgbe_free_rx_data\n");

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		ring = channel->rx_ring;
		if (!ring)
			break;

		for (j = 0; j < ring->rdesc_count; j++) {
			rdata = XGBE_GET_DESC_DATA(ring, j);
			desc_if->unmap_rdata(pdata, rdata);
		}
	}

	DBGPR("<--xgbe_free_rx_data\n");
}
static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
{
	pdata->phy_link = -1;
	pdata->phy_speed = SPEED_UNKNOWN;

	return pdata->phy_if.phy_reset(pdata);
}
int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned long flags;

	DBGPR("-->xgbe_powerdown\n");

	if (!netif_running(netdev) ||
	    (caller == XGMAC_IOCTL_CONTEXT && pdata->power_down)) {
		netdev_alert(netdev, "Device is already powered down\n");
		DBGPR("<--xgbe_powerdown\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&pdata->lock, flags);

	if (caller == XGMAC_DRIVER_CONTEXT)
		netif_device_detach(netdev);

	netif_tx_stop_all_queues(netdev);

	xgbe_stop_timers(pdata);
	flush_workqueue(pdata->dev_workqueue);

	hw_if->powerdown_tx(pdata);
	hw_if->powerdown_rx(pdata);

	xgbe_napi_disable(pdata, 0);

	pdata->power_down = 1;

	spin_unlock_irqrestore(&pdata->lock, flags);

	DBGPR("<--xgbe_powerdown\n");

	return 0;
}
int xgbe_powerup(struct net_device *netdev, unsigned int caller)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	unsigned long flags;

	DBGPR("-->xgbe_powerup\n");

	if (!netif_running(netdev) ||
	    (caller == XGMAC_IOCTL_CONTEXT && !pdata->power_down)) {
		netdev_alert(netdev, "Device is already powered up\n");
		DBGPR("<--xgbe_powerup\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&pdata->lock, flags);

	pdata->power_down = 0;

	xgbe_napi_enable(pdata, 0);

	hw_if->powerup_tx(pdata);
	hw_if->powerup_rx(pdata);

	if (caller == XGMAC_DRIVER_CONTEXT)
		netif_device_attach(netdev);

	netif_tx_start_all_queues(netdev);

	xgbe_start_timers(pdata);

	spin_unlock_irqrestore(&pdata->lock, flags);

	DBGPR("<--xgbe_powerup\n");

	return 0;
}
static int xgbe_start(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_phy_if *phy_if = &pdata->phy_if;
	struct net_device *netdev = pdata->netdev;
	int ret;

	DBGPR("-->xgbe_start\n");

	hw_if->init(pdata);

	xgbe_napi_enable(pdata, 1);

	ret = xgbe_request_irqs(pdata);
	if (ret)
		goto err_napi;

	ret = phy_if->phy_start(pdata);
	if (ret)
		goto err_irqs;

	hw_if->enable_tx(pdata);
	hw_if->enable_rx(pdata);

	netif_tx_start_all_queues(netdev);

	xgbe_start_timers(pdata);
	queue_work(pdata->dev_workqueue, &pdata->service_work);

	DBGPR("<--xgbe_start\n");

	return 0;

err_irqs:
	xgbe_free_irqs(pdata);

err_napi:
	xgbe_napi_disable(pdata, 1);

	hw_if->exit(pdata);

	return ret;
}
static void xgbe_stop(struct xgbe_prv_data *pdata)
{
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_phy_if *phy_if = &pdata->phy_if;
	struct xgbe_channel *channel;
	struct net_device *netdev = pdata->netdev;
	struct netdev_queue *txq;
	unsigned int i;

	DBGPR("-->xgbe_stop\n");

	netif_tx_stop_all_queues(netdev);

	xgbe_stop_timers(pdata);
	flush_workqueue(pdata->dev_workqueue);

	hw_if->disable_tx(pdata);
	hw_if->disable_rx(pdata);

	xgbe_free_irqs(pdata);

	xgbe_napi_disable(pdata, 1);

	phy_if->phy_stop(pdata);

	hw_if->exit(pdata);

	channel = pdata->channel;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		if (!channel->tx_ring)
			continue;

		txq = netdev_get_tx_queue(netdev, channel->queue_index);
		netdev_tx_reset_queue(txq);
	}

	DBGPR("<--xgbe_stop\n");
}
static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
{
	DBGPR("-->xgbe_restart_dev\n");

	/* If not running, "restart" will happen on open */
	if (!netif_running(pdata->netdev))
		return;

	xgbe_stop(pdata);

	xgbe_free_tx_data(pdata);
	xgbe_free_rx_data(pdata);

	xgbe_start(pdata);

	DBGPR("<--xgbe_restart_dev\n");
}
static void xgbe_restart(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   restart_work);

	rtnl_lock();

	xgbe_restart_dev(pdata);

	rtnl_unlock();
}
static void xgbe_tx_tstamp(struct work_struct *work)
{
	struct xgbe_prv_data *pdata = container_of(work,
						   struct xgbe_prv_data,
						   tx_tstamp_work);
	struct skb_shared_hwtstamps hwtstamps;
	u64 nsec;
	unsigned long flags;

	if (pdata->tx_tstamp) {
		nsec = timecounter_cyc2time(&pdata->tstamp_tc,
					    pdata->tx_tstamp);

		memset(&hwtstamps, 0, sizeof(hwtstamps));
		hwtstamps.hwtstamp = ns_to_ktime(nsec);
		skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps);
	}

	dev_kfree_skb_any(pdata->tx_tstamp_skb);

	spin_lock_irqsave(&pdata->tstamp_lock, flags);
	pdata->tx_tstamp_skb = NULL;
	spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
}
static int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata,
				      struct ifreq *ifreq)
{
	if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config,
			 sizeof(pdata->tstamp_config)))
		return -EFAULT;

	return 0;
}
static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
				      struct ifreq *ifreq)
{
	struct hwtstamp_config config;
	unsigned int mac_tscr;

	if (copy_from_user(&config, ifreq->ifr_data, sizeof(config)))
		return -EFAULT;

	if (config.flags)
		return -EINVAL;

	mac_tscr = 0;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		break;

	case HWTSTAMP_TX_ON:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;

	case HWTSTAMP_FILTER_ALL:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2, UDP, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
	/* PTP v1, UDP, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2, UDP, Sync packet */
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
	/* PTP v1, UDP, Sync packet */
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2, UDP, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
	/* PTP v1, UDP, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* 802.AS1, Ethernet, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* 802.AS1, Ethernet, Sync packet */
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* 802.AS1, Ethernet, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2/802.AS1, any layer, any kind of event packet */
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2/802.AS1, any layer, Sync packet */
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	/* PTP v2/802.AS1, any layer, Delay_req packet */
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
		XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
		break;

	default:
		return -ERANGE;
	}

	pdata->hw_if.config_tstamp(pdata, mac_tscr);

	memcpy(&pdata->tstamp_config, &config, sizeof(config));

	return 0;
}
static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
				struct sk_buff *skb,
				struct xgbe_packet_data *packet)
{
	unsigned long flags;

	if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) {
		spin_lock_irqsave(&pdata->tstamp_lock, flags);
		if (pdata->tx_tstamp_skb) {
			/* Another timestamp in progress, ignore this one */
			XGMAC_SET_BITS(packet->attributes,
				       TX_PACKET_ATTRIBUTES, PTP, 0);
		} else {
			pdata->tx_tstamp_skb = skb_get(skb);
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		}
		spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
	}

	if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
		skb_tx_timestamp(skb);
}
static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
	if (skb_vlan_tag_present(skb))
		packet->vlan_ctag = skb_vlan_tag_get(skb);
}
static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
{
	int ret;

	if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			    TSO_ENABLE))
		return 0;

	ret = skb_cow_head(skb, 0);
	if (ret)
		return ret;

	packet->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	packet->tcp_header_len = tcp_hdrlen(skb);
	packet->tcp_payload_len = skb->len - packet->header_len;
	packet->mss = skb_shinfo(skb)->gso_size;
	DBGPR("  packet->header_len=%u\n", packet->header_len);
	DBGPR("  packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n",
	      packet->tcp_header_len, packet->tcp_payload_len);
	DBGPR("  packet->mss=%u\n", packet->mss);

	/* Update the number of packets that will ultimately be transmitted
	 * along with the extra bytes for each extra packet
	 */
	packet->tx_packets = skb_shinfo(skb)->gso_segs;
	packet->tx_bytes += (packet->tx_packets - 1) * packet->header_len;

	return 0;
}
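/* Worked example: a GSO skb carrying 10 full segments (gso_segs = 10) adds
 * 9 * header_len bytes to tx_bytes, accounting for the headers the hardware
 * will replicate onto each additional segment.
 */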
static int xgbe_is_tso(struct sk_buff *skb)
{
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	DBGPR("  TSO packet to be processed\n");

	return 1;
}
static void xgbe_packet_info(struct xgbe_prv_data *pdata,
			     struct xgbe_ring *ring, struct sk_buff *skb,
			     struct xgbe_packet_data *packet)
{
	struct skb_frag_struct *frag;
	unsigned int context_desc;
	unsigned int len;
	unsigned int i;

	packet->skb = skb;

	context_desc = 0;
	packet->rdesc_count = 0;

	packet->tx_packets = 1;
	packet->tx_bytes = skb->len;

	if (xgbe_is_tso(skb)) {
		/* TSO requires an extra descriptor if mss is different */
		if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
			context_desc = 1;
			packet->rdesc_count++;
		}

		/* TSO requires an extra descriptor for TSO header */
		packet->rdesc_count++;

		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       TSO_ENABLE, 1);
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       CSUM_ENABLE, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL)
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       CSUM_ENABLE, 1);

	if (skb_vlan_tag_present(skb)) {
		/* VLAN requires an extra descriptor if tag is different */
		if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag)
			/* We can share with the TSO context descriptor */
			if (!context_desc) {
				context_desc = 1;
				packet->rdesc_count++;
			}

		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       VLAN_CTAG, 1);
	}

	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
	    (pdata->tstamp_config.tx_type == HWTSTAMP_TX_ON))
		XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			       PTP, 1);

	for (len = skb_headlen(skb); len;) {
		packet->rdesc_count++;
		len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		for (len = skb_frag_size(frag); len; ) {
			packet->rdesc_count++;
			len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
		}
	}
}
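/* The descriptor counting above mirrors how the skb will later be mapped:
 * one descriptor per XGBE_TX_MAX_BUF_SIZE chunk of the linear data and of
 * each fragment, plus the optional context and TSO-header descriptors.
 */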
static int xgbe_open(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	int ret;

	DBGPR("-->xgbe_open\n");

	/* Reset the phy settings */
	ret = xgbe_phy_reset(pdata);
	if (ret)
		return ret;

	/* Enable the clocks */
	ret = clk_prepare_enable(pdata->sysclk);
	if (ret) {
		netdev_alert(netdev, "dma clk_prepare_enable failed\n");
		return ret;
	}

	ret = clk_prepare_enable(pdata->ptpclk);
	if (ret) {
		netdev_alert(netdev, "ptp clk_prepare_enable failed\n");
		goto err_sysclk;
	}

	/* Calculate the Rx buffer size before allocating rings */
	ret = xgbe_calc_rx_buf_size(netdev, netdev->mtu);
	if (ret < 0)
		goto err_ptpclk;
	pdata->rx_buf_size = ret;

	/* Allocate the channel and ring structures */
	ret = xgbe_alloc_channels(pdata);
	if (ret)
		goto err_ptpclk;

	/* Allocate the ring descriptors and buffers */
	ret = desc_if->alloc_ring_resources(pdata);
	if (ret)
		goto err_channels;

	INIT_WORK(&pdata->service_work, xgbe_service);
	INIT_WORK(&pdata->restart_work, xgbe_restart);
	INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
	xgbe_init_timers(pdata);

	ret = xgbe_start(pdata);
	if (ret)
		goto err_rings;

	clear_bit(XGBE_DOWN, &pdata->dev_state);

	DBGPR("<--xgbe_open\n");

	return 0;

err_rings:
	desc_if->free_ring_resources(pdata);

err_channels:
	xgbe_free_channels(pdata);

err_ptpclk:
	clk_disable_unprepare(pdata->ptpclk);

err_sysclk:
	clk_disable_unprepare(pdata->sysclk);

	return ret;
}
static int xgbe_close(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_desc_if *desc_if = &pdata->desc_if;

	DBGPR("-->xgbe_close\n");

	/* Stop the device */
	xgbe_stop(pdata);

	/* Free the ring descriptors and buffers */
	desc_if->free_ring_resources(pdata);

	/* Free the channel and ring structures */
	xgbe_free_channels(pdata);

	/* Disable the clocks */
	clk_disable_unprepare(pdata->ptpclk);
	clk_disable_unprepare(pdata->sysclk);

	set_bit(XGBE_DOWN, &pdata->dev_state);

	DBGPR("<--xgbe_close\n");

	return 0;
}
static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_channel *channel;
	struct xgbe_ring *ring;
	struct xgbe_packet_data *packet;
	struct netdev_queue *txq;
	int ret;

	DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);

	channel = pdata->channel + skb->queue_mapping;
	txq = netdev_get_tx_queue(netdev, channel->queue_index);
	ring = channel->tx_ring;
	packet = &ring->packet_data;

	ret = NETDEV_TX_OK;

	if (skb->len == 0) {
		netif_err(pdata, tx_err, netdev,
			  "empty skb received from stack\n");
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}

	/* Calculate preliminary packet info */
	memset(packet, 0, sizeof(*packet));
	xgbe_packet_info(pdata, ring, skb, packet);

	/* Check that there are enough descriptors available */
	ret = xgbe_maybe_stop_tx_queue(channel, ring, packet->rdesc_count);
	if (ret)
		goto tx_netdev_return;

	ret = xgbe_prep_tso(skb, packet);
	if (ret) {
		netif_err(pdata, tx_err, netdev,
			  "error processing TSO packet\n");
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}
	xgbe_prep_vlan(skb, packet);

	if (!desc_if->map_tx_skb(channel, skb)) {
		dev_kfree_skb_any(skb);
		goto tx_netdev_return;
	}

	xgbe_prep_tx_tstamp(pdata, skb, packet);

	/* Report on the actual number of bytes (to be) sent */
	netdev_tx_sent_queue(txq, packet->tx_bytes);

	/* Configure required descriptor fields for transmission */
	hw_if->dev_xmit(channel);

	if (netif_msg_pktdata(pdata))
		xgbe_print_pkt(netdev, skb, true);

	/* Stop the queue in advance if there may not be enough descriptors */
	xgbe_maybe_stop_tx_queue(channel, ring, XGBE_TX_MAX_DESCS);

	ret = NETDEV_TX_OK;

tx_netdev_return:
	return ret;
}
static void xgbe_set_rx_mode(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->xgbe_set_rx_mode\n");

	hw_if->config_rx_mode(pdata);

	DBGPR("<--xgbe_set_rx_mode\n");
}
static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct sockaddr *saddr = addr;

	DBGPR("-->xgbe_set_mac_address\n");

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);

	hw_if->set_mac_address(pdata, netdev->dev_addr);

	DBGPR("<--xgbe_set_mac_address\n");

	return 0;
}
static int xgbe_ioctl(struct net_device *netdev, struct ifreq *ifreq, int cmd)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	int ret;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		ret = xgbe_get_hwtstamp_settings(pdata, ifreq);
		break;

	case SIOCSHWTSTAMP:
		ret = xgbe_set_hwtstamp_settings(pdata, ifreq);
		break;

	default:
		ret = -EOPNOTSUPP;
	}

	return ret;
}
static int xgbe_change_mtu(struct net_device *netdev, int mtu)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	int ret;

	DBGPR("-->xgbe_change_mtu\n");

	ret = xgbe_calc_rx_buf_size(netdev, mtu);
	if (ret < 0)
		return ret;

	pdata->rx_buf_size = ret;
	netdev->mtu = mtu;

	xgbe_restart_dev(pdata);

	DBGPR("<--xgbe_change_mtu\n");

	return 0;
}
static void xgbe_tx_timeout(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);

	netdev_warn(netdev, "tx timeout, device restarting\n");
	schedule_work(&pdata->restart_work);
}
static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
						  struct rtnl_link_stats64 *s)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;

	DBGPR("-->%s\n", __func__);

	pdata->hw_if.read_mmc_stats(pdata);

	s->rx_packets = pstats->rxframecount_gb;
	s->rx_bytes = pstats->rxoctetcount_gb;
	s->rx_errors = pstats->rxframecount_gb -
		       pstats->rxbroadcastframes_g -
		       pstats->rxmulticastframes_g -
		       pstats->rxunicastframes_g;
	s->multicast = pstats->rxmulticastframes_g;
	s->rx_length_errors = pstats->rxlengtherror;
	s->rx_crc_errors = pstats->rxcrcerror;
	s->rx_fifo_errors = pstats->rxfifooverflow;

	s->tx_packets = pstats->txframecount_gb;
	s->tx_bytes = pstats->txoctetcount_gb;
	s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
	s->tx_dropped = netdev->stats.tx_dropped;

	DBGPR("<--%s\n", __func__);

	return s;
}
static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
				u16 vid)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->%s\n", __func__);

	set_bit(vid, pdata->active_vlans);
	hw_if->update_vlan_hash_table(pdata);

	DBGPR("<--%s\n", __func__);

	return 0;
}
static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
				 u16 vid)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;

	DBGPR("-->%s\n", __func__);

	clear_bit(vid, pdata->active_vlans);
	hw_if->update_vlan_hash_table(pdata);

	DBGPR("<--%s\n", __func__);

	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void xgbe_poll_controller(struct net_device *netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_channel *channel;
	unsigned int i;

	DBGPR("-->xgbe_poll_controller\n");

	if (pdata->per_channel_irq) {
		channel = pdata->channel;
		for (i = 0; i < pdata->channel_count; i++, channel++)
			xgbe_dma_isr(channel->dma_irq, channel);
	} else {
		disable_irq(pdata->dev_irq);
		xgbe_isr(pdata->dev_irq, pdata);
		enable_irq(pdata->dev_irq);
	}

	DBGPR("<--xgbe_poll_controller\n");
}
#endif /* End CONFIG_NET_POLL_CONTROLLER */
static int xgbe_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
			 struct tc_to_netdev *tc_to_netdev)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	u8 tc;

	if (tc_to_netdev->type != TC_SETUP_MQPRIO)
		return -EINVAL;

	tc = tc_to_netdev->tc;

	if (tc > pdata->hw_feat.tc_cnt)
		return -EINVAL;

	pdata->num_tcs = tc;
	pdata->hw_if.config_tc(pdata);

	return 0;
}
static int xgbe_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct xgbe_prv_data *pdata = netdev_priv(netdev);
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
	int ret = 0;

	rxhash = pdata->netdev_features & NETIF_F_RXHASH;
	rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
	rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
	rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;

	if ((features & NETIF_F_RXHASH) && !rxhash)
		ret = hw_if->enable_rss(pdata);
	else if (!(features & NETIF_F_RXHASH) && rxhash)
		ret = hw_if->disable_rss(pdata);
	if (ret)
		return ret;

	if ((features & NETIF_F_RXCSUM) && !rxcsum)
		hw_if->enable_rx_csum(pdata);
	else if (!(features & NETIF_F_RXCSUM) && rxcsum)
		hw_if->disable_rx_csum(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
		hw_if->enable_rx_vlan_stripping(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
		hw_if->disable_rx_vlan_stripping(pdata);

	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
		hw_if->enable_rx_vlan_filtering(pdata);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
		hw_if->disable_rx_vlan_filtering(pdata);

	pdata->netdev_features = features;

	DBGPR("<--xgbe_set_features\n");

	return 0;
}
static const struct net_device_ops xgbe_netdev_ops = {
	.ndo_open		= xgbe_open,
	.ndo_stop		= xgbe_close,
	.ndo_start_xmit		= xgbe_xmit,
	.ndo_set_rx_mode	= xgbe_set_rx_mode,
	.ndo_set_mac_address	= xgbe_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= xgbe_ioctl,
	.ndo_change_mtu		= xgbe_change_mtu,
	.ndo_tx_timeout		= xgbe_tx_timeout,
	.ndo_get_stats64	= xgbe_get_stats64,
	.ndo_vlan_rx_add_vid	= xgbe_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= xgbe_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= xgbe_poll_controller,
#endif
	.ndo_setup_tc		= xgbe_setup_tc,
	.ndo_set_features	= xgbe_set_features,
};

const struct net_device_ops *xgbe_get_netdev_ops(void)
{
	return &xgbe_netdev_ops;
}
static void xgbe_rx_refresh(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;

	while (ring->dirty != ring->cur) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);

		/* Reset rdata values */
		desc_if->unmap_rdata(pdata, rdata);

		if (desc_if->map_rx_buffer(pdata, ring, rdata))
			break;

		hw_if->rx_desc_reset(pdata, rdata, ring->dirty);

		ring->dirty++;
	}

	/* Make sure everything is written before the register write */
	wmb();

	/* Update the Rx Tail Pointer Register with address of
	 * the last cleaned entry */
	rdata = XGBE_GET_DESC_DATA(ring, ring->dirty - 1);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
			  lower_32_bits(rdata->rdesc_dma));
}
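/* Writing the tail pointer with the address of the last cleaned entry hands
 * ownership of all refreshed descriptors back to the hardware in a single
 * MMIO write; the wmb() above orders the descriptor updates before it.
 */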
static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
				       struct napi_struct *napi,
				       struct xgbe_ring_data *rdata,
				       unsigned int len)
{
	struct sk_buff *skb;
	u8 *packet;
	unsigned int copy_len;

	skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len);
	if (!skb)
		return NULL;

	/* Start with the header buffer which may contain just the header
	 * or the header plus data
	 */
	dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base,
				      rdata->rx.hdr.dma_off,
				      rdata->rx.hdr.dma_len, DMA_FROM_DEVICE);

	packet = page_address(rdata->rx.hdr.pa.pages) +
		 rdata->rx.hdr.pa.pages_offset;
	copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : len;
	copy_len = min(rdata->rx.hdr.dma_len, copy_len);
	skb_copy_to_linear_data(skb, packet, copy_len);
	skb_put(skb, copy_len);

	len -= copy_len;
	if (len) {
		/* Add the remaining data as a frag */
		dma_sync_single_range_for_cpu(pdata->dev,
					      rdata->rx.buf.dma_base,
					      rdata->rx.buf.dma_off,
					      rdata->rx.buf.dma_len,
					      DMA_FROM_DEVICE);

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				rdata->rx.buf.pa.pages,
				rdata->rx.buf.pa.pages_offset,
				len, rdata->rx.buf.dma_len);
		rdata->rx.buf.pa.pages = NULL;
	}

	return skb;
}
static int xgbe_tx_poll(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	struct net_device *netdev = pdata->netdev;
	struct netdev_queue *txq;
	int processed = 0;
	unsigned int tx_packets = 0, tx_bytes = 0;
	unsigned int cur;

	DBGPR("-->xgbe_tx_poll\n");

	/* Nothing to do if there isn't a Tx ring for this channel */
	if (!ring)
		return 0;

	cur = ring->cur;

	/* Be sure we get ring->cur before accessing descriptor data */
	smp_rmb();

	txq = netdev_get_tx_queue(netdev, channel->queue_index);

	while ((processed < XGBE_TX_DESC_MAX_PROC) &&
	       (ring->dirty != cur)) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
		rdesc = rdata->rdesc;

		if (!hw_if->tx_complete(rdesc))
			break;

		/* Make sure descriptor fields are read after reading the OWN
		 * bit */
		dma_rmb();

		if (netif_msg_tx_done(pdata))
			xgbe_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);

		if (hw_if->is_last_desc(rdesc)) {
			tx_packets += rdata->tx.packets;
			tx_bytes += rdata->tx.bytes;
		}

		/* Free the SKB and reset the descriptor for re-use */
		desc_if->unmap_rdata(pdata, rdata);
		hw_if->tx_desc_reset(rdata);

		processed++;
		ring->dirty++;
	}

	if (!processed)
		return 0;

	netdev_tx_completed_queue(txq, tx_packets, tx_bytes);

	if ((ring->tx.queue_stopped == 1) &&
	    (xgbe_tx_avail_desc(ring) > XGBE_TX_DESC_MIN_FREE)) {
		ring->tx.queue_stopped = 0;
		netif_tx_wake_queue(txq);
	}

	DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);

	return processed;
}
static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_hw_if *hw_if = &pdata->hw_if;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_packet_data *packet;
	struct net_device *netdev = pdata->netdev;
	struct napi_struct *napi;
	struct sk_buff *skb;
	struct skb_shared_hwtstamps *hwtstamps;
	unsigned int incomplete, error, context_next, context;
	unsigned int len, rdesc_len, max_len;
	unsigned int received = 0;
	int packet_count = 0;

	DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);

	/* Nothing to do if there isn't a Rx ring for this channel */
	if (!ring)
		return 0;

	incomplete = 0;
	context_next = 0;

	napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;

	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
	packet = &ring->packet_data;
	while (packet_count < budget) {
		DBGPR("  cur = %d\n", ring->cur);

		/* First time in loop see if we need to restore state */
		if (!received && rdata->state_saved) {
			skb = rdata->state.skb;
			error = rdata->state.error;
			len = rdata->state.len;
		} else {
			memset(packet, 0, sizeof(*packet));
			skb = NULL;
			error = 0;
			len = 0;
		}

read_again:
		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);

		if (xgbe_rx_dirty_desc(ring) > (XGBE_RX_DESC_CNT >> 3))
			xgbe_rx_refresh(channel);

		if (hw_if->dev_read(channel))
			break;

		received++;
		ring->cur++;

		incomplete = XGMAC_GET_BITS(packet->attributes,
					    RX_PACKET_ATTRIBUTES,
					    INCOMPLETE);
		context_next = XGMAC_GET_BITS(packet->attributes,
					      RX_PACKET_ATTRIBUTES,
					      CONTEXT_NEXT);
		context = XGMAC_GET_BITS(packet->attributes,
					 RX_PACKET_ATTRIBUTES,
					 CONTEXT);

		/* Earlier error, just drain the remaining data */
		if ((incomplete || context_next) && error)
			goto read_again;

		if (error || packet->errors) {
			if (packet->errors)
				netif_err(pdata, rx_err, netdev,
					  "error in received packet\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

		if (!context) {
			/* Length is cumulative, get this descriptor's length */
			rdesc_len = rdata->rx.len - len;
			len += rdesc_len;

			if (rdesc_len && !skb) {
				skb = xgbe_create_skb(pdata, napi, rdata,
						      rdesc_len);
				if (!skb)
					error = 1;
			} else if (rdesc_len) {
				dma_sync_single_range_for_cpu(pdata->dev,
							rdata->rx.buf.dma_base,
							rdata->rx.buf.dma_off,
							rdata->rx.buf.dma_len,
							DMA_FROM_DEVICE);

				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
						rdata->rx.buf.pa.pages,
						rdata->rx.buf.pa.pages_offset,
						rdesc_len,
						rdata->rx.buf.dma_len);
				rdata->rx.buf.pa.pages = NULL;
			}
		}

		if (incomplete || context_next)
			goto read_again;

		if (!skb)
			goto next_packet;

		/* Be sure we don't exceed the configured MTU */
		max_len = netdev->mtu + ETH_HLEN;
		if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    (skb->protocol == htons(ETH_P_8021Q)))
			max_len += VLAN_HLEN;

		if (skb->len > max_len) {
			netif_err(pdata, rx_err, netdev,
				  "packet length exceeds configured MTU\n");
			dev_kfree_skb(skb);
			goto next_packet;
		}

		if (netif_msg_pktdata(pdata))
			xgbe_print_pkt(netdev, skb, false);

		skb_checksum_none_assert(skb);
		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, CSUM_DONE))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, VLAN_CTAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       packet->vlan_ctag);

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, RX_TSTAMP)) {
			u64 nsec;

			nsec = timecounter_cyc2time(&pdata->tstamp_tc,
						    packet->rx_tstamp);
			hwtstamps = skb_hwtstamps(skb);
			hwtstamps->hwtstamp = ns_to_ktime(nsec);
		}

		if (XGMAC_GET_BITS(packet->attributes,
				   RX_PACKET_ATTRIBUTES, RSS_HASH))
			skb_set_hash(skb, packet->rss_hash,
				     packet->rss_hash_type);

		skb->dev = netdev;
		skb->protocol = eth_type_trans(skb, netdev);
		skb_record_rx_queue(skb, channel->queue_index);

		napi_gro_receive(napi, skb);

next_packet:
		packet_count++;
	}

	/* Check if we need to save state before leaving */
	if (received && (incomplete || context_next)) {
		rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
		rdata->state_saved = 1;
		rdata->state.skb = skb;
		rdata->state.len = len;
		rdata->state.error = error;
	}

	DBGPR("<--xgbe_rx_poll: packet_count = %d\n", packet_count);

	return packet_count;
}
static int xgbe_one_poll(struct napi_struct *napi, int budget)
{
	struct xgbe_channel *channel = container_of(napi, struct xgbe_channel,
						    napi);
	struct xgbe_prv_data *pdata = channel->pdata;
	int processed = 0;

	DBGPR("-->xgbe_one_poll: budget=%d\n", budget);

	/* Cleanup Tx ring first */
	xgbe_tx_poll(channel);

	/* Process Rx ring next */
	processed = xgbe_rx_poll(channel, budget);

	/* If we processed everything, we are done */
	if (processed < budget) {
		/* Turn off polling */
		napi_complete_done(napi, processed);

		/* Enable Tx and Rx interrupts */
		if (pdata->channel_irq_mode)
			xgbe_enable_rx_tx_int(pdata, channel);
		else
			enable_irq(channel->dma_irq);
	}

	DBGPR("<--xgbe_one_poll: received = %d\n", processed);

	return processed;
}
static int xgbe_all_poll(struct napi_struct *napi, int budget)
{
	struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
						   napi);
	struct xgbe_channel *channel;
	int ring_budget;
	int processed, last_processed;
	unsigned int i;

	DBGPR("-->xgbe_all_poll: budget=%d\n", budget);

	processed = 0;
	ring_budget = budget / pdata->rx_ring_count;
	do {
		last_processed = processed;

		channel = pdata->channel;
		for (i = 0; i < pdata->channel_count; i++, channel++) {
			/* Cleanup Tx ring first */
			xgbe_tx_poll(channel);

			/* Process Rx ring next */
			if (ring_budget > (budget - processed))
				ring_budget = budget - processed;
			processed += xgbe_rx_poll(channel, ring_budget);
		}
	} while ((processed < budget) && (processed != last_processed));

	/* If we processed everything, we are done */
	if (processed < budget) {
		/* Turn off polling */
		napi_complete_done(napi, processed);

		/* Enable Tx and Rx interrupts */
		xgbe_enable_rx_tx_ints(pdata);
	}

	DBGPR("<--xgbe_all_poll: received = %d\n", processed);

	return processed;
}
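/* Budget handling: the overall NAPI budget is split evenly across the Rx
 * rings, e.g. a budget of 64 with 4 rings gives each ring up to 16 packets
 * per pass; the do/while keeps looping until a pass makes no progress or
 * the budget is exhausted.
 */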
void xgbe_dump_tx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
		       unsigned int idx, unsigned int count, unsigned int flag)
{
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;

	while (count--) {
		rdata = XGBE_GET_DESC_DATA(ring, idx);
		rdesc = rdata->rdesc;
		netdev_dbg(pdata->netdev,
			   "TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
			   (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
			   le32_to_cpu(rdesc->desc0),
			   le32_to_cpu(rdesc->desc1),
			   le32_to_cpu(rdesc->desc2),
			   le32_to_cpu(rdesc->desc3));
		idx++;
	}
}
void xgbe_dump_rx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
		       unsigned int idx)
{
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;

	rdata = XGBE_GET_DESC_DATA(ring, idx);
	rdesc = rdata->rdesc;
	netdev_dbg(pdata->netdev,
		   "RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n",
		   idx, le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
		   le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
}
void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
{
	struct ethhdr *eth = (struct ethhdr *)skb->data;
	unsigned char *buf = skb->data;
	unsigned char buffer[128];
	unsigned int i, j;

	netdev_dbg(netdev, "\n************** SKB dump ****************\n");

	netdev_dbg(netdev, "%s packet of %d bytes\n",
		   (tx_rx ? "TX" : "RX"), skb->len);

	netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
	netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
	netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));

	for (i = 0, j = 0; i < skb->len;) {
		j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
			      buf[i++]);

		if ((i % 32) == 0) {
			netdev_dbg(netdev, "  %#06x: %s\n", i - 32, buffer);
			j = 0;
		} else if ((i % 16) == 0) {
			buffer[j++] = ' ';
			buffer[j++] = ' ';
		} else if ((i % 4) == 0) {
			buffer[j++] = ' ';
		}
	}
	if (i % 32)
		netdev_dbg(netdev, "  %#06x: %s\n", i - (i % 32), buffer);

	netdev_dbg(netdev, "\n************** SKB dump ****************\n");
}