/* 10G controller driver for Samsung SoCs
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/crc32.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/prefetch.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/sxgbe_platform.h>

#include "sxgbe_common.h"
#include "sxgbe_desc.h"
#include "sxgbe_dma.h"
#include "sxgbe_mtl.h"
#include "sxgbe_reg.h"

#define SXGBE_ALIGN(x)	L1_CACHE_ALIGN(x)
#define JUMBO_LEN	9000

/* Module parameters */
#define TX_TIMEO	5000
#define DMA_TX_SIZE	512
#define DMA_RX_SIZE	1024
#define TC_DEFAULT	64
#define DMA_BUFFER_SIZE	BUF_SIZE_2KiB
/* The default timer value as per the sxgbe specification 1 sec(1000 ms) */
#define SXGBE_DEFAULT_LPI_TIMER	1000

static int debug = -1;
static int eee_timer = SXGBE_DEFAULT_LPI_TIMER;

module_param(eee_timer, int, S_IRUGO | S_IWUSR);

module_param(debug, int, S_IRUGO | S_IWUSR);
static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

static irqreturn_t sxgbe_common_interrupt(int irq, void *dev_id);
static irqreturn_t sxgbe_tx_interrupt(int irq, void *dev_id);
static irqreturn_t sxgbe_rx_interrupt(int irq, void *dev_id);

#define SXGBE_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))

#define SXGBE_LPI_TIMER(x) (jiffies + msecs_to_jiffies(x))

/**
 * sxgbe_verify_args - verify the driver parameters.
 * Description: it verifies if some wrong parameter is passed to the driver.
 * Note that wrong parameters are replaced with the default values.
 */
static void sxgbe_verify_args(void)
{
	if (unlikely(eee_timer < 0))
		eee_timer = SXGBE_DEFAULT_LPI_TIMER;
}

static void sxgbe_enable_eee_mode(const struct sxgbe_priv_data *priv)
{
	/* Check and enter in LPI mode */
	if (!priv->tx_path_in_lpi_mode)
		priv->hw->mac->set_eee_mode(priv->ioaddr);
}

void sxgbe_disable_eee_mode(struct sxgbe_priv_data * const priv)
{
	/* Exit and disable EEE in case we are in LPI state. */
	priv->hw->mac->reset_eee_mode(priv->ioaddr);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}

/**
 * sxgbe_eee_ctrl_timer
 * @arg : data hook
 * Description:
 *  If there is no data transfer and if we are not in LPI state,
 *  then the MAC Transmitter can be moved to LPI state.
 */
static void sxgbe_eee_ctrl_timer(unsigned long arg)
{
	struct sxgbe_priv_data *priv = (struct sxgbe_priv_data *)arg;

	sxgbe_enable_eee_mode(priv);
	mod_timer(&priv->eee_ctrl_timer, SXGBE_LPI_TIMER(eee_timer));
}

/**
 * sxgbe_eee_init
 * @priv: private device pointer
 * Description:
 *  If EEE support was enabled while configuring the driver, the MAC
 *  actually supports EEE (per the HW capability register) and the PHY
 *  can also manage EEE, then enable the LPI state and start the timer
 *  to verify whether the tx path can enter the LPI state.
 */
bool sxgbe_eee_init(struct sxgbe_priv_data * const priv)
{
	bool ret = false;

	/* MAC core supports the EEE feature. */
	if (priv->hw_cap.eee) {
		/* Check if the PHY supports EEE */
		if (phy_init_eee(priv->phydev, 1))
			return false;

		priv->eee_active = 1;
		init_timer(&priv->eee_ctrl_timer);
		priv->eee_ctrl_timer.function = sxgbe_eee_ctrl_timer;
		priv->eee_ctrl_timer.data = (unsigned long)priv;
		priv->eee_ctrl_timer.expires = SXGBE_LPI_TIMER(eee_timer);
		add_timer(&priv->eee_ctrl_timer);

		priv->hw->mac->set_eee_timer(priv->ioaddr,
					     SXGBE_DEFAULT_LPI_TIMER,
					     priv->tx_lpi_timer);

		pr_info("Energy-Efficient Ethernet initialized\n");

		ret = true;
	}

	return ret;
}

static void sxgbe_eee_adjust(const struct sxgbe_priv_data *priv)
{
	/* When the EEE has already been initialised we have to
	 * modify the PLS bit in the LPI ctrl & status reg according
	 * to the PHY link status.
	 */
	if (priv->eee_enabled)
		priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link);
}

/**
 * sxgbe_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the CSR
 * clock input.
 */
static void sxgbe_clk_csr_set(struct sxgbe_priv_data *priv)
{
	u32 clk_rate = clk_get_rate(priv->sxgbe_clk);

	/* assign the proper divider, this will be used during
	 * mdio communication
	 */
	if (clk_rate < SXGBE_CSR_F_150M)
		priv->clk_csr = SXGBE_CSR_100_150M;
	else if (clk_rate <= SXGBE_CSR_F_250M)
		priv->clk_csr = SXGBE_CSR_150_250M;
	else if (clk_rate <= SXGBE_CSR_F_300M)
		priv->clk_csr = SXGBE_CSR_250_300M;
	else if (clk_rate <= SXGBE_CSR_F_350M)
		priv->clk_csr = SXGBE_CSR_300_350M;
	else if (clk_rate <= SXGBE_CSR_F_400M)
		priv->clk_csr = SXGBE_CSR_350_400M;
	else if (clk_rate <= SXGBE_CSR_F_500M)
		priv->clk_csr = SXGBE_CSR_400_500M;
}

/* minimum number of free TX descriptors required to wake up TX process */
#define SXGBE_TX_THRESH(x)	(x->dma_tx_size / 4)

static inline u32 sxgbe_tx_avail(struct sxgbe_tx_queue *queue, int tx_qsize)
{
	return queue->dirty_tx + tx_qsize - queue->cur_tx - 1;
}

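/* Worked example (illustration, not part of the original driver): with
 * tx_qsize = 8, cur_tx = 10 and dirty_tx = 5 there are 10 - 5 = 5
 * descriptors in flight, so sxgbe_tx_avail() returns 5 + 8 - 10 - 1 = 2
 * free slots. One slot is deliberately kept unused by the "- 1" so that
 * cur_tx == dirty_tx can only mean "ring empty", never "ring full".
 */
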
/**
 * sxgbe_adjust_link
 * @dev: net device structure
 * Description: it adjusts the link parameters.
 */
static void sxgbe_adjust_link(struct net_device *dev)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;
	u8 new_state = 0;
	u8 speed = 0xff;

	if (!phydev)
		return;

	/* SXGBE does not support auto-negotiation or half-duplex mode,
	 * so duplex changes are not handled in this function; only
	 * speed and link status are handled.
	 */
	if (phydev->link) {
		if (phydev->speed != priv->speed) {
			new_state = 1;
			switch (phydev->speed) {
			case SPEED_10000:
				speed = SXGBE_SPEED_10G;
				break;
			case SPEED_2500:
				speed = SXGBE_SPEED_2_5G;
				break;
			case SPEED_1000:
				speed = SXGBE_SPEED_1G;
				break;
			default:
				netif_err(priv, link, dev,
					  "Speed (%d) not supported\n",
					  phydev->speed);
			}

			priv->speed = phydev->speed;
			priv->hw->mac->set_speed(priv->ioaddr, speed);
		}

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->speed = SPEED_UNKNOWN;
	}

	if (new_state & netif_msg_link(priv))
		phy_print_status(phydev);

	/* Alter the MAC settings for EEE */
	sxgbe_eee_adjust(priv);
}

/**
 * sxgbe_init_phy - PHY initialization
 * @ndev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the MAC driver.
 * Return value:
 * 0 on success
 */
static int sxgbe_init_phy(struct net_device *ndev)
{
	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
	char bus_id[MII_BUS_ID_SIZE];
	struct phy_device *phydev;
	struct sxgbe_priv_data *priv = netdev_priv(ndev);
	int phy_iface = priv->plat->interface;

	/* assign default link status */
	priv->oldlink = 0;
	priv->speed = SPEED_UNKNOWN;
	priv->oldduplex = DUPLEX_UNKNOWN;

	if (priv->plat->phy_bus_name)
		snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
			 priv->plat->phy_bus_name, priv->plat->bus_id);
	else
		snprintf(bus_id, MII_BUS_ID_SIZE, "sxgbe-%x",
			 priv->plat->bus_id);

	snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
		 priv->plat->phy_addr);
	netdev_dbg(ndev, "%s: trying to attach to %s\n", __func__, phy_id_fmt);

	phydev = phy_connect(ndev, phy_id_fmt, &sxgbe_adjust_link, phy_iface);

	if (IS_ERR(phydev)) {
		netdev_err(ndev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Stop Advertising 1000BASE Capability if interface is not GMII */
	if ((phy_iface == PHY_INTERFACE_MODE_MII) ||
	    (phy_iface == PHY_INTERFACE_MODE_RMII))
		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
					 SUPPORTED_1000baseT_Full);

	if (phydev->phy_id == 0) {
		phy_disconnect(phydev);
		return -ENODEV;
	}

	netdev_dbg(ndev, "%s: attached to PHY (UID 0x%x) Link = %d\n",
		   __func__, phydev->phy_id, phydev->link);

	/* save phy device in private structure */
	priv->phydev = phydev;

	return 0;
}

/**
 * sxgbe_clear_descriptors: clear descriptors
 * @priv: driver private structure
 * Description: this function is called to clear the tx and rx descriptors
 * in case both basic and extended descriptors are used.
 */
static void sxgbe_clear_descriptors(struct sxgbe_priv_data *priv)
{
	int i, j;
	unsigned int txsize = priv->dma_tx_size;
	unsigned int rxsize = priv->dma_rx_size;

	/* Clear the Rx/Tx descriptors */
	for (j = 0; j < SXGBE_RX_QUEUES; j++) {
		for (i = 0; i < rxsize; i++)
			priv->hw->desc->init_rx_desc(&priv->rxq[j]->dma_rx[i],
						     priv->use_riwt, priv->mode,
						     (i == rxsize - 1));
	}

	for (j = 0; j < SXGBE_TX_QUEUES; j++) {
		for (i = 0; i < txsize; i++)
			priv->hw->desc->init_tx_desc(&priv->txq[j]->dma_tx[i]);
	}
}

static int sxgbe_init_rx_buffers(struct net_device *dev,
				 struct sxgbe_rx_norm_desc *p, int i,
				 unsigned int dma_buf_sz,
				 struct sxgbe_rx_queue *rx_ring)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	struct sk_buff *skb;

	skb = __netdev_alloc_skb_ip_align(dev, dma_buf_sz, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	rx_ring->rx_skbuff[i] = skb;
	rx_ring->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
						   dma_buf_sz, DMA_FROM_DEVICE);

	if (dma_mapping_error(priv->device, rx_ring->rx_skbuff_dma[i])) {
		netdev_err(dev, "%s: DMA mapping error\n", __func__);
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	p->rdes23.rx_rd_des23.buf2_addr = rx_ring->rx_skbuff_dma[i];

	return 0;
}

/**
 * init_tx_ring - init the TX descriptor ring
 * @dev: device structure
 * @queue_no: queue number
 * @tx_ring: ring to be initialised
 * @tx_rsize: ring size
 * Description: this function initializes the DMA TX descriptor ring
 */
static int init_tx_ring(struct device *dev, u8 queue_no,
			struct sxgbe_tx_queue *tx_ring, int tx_rsize)
{
	/* TX ring is not allocated */
	if (!tx_ring) {
		dev_err(dev, "No memory for TX queue of SXGBE\n");
		return -ENOMEM;
	}

	/* allocate memory for TX descriptors */
	tx_ring->dma_tx = dma_zalloc_coherent(dev,
					      tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
					      &tx_ring->dma_tx_phy, GFP_KERNEL);
	if (!tx_ring->dma_tx)
		return -ENOMEM;

	/* allocate memory for TX skbuff array */
	tx_ring->tx_skbuff_dma = devm_kcalloc(dev, tx_rsize,
					      sizeof(dma_addr_t), GFP_KERNEL);
	if (!tx_ring->tx_skbuff_dma)
		goto dmamem_err;

	tx_ring->tx_skbuff = devm_kcalloc(dev, tx_rsize,
					  sizeof(struct sk_buff *), GFP_KERNEL);

	if (!tx_ring->tx_skbuff)
		goto dmamem_err;

	/* assign queue number */
	tx_ring->queue_no = queue_no;

	/* initialise counters */
	tx_ring->dirty_tx = 0;
	tx_ring->cur_tx = 0;

	/* initialise TX queue lock */
	spin_lock_init(&tx_ring->tx_lock);

	return 0;

dmamem_err:
	dma_free_coherent(dev, tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
			  tx_ring->dma_tx, tx_ring->dma_tx_phy);
	return -ENOMEM;
}

/**
 * free_rx_ring - free the RX descriptor ring
 * @dev: device structure
 * @rx_ring: ring to be freed
 * @rx_rsize: ring size
 * Description: this function frees the DMA RX descriptor ring
 */
void free_rx_ring(struct device *dev, struct sxgbe_rx_queue *rx_ring,
		  int rx_rsize)
{
	dma_free_coherent(dev, rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
			  rx_ring->dma_rx, rx_ring->dma_rx_phy);
	kfree(rx_ring->rx_skbuff_dma);
	kfree(rx_ring->rx_skbuff);
}

/**
 * init_rx_ring - init the RX descriptor ring
 * @dev: net device structure
 * @queue_no: queue number
 * @rx_ring: ring to be initialised
 * @rx_rsize: ring size
 * Description: this function initializes the DMA RX descriptor ring
 */
static int init_rx_ring(struct net_device *dev, u8 queue_no,
			struct sxgbe_rx_queue *rx_ring, int rx_rsize)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	int desc_index;
	unsigned int bfsize = 0;
	unsigned int ret = 0;

	/* Set the max buffer size according to the MTU. */
	bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN, 8);

	netif_dbg(priv, probe, dev, "%s: bfsize %d\n", __func__, bfsize);

	/* RX ring is not allocated */
	if (rx_ring == NULL) {
		netdev_err(dev, "No memory for RX queue\n");
		return -ENOMEM;
	}

	/* assign queue number */
	rx_ring->queue_no = queue_no;

	/* allocate memory for RX descriptors */
	rx_ring->dma_rx = dma_zalloc_coherent(priv->device,
					      rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
					      &rx_ring->dma_rx_phy, GFP_KERNEL);

	if (rx_ring->dma_rx == NULL)
		return -ENOMEM;

	/* allocate memory for RX skbuff array */
	rx_ring->rx_skbuff_dma = kmalloc_array(rx_rsize,
					       sizeof(dma_addr_t), GFP_KERNEL);
	if (rx_ring->rx_skbuff_dma == NULL)
		goto dmamem_err;

	rx_ring->rx_skbuff = kmalloc_array(rx_rsize,
					   sizeof(struct sk_buff *), GFP_KERNEL);
	if (rx_ring->rx_skbuff == NULL)
		goto rxbuff_err;

	/* initialise the buffers */
	for (desc_index = 0; desc_index < rx_rsize; desc_index++) {
		struct sxgbe_rx_norm_desc *p;
		p = rx_ring->dma_rx + desc_index;
		ret = sxgbe_init_rx_buffers(dev, p, desc_index,
					    bfsize, rx_ring);
		if (ret)
			goto err_init_rx_buffers;
	}

	/* initialise counters */
	rx_ring->cur_rx = 0;
	rx_ring->dirty_rx = (unsigned int)(desc_index - rx_rsize);
	priv->dma_buf_sz = bfsize;

	return 0;

err_init_rx_buffers:
	while (--desc_index >= 0)
		free_rx_ring(priv->device, rx_ring, desc_index);
	kfree(rx_ring->rx_skbuff);
rxbuff_err:
	kfree(rx_ring->rx_skbuff_dma);
dmamem_err:
	dma_free_coherent(priv->device,
			  rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
			  rx_ring->dma_rx, rx_ring->dma_rx_phy);
	return -ENOMEM;
}

/**
 * free_tx_ring - free the TX descriptor ring
 * @dev: device structure
 * @tx_ring: ring to be freed
 * @tx_rsize: ring size
 * Description: this function frees the DMA TX descriptor ring
 */
void free_tx_ring(struct device *dev, struct sxgbe_tx_queue *tx_ring,
		  int tx_rsize)
{
	dma_free_coherent(dev, tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
			  tx_ring->dma_tx, tx_ring->dma_tx_phy);
}

/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @netd: net device structure
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_desc_rings(struct net_device *netd)
{
	int queue_num, ret;
	struct sxgbe_priv_data *priv = netdev_priv(netd);
	int tx_rsize = priv->dma_tx_size;
	int rx_rsize = priv->dma_rx_size;

	/* Allocate memory for queue structures and TX descs */
	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		ret = init_tx_ring(priv->device, queue_num,
				   priv->txq[queue_num], tx_rsize);
		if (ret) {
			dev_err(&netd->dev, "TX DMA ring allocation failed!\n");
			goto txalloc_err;
		}

		/* save private pointer in each ring; this
		 * pointer is needed while cleaning the TX queue
		 */
		priv->txq[queue_num]->priv_ptr = priv;
	}

	/* Allocate memory for queue structures and RX descs */
	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
		ret = init_rx_ring(netd, queue_num,
				   priv->rxq[queue_num], rx_rsize);
		if (ret) {
			netdev_err(netd, "RX DMA ring allocation failed!!\n");
			goto rxalloc_err;
		}

		/* save private pointer in each ring; this
		 * pointer is needed while cleaning the RX queue
		 */
		priv->rxq[queue_num]->priv_ptr = priv;
	}

	sxgbe_clear_descriptors(priv);

	return 0;

txalloc_err:
	while (queue_num--)
		free_tx_ring(priv->device, priv->txq[queue_num], tx_rsize);
	return ret;

rxalloc_err:
	while (queue_num--)
		free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize);
	return ret;
}

static void tx_free_ring_skbufs(struct sxgbe_tx_queue *txqueue)
{
	int dma_desc;
	struct sxgbe_priv_data *priv = txqueue->priv_ptr;
	int tx_rsize = priv->dma_tx_size;

	for (dma_desc = 0; dma_desc < tx_rsize; dma_desc++) {
		struct sxgbe_tx_norm_desc *tdesc = txqueue->dma_tx + dma_desc;

		if (txqueue->tx_skbuff_dma[dma_desc])
			dma_unmap_single(priv->device,
					 txqueue->tx_skbuff_dma[dma_desc],
					 priv->hw->desc->get_tx_len(tdesc),
					 DMA_TO_DEVICE);

		dev_kfree_skb_any(txqueue->tx_skbuff[dma_desc]);
		txqueue->tx_skbuff[dma_desc] = NULL;
		txqueue->tx_skbuff_dma[dma_desc] = 0;
	}
}

static void dma_free_tx_skbufs(struct sxgbe_priv_data *priv)
{
	int queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		struct sxgbe_tx_queue *tqueue = priv->txq[queue_num];
		tx_free_ring_skbufs(tqueue);
	}
}

static void free_dma_desc_resources(struct sxgbe_priv_data *priv)
{
	int queue_num;
	int tx_rsize = priv->dma_tx_size;
	int rx_rsize = priv->dma_rx_size;

	/* Release the DMA TX buffers */
	dma_free_tx_skbufs(priv);

	/* Release the TX ring memory also */
	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		free_tx_ring(priv->device, priv->txq[queue_num], tx_rsize);
	}

	/* Release the RX ring memory also */
	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
		free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize);
	}
}

static int txring_mem_alloc(struct sxgbe_priv_data *priv)
{
	int queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		priv->txq[queue_num] = devm_kmalloc(priv->device,
						    sizeof(struct sxgbe_tx_queue), GFP_KERNEL);
		if (!priv->txq[queue_num])
			return -ENOMEM;
	}

	return 0;
}

static int rxring_mem_alloc(struct sxgbe_priv_data *priv)
{
	int queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
		priv->rxq[queue_num] = devm_kmalloc(priv->device,
						    sizeof(struct sxgbe_rx_queue), GFP_KERNEL);
		if (!priv->rxq[queue_num])
			return -ENOMEM;
	}

	return 0;
}

/**
 * sxgbe_mtl_operation_mode - HW MTL operation mode
 * @priv: driver private structure
 * Description: it sets the MTL operation mode: tx/rx MTL thresholds
 * or Store-And-Forward capability.
 */
static void sxgbe_mtl_operation_mode(struct sxgbe_priv_data *priv)
{
	int queue_num;

	/* TX/RX threshold control */
	if (likely(priv->plat->force_sf_dma_mode)) {
		/* set TC mode for TX QUEUES */
		SXGBE_FOR_EACH_QUEUE(priv->hw_cap.tx_mtl_queues, queue_num)
			priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr, queue_num,
						       SXGBE_MTL_SFMODE);
		priv->tx_tc = SXGBE_MTL_SFMODE;

		/* set TC mode for RX QUEUES */
		SXGBE_FOR_EACH_QUEUE(priv->hw_cap.rx_mtl_queues, queue_num)
			priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr, queue_num,
						       SXGBE_MTL_SFMODE);
		priv->rx_tc = SXGBE_MTL_SFMODE;
	} else if (unlikely(priv->plat->force_thresh_dma_mode)) {
		/* set TC mode for TX QUEUES */
		SXGBE_FOR_EACH_QUEUE(priv->hw_cap.tx_mtl_queues, queue_num)
			priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr, queue_num,
						       priv->tx_tc);
		/* set TC mode for RX QUEUES */
		SXGBE_FOR_EACH_QUEUE(priv->hw_cap.rx_mtl_queues, queue_num)
			priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr, queue_num,
						       priv->rx_tc);
	} else {
		pr_err("ERROR: %s: Invalid TX threshold mode\n", __func__);
	}
}

/**
 * sxgbe_tx_queue_clean:
 * @tqueue: queue pointer
 * Description: it reclaims resources after transmission completes.
 */
static void sxgbe_tx_queue_clean(struct sxgbe_tx_queue *tqueue)
{
	struct sxgbe_priv_data *priv = tqueue->priv_ptr;
	unsigned int tx_rsize = priv->dma_tx_size;
	struct netdev_queue *dev_txq;
	u8 queue_no = tqueue->queue_no;

	dev_txq = netdev_get_tx_queue(priv->dev, queue_no);

	spin_lock(&tqueue->tx_lock);

	priv->xstats.tx_clean++;
	while (tqueue->dirty_tx != tqueue->cur_tx) {
		unsigned int entry = tqueue->dirty_tx % tx_rsize;
		struct sk_buff *skb = tqueue->tx_skbuff[entry];
		struct sxgbe_tx_norm_desc *p;

		p = tqueue->dma_tx + entry;

		/* Check if the descriptor is owned by the DMA. */
		if (priv->hw->desc->get_tx_owner(p))
			break;

		if (netif_msg_tx_done(priv))
			pr_debug("%s: curr %d, dirty %d\n",
				 __func__, tqueue->cur_tx, tqueue->dirty_tx);

		if (likely(tqueue->tx_skbuff_dma[entry])) {
			dma_unmap_single(priv->device,
					 tqueue->tx_skbuff_dma[entry],
					 priv->hw->desc->get_tx_len(p),
					 DMA_TO_DEVICE);
			tqueue->tx_skbuff_dma[entry] = 0;
		}

		if (likely(skb)) {
			dev_kfree_skb(skb);
			tqueue->tx_skbuff[entry] = NULL;
		}

		priv->hw->desc->release_tx_desc(p);

		tqueue->dirty_tx++;
	}

	/* wake up the queue */
	if (unlikely(netif_tx_queue_stopped(dev_txq) &&
		     sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv))) {
		netif_tx_lock(priv->dev);
		if (netif_tx_queue_stopped(dev_txq) &&
		    sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv)) {
			if (netif_msg_tx_done(priv))
				pr_debug("%s: restart transmit\n", __func__);
			netif_tx_wake_queue(dev_txq);
		}
		netif_tx_unlock(priv->dev);
	}

	spin_unlock(&tqueue->tx_lock);
}

/**
 * sxgbe_tx_all_clean:
 * @priv: driver private structure
 * Description: it reclaims resources after transmission completes.
 */
static void sxgbe_tx_all_clean(struct sxgbe_priv_data * const priv)
{
	u8 queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		struct sxgbe_tx_queue *tqueue = priv->txq[queue_num];

		sxgbe_tx_queue_clean(tqueue);
	}

	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
		sxgbe_enable_eee_mode(priv);
		mod_timer(&priv->eee_ctrl_timer, SXGBE_LPI_TIMER(eee_timer));
	}
}

/**
 * sxgbe_restart_tx_queue: irq tx error mng function
 * @priv: driver private structure
 * @queue_num: queue number
 * Description: it cleans the descriptors and restarts the transmission
 * in case of errors.
 */
static void sxgbe_restart_tx_queue(struct sxgbe_priv_data *priv, int queue_num)
{
	struct sxgbe_tx_queue *tx_ring = priv->txq[queue_num];
	struct netdev_queue *dev_txq = netdev_get_tx_queue(priv->dev,
							   queue_num);

	/* stop the queue */
	netif_tx_stop_queue(dev_txq);

	/* stop the tx dma */
	priv->hw->dma->stop_tx_queue(priv->ioaddr, queue_num);

	/* free the skbuffs of the ring */
	tx_free_ring_skbufs(tx_ring);

	/* initialise counters */
	tx_ring->cur_tx = 0;
	tx_ring->dirty_tx = 0;

	/* start the tx dma */
	priv->hw->dma->start_tx_queue(priv->ioaddr, queue_num);

	priv->dev->stats.tx_errors++;

	/* wakeup the queue */
	netif_tx_wake_queue(dev_txq);
}

/**
 * sxgbe_reset_all_tx_queues: irq tx error mng function
 * @priv: driver private structure
 * Description: it cleans all the descriptors and
 * restarts the transmission on all queues in case of errors.
 */
static void sxgbe_reset_all_tx_queues(struct sxgbe_priv_data *priv)
{
	int queue_num;

	/* On TX timeout of the net device, resetting all queues
	 * may not be the proper way; revisit this later if needed.
	 */
	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num)
		sxgbe_restart_tx_queue(priv, queue_num);
}

/**
 * sxgbe_get_hw_features: get XMAC capabilities from the HW cap. register.
 * @priv: driver private structure
 * Description:
 *  new GMAC chip generations have a new register to indicate the
 *  presence of the optional feature/functions.
 *  This can also be used to override the value passed through the
 *  platform and is necessary for old MAC10/100 and GMAC chips.
 */
static int sxgbe_get_hw_features(struct sxgbe_priv_data * const priv)
{
	int rval = 0;
	struct sxgbe_hw_features *features = &priv->hw_cap;

	/* Read Capability Register CAP[0] */
	rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 0);
	if (rval) {
		features->pmt_remote_wake_up =
			SXGBE_HW_FEAT_PMT_TEMOTE_WOP(rval);
		features->pmt_magic_frame = SXGBE_HW_FEAT_PMT_MAGIC_PKT(rval);
		features->atime_stamp = SXGBE_HW_FEAT_IEEE1500_2008(rval);
		features->tx_csum_offload =
			SXGBE_HW_FEAT_TX_CSUM_OFFLOAD(rval);
		features->rx_csum_offload =
			SXGBE_HW_FEAT_RX_CSUM_OFFLOAD(rval);
		features->multi_macaddr = SXGBE_HW_FEAT_MACADDR_COUNT(rval);
		features->tstamp_srcselect = SXGBE_HW_FEAT_TSTMAP_SRC(rval);
		features->sa_vlan_insert = SXGBE_HW_FEAT_SRCADDR_VLAN(rval);
		features->eee = SXGBE_HW_FEAT_EEE(rval);
	}

	/* Read Capability Register CAP[1] */
	rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 1);
	if (rval) {
		features->rxfifo_size = SXGBE_HW_FEAT_RX_FIFO_SIZE(rval);
		features->txfifo_size = SXGBE_HW_FEAT_TX_FIFO_SIZE(rval);
		features->atstmap_hword = SXGBE_HW_FEAT_TX_FIFO_SIZE(rval);
		features->dcb_enable = SXGBE_HW_FEAT_DCB(rval);
		features->splithead_enable = SXGBE_HW_FEAT_SPLIT_HDR(rval);
		features->tcpseg_offload = SXGBE_HW_FEAT_TSO(rval);
		features->debug_mem = SXGBE_HW_FEAT_DEBUG_MEM_IFACE(rval);
		features->rss_enable = SXGBE_HW_FEAT_RSS(rval);
		features->hash_tsize = SXGBE_HW_FEAT_HASH_TABLE_SIZE(rval);
		features->l3l4_filer_size = SXGBE_HW_FEAT_L3L4_FILTER_NUM(rval);
	}

	/* Read Capability Register CAP[2] */
	rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 2);
	if (rval) {
		features->rx_mtl_queues = SXGBE_HW_FEAT_RX_MTL_QUEUES(rval);
		features->tx_mtl_queues = SXGBE_HW_FEAT_TX_MTL_QUEUES(rval);
		features->rx_dma_channels = SXGBE_HW_FEAT_RX_DMA_CHANNELS(rval);
		features->tx_dma_channels = SXGBE_HW_FEAT_TX_DMA_CHANNELS(rval);
		features->pps_output_count = SXGBE_HW_FEAT_PPS_OUTPUTS(rval);
		features->aux_input_count = SXGBE_HW_FEAT_AUX_SNAPSHOTS(rval);
	}

	return rval;
}

/**
 * sxgbe_check_ether_addr: check if the MAC addr is valid
 * @priv: driver private structure
 * Description:
 * it is to verify if the MAC address is valid; in case of failures it
 * generates a random MAC address
 */
static void sxgbe_check_ether_addr(struct sxgbe_priv_data *priv)
{
	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
		priv->hw->mac->get_umac_addr((void __iomem *)
					     priv->ioaddr,
					     priv->dev->dev_addr, 0);
		if (!is_valid_ether_addr(priv->dev->dev_addr))
			eth_hw_addr_random(priv->dev);
	}
	dev_info(priv->device, "device MAC address %pM\n",
		 priv->dev->dev_addr);
}

/**
 * sxgbe_init_dma_engine: DMA init.
 * @priv: driver private structure
 * Description:
 * It inits the DMA invoking the specific SXGBE callback.
 * Some DMA parameters can be passed from the platform;
 * in case these are not passed, a default is kept for the MAC or GMAC.
 */
static int sxgbe_init_dma_engine(struct sxgbe_priv_data *priv)
{
	int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_map = 0;
	int queue_num;

	if (priv->plat->dma_cfg) {
		pbl = priv->plat->dma_cfg->pbl;
		fixed_burst = priv->plat->dma_cfg->fixed_burst;
		burst_map = priv->plat->dma_cfg->burst_map;
	}

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num)
		priv->hw->dma->cha_init(priv->ioaddr, queue_num,
					fixed_burst, pbl,
					(priv->txq[queue_num])->dma_tx_phy,
					(priv->rxq[queue_num])->dma_rx_phy,
					priv->dma_tx_size, priv->dma_rx_size);

	return priv->hw->dma->init(priv->ioaddr, fixed_burst, burst_map);
}

/**
 * sxgbe_init_mtl_engine: MTL init.
 * @priv: driver private structure
 * Description:
 * It inits the MTL invoking the specific SXGBE callback.
 */
static void sxgbe_init_mtl_engine(struct sxgbe_priv_data *priv)
{
	int queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		priv->hw->mtl->mtl_set_txfifosize(priv->ioaddr, queue_num,
						  priv->hw_cap.tx_mtl_qsize);
		priv->hw->mtl->mtl_enable_txqueue(priv->ioaddr, queue_num);
	}
}

/**
 * sxgbe_disable_mtl_engine: MTL disable.
 * @priv: driver private structure
 * Description:
 * It disables the MTL queues by invoking the specific SXGBE callback.
 */
static void sxgbe_disable_mtl_engine(struct sxgbe_priv_data *priv)
{
	int queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num)
		priv->hw->mtl->mtl_disable_txqueue(priv->ioaddr, queue_num);
}

/**
 * sxgbe_tx_timer: mitigation sw timer for tx.
 * @data: data pointer
 * Description:
 * This is the timer handler to directly invoke the sxgbe_tx_clean.
 */
static void sxgbe_tx_timer(unsigned long data)
{
	struct sxgbe_tx_queue *p = (struct sxgbe_tx_queue *)data;
	sxgbe_tx_queue_clean(p);
}

/**
 * sxgbe_tx_init_coalesce: init tx mitigation options.
 * @priv: driver private structure
 * Description:
 * This inits the transmit coalesce parameters: i.e. timer rate,
 * timer handler and default threshold used for enabling the
 * interrupt on completion bit.
 */
static void sxgbe_tx_init_coalesce(struct sxgbe_priv_data *priv)
{
	u8 queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		struct sxgbe_tx_queue *p = priv->txq[queue_num];
		p->tx_coal_frames = SXGBE_TX_FRAMES;
		p->tx_coal_timer = SXGBE_COAL_TX_TIMER;
		init_timer(&p->txtimer);
		p->txtimer.expires = SXGBE_COAL_TIMER(p->tx_coal_timer);
		p->txtimer.data = (unsigned long)&priv->txq[queue_num];
		p->txtimer.function = sxgbe_tx_timer;
		add_timer(&p->txtimer);
	}
}

static void sxgbe_tx_del_timer(struct sxgbe_priv_data *priv)
{
	u8 queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		struct sxgbe_tx_queue *p = priv->txq[queue_num];
		del_timer_sync(&p->txtimer);
	}
}

/**
 * sxgbe_open - open entry point of the driver
 * @dev : pointer to the device structure.
 * Description:
 * This function is the open entry point of the driver.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */
static int sxgbe_open(struct net_device *dev)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	int ret, queue_num;

	clk_prepare_enable(priv->sxgbe_clk);

	sxgbe_check_ether_addr(priv);

	/* Init the phy */
	ret = sxgbe_init_phy(dev);
	if (ret) {
		netdev_err(dev, "%s: Cannot attach to PHY (error: %d)\n",
			   __func__, ret);
		goto phy_error;
	}

	/* Create and initialize the TX/RX descriptors chains. */
	priv->dma_tx_size = SXGBE_ALIGN(DMA_TX_SIZE);
	priv->dma_rx_size = SXGBE_ALIGN(DMA_RX_SIZE);
	priv->dma_buf_sz = SXGBE_ALIGN(DMA_BUFFER_SIZE);
	priv->tx_tc = TC_DEFAULT;
	priv->rx_tc = TC_DEFAULT;
	init_dma_desc_rings(dev);

	/* DMA initialization and SW reset */
	ret = sxgbe_init_dma_engine(priv);
	if (ret < 0) {
		netdev_err(dev, "%s: DMA initialization failed\n", __func__);
		goto init_error;
	}

	/* MTL initialization */
	sxgbe_init_mtl_engine(priv);

	/* Copy the MAC addr into the HW */
	priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);

	/* Initialize the MAC Core */
	priv->hw->mac->core_init(priv->ioaddr);

	/* Request the IRQ lines */
	ret = devm_request_irq(priv->device, priv->irq, sxgbe_common_interrupt,
			       IRQF_SHARED, dev->name, dev);
	if (unlikely(ret < 0)) {
		netdev_err(dev, "%s: ERROR: allocating the IRQ %d (error: %d)\n",
			   __func__, priv->irq, ret);
		goto init_error;
	}

	/* If the LPI irq is different from the mac irq
	 * register a dedicated handler
	 */
	if (priv->lpi_irq != dev->irq) {
		ret = devm_request_irq(priv->device, priv->lpi_irq,
				       sxgbe_common_interrupt,
				       IRQF_SHARED, dev->name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(dev, "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
				   __func__, priv->lpi_irq, ret);
			goto init_error;
		}
	}

	/* Request TX DMA irq lines */
	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		ret = devm_request_irq(priv->device,
				       (priv->txq[queue_num])->irq_no,
				       sxgbe_tx_interrupt, 0,
				       dev->name, priv->txq[queue_num]);
		if (unlikely(ret < 0)) {
			netdev_err(dev, "%s: ERROR: allocating TX IRQ %d (error: %d)\n",
				   __func__, priv->irq, ret);
			goto init_error;
		}
	}

	/* Request RX DMA irq lines */
	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
		ret = devm_request_irq(priv->device,
				       (priv->rxq[queue_num])->irq_no,
				       sxgbe_rx_interrupt, 0,
				       dev->name, priv->rxq[queue_num]);
		if (unlikely(ret < 0)) {
			netdev_err(dev, "%s: ERROR: allocating RX IRQ %d (error: %d)\n",
				   __func__, priv->irq, ret);
			goto init_error;
		}
	}

	/* Enable the MAC Rx/Tx */
	priv->hw->mac->enable_tx(priv->ioaddr, true);
	priv->hw->mac->enable_rx(priv->ioaddr, true);

	/* Set the HW DMA mode and the COE */
	sxgbe_mtl_operation_mode(priv);

	/* Extra statistics */
	memset(&priv->xstats, 0, sizeof(struct sxgbe_extra_stats));

	priv->xstats.tx_threshold = priv->tx_tc;
	priv->xstats.rx_threshold = priv->rx_tc;

	/* Start the ball rolling... */
	netdev_dbg(dev, "DMA RX/TX processes started...\n");
	priv->hw->dma->start_tx(priv->ioaddr, SXGBE_TX_QUEUES);
	priv->hw->dma->start_rx(priv->ioaddr, SXGBE_RX_QUEUES);

	if (priv->phydev)
		phy_start(priv->phydev);

	/* initialise TX coalesce parameters */
	sxgbe_tx_init_coalesce(priv);

	if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
		priv->rx_riwt = SXGBE_MAX_DMA_RIWT;
		priv->hw->dma->rx_watchdog(priv->ioaddr, SXGBE_MAX_DMA_RIWT);
	}

	priv->tx_lpi_timer = SXGBE_DEFAULT_LPI_TIMER;
	priv->eee_enabled = sxgbe_eee_init(priv);

	napi_enable(&priv->napi);
	netif_start_queue(dev);

	return 0;

init_error:
	free_dma_desc_resources(priv);
	if (priv->phydev)
		phy_disconnect(priv->phydev);
phy_error:
	clk_disable_unprepare(priv->sxgbe_clk);

	return ret;
}

/**
 * sxgbe_release - close entry point of the driver
 * @dev : device pointer.
 * Description:
 * This is the stop entry point of the driver.
 */
static int sxgbe_release(struct net_device *dev)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);

	if (priv->eee_enabled)
		del_timer_sync(&priv->eee_ctrl_timer);

	/* Stop and disconnect the PHY */
	if (priv->phydev) {
		phy_stop(priv->phydev);
		phy_disconnect(priv->phydev);
		priv->phydev = NULL;
	}

	netif_tx_stop_all_queues(dev);

	napi_disable(&priv->napi);

	/* delete TX timers */
	sxgbe_tx_del_timer(priv);

	/* Stop TX/RX DMA and clear the descriptors */
	priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES);
	priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES);

	/* disable MTL queue */
	sxgbe_disable_mtl_engine(priv);

	/* Release and free the Rx/Tx resources */
	free_dma_desc_resources(priv);

	/* Disable the MAC Rx/Tx */
	priv->hw->mac->enable_tx(priv->ioaddr, false);
	priv->hw->mac->enable_rx(priv->ioaddr, false);

	clk_disable_unprepare(priv->sxgbe_clk);

	return 0;
}

/* Prepare first Tx descriptor for doing TSO operation */
void sxgbe_tso_prepare(struct sxgbe_priv_data *priv,
		       struct sxgbe_tx_norm_desc *first_desc,
		       struct sk_buff *skb)
{
	unsigned int total_hdr_len, tcp_hdr_len;

	/* Write first Tx descriptor with appropriate value */
	tcp_hdr_len = tcp_hdrlen(skb);
	total_hdr_len = skb_transport_offset(skb) + tcp_hdr_len;

	first_desc->tdes01 = dma_map_single(priv->device, skb->data,
					    total_hdr_len, DMA_TO_DEVICE);
	if (dma_mapping_error(priv->device, first_desc->tdes01))
		pr_err("%s: TX dma mapping failed!!\n", __func__);

	first_desc->tdes23.tx_rd_des23.first_desc = 1;
	priv->hw->desc->tx_desc_enable_tse(first_desc, 1, total_hdr_len,
					   tcp_hdr_len,
					   skb->len - total_hdr_len);
}

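/* Worked example (illustration, not part of the original driver): for an
 * IPv4 TCP skb with a 14-byte Ethernet header, a 20-byte IP header and a
 * 20-byte TCP header, skb_transport_offset() is 34 and total_hdr_len is
 * 54. Only those 54 header bytes are mapped through the first
 * descriptor; the hardware then replays them in front of each MSS-sized
 * segment carved from the remaining skb->len - 54 payload bytes.
 */
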
/**
 * sxgbe_xmit: Tx entry point of the driver
 * @skb : the socket buffer
 * @dev : device pointer
 * Description : this is the tx entry point of the driver.
 * It programs the chain or the ring and supports oversized frames
 * and SG feature.
 */
static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int entry, frag_num;
	int cksum_flag = 0;
	struct netdev_queue *dev_txq;
	unsigned txq_index = skb_get_queue_mapping(skb);
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	unsigned int tx_rsize = priv->dma_tx_size;
	struct sxgbe_tx_queue *tqueue = priv->txq[txq_index];
	struct sxgbe_tx_norm_desc *tx_desc, *first_desc;
	struct sxgbe_tx_ctxt_desc *ctxt_desc = NULL;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int no_pagedlen = skb_headlen(skb);
	int is_jumbo = 0;
	u16 cur_mss = skb_shinfo(skb)->gso_size;
	u32 ctxt_desc_req = 0;

	/* get the TX queue handle */
	dev_txq = netdev_get_tx_queue(dev, txq_index);

	if (unlikely(skb_is_gso(skb) && tqueue->prev_mss != cur_mss))
		ctxt_desc_req = 1;

	if (unlikely(vlan_tx_tag_present(skb) ||
		     ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		      tqueue->hwts_tx_en)))
		ctxt_desc_req = 1;

	/* get the spinlock */
	spin_lock(&tqueue->tx_lock);

	if (priv->tx_path_in_lpi_mode)
		sxgbe_disable_eee_mode(priv);

	if (unlikely(sxgbe_tx_avail(tqueue, tx_rsize) < nr_frags + 1)) {
		if (!netif_tx_queue_stopped(dev_txq)) {
			netif_tx_stop_queue(dev_txq);
			netdev_err(dev, "%s: Tx Ring is full when %d queue is awake\n",
				   __func__, txq_index);
		}
		/* release the spin lock in case of BUSY */
		spin_unlock(&tqueue->tx_lock);
		return NETDEV_TX_BUSY;
	}

	entry = tqueue->cur_tx % tx_rsize;
	tx_desc = tqueue->dma_tx + entry;

	first_desc = tx_desc;
	if (ctxt_desc_req)
		ctxt_desc = (struct sxgbe_tx_ctxt_desc *)first_desc;

	/* save the skb address */
	tqueue->tx_skbuff[entry] = skb;

	if (!is_jumbo) {
		if (likely(skb_is_gso(skb))) {
			/* TSO support */
			if (unlikely(tqueue->prev_mss != cur_mss)) {
				priv->hw->desc->tx_ctxt_desc_set_mss(
						ctxt_desc, cur_mss);
				priv->hw->desc->tx_ctxt_desc_set_tcmssv(
						ctxt_desc);
				priv->hw->desc->tx_ctxt_desc_reset_ostc(
						ctxt_desc);
				priv->hw->desc->tx_ctxt_desc_set_ctxt(
						ctxt_desc);
				priv->hw->desc->tx_ctxt_desc_set_owner(
						ctxt_desc);

				entry = (++tqueue->cur_tx) % tx_rsize;
				first_desc = tqueue->dma_tx + entry;

				tqueue->prev_mss = cur_mss;
			}
			sxgbe_tso_prepare(priv, first_desc, skb);
		} else {
			tx_desc->tdes01 = dma_map_single(priv->device,
							 skb->data, no_pagedlen, DMA_TO_DEVICE);
			if (dma_mapping_error(priv->device, tx_desc->tdes01))
				netdev_err(dev, "%s: TX dma mapping failed!!\n",
					   __func__);

			priv->hw->desc->prepare_tx_desc(tx_desc, 1, no_pagedlen,
							no_pagedlen, cksum_flag);
		}
	}

	for (frag_num = 0; frag_num < nr_frags; frag_num++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_num];
		int len = skb_frag_size(frag);

		entry = (++tqueue->cur_tx) % tx_rsize;
		tx_desc = tqueue->dma_tx + entry;
		tx_desc->tdes01 = skb_frag_dma_map(priv->device, frag, 0, len,
						   DMA_TO_DEVICE);

		tqueue->tx_skbuff_dma[entry] = tx_desc->tdes01;
		tqueue->tx_skbuff[entry] = NULL;

		/* prepare the descriptor */
		priv->hw->desc->prepare_tx_desc(tx_desc, 0, len,
						len, cksum_flag);
		/* memory barrier to flush descriptor */
		wmb();

		/* set the owner */
		priv->hw->desc->set_tx_owner(tx_desc);
	}

	/* close the descriptors */
	priv->hw->desc->close_tx_desc(tx_desc);

	/* memory barrier to flush descriptor */
	wmb();

	tqueue->tx_count_frames += nr_frags + 1;
	if (tqueue->tx_count_frames > tqueue->tx_coal_frames) {
		priv->hw->desc->clear_tx_ic(tx_desc);
		priv->xstats.tx_reset_ic_bit++;
		mod_timer(&tqueue->txtimer,
			  SXGBE_COAL_TIMER(tqueue->tx_coal_timer));
	} else {
		tqueue->tx_count_frames = 0;
	}

	/* set owner for first desc */
	priv->hw->desc->set_tx_owner(first_desc);

	/* memory barrier to flush descriptor */
	wmb();

	tqueue->cur_tx++;

	/* display current ring */
	netif_dbg(priv, pktdata, dev, "%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d\n",
		  __func__, tqueue->cur_tx % tx_rsize,
		  tqueue->dirty_tx % tx_rsize, entry,
		  first_desc, nr_frags);

	if (unlikely(sxgbe_tx_avail(tqueue, tx_rsize) <= (MAX_SKB_FRAGS + 1))) {
		netif_dbg(priv, hw, dev, "%s: stop transmitted packets\n",
			  __func__);
		netif_tx_stop_queue(dev_txq);
	}

	dev->stats.tx_bytes += skb->len;

	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		     tqueue->hwts_tx_en)) {
		/* declare that device is doing timestamping */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		priv->hw->desc->tx_enable_tstamp(first_desc);
	}

	if (!tqueue->hwts_tx_en)
		skb_tx_timestamp(skb);

	priv->hw->dma->enable_dma_transmission(priv->ioaddr, txq_index);

	spin_unlock(&tqueue->tx_lock);

	return NETDEV_TX_OK;
}

/**
 * sxgbe_rx_refill: refill used skb preallocated buffers
 * @priv: driver private structure
 * Description : this is to reallocate the skb for the reception process
 * that is based on zero-copy.
 */
static void sxgbe_rx_refill(struct sxgbe_priv_data *priv)
{
	unsigned int rxsize = priv->dma_rx_size;
	int bfsize = priv->dma_buf_sz;
	u8 qnum = priv->cur_rx_qnum;

	for (; priv->rxq[qnum]->cur_rx - priv->rxq[qnum]->dirty_rx > 0;
	     priv->rxq[qnum]->dirty_rx++) {
		unsigned int entry = priv->rxq[qnum]->dirty_rx % rxsize;
		struct sxgbe_rx_norm_desc *p;

		p = priv->rxq[qnum]->dma_rx + entry;

		if (likely(priv->rxq[qnum]->rx_skbuff[entry] == NULL)) {
			struct sk_buff *skb;

			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);

			if (unlikely(skb == NULL))
				break;

			priv->rxq[qnum]->rx_skbuff[entry] = skb;
			priv->rxq[qnum]->rx_skbuff_dma[entry] =
				dma_map_single(priv->device, skb->data, bfsize,
					       DMA_FROM_DEVICE);

			p->rdes23.rx_rd_des23.buf2_addr =
				priv->rxq[qnum]->rx_skbuff_dma[entry];
		}

		/* Added memory barrier for RX descriptor modification */
		wmb();
		priv->hw->desc->set_rx_owner(p);
		/* Added memory barrier for RX descriptor modification */
		wmb();
	}
}

/**
 * sxgbe_rx: receive the frames from the remote host
 * @priv: driver private structure
 * @limit: napi budget.
 * Description : this is the function called by the napi poll method.
 * It gets all the frames inside the ring.
 */
static int sxgbe_rx(struct sxgbe_priv_data *priv, int limit)
{
	u8 qnum = priv->cur_rx_qnum;
	unsigned int rxsize = priv->dma_rx_size;
	unsigned int entry = priv->rxq[qnum]->cur_rx;
	unsigned int next_entry = 0;
	unsigned int count = 0;
	int checksum;
	int status;

	while (count < limit) {
		struct sxgbe_rx_norm_desc *p;
		struct sk_buff *skb;
		int frame_len;

		p = priv->rxq[qnum]->dma_rx + entry;

		if (priv->hw->desc->get_rx_owner(p))
			break;

		count++;

		next_entry = (++priv->rxq[qnum]->cur_rx) % rxsize;
		prefetch(priv->rxq[qnum]->dma_rx + next_entry);

		/* Read the status of the incoming frame and also get checksum
		 * value based on whether it is enabled in SXGBE hardware or
		 * not.
		 */
		status = priv->hw->desc->rx_wbstatus(p, &priv->xstats,
						     &checksum);
		if (unlikely(status < 0)) {
			entry = next_entry;
			continue;
		}
		if (unlikely(!priv->rxcsum_insertion))
			checksum = CHECKSUM_NONE;

		skb = priv->rxq[qnum]->rx_skbuff[entry];

		if (unlikely(!skb))
			netdev_err(priv->dev, "rx descriptor is not consistent\n");

		prefetch(skb->data - NET_IP_ALIGN);
		priv->rxq[qnum]->rx_skbuff[entry] = NULL;

		frame_len = priv->hw->desc->get_rx_frame_len(p);

		skb_put(skb, frame_len);

		skb->ip_summed = checksum;
		if (checksum == CHECKSUM_NONE)
			netif_receive_skb(skb);
		else
			napi_gro_receive(&priv->napi, skb);

		entry = next_entry;
	}

	sxgbe_rx_refill(priv);

	return count;
}

/**
 * sxgbe_poll - sxgbe poll method (NAPI)
 * @napi : pointer to the napi structure.
 * @budget : maximum number of packets that the current CPU can receive from
 *	     all interfaces.
 * Description :
 * To look at the incoming frames and clear the tx resources.
 */
static int sxgbe_poll(struct napi_struct *napi, int budget)
{
	struct sxgbe_priv_data *priv = container_of(napi,
						    struct sxgbe_priv_data, napi);
	int work_done = 0;
	u8 qnum = priv->cur_rx_qnum;

	priv->xstats.napi_poll++;
	/* first, clean the tx queues */
	sxgbe_tx_all_clean(priv);

	work_done = sxgbe_rx(priv, budget);
	if (work_done < budget) {
		napi_complete(napi);
		priv->hw->dma->enable_dma_irq(priv->ioaddr, qnum);
	}

	return work_done;
}

/**
 * sxgbe_tx_timeout
 * @dev : Pointer to net device structure
 * Description: this function is called when a packet transmission fails to
 * complete within a reasonable time. The driver will mark the error in the
 * netdev structure and arrange for the device to be reset to a sane state
 * in order to transmit a new packet.
 */
static void sxgbe_tx_timeout(struct net_device *dev)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);

	sxgbe_reset_all_tx_queues(priv);
}

/**
 * sxgbe_common_interrupt - main ISR
 * @irq: interrupt number.
 * @dev_id: to pass the net device pointer.
 * Description: this is the main driver interrupt service routine.
 * It calls the DMA ISR and also the core ISR to manage PMT, MMC, LPI
 * interrupts.
 */
static irqreturn_t sxgbe_common_interrupt(int irq, void *dev_id)
{
	struct net_device *netdev = (struct net_device *)dev_id;
	struct sxgbe_priv_data *priv = netdev_priv(netdev);
	int status;

	status = priv->hw->mac->host_irq_status(priv->ioaddr, &priv->xstats);
	/* For LPI we need to save the tx status */
	if (status & TX_ENTRY_LPI_MODE) {
		priv->xstats.tx_lpi_entry_n++;
		priv->tx_path_in_lpi_mode = true;
	}
	if (status & TX_EXIT_LPI_MODE) {
		priv->xstats.tx_lpi_exit_n++;
		priv->tx_path_in_lpi_mode = false;
	}
	if (status & RX_ENTRY_LPI_MODE)
		priv->xstats.rx_lpi_entry_n++;
	if (status & RX_EXIT_LPI_MODE)
		priv->xstats.rx_lpi_exit_n++;

	return IRQ_HANDLED;
}

/**
 * sxgbe_tx_interrupt - TX DMA ISR
 * @irq: interrupt number.
 * @dev_id: to pass the net device pointer.
 * Description: this is the tx dma interrupt service routine.
 */
static irqreturn_t sxgbe_tx_interrupt(int irq, void *dev_id)
{
	int status;
	struct sxgbe_tx_queue *txq = (struct sxgbe_tx_queue *)dev_id;
	struct sxgbe_priv_data *priv = txq->priv_ptr;

	/* get the channel status */
	status = priv->hw->dma->tx_dma_int_status(priv->ioaddr, txq->queue_no,
						  &priv->xstats);
	/* check for normal path */
	if (likely((status & handle_tx)))
		napi_schedule(&priv->napi);

	/* check for unrecoverable error */
	if (unlikely((status & tx_hard_error)))
		sxgbe_restart_tx_queue(priv, txq->queue_no);

	/* check for TC configuration change */
	if (unlikely((status & tx_bump_tc) &&
		     (priv->tx_tc != SXGBE_MTL_SFMODE) &&
		     (priv->tx_tc < 512))) {
		/* step of TX TC is 32 till 128, otherwise 64 */
		priv->tx_tc += (priv->tx_tc < 128) ? 32 : 64;
		priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr,
					       txq->queue_no, priv->tx_tc);
		priv->xstats.tx_threshold = priv->tx_tc;
	}

	return IRQ_HANDLED;
}

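/* Illustrative sequence (not part of the original driver): starting from
 * the TC_DEFAULT threshold of 64, repeated tx_bump_tc events walk
 * priv->tx_tc through 64 -> 96 -> 128 -> 192 -> 256 -> ... (steps of 32
 * while below 128, then steps of 64) until the < 512 guard above stops
 * any further bumps.
 */
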
/**
 * sxgbe_rx_interrupt - RX DMA ISR
 * @irq: interrupt number.
 * @dev_id: to pass the net device pointer.
 * Description: this is the rx dma interrupt service routine.
 */
static irqreturn_t sxgbe_rx_interrupt(int irq, void *dev_id)
{
	int status;
	struct sxgbe_rx_queue *rxq = (struct sxgbe_rx_queue *)dev_id;
	struct sxgbe_priv_data *priv = rxq->priv_ptr;

	/* get the channel status */
	status = priv->hw->dma->rx_dma_int_status(priv->ioaddr, rxq->queue_no,
						  &priv->xstats);

	if (likely((status & handle_rx) && (napi_schedule_prep(&priv->napi)))) {
		priv->hw->dma->disable_dma_irq(priv->ioaddr, rxq->queue_no);
		__napi_schedule(&priv->napi);
	}

	/* check for TC configuration change */
	if (unlikely((status & rx_bump_tc) &&
		     (priv->rx_tc != SXGBE_MTL_SFMODE) &&
		     (priv->rx_tc < 128))) {
		/* step of TC is 32 */
		priv->rx_tc += 32;
		priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr,
					       rxq->queue_no, priv->rx_tc);
		priv->xstats.rx_threshold = priv->rx_tc;
	}

	return IRQ_HANDLED;
}

static inline u64 sxgbe_get_stat64(void __iomem *ioaddr, int reg_lo, int reg_hi)
{
	u64 val = readl(ioaddr + reg_lo);

	val |= ((u64)readl(ioaddr + reg_hi)) << 32;

	return val;
}

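/* Worked example (illustration, not part of the original driver): a low
 * register of 0x00000010 and a high register of 0x00000002 compose to
 * (0x2ULL << 32) | 0x10 = 0x0000000200000010, i.e. 8589934608. The u64
 * cast before the shift is what keeps the high word from being shifted
 * out of 32-bit arithmetic.
 */
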
/*  sxgbe_get_stats64 - entry point to see statistical information of device
 *  @dev : device pointer.
 *  @stats : pointer to hold all the statistical information of device.
 *  Description:
 *  This function is a driver entry point whenever ifconfig command gets
 *  executed to see device statistics. Statistics are number of
 *  bytes sent or received, errors occurred etc.
 *  Return value:
 *  This function returns various statistical information of device.
 */
static struct rtnl_link_stats64 *sxgbe_get_stats64(struct net_device *dev,
						   struct rtnl_link_stats64 *stats)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	void __iomem *ioaddr = priv->ioaddr;
	u64 count;

	spin_lock(&priv->stats_lock);
	/* Freeze the counter registers before reading value otherwise it may
	 * get updated by hardware while we are reading them
	 */
	writel(SXGBE_MMC_CTRL_CNT_FRZ, ioaddr + SXGBE_MMC_CTL_REG);

	stats->rx_bytes = sxgbe_get_stat64(ioaddr,
					   SXGBE_MMC_RXOCTETLO_GCNT_REG,
					   SXGBE_MMC_RXOCTETHI_GCNT_REG);

	stats->rx_packets = sxgbe_get_stat64(ioaddr,
					     SXGBE_MMC_RXFRAMELO_GBCNT_REG,
					     SXGBE_MMC_RXFRAMEHI_GBCNT_REG);

	stats->multicast = sxgbe_get_stat64(ioaddr,
					    SXGBE_MMC_RXMULTILO_GCNT_REG,
					    SXGBE_MMC_RXMULTIHI_GCNT_REG);

	stats->rx_crc_errors = sxgbe_get_stat64(ioaddr,
						SXGBE_MMC_RXCRCERRLO_REG,
						SXGBE_MMC_RXCRCERRHI_REG);

	stats->rx_length_errors = sxgbe_get_stat64(ioaddr,
						   SXGBE_MMC_RXLENERRLO_REG,
						   SXGBE_MMC_RXLENERRHI_REG);

	stats->rx_missed_errors = sxgbe_get_stat64(ioaddr,
						   SXGBE_MMC_RXFIFOOVERFLOWLO_GBCNT_REG,
						   SXGBE_MMC_RXFIFOOVERFLOWHI_GBCNT_REG);

	stats->tx_bytes = sxgbe_get_stat64(ioaddr,
					   SXGBE_MMC_TXOCTETLO_GCNT_REG,
					   SXGBE_MMC_TXOCTETHI_GCNT_REG);

	count = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXFRAMELO_GBCNT_REG,
				 SXGBE_MMC_TXFRAMEHI_GBCNT_REG);

	stats->tx_errors = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXFRAMELO_GCNT_REG,
					    SXGBE_MMC_TXFRAMEHI_GCNT_REG);
	stats->tx_errors = count - stats->tx_errors;
	stats->tx_packets = count;
	stats->tx_fifo_errors = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXUFLWLO_GBCNT_REG,
						 SXGBE_MMC_TXUFLWHI_GBCNT_REG);
	writel(0, ioaddr + SXGBE_MMC_CTL_REG);
	spin_unlock(&priv->stats_lock);

	return stats;
}

/*  sxgbe_set_features - entry point to set offload features of the device.
 *  @dev : device pointer.
 *  @features : features which are required to be set.
 *  Description:
 *  This function is a driver entry point and called by Linux kernel whenever
 *  any device features are set or reset by user.
 *  Return value:
 *  This function returns 0 after setting or resetting device features.
 */
static int sxgbe_set_features(struct net_device *dev,
			      netdev_features_t features)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	netdev_features_t changed = dev->features ^ features;

	if (changed & NETIF_F_RXCSUM) {
		if (features & NETIF_F_RXCSUM) {
			priv->hw->mac->enable_rx_csum(priv->ioaddr);
			priv->rxcsum_insertion = true;
		} else {
			priv->hw->mac->disable_rx_csum(priv->ioaddr);
			priv->rxcsum_insertion = false;
		}
	}

	return 0;
}

/*  sxgbe_change_mtu - entry point to change MTU size for the device.
 *  @dev : device pointer.
 *  @new_mtu : the new MTU size for the device.
 *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
 *  to drive packet transmission. Ethernet has an MTU of 1500 octets
 *  (ETH_DATA_LEN). This value can be changed with ifconfig.
 *  Return value:
 *  0 on success and an appropriate (-)ve integer as defined in errno.h
 *  file on failure.
 */
static int sxgbe_change_mtu(struct net_device *dev, int new_mtu)
{
	/* RFC 791, page 25, "Every internet module must be able to forward
	 * a datagram of 68 octets without further fragmentation."
	 */
	if (new_mtu < MIN_MTU || (new_mtu > MAX_MTU)) {
		netdev_err(dev, "invalid MTU, MTU should be in between %d and %d\n",
			   MIN_MTU, MAX_MTU);
		return -EINVAL;
	}

	/* Return if the buffer sizes will not change */
	if (dev->mtu == new_mtu)
		return 0;

	dev->mtu = new_mtu;

	if (!netif_running(dev))
		return 0;

	/* Receive ring buffer size needs to be set based on the MTU. If the
	 * MTU is changed then the receive ring buffers have to be
	 * reinitialised. Hence bring the interface down and back up.
	 */
	sxgbe_release(dev);
	return sxgbe_open(dev);
}

static void sxgbe_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
				unsigned int reg_n)
{
	unsigned long data;

	data = (addr[5] << 8) | addr[4];
	/* For MAC Addr registers we have to set the Address Enable (AE)
	 * bit that has no effect on the High Reg 0 where the bit 31 (MO)
	 * is RO.
	 */
	writel(data | SXGBE_HI_REG_AE, ioaddr + SXGBE_ADDR_HIGH(reg_n));
	data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
	writel(data, ioaddr + SXGBE_ADDR_LOW(reg_n));
}

/**
 * sxgbe_set_rx_mode - entry point for setting different receive mode of
 * a device. unicast, multicast addressing
 * @dev : pointer to the device structure
 * Description:
 * This function is a driver entry point which gets called by the kernel
 * whenever different receive mode like unicast, multicast and promiscuous
 * must be enabled/disabled.
 * Return value:
 * void.
 */
static void sxgbe_set_rx_mode(struct net_device *dev)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	void __iomem *ioaddr = (void __iomem *)priv->ioaddr;
	unsigned int value = 0;
	u32 mc_filter[2];
	struct netdev_hw_addr *ha;
	int reg = 1;

	netdev_dbg(dev, "%s: # mcasts %d, # unicast %d\n",
		   __func__, netdev_mc_count(dev), netdev_uc_count(dev));

	if (dev->flags & IFF_PROMISC) {
		value = SXGBE_FRAME_FILTER_PR;

	} else if ((netdev_mc_count(dev) > SXGBE_HASH_TABLE_SIZE) ||
		   (dev->flags & IFF_ALLMULTI)) {
		value = SXGBE_FRAME_FILTER_PM;	/* pass all multi */
		writel(0xffffffff, ioaddr + SXGBE_HASH_HIGH);
		writel(0xffffffff, ioaddr + SXGBE_HASH_LOW);

	} else if (!netdev_mc_empty(dev)) {
		/* Hash filter for multicast */
		value = SXGBE_FRAME_FILTER_HMC;

		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			/* The upper 6 bits of the calculated CRC are used to
			 * index the contents of the hash table
			 */
			int bit_nr = bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26;

			/* The most significant bit determines the register to
			 * use (H/L) while the other 5 bits determine the bit
			 * within the register.
			 */
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		}
		writel(mc_filter[0], ioaddr + SXGBE_HASH_LOW);
		writel(mc_filter[1], ioaddr + SXGBE_HASH_HIGH);
	}

	/* Handle multiple unicast addresses (perfect filtering) */
	if (netdev_uc_count(dev) > SXGBE_MAX_PERFECT_ADDRESSES)
		/* Switch to promiscuous mode if more than 16 addrs
		 * are required
		 */
		value |= SXGBE_FRAME_FILTER_PR;
	else {
		netdev_for_each_uc_addr(ha, dev) {
			sxgbe_set_umac_addr(ioaddr, ha->addr, reg);
			reg++;
		}
	}

#ifdef FRAME_FILTER_DEBUG
	/* Enable Receive all mode (to debug filtering_fail errors) */
	value |= SXGBE_FRAME_FILTER_RA;
#endif
	writel(value, ioaddr + SXGBE_FRAME_FILTER);

	netdev_dbg(dev, "Filter: 0x%08x\n\tHash: HI 0x%08x, LO 0x%08x\n",
		   readl(ioaddr + SXGBE_FRAME_FILTER),
		   readl(ioaddr + SXGBE_HASH_HIGH),
		   readl(ioaddr + SXGBE_HASH_LOW));
}

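/* Worked example (illustration, not part of the original driver): a
 * destination address whose reversed-CRC hash yields bit_nr = 37
 * (0b100101) selects mc_filter[37 >> 5] = mc_filter[1], i.e. the
 * SXGBE_HASH_HIGH register, and sets bit 37 & 31 = 5 in it; multicast
 * frames hashing to that index then pass the HMC filter once the word
 * is written back.
 */
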
/**
 * sxgbe_config - entry point for changing configuration mode passed on by
 * ifconfig
 * @dev : pointer to the device structure
 * @map : pointer to the device mapping structure
 * Description:
 * This function is a driver entry point which gets called by the kernel
 * whenever some device configuration is changed.
 * Return value:
 * This function returns 0 if success and appropriate error otherwise.
 */
static int sxgbe_config(struct net_device *dev, struct ifmap *map)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);

	/* Can't act on a running interface */
	if (dev->flags & IFF_UP)
		return -EBUSY;

	/* Don't allow changing the I/O address */
	if (map->base_addr != (unsigned long)priv->ioaddr) {
		netdev_warn(dev, "can't change I/O address\n");
		return -EOPNOTSUPP;
	}

	/* Don't allow changing the IRQ */
	if (map->irq != priv->irq) {
		netdev_warn(dev, "can't change IRQ number %d\n", priv->irq);
		return -EOPNOTSUPP;
	}

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * sxgbe_poll_controller - entry point for polling receive by device
 * @dev : pointer to the device structure
 * Description:
 * This function is used by NETCONSOLE and other diagnostic tools
 * to allow network I/O with interrupts disabled.
 * Return value:
 * Void.
 */
static void sxgbe_poll_controller(struct net_device *dev)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);

	disable_irq(priv->irq);
	sxgbe_rx_interrupt(priv->irq, dev);
	enable_irq(priv->irq);
}
#endif

/*  sxgbe_ioctl - Entry point for the Ioctl
 *  @dev: Device pointer.
 *  @rq: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  @cmd: IOCTL command
 *  Description:
 *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
 */
static int sxgbe_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	if (!netif_running(dev))
		return -EINVAL;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (!priv->phydev)
			return -EINVAL;
		ret = phy_mii_ioctl(priv->phydev, rq, cmd);
		break;
	default:
		break;
	}

	return ret;
}

static const struct net_device_ops sxgbe_netdev_ops = {
	.ndo_open		= sxgbe_open,
	.ndo_start_xmit		= sxgbe_xmit,
	.ndo_stop		= sxgbe_release,
	.ndo_get_stats64	= sxgbe_get_stats64,
	.ndo_change_mtu		= sxgbe_change_mtu,
	.ndo_set_features	= sxgbe_set_features,
	.ndo_set_rx_mode	= sxgbe_set_rx_mode,
	.ndo_tx_timeout		= sxgbe_tx_timeout,
	.ndo_do_ioctl		= sxgbe_ioctl,
	.ndo_set_config		= sxgbe_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= sxgbe_poll_controller,
#endif
	.ndo_set_mac_address	= eth_mac_addr,
};

/* Get the hardware ops */
static void sxgbe_get_ops(struct sxgbe_ops * const ops_ptr)
{
	ops_ptr->mac		= sxgbe_get_core_ops();
	ops_ptr->desc		= sxgbe_get_desc_ops();
	ops_ptr->dma		= sxgbe_get_dma_ops();
	ops_ptr->mtl		= sxgbe_get_mtl_ops();

	/* set the MDIO communication Address/Data registers */
	ops_ptr->mii.addr	= SXGBE_MDIO_SCMD_ADD_REG;
	ops_ptr->mii.data	= SXGBE_MDIO_SCMD_DATA_REG;

	/* Assigning the default link settings:
	 * no SXGBE defined default values to be set in registers,
	 * so assigning 0 for port and duplex
	 */
	ops_ptr->link.port	= 0;
	ops_ptr->link.duplex	= 0;
	ops_ptr->link.speed	= SXGBE_SPEED_10G;
}

/**
 *  sxgbe_hw_init - Init the GMAC device
 *  @priv: driver private structure
 *  Description: this function checks the HW capability
 *  (if supported) and sets the driver's features.
 */
static int sxgbe_hw_init(struct sxgbe_priv_data * const priv)
{
	u32 ctrl_ids;

	priv->hw = kmalloc(sizeof(*priv->hw), GFP_KERNEL);
	if (!priv->hw)
		return -ENOMEM;

	/* get the hardware ops */
	sxgbe_get_ops(priv->hw);

	/* get the controller id */
	ctrl_ids = priv->hw->mac->get_controller_version(priv->ioaddr);
	priv->hw->ctrl_uid = (ctrl_ids & 0x00ff0000) >> 16;
	priv->hw->ctrl_id = (ctrl_ids & 0x000000ff);
	pr_info("user ID: 0x%x, Controller ID: 0x%x\n",
		priv->hw->ctrl_uid, priv->hw->ctrl_id);

	/* get the H/W features */
	if (!sxgbe_get_hw_features(priv))
		pr_info("Hardware features not found\n");

	if (priv->hw_cap.tx_csum_offload)
		pr_info("TX Checksum offload supported\n");

	if (priv->hw_cap.rx_csum_offload)
		pr_info("RX Checksum offload supported\n");

	return 0;
}

/**
 * sxgbe_drv_probe
 * @device: device pointer
 * @plat_dat: platform data pointer
 * @addr: iobase memory address
 * Description: this is the main probe function used to
 * call the alloc_etherdev, allocate the priv structure.
 */
struct sxgbe_priv_data *sxgbe_drv_probe(struct device *device,
					struct sxgbe_plat_data *plat_dat,
					void __iomem *addr)
{
	struct sxgbe_priv_data *priv;
	struct net_device *ndev;
	int ret;
	u8 queue_num;

	ndev = alloc_etherdev_mqs(sizeof(struct sxgbe_priv_data),
				  SXGBE_TX_QUEUES, SXGBE_RX_QUEUES);
	if (!ndev)
		return NULL;

	SET_NETDEV_DEV(ndev, device);

	priv = netdev_priv(ndev);
	priv->device = device;
	priv->dev = ndev;

	sxgbe_set_ethtool_ops(ndev);
	priv->plat = plat_dat;
	priv->ioaddr = addr;

	/* Verify driver arguments */
	sxgbe_verify_args();

	/* Init MAC and get the capabilities */
	ret = sxgbe_hw_init(priv);
	if (ret)
		goto error_free_netdev;

	/* allocate memory resources for Descriptor rings */
	ret = txring_mem_alloc(priv);
	if (ret)
		goto error_free_netdev;

	ret = rxring_mem_alloc(priv);
	if (ret)
		goto error_free_netdev;

	ndev->netdev_ops = &sxgbe_netdev_ops;

	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_RXCSUM | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_GRO;
	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
	ndev->watchdog_timeo = msecs_to_jiffies(TX_TIMEO);

	/* assign filtering support */
	ndev->priv_flags |= IFF_UNICAST_FLT;

	priv->msg_enable = netif_msg_init(debug, default_msg_level);

	/* Enable TCP segmentation offload for all DMA channels */
	if (priv->hw_cap.tcpseg_offload) {
		SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
			priv->hw->dma->enable_tso(priv->ioaddr, queue_num);
		}
	}

	/* Enable Rx checksum offload */
	if (priv->hw_cap.rx_csum_offload) {
		priv->hw->mac->enable_rx_csum(priv->ioaddr);
		priv->rxcsum_insertion = true;
	}

	/* Initialise pause frame settings */
	priv->rx_pause = 1;
	priv->tx_pause = 1;

	/* Rx Watchdog is available, enable depend on platform data */
	if (!priv->plat->riwt_off) {
		priv->use_riwt = 1;
		pr_info("Enable RX Mitigation via HW Watchdog Timer\n");
	}

	netif_napi_add(ndev, &priv->napi, sxgbe_poll, 64);

	spin_lock_init(&priv->stats_lock);

	priv->sxgbe_clk = clk_get(priv->device, SXGBE_RESOURCE_NAME);
	if (IS_ERR(priv->sxgbe_clk)) {
		netdev_warn(ndev, "%s: warning: cannot get CSR clock\n",
			    __func__);
		goto error_clk_get;
	}

	/* If a specific clk_csr value is passed from the platform
	 * this means that the CSR Clock Range selection cannot be
	 * changed at run-time and it is fixed. Viceversa the driver'll try to
	 * set the MDC clock dynamically according to the actual csr
	 * clock input.
	 */
	if (!priv->plat->clk_csr)
		sxgbe_clk_csr_set(priv);
	else
		priv->clk_csr = priv->plat->clk_csr;

	/* MDIO bus Registration */
	ret = sxgbe_mdio_register(ndev);
	if (ret < 0) {
		netdev_dbg(ndev, "%s: MDIO bus (id: %d) registration failed\n",
			   __func__, priv->plat->bus_id);
		goto error_mdio_register;
	}

	ret = register_netdev(ndev);
	if (ret) {
		pr_err("%s: ERROR %i registering the device\n", __func__, ret);
		goto error_netdev_register;
	}

	sxgbe_check_ether_addr(priv);

	return priv;

error_mdio_register:
	clk_put(priv->sxgbe_clk);
error_clk_get:
error_netdev_register:
	netif_napi_del(&priv->napi);
error_free_netdev:
	free_netdev(ndev);

	return NULL;
}

/**
 * sxgbe_drv_remove
 * @ndev: net device pointer
 * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
 * changes the link status, releases the DMA descriptor rings.
 */
int sxgbe_drv_remove(struct net_device *ndev)
{
	struct sxgbe_priv_data *priv = netdev_priv(ndev);

	netdev_info(ndev, "%s: removing driver\n", __func__);

	priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES);
	priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES);

	priv->hw->mac->enable_tx(priv->ioaddr, false);
	priv->hw->mac->enable_rx(priv->ioaddr, false);

	netif_napi_del(&priv->napi);

	sxgbe_mdio_unregister(ndev);

	unregister_netdev(ndev);

	free_netdev(ndev);

	return 0;
}

#ifdef CONFIG_PM
int sxgbe_suspend(struct net_device *ndev)
{
	return 0;
}

int sxgbe_resume(struct net_device *ndev)
{
	return 0;
}

int sxgbe_freeze(struct net_device *ndev)
{
	return -ENOSYS;
}

int sxgbe_restore(struct net_device *ndev)
{
	return -ENOSYS;
}
#endif /* CONFIG_PM */

/* Driver is configured as Platform driver */
static int __init sxgbe_init(void)
{
	int ret;

	ret = sxgbe_register_platform();
	if (ret)
		goto err;
	return 0;
err:
	pr_err("driver registration failed\n");
	return ret;
}

static void __exit sxgbe_exit(void)
{
	sxgbe_unregister_platform();
}

module_init(sxgbe_init);
module_exit(sxgbe_exit);

#ifndef MODULE
static int __init sxgbe_cmdline_opt(char *str)
{
	char *opt;

	if (!str || !*str)
		return -EINVAL;
	while ((opt = strsep(&str, ",")) != NULL) {
		if (!strncmp(opt, "eee_timer:", 10)) {
			if (kstrtoint(opt + 10, 0, &eee_timer))
				goto err;
		}
	}
	return 0;

err:
	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
	return -EINVAL;
}

__setup("sxgbeeth=", sxgbe_cmdline_opt);
#endif /* MODULE */

MODULE_DESCRIPTION("SAMSUNG 10G/2.5G/1G Ethernet PLATFORM driver");

MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
MODULE_PARM_DESC(eee_timer, "EEE-LPI Default LS timer value");

MODULE_AUTHOR("Siva Reddy Kallam <siva.kallam@samsung.com>");
MODULE_AUTHOR("ByungHo An <bh74.an@samsung.com>");
MODULE_AUTHOR("Girish K S <ks.giri@samsung.com>");
MODULE_AUTHOR("Vipul Pandya <vipul.pandya@samsung.com>");

MODULE_LICENSE("GPL");