/* 10G controller driver for Samsung SoCs
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/crc32.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/prefetch.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/sxgbe_platform.h>

#include "sxgbe_common.h"
#include "sxgbe_desc.h"
#include "sxgbe_dma.h"
#include "sxgbe_mtl.h"
#include "sxgbe_reg.h"

#define SXGBE_ALIGN(x)	L1_CACHE_ALIGN(x)
#define JUMBO_LEN	9000

/* Module parameters */
#define TX_TIMEO	5000
#define DMA_TX_SIZE	512
#define DMA_RX_SIZE	1024
#define TC_DEFAULT	64
#define DMA_BUFFER_SIZE	BUF_SIZE_2KiB
/* The default timer value as per the sxgbe specification: 1 sec (1000 ms) */
#define SXGBE_DEFAULT_LPI_TIMER	1000

static int debug = -1;
static int eee_timer = SXGBE_DEFAULT_LPI_TIMER;

module_param(eee_timer, int, S_IRUGO | S_IWUSR);

module_param(debug, int, S_IRUGO | S_IWUSR);
static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

static irqreturn_t sxgbe_common_interrupt(int irq, void *dev_id);
static irqreturn_t sxgbe_tx_interrupt(int irq, void *dev_id);
static irqreturn_t sxgbe_rx_interrupt(int irq, void *dev_id);

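/* Turn the TX coalescing period (in us) and the LPI timer period (in ms)
 * into absolute jiffies deadlines for mod_timer()/timer expiry.
 */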
#define SXGBE_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))

#define SXGBE_LPI_TIMER(x) (jiffies + msecs_to_jiffies(x))

/**
 * sxgbe_verify_args - verify the driver parameters.
 * Description: it verifies whether any wrong parameter was passed to the
 * driver. Note that wrong parameters are replaced with the default values.
 */
static void sxgbe_verify_args(void)
{
	if (unlikely(eee_timer < 0))
		eee_timer = SXGBE_DEFAULT_LPI_TIMER;
}

static void sxgbe_enable_eee_mode(const struct sxgbe_priv_data *priv)
{
	/* Check and enter in LPI mode */
	if (!priv->tx_path_in_lpi_mode)
		priv->hw->mac->set_eee_mode(priv->ioaddr);
}

void sxgbe_disable_eee_mode(struct sxgbe_priv_data * const priv)
{
	/* Exit and disable EEE in case we are in the LPI state */
	priv->hw->mac->reset_eee_mode(priv->ioaddr);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}

/**
 * sxgbe_eee_ctrl_timer
 * @arg : data hook
 * Description:
 * If there is no data transfer and we are not in the LPI state,
 * then the MAC transmitter can be moved to the LPI state.
 */
static void sxgbe_eee_ctrl_timer(unsigned long arg)
{
	struct sxgbe_priv_data *priv = (struct sxgbe_priv_data *)arg;

	sxgbe_enable_eee_mode(priv);
	mod_timer(&priv->eee_ctrl_timer, SXGBE_LPI_TIMER(eee_timer));
}

/**
 * sxgbe_eee_init
 * @priv: private device pointer
 * Description:
 * If EEE support was enabled while configuring the driver, and if the
 * MAC actually supports EEE (from the HW cap reg) and the PHY can also
 * manage it, then enable the LPI state and start the timer to verify
 * whether the TX path can enter the LPI state.
 */
bool sxgbe_eee_init(struct sxgbe_priv_data * const priv)
{
	bool ret = false;

	/* MAC core supports the EEE feature. */
	if (priv->hw_cap.eee) {
		/* Check if the PHY supports EEE */
		if (phy_init_eee(priv->phydev, 1))
			return false;

		priv->eee_active = 1;
		init_timer(&priv->eee_ctrl_timer);
		priv->eee_ctrl_timer.function = sxgbe_eee_ctrl_timer;
		priv->eee_ctrl_timer.data = (unsigned long)priv;
		priv->eee_ctrl_timer.expires = SXGBE_LPI_TIMER(eee_timer);
		add_timer(&priv->eee_ctrl_timer);

		priv->hw->mac->set_eee_timer(priv->ioaddr,
					     SXGBE_DEFAULT_LPI_TIMER,
					     priv->tx_lpi_timer);

		pr_info("Energy-Efficient Ethernet initialized\n");

		ret = true;
	}

	return ret;
}

static void sxgbe_eee_adjust(const struct sxgbe_priv_data *priv)
{
	/* When EEE has already been initialised, we have to modify the
	 * PLS bit in the LPI ctrl & status reg according to the PHY
	 * link status.
	 */
	if (priv->eee_enabled)
		priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link);
}

/**
 * sxgbe_clk_csr_set - dynamically set the MDC clock
 * @priv: driver private structure
 * Description: this is to dynamically set the MDC clock according to the
 * csr clock input.
 */
static void sxgbe_clk_csr_set(struct sxgbe_priv_data *priv)
{
	u32 clk_rate = clk_get_rate(priv->sxgbe_clk);

	/* assign the proper divider, this will be used during
	 * mdio communication
	 */
	if (clk_rate < SXGBE_CSR_F_150M)
		priv->clk_csr = SXGBE_CSR_100_150M;
	else if (clk_rate <= SXGBE_CSR_F_250M)
		priv->clk_csr = SXGBE_CSR_150_250M;
	else if (clk_rate <= SXGBE_CSR_F_300M)
		priv->clk_csr = SXGBE_CSR_250_300M;
	else if (clk_rate <= SXGBE_CSR_F_350M)
		priv->clk_csr = SXGBE_CSR_300_350M;
	else if (clk_rate <= SXGBE_CSR_F_400M)
		priv->clk_csr = SXGBE_CSR_350_400M;
	else if (clk_rate <= SXGBE_CSR_F_500M)
		priv->clk_csr = SXGBE_CSR_400_500M;
}

/* minimum number of free TX descriptors required to wake up TX process */
#define SXGBE_TX_THRESH(x)	(x->dma_tx_size / 4)

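/* cur_tx and dirty_tx are free-running counters used modulo the ring
 * size, so the number of free slots is dirty_tx + size - cur_tx - 1;
 * one slot always stays unused to tell a full ring from an empty one.
 */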
static inline u32 sxgbe_tx_avail(struct sxgbe_tx_queue *queue, int tx_qsize)
{
	return queue->dirty_tx + tx_qsize - queue->cur_tx - 1;
}

/**
 * sxgbe_adjust_link
 * @dev: net device structure
 * Description: it adjusts the link parameters.
 */
static void sxgbe_adjust_link(struct net_device *dev)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;
	u8 new_state = 0;
	u8 speed = 0xff;

	if (!phydev)
		return;

	/* The SXGBE does not support auto-negotiation or half-duplex mode,
	 * so duplex changes are not handled here; only speed and link
	 * status are.
	 */
	if (phydev->link) {
		if (phydev->speed != priv->speed) {
			new_state = 1;
			switch (phydev->speed) {
			case SPEED_10000:
				speed = SXGBE_SPEED_10G;
				break;
			case SPEED_2500:
				speed = SXGBE_SPEED_2_5G;
				break;
			case SPEED_1000:
				speed = SXGBE_SPEED_1G;
				break;
			default:
				netif_err(priv, link, dev,
					  "Speed (%d) not supported\n",
					  phydev->speed);
			}

			priv->speed = phydev->speed;
			priv->hw->mac->set_speed(priv->ioaddr, speed);
		}

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->speed = SPEED_UNKNOWN;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	/* Alter the MAC settings for EEE */
	sxgbe_eee_adjust(priv);
}

/**
 * sxgbe_init_phy - PHY initialization
 * @ndev: net device structure
 * Description: it initializes the driver's PHY state and attaches the
 * PHY to the MAC driver.
 * Return value:
 * 0 on success
 */
static int sxgbe_init_phy(struct net_device *ndev)
{
	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
	char bus_id[MII_BUS_ID_SIZE];
	struct phy_device *phydev;
	struct sxgbe_priv_data *priv = netdev_priv(ndev);
	int phy_iface = priv->plat->interface;

	/* assign default link status */
	priv->oldlink = 0;
	priv->speed = SPEED_UNKNOWN;
	priv->oldduplex = DUPLEX_UNKNOWN;

	if (priv->plat->phy_bus_name)
		snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
			 priv->plat->phy_bus_name, priv->plat->bus_id);
	else
		snprintf(bus_id, MII_BUS_ID_SIZE, "sxgbe-%x",
			 priv->plat->bus_id);

	snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
		 priv->plat->phy_addr);
	netdev_dbg(ndev, "%s: trying to attach to %s\n", __func__, phy_id_fmt);

	phydev = phy_connect(ndev, phy_id_fmt, &sxgbe_adjust_link, phy_iface);

	if (IS_ERR(phydev)) {
		netdev_err(ndev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Stop advertising 1000BASE capability if interface is not GMII */
	if ((phy_iface == PHY_INTERFACE_MODE_MII) ||
	    (phy_iface == PHY_INTERFACE_MODE_RMII))
		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
					 SUPPORTED_1000baseT_Full);
	if (phydev->phy_id == 0) {
		phy_disconnect(phydev);
		return -ENODEV;
	}

	netdev_dbg(ndev, "%s: attached to PHY (UID 0x%x) Link = %d\n",
		   __func__, phydev->phy_id, phydev->link);

	/* save phy device in private structure */
	priv->phydev = phydev;

	return 0;
}

/**
 * sxgbe_clear_descriptors: clear descriptors
 * @priv: driver private structure
 * Description: this function is called to clear the TX and RX descriptors,
 * whether basic or extended descriptors are in use.
 */
static void sxgbe_clear_descriptors(struct sxgbe_priv_data *priv)
{
	int i, j;
	unsigned int txsize = priv->dma_tx_size;
	unsigned int rxsize = priv->dma_rx_size;

	/* Clear the Rx/Tx descriptors */
	for (j = 0; j < SXGBE_RX_QUEUES; j++) {
		for (i = 0; i < rxsize; i++)
			priv->hw->desc->init_rx_desc(&priv->rxq[j]->dma_rx[i],
						     priv->use_riwt, priv->mode,
						     (i == rxsize - 1));
	}

	for (j = 0; j < SXGBE_TX_QUEUES; j++) {
		for (i = 0; i < txsize; i++)
			priv->hw->desc->init_tx_desc(&priv->txq[j]->dma_tx[i]);
	}
}

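/**
 * sxgbe_init_rx_buffers - allocate and map one receive buffer
 * @dev: net device structure
 * @p: RX descriptor to fill in
 * @i: index of the descriptor inside the ring
 * @dma_buf_sz: size of the buffer to allocate
 * @rx_ring: RX queue the descriptor belongs to
 * Description: allocates an skb, maps it for DMA and stores the bus
 * address in the descriptor so the hardware can write a frame into it.
 */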
static int sxgbe_init_rx_buffers(struct net_device *dev,
				 struct sxgbe_rx_norm_desc *p, int i,
				 unsigned int dma_buf_sz,
				 struct sxgbe_rx_queue *rx_ring)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	struct sk_buff *skb;

	skb = __netdev_alloc_skb_ip_align(dev, dma_buf_sz, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	rx_ring->rx_skbuff[i] = skb;
	rx_ring->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
						   dma_buf_sz, DMA_FROM_DEVICE);

	if (dma_mapping_error(priv->device, rx_ring->rx_skbuff_dma[i])) {
		netdev_err(dev, "%s: DMA mapping error\n", __func__);
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	p->rdes23.rx_rd_des23.buf2_addr = rx_ring->rx_skbuff_dma[i];

	return 0;
}

/**
 * init_tx_ring - init the TX descriptor ring
 * @dev: net device structure
 * @queue_no: queue number
 * @tx_ring: ring to be initialised
 * @tx_rsize: ring size
 * Description: this function initializes the DMA TX descriptor ring
 */
static int init_tx_ring(struct device *dev, u8 queue_no,
			struct sxgbe_tx_queue *tx_ring, int tx_rsize)
{
	/* TX ring is not allocated */
	if (!tx_ring) {
		dev_err(dev, "No memory for TX queue of SXGBE\n");
		return -ENOMEM;
	}

	/* allocate memory for TX descriptors */
	tx_ring->dma_tx = dma_zalloc_coherent(dev,
					      tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
					      &tx_ring->dma_tx_phy, GFP_KERNEL);
	if (!tx_ring->dma_tx)
		return -ENOMEM;

	/* allocate memory for TX skbuff array */
	tx_ring->tx_skbuff_dma = devm_kcalloc(dev, tx_rsize,
					      sizeof(dma_addr_t), GFP_KERNEL);
	if (!tx_ring->tx_skbuff_dma)
		goto dmamem_err;

	tx_ring->tx_skbuff = devm_kcalloc(dev, tx_rsize,
					  sizeof(struct sk_buff *), GFP_KERNEL);

	if (!tx_ring->tx_skbuff)
		goto dmamem_err;

	/* assign queue number */
	tx_ring->queue_no = queue_no;

	/* initialise counters */
	tx_ring->dirty_tx = 0;
	tx_ring->cur_tx = 0;

	/* initialise TX queue lock */
	spin_lock_init(&tx_ring->tx_lock);

	return 0;

dmamem_err:
	dma_free_coherent(dev, tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
			  tx_ring->dma_tx, tx_ring->dma_tx_phy);
	return -ENOMEM;
}

/**
 * free_rx_ring - free the RX descriptor ring
 * @dev: net device structure
 * @rx_ring: ring to be freed
 * @rx_rsize: ring size
 * Description: this function frees the DMA RX descriptor ring and the
 * associated buffer arrays
 */
static void free_rx_ring(struct device *dev, struct sxgbe_rx_queue *rx_ring,
			 int rx_rsize)
{
	dma_free_coherent(dev, rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
			  rx_ring->dma_rx, rx_ring->dma_rx_phy);
	kfree(rx_ring->rx_skbuff_dma);
	kfree(rx_ring->rx_skbuff);
}

/**
 * init_rx_ring - init the RX descriptor ring
 * @dev: net device structure
 * @queue_no: queue number
 * @rx_ring: ring to be initialised
 * @rx_rsize: ring size
 * Description: this function initializes the DMA RX descriptor ring
 */
static int init_rx_ring(struct net_device *dev, u8 queue_no,
			struct sxgbe_rx_queue *rx_ring, int rx_rsize)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	int desc_index;
	unsigned int bfsize = 0;
	unsigned int ret = 0;

	/* Set the max buffer size according to the MTU. */
	bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN, 8);

	netif_dbg(priv, probe, dev, "%s: bfsize %d\n", __func__, bfsize);

	/* RX ring is not allocated */
	if (rx_ring == NULL) {
		netdev_err(dev, "No memory for RX queue\n");
		goto error;
	}

	/* assign queue number */
	rx_ring->queue_no = queue_no;

	/* allocate memory for RX descriptors */
	rx_ring->dma_rx = dma_zalloc_coherent(priv->device,
					      rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
					      &rx_ring->dma_rx_phy, GFP_KERNEL);

	if (rx_ring->dma_rx == NULL)
		goto error;

	/* allocate memory for RX skbuff array */
	rx_ring->rx_skbuff_dma = kmalloc_array(rx_rsize,
					       sizeof(dma_addr_t), GFP_KERNEL);
	if (rx_ring->rx_skbuff_dma == NULL)
		goto dmamem_err;

	rx_ring->rx_skbuff = kmalloc_array(rx_rsize,
					   sizeof(struct sk_buff *), GFP_KERNEL);
	if (rx_ring->rx_skbuff == NULL)
		goto rxbuff_err;

	/* initialise the buffers */
	for (desc_index = 0; desc_index < rx_rsize; desc_index++) {
		struct sxgbe_rx_norm_desc *p;
		p = rx_ring->dma_rx + desc_index;
		ret = sxgbe_init_rx_buffers(dev, p, desc_index,
					    bfsize, rx_ring);
		if (ret)
			goto err_init_rx_buffers;
	}

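	/* the loop above filled the whole ring, so desc_index == rx_rsize
	 * here and dirty_rx starts back at zero
	 */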
	/* initialise counters */
	rx_ring->cur_rx = 0;
	rx_ring->dirty_rx = (unsigned int)(desc_index - rx_rsize);
	priv->dma_buf_sz = bfsize;

	return 0;

err_init_rx_buffers:
	while (--desc_index >= 0)
		free_rx_ring(priv->device, rx_ring, desc_index);
	kfree(rx_ring->rx_skbuff);
rxbuff_err:
	kfree(rx_ring->rx_skbuff_dma);
dmamem_err:
	dma_free_coherent(priv->device,
			  rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
			  rx_ring->dma_rx, rx_ring->dma_rx_phy);
error:
	return -ENOMEM;
}

/**
 * free_tx_ring - free the TX descriptor ring
 * @dev: net device structure
 * @tx_ring: ring to be freed
 * @tx_rsize: ring size
 * Description: this function frees the DMA TX descriptor ring
 */
static void free_tx_ring(struct device *dev, struct sxgbe_tx_queue *tx_ring,
			 int tx_rsize)
{
	dma_free_coherent(dev, tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
			  tx_ring->dma_tx, tx_ring->dma_tx_phy);
}

/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @netd: net device structure
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 */
static int init_dma_desc_rings(struct net_device *netd)
{
	int queue_num, ret;
	struct sxgbe_priv_data *priv = netdev_priv(netd);
	int tx_rsize = priv->dma_tx_size;
	int rx_rsize = priv->dma_rx_size;

	/* Allocate memory for queue structures and TX descs */
	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		ret = init_tx_ring(priv->device, queue_num,
				   priv->txq[queue_num], tx_rsize);
		if (ret) {
			dev_err(&netd->dev, "TX DMA ring allocation failed!\n");
			goto txalloc_err;
		}

		/* save private pointer in each ring; this pointer is
		 * needed when cleaning the TX queue
		 */
		priv->txq[queue_num]->priv_ptr = priv;
	}

	/* Allocate memory for queue structures and RX descs */
	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
		ret = init_rx_ring(netd, queue_num,
				   priv->rxq[queue_num], rx_rsize);
		if (ret) {
			netdev_err(netd, "RX DMA ring allocation failed!!\n");
			goto rxalloc_err;
		}

		/* save private pointer in each ring; this pointer is
		 * needed when cleaning the RX queue
		 */
		priv->rxq[queue_num]->priv_ptr = priv;
	}

	sxgbe_clear_descriptors(priv);

	return 0;

txalloc_err:
	while (queue_num--)
		free_tx_ring(priv->device, priv->txq[queue_num], tx_rsize);
	return ret;

rxalloc_err:
	while (queue_num--)
		free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize);
	return ret;
}

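/* Unmap and free every socket buffer still attached to a TX ring */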
static void tx_free_ring_skbufs(struct sxgbe_tx_queue *txqueue)
{
	int dma_desc;
	struct sxgbe_priv_data *priv = txqueue->priv_ptr;
	int tx_rsize = priv->dma_tx_size;

	for (dma_desc = 0; dma_desc < tx_rsize; dma_desc++) {
		struct sxgbe_tx_norm_desc *tdesc = txqueue->dma_tx + dma_desc;

		if (txqueue->tx_skbuff_dma[dma_desc])
			dma_unmap_single(priv->device,
					 txqueue->tx_skbuff_dma[dma_desc],
					 priv->hw->desc->get_tx_len(tdesc),
					 DMA_TO_DEVICE);

		dev_kfree_skb_any(txqueue->tx_skbuff[dma_desc]);
		txqueue->tx_skbuff[dma_desc] = NULL;
		txqueue->tx_skbuff_dma[dma_desc] = 0;
	}
}

static void dma_free_tx_skbufs(struct sxgbe_priv_data *priv)
{
	int queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		struct sxgbe_tx_queue *tqueue = priv->txq[queue_num];
		tx_free_ring_skbufs(tqueue);
	}
}

static void free_dma_desc_resources(struct sxgbe_priv_data *priv)
{
	int queue_num;
	int tx_rsize = priv->dma_tx_size;
	int rx_rsize = priv->dma_rx_size;

	/* Release the DMA TX buffers */
	dma_free_tx_skbufs(priv);

	/* Release the TX ring memory also */
	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		free_tx_ring(priv->device, priv->txq[queue_num], tx_rsize);
	}

	/* Release the RX ring memory also */
	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
		free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize);
	}
}

static int txring_mem_alloc(struct sxgbe_priv_data *priv)
{
	int queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		priv->txq[queue_num] = devm_kmalloc(priv->device,
						    sizeof(struct sxgbe_tx_queue),
						    GFP_KERNEL);
		if (!priv->txq[queue_num])
			return -ENOMEM;
	}

	return 0;
}

static int rxring_mem_alloc(struct sxgbe_priv_data *priv)
{
	int queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
		priv->rxq[queue_num] = devm_kmalloc(priv->device,
						    sizeof(struct sxgbe_rx_queue),
						    GFP_KERNEL);
		if (!priv->rxq[queue_num])
			return -ENOMEM;
	}

	return 0;
}

/**
 * sxgbe_mtl_operation_mode - HW MTL operation mode
 * @priv: driver private structure
 * Description: it sets the MTL operation mode: tx/rx MTL thresholds
 * or Store-And-Forward capability.
 */
static void sxgbe_mtl_operation_mode(struct sxgbe_priv_data *priv)
{
	int queue_num;

	/* TX/RX threshold control */
	if (likely(priv->plat->force_sf_dma_mode)) {
		/* set TC mode for TX QUEUES */
		SXGBE_FOR_EACH_QUEUE(priv->hw_cap.tx_mtl_queues, queue_num)
			priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr, queue_num,
						       SXGBE_MTL_SFMODE);
		priv->tx_tc = SXGBE_MTL_SFMODE;

		/* set TC mode for RX QUEUES */
		SXGBE_FOR_EACH_QUEUE(priv->hw_cap.rx_mtl_queues, queue_num)
			priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr, queue_num,
						       SXGBE_MTL_SFMODE);
		priv->rx_tc = SXGBE_MTL_SFMODE;
	} else if (unlikely(priv->plat->force_thresh_dma_mode)) {
		/* set TC mode for TX QUEUES */
		SXGBE_FOR_EACH_QUEUE(priv->hw_cap.tx_mtl_queues, queue_num)
			priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr, queue_num,
						       priv->tx_tc);
		/* set TC mode for RX QUEUES */
		SXGBE_FOR_EACH_QUEUE(priv->hw_cap.rx_mtl_queues, queue_num)
			priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr, queue_num,
						       priv->rx_tc);
	} else {
		pr_err("ERROR: %s: Invalid TX threshold mode\n", __func__);
	}
}

/**
 * sxgbe_tx_queue_clean:
 * @tqueue: queue pointer
 * Description: it reclaims resources after transmission completes.
 */
static void sxgbe_tx_queue_clean(struct sxgbe_tx_queue *tqueue)
{
	struct sxgbe_priv_data *priv = tqueue->priv_ptr;
	unsigned int tx_rsize = priv->dma_tx_size;
	struct netdev_queue *dev_txq;
	u8 queue_no = tqueue->queue_no;

	dev_txq = netdev_get_tx_queue(priv->dev, queue_no);

	spin_lock(&tqueue->tx_lock);

	priv->xstats.tx_clean++;
	while (tqueue->dirty_tx != tqueue->cur_tx) {
		unsigned int entry = tqueue->dirty_tx % tx_rsize;
		struct sk_buff *skb = tqueue->tx_skbuff[entry];
		struct sxgbe_tx_norm_desc *p;

		p = tqueue->dma_tx + entry;

		/* Check if the descriptor is owned by the DMA. */
		if (priv->hw->desc->get_tx_owner(p))
			break;

		if (netif_msg_tx_done(priv))
			pr_debug("%s: curr %d, dirty %d\n",
				 __func__, tqueue->cur_tx, tqueue->dirty_tx);

		if (likely(tqueue->tx_skbuff_dma[entry])) {
			dma_unmap_single(priv->device,
					 tqueue->tx_skbuff_dma[entry],
					 priv->hw->desc->get_tx_len(p),
					 DMA_TO_DEVICE);
			tqueue->tx_skbuff_dma[entry] = 0;
		}

		if (likely(skb)) {
			dev_kfree_skb(skb);
			tqueue->tx_skbuff[entry] = NULL;
		}

		priv->hw->desc->release_tx_desc(p);

		tqueue->dirty_tx++;
	}

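	/* The stopped state is re-checked under netif_tx_lock: the xmit
	 * path may be stopping the queue concurrently, so only wake it
	 * once it is certain that descriptors really became available.
	 */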
	/* wake up queue */
	if (unlikely(netif_tx_queue_stopped(dev_txq) &&
		     sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv))) {
		netif_tx_lock(priv->dev);
		if (netif_tx_queue_stopped(dev_txq) &&
		    sxgbe_tx_avail(tqueue, tx_rsize) > SXGBE_TX_THRESH(priv)) {
			if (netif_msg_tx_done(priv))
				pr_debug("%s: restart transmit\n", __func__);
			netif_tx_wake_queue(dev_txq);
		}
		netif_tx_unlock(priv->dev);
	}

	spin_unlock(&tqueue->tx_lock);
}

/**
 * sxgbe_tx_all_clean:
 * @priv: driver private structure
 * Description: it reclaims resources after transmission completes on all
 * queues.
 */
static void sxgbe_tx_all_clean(struct sxgbe_priv_data * const priv)
{
	u8 queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		struct sxgbe_tx_queue *tqueue = priv->txq[queue_num];

		sxgbe_tx_queue_clean(tqueue);
	}

	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
		sxgbe_enable_eee_mode(priv);
		mod_timer(&priv->eee_ctrl_timer, SXGBE_LPI_TIMER(eee_timer));
	}
}

/**
 * sxgbe_restart_tx_queue: irq tx error mng function
 * @priv: driver private structure
 * @queue_num: queue number
 * Description: it cleans the descriptors and restarts the transmission
 * in case of errors.
 */
static void sxgbe_restart_tx_queue(struct sxgbe_priv_data *priv, int queue_num)
{
	struct sxgbe_tx_queue *tx_ring = priv->txq[queue_num];
	struct netdev_queue *dev_txq = netdev_get_tx_queue(priv->dev,
							   queue_num);

	/* stop the queue */
	netif_tx_stop_queue(dev_txq);

	/* stop the tx dma */
	priv->hw->dma->stop_tx_queue(priv->ioaddr, queue_num);

	/* free the skbuffs of the ring */
	tx_free_ring_skbufs(tx_ring);

	/* initialise counters */
	tx_ring->cur_tx = 0;
	tx_ring->dirty_tx = 0;

	/* start the tx dma */
	priv->hw->dma->start_tx_queue(priv->ioaddr, queue_num);

	priv->dev->stats.tx_errors++;

	/* wakeup the queue */
	netif_tx_wake_queue(dev_txq);
}

/**
 * sxgbe_reset_all_tx_queues: irq tx error mng function
 * @priv: driver private structure
 * Description: it cleans all the descriptors and
 * restarts the transmission on all queues in case of errors.
 */
static void sxgbe_reset_all_tx_queues(struct sxgbe_priv_data *priv)
{
	int queue_num;

	/* On TX timeout of the net device, resetting all queues
	 * may not be the proper way; revisit this later if needed
	 */
	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num)
		sxgbe_restart_tx_queue(priv, queue_num);
}

/**
 * sxgbe_get_hw_features: get XMAC capabilities from the HW cap. register.
 * @priv: driver private structure
 * Description:
 * new GMAC chip generations have a new register to indicate the
 * presence of the optional features/functions.
 * This can also be used to override the value passed through the
 * platform and is necessary for old MAC10/100 and GMAC chips.
 */
static int sxgbe_get_hw_features(struct sxgbe_priv_data * const priv)
{
	int rval = 0;
	struct sxgbe_hw_features *features = &priv->hw_cap;

	/* Read First Capability Register CAP[0] */
	rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 0);
	if (rval) {
		features->pmt_remote_wake_up =
			SXGBE_HW_FEAT_PMT_TEMOTE_WOP(rval);
		features->pmt_magic_frame = SXGBE_HW_FEAT_PMT_MAGIC_PKT(rval);
		features->atime_stamp = SXGBE_HW_FEAT_IEEE1500_2008(rval);
		features->tx_csum_offload =
			SXGBE_HW_FEAT_TX_CSUM_OFFLOAD(rval);
		features->rx_csum_offload =
			SXGBE_HW_FEAT_RX_CSUM_OFFLOAD(rval);
		features->multi_macaddr = SXGBE_HW_FEAT_MACADDR_COUNT(rval);
		features->tstamp_srcselect = SXGBE_HW_FEAT_TSTMAP_SRC(rval);
		features->sa_vlan_insert = SXGBE_HW_FEAT_SRCADDR_VLAN(rval);
		features->eee = SXGBE_HW_FEAT_EEE(rval);
	}

	/* Read Second Capability Register CAP[1] */
	rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 1);
	if (rval) {
		features->rxfifo_size = SXGBE_HW_FEAT_RX_FIFO_SIZE(rval);
		features->txfifo_size = SXGBE_HW_FEAT_TX_FIFO_SIZE(rval);
		features->atstmap_hword = SXGBE_HW_FEAT_TX_FIFO_SIZE(rval);
		features->dcb_enable = SXGBE_HW_FEAT_DCB(rval);
		features->splithead_enable = SXGBE_HW_FEAT_SPLIT_HDR(rval);
		features->tcpseg_offload = SXGBE_HW_FEAT_TSO(rval);
		features->debug_mem = SXGBE_HW_FEAT_DEBUG_MEM_IFACE(rval);
		features->rss_enable = SXGBE_HW_FEAT_RSS(rval);
		features->hash_tsize = SXGBE_HW_FEAT_HASH_TABLE_SIZE(rval);
		features->l3l4_filer_size = SXGBE_HW_FEAT_L3L4_FILTER_NUM(rval);
	}

	/* Read Third Capability Register CAP[2] */
	rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 2);
	if (rval) {
		features->rx_mtl_queues = SXGBE_HW_FEAT_RX_MTL_QUEUES(rval);
		features->tx_mtl_queues = SXGBE_HW_FEAT_TX_MTL_QUEUES(rval);
		features->rx_dma_channels = SXGBE_HW_FEAT_RX_DMA_CHANNELS(rval);
		features->tx_dma_channels = SXGBE_HW_FEAT_TX_DMA_CHANNELS(rval);
		features->pps_output_count = SXGBE_HW_FEAT_PPS_OUTPUTS(rval);
		features->aux_input_count = SXGBE_HW_FEAT_AUX_SNAPSHOTS(rval);
	}

	return rval;
}

/**
 * sxgbe_check_ether_addr: check if the MAC addr is valid
 * @priv: driver private structure
 * Description:
 * it verifies that the MAC address is valid; if it is not,
 * it generates a random MAC address
 */
static void sxgbe_check_ether_addr(struct sxgbe_priv_data *priv)
{
	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
		priv->hw->mac->get_umac_addr((void __iomem *)
					     priv->ioaddr,
					     priv->dev->dev_addr, 0);
		if (!is_valid_ether_addr(priv->dev->dev_addr))
			eth_hw_addr_random(priv->dev);
	}
	dev_info(priv->device, "device MAC address %pM\n",
		 priv->dev->dev_addr);
}

/**
 * sxgbe_init_dma_engine: DMA init.
 * @priv: driver private structure
 * Description:
 * It inits the DMA invoking the specific SXGBE callback.
 * Some DMA parameters can be passed from the platform;
 * if they are not passed, a default is kept for the MAC or GMAC.
 */
static int sxgbe_init_dma_engine(struct sxgbe_priv_data *priv)
{
	int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_map = 0;
	int queue_num;

	if (priv->plat->dma_cfg) {
		pbl = priv->plat->dma_cfg->pbl;
		fixed_burst = priv->plat->dma_cfg->fixed_burst;
		burst_map = priv->plat->dma_cfg->burst_map;
	}

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num)
		priv->hw->dma->cha_init(priv->ioaddr, queue_num,
					fixed_burst, pbl,
					(priv->txq[queue_num])->dma_tx_phy,
					(priv->rxq[queue_num])->dma_rx_phy,
					priv->dma_tx_size, priv->dma_rx_size);

	return priv->hw->dma->init(priv->ioaddr, fixed_burst, burst_map);
}

/**
 * sxgbe_init_mtl_engine: MTL init.
 * @priv: driver private structure
 * Description:
 * It inits the MTL invoking the specific SXGBE callback.
 */
static void sxgbe_init_mtl_engine(struct sxgbe_priv_data *priv)
{
	int queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		priv->hw->mtl->mtl_set_txfifosize(priv->ioaddr, queue_num,
						  priv->hw_cap.tx_mtl_qsize);
		priv->hw->mtl->mtl_enable_txqueue(priv->ioaddr, queue_num);
	}
}

/**
 * sxgbe_disable_mtl_engine: MTL disable.
 * @priv: driver private structure
 * Description:
 * It disables the MTL queues by invoking the specific SXGBE callback.
 */
static void sxgbe_disable_mtl_engine(struct sxgbe_priv_data *priv)
{
	int queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num)
		priv->hw->mtl->mtl_disable_txqueue(priv->ioaddr, queue_num);
}

/**
 * sxgbe_tx_timer: mitigation sw timer for tx.
 * @data: data pointer
 * Description:
 * This is the timer handler that directly invokes sxgbe_tx_queue_clean.
 */
static void sxgbe_tx_timer(unsigned long data)
{
	struct sxgbe_tx_queue *p = (struct sxgbe_tx_queue *)data;

	sxgbe_tx_queue_clean(p);
}

/**
 * sxgbe_tx_init_coalesce: init tx mitigation options.
 * @priv: driver private structure
 * Description:
 * This inits the transmit coalesce parameters: i.e. timer rate,
 * timer handler and default threshold used for enabling the
 * interrupt on completion bit.
 */
static void sxgbe_tx_init_coalesce(struct sxgbe_priv_data *priv)
{
	u8 queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		struct sxgbe_tx_queue *p = priv->txq[queue_num];
		p->tx_coal_frames = SXGBE_TX_FRAMES;
		p->tx_coal_timer = SXGBE_COAL_TX_TIMER;
		init_timer(&p->txtimer);
		p->txtimer.expires = SXGBE_COAL_TIMER(p->tx_coal_timer);
		p->txtimer.data = (unsigned long)p;
		p->txtimer.function = sxgbe_tx_timer;
		add_timer(&p->txtimer);
	}
}

static void sxgbe_tx_del_timer(struct sxgbe_priv_data *priv)
{
	u8 queue_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		struct sxgbe_tx_queue *p = priv->txq[queue_num];
		del_timer_sync(&p->txtimer);
	}
}

/**
 * sxgbe_open - open entry point of the driver
 * @dev : pointer to the device structure.
 * Description:
 * This function is the open entry point of the driver.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */
static int sxgbe_open(struct net_device *dev)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	int ret, queue_num;

	clk_prepare_enable(priv->sxgbe_clk);

	sxgbe_check_ether_addr(priv);

	/* Init the phy */
	ret = sxgbe_init_phy(dev);
	if (ret) {
		netdev_err(dev, "%s: Cannot attach to PHY (error: %d)\n",
			   __func__, ret);
		goto phy_error;
	}

	/* Create and initialize the TX/RX descriptors chains. */
	priv->dma_tx_size = SXGBE_ALIGN(DMA_TX_SIZE);
	priv->dma_rx_size = SXGBE_ALIGN(DMA_RX_SIZE);
	priv->dma_buf_sz = SXGBE_ALIGN(DMA_BUFFER_SIZE);
	priv->tx_tc = TC_DEFAULT;
	priv->rx_tc = TC_DEFAULT;
	init_dma_desc_rings(dev);

	/* DMA initialization and SW reset */
	ret = sxgbe_init_dma_engine(priv);
	if (ret < 0) {
		netdev_err(dev, "%s: DMA initialization failed\n", __func__);
		goto init_error;
	}

	/* MTL initialization */
	sxgbe_init_mtl_engine(priv);

	/* Copy the MAC addr into the HW */
	priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);

	/* Initialize the MAC Core */
	priv->hw->mac->core_init(priv->ioaddr);

	/* Request the IRQ lines */
	ret = devm_request_irq(priv->device, priv->irq, sxgbe_common_interrupt,
			       IRQF_SHARED, dev->name, dev);
	if (unlikely(ret < 0)) {
		netdev_err(dev, "%s: ERROR: allocating the IRQ %d (error: %d)\n",
			   __func__, priv->irq, ret);
		goto init_error;
	}

	/* If the LPI irq is different from the mac irq
	 * register a dedicated handler
	 */
	if (priv->lpi_irq != dev->irq) {
		ret = devm_request_irq(priv->device, priv->lpi_irq,
				       sxgbe_common_interrupt,
				       IRQF_SHARED, dev->name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(dev, "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
				   __func__, priv->lpi_irq, ret);
			goto init_error;
		}
	}

	/* Request TX DMA irq lines */
	SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
		ret = devm_request_irq(priv->device,
				       (priv->txq[queue_num])->irq_no,
				       sxgbe_tx_interrupt, 0,
				       dev->name, priv->txq[queue_num]);
		if (unlikely(ret < 0)) {
			netdev_err(dev, "%s: ERROR: allocating TX IRQ %d (error: %d)\n",
				   __func__, priv->irq, ret);
			goto init_error;
		}
	}

	/* Request RX DMA irq lines */
	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
		ret = devm_request_irq(priv->device,
				       (priv->rxq[queue_num])->irq_no,
				       sxgbe_rx_interrupt, 0,
				       dev->name, priv->rxq[queue_num]);
		if (unlikely(ret < 0)) {
			netdev_err(dev, "%s: ERROR: allocating RX IRQ %d (error: %d)\n",
				   __func__, priv->irq, ret);
			goto init_error;
		}
	}

	/* Enable the MAC Rx/Tx */
	priv->hw->mac->enable_tx(priv->ioaddr, true);
	priv->hw->mac->enable_rx(priv->ioaddr, true);

	/* Set the HW DMA mode and the COE */
	sxgbe_mtl_operation_mode(priv);

	/* Extra statistics */
	memset(&priv->xstats, 0, sizeof(struct sxgbe_extra_stats));

	priv->xstats.tx_threshold = priv->tx_tc;
	priv->xstats.rx_threshold = priv->rx_tc;

	/* Start the ball rolling... */
	netdev_dbg(dev, "DMA RX/TX processes started...\n");
	priv->hw->dma->start_tx(priv->ioaddr, SXGBE_TX_QUEUES);
	priv->hw->dma->start_rx(priv->ioaddr, SXGBE_RX_QUEUES);

	if (priv->phydev)
		phy_start(priv->phydev);

	/* initialise TX coalesce parameters */
	sxgbe_tx_init_coalesce(priv);

	if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
		priv->rx_riwt = SXGBE_MAX_DMA_RIWT;
		priv->hw->dma->rx_watchdog(priv->ioaddr, SXGBE_MAX_DMA_RIWT);
	}

	priv->tx_lpi_timer = SXGBE_DEFAULT_LPI_TIMER;
	priv->eee_enabled = sxgbe_eee_init(priv);

	napi_enable(&priv->napi);
	netif_start_queue(dev);

	return 0;

init_error:
	free_dma_desc_resources(priv);
	if (priv->phydev)
		phy_disconnect(priv->phydev);
phy_error:
	clk_disable_unprepare(priv->sxgbe_clk);

	return ret;
}

/**
 * sxgbe_release - close entry point of the driver
 * @dev : device pointer.
 * Description:
 * This is the stop entry point of the driver.
 */
static int sxgbe_release(struct net_device *dev)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);

	if (priv->eee_enabled)
		del_timer_sync(&priv->eee_ctrl_timer);

	/* Stop and disconnect the PHY */
	if (priv->phydev) {
		phy_stop(priv->phydev);
		phy_disconnect(priv->phydev);
		priv->phydev = NULL;
	}

	netif_tx_stop_all_queues(dev);

	napi_disable(&priv->napi);

	/* delete TX timers */
	sxgbe_tx_del_timer(priv);

	/* Stop TX/RX DMA and clear the descriptors */
	priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES);
	priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES);

	/* disable MTL queue */
	sxgbe_disable_mtl_engine(priv);

	/* Release and free the Rx/Tx resources */
	free_dma_desc_resources(priv);

	/* Disable the MAC Rx/Tx */
	priv->hw->mac->enable_tx(priv->ioaddr, false);
	priv->hw->mac->enable_rx(priv->ioaddr, false);

	clk_disable_unprepare(priv->sxgbe_clk);

	return 0;
}

/* Prepare the first Tx descriptor for a TSO operation */
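/*
 * Only the frame headers (up to the end of the TCP header) are mapped into
 * the first descriptor; tx_desc_enable_tse() then passes the header and
 * payload lengths to the DMA so the hardware can segment the payload into
 * MSS-sized chunks, replicating the headers in front of each segment.
 */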
static void sxgbe_tso_prepare(struct sxgbe_priv_data *priv,
			      struct sxgbe_tx_norm_desc *first_desc,
			      struct sk_buff *skb)
{
	unsigned int total_hdr_len, tcp_hdr_len;

	/* Write first Tx descriptor with appropriate value */
	tcp_hdr_len = tcp_hdrlen(skb);
	total_hdr_len = skb_transport_offset(skb) + tcp_hdr_len;

	first_desc->tdes01 = dma_map_single(priv->device, skb->data,
					    total_hdr_len, DMA_TO_DEVICE);
	if (dma_mapping_error(priv->device, first_desc->tdes01))
		pr_err("%s: TX dma mapping failed!!\n", __func__);

	first_desc->tdes23.tx_rd_des23.first_desc = 1;
	priv->hw->desc->tx_desc_enable_tse(first_desc, 1, total_hdr_len,
					   tcp_hdr_len,
					   skb->len - total_hdr_len);
}

/**
 * sxgbe_xmit: Tx entry point of the driver
 * @skb : the socket buffer
 * @dev : device pointer
 * Description : this is the tx entry point of the driver.
 * It programs the chain or the ring and supports oversized frames
 * and SG feature.
 */
static netdev_tx_t sxgbe_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int entry, frag_num;
	int cksum_flag = 0;
	struct netdev_queue *dev_txq;
	unsigned txq_index = skb_get_queue_mapping(skb);
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	unsigned int tx_rsize = priv->dma_tx_size;
	struct sxgbe_tx_queue *tqueue = priv->txq[txq_index];
	struct sxgbe_tx_norm_desc *tx_desc, *first_desc;
	struct sxgbe_tx_ctxt_desc *ctxt_desc = NULL;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int no_pagedlen = skb_headlen(skb);
	int is_jumbo = 0;
	u16 cur_mss = skb_shinfo(skb)->gso_size;
	u32 ctxt_desc_req = 0;

	/* get the TX queue handle */
	dev_txq = netdev_get_tx_queue(dev, txq_index);

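	/* A context descriptor carries state that normal descriptors
	 * cannot: a new MSS value for TSO, a VLAN tag to be inserted, or
	 * a hardware TX timestamp request. Work out up front whether one
	 * is needed for this skb.
	 */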
	if (unlikely(skb_is_gso(skb) && tqueue->prev_mss != cur_mss))
		ctxt_desc_req = 1;

	if (unlikely(vlan_tx_tag_present(skb) ||
		     ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		      tqueue->hwts_tx_en)))
		ctxt_desc_req = 1;

	/* get the spinlock */
	spin_lock(&tqueue->tx_lock);

	if (priv->tx_path_in_lpi_mode)
		sxgbe_disable_eee_mode(priv);

	if (unlikely(sxgbe_tx_avail(tqueue, tx_rsize) < nr_frags + 1)) {
		if (!netif_tx_queue_stopped(dev_txq)) {
			netif_tx_stop_queue(dev_txq);
			netdev_err(dev, "%s: Tx Ring is full when %d queue is awake\n",
				   __func__, txq_index);
		}
		/* release the spin lock in case of BUSY */
		spin_unlock(&tqueue->tx_lock);
		return NETDEV_TX_BUSY;
	}

	entry = tqueue->cur_tx % tx_rsize;
	tx_desc = tqueue->dma_tx + entry;

	first_desc = tx_desc;
	if (ctxt_desc_req)
		ctxt_desc = (struct sxgbe_tx_ctxt_desc *)first_desc;

	/* save the skb address */
	tqueue->tx_skbuff[entry] = skb;

	if (!is_jumbo) {
		if (likely(skb_is_gso(skb))) {
			/* TSO support */
			if (unlikely(tqueue->prev_mss != cur_mss)) {
				priv->hw->desc->tx_ctxt_desc_set_mss(
						ctxt_desc, cur_mss);
				priv->hw->desc->tx_ctxt_desc_set_tcmssv(
						ctxt_desc);
				priv->hw->desc->tx_ctxt_desc_reset_ostc(
						ctxt_desc);
				priv->hw->desc->tx_ctxt_desc_set_ctxt(
						ctxt_desc);
				priv->hw->desc->tx_ctxt_desc_set_owner(
						ctxt_desc);

				entry = (++tqueue->cur_tx) % tx_rsize;
				first_desc = tqueue->dma_tx + entry;

				tqueue->prev_mss = cur_mss;
			}
			sxgbe_tso_prepare(priv, first_desc, skb);
		} else {
			tx_desc->tdes01 = dma_map_single(priv->device,
							 skb->data, no_pagedlen,
							 DMA_TO_DEVICE);
			if (dma_mapping_error(priv->device, tx_desc->tdes01))
				netdev_err(dev, "%s: TX dma mapping failed!!\n",
					   __func__);

			priv->hw->desc->prepare_tx_desc(tx_desc, 1, no_pagedlen,
							no_pagedlen, cksum_flag);
		}
	}

	for (frag_num = 0; frag_num < nr_frags; frag_num++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_num];
		int len = skb_frag_size(frag);

		entry = (++tqueue->cur_tx) % tx_rsize;
		tx_desc = tqueue->dma_tx + entry;
		tx_desc->tdes01 = skb_frag_dma_map(priv->device, frag, 0, len,
						   DMA_TO_DEVICE);

		tqueue->tx_skbuff_dma[entry] = tx_desc->tdes01;
		tqueue->tx_skbuff[entry] = NULL;

		/* prepare the descriptor */
		priv->hw->desc->prepare_tx_desc(tx_desc, 0, len,
						len, cksum_flag);
		/* memory barrier to flush descriptor */
		wmb();

		/* set the owner */
		priv->hw->desc->set_tx_owner(tx_desc);
	}

	/* close the descriptors */
	priv->hw->desc->close_tx_desc(tx_desc);

	/* memory barrier to flush descriptor */
	wmb();

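	/* TX mitigation: once more than tx_coal_frames frames have been
	 * queued, the IC (interrupt on completion) bit is cleared and the
	 * per-queue timer is armed so the ring is still reaped by
	 * sxgbe_tx_timer() without a completion interrupt per frame.
	 */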
	tqueue->tx_count_frames += nr_frags + 1;
	if (tqueue->tx_count_frames > tqueue->tx_coal_frames) {
		priv->hw->desc->clear_tx_ic(tx_desc);
		priv->xstats.tx_reset_ic_bit++;
		mod_timer(&tqueue->txtimer,
			  SXGBE_COAL_TIMER(tqueue->tx_coal_timer));
	} else {
		tqueue->tx_count_frames = 0;
	}

	/* set owner for first desc */
	priv->hw->desc->set_tx_owner(first_desc);

	/* memory barrier to flush descriptor */
	wmb();

	tqueue->cur_tx++;

	/* display current ring */
	netif_dbg(priv, pktdata, dev, "%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d\n",
		  __func__, tqueue->cur_tx % tx_rsize,
		  tqueue->dirty_tx % tx_rsize, entry,
		  first_desc, nr_frags);

	if (unlikely(sxgbe_tx_avail(tqueue, tx_rsize) <= (MAX_SKB_FRAGS + 1))) {
		netif_dbg(priv, hw, dev, "%s: stop transmitted packets\n",
			  __func__);
		netif_tx_stop_queue(dev_txq);
	}

	dev->stats.tx_bytes += skb->len;

	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		     tqueue->hwts_tx_en)) {
		/* declare that device is doing timestamping */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		priv->hw->desc->tx_enable_tstamp(first_desc);
	}

	if (!tqueue->hwts_tx_en)
		skb_tx_timestamp(skb);

	priv->hw->dma->enable_dma_transmission(priv->ioaddr, txq_index);

	spin_unlock(&tqueue->tx_lock);

	return NETDEV_TX_OK;
}

/**
 * sxgbe_rx_refill: refill used skb preallocated buffers
 * @priv: driver private structure
 * Description : this is to reallocate the skb for the reception process
 * that is based on zero-copy.
 */
static void sxgbe_rx_refill(struct sxgbe_priv_data *priv)
{
	unsigned int rxsize = priv->dma_rx_size;
	int bfsize = priv->dma_buf_sz;
	u8 qnum = priv->cur_rx_qnum;

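	/* dirty_rx chases cur_rx: every descriptor already consumed by the
	 * RX path gets a freshly allocated and DMA-mapped skb before its
	 * ownership is handed back to the hardware.
	 */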
	for (; priv->rxq[qnum]->cur_rx - priv->rxq[qnum]->dirty_rx > 0;
	     priv->rxq[qnum]->dirty_rx++) {
		unsigned int entry = priv->rxq[qnum]->dirty_rx % rxsize;
		struct sxgbe_rx_norm_desc *p;

		p = priv->rxq[qnum]->dma_rx + entry;

		if (likely(priv->rxq[qnum]->rx_skbuff[entry] == NULL)) {
			struct sk_buff *skb;

			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);

			if (unlikely(skb == NULL))
				break;

			priv->rxq[qnum]->rx_skbuff[entry] = skb;
			priv->rxq[qnum]->rx_skbuff_dma[entry] =
				dma_map_single(priv->device, skb->data, bfsize,
					       DMA_FROM_DEVICE);

			p->rdes23.rx_rd_des23.buf2_addr =
				priv->rxq[qnum]->rx_skbuff_dma[entry];
		}

		/* Added memory barrier for RX descriptor modification */
		wmb();
		priv->hw->desc->set_rx_owner(p);
		/* Added memory barrier for RX descriptor modification */
		wmb();
	}
}

/**
 * sxgbe_rx: receive the frames from the remote host
 * @priv: driver private structure
 * @limit: NAPI budget.
 * Description : this is the function called by the NAPI poll method.
 * It gets all the frames inside the ring.
 */
static int sxgbe_rx(struct sxgbe_priv_data *priv, int limit)
{
	u8 qnum = priv->cur_rx_qnum;
	unsigned int rxsize = priv->dma_rx_size;
	unsigned int entry = priv->rxq[qnum]->cur_rx;
	unsigned int next_entry = 0;
	unsigned int count = 0;
	int checksum;
	int status;

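	/* Walk the ring until the budget is exhausted or a descriptor that
	 * is still owned by the DMA is found, which marks the end of the
	 * frames the hardware has written back.
	 */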
	while (count < limit) {
		struct sxgbe_rx_norm_desc *p;
		struct sk_buff *skb;
		int frame_len;

		p = priv->rxq[qnum]->dma_rx + entry;

		if (priv->hw->desc->get_rx_owner(p))
			break;

		count++;

		next_entry = (++priv->rxq[qnum]->cur_rx) % rxsize;
		prefetch(priv->rxq[qnum]->dma_rx + next_entry);

		/* Read the status of the incoming frame and also get checksum
		 * value based on whether it is enabled in SXGBE hardware or
		 * not.
		 */
		status = priv->hw->desc->rx_wbstatus(p, &priv->xstats,
						     &checksum);
		if (unlikely(status < 0)) {
			entry = next_entry;
			continue;
		}
		if (unlikely(!priv->rxcsum_insertion))
			checksum = CHECKSUM_NONE;

		skb = priv->rxq[qnum]->rx_skbuff[entry];

		if (unlikely(!skb))
			netdev_err(priv->dev, "rx descriptor is not consistent\n");

		prefetch(skb->data - NET_IP_ALIGN);
		priv->rxq[qnum]->rx_skbuff[entry] = NULL;

		frame_len = priv->hw->desc->get_rx_frame_len(p);

		skb_put(skb, frame_len);

		skb->ip_summed = checksum;
		if (checksum == CHECKSUM_NONE)
			netif_receive_skb(skb);
		else
			napi_gro_receive(&priv->napi, skb);

		entry = next_entry;
	}

	sxgbe_rx_refill(priv);

	return count;
}

/**
 * sxgbe_poll - sxgbe poll method (NAPI)
 * @napi : pointer to the napi structure.
 * @budget : maximum number of packets that the current CPU can receive from
 *	     all interfaces.
 * Description :
 * To look at the incoming frames and clear the tx resources.
 */
static int sxgbe_poll(struct napi_struct *napi, int budget)
{
	struct sxgbe_priv_data *priv = container_of(napi,
						    struct sxgbe_priv_data,
						    napi);
	int work_done = 0;
	u8 qnum = priv->cur_rx_qnum;

	priv->xstats.napi_poll++;
	/* first, clean the tx queues */
	sxgbe_tx_all_clean(priv);

	work_done = sxgbe_rx(priv, budget);
	if (work_done < budget) {
		napi_complete(napi);
		priv->hw->dma->enable_dma_irq(priv->ioaddr, qnum);
	}

	return work_done;
}

/**
 * sxgbe_tx_timeout
 * @dev : Pointer to net device structure
 * Description: this function is called when a packet transmission fails to
 * complete within a reasonable time. The driver will mark the error in the
 * netdev structure and arrange for the device to be reset to a sane state
 * in order to transmit a new packet.
 */
static void sxgbe_tx_timeout(struct net_device *dev)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);

	sxgbe_reset_all_tx_queues(priv);
}

/**
 * sxgbe_common_interrupt - main ISR
 * @irq: interrupt number.
 * @dev_id: to pass the net device pointer.
 * Description: this is the main driver interrupt service routine.
 * It calls the DMA ISR and also the core ISR to manage PMT, MMC, LPI
 * interrupts.
 */
static irqreturn_t sxgbe_common_interrupt(int irq, void *dev_id)
{
	struct net_device *netdev = (struct net_device *)dev_id;
	struct sxgbe_priv_data *priv = netdev_priv(netdev);
	int status;

	status = priv->hw->mac->host_irq_status(priv->ioaddr, &priv->xstats);
	/* For LPI we need to save the tx status */
	if (status & TX_ENTRY_LPI_MODE) {
		priv->xstats.tx_lpi_entry_n++;
		priv->tx_path_in_lpi_mode = true;
	}
	if (status & TX_EXIT_LPI_MODE) {
		priv->xstats.tx_lpi_exit_n++;
		priv->tx_path_in_lpi_mode = false;
	}
	if (status & RX_ENTRY_LPI_MODE)
		priv->xstats.rx_lpi_entry_n++;
	if (status & RX_EXIT_LPI_MODE)
		priv->xstats.rx_lpi_exit_n++;

	return IRQ_HANDLED;
}

/**
 * sxgbe_tx_interrupt - TX DMA ISR
 * @irq: interrupt number.
 * @dev_id: to pass the TX queue pointer.
 * Description: this is the tx dma interrupt service routine.
 */
static irqreturn_t sxgbe_tx_interrupt(int irq, void *dev_id)
{
	int status;
	struct sxgbe_tx_queue *txq = (struct sxgbe_tx_queue *)dev_id;
	struct sxgbe_priv_data *priv = txq->priv_ptr;

	/* get the channel status */
	status = priv->hw->dma->tx_dma_int_status(priv->ioaddr, txq->queue_no,
						  &priv->xstats);
	/* check for normal path */
	if (likely((status & handle_tx)))
		napi_schedule(&priv->napi);

	/* check for unrecoverable error */
	if (unlikely((status & tx_hard_error)))
		sxgbe_restart_tx_queue(priv, txq->queue_no);

	/* check for TC configuration change */
	if (unlikely((status & tx_bump_tc) &&
		     (priv->tx_tc != SXGBE_MTL_SFMODE) &&
		     (priv->tx_tc < 512))) {
		/* step of TX TC is 32 till 128, otherwise 64 */
		priv->tx_tc += (priv->tx_tc < 128) ? 32 : 64;
		priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr,
					       txq->queue_no, priv->tx_tc);
		priv->xstats.tx_threshold = priv->tx_tc;
	}

	return IRQ_HANDLED;
}

/**
 * sxgbe_rx_interrupt - RX DMA ISR
 * @irq: interrupt number.
 * @dev_id: to pass the RX queue pointer.
 * Description: this is the rx dma interrupt service routine.
 */
static irqreturn_t sxgbe_rx_interrupt(int irq, void *dev_id)
{
	int status;
	struct sxgbe_rx_queue *rxq = (struct sxgbe_rx_queue *)dev_id;
	struct sxgbe_priv_data *priv = rxq->priv_ptr;

	/* get the channel status */
	status = priv->hw->dma->rx_dma_int_status(priv->ioaddr, rxq->queue_no,
						  &priv->xstats);

	if (likely((status & handle_rx) && (napi_schedule_prep(&priv->napi)))) {
		priv->hw->dma->disable_dma_irq(priv->ioaddr, rxq->queue_no);
		__napi_schedule(&priv->napi);
	}

	/* check for TC configuration change */
	if (unlikely((status & rx_bump_tc) &&
		     (priv->rx_tc != SXGBE_MTL_SFMODE) &&
		     (priv->rx_tc < 128))) {
		/* step of TC is 32 */
		priv->rx_tc += 32;
		priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr,
					       rxq->queue_no, priv->rx_tc);
		priv->xstats.rx_threshold = priv->rx_tc;
	}

	return IRQ_HANDLED;
}

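/* Combine the low and high halves of an MMC counter register pair into
 * a single 64-bit value.
 */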
static inline u64 sxgbe_get_stat64(void __iomem *ioaddr, int reg_lo, int reg_hi)
{
	u64 val = readl(ioaddr + reg_lo);

	val |= ((u64)readl(ioaddr + reg_hi)) << 32;

	return val;
}

/* sxgbe_get_stats64 - entry point to see statistical information of device
 * @dev : device pointer.
 * @stats : pointer to hold all the statistical information of device.
 * Description:
 * This function is a driver entry point whenever ifconfig command gets
 * executed to see device statistics. Statistics are number of
 * bytes sent or received, errors occurred etc.
 * Return value:
 * This function returns various statistical information of device.
 */
static struct rtnl_link_stats64 *sxgbe_get_stats64(struct net_device *dev,
						   struct rtnl_link_stats64 *stats)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	void __iomem *ioaddr = priv->ioaddr;
	u64 count;

	spin_lock(&priv->stats_lock);
	/* Freeze the counter registers before reading value otherwise it may
	 * get updated by hardware while we are reading them
	 */
	writel(SXGBE_MMC_CTRL_CNT_FRZ, ioaddr + SXGBE_MMC_CTL_REG);

	stats->rx_bytes = sxgbe_get_stat64(ioaddr,
					   SXGBE_MMC_RXOCTETLO_GCNT_REG,
					   SXGBE_MMC_RXOCTETHI_GCNT_REG);

	stats->rx_packets = sxgbe_get_stat64(ioaddr,
					     SXGBE_MMC_RXFRAMELO_GBCNT_REG,
					     SXGBE_MMC_RXFRAMEHI_GBCNT_REG);

	stats->multicast = sxgbe_get_stat64(ioaddr,
					    SXGBE_MMC_RXMULTILO_GCNT_REG,
					    SXGBE_MMC_RXMULTIHI_GCNT_REG);

	stats->rx_crc_errors = sxgbe_get_stat64(ioaddr,
						SXGBE_MMC_RXCRCERRLO_REG,
						SXGBE_MMC_RXCRCERRHI_REG);

	stats->rx_length_errors = sxgbe_get_stat64(ioaddr,
						   SXGBE_MMC_RXLENERRLO_REG,
						   SXGBE_MMC_RXLENERRHI_REG);

	stats->rx_missed_errors = sxgbe_get_stat64(ioaddr,
						   SXGBE_MMC_RXFIFOOVERFLOWLO_GBCNT_REG,
						   SXGBE_MMC_RXFIFOOVERFLOWHI_GBCNT_REG);

	stats->tx_bytes = sxgbe_get_stat64(ioaddr,
					   SXGBE_MMC_TXOCTETLO_GCNT_REG,
					   SXGBE_MMC_TXOCTETHI_GCNT_REG);

	count = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXFRAMELO_GBCNT_REG,
				 SXGBE_MMC_TXFRAMEHI_GBCNT_REG);

	stats->tx_errors = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXFRAMELO_GCNT_REG,
					    SXGBE_MMC_TXFRAMEHI_GCNT_REG);
	stats->tx_errors = count - stats->tx_errors;
	stats->tx_packets = count;
	stats->tx_fifo_errors = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXUFLWLO_GBCNT_REG,
						 SXGBE_MMC_TXUFLWHI_GBCNT_REG);
	writel(0, ioaddr + SXGBE_MMC_CTL_REG);
	spin_unlock(&priv->stats_lock);

	return stats;
}

/* sxgbe_set_features - entry point to set offload features of the device.
 * @dev : device pointer.
 * @features : features which are required to be set.
 * Description:
 * This function is a driver entry point and called by Linux kernel whenever
 * any device features are set or reset by user.
 * Return value:
 * This function returns 0 after setting or resetting device features.
 */
static int sxgbe_set_features(struct net_device *dev,
			      netdev_features_t features)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	netdev_features_t changed = dev->features ^ features;

	if (changed & NETIF_F_RXCSUM) {
		if (features & NETIF_F_RXCSUM) {
			priv->hw->mac->enable_rx_csum(priv->ioaddr);
			priv->rxcsum_insertion = true;
		} else {
			priv->hw->mac->disable_rx_csum(priv->ioaddr);
			priv->rxcsum_insertion = false;
		}
	}

	return 0;
}

/* sxgbe_change_mtu - entry point to change MTU size for the device.
 * @dev : device pointer.
 * @new_mtu : the new MTU size for the device.
 * Description: the Maximum Transfer Unit (MTU) is used by the network layer
 * to drive packet transmission. Ethernet has an MTU of 1500 octets
 * (ETH_DATA_LEN). This value can be changed with ifconfig.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */
static int sxgbe_change_mtu(struct net_device *dev, int new_mtu)
{
	/* RFC 791, page 25, "Every internet module must be able to forward
	 * a datagram of 68 octets without further fragmentation."
	 */
	if (new_mtu < MIN_MTU || (new_mtu > MAX_MTU)) {
		netdev_err(dev, "invalid MTU, MTU should be in between %d and %d\n",
			   MIN_MTU, MAX_MTU);
		return -EINVAL;
	}

	/* Return if the buffer sizes will not change */
	if (dev->mtu == new_mtu)
		return 0;

	dev->mtu = new_mtu;

	if (!netif_running(dev))
		return 0;

	/* The receive ring buffer size is set based on the MTU. If the MTU
	 * changes, the receive ring buffers need to be reinitialised, so
	 * bring the interface down and back up.
	 */
	sxgbe_release(dev);
	return sxgbe_open(dev);
}

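/* Program one unicast address filter register pair: the high register
 * takes the last two octets of the address plus the Address Enable bit,
 * the low register the first four octets.
 */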
1820 static void sxgbe_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
1821 unsigned int reg_n)
1822 {
1823 unsigned long data;
1824
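	/* Register layout, with an illustrative address: 00:11:22:33:44:55
	 * yields 0x5544 (plus the AE bit) in the high register and
	 * 0x33221100 in the low register.
	 */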
	data = (addr[5] << 8) | addr[4];
	/* For the MAC Addr registers we have to set the Address Enable (AE)
	 * bit that has no effect on the High Reg 0 where the bit 31 (MO)
	 * is RO.
	 */
	writel(data | SXGBE_HI_REG_AE, ioaddr + SXGBE_ADDR_HIGH(reg_n));
	data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
	writel(data, ioaddr + SXGBE_ADDR_LOW(reg_n));
}

/**
 * sxgbe_set_rx_mode - entry point for setting the receive mode of a device
 *  (unicast, multicast and promiscuous addressing)
 * @dev : pointer to the device structure
 * Description:
 * This function is a driver entry point which gets called by the kernel
 * whenever a receive mode like unicast, multicast or promiscuous
 * must be enabled/disabled.
 * Return value:
 * void.
 */
static void sxgbe_set_rx_mode(struct net_device *dev)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	void __iomem *ioaddr = (void __iomem *)priv->ioaddr;
	unsigned int value = 0;
	u32 mc_filter[2];
	struct netdev_hw_addr *ha;
	int reg = 1;

	netdev_dbg(dev, "%s: # mcasts %d, # unicast %d\n",
		   __func__, netdev_mc_count(dev), netdev_uc_count(dev));

	if (dev->flags & IFF_PROMISC) {
		value = SXGBE_FRAME_FILTER_PR;

	} else if ((netdev_mc_count(dev) > SXGBE_HASH_TABLE_SIZE) ||
		   (dev->flags & IFF_ALLMULTI)) {
		value = SXGBE_FRAME_FILTER_PM;	/* pass all multi */
		writel(0xffffffff, ioaddr + SXGBE_HASH_HIGH);
		writel(0xffffffff, ioaddr + SXGBE_HASH_LOW);

	} else if (!netdev_mc_empty(dev)) {
		/* Hash filter for multicast */
		value = SXGBE_FRAME_FILTER_HMC;

		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			/* The upper 6 bits of the calculated CRC are used to
			 * index the contents of the hash table
			 */
			int bit_nr = bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26;

			/* The most significant bit determines the register to
			 * use (H/L) while the other 5 bits determine the bit
			 * within the register.
			 */
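			/* Worked example (illustrative): bit_nr == 40 means
			 * bit 5 is set, selecting mc_filter[1] (HASH_HIGH),
			 * while the low five bits (40 & 31 == 8) set bit 8
			 * within that register.
			 */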
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		}
		writel(mc_filter[0], ioaddr + SXGBE_HASH_LOW);
		writel(mc_filter[1], ioaddr + SXGBE_HASH_HIGH);
	}

	/* Handle multiple unicast addresses (perfect filtering) */
	if (netdev_uc_count(dev) > SXGBE_MAX_PERFECT_ADDRESSES) {
		/* Switch to promiscuous mode if more unicast addresses
		 * are requested than perfect filtering can handle
		 */
		value |= SXGBE_FRAME_FILTER_PR;
	} else {
		netdev_for_each_uc_addr(ha, dev) {
			sxgbe_set_umac_addr(ioaddr, ha->addr, reg);
			reg++;
		}
	}
#ifdef FRAME_FILTER_DEBUG
	/* Enable Receive all mode (to debug filtering_fail errors) */
	value |= SXGBE_FRAME_FILTER_RA;
#endif
	writel(value, ioaddr + SXGBE_FRAME_FILTER);

	netdev_dbg(dev, "Filter: 0x%08x\n\tHash: HI 0x%08x, LO 0x%08x\n",
		   readl(ioaddr + SXGBE_FRAME_FILTER),
		   readl(ioaddr + SXGBE_HASH_HIGH),
		   readl(ioaddr + SXGBE_HASH_LOW));
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * sxgbe_poll_controller - entry point for polling receive by device
 * @dev : pointer to the device structure
 * Description:
 * This function is used by NETCONSOLE and other diagnostic tools
 * to allow network I/O with interrupts disabled.
 * Return value:
 * Void.
 */
static void sxgbe_poll_controller(struct net_device *dev)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);

	disable_irq(priv->irq);
	sxgbe_rx_interrupt(priv->irq, dev);
	enable_irq(priv->irq);
}
#endif

/* sxgbe_ioctl - Entry point for the Ioctl
 * @dev: Device pointer.
 * @rq: An IOCTL-specific structure, that can contain a pointer to
 * a proprietary structure used to pass information to the driver.
 * @cmd: IOCTL command
 * Description:
 * Currently it only supports the MII ioctls handled by phy_mii_ioctl(...).
 */
static int sxgbe_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct sxgbe_priv_data *priv = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	if (!netif_running(dev))
		return -EINVAL;

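	/* MII register access (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG) is
	 * delegated to the attached PHY through the phylib helper.
	 */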
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (!priv->phydev)
			return -EINVAL;
		ret = phy_mii_ioctl(priv->phydev, rq, cmd);
		break;
	default:
		break;
	}

	return ret;
}

static const struct net_device_ops sxgbe_netdev_ops = {
	.ndo_open		= sxgbe_open,
	.ndo_start_xmit		= sxgbe_xmit,
	.ndo_stop		= sxgbe_release,
	.ndo_get_stats64	= sxgbe_get_stats64,
	.ndo_change_mtu		= sxgbe_change_mtu,
	.ndo_set_features	= sxgbe_set_features,
	.ndo_set_rx_mode	= sxgbe_set_rx_mode,
	.ndo_tx_timeout		= sxgbe_tx_timeout,
	.ndo_do_ioctl		= sxgbe_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= sxgbe_poll_controller,
#endif
	.ndo_set_mac_address	= eth_mac_addr,
};

/* Get the hardware ops */
static void sxgbe_get_ops(struct sxgbe_ops * const ops_ptr)
{
	ops_ptr->mac = sxgbe_get_core_ops();
	ops_ptr->desc = sxgbe_get_desc_ops();
	ops_ptr->dma = sxgbe_get_dma_ops();
	ops_ptr->mtl = sxgbe_get_mtl_ops();

	/* set the MDIO communication Address/Data registers */
	ops_ptr->mii.addr = SXGBE_MDIO_SCMD_ADD_REG;
	ops_ptr->mii.data = SXGBE_MDIO_SCMD_DATA_REG;

	/* Assign the default link settings: SXGBE defines no default
	 * register values for these, so port and duplex are set to 0.
	 */
	ops_ptr->link.port = 0;
	ops_ptr->link.duplex = 0;
	ops_ptr->link.speed = SXGBE_SPEED_10G;
}

/**
 * sxgbe_hw_init - Init the sxgbe MAC device
 * @priv: driver private structure
 * Description: this function checks the HW capability
 * (if supported) and sets the driver's features.
 */
static int sxgbe_hw_init(struct sxgbe_priv_data * const priv)
{
	u32 ctrl_ids;

	priv->hw = kmalloc(sizeof(*priv->hw), GFP_KERNEL);
	if (!priv->hw)
		return -ENOMEM;

	/* get the hardware ops */
	sxgbe_get_ops(priv->hw);

	/* get the controller id */
	ctrl_ids = priv->hw->mac->get_controller_version(priv->ioaddr);
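	/* Decoding example (illustrative value): ctrl_ids == 0x00210041
	 * gives user ID 0x21 (bits 23:16) and controller ID 0x41
	 * (bits 7:0).
	 */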
	priv->hw->ctrl_uid = (ctrl_ids & 0x00ff0000) >> 16;
	priv->hw->ctrl_id = (ctrl_ids & 0x000000ff);
	pr_info("user ID: 0x%x, Controller ID: 0x%x\n",
		priv->hw->ctrl_uid, priv->hw->ctrl_id);

	/* get the H/W features */
	if (!sxgbe_get_hw_features(priv))
		pr_info("Hardware features not found\n");

	if (priv->hw_cap.tx_csum_offload)
		pr_info("TX Checksum offload supported\n");

	if (priv->hw_cap.rx_csum_offload)
		pr_info("RX Checksum offload supported\n");

	return 0;
}
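
/* Typical caller (sketch, names illustrative): the platform glue in
 * sxgbe_platform.c invokes sxgbe_drv_probe(&pdev->dev, plat_dat, base)
 * from its probe routine, passing the ioremapped register base, and
 * keeps the returned private data around for the remove path.
 */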

/**
 * sxgbe_drv_probe
 * @device: device pointer
 * @plat_dat: platform data pointer
 * @addr: iobase memory address
 * Description: this is the main probe function, used to allocate the
 * net device via alloc_etherdev_mqs and set up the private structure.
 */
struct sxgbe_priv_data *sxgbe_drv_probe(struct device *device,
					struct sxgbe_plat_data *plat_dat,
					void __iomem *addr)
{
	struct sxgbe_priv_data *priv;
	struct net_device *ndev;
	int ret;
	u8 queue_num;

	ndev = alloc_etherdev_mqs(sizeof(struct sxgbe_priv_data),
				  SXGBE_TX_QUEUES, SXGBE_RX_QUEUES);
	if (!ndev)
		return NULL;

	SET_NETDEV_DEV(ndev, device);

	priv = netdev_priv(ndev);
	priv->device = device;
	priv->dev = ndev;

	sxgbe_set_ethtool_ops(ndev);
	priv->plat = plat_dat;
	priv->ioaddr = addr;

	/* Verify driver arguments */
	sxgbe_verify_args();

	/* Init MAC and get the capabilities */
	ret = sxgbe_hw_init(priv);
	if (ret)
		goto error_free_netdev;

	/* allocate memory resources for Descriptor rings */
	ret = txring_mem_alloc(priv);
	if (ret)
		goto error_free_hw;

	ret = rxring_mem_alloc(priv);
	if (ret)
		goto error_free_hw;

	ndev->netdev_ops = &sxgbe_netdev_ops;

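	/* hw_features advertises the offloads user space may toggle via
	 * ethtool; features is the currently active set. NETIF_F_HIGHDMA
	 * marks the device as able to DMA to/from high memory.
	 */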
	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_RXCSUM | NETIF_F_TSO | NETIF_F_TSO6 |
			    NETIF_F_GRO;
	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
	ndev->watchdog_timeo = msecs_to_jiffies(TX_TIMEO);

	/* assign filtering support */
	ndev->priv_flags |= IFF_UNICAST_FLT;

	priv->msg_enable = netif_msg_init(debug, default_msg_level);

	/* Enable TCP segmentation offload for all DMA channels */
	if (priv->hw_cap.tcpseg_offload) {
		SXGBE_FOR_EACH_QUEUE(SXGBE_TX_QUEUES, queue_num) {
			priv->hw->dma->enable_tso(priv->ioaddr, queue_num);
		}
	}

	/* Enable Rx checksum offload */
	if (priv->hw_cap.rx_csum_offload) {
		priv->hw->mac->enable_rx_csum(priv->ioaddr);
		priv->rxcsum_insertion = true;
	}

	/* Initialise pause frame settings */
	priv->rx_pause = 1;
	priv->tx_pause = 1;

	/* The Rx watchdog is available; enable it unless the platform
	 * data opts out (riwt_off).
	 */
	if (!priv->plat->riwt_off) {
		priv->use_riwt = 1;
		pr_info("Enable RX Mitigation via HW Watchdog Timer\n");
	}

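	/* 64 is the conventional NAPI poll budget (NAPI_POLL_WEIGHT) */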
	netif_napi_add(ndev, &priv->napi, sxgbe_poll, 64);

	spin_lock_init(&priv->stats_lock);

	priv->sxgbe_clk = clk_get(priv->device, SXGBE_RESOURCE_NAME);
	if (IS_ERR(priv->sxgbe_clk)) {
		netdev_warn(ndev, "%s: warning: cannot get CSR clock\n",
			    __func__);
		goto error_napi_del;
	}

	/* If a specific clk_csr value is passed from the platform, the CSR
	 * Clock Range selection cannot be changed at run-time and is fixed.
	 * Otherwise the driver tries to set the MDC clock dynamically
	 * according to the actual CSR clock input.
	 */
	if (!priv->plat->clk_csr)
		sxgbe_clk_csr_set(priv);
	else
		priv->clk_csr = priv->plat->clk_csr;

	/* MDIO bus Registration */
	ret = sxgbe_mdio_register(ndev);
	if (ret < 0) {
		netdev_dbg(ndev, "%s: MDIO bus (id: %d) registration failed\n",
			   __func__, priv->plat->bus_id);
		goto error_clk_put;
	}

	ret = register_netdev(ndev);
	if (ret) {
		pr_err("%s: ERROR %i registering the device\n", __func__, ret);
		goto error_mdio_unregister;
	}

	sxgbe_check_ether_addr(priv);

	return priv;

error_mdio_unregister:
	sxgbe_mdio_unregister(ndev);
error_clk_put:
	clk_put(priv->sxgbe_clk);
error_napi_del:
	netif_napi_del(&priv->napi);
error_free_hw:
	kfree(priv->hw);
error_free_netdev:
	free_netdev(ndev);

	return NULL;
}

/**
 * sxgbe_drv_remove
 * @ndev: net device pointer
 * Description: this function resets the TX/RX processes, disables the MAC
 * RX/TX, changes the link status and releases the DMA descriptor rings.
 */
int sxgbe_drv_remove(struct net_device *ndev)
{
	struct sxgbe_priv_data *priv = netdev_priv(ndev);

	netdev_info(ndev, "%s: removing driver\n", __func__);

	priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES);
	priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES);

	priv->hw->mac->enable_tx(priv->ioaddr, false);
	priv->hw->mac->enable_rx(priv->ioaddr, false);

	unregister_netdev(ndev);

	sxgbe_mdio_unregister(ndev);

	clk_put(priv->sxgbe_clk);

	netif_napi_del(&priv->napi);

	kfree(priv->hw);

	free_netdev(ndev);

	return 0;
}

#ifdef CONFIG_PM
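/* Power-management hooks are stubs for now: suspend/resume succeed as
 * no-ops, while freeze/restore report -ENOSYS (not implemented).
 */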
int sxgbe_suspend(struct net_device *ndev)
{
	return 0;
}

int sxgbe_resume(struct net_device *ndev)
{
	return 0;
}

int sxgbe_freeze(struct net_device *ndev)
{
	return -ENOSYS;
}

int sxgbe_restore(struct net_device *ndev)
{
	return -ENOSYS;
}
#endif /* CONFIG_PM */

/* Driver is configured as Platform driver */
static int __init sxgbe_init(void)
{
	int ret;

	ret = sxgbe_register_platform();
	if (ret)
		goto err;
	return 0;
err:
	pr_err("driver registration failed\n");
	return ret;
}

static void __exit sxgbe_exit(void)
{
	sxgbe_unregister_platform();
}

module_init(sxgbe_init);
module_exit(sxgbe_exit);

#ifndef MODULE
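/* Built-in usage example: the option below is parsed from the kernel
 * command line, e.g. "sxgbeeth=eee_timer:2000" would set the LPI timer
 * to 2000 ms (value purely illustrative).
 */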
static int __init sxgbe_cmdline_opt(char *str)
{
	char *opt;

	if (!str || !*str)
		return -EINVAL;
	while ((opt = strsep(&str, ",")) != NULL) {
		if (!strncmp(opt, "eee_timer:", 10)) {
			if (kstrtoint(opt + 10, 0, &eee_timer))
				goto err;
		}
	}
	return 0;

err:
	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
	return -EINVAL;
}

__setup("sxgbeeth=", sxgbe_cmdline_opt);
#endif /* MODULE */

MODULE_DESCRIPTION("SAMSUNG 10G/2.5G/1G Ethernet PLATFORM driver");

MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
MODULE_PARM_DESC(eee_timer, "EEE-LPI Default LS timer value");

MODULE_AUTHOR("Siva Reddy Kallam <siva.kallam@samsung.com>");
MODULE_AUTHOR("ByungHo An <bh74.an@samsung.com>");
MODULE_AUTHOR("Girish K S <ks.giri@samsung.com>");
MODULE_AUTHOR("Vipul Pandya <vipul.pandya@samsung.com>");

MODULE_LICENSE("GPL");