/* Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
 *
 * Copyright (C) 2012 Marvell
 *
 * Rami Rosen <rosenr@marvell.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/io.h>
#include <net/tso.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/phy.h>
#include <linux/clk.h>
/* Registers */
#define MVNETA_RXQ_CONFIG_REG(q)		(0x1400 + ((q) << 2))
#define      MVNETA_RXQ_HW_BUF_ALLOC		BIT(1)
#define      MVNETA_RXQ_PKT_OFFSET_ALL_MASK	(0xf << 8)
#define      MVNETA_RXQ_PKT_OFFSET_MASK(offs)	((offs) << 8)
#define MVNETA_RXQ_THRESHOLD_REG(q)		(0x14c0 + ((q) << 2))
#define      MVNETA_RXQ_NON_OCCUPIED(v)		((v) << 16)
#define MVNETA_RXQ_BASE_ADDR_REG(q)		(0x1480 + ((q) << 2))
#define MVNETA_RXQ_SIZE_REG(q)			(0x14a0 + ((q) << 2))
#define      MVNETA_RXQ_BUF_SIZE_SHIFT		19
#define      MVNETA_RXQ_BUF_SIZE_MASK		(0x1fff << 19)
#define MVNETA_RXQ_STATUS_REG(q)		(0x14e0 + ((q) << 2))
#define      MVNETA_RXQ_OCCUPIED_ALL_MASK	0x3fff
#define MVNETA_RXQ_STATUS_UPDATE_REG(q)		(0x1500 + ((q) << 2))
#define      MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT	16
#define      MVNETA_RXQ_ADD_NON_OCCUPIED_MAX	255
#define MVNETA_PORT_RX_RESET			0x1cc0
#define      MVNETA_PORT_RX_DMA_RESET		BIT(0)
#define MVNETA_PHY_ADDR				0x2000
#define      MVNETA_PHY_ADDR_MASK		0x1f
#define MVNETA_MBUS_RETRY			0x2010
#define MVNETA_UNIT_INTR_CAUSE			0x2080
#define MVNETA_UNIT_CONTROL			0x20B0
#define      MVNETA_PHY_POLLING_ENABLE		BIT(1)
#define MVNETA_WIN_BASE(w)			(0x2200 + ((w) << 3))
#define MVNETA_WIN_SIZE(w)			(0x2204 + ((w) << 3))
#define MVNETA_WIN_REMAP(w)			(0x2280 + ((w) << 2))
#define MVNETA_BASE_ADDR_ENABLE			0x2290
#define MVNETA_PORT_CONFIG			0x2400
#define      MVNETA_UNI_PROMISC_MODE		BIT(0)
#define      MVNETA_DEF_RXQ(q)			((q) << 1)
#define      MVNETA_DEF_RXQ_ARP(q)		((q) << 4)
#define      MVNETA_TX_UNSET_ERR_SUM		BIT(12)
#define      MVNETA_DEF_RXQ_TCP(q)		((q) << 16)
#define      MVNETA_DEF_RXQ_UDP(q)		((q) << 19)
#define      MVNETA_DEF_RXQ_BPDU(q)		((q) << 22)
#define      MVNETA_RX_CSUM_WITH_PSEUDO_HDR	BIT(25)
#define      MVNETA_PORT_CONFIG_DEFL_VALUE(q)	(MVNETA_DEF_RXQ(q)       | \
						 MVNETA_DEF_RXQ_ARP(q)	 | \
						 MVNETA_DEF_RXQ_TCP(q)	 | \
						 MVNETA_DEF_RXQ_UDP(q)	 | \
						 MVNETA_DEF_RXQ_BPDU(q)	 | \
						 MVNETA_TX_UNSET_ERR_SUM | \
						 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
#define MVNETA_PORT_CONFIG_EXTEND		0x2404
#define MVNETA_MAC_ADDR_LOW			0x2414
#define MVNETA_MAC_ADDR_HIGH			0x2418
#define MVNETA_SDMA_CONFIG			0x241c
#define      MVNETA_SDMA_BRST_SIZE_16		4
#define      MVNETA_RX_BRST_SZ_MASK(burst)	((burst) << 1)
#define      MVNETA_RX_NO_DATA_SWAP		BIT(4)
#define      MVNETA_TX_NO_DATA_SWAP		BIT(5)
#define      MVNETA_DESC_SWAP			BIT(6)
#define      MVNETA_TX_BRST_SZ_MASK(burst)	((burst) << 22)
#define MVNETA_PORT_STATUS			0x2444
#define      MVNETA_TX_IN_PRGRS			BIT(1)
#define      MVNETA_TX_FIFO_EMPTY		BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE		0x247c
#define MVNETA_SERDES_CFG			0x24A0
#define      MVNETA_SGMII_SERDES_PROTO		0x0cc7
#define      MVNETA_QSGMII_SERDES_PROTO		0x0667
#define MVNETA_TYPE_PRIO			0x24bc
#define      MVNETA_FORCE_UNI			BIT(21)
#define MVNETA_TXQ_CMD_1			0x24e4
#define MVNETA_TXQ_CMD				0x2448
#define      MVNETA_TXQ_DISABLE_SHIFT		8
#define      MVNETA_TXQ_ENABLE_MASK		0x000000ff
#define MVNETA_ACC_MODE				0x2500
#define MVNETA_CPU_MAP(cpu)			(0x2540 + ((cpu) << 2))
#define      MVNETA_CPU_RXQ_ACCESS_ALL_MASK	0x000000ff
#define      MVNETA_CPU_TXQ_ACCESS_ALL_MASK	0x0000ff00
#define MVNETA_RXQ_TIME_COAL_REG(q)		(0x2580 + ((q) << 2))
/* Exception Interrupt Port/Queue Cause register */

#define MVNETA_INTR_NEW_CAUSE			0x25a0
#define MVNETA_INTR_NEW_MASK			0x25a4

/* bits  0..7  = TXQ SENT, one bit per queue.
 * bits  8..15 = RXQ OCCUP, one bit per queue.
 * bits 16..23 = RXQ FREE, one bit per queue.
 * bit  29     = OLD_REG_SUM, see old reg ?
 * bit  30     = TX_ERR_SUM, one bit for 4 ports
 * bit  31     = MISC_SUM,   one bit for 4 ports
 */
#define      MVNETA_TX_INTR_MASK(nr_txqs)	(((1 << nr_txqs) - 1) << 0)
#define      MVNETA_TX_INTR_MASK_ALL		(0xff << 0)
#define      MVNETA_RX_INTR_MASK(nr_rxqs)	(((1 << nr_rxqs) - 1) << 8)
#define      MVNETA_RX_INTR_MASK_ALL		(0xff << 8)
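
/* With the defaults used below (txq_number == 8, rxq_number == 1),
 * MVNETA_TX_INTR_MASK(8) evaluates to 0x000000ff and
 * MVNETA_RX_INTR_MASK(1) to 0x00000100: all eight TXQ SENT bits plus
 * the single RXQ OCCUP bit of the cause register.
 */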
#define MVNETA_INTR_OLD_CAUSE			0x25a8
#define MVNETA_INTR_OLD_MASK			0x25ac

/* Data Path Port/Queue Cause Register */
#define MVNETA_INTR_MISC_CAUSE			0x25b0
#define MVNETA_INTR_MISC_MASK			0x25b4

#define      MVNETA_CAUSE_PHY_STATUS_CHANGE	BIT(0)
#define      MVNETA_CAUSE_LINK_CHANGE		BIT(1)
#define      MVNETA_CAUSE_PTP			BIT(4)

#define      MVNETA_CAUSE_INTERNAL_ADDR_ERR	BIT(7)
#define      MVNETA_CAUSE_RX_OVERRUN		BIT(8)
#define      MVNETA_CAUSE_RX_CRC_ERROR		BIT(9)
#define      MVNETA_CAUSE_RX_LARGE_PKT		BIT(10)
#define      MVNETA_CAUSE_TX_UNDERUN		BIT(11)
#define      MVNETA_CAUSE_PRBS_ERR		BIT(12)
#define      MVNETA_CAUSE_PSC_SYNC_CHANGE	BIT(13)
#define      MVNETA_CAUSE_SERDES_SYNC_ERR	BIT(14)

#define      MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT	16
#define      MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK   (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
#define      MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))

#define      MVNETA_CAUSE_TXQ_ERROR_SHIFT	24
#define      MVNETA_CAUSE_TXQ_ERROR_ALL_MASK	(0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
#define      MVNETA_CAUSE_TXQ_ERROR_MASK(q)	(1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))

#define MVNETA_INTR_ENABLE			0x25b8
#define      MVNETA_TXQ_INTR_ENABLE_ALL_MASK	0x0000ff00
#define      MVNETA_RXQ_INTR_ENABLE_ALL_MASK	0xff000000  /* note: neta says it's 0x000000FF */
#define MVNETA_RXQ_CMD				0x2680
#define      MVNETA_RXQ_DISABLE_SHIFT		8
#define      MVNETA_RXQ_ENABLE_MASK		0x000000ff
#define MVETH_TXQ_TOKEN_COUNT_REG(q)		(0x2700 + ((q) << 4))
#define MVETH_TXQ_TOKEN_CFG_REG(q)		(0x2704 + ((q) << 4))
#define MVNETA_GMAC_CTRL_0			0x2c00
#define      MVNETA_GMAC_MAX_RX_SIZE_SHIFT	2
#define      MVNETA_GMAC_MAX_RX_SIZE_MASK	0x7ffc
#define      MVNETA_GMAC0_PORT_ENABLE		BIT(0)
#define MVNETA_GMAC_CTRL_2			0x2c08
#define      MVNETA_GMAC2_PCS_ENABLE		BIT(3)
#define      MVNETA_GMAC2_PORT_RGMII		BIT(4)
#define      MVNETA_GMAC2_PORT_RESET		BIT(6)
#define MVNETA_GMAC_STATUS			0x2c10
#define      MVNETA_GMAC_LINK_UP		BIT(0)
#define      MVNETA_GMAC_SPEED_1000		BIT(1)
#define      MVNETA_GMAC_SPEED_100		BIT(2)
#define      MVNETA_GMAC_FULL_DUPLEX		BIT(3)
#define      MVNETA_GMAC_RX_FLOW_CTRL_ENABLE	BIT(4)
#define      MVNETA_GMAC_TX_FLOW_CTRL_ENABLE	BIT(5)
#define      MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE	BIT(6)
#define      MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE	BIT(7)
#define MVNETA_GMAC_AUTONEG_CONFIG		0x2c0c
#define      MVNETA_GMAC_FORCE_LINK_DOWN	BIT(0)
#define      MVNETA_GMAC_FORCE_LINK_PASS	BIT(1)
#define      MVNETA_GMAC_CONFIG_MII_SPEED	BIT(5)
#define      MVNETA_GMAC_CONFIG_GMII_SPEED	BIT(6)
#define      MVNETA_GMAC_AN_SPEED_EN		BIT(7)
#define      MVNETA_GMAC_CONFIG_FULL_DUPLEX	BIT(12)
#define      MVNETA_GMAC_AN_DUPLEX_EN		BIT(13)
#define MVNETA_MIB_COUNTERS_BASE		0x3080
#define      MVNETA_MIB_LATE_COLLISION		0x7c
#define MVNETA_DA_FILT_SPEC_MCAST		0x3400
#define MVNETA_DA_FILT_OTH_MCAST		0x3500
#define MVNETA_DA_FILT_UCAST_BASE		0x3600
#define MVNETA_TXQ_BASE_ADDR_REG(q)		(0x3c00 + ((q) << 2))
#define MVNETA_TXQ_SIZE_REG(q)			(0x3c20 + ((q) << 2))
#define      MVNETA_TXQ_SENT_THRESH_ALL_MASK	0x3fff0000
#define      MVNETA_TXQ_SENT_THRESH_MASK(coal)	((coal) << 16)
#define MVNETA_TXQ_UPDATE_REG(q)		(0x3c60 + ((q) << 2))
#define      MVNETA_TXQ_DEC_SENT_SHIFT		16
#define MVNETA_TXQ_STATUS_REG(q)		(0x3c40 + ((q) << 2))
#define      MVNETA_TXQ_SENT_DESC_SHIFT		16
#define      MVNETA_TXQ_SENT_DESC_MASK		0x3fff0000
#define MVNETA_PORT_TX_RESET			0x3cf0
#define      MVNETA_PORT_TX_DMA_RESET		BIT(0)
#define MVNETA_TX_MTU				0x3e0c
#define MVNETA_TX_TOKEN_SIZE			0x3e14
#define      MVNETA_TX_TOKEN_SIZE_MAX		0xffffffff
#define MVNETA_TXQ_TOKEN_SIZE_REG(q)		(0x3e40 + ((q) << 2))
#define      MVNETA_TXQ_TOKEN_SIZE_MAX		0x7fffffff

#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK	0xff
/* Descriptor ring Macros */
#define MVNETA_QUEUE_NEXT_DESC(q, index)	\
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)

/* Various constants */

/* Coalescing */
#define MVNETA_TXDONE_COAL_PKTS		16
#define MVNETA_RX_COAL_PKTS		32
#define MVNETA_RX_COAL_USEC		100

/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically by zeroes on
 * the RX side. Those two bytes being at the front of the Ethernet
 * header, they allow to have the IP header aligned on a 4 bytes
 * boundary automatically: the hardware skips those two bytes on its
 * own.
 */
#define MVNETA_MH_SIZE			2
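
/* With the 2 byte Marvell header in front, the 14 byte Ethernet header
 * ends 16 bytes into the buffer, so the IP header that follows it lands
 * on a 4 byte boundary without any extra software padding.
 */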
#define MVNETA_VLAN_TAG_LEN		4

#define MVNETA_CPU_D_CACHE_LINE_SIZE	32
#define MVNETA_TX_CSUM_MAX_SIZE		9800
#define MVNETA_ACC_MODE_EXT		1

/* Timeout constants */
#define MVNETA_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_RX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_TX_FIFO_EMPTY_TIMEOUT	10000

#define MVNETA_TX_MTU_MAX		0x3ffff

/* TSO header size */
#define TSO_HEADER_SIZE			128

/* Max number of Rx descriptors */
#define MVNETA_MAX_RXD			128

/* Max number of Tx descriptors */
#define MVNETA_MAX_TXD			532

/* Max number of allowed TCP segments for software TSO */
#define MVNETA_MAX_TSO_SEGS		100

#define MVNETA_MAX_SKB_DESCS		(MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

/* descriptor aligned size */
#define MVNETA_DESC_ALIGNED_SIZE	32

#define MVNETA_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN,			     \
	      MVNETA_CPU_D_CACHE_LINE_SIZE)
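
/* Worked example: for the common MTU of 1500,
 *   1500 + 2 (MH) + 4 (VLAN) + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) = 1524,
 * which ALIGN() rounds up to 1536, a multiple of the 32 byte cache line.
 */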
#define IS_TSO_HEADER(txq, addr) \
	((addr >= txq->tso_hdrs_phys) && \
	 (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
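
/* IS_TSO_HEADER tells the cleanup paths whether a descriptor's buffer
 * lives inside the queue's preallocated TSO header region; such buffers
 * belong to one long-lived DMA mapping and must not be unmapped
 * per-descriptor like regular skb data.
 */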
#define MVNETA_RX_BUF_SIZE(pkt_size)   ((pkt_size) + NET_SKB_PAD)

struct mvneta_pcpu_stats {
	struct	u64_stats_sync syncp;
	u64	rx_packets;
	u64	rx_bytes;
	u64	tx_packets;
	u64	tx_bytes;
};

struct mvneta_port {
	int pkt_size;
	unsigned int frag_size;
	void __iomem *base;
	struct mvneta_rx_queue *rxqs;
	struct mvneta_tx_queue *txqs;
	struct net_device *dev;

	u32 cause_rx_tx;
	struct napi_struct napi;

	/* Core clock */
	struct clk *clk;
	u8 mcast_count[256];
	u16 tx_ring_size;
	u16 rx_ring_size;
	struct mvneta_pcpu_stats *stats;

	struct mii_bus *mii_bus;
	struct phy_device *phy_dev;
	phy_interface_t phy_interface;
	struct device_node *phy_node;
};
/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design.
 */

#define MVNETA_TX_L3_OFF_SHIFT	0
#define MVNETA_TX_IP_HLEN_SHIFT	8
#define MVNETA_TX_L4_UDP	BIT(16)
#define MVNETA_TX_L3_IP6	BIT(17)
#define MVNETA_TXD_IP_CSUM	BIT(18)
#define MVNETA_TXD_Z_PAD	BIT(19)
#define MVNETA_TXD_L_DESC	BIT(20)
#define MVNETA_TXD_F_DESC	BIT(21)
#define MVNETA_TXD_FLZ_DESC	(MVNETA_TXD_Z_PAD  | \
				 MVNETA_TXD_L_DESC | \
				 MVNETA_TXD_F_DESC)
#define MVNETA_TX_L4_CSUM_FULL	BIT(30)
#define MVNETA_TX_L4_CSUM_NOT	BIT(31)

#define MVNETA_RXD_ERR_CRC		0x0
#define MVNETA_RXD_ERR_SUMMARY		BIT(16)
#define MVNETA_RXD_ERR_OVERRUN		BIT(17)
#define MVNETA_RXD_ERR_LEN		BIT(18)
#define MVNETA_RXD_ERR_RESOURCE		(BIT(17) | BIT(18))
#define MVNETA_RXD_ERR_CODE_MASK	(BIT(17) | BIT(18))
#define MVNETA_RXD_L3_IP4		BIT(25)
#define MVNETA_RXD_FIRST_LAST_DESC	(BIT(26) | BIT(27))
#define MVNETA_RXD_L4_CSUM_OK		BIT(30)
#if defined(__LITTLE_ENDIAN)
struct mvneta_tx_desc {
	u32  command;		/* Options used by HW for packet transmitting.*/
	u16  reserverd1;	/* csum_l4 (for future use) */
	u16  data_size;		/* Data size of transmitted packet in bytes */
	u32  buf_phys_addr;	/* Physical addr of transmitted buffer */
	u32  reserved2;		/* hw_cmd - (for future use, PMT) */
	u32  reserved3[4];	/* Reserved - (for future use) */
};

struct mvneta_rx_desc {
	u32  status;		/* Info about received packet */
	u16  reserved1;		/* pnc_info - (for future use, PnC) */
	u16  data_size;		/* Size of received packet in bytes */

	u32  buf_phys_addr;	/* Physical address of the buffer */
	u32  reserved2;		/* pnc_flow_id (for future use, PnC) */

	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */
	u16  reserved3;		/* prefetch_cmd, for future use */
	u16  reserved4;		/* csum_l4 - (for future use, PnC) */

	u32  reserved5;		/* pnc_extra PnC (for future use, PnC) */
	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF) */
};
#else
struct mvneta_tx_desc {
	u16  data_size;		/* Data size of transmitted packet in bytes */
	u16  reserverd1;	/* csum_l4 (for future use) */
	u32  command;		/* Options used by HW for packet transmitting.*/
	u32  reserved2;		/* hw_cmd - (for future use, PMT) */
	u32  buf_phys_addr;	/* Physical addr of transmitted buffer */
	u32  reserved3[4];	/* Reserved - (for future use) */
};

struct mvneta_rx_desc {
	u16  data_size;		/* Size of received packet in bytes */
	u16  reserved1;		/* pnc_info - (for future use, PnC) */
	u32  status;		/* Info about received packet */

	u32  reserved2;		/* pnc_flow_id (for future use, PnC) */
	u32  buf_phys_addr;	/* Physical address of the buffer */

	u16  reserved4;		/* csum_l4 - (for future use, PnC) */
	u16  reserved3;		/* prefetch_cmd, for future use */
	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */

	u32  reserved5;		/* pnc_extra PnC (for future use, PnC) */
	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF) */
};
#endif
struct mvneta_tx_queue {
	/* Number of this TX queue, in the range 0-7 */
	u8 id;

	/* Number of TX DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used TX DMA descriptor in the
	 * descriptor ring
	 */
	int count;
	int tx_stop_threshold;
	int tx_wake_threshold;

	/* Array of transmitted skb */
	struct sk_buff **tx_skb;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;

	u32 done_pkts_coal;

	/* Virtual address of the TX DMA descriptors array */
	struct mvneta_tx_desc *descs;

	/* DMA address of the TX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last TX DMA descriptor */
	int last_desc;

	/* Index of the next TX DMA descriptor to process */
	int next_desc_to_proc;

	/* DMA buffers for TSO headers */
	char *tso_hdrs;

	/* DMA address of TSO headers */
	dma_addr_t tso_hdrs_phys;
};
struct mvneta_rx_queue {
	/* rx queue number, in the range 0-7 */
	u8 id;

	/* num of rx descriptors in the rx descriptor ring */
	int size;

	/* counter of times when mvneta_refill() failed */
	int missed;

	u32 pkts_coal;
	u32 time_coal;

	/* Virtual address of the RX DMA descriptors array */
	struct mvneta_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;
};
/* The hardware supports eight (8) rx queues, but we are only allowing
 * the first one to be used. Therefore, let's just allocate one queue.
 */
static int rxq_number = 1;
static int txq_number = 8;

static int rxq_def;

static int rx_copybreak __read_mostly = 256;

#define MVNETA_DRIVER_NAME "mvneta"
#define MVNETA_DRIVER_VERSION "1.0"
/* Utility/helper methods */

/* Write helper method */
static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
{
	writel(data, pp->base + offset);
}

/* Read helper method */
static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
{
	return readl(pp->base + offset);
}
/* Increment txq get counter */
static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
{
	txq->txq_get_index++;
	if (txq->txq_get_index == txq->size)
		txq->txq_get_index = 0;
}

/* Increment txq put counter */
static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
{
	txq->txq_put_index++;
	if (txq->txq_put_index == txq->size)
		txq->txq_put_index = 0;
}
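
/* txq_put_index and txq_get_index form a classic producer/consumer ring:
 * the xmit path advances the put index as it fills descriptors, the
 * tx-done path advances the get index as it frees them, and each wraps
 * back to 0 on reaching txq->size.
 */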
/* Clear all MIB counters */
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{
	int i;
	u32 dummy;

	/* Perform dummy reads from MIB counters */
	for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
		dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
}
/* Get System Network Statistics */
struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
					     struct rtnl_link_stats64 *stats)
{
	struct mvneta_port *pp = netdev_priv(dev);
	unsigned int start;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mvneta_pcpu_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 tx_packets;
		u64 tx_bytes;

		cpu_stats = per_cpu_ptr(pp->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rx_packets = cpu_stats->rx_packets;
			rx_bytes   = cpu_stats->rx_bytes;
			tx_packets = cpu_stats->tx_packets;
			tx_bytes   = cpu_stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes   += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes   += tx_bytes;
	}

	stats->rx_errors	= dev->stats.rx_errors;
	stats->rx_dropped	= dev->stats.rx_dropped;

	stats->tx_dropped	= dev->stats.tx_dropped;

	return stats;
}
/* Rx descriptors helper methods */

/* Checks whether the RX descriptor having this status is both the first
 * and the last descriptor for the RX packet. Each RX packet is currently
 * received through a single RX descriptor, so not having each RX
 * descriptor with its first and last bits set is an error
 */
static int mvneta_rxq_desc_is_first_last(u32 status)
{
	return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
		MVNETA_RXD_FIRST_LAST_DESC;
}
/* Add number of descriptors ready to receive new packets */
static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
					  struct mvneta_rx_queue *rxq,
					  int ndescs)
{
	/* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
	 * be added at once
	 */
	while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
			    (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
			     MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
		ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
	}

	mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
		    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
}
/* Get number of RX descriptors occupied by received packets */
static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
					struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
	return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
}
/* Update num of rx desc called upon return from rx path or
 * from mvneta_rxq_drop_pkts().
 */
static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
				       struct mvneta_rx_queue *rxq,
				       int rx_done, int rx_filled)
{
	u32 val;

	if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
		val = rx_done |
		  (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
		return;
	}

	/* Only 255 descriptors can be added at once */
	while ((rx_done > 0) || (rx_filled > 0)) {
		if (rx_done <= 0xff) {
			val = rx_done;
			rx_done = 0;
		} else {
			val = 0xff;
			rx_done -= 0xff;
		}
		if (rx_filled <= 0xff) {
			val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled = 0;
		} else {
			val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled -= 0xff;
		}
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
	}
}
/* Get pointer to next RX descriptor to be processed by SW */
static struct mvneta_rx_desc *
mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}
/* Change maximum receive size of the port. */
static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
{
	u32 val;

	val =  mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
	val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
		MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}
/* Set rx queue offset */
static void mvneta_rxq_offset_set(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq,
				  int offset)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;

	val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}
/* Tx descriptors helper methods */

/* Update HW with number of TX descriptors to be sent */
static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int pend_desc)
{
	u32 val;

	/* Only 255 descriptors can be added at once; assume the caller
	 * processes TX descriptors in quanta less than 256
	 */
	val = pend_desc;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}
/* Get pointer to next TX descriptor to be processed (send) by HW */
static struct mvneta_tx_desc *
mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}
/* Release the last allocated TX descriptor. Useful to handle DMA
 * mapping failures in the TX path.
 */
static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
{
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
}
/* Set rxq buf size */
static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq,
				    int buf_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));

	val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
	val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
}
/* Disable buffer management (BM) */
static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}
/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{
	int queue;
	u32 q_map;

	/* Enable all initialized TXs. */
	mvneta_mib_counters_clear(pp);
	q_map = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		if (txq->descs != NULL)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);

	/* Enable all initialized RXQs. */
	q_map = 0;
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		if (rxq->descs != NULL)
			q_map |= (1 << queue);
	}

	mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
}
/* Stop the Ethernet port activity */
static void mvneta_port_down(struct mvneta_port *pp)
{
	u32 val;
	int count;

	/* Stop Rx port activity. Check port Rx activity. */
	val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;

	/* Issue stop command for active channels only */
	if (val != 0)
		mvreg_write(pp, MVNETA_RXQ_CMD,
			    val << MVNETA_RXQ_DISABLE_SHIFT);

	/* Wait for all Rx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_RXQ_CMD);
	} while (val & 0xff);

	/* Stop Tx port activity. Check port Tx activity. Issue stop
	 * command for active channels only
	 */
	val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;

	if (val != 0)
		mvreg_write(pp, MVNETA_TXQ_CMD,
			    (val << MVNETA_TXQ_DISABLE_SHIFT));

	/* Wait for all Tx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for TX stopped status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		/* Check TX Command reg that all Txqs are stopped */
		val = mvreg_read(pp, MVNETA_TXQ_CMD);

	} while (val & 0xff);

	/* Double check to verify that TX FIFO is empty */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
			netdev_warn(pp->dev,
				    "TX FIFO empty timeout status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_PORT_STATUS);
	} while (!(val & MVNETA_TX_FIFO_EMPTY) &&
		 (val & MVNETA_TX_IN_PRGRS));
}
/* Enable the port by setting the port enable bit of the MAC control register */
static void mvneta_port_enable(struct mvneta_port *pp)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val |= MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}
/* Disable the port and wait for about 200 usec before returning */
static void mvneta_port_disable(struct mvneta_port *pp)
{
	u32 val;

	/* Reset the Enable bit in the Serial Control Register */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);

	udelay(200);
}
/* Multicast tables methods */

/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
}

/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
}

/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
		val = 0;
	} else {
		memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
}
/* This method sets defaults to the NETA port:
 *	Clears interrupt Cause and Mask registers.
 *	Clears all MAC tables.
 *	Sets defaults to all registers.
 *	Resets RX and TX descriptor rings.
 *
 * This method can be called after mvneta_port_down() to return the port
 * settings to defaults.
 */
static void mvneta_defaults_set(struct mvneta_port *pp)
{
	int cpu;
	int queue;
	u32 val;

	/* Clear all Cause registers */
	mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);

	/* Mask all interrupts */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_ENABLE, 0);

	/* Enable MBUS Retry bit16 */
	mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);

	/* Set CPU queue access map - all CPUs have access to all RX
	 * queues and to all TX queues
	 */
	for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++)
		mvreg_write(pp, MVNETA_CPU_MAP(cpu),
			    (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
			     MVNETA_CPU_TXQ_ACCESS_ALL_MASK));

	/* Reset RX and TX DMAs */
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
	for (queue = 0; queue < txq_number; queue++) {
		mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
		mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
	}

	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);

	/* Set Port Acceleration Mode */
	val = MVNETA_ACC_MODE_EXT;
	mvreg_write(pp, MVNETA_ACC_MODE, val);

	/* Update val of portCfg register accordingly with all RxQueue types */
	val = MVNETA_PORT_CONFIG_DEFL_VALUE(rxq_def);
	mvreg_write(pp, MVNETA_PORT_CONFIG, val);

	val = 0;
	mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
	mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);

	/* Build PORT_SDMA_CONFIG_REG */
	val = 0;

	/* Default burst size */
	val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;

#if defined(__BIG_ENDIAN)
	val |= MVNETA_DESC_SWAP;
#endif

	/* Assign port SDMA configuration */
	mvreg_write(pp, MVNETA_SDMA_CONFIG, val);

	/* Disable PHY polling in hardware, since we're using the
	 * kernel phylib to do this.
	 */
	val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
	val &= ~MVNETA_PHY_POLLING_ENABLE;
	mvreg_write(pp, MVNETA_UNIT_CONTROL, val);

	mvneta_set_ucast_table(pp, -1);
	mvneta_set_special_mcast_table(pp, -1);
	mvneta_set_other_mcast_table(pp, -1);

	/* Set port interrupt enable register - default enable all */
	mvreg_write(pp, MVNETA_INTR_ENABLE,
		    (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
		     | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));
}
/* Set max sizes for tx queues */
static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
{
	u32 val, size, mtu;
	int queue;

	mtu = max_tx_size * 8;
	if (mtu > MVNETA_TX_MTU_MAX)
		mtu = MVNETA_TX_MTU_MAX;

	/* Set MTU */
	val = mvreg_read(pp, MVNETA_TX_MTU);
	val &= ~MVNETA_TX_MTU_MAX;
	val |= mtu;
	mvreg_write(pp, MVNETA_TX_MTU, val);

	/* TX token size and all TXQs token size must be larger than the MTU */
	val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);

	size = val & MVNETA_TX_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
		val |= size;
		mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
	}
	for (queue = 0; queue < txq_number; queue++) {
		val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));

		size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
		if (size < mtu) {
			size = mtu;
			val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
		}
	}
}
/* Set unicast address */
static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
				  int queue)
{
	unsigned int unicast_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Locate the Unicast table entry */
	last_nibble = (0xf & last_nibble);

	/* offset from unicast tbl base */
	tbl_offset = (last_nibble / 4) * 4;

	/* offset within the above reg */
	reg_offset = last_nibble % 4;

	unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));

	if (queue == -1) {
		/* Clear accepts frame bit at specified unicast DA tbl entry */
		unicast_reg &= ~(0xff << (8 * reg_offset));
	} else {
		unicast_reg &= ~(0xff << (8 * reg_offset));
		unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
}
/* Set mac address */
static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
				int queue)
{
	unsigned int mac_h;
	unsigned int mac_l;

	if (queue != -1) {
		mac_l = (addr[4] << 8) | (addr[5]);
		mac_h = (addr[0] << 24) | (addr[1] << 16) |
			(addr[2] << 8) | (addr[3] << 0);

		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
	}

	/* Accept frames of this address */
	mvneta_set_ucast_addr(pp, addr[5], queue);
}
/* Set the number of packets that will be received before RX interrupt
 * will be generated by HW.
 */
static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{
	mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
		    value | MVNETA_RXQ_NON_OCCUPIED(0));
	rxq->pkts_coal = value;
}
/* Set the time delay in usec before RX interrupt will be generated by
 * HW.
 */
static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{
	u32 val;
	unsigned long clk_rate;

	clk_rate = clk_get_rate(pp->clk);
	val = (clk_rate / 1000000) * value;

	mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
	rxq->time_coal = value;
}
/* Set threshold for TX_DONE pkts coalescing */
static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
					 struct mvneta_tx_queue *txq, u32 value)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));

	val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
	val |= MVNETA_TXQ_SENT_THRESH_MASK(value);

	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);

	txq->done_pkts_coal = value;
}
/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
				u32 phys_addr, u32 cookie)
{
	rx_desc->buf_cookie = cookie;
	rx_desc->buf_phys_addr = phys_addr;
}
/* Decrement sent descriptors counter */
static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int sent_desc)
{
	u32 val;

	/* Only 255 TX descriptors can be updated at once */
	while (sent_desc > 0xff) {
		val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
		sent_desc = sent_desc - 0xff;
	}

	val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}
/* Get number of TX descriptors already sent by HW */
static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
					struct mvneta_tx_queue *txq)
{
	u32 val;
	int sent_desc;

	val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
	sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
		MVNETA_TXQ_SENT_DESC_SHIFT;

	return sent_desc;
}
/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 */
static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq)
{
	int sent_desc;

	/* Get number of sent descriptors */
	sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);

	/* Decrement sent descriptors counter */
	if (sent_desc)
		mvneta_txq_sent_desc_dec(pp, txq, sent_desc);

	return sent_desc;
}
/* Set TXQ descriptors fields relevant for CSUM calculation */
static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
				int ip_hdr_len, int l4_proto)
{
	u32 command;

	/* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
	 * G_L4_chk, L4_type; required only for checksum
	 * calculation
	 */
	command =  l3_offs    << MVNETA_TX_L3_OFF_SHIFT;
	command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;

	if (l3_proto == htons(ETH_P_IP))
		command |= MVNETA_TXD_IP_CSUM;
	else
		command |= MVNETA_TX_L3_IP6;

	if (l4_proto == IPPROTO_TCP)
		command |= MVNETA_TX_L4_CSUM_FULL;
	else if (l4_proto == IPPROTO_UDP)
		command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
	else
		command |= MVNETA_TX_L4_CSUM_NOT;

	return command;
}
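
/* Worked example: for a TCP segment in an untagged IPv4 frame, l3_offs
 * is 14 (the Ethernet header) and a 20 byte IP header gives
 * ip_hdr_len = 5 (in 32-bit words, as passed by mvneta_skb_tx_csum), so
 * command = 14 | (5 << 8) | MVNETA_TXD_IP_CSUM | MVNETA_TX_L4_CSUM_FULL.
 */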
/* Display more error info */
static void mvneta_rx_error(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc)
{
	u32 status = rx_desc->status;

	if (!mvneta_rxq_desc_is_first_last(status)) {
		netdev_err(pp->dev,
			   "bad rx status %08x (buffer oversize), size=%d\n",
			   status, rx_desc->data_size);
		return;
	}

	switch (status & MVNETA_RXD_ERR_CODE_MASK) {
	case MVNETA_RXD_ERR_CRC:
		netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_OVERRUN:
		netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_LEN:
		netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_RESOURCE:
		netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	}
}
/* Handle RX checksum offload based on the descriptor's status */
static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
			   struct sk_buff *skb)
{
	if ((status & MVNETA_RXD_L3_IP4) &&
	    (status & MVNETA_RXD_L4_CSUM_OK)) {
		skb->csum = 0;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		return;
	}

	skb->ip_summed = CHECKSUM_NONE;
}
/* Return tx queue pointer (find last set bit) according to <cause> returned
 * from tx_done reg. <cause> must not be null. The return value is always a
 * valid queue for matching the first one found in <cause>.
 */
static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
						     u32 cause)
{
	int queue = fls(cause) - 1;

	return &pp->txqs[queue];
}
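
/* For example, with cause == 0x06 (TXQs 1 and 2 pending) fls() returns 3,
 * so the highest pending queue, txq 2, is handled first; the caller clears
 * its bit and loops until the cause word is empty.
 */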
/* Free tx queue skbuffs */
static void mvneta_txq_bufs_free(struct mvneta_port *pp,
				 struct mvneta_tx_queue *txq, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		struct mvneta_tx_desc *tx_desc = txq->descs +
			txq->txq_get_index;
		struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];

		mvneta_txq_inc_get(txq);

		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
			dma_unmap_single(pp->dev->dev.parent,
					 tx_desc->buf_phys_addr,
					 tx_desc->data_size, DMA_TO_DEVICE);
		if (!skb)
			continue;
		dev_kfree_skb_any(skb);
	}
}
/* Handle end of transmission */
static void mvneta_txq_done(struct mvneta_port *pp,
			    struct mvneta_tx_queue *txq)
{
	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
	int tx_done;

	tx_done = mvneta_txq_sent_desc_proc(pp, txq);
	if (!tx_done)
		return;

	mvneta_txq_bufs_free(pp, txq, tx_done);

	txq->count -= tx_done;

	if (netif_tx_queue_stopped(nq)) {
		if (txq->count <= txq->tx_wake_threshold)
			netif_tx_wake_queue(nq);
	}
}
static void *mvneta_frag_alloc(const struct mvneta_port *pp)
{
	if (likely(pp->frag_size <= PAGE_SIZE))
		return netdev_alloc_frag(pp->frag_size);
	else
		return kmalloc(pp->frag_size, GFP_ATOMIC);
}

static void mvneta_frag_free(const struct mvneta_port *pp, void *data)
{
	if (likely(pp->frag_size <= PAGE_SIZE))
		put_page(virt_to_head_page(data));
	else
		kfree(data);
}
/* Refill processing */
static int mvneta_rx_refill(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc)
{
	dma_addr_t phys_addr;
	void *data;

	data = mvneta_frag_alloc(pp);
	if (!data)
		return -ENOMEM;

	phys_addr = dma_map_single(pp->dev->dev.parent, data,
				   MVNETA_RX_BUF_SIZE(pp->pkt_size),
				   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
		mvneta_frag_free(pp, data);
		return -ENOMEM;
	}

	mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)data);
	return 0;
}
/* Handle tx checksum */
static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int ip_hdr_len = 0;
		u8 l4_proto;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *ip4h = ip_hdr(skb);

			/* Calculate IPv4 checksum and L4 checksum */
			ip_hdr_len = ip4h->ihl;
			l4_proto = ip4h->protocol;
		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			struct ipv6hdr *ip6h = ipv6_hdr(skb);

			/* Read l4_protocol from one of IPv6 extra headers */
			if (skb_network_header_len(skb) > 0)
				ip_hdr_len = (skb_network_header_len(skb) >> 2);
			l4_proto = ip6h->nexthdr;
		} else
			return MVNETA_TX_L4_CSUM_NOT;

		return mvneta_txq_desc_csum(skb_network_offset(skb),
					    skb->protocol, ip_hdr_len, l4_proto);
	}

	return MVNETA_TX_L4_CSUM_NOT;
}
/* Returns rx queue pointer (find last set bit) according to causeRxTx
 * value
 */
static struct mvneta_rx_queue *mvneta_rx_policy(struct mvneta_port *pp,
						u32 cause)
{
	int queue = fls(cause >> 8) - 1;

	return (queue < 0 || queue >= rxq_number) ? NULL : &pp->rxqs[queue];
}
/* Drop packets received by the RXQ and free buffers */
static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
				 struct mvneta_rx_queue *rxq)
{
	int rx_done, i;

	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
	for (i = 0; i < rxq->size; i++) {
		struct mvneta_rx_desc *rx_desc = rxq->descs + i;
		void *data = (void *)rx_desc->buf_cookie;

		mvneta_frag_free(pp, data);
		dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
				 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
	}

	if (rx_done)
		mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
}
/* Main rx processing */
static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
		     struct mvneta_rx_queue *rxq)
{
	struct net_device *dev = pp->dev;
	int rx_done, rx_filled;
	u32 rcvd_pkts = 0;
	u32 rcvd_bytes = 0;

	/* Get number of received packets */
	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);

	if (rx_todo > rx_done)
		rx_todo = rx_done;

	rx_done = 0;
	rx_filled = 0;

	/* Fairness NAPI loop */
	while (rx_done < rx_todo) {
		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
		struct sk_buff *skb;
		unsigned char *data;
		int rx_bytes, err;
		u32 rx_status;

		rx_done++;
		rx_filled++;
		rx_status = rx_desc->status;
		rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
		data = (unsigned char *)rx_desc->buf_cookie;

		if (!mvneta_rxq_desc_is_first_last(rx_status) ||
		    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
		err_drop_frame:
			dev->stats.rx_errors++;
			mvneta_rx_error(pp, rx_desc);
			/* leave the descriptor untouched */
			continue;
		}

		if (rx_bytes <= rx_copybreak) {
			/* better copy a small frame and not unmap the DMA region */
			skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
			if (unlikely(!skb))
				goto err_drop_frame;

			dma_sync_single_range_for_cpu(dev->dev.parent,
						      rx_desc->buf_phys_addr,
						      MVNETA_MH_SIZE + NET_SKB_PAD,
						      rx_bytes,
						      DMA_FROM_DEVICE);
			memcpy(skb_put(skb, rx_bytes),
			       data + MVNETA_MH_SIZE + NET_SKB_PAD,
			       rx_bytes);

			skb->protocol = eth_type_trans(skb, dev);
			mvneta_rx_csum(pp, rx_status, skb);
			napi_gro_receive(&pp->napi, skb);

			rcvd_pkts++;
			rcvd_bytes += rx_bytes;

			/* leave the descriptor and buffer untouched */
			continue;
		}

		skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size);
		if (!skb)
			goto err_drop_frame;

		dma_unmap_single(dev->dev.parent, rx_desc->buf_phys_addr,
				 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);

		rcvd_pkts++;
		rcvd_bytes += rx_bytes;

		/* Linux processing */
		skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
		skb_put(skb, rx_bytes);

		skb->protocol = eth_type_trans(skb, dev);

		mvneta_rx_csum(pp, rx_status, skb);

		napi_gro_receive(&pp->napi, skb);

		/* Refill processing */
		err = mvneta_rx_refill(pp, rx_desc);
		if (err) {
			netdev_err(dev, "Linux processing - Can't refill\n");
			rxq->missed++;
			rx_filled--;
		}
	}

	if (rcvd_pkts) {
		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets += rcvd_pkts;
		stats->rx_bytes   += rcvd_bytes;
		u64_stats_update_end(&stats->syncp);
	}

	/* Update rxq management counters */
	mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled);

	return rx_done;
}
static void
mvneta_tso_put_hdr(struct sk_buff *skb,
		   struct mvneta_port *pp, struct mvneta_tx_queue *txq)
{
	struct mvneta_tx_desc *tx_desc;
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

	txq->tx_skb[txq->txq_put_index] = NULL;
	tx_desc = mvneta_txq_next_desc_get(txq);
	tx_desc->data_size = hdr_len;
	tx_desc->command = mvneta_skb_tx_csum(pp, skb);
	tx_desc->command |= MVNETA_TXD_F_DESC;
	tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
				 txq->txq_put_index * TSO_HEADER_SIZE;
	mvneta_txq_inc_put(txq);
}
static int
mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
		    struct sk_buff *skb, char *data, int size,
		    bool last_tcp, bool is_last)
{
	struct mvneta_tx_desc *tx_desc;

	tx_desc = mvneta_txq_next_desc_get(txq);
	tx_desc->data_size = size;
	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
						size, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent,
		     tx_desc->buf_phys_addr))) {
		mvneta_txq_desc_put(txq);
		return -ENOMEM;
	}

	tx_desc->command = 0;
	txq->tx_skb[txq->txq_put_index] = NULL;

	if (last_tcp) {
		/* last descriptor in the TCP packet */
		tx_desc->command = MVNETA_TXD_L_DESC;

		/* last descriptor in SKB */
		if (is_last)
			txq->tx_skb[txq->txq_put_index] = skb;
	}
	mvneta_txq_inc_put(txq);
	return 0;
}
static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
			 struct mvneta_tx_queue *txq)
{
	int total_len, data_left;
	int desc_count = 0;
	struct mvneta_port *pp = netdev_priv(dev);
	struct tso_t tso;
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int i;

	/* Count needed descriptors */
	if ((txq->count + tso_count_descs(skb)) >= txq->size)
		return 0;

	if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
		pr_info("*** Is this even possible???!?!?\n");
		return 0;
	}

	/* Initialize the TSO handler, and prepare the first payload */
	tso_start(skb, &tso);

	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char *hdr;

		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;
		desc_count++;

		/* prepare packet headers: MAC + IP + TCP */
		hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);

		mvneta_tso_put_hdr(skb, pp, txq);

		while (data_left > 0) {
			int size;
			desc_count++;

			size = min_t(int, tso.size, data_left);

			if (mvneta_tso_put_data(dev, txq, skb,
						tso.data, size,
						size == data_left,
						total_len == 0))
				goto err_release;
			data_left -= size;

			tso_build_data(skb, &tso, size);
		}
	}

	return desc_count;

err_release:
	/* Release all used data descriptors; header descriptors must not
	 * be DMA-unmapped.
	 */
	for (i = desc_count - 1; i >= 0; i--) {
		struct mvneta_tx_desc *tx_desc = txq->descs + i;
		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
			dma_unmap_single(pp->dev->dev.parent,
					 tx_desc->buf_phys_addr,
					 tx_desc->data_size,
					 DMA_TO_DEVICE);
		mvneta_txq_desc_put(txq);
	}
	return 0;
}
/* Handle tx fragmentation processing */
static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
				  struct mvneta_tx_queue *txq)
{
	struct mvneta_tx_desc *tx_desc;
	int i, nr_frags = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		void *addr = page_address(frag->page.p) + frag->page_offset;

		tx_desc = mvneta_txq_next_desc_get(txq);
		tx_desc->data_size = frag->size;

		tx_desc->buf_phys_addr =
			dma_map_single(pp->dev->dev.parent, addr,
				       tx_desc->data_size, DMA_TO_DEVICE);

		if (dma_mapping_error(pp->dev->dev.parent,
				      tx_desc->buf_phys_addr)) {
			mvneta_txq_desc_put(txq);
			goto error;
		}

		if (i == nr_frags - 1) {
			/* Last descriptor */
			tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
			txq->tx_skb[txq->txq_put_index] = skb;
		} else {
			/* Descriptor in the middle: Not First, Not Last */
			tx_desc->command = 0;
			txq->tx_skb[txq->txq_put_index] = NULL;
		}
		mvneta_txq_inc_put(txq);
	}

	return 0;

error:
	/* Release all descriptors that were used to map fragments of
	 * this packet, as well as the corresponding DMA mappings
	 */
	for (i = i - 1; i >= 0; i--) {
		tx_desc = txq->descs + i;
		dma_unmap_single(pp->dev->dev.parent,
				 tx_desc->buf_phys_addr,
				 tx_desc->data_size,
				 DMA_TO_DEVICE);
		mvneta_txq_desc_put(txq);
	}

	return -ENOMEM;
}
/* Main tx processing */
static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	u16 txq_id = skb_get_queue_mapping(skb);
	struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
	struct mvneta_tx_desc *tx_desc;
	int frags = 0;
	u32 tx_cmd;

	if (!netif_running(dev))
		goto out;

	if (skb_is_gso(skb)) {
		frags = mvneta_tx_tso(skb, dev, txq);
		goto out;
	}

	frags = skb_shinfo(skb)->nr_frags + 1;

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvneta_txq_next_desc_get(txq);

	tx_cmd = mvneta_skb_tx_csum(pp, skb);

	tx_desc->data_size = skb_headlen(skb);

	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
						tx_desc->data_size,
						DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent,
				       tx_desc->buf_phys_addr))) {
		mvneta_txq_desc_put(txq);
		frags = 0;
		goto out;
	}

	if (frags == 1) {
		/* First and Last descriptor */
		tx_cmd |= MVNETA_TXD_FLZ_DESC;
		tx_desc->command = tx_cmd;
		txq->tx_skb[txq->txq_put_index] = skb;
		mvneta_txq_inc_put(txq);
	} else {
		/* First but not Last */
		tx_cmd |= MVNETA_TXD_F_DESC;
		txq->tx_skb[txq->txq_put_index] = NULL;
		mvneta_txq_inc_put(txq);
		tx_desc->command = tx_cmd;
		/* Continue with other skb fragments */
		if (mvneta_tx_frag_process(pp, skb, txq)) {
			dma_unmap_single(dev->dev.parent,
					 tx_desc->buf_phys_addr,
					 tx_desc->data_size,
					 DMA_TO_DEVICE);
			mvneta_txq_desc_put(txq);
			frags = 0;
			goto out;
		}
	}

out:
	if (frags > 0) {
		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);

		txq->count += frags;
		mvneta_txq_pend_desc_add(pp, txq, frags);

		if (txq->count >= txq->tx_stop_threshold)
			netif_tx_stop_queue(nq);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes  += skb->len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	return NETDEV_TX_OK;
}
/* Free tx resources, when resetting a port */
static void mvneta_txq_done_force(struct mvneta_port *pp,
				  struct mvneta_tx_queue *txq)
{
	int tx_done = txq->count;

	mvneta_txq_bufs_free(pp, txq, tx_done);

	/* reset txq */
	txq->count = 0;
	txq->txq_put_index = 0;
	txq->txq_get_index = 0;
}
/* Handle tx done - called in softirq context. The <cause_tx_done> argument
 * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL.
 */
static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
{
	struct mvneta_tx_queue *txq;
	struct netdev_queue *nq;

	while (cause_tx_done) {
		txq = mvneta_tx_done_policy(pp, cause_tx_done);

		nq = netdev_get_tx_queue(pp->dev, txq->id);
		__netif_tx_lock(nq, smp_processor_id());

		if (txq->count)
			mvneta_txq_done(pp, txq);

		__netif_tx_unlock(nq);
		cause_tx_done &= ~((1 << txq->id));
	}
}
/* Compute crc8 of the specified address, using a unique algorithm,
 * according to hw spec, different than generic crc8 algorithm
 */
static int mvneta_addr_crc(unsigned char *addr)
{
	int crc = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		int j;

		crc = (crc ^ addr[i]) << 8;
		for (j = 7; j >= 0; j--) {
			if (crc & (0x100 << j))
				crc ^= 0x107 << j;
		}
	}

	return crc;
}
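
/* The 0x107 constant above is the CRC-8 generator polynomial
 * x^8 + x^2 + x + 1; each pass of the inner loop reduces the shifted
 * remainder one bit at a time, most significant bit first.
 */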
/* This method controls the net device special MAC multicast support.
 * The Special Multicast Table for MAC addresses supports MAC of the form
 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
 * Table entries in the DA-Filter table. This method sets the Special
 * Multicast Table appropriate entry.
 */
static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
					  unsigned char last_byte,
					  int queue)
{
	unsigned int smc_table_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Register offset from SMC table base */
	tbl_offset = (last_byte / 4);
	/* Entry offset within the above reg */
	reg_offset = last_byte % 4;

	smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
					+ tbl_offset * 4));

	if (queue == -1) {
		/* Clear accepts frame bit at specified Special DA tbl entry */
		smc_table_reg &= ~(0xff << (8 * reg_offset));
	} else {
		smc_table_reg &= ~(0xff << (8 * reg_offset));
		smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
		    smc_table_reg);
}
/* This method controls the network device Other MAC multicast support.
 * The Other Multicast Table is used for multicast of another type.
 * A CRC-8 is used as an index to the Other Multicast Table entries
 * in the DA-Filter table.
 * The method gets the CRC-8 value from the calling routine and
 * sets the Other Multicast Table appropriate entry according to the
 * specified CRC-8.
 */
static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
					unsigned char crc8,
					int queue)
{
	unsigned int omc_table_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
	reg_offset = crc8 % 4; /* Entry offset within the above reg */

	omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);

	if (queue == -1) {
		/* Clear accepts frame bit at specified Other DA table entry */
		omc_table_reg &= ~(0xff << (8 * reg_offset));
	} else {
		omc_table_reg &= ~(0xff << (8 * reg_offset));
		omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
}
/* The network device supports multicast using two tables:
 *    1) Special Multicast Table for MAC addresses of the form
 *       0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
 *       The MAC DA[7:0] bits are used as a pointer to the Special Multicast
 *       Table entries in the DA-Filter table.
 *    2) Other Multicast Table for multicast of another type. A CRC-8 value
 *       is used as an index to the Other Multicast Table entries in the
 *       DA-Filter table.
 */
static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
				 int queue)
{
	unsigned char crc_result = 0;

	if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
		mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
		return 0;
	}

	crc_result = mvneta_addr_crc(p_addr);
	if (queue == -1) {
		if (pp->mcast_count[crc_result] == 0) {
			netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
				    crc_result);
			return -EINVAL;
		}

		pp->mcast_count[crc_result]--;
		if (pp->mcast_count[crc_result] != 0) {
			netdev_info(pp->dev,
				    "After delete there are %d valid Mcast for crc8=0x%02x\n",
				    pp->mcast_count[crc_result], crc_result);
			return -EINVAL;
		}
	} else
		pp->mcast_count[crc_result]++;

	mvneta_set_other_mcast_addr(pp, crc_result, queue);

	return 0;
}
/* Configure Filtering mode of Ethernet port */
static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
					  int is_promisc)
{
	u32 port_cfg_reg, val;

	port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);

	val = mvreg_read(pp, MVNETA_TYPE_PRIO);

	/* Set / Clear UPM bit in port configuration register */
	if (is_promisc) {
		/* Accept all Unicast addresses */
		port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
		val |= MVNETA_FORCE_UNI;
		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
	} else {
		/* Reject all Unicast addresses */
		port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
		val &= ~MVNETA_FORCE_UNI;
	}

	mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
	mvreg_write(pp, MVNETA_TYPE_PRIO, val);
}
/* register unicast and multicast addresses */
static void mvneta_set_rx_mode(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	struct netdev_hw_addr *ha;

	if (dev->flags & IFF_PROMISC) {
		/* Accept all: Multicast + Unicast */
		mvneta_rx_unicast_promisc_set(pp, 1);
		mvneta_set_ucast_table(pp, rxq_def);
		mvneta_set_special_mcast_table(pp, rxq_def);
		mvneta_set_other_mcast_table(pp, rxq_def);
	} else {
		/* Accept single Unicast */
		mvneta_rx_unicast_promisc_set(pp, 0);
		mvneta_set_ucast_table(pp, -1);
		mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);

		if (dev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			mvneta_set_special_mcast_table(pp, rxq_def);
			mvneta_set_other_mcast_table(pp, rxq_def);
		} else {
			/* Accept only initialized multicast */
			mvneta_set_special_mcast_table(pp, -1);
			mvneta_set_other_mcast_table(pp, -1);

			if (!netdev_mc_empty(dev)) {
				netdev_for_each_mc_addr(ha, dev) {
					mvneta_mcast_addr_set(pp, ha->addr,
							      rxq_def);
				}
			}
		}
	}
}
/* Interrupt handling - the callback for request_irq() */
static irqreturn_t mvneta_isr(int irq, void *dev_id)
{
	struct mvneta_port *pp = (struct mvneta_port *)dev_id;

	/* Mask all interrupts */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);

	napi_schedule(&pp->napi);

	return IRQ_HANDLED;
}
2044 * Bits 0 - 7 of the causeRxTx register indicate that are transmitted
2045 * packets on the corresponding TXQ (Bit 0 is for TX queue 1).
2046 * Bits 8 -15 of the cause Rx Tx register indicate that are received
2047 * packets on the corresponding RXQ (Bit 8 is for RX queue 0).
2048 * Each CPU has its own causeRxTx register
static int mvneta_poll(struct napi_struct *napi, int budget)
{
	int rx_done = 0;
	u32 cause_rx_tx;
	unsigned long flags;
	struct mvneta_port *pp = netdev_priv(napi->dev);

	if (!netif_running(pp->dev)) {
		napi_complete(napi);
		return rx_done;
	}

	/* Read cause register */
	cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE) &
		(MVNETA_RX_INTR_MASK(rxq_number) |
		 MVNETA_TX_INTR_MASK(txq_number));

	/* Release Tx descriptors */
	if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
		mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
		cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
	}

	/* For the case where the last mvneta_poll did not process all
	 * RX packets
	 */
	cause_rx_tx |= pp->cause_rx_tx;
	if (rxq_number > 1) {
		while ((cause_rx_tx & MVNETA_RX_INTR_MASK_ALL) && (budget > 0)) {
			int count;
			struct mvneta_rx_queue *rxq;
			/* get rx queue number from cause_rx_tx */
			rxq = mvneta_rx_policy(pp, cause_rx_tx);
			if (!rxq)
				break;

			/* process the packet in that rx queue */
			count = mvneta_rx(pp, budget, rxq);
			rx_done += count;
			budget -= count;
			if (budget > 0) {
				/* clear the bit for this rx queue in
				 * the cause register, so that the next
				 * iteration will find the next rx queue
				 * where packets are pending
				 */
				cause_rx_tx &= ~((1 << rxq->id) << 8);
			}
		}
	} else {
		rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]);
		budget -= rx_done;
	}

	if (budget > 0) {
		cause_rx_tx = 0;
		napi_complete(napi);
		local_irq_save(flags);
		mvreg_write(pp, MVNETA_INTR_NEW_MASK,
			    MVNETA_RX_INTR_MASK(rxq_number) |
			    MVNETA_TX_INTR_MASK(txq_number));
		local_irq_restore(flags);
	}

	pp->cause_rx_tx = cause_rx_tx;
	return rx_done;
}
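/* Illustrative helper (hypothetical, not used by the driver): given the
 * causeRxTx layout documented above, the bitmap of RX queues that still
 * have pending packets can be derived as follows.
 */
static inline u32 mvneta_pending_rxqs(u32 cause_rx_tx)
{
	/* RX cause bits occupy bits 8-15, one bit per RX queue */
	return (cause_rx_tx >> 8) & 0xff;
}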
/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
			   int num)
{
	int i;

	for (i = 0; i < num; i++) {
		memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
		if (mvneta_rx_refill(pp, rxq->descs + i) != 0) {
			netdev_err(pp->dev, "%s:rxq %d, %d of %d buffs filled\n",
				   __func__, rxq->id, i, num);
			break;
		}
	}

	/* Add this number of RX descriptors as non occupied (ready to
	 * get packets)
	 */
	mvneta_rxq_non_occup_desc_add(pp, rxq, i);

	return i;
}
/* Free all packets pending transmit from all TXQs and reset TX port */
static void mvneta_tx_reset(struct mvneta_port *pp)
{
	int queue;

	/* free the skb's in the tx ring */
	for (queue = 0; queue < txq_number; queue++)
		mvneta_txq_done_force(pp, &pp->txqs[queue]);

	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
}
static void mvneta_rx_reset(struct mvneta_port *pp)
{
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
}
/* Rx/Tx queue initialization/cleanup methods */

/* Create a specified RX queue */
static int mvneta_rxq_init(struct mvneta_port *pp,
			   struct mvneta_rx_queue *rxq)
{
	rxq->size = pp->rx_ring_size;

	/* Allocate memory for RX descriptors */
	rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
					rxq->size * MVNETA_DESC_ALIGNED_SIZE,
					&rxq->descs_phys, GFP_KERNEL);
	if (rxq->descs == NULL)
		return -ENOMEM;

	/* Make sure descriptor address is cache line size aligned */
	BUG_ON(rxq->descs !=
	       PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));

	rxq->last_desc = rxq->size - 1;

	/* Set Rx descriptors queue starting address */
	mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);

	/* Set Offset */
	mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD);

	/* Set coalescing pkts and time */
	mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
	mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);

	/* Fill RXQ with buffers from RX pool */
	mvneta_rxq_buf_size_set(pp, rxq, MVNETA_RX_BUF_SIZE(pp->pkt_size));
	mvneta_rxq_bm_disable(pp, rxq);
	mvneta_rxq_fill(pp, rxq, rxq->size);

	return 0;
}
/* Cleanup Rx queue */
static void mvneta_rxq_deinit(struct mvneta_port *pp,
			      struct mvneta_rx_queue *rxq)
{
	mvneta_rxq_drop_pkts(pp, rxq);

	if (rxq->descs)
		dma_free_coherent(pp->dev->dev.parent,
				  rxq->size * MVNETA_DESC_ALIGNED_SIZE,
				  rxq->descs, rxq->descs_phys);

	rxq->descs             = NULL;
	rxq->last_desc         = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_phys        = 0;
}
/* Create and initialize a tx queue */
static int mvneta_txq_init(struct mvneta_port *pp,
			   struct mvneta_tx_queue *txq)
{
	txq->size = pp->tx_ring_size;

	/* A queue must always have room for at least one skb.
	 * Therefore, stop the queue when the number of free entries
	 * reaches the maximum number of descriptors per skb.
	 */
	txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
	txq->tx_wake_threshold = txq->tx_stop_threshold / 2;

	/* Allocate memory for TX descriptors */
	txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
					txq->size * MVNETA_DESC_ALIGNED_SIZE,
					&txq->descs_phys, GFP_KERNEL);
	if (txq->descs == NULL)
		return -ENOMEM;

	/* Make sure descriptor address is cache line size aligned */
	BUG_ON(txq->descs !=
	       PTR_ALIGN(txq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));

	txq->last_desc = txq->size - 1;

	/* Set maximum bandwidth for enabled TXQs */
	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);

	/* Set Tx descriptors queue starting address */
	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);

	txq->tx_skb = kmalloc(txq->size * sizeof(*txq->tx_skb), GFP_KERNEL);
	if (txq->tx_skb == NULL) {
		dma_free_coherent(pp->dev->dev.parent,
				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_phys);
		return -ENOMEM;
	}

	/* Allocate DMA buffers for TSO MAC/IP/TCP headers */
	txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
					   txq->size * TSO_HEADER_SIZE,
					   &txq->tso_hdrs_phys, GFP_KERNEL);
	if (txq->tso_hdrs == NULL) {
		kfree(txq->tx_skb);
		dma_free_coherent(pp->dev->dev.parent,
				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_phys);
		return -ENOMEM;
	}
	mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);

	return 0;
}
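/* Worked example for the thresholds above (numbers hypothetical): with
 * tx_ring_size = 512, tx_stop_threshold is 512 - MVNETA_MAX_SKB_DESCS,
 * so the queue is stopped while there is still room for one worst-case
 * (maximally fragmented TSO) skb; tx_wake_threshold is half of that,
 * which keeps the queue from being toggled on every reclaimed
 * descriptor.
 */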
/* Free the resources allocated for a tx queue; also used to unwind a
 * partially-failed mvneta_txq_init()
 */
static void mvneta_txq_deinit(struct mvneta_port *pp,
			      struct mvneta_tx_queue *txq)
{
	kfree(txq->tx_skb);

	if (txq->tso_hdrs)
		dma_free_coherent(pp->dev->dev.parent,
				  txq->size * TSO_HEADER_SIZE,
				  txq->tso_hdrs, txq->tso_hdrs_phys);
	if (txq->descs)
		dma_free_coherent(pp->dev->dev.parent,
				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_phys);

	txq->descs             = NULL;
	txq->last_desc         = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_phys        = 0;

	/* Set minimum bandwidth for disabled TXQs */
	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);

	/* Set Tx descriptors queue starting address and size */
	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
}
/* Cleanup all Tx queues */
static void mvneta_cleanup_txqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < txq_number; queue++)
		mvneta_txq_deinit(pp, &pp->txqs[queue]);
}
/* Cleanup all Rx queues */
static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++)
		mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
}
/* Init all Rx queues */
static int mvneta_setup_rxqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++) {
		int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
		if (err) {
			netdev_err(pp->dev, "%s: can't create rxq=%d\n",
				   __func__, queue);
			mvneta_cleanup_rxqs(pp);
			return err;
		}
	}

	return 0;
}
/* Init all tx queues */
static int mvneta_setup_txqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < txq_number; queue++) {
		int err = mvneta_txq_init(pp, &pp->txqs[queue]);
		if (err) {
			netdev_err(pp->dev, "%s: can't create txq=%d\n",
				   __func__, queue);
			mvneta_cleanup_txqs(pp);
			return err;
		}
	}

	return 0;
}
static void mvneta_start_dev(struct mvneta_port *pp)
{
	mvneta_max_rx_size_set(pp, pp->pkt_size);
	mvneta_txq_max_tx_size_set(pp, pp->pkt_size);

	/* start the Rx/Tx activity */
	mvneta_port_enable(pp);

	/* Enable polling on the port */
	napi_enable(&pp->napi);

	/* Unmask interrupts */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK,
		    MVNETA_RX_INTR_MASK(rxq_number) |
		    MVNETA_TX_INTR_MASK(txq_number));

	phy_start(pp->phy_dev);
	netif_tx_start_all_queues(pp->dev);
}
static void mvneta_stop_dev(struct mvneta_port *pp)
{
	phy_stop(pp->phy_dev);

	napi_disable(&pp->napi);

	netif_carrier_off(pp->dev);

	mvneta_port_down(pp);
	netif_tx_stop_all_queues(pp->dev);

	/* Stop the port activity */
	mvneta_port_disable(pp);

	/* Clear all ethernet port interrupts */
	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);

	/* Mask all ethernet port interrupts */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);

	mvneta_tx_reset(pp);
	mvneta_rx_reset(pp);
}
/* Return positive if MTU is valid */
static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
{
	if (mtu < 68) {
		netdev_err(dev, "cannot change mtu to less than 68\n");
		return -EINVAL;
	}

	/* 9676 == 9700 - 20 and rounding to 8 */
	if (mtu > 9676) {
		netdev_info(dev, "Illegal MTU value %d, round to 9676\n", mtu);
		mtu = 9676;
	}

	if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
		netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
			    mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
		mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
	}

	return mtu;
}
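/* Example of the clamping above: a request for mtu = 9800 is logged and
 * replaced by 9676; smaller values are only adjusted when
 * MVNETA_RX_PKT_SIZE(mtu) is not a multiple of 8.
 */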
/* Change the device mtu */
static int mvneta_change_mtu(struct net_device *dev, int mtu)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int ret;

	mtu = mvneta_check_mtu_valid(dev, mtu);
	if (mtu < 0)
		return -EINVAL;

	dev->mtu = mtu;

	if (!netif_running(dev))
		return 0;

	/* The interface is running, so we have to force a
	 * reallocation of the queues
	 */
	mvneta_stop_dev(pp);

	mvneta_cleanup_txqs(pp);
	mvneta_cleanup_rxqs(pp);

	pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
	pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	ret = mvneta_setup_rxqs(pp);
	if (ret) {
		netdev_err(dev, "unable to setup rxqs after MTU change\n");
		return ret;
	}

	ret = mvneta_setup_txqs(pp);
	if (ret) {
		netdev_err(dev, "unable to setup txqs after MTU change\n");
		return ret;
	}

	mvneta_start_dev(pp);
	mvneta_port_up(pp);

	return 0;
}
/* Get mac address */
static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
{
	u32 mac_addr_l, mac_addr_h;

	mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
	mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
	addr[0] = (mac_addr_h >> 24) & 0xFF;
	addr[1] = (mac_addr_h >> 16) & 0xFF;
	addr[2] = (mac_addr_h >> 8) & 0xFF;
	addr[3] = mac_addr_h & 0xFF;
	addr[4] = (mac_addr_l >> 8) & 0xFF;
	addr[5] = mac_addr_l & 0xFF;
}
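/* Layout example for the reads above: the address 00:50:43:12:34:56 is
 * held as MVNETA_MAC_ADDR_HIGH = 0x00504312 with 0x3456 in the low
 * 16 bits of MVNETA_MAC_ADDR_LOW.
 */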
/* Handle setting mac address */
static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
{
	struct mvneta_port *pp = netdev_priv(dev);
	struct sockaddr *sockaddr = addr;
	int ret;

	ret = eth_prepare_mac_addr_change(dev, addr);
	if (ret < 0)
		return ret;
	/* Remove previous address table entry */
	mvneta_mac_addr_set(pp, dev->dev_addr, -1);

	/* Set new addr in hw */
	mvneta_mac_addr_set(pp, sockaddr->sa_data, rxq_def);

	eth_commit_mac_addr_change(dev, addr);
	return 0;
}
static void mvneta_adjust_link(struct net_device *ndev)
{
	struct mvneta_port *pp = netdev_priv(ndev);
	struct phy_device *phydev = pp->phy_dev;
	int status_change = 0;

	if (phydev->link) {
		if ((pp->speed != phydev->speed) ||
		    (pp->duplex != phydev->duplex)) {
			u32 val;

			val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
			val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
				 MVNETA_GMAC_CONFIG_GMII_SPEED |
				 MVNETA_GMAC_CONFIG_FULL_DUPLEX |
				 MVNETA_GMAC_AN_SPEED_EN |
				 MVNETA_GMAC_AN_DUPLEX_EN);

			if (phydev->duplex)
				val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;

			if (phydev->speed == SPEED_1000)
				val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
			else if (phydev->speed == SPEED_100)
				val |= MVNETA_GMAC_CONFIG_MII_SPEED;

			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);

			pp->duplex = phydev->duplex;
			pp->speed  = phydev->speed;
		}
	}

	if (phydev->link != pp->link) {
		if (!phydev->link) {
			pp->duplex = -1;
			pp->speed = 0;
		}

		pp->link = phydev->link;
		status_change = 1;
	}

	if (status_change) {
		if (phydev->link) {
			u32 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
			val |= (MVNETA_GMAC_FORCE_LINK_PASS |
				MVNETA_GMAC_FORCE_LINK_DOWN);
			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
			mvneta_port_up(pp);
			netdev_info(pp->dev, "link up\n");
		} else {
			mvneta_port_down(pp);
			netdev_info(pp->dev, "link down\n");
		}
	}
}
static int mvneta_mdio_probe(struct mvneta_port *pp)
{
	struct phy_device *phy_dev;

	phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0,
				 pp->phy_interface);
	if (!phy_dev) {
		netdev_err(pp->dev, "could not find the PHY\n");
		return -ENODEV;
	}

	phy_dev->supported &= PHY_GBIT_FEATURES;
	phy_dev->advertising = phy_dev->supported;

	pp->phy_dev = phy_dev;
	pp->link    = 0;
	pp->duplex  = 0;
	pp->speed   = 0;

	return 0;
}

static void mvneta_mdio_remove(struct mvneta_port *pp)
{
	phy_disconnect(pp->phy_dev);
	pp->phy_dev = NULL;
}
static int mvneta_open(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int ret;

	pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
	pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	ret = mvneta_setup_rxqs(pp);
	if (ret)
		return ret;

	ret = mvneta_setup_txqs(pp);
	if (ret)
		goto err_cleanup_rxqs;

	/* Connect to port interrupt line */
	ret = request_irq(pp->dev->irq, mvneta_isr, 0,
			  MVNETA_DRIVER_NAME, pp);
	if (ret) {
		netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
		goto err_cleanup_txqs;
	}

	/* By default the link is down */
	netif_carrier_off(pp->dev);

	ret = mvneta_mdio_probe(pp);
	if (ret < 0) {
		netdev_err(dev, "cannot probe MDIO bus\n");
		goto err_free_irq;
	}

	mvneta_start_dev(pp);

	return 0;

err_free_irq:
	free_irq(pp->dev->irq, pp);
err_cleanup_txqs:
	mvneta_cleanup_txqs(pp);
err_cleanup_rxqs:
	mvneta_cleanup_rxqs(pp);
	return ret;
}
/* Stop the port, free port interrupt line */
static int mvneta_stop(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);

	mvneta_stop_dev(pp);
	mvneta_mdio_remove(pp);
	free_irq(dev->irq, pp);
	mvneta_cleanup_rxqs(pp);
	mvneta_cleanup_txqs(pp);

	return 0;
}
static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int ret;

	if (!pp->phy_dev)
		return -ENOTSUPP;

	ret = phy_mii_ioctl(pp->phy_dev, ifr, cmd);
	if (!ret)
		mvneta_adjust_link(dev);

	return ret;
}
/* Ethtool methods */

/* Get settings (phy address, speed) for ethtool */
int mvneta_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if (!pp->phy_dev)
		return -ENODEV;

	return phy_ethtool_gset(pp->phy_dev, cmd);
}

/* Set settings (phy address, speed) for ethtool */
int mvneta_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if (!pp->phy_dev)
		return -ENODEV;

	return phy_ethtool_sset(pp->phy_dev, cmd);
}
/* Set interrupt coalescing for ethtool */
static int mvneta_ethtool_set_coalesce(struct net_device *dev,
				       struct ethtool_coalesce *c)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int queue;

	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		rxq->time_coal = c->rx_coalesce_usecs;
		rxq->pkts_coal = c->rx_max_coalesced_frames;
		mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
		mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
	}

	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		txq->done_pkts_coal = c->tx_max_coalesced_frames;
		mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
	}

	return 0;
}
/* get coalescing for ethtool */
static int mvneta_ethtool_get_coalesce(struct net_device *dev,
				       struct ethtool_coalesce *c)
{
	struct mvneta_port *pp = netdev_priv(dev);

	c->rx_coalesce_usecs       = pp->rxqs[0].time_coal;
	c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal;

	c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;
	return 0;
}
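/* These two handlers back "ethtool -c/-C <iface>": rx_coalesce_usecs
 * corresponds to the rx-usecs option, rx_max_coalesced_frames to
 * rx-frames and tx_max_coalesced_frames to tx-frames.
 */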
static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
				       struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
		sizeof(drvinfo->bus_info));
}
static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
					 struct ethtool_ringparam *ring)
{
	struct mvneta_port *pp = netdev_priv(netdev);

	ring->rx_max_pending = MVNETA_MAX_RXD;
	ring->tx_max_pending = MVNETA_MAX_TXD;
	ring->rx_pending = pp->rx_ring_size;
	ring->tx_pending = pp->tx_ring_size;
}
static int mvneta_ethtool_set_ringparam(struct net_device *dev,
					struct ethtool_ringparam *ring)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
		return -EINVAL;
	pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
		ring->rx_pending : MVNETA_MAX_RXD;

	pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
				   MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD);
	if (pp->tx_ring_size != ring->tx_pending)
		netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
			    pp->tx_ring_size, ring->tx_pending);

	if (netif_running(dev)) {
		mvneta_stop(dev);
		if (mvneta_open(dev)) {
			netdev_err(dev,
				   "error on opening device after ring param change\n");
			return -ENOMEM;
		}
	}

	return 0;
}
static const struct net_device_ops mvneta_netdev_ops = {
	.ndo_open            = mvneta_open,
	.ndo_stop            = mvneta_stop,
	.ndo_start_xmit      = mvneta_tx,
	.ndo_set_rx_mode     = mvneta_set_rx_mode,
	.ndo_set_mac_address = mvneta_set_mac_addr,
	.ndo_change_mtu      = mvneta_change_mtu,
	.ndo_get_stats64     = mvneta_get_stats64,
	.ndo_do_ioctl        = mvneta_ioctl,
};

const struct ethtool_ops mvneta_eth_tool_ops = {
	.get_link       = ethtool_op_get_link,
	.get_settings   = mvneta_ethtool_get_settings,
	.set_settings   = mvneta_ethtool_set_settings,
	.set_coalesce   = mvneta_ethtool_set_coalesce,
	.get_coalesce   = mvneta_ethtool_get_coalesce,
	.get_drvinfo    = mvneta_ethtool_get_drvinfo,
	.get_ringparam  = mvneta_ethtool_get_ringparam,
	.set_ringparam  = mvneta_ethtool_set_ringparam,
};
static int mvneta_init(struct device *dev, struct mvneta_port *pp)
{
	int queue;

	/* Disable port */
	mvneta_port_disable(pp);

	/* Set port default values */
	mvneta_defaults_set(pp);

	pp->txqs = devm_kcalloc(dev, txq_number, sizeof(struct mvneta_tx_queue),
				GFP_KERNEL);
	if (!pp->txqs)
		return -ENOMEM;

	/* Initialize TX descriptor rings */
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		txq->id = queue;
		txq->size = pp->tx_ring_size;
		txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
	}

	pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(struct mvneta_rx_queue),
				GFP_KERNEL);
	if (!pp->rxqs)
		return -ENOMEM;

	/* Create Rx descriptor rings */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		rxq->id = queue;
		rxq->size = pp->rx_ring_size;
		rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
		rxq->time_coal = MVNETA_RX_COAL_USEC;
	}

	return 0;
}
/* platform glue : initialize decoding windows */
static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
				     const struct mbus_dram_target_info *dram)
{
	u32 win_enable;
	u32 win_protect;
	int i;

	for (i = 0; i < 6; i++) {
		mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
		mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);

		if (i < 4)
			mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
	}

	win_enable = 0x3f;
	win_protect = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;
		mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) |
			    (cs->mbus_attr << 8) | dram->mbus_dram_target_id);

		mvreg_write(pp, MVNETA_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable &= ~(1 << i);
		win_protect |= 3 << (2 * i);
	}

	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
}
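/* Window encoding used above: BASE holds base[31:16], the DRAM
 * attribute in bits 15:8 and the target id in bits 7:0; SIZE holds
 * (size - 1) masked to 64 KiB granularity. win_enable starts at 0x3f
 * and the bit of every configured window is cleared before being
 * written to MVNETA_BASE_ADDR_ENABLE, so a bit left set keeps that
 * window disabled.
 */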
/* Power up the port */
static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
{
	u32 ctrl;

	/* MAC Cause register should be cleared */
	mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);

	ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2);

	/* Even though it might look weird, when we're configured in
	 * SGMII or QSGMII mode, the RGMII bit needs to be set.
	 */
	switch (phy_mode) {
	case PHY_INTERFACE_MODE_QSGMII:
		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
		ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
		break;
	case PHY_INTERFACE_MODE_SGMII:
		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
		ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
		ctrl |= MVNETA_GMAC2_PORT_RGMII;
		break;
	default:
		return -EINVAL;
	}

	/* Cancel Port Reset */
	ctrl &= ~MVNETA_GMAC2_PORT_RESET;
	mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl);

	while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
		MVNETA_GMAC2_PORT_RESET) != 0)
		continue;

	return 0;
}
/* Device initialization routine */
static int mvneta_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram_target_info;
	struct resource *res;
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *phy_node;
	struct mvneta_port *pp;
	struct net_device *dev;
	const char *dt_mac_addr;
	char hw_mac_addr[ETH_ALEN];
	const char *mac_from;
	int phy_mode;
	int err;

	/* Our multiqueue support is not complete, so for now, only
	 * allow the usage of the first RX queue
	 */
	if (rxq_def != 0) {
		dev_err(&pdev->dev, "Invalid rxq_def argument: %d\n", rxq_def);
		return -EINVAL;
	}

	dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number,
				 rxq_number);
	if (!dev)
		return -ENOMEM;

	dev->irq = irq_of_parse_and_map(dn, 0);
	if (dev->irq == 0) {
		err = -EINVAL;
		goto err_free_netdev;
	}

	phy_node = of_parse_phandle(dn, "phy", 0);
	if (!phy_node) {
		if (!of_phy_is_fixed_link(dn)) {
			dev_err(&pdev->dev, "no PHY specified\n");
			err = -ENODEV;
			goto err_free_irq;
		}

		err = of_phy_register_fixed_link(dn);
		if (err < 0) {
			dev_err(&pdev->dev, "cannot register fixed PHY\n");
			goto err_free_irq;
		}

		/* In the case of a fixed PHY, the DT node associated
		 * to the PHY is the Ethernet MAC DT node.
		 */
		phy_node = of_node_get(dn);
	}

	phy_mode = of_get_phy_mode(dn);
	if (phy_mode < 0) {
		dev_err(&pdev->dev, "incorrect phy-mode\n");
		err = -EINVAL;
		goto err_free_irq;
	}

	dev->tx_queue_len = MVNETA_MAX_TXD;
	dev->watchdog_timeo = 5 * HZ;
	dev->netdev_ops = &mvneta_netdev_ops;

	dev->ethtool_ops = &mvneta_eth_tool_ops;

	pp = netdev_priv(dev);
	pp->phy_node = phy_node;
	pp->phy_interface = phy_mode;

	pp->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pp->clk)) {
		err = PTR_ERR(pp->clk);
		goto err_free_irq;
	}

	clk_prepare_enable(pp->clk);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pp->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(pp->base)) {
		err = PTR_ERR(pp->base);
		goto err_clk;
	}

	/* Alloc per-cpu stats */
	pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
	if (!pp->stats) {
		err = -ENOMEM;
		goto err_clk;
	}

	dt_mac_addr = of_get_mac_address(dn);
	if (dt_mac_addr) {
		mac_from = "device tree";
		memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN);
	} else {
		mvneta_get_mac_addr(pp, hw_mac_addr);
		if (is_valid_ether_addr(hw_mac_addr)) {
			mac_from = "hardware";
			memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
		} else {
			mac_from = "random";
			eth_hw_addr_random(dev);
		}
	}

	pp->tx_ring_size = MVNETA_MAX_TXD;
	pp->rx_ring_size = MVNETA_MAX_RXD;

	pp->dev = dev;
	SET_NETDEV_DEV(dev, &pdev->dev);

	err = mvneta_init(&pdev->dev, pp);
	if (err < 0)
		goto err_free_stats;

	err = mvneta_port_power_up(pp, phy_mode);
	if (err < 0) {
		dev_err(&pdev->dev, "can't power up port\n");
		goto err_free_stats;
	}

	dram_target_info = mv_mbus_dram_info();
	if (dram_target_info)
		mvneta_conf_mbus_windows(pp, dram_target_info);

	netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);

	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
	dev->hw_features |= dev->features;
	dev->vlan_features |= dev->features;
	dev->priv_flags |= IFF_UNICAST_FLT;
	dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;

	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register\n");
		goto err_free_stats;
	}

	netdev_info(dev, "Using %s mac address %pM\n", mac_from,
		    dev->dev_addr);

	platform_set_drvdata(pdev, pp->dev);

	return 0;

err_free_stats:
	free_percpu(pp->stats);
err_clk:
	clk_disable_unprepare(pp->clk);
err_free_irq:
	irq_dispose_mapping(dev->irq);
err_free_netdev:
	free_netdev(dev);
	return err;
}
/* Device removal routine */
static int mvneta_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct mvneta_port *pp = netdev_priv(dev);

	unregister_netdev(dev);
	clk_disable_unprepare(pp->clk);
	free_percpu(pp->stats);
	irq_dispose_mapping(dev->irq);
	free_netdev(dev);

	return 0;
}
static const struct of_device_id mvneta_match[] = {
	{ .compatible = "marvell,armada-370-neta" },
	{ }
};
MODULE_DEVICE_TABLE(of, mvneta_match);
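/* Illustrative device-tree fragment (addresses and phandles
 * hypothetical) that would bind against the compatible string above:
 *
 *	ethernet@70000 {
 *		compatible = "marvell,armada-370-neta";
 *		reg = <0x70000 0x4000>;
 *		interrupts = <8>;
 *		clocks = <&gateclk 4>;
 *		phy = <&phy0>;
 *		phy-mode = "rgmii-id";
 *	};
 */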
static struct platform_driver mvneta_driver = {
	.probe = mvneta_probe,
	.remove = mvneta_remove,
	.driver = {
		.name = MVNETA_DRIVER_NAME,
		.of_match_table = mvneta_match,
	},
};

module_platform_driver(mvneta_driver);
MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
MODULE_LICENSE("GPL");

module_param(rxq_number, int, S_IRUGO);
module_param(txq_number, int, S_IRUGO);

module_param(rxq_def, int, S_IRUGO);
module_param(rx_copybreak, int, S_IRUGO | S_IWUSR);
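/* Note on the permissions above: rxq_number, txq_number and rxq_def are
 * read-only at runtime (S_IRUGO), while rx_copybreak is additionally
 * writable by root (S_IWUSR), e.g. through
 * /sys/module/mvneta/parameters/rx_copybreak.
 */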