2 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
4 * Copyright (C) 2012 Marvell
6 * Rami Rosen <rosenr@marvell.com>
7 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any
11 * warranty of any kind, whether express or implied.
14 #include <linux/clk.h>
15 #include <linux/cpu.h>
16 #include <linux/etherdevice.h>
17 #include <linux/if_vlan.h>
18 #include <linux/inetdevice.h>
19 #include <linux/interrupt.h>
21 #include <linux/kernel.h>
22 #include <linux/mbus.h>
23 #include <linux/module.h>
24 #include <linux/netdevice.h>
26 #include <linux/of_address.h>
27 #include <linux/of_irq.h>
28 #include <linux/of_mdio.h>
29 #include <linux/of_net.h>
30 #include <linux/phy.h>
31 #include <linux/phylink.h>
32 #include <linux/platform_device.h>
33 #include <linux/skbuff.h>
35 #include "mvneta_bm.h"
41 #define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2))
42 #define MVNETA_RXQ_HW_BUF_ALLOC BIT(0)
43 #define MVNETA_RXQ_SHORT_POOL_ID_SHIFT 4
44 #define MVNETA_RXQ_SHORT_POOL_ID_MASK 0x30
45 #define MVNETA_RXQ_LONG_POOL_ID_SHIFT 6
46 #define MVNETA_RXQ_LONG_POOL_ID_MASK 0xc0
47 #define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8)
48 #define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8)
49 #define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2))
50 #define MVNETA_RXQ_NON_OCCUPIED(v) ((v) << 16)
51 #define MVNETA_RXQ_BASE_ADDR_REG(q) (0x1480 + ((q) << 2))
52 #define MVNETA_RXQ_SIZE_REG(q) (0x14a0 + ((q) << 2))
53 #define MVNETA_RXQ_BUF_SIZE_SHIFT 19
54 #define MVNETA_RXQ_BUF_SIZE_MASK (0x1fff << 19)
55 #define MVNETA_RXQ_STATUS_REG(q) (0x14e0 + ((q) << 2))
56 #define MVNETA_RXQ_OCCUPIED_ALL_MASK 0x3fff
57 #define MVNETA_RXQ_STATUS_UPDATE_REG(q) (0x1500 + ((q) << 2))
58 #define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT 16
59 #define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX 255
60 #define MVNETA_PORT_POOL_BUFFER_SZ_REG(pool) (0x1700 + ((pool) << 2))
61 #define MVNETA_PORT_POOL_BUFFER_SZ_SHIFT 3
62 #define MVNETA_PORT_POOL_BUFFER_SZ_MASK 0xfff8
63 #define MVNETA_PORT_RX_RESET 0x1cc0
64 #define MVNETA_PORT_RX_DMA_RESET BIT(0)
65 #define MVNETA_PHY_ADDR 0x2000
66 #define MVNETA_PHY_ADDR_MASK 0x1f
67 #define MVNETA_MBUS_RETRY 0x2010
68 #define MVNETA_UNIT_INTR_CAUSE 0x2080
69 #define MVNETA_UNIT_CONTROL 0x20B0
70 #define MVNETA_PHY_POLLING_ENABLE BIT(1)
71 #define MVNETA_WIN_BASE(w) (0x2200 + ((w) << 3))
72 #define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3))
73 #define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2))
74 #define MVNETA_BASE_ADDR_ENABLE 0x2290
75 #define MVNETA_ACCESS_PROTECT_ENABLE 0x2294
76 #define MVNETA_PORT_CONFIG 0x2400
77 #define MVNETA_UNI_PROMISC_MODE BIT(0)
78 #define MVNETA_DEF_RXQ(q) ((q) << 1)
79 #define MVNETA_DEF_RXQ_ARP(q) ((q) << 4)
80 #define MVNETA_TX_UNSET_ERR_SUM BIT(12)
81 #define MVNETA_DEF_RXQ_TCP(q) ((q) << 16)
82 #define MVNETA_DEF_RXQ_UDP(q) ((q) << 19)
83 #define MVNETA_DEF_RXQ_BPDU(q) ((q) << 22)
84 #define MVNETA_RX_CSUM_WITH_PSEUDO_HDR BIT(25)
85 #define MVNETA_PORT_CONFIG_DEFL_VALUE(q) (MVNETA_DEF_RXQ(q) | \
86 MVNETA_DEF_RXQ_ARP(q) | \
87 MVNETA_DEF_RXQ_TCP(q) | \
88 MVNETA_DEF_RXQ_UDP(q) | \
89 MVNETA_DEF_RXQ_BPDU(q) | \
90 MVNETA_TX_UNSET_ERR_SUM | \
91 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
92 #define MVNETA_PORT_CONFIG_EXTEND 0x2404
93 #define MVNETA_MAC_ADDR_LOW 0x2414
94 #define MVNETA_MAC_ADDR_HIGH 0x2418
95 #define MVNETA_SDMA_CONFIG 0x241c
96 #define MVNETA_SDMA_BRST_SIZE_16 4
97 #define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1)
98 #define MVNETA_RX_NO_DATA_SWAP BIT(4)
99 #define MVNETA_TX_NO_DATA_SWAP BIT(5)
100 #define MVNETA_DESC_SWAP BIT(6)
101 #define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22)
102 #define MVNETA_PORT_STATUS 0x2444
103 #define MVNETA_TX_IN_PRGRS BIT(1)
104 #define MVNETA_TX_FIFO_EMPTY BIT(8)
105 #define MVNETA_RX_MIN_FRAME_SIZE 0x247c
106 #define MVNETA_SERDES_CFG 0x24A0
107 #define MVNETA_SGMII_SERDES_PROTO 0x0cc7
108 #define MVNETA_QSGMII_SERDES_PROTO 0x0667
109 #define MVNETA_TYPE_PRIO 0x24bc
110 #define MVNETA_FORCE_UNI BIT(21)
111 #define MVNETA_TXQ_CMD_1 0x24e4
112 #define MVNETA_TXQ_CMD 0x2448
113 #define MVNETA_TXQ_DISABLE_SHIFT 8
114 #define MVNETA_TXQ_ENABLE_MASK 0x000000ff
115 #define MVNETA_RX_DISCARD_FRAME_COUNT 0x2484
116 #define MVNETA_OVERRUN_FRAME_COUNT 0x2488
117 #define MVNETA_GMAC_CLOCK_DIVIDER 0x24f4
118 #define MVNETA_GMAC_1MS_CLOCK_ENABLE BIT(31)
119 #define MVNETA_ACC_MODE 0x2500
120 #define MVNETA_BM_ADDRESS 0x2504
121 #define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2))
122 #define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff
123 #define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00
124 #define MVNETA_CPU_RXQ_ACCESS(rxq) BIT(rxq)
125 #define MVNETA_CPU_TXQ_ACCESS(txq) BIT(txq + 8)
126 #define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2))
128 /* Exception Interrupt Port/Queue Cause register
130 * Their behavior depend of the mapping done using the PCPX2Q
131 * registers. For a given CPU if the bit associated to a queue is not
132 * set, then for the register a read from this CPU will always return
133 * 0 and a write won't do anything
136 #define MVNETA_INTR_NEW_CAUSE 0x25a0
137 #define MVNETA_INTR_NEW_MASK 0x25a4
139 /* bits 0..7 = TXQ SENT, one bit per queue.
140 * bits 8..15 = RXQ OCCUP, one bit per queue.
141 * bits 16..23 = RXQ FREE, one bit per queue.
142 * bit 29 = OLD_REG_SUM, see old reg ?
143 * bit 30 = TX_ERR_SUM, one bit for 4 ports
144 * bit 31 = MISC_SUM, one bit for 4 ports
146 #define MVNETA_TX_INTR_MASK(nr_txqs) (((1 << nr_txqs) - 1) << 0)
147 #define MVNETA_TX_INTR_MASK_ALL (0xff << 0)
148 #define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8)
149 #define MVNETA_RX_INTR_MASK_ALL (0xff << 8)
150 #define MVNETA_MISCINTR_INTR_MASK BIT(31)
152 #define MVNETA_INTR_OLD_CAUSE 0x25a8
153 #define MVNETA_INTR_OLD_MASK 0x25ac
155 /* Data Path Port/Queue Cause Register */
156 #define MVNETA_INTR_MISC_CAUSE 0x25b0
157 #define MVNETA_INTR_MISC_MASK 0x25b4
159 #define MVNETA_CAUSE_PHY_STATUS_CHANGE BIT(0)
160 #define MVNETA_CAUSE_LINK_CHANGE BIT(1)
161 #define MVNETA_CAUSE_PTP BIT(4)
163 #define MVNETA_CAUSE_INTERNAL_ADDR_ERR BIT(7)
164 #define MVNETA_CAUSE_RX_OVERRUN BIT(8)
165 #define MVNETA_CAUSE_RX_CRC_ERROR BIT(9)
166 #define MVNETA_CAUSE_RX_LARGE_PKT BIT(10)
167 #define MVNETA_CAUSE_TX_UNDERUN BIT(11)
168 #define MVNETA_CAUSE_PRBS_ERR BIT(12)
169 #define MVNETA_CAUSE_PSC_SYNC_CHANGE BIT(13)
170 #define MVNETA_CAUSE_SERDES_SYNC_ERR BIT(14)
172 #define MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT 16
173 #define MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
174 #define MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))
176 #define MVNETA_CAUSE_TXQ_ERROR_SHIFT 24
177 #define MVNETA_CAUSE_TXQ_ERROR_ALL_MASK (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
178 #define MVNETA_CAUSE_TXQ_ERROR_MASK(q) (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))
180 #define MVNETA_INTR_ENABLE 0x25b8
181 #define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00
182 #define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0x000000ff
184 #define MVNETA_RXQ_CMD 0x2680
185 #define MVNETA_RXQ_DISABLE_SHIFT 8
186 #define MVNETA_RXQ_ENABLE_MASK 0x000000ff
187 #define MVETH_TXQ_TOKEN_COUNT_REG(q) (0x2700 + ((q) << 4))
188 #define MVETH_TXQ_TOKEN_CFG_REG(q) (0x2704 + ((q) << 4))
189 #define MVNETA_GMAC_CTRL_0 0x2c00
190 #define MVNETA_GMAC_MAX_RX_SIZE_SHIFT 2
191 #define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc
192 #define MVNETA_GMAC0_PORT_1000BASE_X BIT(1)
193 #define MVNETA_GMAC0_PORT_ENABLE BIT(0)
194 #define MVNETA_GMAC_CTRL_2 0x2c08
195 #define MVNETA_GMAC2_INBAND_AN_ENABLE BIT(0)
196 #define MVNETA_GMAC2_PCS_ENABLE BIT(3)
197 #define MVNETA_GMAC2_PORT_RGMII BIT(4)
198 #define MVNETA_GMAC2_PORT_RESET BIT(6)
199 #define MVNETA_GMAC_STATUS 0x2c10
200 #define MVNETA_GMAC_LINK_UP BIT(0)
201 #define MVNETA_GMAC_SPEED_1000 BIT(1)
202 #define MVNETA_GMAC_SPEED_100 BIT(2)
203 #define MVNETA_GMAC_FULL_DUPLEX BIT(3)
204 #define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE BIT(4)
205 #define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE BIT(5)
206 #define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE BIT(6)
207 #define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE BIT(7)
208 #define MVNETA_GMAC_AN_COMPLETE BIT(11)
209 #define MVNETA_GMAC_SYNC_OK BIT(14)
210 #define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c
211 #define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0)
212 #define MVNETA_GMAC_FORCE_LINK_PASS BIT(1)
213 #define MVNETA_GMAC_INBAND_AN_ENABLE BIT(2)
214 #define MVNETA_GMAC_AN_BYPASS_ENABLE BIT(3)
215 #define MVNETA_GMAC_INBAND_RESTART_AN BIT(4)
216 #define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5)
217 #define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6)
218 #define MVNETA_GMAC_AN_SPEED_EN BIT(7)
219 #define MVNETA_GMAC_CONFIG_FLOW_CTRL BIT(8)
220 #define MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL BIT(9)
221 #define MVNETA_GMAC_AN_FLOW_CTRL_EN BIT(11)
222 #define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
223 #define MVNETA_GMAC_AN_DUPLEX_EN BIT(13)
224 #define MVNETA_MIB_COUNTERS_BASE 0x3000
225 #define MVNETA_MIB_LATE_COLLISION 0x7c
226 #define MVNETA_DA_FILT_SPEC_MCAST 0x3400
227 #define MVNETA_DA_FILT_OTH_MCAST 0x3500
228 #define MVNETA_DA_FILT_UCAST_BASE 0x3600
229 #define MVNETA_TXQ_BASE_ADDR_REG(q) (0x3c00 + ((q) << 2))
230 #define MVNETA_TXQ_SIZE_REG(q) (0x3c20 + ((q) << 2))
231 #define MVNETA_TXQ_SENT_THRESH_ALL_MASK 0x3fff0000
232 #define MVNETA_TXQ_SENT_THRESH_MASK(coal) ((coal) << 16)
233 #define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2))
234 #define MVNETA_TXQ_DEC_SENT_SHIFT 16
235 #define MVNETA_TXQ_DEC_SENT_MASK 0xff
236 #define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2))
237 #define MVNETA_TXQ_SENT_DESC_SHIFT 16
238 #define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000
239 #define MVNETA_PORT_TX_RESET 0x3cf0
240 #define MVNETA_PORT_TX_DMA_RESET BIT(0)
241 #define MVNETA_TX_MTU 0x3e0c
242 #define MVNETA_TX_TOKEN_SIZE 0x3e14
243 #define MVNETA_TX_TOKEN_SIZE_MAX 0xffffffff
244 #define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2))
245 #define MVNETA_TXQ_TOKEN_SIZE_MAX 0x7fffffff
247 #define MVNETA_LPI_CTRL_0 0x2cc0
248 #define MVNETA_LPI_CTRL_1 0x2cc4
249 #define MVNETA_LPI_REQUEST_ENABLE BIT(0)
250 #define MVNETA_LPI_CTRL_2 0x2cc8
251 #define MVNETA_LPI_STATUS 0x2ccc
253 #define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
255 /* Descriptor ring Macros */
256 #define MVNETA_QUEUE_NEXT_DESC(q, index) \
257 (((index) < (q)->last_desc) ? ((index) + 1) : 0)
259 /* Various constants */
262 #define MVNETA_TXDONE_COAL_PKTS 0 /* interrupt per packet */
263 #define MVNETA_RX_COAL_PKTS 32
264 #define MVNETA_RX_COAL_USEC 100
266 /* The two bytes Marvell header. Either contains a special value used
267 * by Marvell switches when a specific hardware mode is enabled (not
268 * supported by this driver) or is filled automatically by zeroes on
269 * the RX side. Those two bytes being at the front of the Ethernet
270 * header, they allow to have the IP header aligned on a 4 bytes
271 * boundary automatically: the hardware skips those two bytes on its
274 #define MVNETA_MH_SIZE 2
276 #define MVNETA_VLAN_TAG_LEN 4
278 #define MVNETA_TX_CSUM_DEF_SIZE 1600
279 #define MVNETA_TX_CSUM_MAX_SIZE 9800
280 #define MVNETA_ACC_MODE_EXT1 1
281 #define MVNETA_ACC_MODE_EXT2 2
283 #define MVNETA_MAX_DECODE_WIN 6
285 /* Timeout constants */
286 #define MVNETA_TX_DISABLE_TIMEOUT_MSEC 1000
287 #define MVNETA_RX_DISABLE_TIMEOUT_MSEC 1000
288 #define MVNETA_TX_FIFO_EMPTY_TIMEOUT 10000
290 #define MVNETA_TX_MTU_MAX 0x3ffff
292 /* The RSS lookup table actually has 256 entries but we do not use
295 #define MVNETA_RSS_LU_TABLE_SIZE 1
297 /* Max number of Rx descriptors */
298 #define MVNETA_MAX_RXD 512
300 /* Max number of Tx descriptors */
301 #define MVNETA_MAX_TXD 1024
303 /* Max number of allowed TCP segments for software TSO */
304 #define MVNETA_MAX_TSO_SEGS 100
306 #define MVNETA_MAX_SKB_DESCS (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
308 /* descriptor aligned size */
309 #define MVNETA_DESC_ALIGNED_SIZE 32
311 /* Number of bytes to be taken into account by HW when putting incoming data
312 * to the buffers. It is needed in case NET_SKB_PAD exceeds maximum packet
313 * offset supported in MVNETA_RXQ_CONFIG_REG(q) registers.
315 #define MVNETA_RX_PKT_OFFSET_CORRECTION 64
317 #define MVNETA_RX_PKT_SIZE(mtu) \
318 ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
319 ETH_HLEN + ETH_FCS_LEN, \
322 #define IS_TSO_HEADER(txq, addr) \
323 ((addr >= txq->tso_hdrs_phys) && \
324 (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
326 #define MVNETA_RX_GET_BM_POOL_ID(rxd) \
327 (((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT)
330 ETHTOOL_STAT_EEE_WAKEUP
,
331 ETHTOOL_STAT_SKB_ALLOC_ERR
,
332 ETHTOOL_STAT_REFILL_ERR
,
336 struct mvneta_statistic
{
337 unsigned short offset
;
339 const char name
[ETH_GSTRING_LEN
];
346 static const struct mvneta_statistic mvneta_statistics
[] = {
347 { 0x3000, T_REG_64
, "good_octets_received", },
348 { 0x3010, T_REG_32
, "good_frames_received", },
349 { 0x3008, T_REG_32
, "bad_octets_received", },
350 { 0x3014, T_REG_32
, "bad_frames_received", },
351 { 0x3018, T_REG_32
, "broadcast_frames_received", },
352 { 0x301c, T_REG_32
, "multicast_frames_received", },
353 { 0x3050, T_REG_32
, "unrec_mac_control_received", },
354 { 0x3058, T_REG_32
, "good_fc_received", },
355 { 0x305c, T_REG_32
, "bad_fc_received", },
356 { 0x3060, T_REG_32
, "undersize_received", },
357 { 0x3064, T_REG_32
, "fragments_received", },
358 { 0x3068, T_REG_32
, "oversize_received", },
359 { 0x306c, T_REG_32
, "jabber_received", },
360 { 0x3070, T_REG_32
, "mac_receive_error", },
361 { 0x3074, T_REG_32
, "bad_crc_event", },
362 { 0x3078, T_REG_32
, "collision", },
363 { 0x307c, T_REG_32
, "late_collision", },
364 { 0x2484, T_REG_32
, "rx_discard", },
365 { 0x2488, T_REG_32
, "rx_overrun", },
366 { 0x3020, T_REG_32
, "frames_64_octets", },
367 { 0x3024, T_REG_32
, "frames_65_to_127_octets", },
368 { 0x3028, T_REG_32
, "frames_128_to_255_octets", },
369 { 0x302c, T_REG_32
, "frames_256_to_511_octets", },
370 { 0x3030, T_REG_32
, "frames_512_to_1023_octets", },
371 { 0x3034, T_REG_32
, "frames_1024_to_max_octets", },
372 { 0x3038, T_REG_64
, "good_octets_sent", },
373 { 0x3040, T_REG_32
, "good_frames_sent", },
374 { 0x3044, T_REG_32
, "excessive_collision", },
375 { 0x3048, T_REG_32
, "multicast_frames_sent", },
376 { 0x304c, T_REG_32
, "broadcast_frames_sent", },
377 { 0x3054, T_REG_32
, "fc_sent", },
378 { 0x300c, T_REG_32
, "internal_mac_transmit_err", },
379 { ETHTOOL_STAT_EEE_WAKEUP
, T_SW
, "eee_wakeup_errors", },
380 { ETHTOOL_STAT_SKB_ALLOC_ERR
, T_SW
, "skb_alloc_errors", },
381 { ETHTOOL_STAT_REFILL_ERR
, T_SW
, "refill_errors", },
384 struct mvneta_pcpu_stats
{
385 struct u64_stats_sync syncp
;
392 struct mvneta_pcpu_port
{
393 /* Pointer to the shared port */
394 struct mvneta_port
*pp
;
396 /* Pointer to the CPU-local NAPI struct */
397 struct napi_struct napi
;
399 /* Cause of the previous interrupt */
405 struct mvneta_pcpu_port __percpu
*ports
;
406 struct mvneta_pcpu_stats __percpu
*stats
;
409 unsigned int frag_size
;
411 struct mvneta_rx_queue
*rxqs
;
412 struct mvneta_tx_queue
*txqs
;
413 struct net_device
*dev
;
414 struct hlist_node node_online
;
415 struct hlist_node node_dead
;
417 /* Protect the access to the percpu interrupt registers,
418 * ensuring that the configuration remains coherent.
424 struct napi_struct napi
;
434 phy_interface_t phy_interface
;
435 struct device_node
*dn
;
436 unsigned int tx_csum_limit
;
437 struct phylink
*phylink
;
439 struct mvneta_bm
*bm_priv
;
440 struct mvneta_bm_pool
*pool_long
;
441 struct mvneta_bm_pool
*pool_short
;
448 u64 ethtool_stats
[ARRAY_SIZE(mvneta_statistics
)];
450 u32 indir
[MVNETA_RSS_LU_TABLE_SIZE
];
452 /* Flags for special SoC configurations */
453 bool neta_armada3700
;
454 u16 rx_offset_correction
;
455 const struct mbus_dram_target_info
*dram_target_info
;
458 /* The mvneta_tx_desc and mvneta_rx_desc structures describe the
459 * layout of the transmit and reception DMA descriptors, and their
460 * layout is therefore defined by the hardware design
463 #define MVNETA_TX_L3_OFF_SHIFT 0
464 #define MVNETA_TX_IP_HLEN_SHIFT 8
465 #define MVNETA_TX_L4_UDP BIT(16)
466 #define MVNETA_TX_L3_IP6 BIT(17)
467 #define MVNETA_TXD_IP_CSUM BIT(18)
468 #define MVNETA_TXD_Z_PAD BIT(19)
469 #define MVNETA_TXD_L_DESC BIT(20)
470 #define MVNETA_TXD_F_DESC BIT(21)
471 #define MVNETA_TXD_FLZ_DESC (MVNETA_TXD_Z_PAD | \
472 MVNETA_TXD_L_DESC | \
474 #define MVNETA_TX_L4_CSUM_FULL BIT(30)
475 #define MVNETA_TX_L4_CSUM_NOT BIT(31)
477 #define MVNETA_RXD_ERR_CRC 0x0
478 #define MVNETA_RXD_BM_POOL_SHIFT 13
479 #define MVNETA_RXD_BM_POOL_MASK (BIT(13) | BIT(14))
480 #define MVNETA_RXD_ERR_SUMMARY BIT(16)
481 #define MVNETA_RXD_ERR_OVERRUN BIT(17)
482 #define MVNETA_RXD_ERR_LEN BIT(18)
483 #define MVNETA_RXD_ERR_RESOURCE (BIT(17) | BIT(18))
484 #define MVNETA_RXD_ERR_CODE_MASK (BIT(17) | BIT(18))
485 #define MVNETA_RXD_L3_IP4 BIT(25)
486 #define MVNETA_RXD_LAST_DESC BIT(26)
487 #define MVNETA_RXD_FIRST_DESC BIT(27)
488 #define MVNETA_RXD_FIRST_LAST_DESC (MVNETA_RXD_FIRST_DESC | \
489 MVNETA_RXD_LAST_DESC)
490 #define MVNETA_RXD_L4_CSUM_OK BIT(30)
492 #if defined(__LITTLE_ENDIAN)
493 struct mvneta_tx_desc
{
494 u32 command
; /* Options used by HW for packet transmitting.*/
495 u16 reserverd1
; /* csum_l4 (for future use) */
496 u16 data_size
; /* Data size of transmitted packet in bytes */
497 u32 buf_phys_addr
; /* Physical addr of transmitted buffer */
498 u32 reserved2
; /* hw_cmd - (for future use, PMT) */
499 u32 reserved3
[4]; /* Reserved - (for future use) */
502 struct mvneta_rx_desc
{
503 u32 status
; /* Info about received packet */
504 u16 reserved1
; /* pnc_info - (for future use, PnC) */
505 u16 data_size
; /* Size of received packet in bytes */
507 u32 buf_phys_addr
; /* Physical address of the buffer */
508 u32 reserved2
; /* pnc_flow_id (for future use, PnC) */
510 u32 buf_cookie
; /* cookie for access to RX buffer in rx path */
511 u16 reserved3
; /* prefetch_cmd, for future use */
512 u16 reserved4
; /* csum_l4 - (for future use, PnC) */
514 u32 reserved5
; /* pnc_extra PnC (for future use, PnC) */
515 u32 reserved6
; /* hw_cmd (for future use, PnC and HWF) */
518 struct mvneta_tx_desc
{
519 u16 data_size
; /* Data size of transmitted packet in bytes */
520 u16 reserverd1
; /* csum_l4 (for future use) */
521 u32 command
; /* Options used by HW for packet transmitting.*/
522 u32 reserved2
; /* hw_cmd - (for future use, PMT) */
523 u32 buf_phys_addr
; /* Physical addr of transmitted buffer */
524 u32 reserved3
[4]; /* Reserved - (for future use) */
527 struct mvneta_rx_desc
{
528 u16 data_size
; /* Size of received packet in bytes */
529 u16 reserved1
; /* pnc_info - (for future use, PnC) */
530 u32 status
; /* Info about received packet */
532 u32 reserved2
; /* pnc_flow_id (for future use, PnC) */
533 u32 buf_phys_addr
; /* Physical address of the buffer */
535 u16 reserved4
; /* csum_l4 - (for future use, PnC) */
536 u16 reserved3
; /* prefetch_cmd, for future use */
537 u32 buf_cookie
; /* cookie for access to RX buffer in rx path */
539 u32 reserved5
; /* pnc_extra PnC (for future use, PnC) */
540 u32 reserved6
; /* hw_cmd (for future use, PnC and HWF) */
544 struct mvneta_tx_queue
{
545 /* Number of this TX queue, in the range 0-7 */
548 /* Number of TX DMA descriptors in the descriptor ring */
551 /* Number of currently used TX DMA descriptor in the
556 int tx_stop_threshold
;
557 int tx_wake_threshold
;
559 /* Array of transmitted skb */
560 struct sk_buff
**tx_skb
;
562 /* Index of last TX DMA descriptor that was inserted */
565 /* Index of the TX DMA descriptor to be cleaned up */
570 /* Virtual address of the TX DMA descriptors array */
571 struct mvneta_tx_desc
*descs
;
573 /* DMA address of the TX DMA descriptors array */
574 dma_addr_t descs_phys
;
576 /* Index of the last TX DMA descriptor */
579 /* Index of the next TX DMA descriptor to process */
580 int next_desc_to_proc
;
582 /* DMA buffers for TSO headers */
585 /* DMA address of TSO headers */
586 dma_addr_t tso_hdrs_phys
;
588 /* Affinity mask for CPUs*/
589 cpumask_t affinity_mask
;
592 struct mvneta_rx_queue
{
593 /* rx queue number, in the range 0-7 */
596 /* num of rx descriptors in the rx descriptor ring */
602 /* Virtual address of the RX buffer */
603 void **buf_virt_addr
;
605 /* Virtual address of the RX DMA descriptors array */
606 struct mvneta_rx_desc
*descs
;
608 /* DMA address of the RX DMA descriptors array */
609 dma_addr_t descs_phys
;
611 /* Index of the last RX DMA descriptor */
614 /* Index of the next RX DMA descriptor to process */
615 int next_desc_to_proc
;
617 /* Index of first RX DMA descriptor to refill */
621 /* pointer to uncomplete skb buffer */
630 static enum cpuhp_state online_hpstate
;
631 /* The hardware supports eight (8) rx queues, but we are only allowing
632 * the first one to be used. Therefore, let's just allocate one queue.
634 static int rxq_number
= 8;
635 static int txq_number
= 8;
639 static int rx_copybreak __read_mostly
= 256;
640 static int rx_header_size __read_mostly
= 128;
642 /* HW BM need that each port be identify by a unique ID */
643 static int global_port_id
;
645 #define MVNETA_DRIVER_NAME "mvneta"
646 #define MVNETA_DRIVER_VERSION "1.0"
648 /* Utility/helper methods */
650 /* Write helper method */
651 static void mvreg_write(struct mvneta_port
*pp
, u32 offset
, u32 data
)
653 writel(data
, pp
->base
+ offset
);
656 /* Read helper method */
657 static u32
mvreg_read(struct mvneta_port
*pp
, u32 offset
)
659 return readl(pp
->base
+ offset
);
662 /* Increment txq get counter */
663 static void mvneta_txq_inc_get(struct mvneta_tx_queue
*txq
)
665 txq
->txq_get_index
++;
666 if (txq
->txq_get_index
== txq
->size
)
667 txq
->txq_get_index
= 0;
670 /* Increment txq put counter */
671 static void mvneta_txq_inc_put(struct mvneta_tx_queue
*txq
)
673 txq
->txq_put_index
++;
674 if (txq
->txq_put_index
== txq
->size
)
675 txq
->txq_put_index
= 0;
679 /* Clear all MIB counters */
680 static void mvneta_mib_counters_clear(struct mvneta_port
*pp
)
685 /* Perform dummy reads from MIB counters */
686 for (i
= 0; i
< MVNETA_MIB_LATE_COLLISION
; i
+= 4)
687 dummy
= mvreg_read(pp
, (MVNETA_MIB_COUNTERS_BASE
+ i
));
688 dummy
= mvreg_read(pp
, MVNETA_RX_DISCARD_FRAME_COUNT
);
689 dummy
= mvreg_read(pp
, MVNETA_OVERRUN_FRAME_COUNT
);
692 /* Get System Network Statistics */
694 mvneta_get_stats64(struct net_device
*dev
,
695 struct rtnl_link_stats64
*stats
)
697 struct mvneta_port
*pp
= netdev_priv(dev
);
701 for_each_possible_cpu(cpu
) {
702 struct mvneta_pcpu_stats
*cpu_stats
;
708 cpu_stats
= per_cpu_ptr(pp
->stats
, cpu
);
710 start
= u64_stats_fetch_begin_irq(&cpu_stats
->syncp
);
711 rx_packets
= cpu_stats
->rx_packets
;
712 rx_bytes
= cpu_stats
->rx_bytes
;
713 tx_packets
= cpu_stats
->tx_packets
;
714 tx_bytes
= cpu_stats
->tx_bytes
;
715 } while (u64_stats_fetch_retry_irq(&cpu_stats
->syncp
, start
));
717 stats
->rx_packets
+= rx_packets
;
718 stats
->rx_bytes
+= rx_bytes
;
719 stats
->tx_packets
+= tx_packets
;
720 stats
->tx_bytes
+= tx_bytes
;
723 stats
->rx_errors
= dev
->stats
.rx_errors
;
724 stats
->rx_dropped
= dev
->stats
.rx_dropped
;
726 stats
->tx_dropped
= dev
->stats
.tx_dropped
;
729 /* Rx descriptors helper methods */
731 /* Checks whether the RX descriptor having this status is both the first
732 * and the last descriptor for the RX packet. Each RX packet is currently
733 * received through a single RX descriptor, so not having each RX
734 * descriptor with its first and last bits set is an error
736 static int mvneta_rxq_desc_is_first_last(u32 status
)
738 return (status
& MVNETA_RXD_FIRST_LAST_DESC
) ==
739 MVNETA_RXD_FIRST_LAST_DESC
;
742 /* Add number of descriptors ready to receive new packets */
743 static void mvneta_rxq_non_occup_desc_add(struct mvneta_port
*pp
,
744 struct mvneta_rx_queue
*rxq
,
747 /* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
750 while (ndescs
> MVNETA_RXQ_ADD_NON_OCCUPIED_MAX
) {
751 mvreg_write(pp
, MVNETA_RXQ_STATUS_UPDATE_REG(rxq
->id
),
752 (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX
<<
753 MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT
));
754 ndescs
-= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX
;
757 mvreg_write(pp
, MVNETA_RXQ_STATUS_UPDATE_REG(rxq
->id
),
758 (ndescs
<< MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT
));
761 /* Get number of RX descriptors occupied by received packets */
762 static int mvneta_rxq_busy_desc_num_get(struct mvneta_port
*pp
,
763 struct mvneta_rx_queue
*rxq
)
767 val
= mvreg_read(pp
, MVNETA_RXQ_STATUS_REG(rxq
->id
));
768 return val
& MVNETA_RXQ_OCCUPIED_ALL_MASK
;
771 /* Update num of rx desc called upon return from rx path or
772 * from mvneta_rxq_drop_pkts().
774 static void mvneta_rxq_desc_num_update(struct mvneta_port
*pp
,
775 struct mvneta_rx_queue
*rxq
,
776 int rx_done
, int rx_filled
)
780 if ((rx_done
<= 0xff) && (rx_filled
<= 0xff)) {
782 (rx_filled
<< MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT
);
783 mvreg_write(pp
, MVNETA_RXQ_STATUS_UPDATE_REG(rxq
->id
), val
);
787 /* Only 255 descriptors can be added at once */
788 while ((rx_done
> 0) || (rx_filled
> 0)) {
789 if (rx_done
<= 0xff) {
796 if (rx_filled
<= 0xff) {
797 val
|= rx_filled
<< MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT
;
800 val
|= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT
;
803 mvreg_write(pp
, MVNETA_RXQ_STATUS_UPDATE_REG(rxq
->id
), val
);
807 /* Get pointer to next RX descriptor to be processed by SW */
808 static struct mvneta_rx_desc
*
809 mvneta_rxq_next_desc_get(struct mvneta_rx_queue
*rxq
)
811 int rx_desc
= rxq
->next_desc_to_proc
;
813 rxq
->next_desc_to_proc
= MVNETA_QUEUE_NEXT_DESC(rxq
, rx_desc
);
814 prefetch(rxq
->descs
+ rxq
->next_desc_to_proc
);
815 return rxq
->descs
+ rx_desc
;
818 /* Change maximum receive size of the port. */
819 static void mvneta_max_rx_size_set(struct mvneta_port
*pp
, int max_rx_size
)
823 val
= mvreg_read(pp
, MVNETA_GMAC_CTRL_0
);
824 val
&= ~MVNETA_GMAC_MAX_RX_SIZE_MASK
;
825 val
|= ((max_rx_size
- MVNETA_MH_SIZE
) / 2) <<
826 MVNETA_GMAC_MAX_RX_SIZE_SHIFT
;
827 mvreg_write(pp
, MVNETA_GMAC_CTRL_0
, val
);
831 /* Set rx queue offset */
832 static void mvneta_rxq_offset_set(struct mvneta_port
*pp
,
833 struct mvneta_rx_queue
*rxq
,
838 val
= mvreg_read(pp
, MVNETA_RXQ_CONFIG_REG(rxq
->id
));
839 val
&= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK
;
842 val
|= MVNETA_RXQ_PKT_OFFSET_MASK(offset
>> 3);
843 mvreg_write(pp
, MVNETA_RXQ_CONFIG_REG(rxq
->id
), val
);
847 /* Tx descriptors helper methods */
849 /* Update HW with number of TX descriptors to be sent */
850 static void mvneta_txq_pend_desc_add(struct mvneta_port
*pp
,
851 struct mvneta_tx_queue
*txq
,
856 pend_desc
+= txq
->pending
;
858 /* Only 255 Tx descriptors can be added at once */
860 val
= min(pend_desc
, 255);
861 mvreg_write(pp
, MVNETA_TXQ_UPDATE_REG(txq
->id
), val
);
863 } while (pend_desc
> 0);
867 /* Get pointer to next TX descriptor to be processed (send) by HW */
868 static struct mvneta_tx_desc
*
869 mvneta_txq_next_desc_get(struct mvneta_tx_queue
*txq
)
871 int tx_desc
= txq
->next_desc_to_proc
;
873 txq
->next_desc_to_proc
= MVNETA_QUEUE_NEXT_DESC(txq
, tx_desc
);
874 return txq
->descs
+ tx_desc
;
877 /* Release the last allocated TX descriptor. Useful to handle DMA
878 * mapping failures in the TX path.
880 static void mvneta_txq_desc_put(struct mvneta_tx_queue
*txq
)
882 if (txq
->next_desc_to_proc
== 0)
883 txq
->next_desc_to_proc
= txq
->last_desc
- 1;
885 txq
->next_desc_to_proc
--;
888 /* Set rxq buf size */
889 static void mvneta_rxq_buf_size_set(struct mvneta_port
*pp
,
890 struct mvneta_rx_queue
*rxq
,
895 val
= mvreg_read(pp
, MVNETA_RXQ_SIZE_REG(rxq
->id
));
897 val
&= ~MVNETA_RXQ_BUF_SIZE_MASK
;
898 val
|= ((buf_size
>> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT
);
900 mvreg_write(pp
, MVNETA_RXQ_SIZE_REG(rxq
->id
), val
);
903 /* Disable buffer management (BM) */
904 static void mvneta_rxq_bm_disable(struct mvneta_port
*pp
,
905 struct mvneta_rx_queue
*rxq
)
909 val
= mvreg_read(pp
, MVNETA_RXQ_CONFIG_REG(rxq
->id
));
910 val
&= ~MVNETA_RXQ_HW_BUF_ALLOC
;
911 mvreg_write(pp
, MVNETA_RXQ_CONFIG_REG(rxq
->id
), val
);
914 /* Enable buffer management (BM) */
915 static void mvneta_rxq_bm_enable(struct mvneta_port
*pp
,
916 struct mvneta_rx_queue
*rxq
)
920 val
= mvreg_read(pp
, MVNETA_RXQ_CONFIG_REG(rxq
->id
));
921 val
|= MVNETA_RXQ_HW_BUF_ALLOC
;
922 mvreg_write(pp
, MVNETA_RXQ_CONFIG_REG(rxq
->id
), val
);
925 /* Notify HW about port's assignment of pool for bigger packets */
926 static void mvneta_rxq_long_pool_set(struct mvneta_port
*pp
,
927 struct mvneta_rx_queue
*rxq
)
931 val
= mvreg_read(pp
, MVNETA_RXQ_CONFIG_REG(rxq
->id
));
932 val
&= ~MVNETA_RXQ_LONG_POOL_ID_MASK
;
933 val
|= (pp
->pool_long
->id
<< MVNETA_RXQ_LONG_POOL_ID_SHIFT
);
935 mvreg_write(pp
, MVNETA_RXQ_CONFIG_REG(rxq
->id
), val
);
938 /* Notify HW about port's assignment of pool for smaller packets */
939 static void mvneta_rxq_short_pool_set(struct mvneta_port
*pp
,
940 struct mvneta_rx_queue
*rxq
)
944 val
= mvreg_read(pp
, MVNETA_RXQ_CONFIG_REG(rxq
->id
));
945 val
&= ~MVNETA_RXQ_SHORT_POOL_ID_MASK
;
946 val
|= (pp
->pool_short
->id
<< MVNETA_RXQ_SHORT_POOL_ID_SHIFT
);
948 mvreg_write(pp
, MVNETA_RXQ_CONFIG_REG(rxq
->id
), val
);
951 /* Set port's receive buffer size for assigned BM pool */
952 static inline void mvneta_bm_pool_bufsize_set(struct mvneta_port
*pp
,
958 if (!IS_ALIGNED(buf_size
, 8)) {
959 dev_warn(pp
->dev
->dev
.parent
,
960 "illegal buf_size value %d, round to %d\n",
961 buf_size
, ALIGN(buf_size
, 8));
962 buf_size
= ALIGN(buf_size
, 8);
965 val
= mvreg_read(pp
, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id
));
966 val
|= buf_size
& MVNETA_PORT_POOL_BUFFER_SZ_MASK
;
967 mvreg_write(pp
, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id
), val
);
970 /* Configure MBUS window in order to enable access BM internal SRAM */
971 static int mvneta_mbus_io_win_set(struct mvneta_port
*pp
, u32 base
, u32 wsize
,
974 u32 win_enable
, win_protect
;
977 win_enable
= mvreg_read(pp
, MVNETA_BASE_ADDR_ENABLE
);
979 if (pp
->bm_win_id
< 0) {
980 /* Find first not occupied window */
981 for (i
= 0; i
< MVNETA_MAX_DECODE_WIN
; i
++) {
982 if (win_enable
& (1 << i
)) {
987 if (i
== MVNETA_MAX_DECODE_WIN
)
993 mvreg_write(pp
, MVNETA_WIN_BASE(i
), 0);
994 mvreg_write(pp
, MVNETA_WIN_SIZE(i
), 0);
997 mvreg_write(pp
, MVNETA_WIN_REMAP(i
), 0);
999 mvreg_write(pp
, MVNETA_WIN_BASE(i
), (base
& 0xffff0000) |
1000 (attr
<< 8) | target
);
1002 mvreg_write(pp
, MVNETA_WIN_SIZE(i
), (wsize
- 1) & 0xffff0000);
1004 win_protect
= mvreg_read(pp
, MVNETA_ACCESS_PROTECT_ENABLE
);
1005 win_protect
|= 3 << (2 * i
);
1006 mvreg_write(pp
, MVNETA_ACCESS_PROTECT_ENABLE
, win_protect
);
1008 win_enable
&= ~(1 << i
);
1009 mvreg_write(pp
, MVNETA_BASE_ADDR_ENABLE
, win_enable
);
1014 static int mvneta_bm_port_mbus_init(struct mvneta_port
*pp
)
1020 /* Get BM window information */
1021 err
= mvebu_mbus_get_io_win_info(pp
->bm_priv
->bppi_phys_addr
, &wsize
,
1028 /* Open NETA -> BM window */
1029 err
= mvneta_mbus_io_win_set(pp
, pp
->bm_priv
->bppi_phys_addr
, wsize
,
1032 netdev_info(pp
->dev
, "fail to configure mbus window to BM\n");
1038 /* Assign and initialize pools for port. In case of fail
1039 * buffer manager will remain disabled for current port.
1041 static int mvneta_bm_port_init(struct platform_device
*pdev
,
1042 struct mvneta_port
*pp
)
1044 struct device_node
*dn
= pdev
->dev
.of_node
;
1045 u32 long_pool_id
, short_pool_id
;
1047 if (!pp
->neta_armada3700
) {
1050 ret
= mvneta_bm_port_mbus_init(pp
);
1055 if (of_property_read_u32(dn
, "bm,pool-long", &long_pool_id
)) {
1056 netdev_info(pp
->dev
, "missing long pool id\n");
1060 /* Create port's long pool depending on mtu */
1061 pp
->pool_long
= mvneta_bm_pool_use(pp
->bm_priv
, long_pool_id
,
1062 MVNETA_BM_LONG
, pp
->id
,
1063 MVNETA_RX_PKT_SIZE(pp
->dev
->mtu
));
1064 if (!pp
->pool_long
) {
1065 netdev_info(pp
->dev
, "fail to obtain long pool for port\n");
1069 pp
->pool_long
->port_map
|= 1 << pp
->id
;
1071 mvneta_bm_pool_bufsize_set(pp
, pp
->pool_long
->buf_size
,
1074 /* If short pool id is not defined, assume using single pool */
1075 if (of_property_read_u32(dn
, "bm,pool-short", &short_pool_id
))
1076 short_pool_id
= long_pool_id
;
1078 /* Create port's short pool */
1079 pp
->pool_short
= mvneta_bm_pool_use(pp
->bm_priv
, short_pool_id
,
1080 MVNETA_BM_SHORT
, pp
->id
,
1081 MVNETA_BM_SHORT_PKT_SIZE
);
1082 if (!pp
->pool_short
) {
1083 netdev_info(pp
->dev
, "fail to obtain short pool for port\n");
1084 mvneta_bm_pool_destroy(pp
->bm_priv
, pp
->pool_long
, 1 << pp
->id
);
1088 if (short_pool_id
!= long_pool_id
) {
1089 pp
->pool_short
->port_map
|= 1 << pp
->id
;
1090 mvneta_bm_pool_bufsize_set(pp
, pp
->pool_short
->buf_size
,
1091 pp
->pool_short
->id
);
1097 /* Update settings of a pool for bigger packets */
1098 static void mvneta_bm_update_mtu(struct mvneta_port
*pp
, int mtu
)
1100 struct mvneta_bm_pool
*bm_pool
= pp
->pool_long
;
1101 struct hwbm_pool
*hwbm_pool
= &bm_pool
->hwbm_pool
;
1104 /* Release all buffers from long pool */
1105 mvneta_bm_bufs_free(pp
->bm_priv
, bm_pool
, 1 << pp
->id
);
1106 if (hwbm_pool
->buf_num
) {
1107 WARN(1, "cannot free all buffers in pool %d\n",
1112 bm_pool
->pkt_size
= MVNETA_RX_PKT_SIZE(mtu
);
1113 bm_pool
->buf_size
= MVNETA_RX_BUF_SIZE(bm_pool
->pkt_size
);
1114 hwbm_pool
->frag_size
= SKB_DATA_ALIGN(sizeof(struct skb_shared_info
)) +
1115 SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool
->pkt_size
));
1117 /* Fill entire long pool */
1118 num
= hwbm_pool_add(hwbm_pool
, hwbm_pool
->size
, GFP_ATOMIC
);
1119 if (num
!= hwbm_pool
->size
) {
1120 WARN(1, "pool %d: %d of %d allocated\n",
1121 bm_pool
->id
, num
, hwbm_pool
->size
);
1124 mvneta_bm_pool_bufsize_set(pp
, bm_pool
->buf_size
, bm_pool
->id
);
1129 mvneta_bm_pool_destroy(pp
->bm_priv
, pp
->pool_long
, 1 << pp
->id
);
1130 mvneta_bm_pool_destroy(pp
->bm_priv
, pp
->pool_short
, 1 << pp
->id
);
1133 mvreg_write(pp
, MVNETA_ACC_MODE
, MVNETA_ACC_MODE_EXT1
);
1134 netdev_info(pp
->dev
, "fail to update MTU, fall back to software BM\n");
1137 /* Start the Ethernet port RX and TX activity */
1138 static void mvneta_port_up(struct mvneta_port
*pp
)
1143 /* Enable all initialized TXs. */
1145 for (queue
= 0; queue
< txq_number
; queue
++) {
1146 struct mvneta_tx_queue
*txq
= &pp
->txqs
[queue
];
1148 q_map
|= (1 << queue
);
1150 mvreg_write(pp
, MVNETA_TXQ_CMD
, q_map
);
1153 /* Enable all initialized RXQs. */
1154 for (queue
= 0; queue
< rxq_number
; queue
++) {
1155 struct mvneta_rx_queue
*rxq
= &pp
->rxqs
[queue
];
1158 q_map
|= (1 << queue
);
1160 mvreg_write(pp
, MVNETA_RXQ_CMD
, q_map
);
1163 /* Stop the Ethernet port activity */
1164 static void mvneta_port_down(struct mvneta_port
*pp
)
1169 /* Stop Rx port activity. Check port Rx activity. */
1170 val
= mvreg_read(pp
, MVNETA_RXQ_CMD
) & MVNETA_RXQ_ENABLE_MASK
;
1172 /* Issue stop command for active channels only */
1174 mvreg_write(pp
, MVNETA_RXQ_CMD
,
1175 val
<< MVNETA_RXQ_DISABLE_SHIFT
);
1177 /* Wait for all Rx activity to terminate. */
1180 if (count
++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC
) {
1181 netdev_warn(pp
->dev
,
1182 "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n",
1188 val
= mvreg_read(pp
, MVNETA_RXQ_CMD
);
1189 } while (val
& MVNETA_RXQ_ENABLE_MASK
);
1191 /* Stop Tx port activity. Check port Tx activity. Issue stop
1192 * command for active channels only
1194 val
= (mvreg_read(pp
, MVNETA_TXQ_CMD
)) & MVNETA_TXQ_ENABLE_MASK
;
1197 mvreg_write(pp
, MVNETA_TXQ_CMD
,
1198 (val
<< MVNETA_TXQ_DISABLE_SHIFT
));
1200 /* Wait for all Tx activity to terminate. */
1203 if (count
++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC
) {
1204 netdev_warn(pp
->dev
,
1205 "TIMEOUT for TX stopped status=0x%08x\n",
1211 /* Check TX Command reg that all Txqs are stopped */
1212 val
= mvreg_read(pp
, MVNETA_TXQ_CMD
);
1214 } while (val
& MVNETA_TXQ_ENABLE_MASK
);
1216 /* Double check to verify that TX FIFO is empty */
1219 if (count
++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT
) {
1220 netdev_warn(pp
->dev
,
1221 "TX FIFO empty timeout status=0x%08x\n",
1227 val
= mvreg_read(pp
, MVNETA_PORT_STATUS
);
1228 } while (!(val
& MVNETA_TX_FIFO_EMPTY
) &&
1229 (val
& MVNETA_TX_IN_PRGRS
));
1234 /* Enable the port by setting the port enable bit of the MAC control register */
1235 static void mvneta_port_enable(struct mvneta_port
*pp
)
1240 val
= mvreg_read(pp
, MVNETA_GMAC_CTRL_0
);
1241 val
|= MVNETA_GMAC0_PORT_ENABLE
;
1242 mvreg_write(pp
, MVNETA_GMAC_CTRL_0
, val
);
1245 /* Disable the port and wait for about 200 usec before retuning */
1246 static void mvneta_port_disable(struct mvneta_port
*pp
)
1250 /* Reset the Enable bit in the Serial Control Register */
1251 val
= mvreg_read(pp
, MVNETA_GMAC_CTRL_0
);
1252 val
&= ~MVNETA_GMAC0_PORT_ENABLE
;
1253 mvreg_write(pp
, MVNETA_GMAC_CTRL_0
, val
);
1258 /* Multicast tables methods */
1260 /* Set all entries in Unicast MAC Table; queue==-1 means reject all */
1261 static void mvneta_set_ucast_table(struct mvneta_port
*pp
, int queue
)
1269 val
= 0x1 | (queue
<< 1);
1270 val
|= (val
<< 24) | (val
<< 16) | (val
<< 8);
1273 for (offset
= 0; offset
<= 0xc; offset
+= 4)
1274 mvreg_write(pp
, MVNETA_DA_FILT_UCAST_BASE
+ offset
, val
);
1277 /* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
1278 static void mvneta_set_special_mcast_table(struct mvneta_port
*pp
, int queue
)
1286 val
= 0x1 | (queue
<< 1);
1287 val
|= (val
<< 24) | (val
<< 16) | (val
<< 8);
1290 for (offset
= 0; offset
<= 0xfc; offset
+= 4)
1291 mvreg_write(pp
, MVNETA_DA_FILT_SPEC_MCAST
+ offset
, val
);
1295 /* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
1296 static void mvneta_set_other_mcast_table(struct mvneta_port
*pp
, int queue
)
1302 memset(pp
->mcast_count
, 0, sizeof(pp
->mcast_count
));
1305 memset(pp
->mcast_count
, 1, sizeof(pp
->mcast_count
));
1306 val
= 0x1 | (queue
<< 1);
1307 val
|= (val
<< 24) | (val
<< 16) | (val
<< 8);
1310 for (offset
= 0; offset
<= 0xfc; offset
+= 4)
1311 mvreg_write(pp
, MVNETA_DA_FILT_OTH_MCAST
+ offset
, val
);
1314 static void mvneta_percpu_unmask_interrupt(void *arg
)
1316 struct mvneta_port
*pp
= arg
;
1318 /* All the queue are unmasked, but actually only the ones
1319 * mapped to this CPU will be unmasked
1321 mvreg_write(pp
, MVNETA_INTR_NEW_MASK
,
1322 MVNETA_RX_INTR_MASK_ALL
|
1323 MVNETA_TX_INTR_MASK_ALL
|
1324 MVNETA_MISCINTR_INTR_MASK
);
1327 static void mvneta_percpu_mask_interrupt(void *arg
)
1329 struct mvneta_port
*pp
= arg
;
1331 /* All the queue are masked, but actually only the ones
1332 * mapped to this CPU will be masked
1334 mvreg_write(pp
, MVNETA_INTR_NEW_MASK
, 0);
1335 mvreg_write(pp
, MVNETA_INTR_OLD_MASK
, 0);
1336 mvreg_write(pp
, MVNETA_INTR_MISC_MASK
, 0);
1339 static void mvneta_percpu_clear_intr_cause(void *arg
)
1341 struct mvneta_port
*pp
= arg
;
1343 /* All the queue are cleared, but actually only the ones
1344 * mapped to this CPU will be cleared
1346 mvreg_write(pp
, MVNETA_INTR_NEW_CAUSE
, 0);
1347 mvreg_write(pp
, MVNETA_INTR_MISC_CAUSE
, 0);
1348 mvreg_write(pp
, MVNETA_INTR_OLD_CAUSE
, 0);
1351 /* This method sets defaults to the NETA port:
1352 * Clears interrupt Cause and Mask registers.
1353 * Clears all MAC tables.
1354 * Sets defaults to all registers.
1355 * Resets RX and TX descriptor rings.
1357 * This method can be called after mvneta_port_down() to return the port
1358 * settings to defaults.
1360 static void mvneta_defaults_set(struct mvneta_port
*pp
)
1365 int max_cpu
= num_present_cpus();
1367 /* Clear all Cause registers */
1368 on_each_cpu(mvneta_percpu_clear_intr_cause
, pp
, true);
1370 /* Mask all interrupts */
1371 on_each_cpu(mvneta_percpu_mask_interrupt
, pp
, true);
1372 mvreg_write(pp
, MVNETA_INTR_ENABLE
, 0);
1374 /* Enable MBUS Retry bit16 */
1375 mvreg_write(pp
, MVNETA_MBUS_RETRY
, 0x20);
1377 /* Set CPU queue access map. CPUs are assigned to the RX and
1378 * TX queues modulo their number. If there is only one TX
1379 * queue then it is assigned to the CPU associated to the
1382 for_each_present_cpu(cpu
) {
1383 int rxq_map
= 0, txq_map
= 0;
1385 if (!pp
->neta_armada3700
) {
1386 for (rxq
= 0; rxq
< rxq_number
; rxq
++)
1387 if ((rxq
% max_cpu
) == cpu
)
1388 rxq_map
|= MVNETA_CPU_RXQ_ACCESS(rxq
);
1390 for (txq
= 0; txq
< txq_number
; txq
++)
1391 if ((txq
% max_cpu
) == cpu
)
1392 txq_map
|= MVNETA_CPU_TXQ_ACCESS(txq
);
1394 /* With only one TX queue we configure a special case
1395 * which will allow to get all the irq on a single
1398 if (txq_number
== 1)
1399 txq_map
= (cpu
== pp
->rxq_def
) ?
1400 MVNETA_CPU_TXQ_ACCESS(1) : 0;
1403 txq_map
= MVNETA_CPU_TXQ_ACCESS_ALL_MASK
;
1404 rxq_map
= MVNETA_CPU_RXQ_ACCESS_ALL_MASK
;
1407 mvreg_write(pp
, MVNETA_CPU_MAP(cpu
), rxq_map
| txq_map
);
1410 /* Reset RX and TX DMAs */
1411 mvreg_write(pp
, MVNETA_PORT_RX_RESET
, MVNETA_PORT_RX_DMA_RESET
);
1412 mvreg_write(pp
, MVNETA_PORT_TX_RESET
, MVNETA_PORT_TX_DMA_RESET
);
1414 /* Disable Legacy WRR, Disable EJP, Release from reset */
1415 mvreg_write(pp
, MVNETA_TXQ_CMD_1
, 0);
1416 for (queue
= 0; queue
< txq_number
; queue
++) {
1417 mvreg_write(pp
, MVETH_TXQ_TOKEN_COUNT_REG(queue
), 0);
1418 mvreg_write(pp
, MVETH_TXQ_TOKEN_CFG_REG(queue
), 0);
1421 mvreg_write(pp
, MVNETA_PORT_TX_RESET
, 0);
1422 mvreg_write(pp
, MVNETA_PORT_RX_RESET
, 0);
1424 /* Set Port Acceleration Mode */
1426 /* HW buffer management + legacy parser */
1427 val
= MVNETA_ACC_MODE_EXT2
;
1429 /* SW buffer management + legacy parser */
1430 val
= MVNETA_ACC_MODE_EXT1
;
1431 mvreg_write(pp
, MVNETA_ACC_MODE
, val
);
1434 mvreg_write(pp
, MVNETA_BM_ADDRESS
, pp
->bm_priv
->bppi_phys_addr
);
1436 /* Update val of portCfg register accordingly with all RxQueue types */
1437 val
= MVNETA_PORT_CONFIG_DEFL_VALUE(pp
->rxq_def
);
1438 mvreg_write(pp
, MVNETA_PORT_CONFIG
, val
);
1441 mvreg_write(pp
, MVNETA_PORT_CONFIG_EXTEND
, val
);
1442 mvreg_write(pp
, MVNETA_RX_MIN_FRAME_SIZE
, 64);
1444 /* Build PORT_SDMA_CONFIG_REG */
1447 /* Default burst size */
1448 val
|= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16
);
1449 val
|= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16
);
1450 val
|= MVNETA_RX_NO_DATA_SWAP
| MVNETA_TX_NO_DATA_SWAP
;
1452 #if defined(__BIG_ENDIAN)
1453 val
|= MVNETA_DESC_SWAP
;
1456 /* Assign port SDMA configuration */
1457 mvreg_write(pp
, MVNETA_SDMA_CONFIG
, val
);
1459 /* Disable PHY polling in hardware, since we're using the
1460 * kernel phylib to do this.
1462 val
= mvreg_read(pp
, MVNETA_UNIT_CONTROL
);
1463 val
&= ~MVNETA_PHY_POLLING_ENABLE
;
1464 mvreg_write(pp
, MVNETA_UNIT_CONTROL
, val
);
1466 mvneta_set_ucast_table(pp
, -1);
1467 mvneta_set_special_mcast_table(pp
, -1);
1468 mvneta_set_other_mcast_table(pp
, -1);
1470 /* Set port interrupt enable register - default enable all */
1471 mvreg_write(pp
, MVNETA_INTR_ENABLE
,
1472 (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
1473 | MVNETA_TXQ_INTR_ENABLE_ALL_MASK
));
1475 mvneta_mib_counters_clear(pp
);
1478 /* Set max sizes for tx queues */
1479 static void mvneta_txq_max_tx_size_set(struct mvneta_port
*pp
, int max_tx_size
)
1485 mtu
= max_tx_size
* 8;
1486 if (mtu
> MVNETA_TX_MTU_MAX
)
1487 mtu
= MVNETA_TX_MTU_MAX
;
1490 val
= mvreg_read(pp
, MVNETA_TX_MTU
);
1491 val
&= ~MVNETA_TX_MTU_MAX
;
1493 mvreg_write(pp
, MVNETA_TX_MTU
, val
);
1495 /* TX token size and all TXQs token size must be larger that MTU */
1496 val
= mvreg_read(pp
, MVNETA_TX_TOKEN_SIZE
);
1498 size
= val
& MVNETA_TX_TOKEN_SIZE_MAX
;
1501 val
&= ~MVNETA_TX_TOKEN_SIZE_MAX
;
1503 mvreg_write(pp
, MVNETA_TX_TOKEN_SIZE
, val
);
1505 for (queue
= 0; queue
< txq_number
; queue
++) {
1506 val
= mvreg_read(pp
, MVNETA_TXQ_TOKEN_SIZE_REG(queue
));
1508 size
= val
& MVNETA_TXQ_TOKEN_SIZE_MAX
;
1511 val
&= ~MVNETA_TXQ_TOKEN_SIZE_MAX
;
1513 mvreg_write(pp
, MVNETA_TXQ_TOKEN_SIZE_REG(queue
), val
);
1518 /* Set unicast address */
1519 static void mvneta_set_ucast_addr(struct mvneta_port
*pp
, u8 last_nibble
,
1522 unsigned int unicast_reg
;
1523 unsigned int tbl_offset
;
1524 unsigned int reg_offset
;
1526 /* Locate the Unicast table entry */
1527 last_nibble
= (0xf & last_nibble
);
1529 /* offset from unicast tbl base */
1530 tbl_offset
= (last_nibble
/ 4) * 4;
1532 /* offset within the above reg */
1533 reg_offset
= last_nibble
% 4;
1535 unicast_reg
= mvreg_read(pp
, (MVNETA_DA_FILT_UCAST_BASE
+ tbl_offset
));
1538 /* Clear accepts frame bit at specified unicast DA tbl entry */
1539 unicast_reg
&= ~(0xff << (8 * reg_offset
));
1541 unicast_reg
&= ~(0xff << (8 * reg_offset
));
1542 unicast_reg
|= ((0x01 | (queue
<< 1)) << (8 * reg_offset
));
1545 mvreg_write(pp
, (MVNETA_DA_FILT_UCAST_BASE
+ tbl_offset
), unicast_reg
);
1548 /* Set mac address */
1549 static void mvneta_mac_addr_set(struct mvneta_port
*pp
, unsigned char *addr
,
1556 mac_l
= (addr
[4] << 8) | (addr
[5]);
1557 mac_h
= (addr
[0] << 24) | (addr
[1] << 16) |
1558 (addr
[2] << 8) | (addr
[3] << 0);
1560 mvreg_write(pp
, MVNETA_MAC_ADDR_LOW
, mac_l
);
1561 mvreg_write(pp
, MVNETA_MAC_ADDR_HIGH
, mac_h
);
1564 /* Accept frames of this address */
1565 mvneta_set_ucast_addr(pp
, addr
[5], queue
);
1568 /* Set the number of packets that will be received before RX interrupt
1569 * will be generated by HW.
1571 static void mvneta_rx_pkts_coal_set(struct mvneta_port
*pp
,
1572 struct mvneta_rx_queue
*rxq
, u32 value
)
1574 mvreg_write(pp
, MVNETA_RXQ_THRESHOLD_REG(rxq
->id
),
1575 value
| MVNETA_RXQ_NON_OCCUPIED(0));
1578 /* Set the time delay in usec before RX interrupt will be generated by
1581 static void mvneta_rx_time_coal_set(struct mvneta_port
*pp
,
1582 struct mvneta_rx_queue
*rxq
, u32 value
)
1585 unsigned long clk_rate
;
1587 clk_rate
= clk_get_rate(pp
->clk
);
1588 val
= (clk_rate
/ 1000000) * value
;
1590 mvreg_write(pp
, MVNETA_RXQ_TIME_COAL_REG(rxq
->id
), val
);
1593 /* Set threshold for TX_DONE pkts coalescing */
1594 static void mvneta_tx_done_pkts_coal_set(struct mvneta_port
*pp
,
1595 struct mvneta_tx_queue
*txq
, u32 value
)
1599 val
= mvreg_read(pp
, MVNETA_TXQ_SIZE_REG(txq
->id
));
1601 val
&= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK
;
1602 val
|= MVNETA_TXQ_SENT_THRESH_MASK(value
);
1604 mvreg_write(pp
, MVNETA_TXQ_SIZE_REG(txq
->id
), val
);
1607 /* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
1608 static void mvneta_rx_desc_fill(struct mvneta_rx_desc
*rx_desc
,
1609 u32 phys_addr
, void *virt_addr
,
1610 struct mvneta_rx_queue
*rxq
)
1614 rx_desc
->buf_phys_addr
= phys_addr
;
1615 i
= rx_desc
- rxq
->descs
;
1616 rxq
->buf_virt_addr
[i
] = virt_addr
;
1619 /* Decrement sent descriptors counter */
1620 static void mvneta_txq_sent_desc_dec(struct mvneta_port
*pp
,
1621 struct mvneta_tx_queue
*txq
,
1626 /* Only 255 TX descriptors can be updated at once */
1627 while (sent_desc
> 0xff) {
1628 val
= 0xff << MVNETA_TXQ_DEC_SENT_SHIFT
;
1629 mvreg_write(pp
, MVNETA_TXQ_UPDATE_REG(txq
->id
), val
);
1630 sent_desc
= sent_desc
- 0xff;
1633 val
= sent_desc
<< MVNETA_TXQ_DEC_SENT_SHIFT
;
1634 mvreg_write(pp
, MVNETA_TXQ_UPDATE_REG(txq
->id
), val
);
1637 /* Get number of TX descriptors already sent by HW */
1638 static int mvneta_txq_sent_desc_num_get(struct mvneta_port
*pp
,
1639 struct mvneta_tx_queue
*txq
)
1644 val
= mvreg_read(pp
, MVNETA_TXQ_STATUS_REG(txq
->id
));
1645 sent_desc
= (val
& MVNETA_TXQ_SENT_DESC_MASK
) >>
1646 MVNETA_TXQ_SENT_DESC_SHIFT
;
1651 /* Get number of sent descriptors and decrement counter.
1652 * The number of sent descriptors is returned.
1654 static int mvneta_txq_sent_desc_proc(struct mvneta_port
*pp
,
1655 struct mvneta_tx_queue
*txq
)
1659 /* Get number of sent descriptors */
1660 sent_desc
= mvneta_txq_sent_desc_num_get(pp
, txq
);
1662 /* Decrement sent descriptors counter */
1664 mvneta_txq_sent_desc_dec(pp
, txq
, sent_desc
);
1669 /* Set TXQ descriptors fields relevant for CSUM calculation */
1670 static u32
mvneta_txq_desc_csum(int l3_offs
, int l3_proto
,
1671 int ip_hdr_len
, int l4_proto
)
1675 /* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
1676 * G_L4_chk, L4_type; required only for checksum
1679 command
= l3_offs
<< MVNETA_TX_L3_OFF_SHIFT
;
1680 command
|= ip_hdr_len
<< MVNETA_TX_IP_HLEN_SHIFT
;
1682 if (l3_proto
== htons(ETH_P_IP
))
1683 command
|= MVNETA_TXD_IP_CSUM
;
1685 command
|= MVNETA_TX_L3_IP6
;
1687 if (l4_proto
== IPPROTO_TCP
)
1688 command
|= MVNETA_TX_L4_CSUM_FULL
;
1689 else if (l4_proto
== IPPROTO_UDP
)
1690 command
|= MVNETA_TX_L4_UDP
| MVNETA_TX_L4_CSUM_FULL
;
1692 command
|= MVNETA_TX_L4_CSUM_NOT
;
1698 /* Display more error info */
1699 static void mvneta_rx_error(struct mvneta_port
*pp
,
1700 struct mvneta_rx_desc
*rx_desc
)
1702 u32 status
= rx_desc
->status
;
1704 switch (status
& MVNETA_RXD_ERR_CODE_MASK
) {
1705 case MVNETA_RXD_ERR_CRC
:
1706 netdev_err(pp
->dev
, "bad rx status %08x (crc error), size=%d\n",
1707 status
, rx_desc
->data_size
);
1709 case MVNETA_RXD_ERR_OVERRUN
:
1710 netdev_err(pp
->dev
, "bad rx status %08x (overrun error), size=%d\n",
1711 status
, rx_desc
->data_size
);
1713 case MVNETA_RXD_ERR_LEN
:
1714 netdev_err(pp
->dev
, "bad rx status %08x (max frame length error), size=%d\n",
1715 status
, rx_desc
->data_size
);
1717 case MVNETA_RXD_ERR_RESOURCE
:
1718 netdev_err(pp
->dev
, "bad rx status %08x (resource error), size=%d\n",
1719 status
, rx_desc
->data_size
);
1724 /* Handle RX checksum offload based on the descriptor's status */
1725 static void mvneta_rx_csum(struct mvneta_port
*pp
, u32 status
,
1726 struct sk_buff
*skb
)
1728 if ((pp
->dev
->features
& NETIF_F_RXCSUM
) &&
1729 (status
& MVNETA_RXD_L3_IP4
) &&
1730 (status
& MVNETA_RXD_L4_CSUM_OK
)) {
1732 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1736 skb
->ip_summed
= CHECKSUM_NONE
;
1739 /* Return tx queue pointer (find last set bit) according to <cause> returned
1740 * form tx_done reg. <cause> must not be null. The return value is always a
1741 * valid queue for matching the first one found in <cause>.
1743 static struct mvneta_tx_queue
*mvneta_tx_done_policy(struct mvneta_port
*pp
,
1746 int queue
= fls(cause
) - 1;
1748 return &pp
->txqs
[queue
];
1751 /* Free tx queue skbuffs */
1752 static void mvneta_txq_bufs_free(struct mvneta_port
*pp
,
1753 struct mvneta_tx_queue
*txq
, int num
,
1754 struct netdev_queue
*nq
)
1756 unsigned int bytes_compl
= 0, pkts_compl
= 0;
1759 for (i
= 0; i
< num
; i
++) {
1760 struct mvneta_tx_desc
*tx_desc
= txq
->descs
+
1762 struct sk_buff
*skb
= txq
->tx_skb
[txq
->txq_get_index
];
1765 bytes_compl
+= skb
->len
;
1769 mvneta_txq_inc_get(txq
);
1771 if (!IS_TSO_HEADER(txq
, tx_desc
->buf_phys_addr
))
1772 dma_unmap_single(pp
->dev
->dev
.parent
,
1773 tx_desc
->buf_phys_addr
,
1774 tx_desc
->data_size
, DMA_TO_DEVICE
);
1777 dev_kfree_skb_any(skb
);
1780 netdev_tx_completed_queue(nq
, pkts_compl
, bytes_compl
);
1783 /* Handle end of transmission */
1784 static void mvneta_txq_done(struct mvneta_port
*pp
,
1785 struct mvneta_tx_queue
*txq
)
1787 struct netdev_queue
*nq
= netdev_get_tx_queue(pp
->dev
, txq
->id
);
1790 tx_done
= mvneta_txq_sent_desc_proc(pp
, txq
);
1794 mvneta_txq_bufs_free(pp
, txq
, tx_done
, nq
);
1796 txq
->count
-= tx_done
;
1798 if (netif_tx_queue_stopped(nq
)) {
1799 if (txq
->count
<= txq
->tx_wake_threshold
)
1800 netif_tx_wake_queue(nq
);
1804 /* Refill processing for SW buffer management */
1805 /* Allocate page per descriptor */
1806 static int mvneta_rx_refill(struct mvneta_port
*pp
,
1807 struct mvneta_rx_desc
*rx_desc
,
1808 struct mvneta_rx_queue
*rxq
,
1811 dma_addr_t phys_addr
;
1814 page
= __dev_alloc_page(gfp_mask
);
1818 /* map page for use */
1819 phys_addr
= dma_map_page(pp
->dev
->dev
.parent
, page
, 0, PAGE_SIZE
,
1821 if (unlikely(dma_mapping_error(pp
->dev
->dev
.parent
, phys_addr
))) {
1826 phys_addr
+= pp
->rx_offset_correction
;
1827 mvneta_rx_desc_fill(rx_desc
, phys_addr
, page
, rxq
);
1831 /* Handle tx checksum */
1832 static u32
mvneta_skb_tx_csum(struct mvneta_port
*pp
, struct sk_buff
*skb
)
1834 if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
1836 __be16 l3_proto
= vlan_get_protocol(skb
);
1839 if (l3_proto
== htons(ETH_P_IP
)) {
1840 struct iphdr
*ip4h
= ip_hdr(skb
);
1842 /* Calculate IPv4 checksum and L4 checksum */
1843 ip_hdr_len
= ip4h
->ihl
;
1844 l4_proto
= ip4h
->protocol
;
1845 } else if (l3_proto
== htons(ETH_P_IPV6
)) {
1846 struct ipv6hdr
*ip6h
= ipv6_hdr(skb
);
1848 /* Read l4_protocol from one of IPv6 extra headers */
1849 if (skb_network_header_len(skb
) > 0)
1850 ip_hdr_len
= (skb_network_header_len(skb
) >> 2);
1851 l4_proto
= ip6h
->nexthdr
;
1853 return MVNETA_TX_L4_CSUM_NOT
;
1855 return mvneta_txq_desc_csum(skb_network_offset(skb
),
1856 l3_proto
, ip_hdr_len
, l4_proto
);
1859 return MVNETA_TX_L4_CSUM_NOT
;
1862 /* Drop packets received by the RXQ and free buffers */
1863 static void mvneta_rxq_drop_pkts(struct mvneta_port
*pp
,
1864 struct mvneta_rx_queue
*rxq
)
1868 rx_done
= mvneta_rxq_busy_desc_num_get(pp
, rxq
);
1870 mvneta_rxq_desc_num_update(pp
, rxq
, rx_done
, rx_done
);
1873 for (i
= 0; i
< rx_done
; i
++) {
1874 struct mvneta_rx_desc
*rx_desc
=
1875 mvneta_rxq_next_desc_get(rxq
);
1876 u8 pool_id
= MVNETA_RX_GET_BM_POOL_ID(rx_desc
);
1877 struct mvneta_bm_pool
*bm_pool
;
1879 bm_pool
= &pp
->bm_priv
->bm_pools
[pool_id
];
1880 /* Return dropped buffer to the pool */
1881 mvneta_bm_pool_put_bp(pp
->bm_priv
, bm_pool
,
1882 rx_desc
->buf_phys_addr
);
1887 for (i
= 0; i
< rxq
->size
; i
++) {
1888 struct mvneta_rx_desc
*rx_desc
= rxq
->descs
+ i
;
1889 void *data
= rxq
->buf_virt_addr
[i
];
1890 if (!data
|| !(rx_desc
->buf_phys_addr
))
1893 dma_unmap_single(pp
->dev
->dev
.parent
, rx_desc
->buf_phys_addr
,
1894 MVNETA_RX_BUF_SIZE(pp
->pkt_size
), DMA_FROM_DEVICE
);
1900 int mvneta_rx_refill_queue(struct mvneta_port
*pp
, struct mvneta_rx_queue
*rxq
)
1902 struct mvneta_rx_desc
*rx_desc
;
1903 int curr_desc
= rxq
->first_to_refill
;
1906 for (i
= 0; (i
< rxq
->refill_num
) && (i
< 64); i
++) {
1907 rx_desc
= rxq
->descs
+ curr_desc
;
1908 if (!(rx_desc
->buf_phys_addr
)) {
1909 if (mvneta_rx_refill(pp
, rx_desc
, rxq
, GFP_ATOMIC
)) {
1910 pr_err("Can't refill queue %d. Done %d from %d\n",
1911 rxq
->id
, i
, rxq
->refill_num
);
1916 curr_desc
= MVNETA_QUEUE_NEXT_DESC(rxq
, curr_desc
);
1918 rxq
->refill_num
-= i
;
1919 rxq
->first_to_refill
= curr_desc
;
/* Main rx processing when using software buffer management */
static int mvneta_rx_swbm(struct napi_struct *napi,
			  struct mvneta_port *pp, int budget,
			  struct mvneta_rx_queue *rxq)
{
	struct net_device *dev = pp->dev;
	int rx_todo, rx_proc;
	int refill = 0;
	u32 rcvd_pkts = 0;
	u32 rcvd_bytes = 0;

	/* Get number of received packets */
	rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
	rx_proc = 0;

	/* Fairness NAPI loop */
	while ((rcvd_pkts < budget) && (rx_proc < rx_todo)) {
		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
		unsigned char *data;
		struct page *page;
		dma_addr_t phys_addr;
		u32 rx_status, index;
		int rx_bytes, skb_size, copy_size;
		int frag_num, frag_size, frag_offset;

		index = rx_desc - rxq->descs;
		page = (struct page *)rxq->buf_virt_addr[index];
		data = page_address(page);
		/* Prefetch header */
		prefetch(data);

		phys_addr = rx_desc->buf_phys_addr;
		rx_status = rx_desc->status;
		rx_proc++;
		rxq->refill_num++;

		if (rx_status & MVNETA_RXD_FIRST_DESC) {
			/* Check errors only for FIRST descriptor */
			if (rx_status & MVNETA_RXD_ERR_SUMMARY) {
				mvneta_rx_error(pp, rx_desc);
				dev->stats.rx_errors++;
				/* leave the descriptor untouched */
				continue;
			}
			rx_bytes = rx_desc->data_size -
				   (ETH_FCS_LEN + MVNETA_MH_SIZE);

			/* Allocate small skb for each new packet */
			skb_size = max(rx_copybreak, rx_header_size);
			rxq->skb = netdev_alloc_skb_ip_align(dev, skb_size);
			if (unlikely(!rxq->skb)) {
				netdev_err(dev,
					   "Can't allocate skb on queue %d\n",
					   rxq->id);
				dev->stats.rx_dropped++;
				rxq->skb_alloc_err++;
				continue;
			}
			copy_size = min(skb_size, rx_bytes);

			/* Copy data from buffer to SKB, skip Marvell header */
			memcpy(rxq->skb->data, data + MVNETA_MH_SIZE,
			       copy_size);
			skb_put(rxq->skb, copy_size);
			rxq->left_size = rx_bytes - copy_size;

			mvneta_rx_csum(pp, rx_status, rxq->skb);
			if (rxq->left_size == 0) {
				int size = copy_size + MVNETA_MH_SIZE;

				dma_sync_single_range_for_cpu(dev->dev.parent,
							      phys_addr, 0,
							      size,
							      DMA_FROM_DEVICE);
				/* leave the descriptor and buffer untouched */
			} else {
				/* refill descriptor with new buffer later */
				rx_desc->buf_phys_addr = 0;

				frag_num = 0;
				frag_offset = copy_size + MVNETA_MH_SIZE;
				frag_size = min(rxq->left_size,
						(int)(PAGE_SIZE - frag_offset));
				skb_add_rx_frag(rxq->skb, frag_num, page,
						frag_offset, frag_size,
						PAGE_SIZE);
				dma_unmap_single(dev->dev.parent, phys_addr,
						 PAGE_SIZE, DMA_FROM_DEVICE);
				rxq->left_size -= frag_size;
			}
		} else {
			/* Middle or Last descriptor */
			if (unlikely(!rxq->skb)) {
				pr_debug("no skb for rx_status 0x%x\n",
					 rx_status);
				continue;
			}
			if (!rxq->left_size) {
				/* last descriptor has only FCS */
				/* and can be discarded */
				dma_sync_single_range_for_cpu(dev->dev.parent,
							      phys_addr, 0,
							      ETH_FCS_LEN,
							      DMA_FROM_DEVICE);
				/* leave the descriptor and buffer untouched */
			} else {
				/* refill descriptor with new buffer later */
				rx_desc->buf_phys_addr = 0;

				frag_num = skb_shinfo(rxq->skb)->nr_frags;
				frag_offset = 0;
				frag_size = min(rxq->left_size,
						(int)(PAGE_SIZE - frag_offset));
				skb_add_rx_frag(rxq->skb, frag_num, page,
						frag_offset, frag_size,
						PAGE_SIZE);

				dma_unmap_single(dev->dev.parent, phys_addr,
						 PAGE_SIZE, DMA_FROM_DEVICE);

				rxq->left_size -= frag_size;
			}
		} /* Middle or Last descriptor */

		if (!(rx_status & MVNETA_RXD_LAST_DESC))
			/* no last descriptor this time */
			continue;

		if (rxq->left_size) {
			pr_err("get last desc, but left_size (%d) != 0\n",
			       rxq->left_size);
			dev_kfree_skb_any(rxq->skb);
			rxq->left_size = 0;
			rxq->skb = NULL;
			continue;
		}
		rcvd_pkts++;
		rcvd_bytes += rxq->skb->len;

		/* Linux processing */
		rxq->skb->protocol = eth_type_trans(rxq->skb, dev);

		napi_gro_receive(napi, rxq->skb);

		/* clean incomplete skb pointer in queue */
		rxq->skb = NULL;
		rxq->left_size = 0;
	}

	if (rcvd_pkts) {
		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets += rcvd_pkts;
		stats->rx_bytes += rcvd_bytes;
		u64_stats_update_end(&stats->syncp);
	}

	/* return some buffers to hardware queue, one at a time is too slow */
	refill = mvneta_rx_refill_queue(pp, rxq);

	/* Update rxq management counters */
	mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill);

	return rcvd_pkts;
}

/* Main rx processing when using hardware buffer management */
static int mvneta_rx_hwbm(struct napi_struct *napi,
			  struct mvneta_port *pp, int rx_todo,
			  struct mvneta_rx_queue *rxq)
{
	struct net_device *dev = pp->dev;
	int rx_done;
	u32 rcvd_pkts = 0;
	u32 rcvd_bytes = 0;

	/* Get number of received packets */
	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);

	if (rx_todo > rx_done)
		rx_todo = rx_done;

	rx_done = 0;

	/* Fairness NAPI loop */
	while (rx_done < rx_todo) {
		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
		struct mvneta_bm_pool *bm_pool = NULL;
		struct sk_buff *skb;
		unsigned char *data;
		dma_addr_t phys_addr;
		u32 rx_status, frag_size;
		int rx_bytes, err;
		u8 pool_id;

		rx_done++;
		rx_status = rx_desc->status;
		rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
		data = (u8 *)(uintptr_t)rx_desc->buf_cookie;
		phys_addr = rx_desc->buf_phys_addr;
		pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
		bm_pool = &pp->bm_priv->bm_pools[pool_id];

		if (!mvneta_rxq_desc_is_first_last(rx_status) ||
		    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
err_drop_frame_ret_pool:
			/* Return the buffer to the pool */
			mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
					      rx_desc->buf_phys_addr);
err_drop_frame:
			dev->stats.rx_errors++;
			mvneta_rx_error(pp, rx_desc);
			/* leave the descriptor untouched */
			continue;
		}

		if (rx_bytes <= rx_copybreak) {
			/* better copy a small frame and not unmap the DMA region */
			skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
			if (unlikely(!skb))
				goto err_drop_frame_ret_pool;

			dma_sync_single_range_for_cpu(dev->dev.parent,
						      rx_desc->buf_phys_addr,
						      MVNETA_MH_SIZE + NET_SKB_PAD,
						      rx_bytes,
						      DMA_FROM_DEVICE);
			skb_put_data(skb, data + MVNETA_MH_SIZE + NET_SKB_PAD,
				     rx_bytes);

			skb->protocol = eth_type_trans(skb, dev);
			mvneta_rx_csum(pp, rx_status, skb);
			napi_gro_receive(napi, skb);

			rcvd_pkts++;
			rcvd_bytes += rx_bytes;

			/* Return the buffer to the pool */
			mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
					      rx_desc->buf_phys_addr);

			/* leave the descriptor and buffer untouched */
			continue;
		}

		/* Refill processing */
		err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC);
		if (err) {
			netdev_err(dev, "Linux processing - Can't refill\n");
			goto err_drop_frame_ret_pool;
		}

		frag_size = bm_pool->hwbm_pool.frag_size;

		skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);

		/* After refill old buffer has to be unmapped regardless
		 * the skb is successfully built or not.
		 */
		dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);
		if (!skb)
			goto err_drop_frame;

		rcvd_pkts++;
		rcvd_bytes += rx_bytes;

		/* Linux processing */
		skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
		skb_put(skb, rx_bytes);

		skb->protocol = eth_type_trans(skb, dev);

		mvneta_rx_csum(pp, rx_status, skb);

		napi_gro_receive(napi, skb);
	}

	if (rcvd_pkts) {
		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets += rcvd_pkts;
		stats->rx_bytes += rcvd_bytes;
		u64_stats_update_end(&stats->syncp);
	}

	/* Update rxq management counters */
	mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);

	return rx_done;
}

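/* TSO transmission uses two kinds of descriptors: header descriptors point
 * into the per-queue tso_hdrs DMA area (one TSO_HEADER_SIZE slot per ring
 * entry) and carry the F_DESC flag, while payload descriptors map the skb
 * data directly and the last one of each segment carries L_DESC.
 */
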
static void
mvneta_tso_put_hdr(struct sk_buff *skb,
		   struct mvneta_port *pp, struct mvneta_tx_queue *txq)
{
	struct mvneta_tx_desc *tx_desc;
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

	txq->tx_skb[txq->txq_put_index] = NULL;
	tx_desc = mvneta_txq_next_desc_get(txq);
	tx_desc->data_size = hdr_len;
	tx_desc->command = mvneta_skb_tx_csum(pp, skb);
	tx_desc->command |= MVNETA_TXD_F_DESC;
	tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
				 txq->txq_put_index * TSO_HEADER_SIZE;
	mvneta_txq_inc_put(txq);
}

static int
mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
		    struct sk_buff *skb, char *data, int size,
		    bool last_tcp, bool is_last)
{
	struct mvneta_tx_desc *tx_desc;

	tx_desc = mvneta_txq_next_desc_get(txq);
	tx_desc->data_size = size;
	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
						size, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent,
				       tx_desc->buf_phys_addr))) {
		mvneta_txq_desc_put(txq);
		return -ENOMEM;
	}

	tx_desc->command = 0;
	txq->tx_skb[txq->txq_put_index] = NULL;

	if (last_tcp) {
		/* last descriptor in the TCP packet */
		tx_desc->command = MVNETA_TXD_L_DESC;

		/* last descriptor in SKB */
		if (is_last)
			txq->tx_skb[txq->txq_put_index] = skb;
	}
	mvneta_txq_inc_put(txq);
	return 0;
}

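/* The segmentation loop below walks the skb in gso_size sized chunks: for
 * every segment a fresh MAC/IP/TCP header is built with tso_build_hdr(),
 * then the payload is consumed piecewise with tso_build_data() until the
 * segment's data_left reaches zero.
 */
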
static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
			 struct mvneta_tx_queue *txq)
{
	int total_len, data_left;
	int desc_count = 0;
	struct mvneta_port *pp = netdev_priv(dev);
	struct tso_t tso;
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int i;

	/* Count needed descriptors */
	if ((txq->count + tso_count_descs(skb)) >= txq->size)
		return 0;

	if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
		pr_info("*** Is this even possible???!?!?\n");
		return 0;
	}

	/* Initialize the TSO handler, and prepare the first payload */
	tso_start(skb, &tso);

	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char *hdr;

		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;
		desc_count++;

		/* prepare packet headers: MAC + IP + TCP */
		hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);

		mvneta_tso_put_hdr(skb, pp, txq);

		while (data_left > 0) {
			int size;

			desc_count++;
			size = min_t(int, tso.size, data_left);

			if (mvneta_tso_put_data(dev, txq, skb,
						tso.data, size,
						size == data_left,
						total_len == 0))
				goto err_release;
			data_left -= size;

			tso_build_data(skb, &tso, size);
		}
	}

	return desc_count;

err_release:
	/* Release all used data descriptors; header descriptors must not
	 * be DMA-unmapped.
	 */
	for (i = desc_count - 1; i >= 0; i--) {
		struct mvneta_tx_desc *tx_desc = txq->descs + i;

		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
			dma_unmap_single(pp->dev->dev.parent,
					 tx_desc->buf_phys_addr,
					 tx_desc->data_size,
					 DMA_TO_DEVICE);
		mvneta_txq_desc_put(txq);
	}
	return 0;
}

/* Handle tx fragmentation processing */
static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
				  struct mvneta_tx_queue *txq)
{
	struct mvneta_tx_desc *tx_desc;
	int i, nr_frags = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		void *addr = page_address(frag->page.p) + frag->page_offset;

		tx_desc = mvneta_txq_next_desc_get(txq);
		tx_desc->data_size = frag->size;

		tx_desc->buf_phys_addr =
			dma_map_single(pp->dev->dev.parent, addr,
				       tx_desc->data_size, DMA_TO_DEVICE);

		if (dma_mapping_error(pp->dev->dev.parent,
				      tx_desc->buf_phys_addr)) {
			mvneta_txq_desc_put(txq);
			goto error;
		}

		if (i == nr_frags - 1) {
			/* Last descriptor */
			tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
			txq->tx_skb[txq->txq_put_index] = skb;
		} else {
			/* Descriptor in the middle: Not First, Not Last */
			tx_desc->command = 0;
			txq->tx_skb[txq->txq_put_index] = NULL;
		}
		mvneta_txq_inc_put(txq);
	}

	return 0;

error:
	/* Release all descriptors that were used to map fragments of
	 * this packet, as well as the corresponding DMA mappings
	 */
	for (i = i - 1; i >= 0; i--) {
		tx_desc = txq->descs + i;
		dma_unmap_single(pp->dev->dev.parent,
				 tx_desc->buf_phys_addr,
				 tx_desc->data_size,
				 DMA_TO_DEVICE);
		mvneta_txq_desc_put(txq);
	}

	return -ENOMEM;
}

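/* A linear skb is sent with a single descriptor flagged First+Last+pad
 * (MVNETA_TXD_FLZ_DESC); a fragmented skb uses a First descriptor for the
 * linear part and mvneta_tx_frag_process() for the remaining fragments,
 * and only the descriptor holding the last fragment keeps a pointer to the
 * skb so that tx-done processing frees it exactly once.
 */
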
/* Main tx processing */
static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	u16 txq_id = skb_get_queue_mapping(skb);
	struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
	struct mvneta_tx_desc *tx_desc;
	int len = skb->len;
	int frags = 0;
	u32 tx_cmd;

	if (!netif_running(dev))
		goto out;

	if (skb_is_gso(skb)) {
		frags = mvneta_tx_tso(skb, dev, txq);
		goto out;
	}

	frags = skb_shinfo(skb)->nr_frags + 1;

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvneta_txq_next_desc_get(txq);

	tx_cmd = mvneta_skb_tx_csum(pp, skb);

	tx_desc->data_size = skb_headlen(skb);

	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
						tx_desc->data_size,
						DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent,
				       tx_desc->buf_phys_addr))) {
		mvneta_txq_desc_put(txq);
		frags = 0;
		goto out;
	}

	if (frags == 1) {
		/* First and Last descriptor */
		tx_cmd |= MVNETA_TXD_FLZ_DESC;
		tx_desc->command = tx_cmd;
		txq->tx_skb[txq->txq_put_index] = skb;
		mvneta_txq_inc_put(txq);
	} else {
		/* First but not Last */
		tx_cmd |= MVNETA_TXD_F_DESC;
		txq->tx_skb[txq->txq_put_index] = NULL;
		mvneta_txq_inc_put(txq);
		tx_desc->command = tx_cmd;
		/* Continue with other skb fragments */
		if (mvneta_tx_frag_process(pp, skb, txq)) {
			dma_unmap_single(dev->dev.parent,
					 tx_desc->buf_phys_addr,
					 tx_desc->data_size,
					 DMA_TO_DEVICE);
			mvneta_txq_desc_put(txq);
			frags = 0;
			goto out;
		}
	}

out:
	if (frags > 0) {
		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);

		netdev_tx_sent_queue(nq, len);

		txq->count += frags;
		if (txq->count >= txq->tx_stop_threshold)
			netif_tx_stop_queue(nq);

		if (!skb->xmit_more || netif_xmit_stopped(nq) ||
		    txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
			mvneta_txq_pend_desc_add(pp, txq, frags);
		else
			txq->pending += frags;

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	return NETDEV_TX_OK;
}

/* Free tx resources, when resetting a port */
static void mvneta_txq_done_force(struct mvneta_port *pp,
				  struct mvneta_tx_queue *txq)
{
	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
	int tx_done = txq->count;

	mvneta_txq_bufs_free(pp, txq, tx_done, nq);

	txq->txq_put_index = 0;
	txq->txq_get_index = 0;
}

/* Handle tx done - called in softirq context. The <cause_tx_done> argument
 * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL.
 */
static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
{
	struct mvneta_tx_queue *txq;
	struct netdev_queue *nq;
	int cpu = smp_processor_id();

	while (cause_tx_done) {
		txq = mvneta_tx_done_policy(pp, cause_tx_done);

		nq = netdev_get_tx_queue(pp->dev, txq->id);
		__netif_tx_lock(nq, cpu);

		mvneta_txq_done(pp, txq);

		__netif_tx_unlock(nq);
		cause_tx_done &= ~((1 << txq->id));
	}
}

/* Compute crc8 of the specified address, using a unique algorithm,
 * according to hw spec, different than the generic crc8 algorithm
 */
static int mvneta_addr_crc(unsigned char *addr)
{
	int crc = 0;
	int i, j;

	for (i = 0; i < ETH_ALEN; i++) {
		crc = (crc ^ addr[i]) << 8;
		for (j = 7; j >= 0; j--) {
			if (crc & (0x100 << j))

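/* Both DA-Filter tables below pack four one-byte entries into each 32-bit
 * register: the table index divided by four selects the register and the
 * remainder selects the byte lane. An enabled entry is written as
 * (0x01 | (queue << 1)), i.e. bit 0 accepts the frame and the upper bits
 * select the receive queue. For example, index 0x0b lands in register 2,
 * byte lane 3.
 */
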
/* This method controls the net device special MAC multicast support.
 * The Special Multicast Table for MAC addresses supports MAC of the form
 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
 * Table entries in the DA-Filter table. This method sets the Special
 * Multicast Table appropriate entry.
 */
static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
					  unsigned char last_byte,
					  int queue)
{
	unsigned int smc_table_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Register offset from SMC table base */
	tbl_offset = (last_byte / 4);
	/* Entry offset within the above reg */
	reg_offset = last_byte % 4;

	smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
					+ tbl_offset * 4));

	if (queue == -1) {
		/* Clear accepts frame bit at specified Special DA table entry */
		smc_table_reg &= ~(0xff << (8 * reg_offset));
	} else {
		smc_table_reg &= ~(0xff << (8 * reg_offset));
		smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
		    smc_table_reg);
}

/* This method controls the network device Other MAC multicast support.
 * The Other Multicast Table is used for multicast of another type.
 * A CRC-8 is used as an index to the Other Multicast Table entries
 * in the DA-Filter table.
 * The method gets the CRC-8 value from the calling routine and
 * sets the Other Multicast Table appropriate entry according to the
 * specified CRC-8 value.
 */
static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
					unsigned char crc8,
					int queue)
{
	unsigned int omc_table_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
	reg_offset = crc8 % 4; /* Entry offset within the above reg */

	omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);

	if (queue == -1) {
		/* Clear accepts frame bit at specified Other DA table entry */
		omc_table_reg &= ~(0xff << (8 * reg_offset));
	} else {
		omc_table_reg &= ~(0xff << (8 * reg_offset));
		omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
}

/* The network device supports multicast using two tables:
 * 1) Special Multicast Table for MAC addresses of the form
 *    0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
 *    The MAC DA[7:0] bits are used as a pointer to the Special Multicast
 *    Table entries in the DA-Filter table.
 * 2) Other Multicast Table for multicast of another type. A CRC-8 value
 *    is used as an index to the Other Multicast Table entries in the
 *    DA-Filter table.
 */
static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
				 int queue)
{
	unsigned char crc_result = 0;

	if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
		mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
		return 0;
	}

	crc_result = mvneta_addr_crc(p_addr);
	if (queue == -1) {
		if (pp->mcast_count[crc_result] == 0) {
			netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
				    crc_result);
			return -EINVAL;
		}

		pp->mcast_count[crc_result]--;
		if (pp->mcast_count[crc_result] != 0) {
			netdev_info(pp->dev,
				    "After delete there are %d valid Mcast for crc8=0x%02x\n",
				    pp->mcast_count[crc_result], crc_result);
			return -EINVAL;
		}
	} else {
		pp->mcast_count[crc_result]++;
	}

	mvneta_set_other_mcast_addr(pp, crc_result, queue);

	return 0;
}

/* Configure Filtering mode of Ethernet port */
static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
					  int is_promisc)
{
	u32 port_cfg_reg, val;

	port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);

	val = mvreg_read(pp, MVNETA_TYPE_PRIO);

	/* Set / Clear UPM bit in port configuration register */
	if (is_promisc) {
		/* Accept all Unicast addresses */
		port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
		val |= MVNETA_FORCE_UNI;
		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
	} else {
		/* Reject all Unicast addresses */
		port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
		val &= ~MVNETA_FORCE_UNI;
	}

	mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
	mvreg_write(pp, MVNETA_TYPE_PRIO, val);
}

/* register unicast and multicast addresses */
static void mvneta_set_rx_mode(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	struct netdev_hw_addr *ha;

	if (dev->flags & IFF_PROMISC) {
		/* Accept all: Multicast + Unicast */
		mvneta_rx_unicast_promisc_set(pp, 1);
		mvneta_set_ucast_table(pp, pp->rxq_def);
		mvneta_set_special_mcast_table(pp, pp->rxq_def);
		mvneta_set_other_mcast_table(pp, pp->rxq_def);
	} else {
		/* Accept single Unicast */
		mvneta_rx_unicast_promisc_set(pp, 0);
		mvneta_set_ucast_table(pp, -1);
		mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def);

		if (dev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			mvneta_set_special_mcast_table(pp, pp->rxq_def);
			mvneta_set_other_mcast_table(pp, pp->rxq_def);
		} else {
			/* Accept only initialized multicast */
			mvneta_set_special_mcast_table(pp, -1);
			mvneta_set_other_mcast_table(pp, -1);

			if (!netdev_mc_empty(dev)) {
				netdev_for_each_mc_addr(ha, dev) {
					mvneta_mcast_addr_set(pp, ha->addr,
							      pp->rxq_def);
				}
			}
		}
	}
}

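/* Interrupt handling differs per SoC: Armada 3700 uses a single shared
 * interrupt line (mvneta_isr), while the other SoCs register a per-CPU
 * interrupt and schedule the per-CPU NAPI context from mvneta_percpu_isr.
 */
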
/* Interrupt handling - the callback for request_irq() */
static irqreturn_t mvneta_isr(int irq, void *dev_id)
{
	struct mvneta_port *pp = (struct mvneta_port *)dev_id;

	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	napi_schedule(&pp->napi);

	return IRQ_HANDLED;
}

/* Interrupt handling - the callback for request_percpu_irq() */
static irqreturn_t mvneta_percpu_isr(int irq, void *dev_id)
{
	struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id;

	disable_percpu_irq(port->pp->dev->irq);
	napi_schedule(&port->napi);

	return IRQ_HANDLED;
}

static void mvneta_link_change(struct mvneta_port *pp)
{
	u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);

	phylink_mac_change(pp->phylink, !!(gmac_stat & MVNETA_GMAC_LINK_UP));
}

/* NAPI handler.
 * Bits 0 - 7 of the causeRxTx register indicate that packets were
 * transmitted on the corresponding TXQ (Bit 0 is for TX queue 1).
 * Bits 8 - 15 of the causeRxTx register indicate that packets were
 * received on the corresponding RXQ (Bit 8 is for RX queue 0).
 * Each CPU has its own causeRxTx register.
 */
static int mvneta_poll(struct napi_struct *napi, int budget)
{
	int rx_done = 0;
	u32 cause_rx_tx;
	int rx_queue;
	struct mvneta_port *pp = netdev_priv(napi->dev);
	struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);

	if (!netif_running(pp->dev)) {
		napi_complete(napi);
		return rx_done;
	}

	/* Read cause register */
	cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE);
	if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) {
		u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);

		mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);

		if (cause_misc & (MVNETA_CAUSE_PHY_STATUS_CHANGE |
				  MVNETA_CAUSE_LINK_CHANGE))
			mvneta_link_change(pp);
	}

	/* Release Tx descriptors */
	if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
		mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
		cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
	}

	/* For the case where the last mvneta_poll did not process all
	 * RX packets
	 */
	rx_queue = fls(((cause_rx_tx >> 8) & 0xff));

	cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx :
		port->cause_rx_tx;

	if (rx_queue) {
		rx_queue = rx_queue - 1;
		if (pp->bm_priv)
			rx_done = mvneta_rx_hwbm(napi, pp, budget,
						 &pp->rxqs[rx_queue]);
		else
			rx_done = mvneta_rx_swbm(napi, pp, budget,
						 &pp->rxqs[rx_queue]);
	}

	if (rx_done < budget) {
		napi_complete_done(napi, rx_done);

		if (pp->neta_armada3700) {
			unsigned long flags;

			local_irq_save(flags);
			mvreg_write(pp, MVNETA_INTR_NEW_MASK,
				    MVNETA_RX_INTR_MASK(rxq_number) |
				    MVNETA_TX_INTR_MASK(txq_number) |
				    MVNETA_MISCINTR_INTR_MASK);
			local_irq_restore(flags);
		} else {
			enable_percpu_irq(pp->dev->irq, 0);
		}
	}

	if (pp->neta_armada3700)
		pp->cause_rx_tx = cause_rx_tx;
	else
		port->cause_rx_tx = cause_rx_tx;

	return rx_done;
}

/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
			   int num)
{
	int i;

	for (i = 0; i < num; i++) {
		memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
		if (mvneta_rx_refill(pp, rxq->descs + i, rxq,
				     GFP_KERNEL) != 0) {
			netdev_err(pp->dev,
				   "%s:rxq %d, %d of %d buffs filled\n",
				   __func__, rxq->id, i, num);
			break;
		}
	}

	/* Add this number of RX descriptors as non occupied (ready to
	 * get packets)
	 */
	mvneta_rxq_non_occup_desc_add(pp, rxq, i);

	return i;
}

/* Free all packets pending transmit from all TXQs and reset TX port */
static void mvneta_tx_reset(struct mvneta_port *pp)
{
	int queue;

	/* free the skb's in the tx ring */
	for (queue = 0; queue < txq_number; queue++)
		mvneta_txq_done_force(pp, &pp->txqs[queue]);

	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
}

static void mvneta_rx_reset(struct mvneta_port *pp)
{
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
}

/* Rx/Tx queue initialization/cleanup methods */

static int mvneta_rxq_sw_init(struct mvneta_port *pp,
			      struct mvneta_rx_queue *rxq)
{
	rxq->size = pp->rx_ring_size;

	/* Allocate memory for RX descriptors */
	rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
					rxq->size * MVNETA_DESC_ALIGNED_SIZE,
					&rxq->descs_phys, GFP_KERNEL);
	if (!rxq->descs)
		return -ENOMEM;

	rxq->last_desc = rxq->size - 1;

	return 0;
}

static void mvneta_rxq_hw_init(struct mvneta_port *pp,
			       struct mvneta_rx_queue *rxq)
{
	/* Set Rx descriptors queue starting address */
	mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);

	/* Set coalescing pkts and time */
	mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
	mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);

	if (!pp->bm_priv) {
		mvneta_rxq_offset_set(pp, rxq, 0);
		mvneta_rxq_buf_size_set(pp, rxq, pp->frag_size);
		mvneta_rxq_bm_disable(pp, rxq);
		mvneta_rxq_fill(pp, rxq, rxq->size);
	} else {
		mvneta_rxq_offset_set(pp, rxq,
				      NET_SKB_PAD - pp->rx_offset_correction);

		mvneta_rxq_bm_enable(pp, rxq);
		/* Fill RXQ with buffers from RX pool */
		mvneta_rxq_long_pool_set(pp, rxq);
		mvneta_rxq_short_pool_set(pp, rxq);
		mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size);
	}
}

/* Create a specified RX queue */
static int mvneta_rxq_init(struct mvneta_port *pp,
			   struct mvneta_rx_queue *rxq)
{
	int ret;

	ret = mvneta_rxq_sw_init(pp, rxq);
	if (ret < 0)
		return ret;

	mvneta_rxq_hw_init(pp, rxq);

	return 0;
}

/* Cleanup Rx queue */
static void mvneta_rxq_deinit(struct mvneta_port *pp,
			      struct mvneta_rx_queue *rxq)
{
	mvneta_rxq_drop_pkts(pp, rxq);

	if (rxq->skb)
		dev_kfree_skb_any(rxq->skb);

	if (rxq->descs)
		dma_free_coherent(pp->dev->dev.parent,
				  rxq->size * MVNETA_DESC_ALIGNED_SIZE,
				  rxq->descs, rxq->descs_phys);

	rxq->next_desc_to_proc = 0;
	rxq->descs_phys = 0;
	rxq->first_to_refill = 0;
	rxq->refill_num = 0;
}

static int mvneta_txq_sw_init(struct mvneta_port *pp,
			      struct mvneta_tx_queue *txq)
{
	int cpu;

	txq->size = pp->tx_ring_size;

	/* A queue must always have room for at least one skb.
	 * Therefore, stop the queue when the free entries reaches
	 * the maximum number of descriptors per skb.
	 */
	txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
	txq->tx_wake_threshold = txq->tx_stop_threshold / 2;

	/* Allocate memory for TX descriptors */
	txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
					txq->size * MVNETA_DESC_ALIGNED_SIZE,
					&txq->descs_phys, GFP_KERNEL);
	if (!txq->descs)
		return -ENOMEM;

	txq->last_desc = txq->size - 1;

	txq->tx_skb = kmalloc_array(txq->size, sizeof(*txq->tx_skb),
				    GFP_KERNEL);
	if (!txq->tx_skb) {
		dma_free_coherent(pp->dev->dev.parent,
				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_phys);
		return -ENOMEM;
	}

	/* Allocate DMA buffers for TSO MAC/IP/TCP headers */
	txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
					   txq->size * TSO_HEADER_SIZE,
					   &txq->tso_hdrs_phys, GFP_KERNEL);
	if (!txq->tso_hdrs) {
		kfree(txq->tx_skb);
		dma_free_coherent(pp->dev->dev.parent,
				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_phys);
		return -ENOMEM;
	}

	/* Setup XPS mapping */
	if (txq_number > 1)
		cpu = txq->id % num_present_cpus();
	else
		cpu = pp->rxq_def % num_present_cpus();
	cpumask_set_cpu(cpu, &txq->affinity_mask);
	netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id);

	return 0;
}

static void mvneta_txq_hw_init(struct mvneta_port *pp,
			       struct mvneta_tx_queue *txq)
{
	/* Set maximum bandwidth for enabled TXQs */
	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);

	/* Set Tx descriptors queue starting address */
	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);

	mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
}

/* Create and initialize a tx queue */
static int mvneta_txq_init(struct mvneta_port *pp,
			   struct mvneta_tx_queue *txq)
{
	int ret;

	ret = mvneta_txq_sw_init(pp, txq);
	if (ret < 0)
		return ret;

	mvneta_txq_hw_init(pp, txq);

	return 0;
}

/* Free allocated resources when mvneta_txq_init() fails to allocate memory */
static void mvneta_txq_sw_deinit(struct mvneta_port *pp,
				 struct mvneta_tx_queue *txq)
{
	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);

	kfree(txq->tx_skb);

	if (txq->tso_hdrs)
		dma_free_coherent(pp->dev->dev.parent,
				  txq->size * TSO_HEADER_SIZE,
				  txq->tso_hdrs, txq->tso_hdrs_phys);
	if (txq->descs)
		dma_free_coherent(pp->dev->dev.parent,
				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_phys);

	netdev_tx_reset_queue(nq);

	txq->next_desc_to_proc = 0;
	txq->descs_phys = 0;
}

static void mvneta_txq_hw_deinit(struct mvneta_port *pp,
				 struct mvneta_tx_queue *txq)
{
	/* Set minimum bandwidth for disabled TXQs */
	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);

	/* Set Tx descriptors queue starting address and size */
	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
}

static void mvneta_txq_deinit(struct mvneta_port *pp,
			      struct mvneta_tx_queue *txq)
{
	mvneta_txq_sw_deinit(pp, txq);
	mvneta_txq_hw_deinit(pp, txq);
}

/* Cleanup all Tx queues */
static void mvneta_cleanup_txqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < txq_number; queue++)
		mvneta_txq_deinit(pp, &pp->txqs[queue]);
}

/* Cleanup all Rx queues */
static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++)
		mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
}

/* Init all Rx queues */
static int mvneta_setup_rxqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++) {
		int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);

		if (err) {
			netdev_err(pp->dev, "%s: can't create rxq=%d\n",
				   __func__, queue);
			mvneta_cleanup_rxqs(pp);
			return err;
		}
	}

	return 0;
}

/* Init all tx queues */
static int mvneta_setup_txqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < txq_number; queue++) {
		int err = mvneta_txq_init(pp, &pp->txqs[queue]);

		if (err) {
			netdev_err(pp->dev, "%s: can't create txq=%d\n",
				   __func__, queue);
			mvneta_cleanup_txqs(pp);
			return err;
		}
	}

	return 0;
}

static void mvneta_start_dev(struct mvneta_port *pp)
{
	int cpu;

	mvneta_max_rx_size_set(pp, pp->pkt_size);
	mvneta_txq_max_tx_size_set(pp, pp->pkt_size);

	/* start the Rx/Tx activity */
	mvneta_port_enable(pp);

	if (!pp->neta_armada3700) {
		/* Enable polling on the port */
		for_each_online_cpu(cpu) {
			struct mvneta_pcpu_port *port =
				per_cpu_ptr(pp->ports, cpu);

			napi_enable(&port->napi);
		}
	} else {
		napi_enable(&pp->napi);
	}

	/* Unmask interrupts. It has to be done from each CPU */
	on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);

	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
		    MVNETA_CAUSE_LINK_CHANGE);

	phylink_start(pp->phylink);
	netif_tx_start_all_queues(pp->dev);
}

static void mvneta_stop_dev(struct mvneta_port *pp)
{
	int cpu;

	phylink_stop(pp->phylink);

	if (!pp->neta_armada3700) {
		for_each_online_cpu(cpu) {
			struct mvneta_pcpu_port *port =
				per_cpu_ptr(pp->ports, cpu);

			napi_disable(&port->napi);
		}
	} else {
		napi_disable(&pp->napi);
	}

	netif_carrier_off(pp->dev);

	mvneta_port_down(pp);
	netif_tx_stop_all_queues(pp->dev);

	/* Stop the port activity */
	mvneta_port_disable(pp);

	/* Clear all ethernet port interrupts */
	on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);

	/* Mask all ethernet port interrupts */
	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);

	mvneta_tx_reset(pp);
	mvneta_rx_reset(pp);
}

static void mvneta_percpu_enable(void *arg)
{
	struct mvneta_port *pp = arg;

	enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
}

static void mvneta_percpu_disable(void *arg)
{
	struct mvneta_port *pp = arg;

	disable_percpu_irq(pp->dev->irq);
}

/* Change the device mtu */
static int mvneta_change_mtu(struct net_device *dev, int mtu)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int ret;

	if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
		netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
			    mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
		mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
	}

	dev->mtu = mtu;

	if (!netif_running(dev)) {
		if (pp->bm_priv)
			mvneta_bm_update_mtu(pp, mtu);

		netdev_update_features(dev);
		return 0;
	}

	/* The interface is running, so we have to force a
	 * reallocation of the queues
	 */
	mvneta_stop_dev(pp);
	on_each_cpu(mvneta_percpu_disable, pp, true);

	mvneta_cleanup_txqs(pp);
	mvneta_cleanup_rxqs(pp);

	if (pp->bm_priv)
		mvneta_bm_update_mtu(pp, mtu);

	pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);

	ret = mvneta_setup_rxqs(pp);
	if (ret) {
		netdev_err(dev, "unable to setup rxqs after MTU change\n");
		return ret;
	}

	ret = mvneta_setup_txqs(pp);
	if (ret) {
		netdev_err(dev, "unable to setup txqs after MTU change\n");
		return ret;
	}

	on_each_cpu(mvneta_percpu_enable, pp, true);
	mvneta_start_dev(pp);

	netdev_update_features(dev);

	return 0;
}

static netdev_features_t mvneta_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
		features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
		netdev_info(dev,
			    "Disable IP checksum for MTU greater than %dB\n",
			    pp->tx_csum_limit);
	}

	return features;
}

/* Get mac address */
static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
{
	u32 mac_addr_l, mac_addr_h;

	mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
	mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
	addr[0] = (mac_addr_h >> 24) & 0xFF;
	addr[1] = (mac_addr_h >> 16) & 0xFF;
	addr[2] = (mac_addr_h >> 8) & 0xFF;
	addr[3] = mac_addr_h & 0xFF;
	addr[4] = (mac_addr_l >> 8) & 0xFF;
	addr[5] = mac_addr_l & 0xFF;
}

/* Handle setting mac address */
static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
{
	struct mvneta_port *pp = netdev_priv(dev);
	struct sockaddr *sockaddr = addr;
	int ret;

	ret = eth_prepare_mac_addr_change(dev, addr);
	if (ret < 0)
		return ret;

	/* Remove previous address table entry */
	mvneta_mac_addr_set(pp, dev->dev_addr, -1);

	/* Set new addr in hw */
	mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def);

	eth_commit_mac_addr_change(dev, addr);
	return 0;
}

static void mvneta_validate(struct net_device *ndev, unsigned long *supported,
			    struct phylink_link_state *state)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	/* We only support QSGMII, SGMII, 802.3z and RGMII modes */
	if (state->interface != PHY_INTERFACE_MODE_NA &&
	    state->interface != PHY_INTERFACE_MODE_QSGMII &&
	    state->interface != PHY_INTERFACE_MODE_SGMII &&
	    !phy_interface_mode_is_8023z(state->interface) &&
	    !phy_interface_mode_is_rgmii(state->interface)) {
		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
		return;
	}

	/* Allow all the expected bits */
	phylink_set(mask, Autoneg);
	phylink_set_port_modes(mask);

	/* Asymmetric pause is unsupported */
	phylink_set(mask, Pause);
	/* Half-duplex at speeds higher than 100Mbit is unsupported */
	phylink_set(mask, 1000baseT_Full);
	phylink_set(mask, 1000baseX_Full);

	if (!phy_interface_mode_is_8023z(state->interface)) {
		/* 10M and 100M are only supported in non-802.3z mode */
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 10baseT_Full);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 100baseT_Full);
	}

	bitmap_and(supported, supported, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
}

static int mvneta_mac_link_state(struct net_device *ndev,
				 struct phylink_link_state *state)
{
	struct mvneta_port *pp = netdev_priv(ndev);
	u32 gmac_stat;

	gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);

	if (gmac_stat & MVNETA_GMAC_SPEED_1000)
		state->speed = SPEED_1000;
	else if (gmac_stat & MVNETA_GMAC_SPEED_100)
		state->speed = SPEED_100;
	else
		state->speed = SPEED_10;

	state->an_complete = !!(gmac_stat & MVNETA_GMAC_AN_COMPLETE);
	state->link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
	state->duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);

	state->pause = 0;
	if (gmac_stat & MVNETA_GMAC_RX_FLOW_CTRL_ENABLE)
		state->pause |= MLO_PAUSE_RX;
	if (gmac_stat & MVNETA_GMAC_TX_FLOW_CTRL_ENABLE)
		state->pause |= MLO_PAUSE_TX;

	return 1;
}

static void mvneta_mac_an_restart(struct net_device *ndev)
{
	struct mvneta_port *pp = netdev_priv(ndev);
	u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);

	mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
		    gmac_an | MVNETA_GMAC_INBAND_RESTART_AN);
	mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
		    gmac_an & ~MVNETA_GMAC_INBAND_RESTART_AN);
}

static void mvneta_mac_config(struct net_device *ndev, unsigned int mode,
			      const struct phylink_link_state *state)
{
	struct mvneta_port *pp = netdev_priv(ndev);
	u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
	u32 new_clk, gmac_clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
	u32 new_an, gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);

	new_ctrl0 = gmac_ctrl0 & ~MVNETA_GMAC0_PORT_1000BASE_X;
	new_ctrl2 = gmac_ctrl2 & ~(MVNETA_GMAC2_INBAND_AN_ENABLE |
				   MVNETA_GMAC2_PORT_RESET);
	new_clk = gmac_clk & ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
	new_an = gmac_an & ~(MVNETA_GMAC_INBAND_AN_ENABLE |
			     MVNETA_GMAC_INBAND_RESTART_AN |
			     MVNETA_GMAC_CONFIG_MII_SPEED |
			     MVNETA_GMAC_CONFIG_GMII_SPEED |
			     MVNETA_GMAC_AN_SPEED_EN |
			     MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL |
			     MVNETA_GMAC_CONFIG_FLOW_CTRL |
			     MVNETA_GMAC_AN_FLOW_CTRL_EN |
			     MVNETA_GMAC_CONFIG_FULL_DUPLEX |
			     MVNETA_GMAC_AN_DUPLEX_EN);

	/* Even though it might look weird, when we're configured in
	 * SGMII or QSGMII mode, the RGMII bit needs to be set.
	 */
	new_ctrl2 |= MVNETA_GMAC2_PORT_RGMII;

	if (state->interface == PHY_INTERFACE_MODE_QSGMII ||
	    state->interface == PHY_INTERFACE_MODE_SGMII ||
	    phy_interface_mode_is_8023z(state->interface))
		new_ctrl2 |= MVNETA_GMAC2_PCS_ENABLE;

	if (phylink_test(state->advertising, Pause))
		new_an |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL;
	if (state->pause & MLO_PAUSE_TXRX_MASK)
		new_an |= MVNETA_GMAC_CONFIG_FLOW_CTRL;

	if (!phylink_autoneg_inband(mode)) {
		/* Phy or fixed speed */
		if (state->duplex)
			new_an |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;

		if (state->speed == SPEED_1000)
			new_an |= MVNETA_GMAC_CONFIG_GMII_SPEED;
		else if (state->speed == SPEED_100)
			new_an |= MVNETA_GMAC_CONFIG_MII_SPEED;
	} else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
		/* SGMII mode receives the state from the PHY */
		new_ctrl2 |= MVNETA_GMAC2_INBAND_AN_ENABLE;
		new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
		new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
				     MVNETA_GMAC_FORCE_LINK_PASS)) |
			 MVNETA_GMAC_INBAND_AN_ENABLE |
			 MVNETA_GMAC_AN_SPEED_EN |
			 MVNETA_GMAC_AN_DUPLEX_EN;
	} else {
		/* 802.3z negotiation - only 1000base-X */
		new_ctrl0 |= MVNETA_GMAC0_PORT_1000BASE_X;
		new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
		new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
				     MVNETA_GMAC_FORCE_LINK_PASS)) |
			 MVNETA_GMAC_INBAND_AN_ENABLE |
			 MVNETA_GMAC_CONFIG_GMII_SPEED |
			 /* The MAC only supports FD mode */
			 MVNETA_GMAC_CONFIG_FULL_DUPLEX;

		if (state->pause & MLO_PAUSE_AN && state->an_enabled)
			new_an |= MVNETA_GMAC_AN_FLOW_CTRL_EN;
	}

	/* Armada 370 documentation says we can only change the port mode
	 * and in-band enable when the link is down, so force it down
	 * while making these changes. We also do this for GMAC_CTRL2
	 */
	if ((new_ctrl0 ^ gmac_ctrl0) & MVNETA_GMAC0_PORT_1000BASE_X ||
	    (new_ctrl2 ^ gmac_ctrl2) & MVNETA_GMAC2_INBAND_AN_ENABLE ||
	    (new_an ^ gmac_an) & MVNETA_GMAC_INBAND_AN_ENABLE) {
		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
			    (gmac_an & ~MVNETA_GMAC_FORCE_LINK_PASS) |
			    MVNETA_GMAC_FORCE_LINK_DOWN);
	}

	if (new_ctrl0 != gmac_ctrl0)
		mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0);
	if (new_ctrl2 != gmac_ctrl2)
		mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2);
	if (new_clk != gmac_clk)
		mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, new_clk);
	if (new_an != gmac_an)
		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, new_an);

	if (gmac_ctrl2 & MVNETA_GMAC2_PORT_RESET) {
		while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
			MVNETA_GMAC2_PORT_RESET) != 0)
			continue;
	}
}

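/* EEE handling is split in two: the LPI request itself is toggled through
 * MVNETA_LPI_CTRL_1 below, while the tx_lpi_timer programmed from ethtool
 * lives in bits 15:8 of MVNETA_LPI_CTRL_0 (see mvneta_ethtool_set_eee()).
 */
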
static void mvneta_set_eee(struct mvneta_port *pp, bool enable)
{
	u32 lpi_ctl1;

	lpi_ctl1 = mvreg_read(pp, MVNETA_LPI_CTRL_1);
	if (enable)
		lpi_ctl1 |= MVNETA_LPI_REQUEST_ENABLE;
	else
		lpi_ctl1 &= ~MVNETA_LPI_REQUEST_ENABLE;
	mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1);
}

static void mvneta_mac_link_down(struct net_device *ndev, unsigned int mode,
				 phy_interface_t interface)
{
	struct mvneta_port *pp = netdev_priv(ndev);
	u32 val;

	mvneta_port_down(pp);

	if (!phylink_autoneg_inband(mode)) {
		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
		val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
		val |= MVNETA_GMAC_FORCE_LINK_DOWN;
		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
	}

	pp->eee_active = false;
	mvneta_set_eee(pp, false);
}

static void mvneta_mac_link_up(struct net_device *ndev, unsigned int mode,
			       phy_interface_t interface,
			       struct phy_device *phy)
{
	struct mvneta_port *pp = netdev_priv(ndev);
	u32 val;

	if (!phylink_autoneg_inband(mode)) {
		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
		val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
		val |= MVNETA_GMAC_FORCE_LINK_PASS;
		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
	}

	if (phy && pp->eee_enabled) {
		pp->eee_active = phy_init_eee(phy, 0) >= 0;
		mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled);
	}
}

static const struct phylink_mac_ops mvneta_phylink_ops = {
	.validate = mvneta_validate,
	.mac_link_state = mvneta_mac_link_state,
	.mac_an_restart = mvneta_mac_an_restart,
	.mac_config = mvneta_mac_config,
	.mac_link_down = mvneta_mac_link_down,
	.mac_link_up = mvneta_mac_link_up,
};

static int mvneta_mdio_probe(struct mvneta_port *pp)
{
	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
	int err = phylink_of_phy_connect(pp->phylink, pp->dn, 0);

	if (err)
		netdev_err(pp->dev, "could not attach PHY: %d\n", err);

	phylink_ethtool_get_wol(pp->phylink, &wol);
	device_set_wakeup_capable(&pp->dev->dev, !!wol.supported);

	return err;
}

static void mvneta_mdio_remove(struct mvneta_port *pp)
{
	phylink_disconnect_phy(pp->phylink);
}

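/* Queue-to-CPU mapping: receive queues are spread over the online CPUs with
 * a simple modulo, the elected CPU additionally owns the default RX queue,
 * and when only one TX queue exists it is bound to the elected CPU as well.
 */
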
/* Electing a CPU must be done in an atomic way: it should be done
 * after or before the removal/insertion of a CPU and this function is
 * not reentrant.
 */
static void mvneta_percpu_elect(struct mvneta_port *pp)
{
	int elected_cpu = 0, max_cpu, cpu, i = 0;

	/* Use the cpu associated to the rxq when it is online, in all
	 * the other cases, use the cpu 0 which can't be offline.
	 */
	if (cpu_online(pp->rxq_def))
		elected_cpu = pp->rxq_def;

	max_cpu = num_present_cpus();

	for_each_online_cpu(cpu) {
		int rxq_map = 0, txq_map = 0;
		int rxq;

		for (rxq = 0; rxq < rxq_number; rxq++)
			if ((rxq % max_cpu) == cpu)
				rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);

		if (cpu == elected_cpu)
			/* Map the default receive queue to the
			 * elected CPU
			 */
			rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def);

		/* We update the TX queue map only if we have one
		 * queue. In this case we associate the TX queue to
		 * the CPU bound to the default RX queue
		 */
		if (txq_number == 1)
			txq_map = (cpu == elected_cpu) ?
				MVNETA_CPU_TXQ_ACCESS(1) : 0;
		else
			txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
				MVNETA_CPU_TXQ_ACCESS_ALL_MASK;

		mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);

		/* Update the interrupt mask on each CPU according the
		 * new mapping
		 */
		smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt,
					 pp, true);
		i++;
	}
}

static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	int other_cpu;
	struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
						  node_online);
	struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);

	spin_lock(&pp->lock);
	/*
	 * Configuring the driver for a new CPU while the driver is
	 * stopping is racy, so just avoid it.
	 */
	if (pp->is_stopped) {
		spin_unlock(&pp->lock);
		return 0;
	}
	netif_tx_stop_all_queues(pp->dev);

	/*
	 * We have to synchronise on the napi of each CPU except the one
	 * just being woken up
	 */
	for_each_online_cpu(other_cpu) {
		if (other_cpu != cpu) {
			struct mvneta_pcpu_port *other_port =
				per_cpu_ptr(pp->ports, other_cpu);

			napi_synchronize(&other_port->napi);
		}
	}

	/* Mask all ethernet port interrupts */
	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
	napi_enable(&port->napi);

	/*
	 * Enable per-CPU interrupts on the CPU that is
	 * brought up.
	 */
	mvneta_percpu_enable(pp);

	/*
	 * Enable per-CPU interrupt on the one CPU we care
	 * about.
	 */
	mvneta_percpu_elect(pp);

	/* Unmask all ethernet port interrupts */
	on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
		    MVNETA_CAUSE_LINK_CHANGE);
	netif_tx_start_all_queues(pp->dev);
	spin_unlock(&pp->lock);
	return 0;
}

static int mvneta_cpu_down_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
						  node_online);
	struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);

	/*
	 * Thanks to this lock we are sure that any pending cpu election is
	 * done.
	 */
	spin_lock(&pp->lock);
	/* Mask all ethernet port interrupts */
	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
	spin_unlock(&pp->lock);

	napi_synchronize(&port->napi);
	napi_disable(&port->napi);
	/* Disable per-CPU interrupts on the CPU that is brought down. */
	mvneta_percpu_disable(pp);
	return 0;
}

static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
						  node_dead);

	/* Check if a new CPU must be elected now this one is down */
	spin_lock(&pp->lock);
	mvneta_percpu_elect(pp);
	spin_unlock(&pp->lock);
	/* Unmask all ethernet port interrupts */
	on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
		    MVNETA_CAUSE_LINK_CHANGE);
	netif_tx_start_all_queues(pp->dev);
	return 0;
}

static int mvneta_open(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int ret;

	pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
	pp->frag_size = PAGE_SIZE;

	ret = mvneta_setup_rxqs(pp);
	if (ret)
		return ret;

	ret = mvneta_setup_txqs(pp);
	if (ret)
		goto err_cleanup_rxqs;

	/* Connect to port interrupt line */
	if (pp->neta_armada3700)
		ret = request_irq(pp->dev->irq, mvneta_isr, 0,
				  dev->name, pp);
	else
		ret = request_percpu_irq(pp->dev->irq, mvneta_percpu_isr,
					 dev->name, pp->ports);
	if (ret) {
		netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
		goto err_cleanup_txqs;
	}

	if (!pp->neta_armada3700) {
		/* Enable per-CPU interrupt on all the CPU to handle our RX
		 * queue interrupts
		 */
		on_each_cpu(mvneta_percpu_enable, pp, true);

		pp->is_stopped = false;
		/* Register a CPU notifier to handle the case where our CPU
		 * might be taken offline.
		 */
		ret = cpuhp_state_add_instance_nocalls(online_hpstate,
						       &pp->node_online);
		if (ret)
			goto err_free_irq;

		ret = cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						       &pp->node_dead);
		if (ret)
			goto err_free_online_hp;
	}

	ret = mvneta_mdio_probe(pp);
	if (ret < 0) {
		netdev_err(dev, "cannot probe MDIO bus\n");
		goto err_free_dead_hp;
	}

	mvneta_start_dev(pp);

	return 0;

err_free_dead_hp:
	if (!pp->neta_armada3700)
		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						    &pp->node_dead);
err_free_online_hp:
	if (!pp->neta_armada3700)
		cpuhp_state_remove_instance_nocalls(online_hpstate,
						    &pp->node_online);
err_free_irq:
	if (pp->neta_armada3700) {
		free_irq(pp->dev->irq, pp);
	} else {
		on_each_cpu(mvneta_percpu_disable, pp, true);
		free_percpu_irq(pp->dev->irq, pp->ports);
	}
err_cleanup_txqs:
	mvneta_cleanup_txqs(pp);
err_cleanup_rxqs:
	mvneta_cleanup_rxqs(pp);
	return ret;
}

/* Stop the port, free port interrupt line */
static int mvneta_stop(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if (!pp->neta_armada3700) {
		/* Inform that we are stopping so we don't want to setup the
		 * driver for new CPUs in the notifiers. The code of the
		 * notifier for CPU online is protected by the same spinlock,
		 * so when we get the lock, the notifier work is done.
		 */
		spin_lock(&pp->lock);
		pp->is_stopped = true;
		spin_unlock(&pp->lock);

		mvneta_stop_dev(pp);
		mvneta_mdio_remove(pp);

		cpuhp_state_remove_instance_nocalls(online_hpstate,
						    &pp->node_online);
		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						    &pp->node_dead);
		on_each_cpu(mvneta_percpu_disable, pp, true);
		free_percpu_irq(dev->irq, pp->ports);
	} else {
		mvneta_stop_dev(pp);
		mvneta_mdio_remove(pp);
		free_irq(dev->irq, pp);
	}

	mvneta_cleanup_rxqs(pp);
	mvneta_cleanup_txqs(pp);

	return 0;
}

static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mvneta_port *pp = netdev_priv(dev);

	return phylink_mii_ioctl(pp->phylink, ifr, cmd);
}

/* Ethtool methods */

/* Set link ksettings (phy address, speed) for ethtools */
static int
mvneta_ethtool_set_link_ksettings(struct net_device *ndev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct mvneta_port *pp = netdev_priv(ndev);

	return phylink_ethtool_ksettings_set(pp->phylink, cmd);
}

/* Get link ksettings for ethtools */
static int
mvneta_ethtool_get_link_ksettings(struct net_device *ndev,
				  struct ethtool_link_ksettings *cmd)
{
	struct mvneta_port *pp = netdev_priv(ndev);

	return phylink_ethtool_ksettings_get(pp->phylink, cmd);
}

static int mvneta_ethtool_nway_reset(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);

	return phylink_ethtool_nway_reset(pp->phylink);
}

/* Set interrupt coalescing for ethtools */
static int mvneta_ethtool_set_coalesce(struct net_device *dev,
				       struct ethtool_coalesce *c)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int queue;

	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

		rxq->time_coal = c->rx_coalesce_usecs;
		rxq->pkts_coal = c->rx_max_coalesced_frames;
		mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
		mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
	}

	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];

		txq->done_pkts_coal = c->tx_max_coalesced_frames;
		mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
	}

	return 0;
}

/* get coalescing for ethtools */
static int mvneta_ethtool_get_coalesce(struct net_device *dev,
				       struct ethtool_coalesce *c)
{
	struct mvneta_port *pp = netdev_priv(dev);

	c->rx_coalesce_usecs = pp->rxqs[0].time_coal;
	c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal;

	c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;
	return 0;
}

static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
				       struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
		sizeof(drvinfo->bus_info));
}

static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
					 struct ethtool_ringparam *ring)
{
	struct mvneta_port *pp = netdev_priv(netdev);

	ring->rx_max_pending = MVNETA_MAX_RXD;
	ring->tx_max_pending = MVNETA_MAX_TXD;
	ring->rx_pending = pp->rx_ring_size;
	ring->tx_pending = pp->tx_ring_size;
}

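/* Ring size changes are sanity-checked rather than rejected outright: the
 * RX ring is capped at MVNETA_MAX_RXD and the TX ring is clamped to the
 * range [2 * MVNETA_MAX_SKB_DESCS, MVNETA_MAX_TXD], with a warning when the
 * requested value had to be adjusted.
 */
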
static int mvneta_ethtool_set_ringparam(struct net_device *dev,
					struct ethtool_ringparam *ring)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
		return -EINVAL;
	pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
		ring->rx_pending : MVNETA_MAX_RXD;

	pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
				   MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD);
	if (pp->tx_ring_size != ring->tx_pending)
		netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
			    pp->tx_ring_size, ring->tx_pending);

	if (netif_running(dev)) {
		mvneta_stop(dev);
		if (mvneta_open(dev)) {
			netdev_err(dev,
				   "error on opening device after ring param change\n");
			return -ENOMEM;
		}
	}

	return 0;
}

static void mvneta_ethtool_get_pauseparam(struct net_device *dev,
					  struct ethtool_pauseparam *pause)
{
	struct mvneta_port *pp = netdev_priv(dev);

	phylink_ethtool_get_pauseparam(pp->phylink, pause);
}

static int mvneta_ethtool_set_pauseparam(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mvneta_port *pp = netdev_priv(dev);

	return phylink_ethtool_set_pauseparam(pp->phylink, pause);
}

static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
				       u8 *data)
{
	if (sset == ETH_SS_STATS) {
		int i;

		for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       mvneta_statistics[i].name, ETH_GSTRING_LEN);
	}
}

static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
{
	const struct mvneta_statistic *s;
	void __iomem *base = pp->base;
	u32 high, low;
	u64 val;
	int i;

	for (i = 0, s = mvneta_statistics;
	     s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
	     s++, i++) {
		val = 0;

		switch (s->type) {
		case T_REG_32:
			val = readl_relaxed(base + s->offset);
			break;
		case T_REG_64:
			/* Docs say to read low 32-bit then high */
			low = readl_relaxed(base + s->offset);
			high = readl_relaxed(base + s->offset + 4);
			val = (u64)high << 32 | low;
			break;
		case T_SW:
			switch (s->offset) {
			case ETHTOOL_STAT_EEE_WAKEUP:
				val = phylink_get_eee_err(pp->phylink);
				break;
			case ETHTOOL_STAT_SKB_ALLOC_ERR:
				val = pp->rxqs[0].skb_alloc_err;
				break;
			case ETHTOOL_STAT_REFILL_ERR:
				val = pp->rxqs[0].refill_err;
				break;
			}
			break;
		}

		pp->ethtool_stats[i] += val;
	}
}

static void mvneta_ethtool_get_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int i;

	mvneta_ethtool_update_stats(pp);

	for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
		*data++ = pp->ethtool_stats[i];
}

static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(mvneta_statistics);
	return -EOPNOTSUPP;
}

static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev)
{
	return MVNETA_RSS_LU_TABLE_SIZE;
}

static int mvneta_ethtool_get_rxnfc(struct net_device *dev,
				    struct ethtool_rxnfc *info,
				    u32 *rules __always_unused)
{
	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = rxq_number;
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int mvneta_config_rss(struct mvneta_port *pp)
{
	int cpu;
	u32 val;

	netif_tx_stop_all_queues(pp->dev);

	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);

	if (!pp->neta_armada3700) {
		/* We have to synchronise on the napi of each CPU */
		for_each_online_cpu(cpu) {
			struct mvneta_pcpu_port *pcpu_port =
				per_cpu_ptr(pp->ports, cpu);

			napi_synchronize(&pcpu_port->napi);
			napi_disable(&pcpu_port->napi);
		}
	} else {
		napi_synchronize(&pp->napi);
		napi_disable(&pp->napi);
	}

	pp->rxq_def = pp->indir[0];

	/* Update unicast mapping */
	mvneta_set_rx_mode(pp->dev);

	/* Update val of portCfg register accordingly with all RxQueue types */
	val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
	mvreg_write(pp, MVNETA_PORT_CONFIG, val);

	/* Update the elected CPU matching the new rxq_def */
	spin_lock(&pp->lock);
	mvneta_percpu_elect(pp);
	spin_unlock(&pp->lock);

	if (!pp->neta_armada3700) {
		/* We have to synchronise on the napi of each CPU */
		for_each_online_cpu(cpu) {
			struct mvneta_pcpu_port *pcpu_port =
				per_cpu_ptr(pp->ports, cpu);

			napi_enable(&pcpu_port->napi);
		}
	} else {
		napi_enable(&pp->napi);
	}

	netif_tx_start_all_queues(pp->dev);

	return 0;
}

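/* RSS support is minimal: the ethtool indirection table is stored in
 * pp->indir, but only its first entry is used, as mvneta_config_rss()
 * simply turns indir[0] into the new default receive queue. Armada 3700
 * rejects the RSS ethtool operations entirely.
 */
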
static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
				   const u8 *key, const u8 hfunc)
{
	struct mvneta_port *pp = netdev_priv(dev);

	/* Current code for Armada 3700 doesn't support RSS features yet */
	if (pp->neta_armada3700)
		return -EOPNOTSUPP;

	/* We require at least one supported parameter to be changed
	 * and no change in any of the unsupported parameters
	 */
	if (key ||
	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
		return -EOPNOTSUPP;

	if (!indir)
		return 0;

	memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE);

	return mvneta_config_rss(pp);
}

static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
				   u8 *hfunc)
{
	struct mvneta_port *pp = netdev_priv(dev);

	/* Current code for Armada 3700 doesn't support RSS features yet */
	if (pp->neta_armada3700)
		return -EOPNOTSUPP;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	if (!indir)
		return 0;

	memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE);

	return 0;
}

static void mvneta_ethtool_get_wol(struct net_device *dev,
				   struct ethtool_wolinfo *wol)
{
	struct mvneta_port *pp = netdev_priv(dev);

	phylink_ethtool_get_wol(pp->phylink, wol);
}

static int mvneta_ethtool_set_wol(struct net_device *dev,
				  struct ethtool_wolinfo *wol)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int ret;

	ret = phylink_ethtool_set_wol(pp->phylink, wol);
	if (!ret)
		device_set_wakeup_enable(&dev->dev, !!wol->wolopts);

	return ret;
}

4215 static int mvneta_ethtool_get_eee(struct net_device
*dev
,
4216 struct ethtool_eee
*eee
)
4218 struct mvneta_port
*pp
= netdev_priv(dev
);
4221 lpi_ctl0
= mvreg_read(pp
, MVNETA_LPI_CTRL_0
);
4223 eee
->eee_enabled
= pp
->eee_enabled
;
4224 eee
->eee_active
= pp
->eee_active
;
4225 eee
->tx_lpi_enabled
= pp
->tx_lpi_enabled
;
4226 eee
->tx_lpi_timer
= (lpi_ctl0
) >> 8; // * scale;
4228 return phylink_ethtool_get_eee(pp
->phylink
, eee
);
static int mvneta_ethtool_set_eee(struct net_device *dev,
				  struct ethtool_eee *eee)
{
	struct mvneta_port *pp = netdev_priv(dev);
	u32 lpi_ctl0;

	/* The Armada 37x documents do not give limits for this other than
	 * it being an 8-bit register.
	 */
	if (eee->tx_lpi_enabled &&
	    (eee->tx_lpi_timer < 0 || eee->tx_lpi_timer > 255))
		return -EINVAL;

	lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
	lpi_ctl0 &= ~(0xff << 8);
	lpi_ctl0 |= eee->tx_lpi_timer << 8;
	mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0);

	pp->eee_enabled = eee->eee_enabled;
	pp->tx_lpi_enabled = eee->tx_lpi_enabled;

	mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled);

	return phylink_ethtool_set_eee(pp->phylink, eee);
}

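/* Editor's note: illustration of the MVNETA_LPI_CTRL_0 update above, assuming
 * only what the mask and shift imply, namely that the TX LPI timer occupies
 * bits 15:8.  For a requested tx_lpi_timer of 100 (0x64):
 *
 *     lpi_ctl0 &= ~(0xff << 8);     // clear the old timer field
 *     lpi_ctl0 |= 0x64 << 8;        // bits 15:8 now hold 0x64
 *
 * Values outside 0..255 are rejected earlier because the field is 8 bits wide.
 */
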
static const struct net_device_ops mvneta_netdev_ops = {
	.ndo_open            = mvneta_open,
	.ndo_stop            = mvneta_stop,
	.ndo_start_xmit      = mvneta_tx,
	.ndo_set_rx_mode     = mvneta_set_rx_mode,
	.ndo_set_mac_address = mvneta_set_mac_addr,
	.ndo_change_mtu      = mvneta_change_mtu,
	.ndo_fix_features    = mvneta_fix_features,
	.ndo_get_stats64     = mvneta_get_stats64,
	.ndo_do_ioctl        = mvneta_ioctl,
};

static const struct ethtool_ops mvneta_eth_tool_ops = {
	.nway_reset          = mvneta_ethtool_nway_reset,
	.get_link            = ethtool_op_get_link,
	.set_coalesce        = mvneta_ethtool_set_coalesce,
	.get_coalesce        = mvneta_ethtool_get_coalesce,
	.get_drvinfo         = mvneta_ethtool_get_drvinfo,
	.get_ringparam       = mvneta_ethtool_get_ringparam,
	.set_ringparam       = mvneta_ethtool_set_ringparam,
	.get_pauseparam      = mvneta_ethtool_get_pauseparam,
	.set_pauseparam      = mvneta_ethtool_set_pauseparam,
	.get_strings         = mvneta_ethtool_get_strings,
	.get_ethtool_stats   = mvneta_ethtool_get_stats,
	.get_sset_count      = mvneta_ethtool_get_sset_count,
	.get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size,
	.get_rxnfc           = mvneta_ethtool_get_rxnfc,
	.get_rxfh            = mvneta_ethtool_get_rxfh,
	.set_rxfh            = mvneta_ethtool_set_rxfh,
	.get_link_ksettings  = mvneta_ethtool_get_link_ksettings,
	.set_link_ksettings  = mvneta_ethtool_set_link_ksettings,
	.get_wol             = mvneta_ethtool_get_wol,
	.set_wol             = mvneta_ethtool_set_wol,
	.get_eee             = mvneta_ethtool_get_eee,
	.set_eee             = mvneta_ethtool_set_eee,
};

static int mvneta_init(struct device *dev, struct mvneta_port *pp)
{
	int queue;

	/* Disable port */
	mvneta_port_disable(pp);

	/* Set port default values */
	mvneta_defaults_set(pp);

	pp->txqs = devm_kcalloc(dev, txq_number, sizeof(*pp->txqs), GFP_KERNEL);
	if (!pp->txqs)
		return -ENOMEM;

	/* Initialize TX descriptor rings */
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];

		txq->id = queue;
		txq->size = pp->tx_ring_size;
		txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
	}

	pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*pp->rxqs), GFP_KERNEL);
	if (!pp->rxqs)
		return -ENOMEM;

	/* Create Rx descriptor rings */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

		rxq->id = queue;
		rxq->size = pp->rx_ring_size;
		rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
		rxq->time_coal = MVNETA_RX_COAL_USEC;
		rxq->buf_virt_addr
			= devm_kmalloc_array(pp->dev->dev.parent,
					     rxq->size,
					     sizeof(*rxq->buf_virt_addr),
					     GFP_KERNEL);
		if (!rxq->buf_virt_addr)
			return -ENOMEM;
	}

	return 0;
}

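/* Editor's note: the per-queue defaults above (MVNETA_TXDONE_COAL_PKTS,
 * MVNETA_RX_COAL_PKTS, MVNETA_RX_COAL_USEC) are only starting points; they
 * can be changed at runtime through the .set_coalesce ethtool hook, e.g.
 * (hedged sketch):
 *
 *     ethtool -C eth0 rx-usecs 50 rx-frames 32
 *
 * The ring sizes themselves come from pp->tx_ring_size/rx_ring_size, which
 * mvneta_probe() initialises below.
 */
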
/* Platform glue: initialize decoding windows */
static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
				     const struct mbus_dram_target_info *dram)
{
	u32 win_enable;
	u32 win_protect;
	int i;

	for (i = 0; i < 6; i++) {
		mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
		mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);

		if (i < 4)
			mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
	}

	win_enable = 0x3f;
	win_protect = 0;

	if (dram) {
		for (i = 0; i < dram->num_cs; i++) {
			const struct mbus_dram_window *cs = dram->cs + i;

			mvreg_write(pp, MVNETA_WIN_BASE(i),
				    (cs->base & 0xffff0000) |
				    (cs->mbus_attr << 8) |
				    dram->mbus_dram_target_id);

			mvreg_write(pp, MVNETA_WIN_SIZE(i),
				    (cs->size - 1) & 0xffff0000);

			win_enable &= ~(1 << i);
			win_protect |= 3 << (2 * i);
		}
	} else {
		/* For Armada3700 open default 4GB Mbus window, leaving
		 * arbitration of target/attribute to a different layer
		 * of configuration.
		 */
		mvreg_write(pp, MVNETA_WIN_SIZE(0), 0xffff0000);
		win_enable &= ~BIT(0);
		win_protect = 3;
	}

	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
	mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
}

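/* Editor's note: worked example of the window-size encoding used above.  The
 * SIZE register stores (cs->size - 1) & 0xffff0000, i.e. windows have 64 KiB
 * granularity.  For a 1 GiB DRAM chip-select:
 *
 *     (0x40000000 - 1) & 0xffff0000 == 0x3fff0000
 *
 * and the matching bit in win_enable is cleared to activate that window.
 */
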
/* Power up the port */
static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
{
	/* MAC Cause register should be cleared */
	mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);

	if (phy_mode == PHY_INTERFACE_MODE_QSGMII)
		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
	else if (phy_mode == PHY_INTERFACE_MODE_SGMII ||
		 phy_mode == PHY_INTERFACE_MODE_1000BASEX)
		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
	else if (!phy_interface_mode_is_rgmii(phy_mode))
		return -EINVAL;

	return 0;
}

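/* Editor's note: as the code above shows, QSGMII and SGMII/1000BASE-X modes
 * each program their own protocol value into MVNETA_SERDES_CFG, RGMII
 * variants need no SERDES configuration at all, and any other phy-mode is
 * rejected with -EINVAL.
 */
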
/* Device initialization routine */
static int mvneta_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *bm_node;
	struct mvneta_port *pp;
	struct net_device *dev;
	struct phylink *phylink;
	const char *dt_mac_addr;
	char hw_mac_addr[ETH_ALEN];
	const char *mac_from;
	int tx_csum_limit;
	int phy_mode;
	int err;
	int cpu;

	dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);
	if (!dev)
		return -ENOMEM;

	dev->irq = irq_of_parse_and_map(dn, 0);
	if (dev->irq == 0) {
		err = -EINVAL;
		goto err_free_netdev;
	}

	phy_mode = of_get_phy_mode(dn);
	if (phy_mode < 0) {
		dev_err(&pdev->dev, "incorrect phy-mode\n");
		err = -EINVAL;
		goto err_free_irq;
	}

	phylink = phylink_create(dev, pdev->dev.fwnode, phy_mode,
				 &mvneta_phylink_ops);
	if (IS_ERR(phylink)) {
		err = PTR_ERR(phylink);
		goto err_free_irq;
	}

	dev->tx_queue_len = MVNETA_MAX_TXD;
	dev->watchdog_timeo = 5 * HZ;
	dev->netdev_ops = &mvneta_netdev_ops;

	dev->ethtool_ops = &mvneta_eth_tool_ops;

	pp = netdev_priv(dev);
	spin_lock_init(&pp->lock);
	pp->phylink = phylink;
	pp->phy_interface = phy_mode;

	pp->rxq_def = rxq_def;
	pp->indir[0] = rxq_def;

	/* Get special SoC configurations */
	if (of_device_is_compatible(dn, "marvell,armada-3700-neta"))
		pp->neta_armada3700 = true;

	pp->clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(pp->clk))
		pp->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pp->clk)) {
		err = PTR_ERR(pp->clk);
		goto err_free_phylink;
	}

	clk_prepare_enable(pp->clk);

	pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
	if (!IS_ERR(pp->clk_bus))
		clk_prepare_enable(pp->clk_bus);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pp->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(pp->base)) {
		err = PTR_ERR(pp->base);
		goto err_clk;
	}

	/* Alloc per-cpu port structure */
	pp->ports = alloc_percpu(struct mvneta_pcpu_port);
	if (!pp->ports) {
		err = -ENOMEM;
		goto err_clk;
	}

	/* Alloc per-cpu stats */
	pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
	if (!pp->stats) {
		err = -ENOMEM;
		goto err_free_ports;
	}

	dt_mac_addr = of_get_mac_address(dn);
	if (dt_mac_addr) {
		mac_from = "device tree";
		memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN);
	} else {
		mvneta_get_mac_addr(pp, hw_mac_addr);
		if (is_valid_ether_addr(hw_mac_addr)) {
			mac_from = "hardware";
			memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
		} else {
			mac_from = "random";
			eth_hw_addr_random(dev);
		}
	}

	if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) {
		if (tx_csum_limit < 0 ||
		    tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) {
			tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
			dev_info(&pdev->dev,
				 "Wrong TX csum limit in DT, set to %dB\n",
				 MVNETA_TX_CSUM_DEF_SIZE);
		}
	} else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) {
		tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
	} else {
		tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE;
	}

	pp->tx_csum_limit = tx_csum_limit;

	pp->dram_target_info = mv_mbus_dram_info();
	/* Armada3700 requires setting default configuration of Mbus
	 * windows, however without using filled mbus_dram_target_info
	 * structure.
	 */
	if (pp->dram_target_info || pp->neta_armada3700)
		mvneta_conf_mbus_windows(pp, pp->dram_target_info);

	pp->tx_ring_size = MVNETA_MAX_TXD;
	pp->rx_ring_size = MVNETA_MAX_RXD;

	pp->dev = dev;
	SET_NETDEV_DEV(dev, &pdev->dev);

	pp->id = global_port_id++;
	pp->rx_offset_correction = 0; /* not relevant for SW BM */

	/* Obtain access to BM resources if enabled and already initialized */
	bm_node = of_parse_phandle(dn, "buffer-manager", 0);
	if (bm_node) {
		pp->bm_priv = mvneta_bm_get(bm_node);
		if (pp->bm_priv) {
			err = mvneta_bm_port_init(pdev, pp);
			if (err < 0) {
				dev_info(&pdev->dev,
					 "use SW buffer management\n");
				mvneta_bm_put(pp->bm_priv);
				pp->bm_priv = NULL;
			}
		}
		/* Set RX packet offset correction for platforms whose
		 * NET_SKB_PAD exceeds 64B. It should be 64B for 64-bit
		 * platforms and 0B for 32-bit ones.
		 */
		pp->rx_offset_correction = max(0,
					       NET_SKB_PAD -
					       MVNETA_RX_PKT_OFFSET_CORRECTION);
	}
	of_node_put(bm_node);

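	/* Editor's note: hedged worked example for the correction above.  If
	 * NET_SKB_PAD were 128 and the hardware packet-offset constant
	 * MVNETA_RX_PKT_OFFSET_CORRECTION were 64, the result would be
	 * max(0, 128 - 64) = 64; if both were 64, it would stay 0.  The extra
	 * DMA offset therefore only kicks in when the skb headroom exceeds
	 * what the hardware offset field already accounts for.
	 */
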
	err = mvneta_init(&pdev->dev, pp);
	if (err < 0)
		goto err_netdev;

	err = mvneta_port_power_up(pp, phy_mode);
	if (err < 0) {
		dev_err(&pdev->dev, "can't power up port\n");
		goto err_netdev;
	}

	/* Armada3700 network controller does not support per-cpu
	 * operation, so only a single NAPI should be initialized.
	 */
	if (pp->neta_armada3700) {
		netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
	} else {
		for_each_present_cpu(cpu) {
			struct mvneta_pcpu_port *port =
				per_cpu_ptr(pp->ports, cpu);

			netif_napi_add(dev, &port->napi, mvneta_poll,
				       NAPI_POLL_WEIGHT);
			port->pp = pp;
		}
	}

	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_TSO | NETIF_F_RXCSUM;
	dev->hw_features |= dev->features;
	dev->vlan_features |= dev->features;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;

	/* MTU range: 68 - 9676 */
	dev->min_mtu = ETH_MIN_MTU;
	/* 9676 == 9700 - 20 and rounding to 8 */
	dev->max_mtu = 9676;

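	/* Editor's note: the 9676 figure presumably decomposes as follows.
	 * The hardware frame limit is 9700 bytes, the total frame size must be
	 * 8-byte aligned, and the "- 20" in the comment above is the 2-byte
	 * Marvell header + 14-byte Ethernet header + 4-byte FCS overhead:
	 *
	 *     largest 8-aligned frame <= 9700 is 9696; 9696 - 20 = 9676
	 */
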
	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register\n");
		goto err_free_stats;
	}

	netdev_info(dev, "Using %s mac address %pM\n", mac_from,
		    dev->dev_addr);

	platform_set_drvdata(pdev, pp->dev);

	return 0;

err_netdev:
	unregister_netdev(dev);
	if (pp->bm_priv) {
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
				       1 << pp->id);
		mvneta_bm_put(pp->bm_priv);
	}
err_free_stats:
	free_percpu(pp->stats);
err_free_ports:
	free_percpu(pp->ports);
err_clk:
	clk_disable_unprepare(pp->clk_bus);
	clk_disable_unprepare(pp->clk);
err_free_phylink:
	if (pp->phylink)
		phylink_destroy(pp->phylink);
err_free_irq:
	irq_dispose_mapping(dev->irq);
err_free_netdev:
	free_netdev(dev);
	return err;
}

/* Device removal routine */
static int mvneta_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct mvneta_port *pp = netdev_priv(dev);

	unregister_netdev(dev);
	clk_disable_unprepare(pp->clk_bus);
	clk_disable_unprepare(pp->clk);
	free_percpu(pp->ports);
	free_percpu(pp->stats);
	irq_dispose_mapping(dev->irq);
	phylink_destroy(pp->phylink);

	if (pp->bm_priv) {
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
				       1 << pp->id);
		mvneta_bm_put(pp->bm_priv);
	}

	free_netdev(dev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int mvneta_suspend(struct device *device)
{
	int queue;
	struct net_device *dev = dev_get_drvdata(device);
	struct mvneta_port *pp = netdev_priv(dev);

	if (!netif_running(dev))
		goto clean_exit;

	if (!pp->neta_armada3700) {
		spin_lock(&pp->lock);
		pp->is_stopped = true;
		spin_unlock(&pp->lock);

		cpuhp_state_remove_instance_nocalls(online_hpstate,
						    &pp->node_online);
		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						    &pp->node_dead);
	}

	mvneta_stop_dev(pp);

	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

		mvneta_rxq_drop_pkts(pp, rxq);
	}

	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];

		mvneta_txq_hw_deinit(pp, txq);
	}

clean_exit:
	netif_device_detach(dev);
	clk_disable_unprepare(pp->clk_bus);
	clk_disable_unprepare(pp->clk);

	return 0;
}

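/* Editor's note: on suspend the RX queues are drained and the TX queues torn
 * down above because the controller's DMA state does not survive the power
 * transition; mvneta_resume() below rebuilds both descriptor rings from
 * scratch (next_desc_to_proc = 0) before restarting the port.
 */
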
static int mvneta_resume(struct device *device)
{
	struct platform_device *pdev = to_platform_device(device);
	struct net_device *dev = dev_get_drvdata(device);
	struct mvneta_port *pp = netdev_priv(dev);
	int err, queue;

	clk_prepare_enable(pp->clk);
	if (!IS_ERR(pp->clk_bus))
		clk_prepare_enable(pp->clk_bus);
	if (pp->dram_target_info || pp->neta_armada3700)
		mvneta_conf_mbus_windows(pp, pp->dram_target_info);
	if (pp->bm_priv) {
		err = mvneta_bm_port_init(pdev, pp);
		if (err < 0) {
			dev_info(&pdev->dev, "use SW buffer management\n");
			pp->bm_priv = NULL;
		}
	}
	mvneta_defaults_set(pp);
	err = mvneta_port_power_up(pp, pp->phy_interface);
	if (err < 0) {
		dev_err(device, "can't power up port\n");
		return err;
	}

	netif_device_attach(dev);

	if (!netif_running(dev))
		return 0;

	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

		rxq->next_desc_to_proc = 0;
		mvneta_rxq_hw_init(pp, rxq);
	}

	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];

		txq->next_desc_to_proc = 0;
		mvneta_txq_hw_init(pp, txq);
	}

	if (!pp->neta_armada3700) {
		spin_lock(&pp->lock);
		pp->is_stopped = false;
		spin_unlock(&pp->lock);
		cpuhp_state_add_instance_nocalls(online_hpstate,
						 &pp->node_online);
		cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						 &pp->node_dead);
	}

	mvneta_start_dev(pp);
	mvneta_set_rx_mode(dev);

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(mvneta_pm_ops, mvneta_suspend, mvneta_resume);

static const struct of_device_id mvneta_match[] = {
	{ .compatible = "marvell,armada-370-neta" },
	{ .compatible = "marvell,armada-xp-neta" },
	{ .compatible = "marvell,armada-3700-neta" },
	{ }
};
MODULE_DEVICE_TABLE(of, mvneta_match);

static struct platform_driver mvneta_driver = {
	.probe = mvneta_probe,
	.remove = mvneta_remove,
	.driver = {
		.name = MVNETA_DRIVER_NAME,
		.of_match_table = mvneta_match,
		.pm = &mvneta_pm_ops,
	},
};

static int __init mvneta_driver_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/mvneta:online",
				      mvneta_cpu_online,
				      mvneta_cpu_down_prepare);
	if (ret < 0)
		goto out;

	online_hpstate = ret;
	ret = cpuhp_setup_state_multi(CPUHP_NET_MVNETA_DEAD, "net/mvneta:dead",
				      NULL, mvneta_cpu_dead);
	if (ret)
		goto err_dead;

	ret = platform_driver_register(&mvneta_driver);
	if (ret)
		goto err;
	return 0;

err:
	cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
err_dead:
	cpuhp_remove_multi_state(online_hpstate);
out:
	return ret;
}
module_init(mvneta_driver_init);

static void __exit mvneta_driver_exit(void)
{
	platform_driver_unregister(&mvneta_driver);
	cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
	cpuhp_remove_multi_state(online_hpstate);
}
module_exit(mvneta_driver_exit);

MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
MODULE_LICENSE("GPL");

module_param(rxq_number, int, 0444);
module_param(txq_number, int, 0444);

module_param(rxq_def, int, 0444);
module_param(rx_copybreak, int, 0644);
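
/* Editor's note: all of the module parameters above are read-only at runtime
 * (0444) except rx_copybreak (0644).  A hedged usage sketch, applicable when
 * the driver is built as a module:
 *
 *     modprobe mvneta rxq_def=1
 *     echo 512 > /sys/module/mvneta/parameters/rx_copybreak
 *
 * Queue counts and rxq_def must therefore be chosen at load time.
 */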