/*
 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
 *
 * Copyright (C) 2012 Marvell
 *
 * Rami Rosen <rosenr@marvell.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/inetdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy/phy.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include "mvneta_bm.h"
#include <net/page_pool.h>
#include <linux/bpf_trace.h>
44 #define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2))
45 #define MVNETA_RXQ_HW_BUF_ALLOC BIT(0)
46 #define MVNETA_RXQ_SHORT_POOL_ID_SHIFT 4
47 #define MVNETA_RXQ_SHORT_POOL_ID_MASK 0x30
48 #define MVNETA_RXQ_LONG_POOL_ID_SHIFT 6
49 #define MVNETA_RXQ_LONG_POOL_ID_MASK 0xc0
50 #define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8)
51 #define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8)
52 #define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2))
53 #define MVNETA_RXQ_NON_OCCUPIED(v) ((v) << 16)
54 #define MVNETA_RXQ_BASE_ADDR_REG(q) (0x1480 + ((q) << 2))
55 #define MVNETA_RXQ_SIZE_REG(q) (0x14a0 + ((q) << 2))
56 #define MVNETA_RXQ_BUF_SIZE_SHIFT 19
57 #define MVNETA_RXQ_BUF_SIZE_MASK (0x1fff << 19)
58 #define MVNETA_RXQ_STATUS_REG(q) (0x14e0 + ((q) << 2))
59 #define MVNETA_RXQ_OCCUPIED_ALL_MASK 0x3fff
60 #define MVNETA_RXQ_STATUS_UPDATE_REG(q) (0x1500 + ((q) << 2))
61 #define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT 16
62 #define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX 255
63 #define MVNETA_PORT_POOL_BUFFER_SZ_REG(pool) (0x1700 + ((pool) << 2))
64 #define MVNETA_PORT_POOL_BUFFER_SZ_SHIFT 3
65 #define MVNETA_PORT_POOL_BUFFER_SZ_MASK 0xfff8
66 #define MVNETA_PORT_RX_RESET 0x1cc0
67 #define MVNETA_PORT_RX_DMA_RESET BIT(0)
68 #define MVNETA_PHY_ADDR 0x2000
69 #define MVNETA_PHY_ADDR_MASK 0x1f
70 #define MVNETA_MBUS_RETRY 0x2010
71 #define MVNETA_UNIT_INTR_CAUSE 0x2080
72 #define MVNETA_UNIT_CONTROL 0x20B0
73 #define MVNETA_PHY_POLLING_ENABLE BIT(1)
74 #define MVNETA_WIN_BASE(w) (0x2200 + ((w) << 3))
75 #define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3))
76 #define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2))
77 #define MVNETA_BASE_ADDR_ENABLE 0x2290
78 #define MVNETA_ACCESS_PROTECT_ENABLE 0x2294
79 #define MVNETA_PORT_CONFIG 0x2400
80 #define MVNETA_UNI_PROMISC_MODE BIT(0)
81 #define MVNETA_DEF_RXQ(q) ((q) << 1)
82 #define MVNETA_DEF_RXQ_ARP(q) ((q) << 4)
83 #define MVNETA_TX_UNSET_ERR_SUM BIT(12)
84 #define MVNETA_DEF_RXQ_TCP(q) ((q) << 16)
85 #define MVNETA_DEF_RXQ_UDP(q) ((q) << 19)
86 #define MVNETA_DEF_RXQ_BPDU(q) ((q) << 22)
87 #define MVNETA_RX_CSUM_WITH_PSEUDO_HDR BIT(25)
#define      MVNETA_PORT_CONFIG_DEFL_VALUE(q)	(MVNETA_DEF_RXQ(q)       | \
						 MVNETA_DEF_RXQ_ARP(q)	 | \
						 MVNETA_DEF_RXQ_TCP(q)	 | \
						 MVNETA_DEF_RXQ_UDP(q)	 | \
						 MVNETA_DEF_RXQ_BPDU(q)	 | \
						 MVNETA_TX_UNSET_ERR_SUM | \
						 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
95 #define MVNETA_PORT_CONFIG_EXTEND 0x2404
96 #define MVNETA_MAC_ADDR_LOW 0x2414
97 #define MVNETA_MAC_ADDR_HIGH 0x2418
98 #define MVNETA_SDMA_CONFIG 0x241c
99 #define MVNETA_SDMA_BRST_SIZE_16 4
100 #define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1)
101 #define MVNETA_RX_NO_DATA_SWAP BIT(4)
102 #define MVNETA_TX_NO_DATA_SWAP BIT(5)
103 #define MVNETA_DESC_SWAP BIT(6)
104 #define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22)
105 #define MVNETA_VLAN_PRIO_TO_RXQ 0x2440
106 #define MVNETA_VLAN_PRIO_RXQ_MAP(prio, rxq) ((rxq) << ((prio) * 3))
107 #define MVNETA_PORT_STATUS 0x2444
108 #define MVNETA_TX_IN_PRGRS BIT(0)
109 #define MVNETA_TX_FIFO_EMPTY BIT(8)
110 #define MVNETA_RX_MIN_FRAME_SIZE 0x247c
111 /* Only exists on Armada XP and Armada 370 */
112 #define MVNETA_SERDES_CFG 0x24A0
113 #define MVNETA_SGMII_SERDES_PROTO 0x0cc7
114 #define MVNETA_QSGMII_SERDES_PROTO 0x0667
115 #define MVNETA_HSGMII_SERDES_PROTO 0x1107
116 #define MVNETA_TYPE_PRIO 0x24bc
117 #define MVNETA_FORCE_UNI BIT(21)
118 #define MVNETA_TXQ_CMD_1 0x24e4
119 #define MVNETA_TXQ_CMD 0x2448
120 #define MVNETA_TXQ_DISABLE_SHIFT 8
121 #define MVNETA_TXQ_ENABLE_MASK 0x000000ff
122 #define MVNETA_RX_DISCARD_FRAME_COUNT 0x2484
123 #define MVNETA_OVERRUN_FRAME_COUNT 0x2488
124 #define MVNETA_GMAC_CLOCK_DIVIDER 0x24f4
125 #define MVNETA_GMAC_1MS_CLOCK_ENABLE BIT(31)
126 #define MVNETA_ACC_MODE 0x2500
127 #define MVNETA_BM_ADDRESS 0x2504
128 #define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2))
129 #define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff
130 #define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00
131 #define MVNETA_CPU_RXQ_ACCESS(rxq) BIT(rxq)
132 #define MVNETA_CPU_TXQ_ACCESS(txq) BIT(txq + 8)
133 #define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2))
/* Exception Interrupt Port/Queue Cause register
 *
 * Their behavior depends on the mapping done using the PCPX2Q
 * registers. For a given CPU, if the bit associated with a queue is
 * not set, then a read of this register from that CPU always returns
 * 0 and a write does nothing.
 */
143 #define MVNETA_INTR_NEW_CAUSE 0x25a0
144 #define MVNETA_INTR_NEW_MASK 0x25a4
/* bits  0..7  = TXQ SENT, one bit per queue.
 * bits  8..15 = RXQ OCCUP, one bit per queue.
 * bits 16..23 = RXQ FREE, one bit per queue.
 * bit  29     = OLD_REG_SUM, see old reg ?
 * bit  30     = TX_ERR_SUM, one bit for 4 ports
 * bit  31     = MISC_SUM,   one bit for 4 ports
 */
153 #define MVNETA_TX_INTR_MASK(nr_txqs) (((1 << nr_txqs) - 1) << 0)
154 #define MVNETA_TX_INTR_MASK_ALL (0xff << 0)
155 #define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8)
156 #define MVNETA_RX_INTR_MASK_ALL (0xff << 8)
157 #define MVNETA_MISCINTR_INTR_MASK BIT(31)
159 #define MVNETA_INTR_OLD_CAUSE 0x25a8
160 #define MVNETA_INTR_OLD_MASK 0x25ac
162 /* Data Path Port/Queue Cause Register */
163 #define MVNETA_INTR_MISC_CAUSE 0x25b0
164 #define MVNETA_INTR_MISC_MASK 0x25b4
166 #define MVNETA_CAUSE_PHY_STATUS_CHANGE BIT(0)
167 #define MVNETA_CAUSE_LINK_CHANGE BIT(1)
168 #define MVNETA_CAUSE_PTP BIT(4)
170 #define MVNETA_CAUSE_INTERNAL_ADDR_ERR BIT(7)
171 #define MVNETA_CAUSE_RX_OVERRUN BIT(8)
172 #define MVNETA_CAUSE_RX_CRC_ERROR BIT(9)
173 #define MVNETA_CAUSE_RX_LARGE_PKT BIT(10)
174 #define MVNETA_CAUSE_TX_UNDERUN BIT(11)
175 #define MVNETA_CAUSE_PRBS_ERR BIT(12)
176 #define MVNETA_CAUSE_PSC_SYNC_CHANGE BIT(13)
177 #define MVNETA_CAUSE_SERDES_SYNC_ERR BIT(14)
179 #define MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT 16
180 #define MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
181 #define MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))
183 #define MVNETA_CAUSE_TXQ_ERROR_SHIFT 24
184 #define MVNETA_CAUSE_TXQ_ERROR_ALL_MASK (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
185 #define MVNETA_CAUSE_TXQ_ERROR_MASK(q) (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))
187 #define MVNETA_INTR_ENABLE 0x25b8
188 #define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00
189 #define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0x000000ff
191 #define MVNETA_RXQ_CMD 0x2680
192 #define MVNETA_RXQ_DISABLE_SHIFT 8
193 #define MVNETA_RXQ_ENABLE_MASK 0x000000ff
194 #define MVETH_TXQ_TOKEN_COUNT_REG(q) (0x2700 + ((q) << 4))
195 #define MVETH_TXQ_TOKEN_CFG_REG(q) (0x2704 + ((q) << 4))
196 #define MVNETA_GMAC_CTRL_0 0x2c00
197 #define MVNETA_GMAC_MAX_RX_SIZE_SHIFT 2
198 #define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc
199 #define MVNETA_GMAC0_PORT_1000BASE_X BIT(1)
200 #define MVNETA_GMAC0_PORT_ENABLE BIT(0)
201 #define MVNETA_GMAC_CTRL_2 0x2c08
202 #define MVNETA_GMAC2_INBAND_AN_ENABLE BIT(0)
203 #define MVNETA_GMAC2_PCS_ENABLE BIT(3)
204 #define MVNETA_GMAC2_PORT_RGMII BIT(4)
205 #define MVNETA_GMAC2_PORT_RESET BIT(6)
206 #define MVNETA_GMAC_STATUS 0x2c10
207 #define MVNETA_GMAC_LINK_UP BIT(0)
208 #define MVNETA_GMAC_SPEED_1000 BIT(1)
209 #define MVNETA_GMAC_SPEED_100 BIT(2)
210 #define MVNETA_GMAC_FULL_DUPLEX BIT(3)
211 #define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE BIT(4)
212 #define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE BIT(5)
213 #define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE BIT(6)
214 #define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE BIT(7)
215 #define MVNETA_GMAC_AN_COMPLETE BIT(11)
216 #define MVNETA_GMAC_SYNC_OK BIT(14)
217 #define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c
218 #define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0)
219 #define MVNETA_GMAC_FORCE_LINK_PASS BIT(1)
220 #define MVNETA_GMAC_INBAND_AN_ENABLE BIT(2)
221 #define MVNETA_GMAC_AN_BYPASS_ENABLE BIT(3)
222 #define MVNETA_GMAC_INBAND_RESTART_AN BIT(4)
223 #define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5)
224 #define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6)
225 #define MVNETA_GMAC_AN_SPEED_EN BIT(7)
226 #define MVNETA_GMAC_CONFIG_FLOW_CTRL BIT(8)
227 #define MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL BIT(9)
228 #define MVNETA_GMAC_AN_FLOW_CTRL_EN BIT(11)
229 #define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
230 #define MVNETA_GMAC_AN_DUPLEX_EN BIT(13)
231 #define MVNETA_GMAC_CTRL_4 0x2c90
232 #define MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE BIT(1)
233 #define MVNETA_MIB_COUNTERS_BASE 0x3000
234 #define MVNETA_MIB_LATE_COLLISION 0x7c
235 #define MVNETA_DA_FILT_SPEC_MCAST 0x3400
236 #define MVNETA_DA_FILT_OTH_MCAST 0x3500
237 #define MVNETA_DA_FILT_UCAST_BASE 0x3600
238 #define MVNETA_TXQ_BASE_ADDR_REG(q) (0x3c00 + ((q) << 2))
239 #define MVNETA_TXQ_SIZE_REG(q) (0x3c20 + ((q) << 2))
240 #define MVNETA_TXQ_SENT_THRESH_ALL_MASK 0x3fff0000
241 #define MVNETA_TXQ_SENT_THRESH_MASK(coal) ((coal) << 16)
242 #define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2))
243 #define MVNETA_TXQ_DEC_SENT_SHIFT 16
244 #define MVNETA_TXQ_DEC_SENT_MASK 0xff
245 #define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2))
246 #define MVNETA_TXQ_SENT_DESC_SHIFT 16
247 #define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000
248 #define MVNETA_PORT_TX_RESET 0x3cf0
249 #define MVNETA_PORT_TX_DMA_RESET BIT(0)
250 #define MVNETA_TX_MTU 0x3e0c
251 #define MVNETA_TX_TOKEN_SIZE 0x3e14
252 #define MVNETA_TX_TOKEN_SIZE_MAX 0xffffffff
253 #define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2))
254 #define MVNETA_TXQ_TOKEN_SIZE_MAX 0x7fffffff
256 #define MVNETA_LPI_CTRL_0 0x2cc0
257 #define MVNETA_LPI_CTRL_1 0x2cc4
258 #define MVNETA_LPI_REQUEST_ENABLE BIT(0)
259 #define MVNETA_LPI_CTRL_2 0x2cc8
260 #define MVNETA_LPI_STATUS 0x2ccc
262 #define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
/* Descriptor ring Macros */
#define MVNETA_QUEUE_NEXT_DESC(q, index)	\
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
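/* Both the RX and TX rings are plain circular arrays, so "next" simply
 * wraps back to descriptor 0 once last_desc has been handed out.
 */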
268 /* Various constants */
271 #define MVNETA_TXDONE_COAL_PKTS 0 /* interrupt per packet */
272 #define MVNETA_RX_COAL_PKTS 32
273 #define MVNETA_RX_COAL_USEC 100
/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver), or is filled automatically by zeroes on
 * the RX side. Those two bytes being at the front of the Ethernet
 * header, they allow to have the IP header aligned on a 4 bytes
 * boundary automatically: the hardware skips those two bytes on its
 * own.
 */
#define MVNETA_MH_SIZE			2
285 #define MVNETA_VLAN_TAG_LEN 4
287 #define MVNETA_TX_CSUM_DEF_SIZE 1600
288 #define MVNETA_TX_CSUM_MAX_SIZE 9800
289 #define MVNETA_ACC_MODE_EXT1 1
290 #define MVNETA_ACC_MODE_EXT2 2
292 #define MVNETA_MAX_DECODE_WIN 6
294 /* Timeout constants */
295 #define MVNETA_TX_DISABLE_TIMEOUT_MSEC 1000
296 #define MVNETA_RX_DISABLE_TIMEOUT_MSEC 1000
297 #define MVNETA_TX_FIFO_EMPTY_TIMEOUT 10000
299 #define MVNETA_TX_MTU_MAX 0x3ffff
/* The RSS lookup table actually has 256 entries but we do not use
 * them yet
 */
#define MVNETA_RSS_LU_TABLE_SIZE	1
306 /* Max number of Rx descriptors */
307 #define MVNETA_MAX_RXD 512
309 /* Max number of Tx descriptors */
310 #define MVNETA_MAX_TXD 1024
312 /* Max number of allowed TCP segments for software TSO */
313 #define MVNETA_MAX_TSO_SEGS 100
315 #define MVNETA_MAX_SKB_DESCS (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
317 /* descriptor aligned size */
318 #define MVNETA_DESC_ALIGNED_SIZE 32
320 /* Number of bytes to be taken into account by HW when putting incoming data
321 * to the buffers. It is needed in case NET_SKB_PAD exceeds maximum packet
322 * offset supported in MVNETA_RXQ_CONFIG_REG(q) registers.
324 #define MVNETA_RX_PKT_OFFSET_CORRECTION 64
#define MVNETA_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN,			      \
	      cache_line_size())
331 /* Driver assumes that the last 3 bits are 0 */
332 #define MVNETA_SKB_HEADROOM ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8)
333 #define MVNETA_SKB_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info) + \
334 MVNETA_SKB_HEADROOM))
335 #define MVNETA_MAX_RX_BUF_SIZE (PAGE_SIZE - MVNETA_SKB_PAD)
#define IS_TSO_HEADER(txq, addr) \
	((addr >= txq->tso_hdrs_phys) && \
	 (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))

#define MVNETA_RX_GET_BM_POOL_ID(rxd) \
	(((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT)
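/* Note: IS_TSO_HEADER() is what lets the TX completion path tell TSO
 * header buffers (carved out of the per-queue tso_hdrs DMA region and
 * therefore never unmapped individually) apart from regular mapped
 * fragments; see mvneta_txq_bufs_free() below.
 */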
enum {
	ETHTOOL_STAT_EEE_WAKEUP,
	ETHTOOL_STAT_SKB_ALLOC_ERR,
	ETHTOOL_STAT_REFILL_ERR,
	ETHTOOL_XDP_REDIRECT,
	ETHTOOL_XDP_PASS,
	ETHTOOL_XDP_DROP,
	ETHTOOL_XDP_TX,
	ETHTOOL_XDP_TX_ERR,
	ETHTOOL_XDP_XMIT,
	ETHTOOL_XDP_XMIT_ERR,
};

struct mvneta_statistic {
	unsigned short offset;
	unsigned short type;
	const char name[ETH_GSTRING_LEN];
};

#define T_REG_32	32
#define T_REG_64	64
#define T_SW		1
#define MVNETA_XDP_PASS		0
#define MVNETA_XDP_DROPPED	BIT(0)
#define MVNETA_XDP_TX		BIT(1)
#define MVNETA_XDP_REDIR	BIT(2)

static const struct mvneta_statistic mvneta_statistics[] = {
	{ 0x3000, T_REG_64, "good_octets_received", },
	{ 0x3010, T_REG_32, "good_frames_received", },
	{ 0x3008, T_REG_32, "bad_octets_received", },
	{ 0x3014, T_REG_32, "bad_frames_received", },
	{ 0x3018, T_REG_32, "broadcast_frames_received", },
	{ 0x301c, T_REG_32, "multicast_frames_received", },
	{ 0x3050, T_REG_32, "unrec_mac_control_received", },
	{ 0x3058, T_REG_32, "good_fc_received", },
	{ 0x305c, T_REG_32, "bad_fc_received", },
	{ 0x3060, T_REG_32, "undersize_received", },
	{ 0x3064, T_REG_32, "fragments_received", },
	{ 0x3068, T_REG_32, "oversize_received", },
	{ 0x306c, T_REG_32, "jabber_received", },
	{ 0x3070, T_REG_32, "mac_receive_error", },
	{ 0x3074, T_REG_32, "bad_crc_event", },
	{ 0x3078, T_REG_32, "collision", },
	{ 0x307c, T_REG_32, "late_collision", },
	{ 0x2484, T_REG_32, "rx_discard", },
	{ 0x2488, T_REG_32, "rx_overrun", },
	{ 0x3020, T_REG_32, "frames_64_octets", },
	{ 0x3024, T_REG_32, "frames_65_to_127_octets", },
	{ 0x3028, T_REG_32, "frames_128_to_255_octets", },
	{ 0x302c, T_REG_32, "frames_256_to_511_octets", },
	{ 0x3030, T_REG_32, "frames_512_to_1023_octets", },
	{ 0x3034, T_REG_32, "frames_1024_to_max_octets", },
	{ 0x3038, T_REG_64, "good_octets_sent", },
	{ 0x3040, T_REG_32, "good_frames_sent", },
	{ 0x3044, T_REG_32, "excessive_collision", },
	{ 0x3048, T_REG_32, "multicast_frames_sent", },
	{ 0x304c, T_REG_32, "broadcast_frames_sent", },
	{ 0x3054, T_REG_32, "fc_sent", },
	{ 0x300c, T_REG_32, "internal_mac_transmit_err", },
	{ ETHTOOL_STAT_EEE_WAKEUP, T_SW, "eee_wakeup_errors", },
	{ ETHTOOL_STAT_SKB_ALLOC_ERR, T_SW, "skb_alloc_errors", },
	{ ETHTOOL_STAT_REFILL_ERR, T_SW, "refill_errors", },
	{ ETHTOOL_XDP_REDIRECT, T_SW, "rx_xdp_redirect", },
	{ ETHTOOL_XDP_PASS, T_SW, "rx_xdp_pass", },
	{ ETHTOOL_XDP_DROP, T_SW, "rx_xdp_drop", },
	{ ETHTOOL_XDP_TX, T_SW, "rx_xdp_tx", },
	{ ETHTOOL_XDP_TX_ERR, T_SW, "rx_xdp_tx_errors", },
	{ ETHTOOL_XDP_XMIT, T_SW, "tx_xdp_xmit", },
	{ ETHTOOL_XDP_XMIT_ERR, T_SW, "tx_xdp_xmit_errors", },
};
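/* A rough note on how this table is used: T_REG_32/T_REG_64 entries are
 * read from the hardware counter at 'offset', while T_SW entries index
 * software counters kept in the per-CPU stats (see mvneta_ethtool_stats
 * below); the ethtool callbacks that walk this table live further down
 * in the driver.
 */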
struct mvneta_stats {
	u64	rx_packets;
	u64	rx_bytes;
	u64	tx_packets;
	u64	tx_bytes;
	/* xdp */
	u64	xdp_redirect;
	u64	xdp_pass;
	u64	xdp_drop;
	u64	xdp_xmit;
	u64	xdp_xmit_err;
	u64	xdp_tx;
	u64	xdp_tx_err;
};

struct mvneta_ethtool_stats {
	struct mvneta_stats ps;
	u64	skb_alloc_error;
	u64	refill_error;
};

struct mvneta_pcpu_stats {
	struct u64_stats_sync syncp;

	struct mvneta_ethtool_stats es;
	u64	rx_dropped;
	u64	rx_errors;
};
struct mvneta_pcpu_port {
	/* Pointer to the shared port */
	struct mvneta_port	*pp;

	/* Pointer to the CPU-local NAPI struct */
	struct napi_struct	napi;

	/* Cause of the previous interrupt */
	u32			cause_rx_tx;
};

struct mvneta_port {
	u8 id;
	struct mvneta_pcpu_port __percpu	*ports;
	struct mvneta_pcpu_stats __percpu	*stats;

	void __iomem *base;
	struct mvneta_rx_queue *rxqs;
	struct mvneta_tx_queue *txqs;
	struct net_device *dev;
	struct hlist_node node_online;
	struct hlist_node node_dead;
	int rxq_def;
	/* Protect the access to the percpu interrupt registers,
	 * ensuring that the configuration remains coherent.
	 */
	spinlock_t lock;

	u32 cause_rx_tx;
	struct napi_struct napi;

	struct bpf_prog *xdp_prog;

	/* Core clock */
	struct clk *clk;
	u8 mcast_count[256];

	phy_interface_t phy_interface;
	struct device_node *dn;
	unsigned int tx_csum_limit;
	struct phylink *phylink;
	struct phylink_config phylink_config;

	struct mvneta_bm *bm_priv;
	struct mvneta_bm_pool *pool_long;
	struct mvneta_bm_pool *pool_short;
	int bm_win_id;

	unsigned long state;

	u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];

	u32 indir[MVNETA_RSS_LU_TABLE_SIZE];

	/* Flags for special SoC configurations */
	bool neta_armada3700;
	u16 rx_offset_correction;
	const struct mbus_dram_target_info *dram_target_info;
};
/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVNETA_TX_L3_OFF_SHIFT		0
#define MVNETA_TX_IP_HLEN_SHIFT		8
#define MVNETA_TX_L4_UDP		BIT(16)
#define MVNETA_TX_L3_IP6		BIT(17)
#define MVNETA_TXD_IP_CSUM		BIT(18)
#define MVNETA_TXD_Z_PAD		BIT(19)
#define MVNETA_TXD_L_DESC		BIT(20)
#define MVNETA_TXD_F_DESC		BIT(21)
#define MVNETA_TXD_FLZ_DESC		(MVNETA_TXD_Z_PAD  | \
					 MVNETA_TXD_L_DESC | \
					 MVNETA_TXD_F_DESC)
#define MVNETA_TX_L4_CSUM_FULL		BIT(30)
#define MVNETA_TX_L4_CSUM_NOT		BIT(31)
#define MVNETA_RXD_ERR_CRC		0x0
#define MVNETA_RXD_BM_POOL_SHIFT	13
#define MVNETA_RXD_BM_POOL_MASK		(BIT(13) | BIT(14))
#define MVNETA_RXD_ERR_SUMMARY		BIT(16)
#define MVNETA_RXD_ERR_OVERRUN		BIT(17)
#define MVNETA_RXD_ERR_LEN		BIT(18)
#define MVNETA_RXD_ERR_RESOURCE		(BIT(17) | BIT(18))
#define MVNETA_RXD_ERR_CODE_MASK	(BIT(17) | BIT(18))
#define MVNETA_RXD_L3_IP4		BIT(25)
#define MVNETA_RXD_LAST_DESC		BIT(26)
#define MVNETA_RXD_FIRST_DESC		BIT(27)
#define MVNETA_RXD_FIRST_LAST_DESC	(MVNETA_RXD_FIRST_DESC | \
					 MVNETA_RXD_LAST_DESC)
#define MVNETA_RXD_L4_CSUM_OK		BIT(30)
#if defined(__LITTLE_ENDIAN)
struct mvneta_tx_desc {
	u32  command;		/* Options used by HW for packet transmitting.*/
	u16  reserved1;		/* csum_l4 (for future use)		*/
	u16  data_size;		/* Data size of transmitted packet in bytes */
	u32  buf_phys_addr;	/* Physical addr of transmitted buffer	*/
	u32  reserved2;		/* hw_cmd - (for future use, PMT)	*/
	u32  reserved3[4];	/* Reserved - (for future use)		*/
};

struct mvneta_rx_desc {
	u32  status;		/* Info about received packet		*/
	u16  reserved1;		/* pnc_info - (for future use, PnC)	*/
	u16  data_size;		/* Size of received packet in bytes	*/

	u32  buf_phys_addr;	/* Physical address of the buffer	*/
	u32  reserved2;		/* pnc_flow_id  (for future use, PnC)	*/

	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */
	u16  reserved3;		/* prefetch_cmd, for future use		*/
	u16  reserved4;		/* csum_l4 - (for future use, PnC)	*/

	u32  reserved5;		/* pnc_extra PnC (for future use, PnC)	*/
	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF)	*/
};
#else
struct mvneta_tx_desc {
	u16  data_size;		/* Data size of transmitted packet in bytes */
	u16  reserved1;		/* csum_l4 (for future use)		*/
	u32  command;		/* Options used by HW for packet transmitting.*/
	u32  reserved2;		/* hw_cmd - (for future use, PMT)	*/
	u32  buf_phys_addr;	/* Physical addr of transmitted buffer	*/
	u32  reserved3[4];	/* Reserved - (for future use)		*/
};

struct mvneta_rx_desc {
	u16  data_size;		/* Size of received packet in bytes	*/
	u16  reserved1;		/* pnc_info - (for future use, PnC)	*/
	u32  status;		/* Info about received packet		*/

	u32  reserved2;		/* pnc_flow_id  (for future use, PnC)	*/
	u32  buf_phys_addr;	/* Physical address of the buffer	*/

	u16  reserved4;		/* csum_l4 - (for future use, PnC)	*/
	u16  reserved3;		/* prefetch_cmd, for future use		*/
	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */

	u32  reserved5;		/* pnc_extra PnC (for future use, PnC)	*/
	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF)	*/
};
#endif
enum mvneta_tx_buf_type {
	MVNETA_TYPE_SKB,
	MVNETA_TYPE_XDP_TX,
	MVNETA_TYPE_XDP_NDO,
};

struct mvneta_tx_buf {
	enum mvneta_tx_buf_type type;
	union {
		struct xdp_frame *xdpf;
		struct sk_buff *skb;
	};
};

struct mvneta_tx_queue {
	/* Number of this TX queue, in the range 0-7 */
	u8 id;

	/* Number of TX DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used TX DMA descriptor in the
	 * descriptor ring
	 */
	int count;
	int pending;
	int tx_stop_threshold;
	int tx_wake_threshold;

	/* Array of transmitted buffers */
	struct mvneta_tx_buf *buf;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;

	/* Virtual address of the TX DMA descriptors array */
	struct mvneta_tx_desc *descs;

	/* DMA address of the TX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last TX DMA descriptor */
	int last_desc;

	/* Index of the next TX DMA descriptor to process */
	int next_desc_to_proc;

	/* DMA buffers for TSO headers */
	char *tso_hdrs;

	/* DMA address of TSO headers */
	dma_addr_t tso_hdrs_phys;

	/* Affinity mask for CPUs*/
	cpumask_t affinity_mask;
};
struct mvneta_rx_queue {
	/* rx queue number, in the range 0-7 */
	u8 id;

	/* num of rx descriptors in the rx descriptor ring */
	int size;

	struct page_pool *page_pool;
	struct xdp_rxq_info xdp_rxq;

	/* Virtual address of the RX buffer */
	void  **buf_virt_addr;

	/* Virtual address of the RX DMA descriptors array */
	struct mvneta_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;

	/* Index of first RX DMA descriptor to refill */
	int first_to_refill;
	u32 refill_num;
};
static enum cpuhp_state online_hpstate;
/* The hardware supports eight (8) rx queues, but we are only allowing
 * the first one to be used. Therefore, let's just allocate one queue.
 */
static int rxq_number = 8;
static int txq_number = 8;

static int rx_copybreak __read_mostly = 256;

/* HW BM needs each port to be identified by a unique ID */
static int global_port_id;

#define MVNETA_DRIVER_NAME "mvneta"
#define MVNETA_DRIVER_VERSION "1.0"
/* Utility/helper methods */

/* Write helper method */
static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
{
	writel(data, pp->base + offset);
}

/* Read helper method */
static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
{
	return readl(pp->base + offset);
}

/* Increment txq get counter */
static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
{
	txq->txq_get_index++;
	if (txq->txq_get_index == txq->size)
		txq->txq_get_index = 0;
}

/* Increment txq put counter */
static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
{
	txq->txq_put_index++;
	if (txq->txq_put_index == txq->size)
		txq->txq_put_index = 0;
}
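/* txq_put_index is advanced by the transmit paths as buffers are queued
 * and txq_get_index by the completion path as they are freed; both simply
 * wrap modulo the ring size.
 */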
/* Clear all MIB counters */
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{
	int i;

	/* Perform dummy reads from MIB counters */
	for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
		mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
	mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
	mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
}
/* Get System Network Statistics */
static void
mvneta_get_stats64(struct net_device *dev,
		   struct rtnl_link_stats64 *stats)
{
	struct mvneta_port *pp = netdev_priv(dev);
	unsigned int start;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mvneta_pcpu_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 rx_dropped;
		u64 rx_errors;
		u64 tx_packets;
		u64 tx_bytes;

		cpu_stats = per_cpu_ptr(pp->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rx_packets = cpu_stats->es.ps.rx_packets;
			rx_bytes   = cpu_stats->es.ps.rx_bytes;
			rx_dropped = cpu_stats->rx_dropped;
			rx_errors  = cpu_stats->rx_errors;
			tx_packets = cpu_stats->es.ps.tx_packets;
			tx_bytes   = cpu_stats->es.ps.tx_bytes;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes   += rx_bytes;
		stats->rx_dropped += rx_dropped;
		stats->rx_errors  += rx_errors;
		stats->tx_packets += tx_packets;
		stats->tx_bytes   += tx_bytes;
	}

	stats->tx_dropped = dev->stats.tx_dropped;
}
/* Rx descriptors helper methods */

/* Checks whether the RX descriptor having this status is both the first
 * and the last descriptor for the RX packet. Each RX packet is currently
 * received through a single RX descriptor, so not having each RX
 * descriptor with its first and last bits set is an error
 */
static int mvneta_rxq_desc_is_first_last(u32 status)
{
	return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
		MVNETA_RXD_FIRST_LAST_DESC;
}
818 /* Add number of descriptors ready to receive new packets */
819 static void mvneta_rxq_non_occup_desc_add(struct mvneta_port
*pp
,
820 struct mvneta_rx_queue
*rxq
,
823 /* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
826 while (ndescs
> MVNETA_RXQ_ADD_NON_OCCUPIED_MAX
) {
827 mvreg_write(pp
, MVNETA_RXQ_STATUS_UPDATE_REG(rxq
->id
),
828 (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX
<<
829 MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT
));
830 ndescs
-= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX
;
833 mvreg_write(pp
, MVNETA_RXQ_STATUS_UPDATE_REG(rxq
->id
),
834 (ndescs
<< MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT
));
837 /* Get number of RX descriptors occupied by received packets */
838 static int mvneta_rxq_busy_desc_num_get(struct mvneta_port
*pp
,
839 struct mvneta_rx_queue
*rxq
)
843 val
= mvreg_read(pp
, MVNETA_RXQ_STATUS_REG(rxq
->id
));
844 return val
& MVNETA_RXQ_OCCUPIED_ALL_MASK
;
847 /* Update num of rx desc called upon return from rx path or
848 * from mvneta_rxq_drop_pkts().
850 static void mvneta_rxq_desc_num_update(struct mvneta_port
*pp
,
851 struct mvneta_rx_queue
*rxq
,
852 int rx_done
, int rx_filled
)
856 if ((rx_done
<= 0xff) && (rx_filled
<= 0xff)) {
858 (rx_filled
<< MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT
);
859 mvreg_write(pp
, MVNETA_RXQ_STATUS_UPDATE_REG(rxq
->id
), val
);
863 /* Only 255 descriptors can be added at once */
864 while ((rx_done
> 0) || (rx_filled
> 0)) {
865 if (rx_done
<= 0xff) {
872 if (rx_filled
<= 0xff) {
873 val
|= rx_filled
<< MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT
;
876 val
|= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT
;
879 mvreg_write(pp
, MVNETA_RXQ_STATUS_UPDATE_REG(rxq
->id
), val
);
/* Get pointer to next RX descriptor to be processed by SW */
static struct mvneta_rx_desc *
mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}
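/* The prefetch() above warms the cache line of the descriptor that will
 * be handled on the next call, while the current one is returned to the
 * caller untouched.
 */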
/* Change maximum receive size of the port. */
static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
	val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
		MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}
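/* As programmed above, the GMAC field holds (max_rx_size - MVNETA_MH_SIZE) / 2,
 * i.e. the limit is expressed in 2-byte units with the two Marvell-header
 * bytes excluded.
 */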
/* Set rx queue offset */
static void mvneta_rxq_offset_set(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq,
				  int offset)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;

	val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}
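/* The packet-offset field only has 8-byte granularity: the value written
 * is offset >> 3, so callers are expected to pass a multiple of 8.
 */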
923 /* Tx descriptors helper methods */
925 /* Update HW with number of TX descriptors to be sent */
926 static void mvneta_txq_pend_desc_add(struct mvneta_port
*pp
,
927 struct mvneta_tx_queue
*txq
,
932 pend_desc
+= txq
->pending
;
934 /* Only 255 Tx descriptors can be added at once */
936 val
= min(pend_desc
, 255);
937 mvreg_write(pp
, MVNETA_TXQ_UPDATE_REG(txq
->id
), val
);
939 } while (pend_desc
> 0);
943 /* Get pointer to next TX descriptor to be processed (send) by HW */
944 static struct mvneta_tx_desc
*
945 mvneta_txq_next_desc_get(struct mvneta_tx_queue
*txq
)
947 int tx_desc
= txq
->next_desc_to_proc
;
949 txq
->next_desc_to_proc
= MVNETA_QUEUE_NEXT_DESC(txq
, tx_desc
);
950 return txq
->descs
+ tx_desc
;
/* Release the last allocated TX descriptor. Useful to handle DMA
 * mapping failures in the TX path.
 */
static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
{
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
}
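/* This simply steps next_desc_to_proc back by one so that the descriptor
 * just obtained from mvneta_txq_next_desc_get() can be handed out again
 * after a DMA mapping failure.
 */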
/* Set rxq buf size */
static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq,
				    int buf_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));

	val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
	val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
}
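/* Like the packet offset, the per-queue buffer size is programmed in
 * 8-byte units (buf_size >> 3) in the RXQ size register.
 */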
979 /* Disable buffer management (BM) */
980 static void mvneta_rxq_bm_disable(struct mvneta_port
*pp
,
981 struct mvneta_rx_queue
*rxq
)
985 val
= mvreg_read(pp
, MVNETA_RXQ_CONFIG_REG(rxq
->id
));
986 val
&= ~MVNETA_RXQ_HW_BUF_ALLOC
;
987 mvreg_write(pp
, MVNETA_RXQ_CONFIG_REG(rxq
->id
), val
);
990 /* Enable buffer management (BM) */
991 static void mvneta_rxq_bm_enable(struct mvneta_port
*pp
,
992 struct mvneta_rx_queue
*rxq
)
996 val
= mvreg_read(pp
, MVNETA_RXQ_CONFIG_REG(rxq
->id
));
997 val
|= MVNETA_RXQ_HW_BUF_ALLOC
;
998 mvreg_write(pp
, MVNETA_RXQ_CONFIG_REG(rxq
->id
), val
);
1001 /* Notify HW about port's assignment of pool for bigger packets */
1002 static void mvneta_rxq_long_pool_set(struct mvneta_port
*pp
,
1003 struct mvneta_rx_queue
*rxq
)
1007 val
= mvreg_read(pp
, MVNETA_RXQ_CONFIG_REG(rxq
->id
));
1008 val
&= ~MVNETA_RXQ_LONG_POOL_ID_MASK
;
1009 val
|= (pp
->pool_long
->id
<< MVNETA_RXQ_LONG_POOL_ID_SHIFT
);
1011 mvreg_write(pp
, MVNETA_RXQ_CONFIG_REG(rxq
->id
), val
);
1014 /* Notify HW about port's assignment of pool for smaller packets */
1015 static void mvneta_rxq_short_pool_set(struct mvneta_port
*pp
,
1016 struct mvneta_rx_queue
*rxq
)
1020 val
= mvreg_read(pp
, MVNETA_RXQ_CONFIG_REG(rxq
->id
));
1021 val
&= ~MVNETA_RXQ_SHORT_POOL_ID_MASK
;
1022 val
|= (pp
->pool_short
->id
<< MVNETA_RXQ_SHORT_POOL_ID_SHIFT
);
1024 mvreg_write(pp
, MVNETA_RXQ_CONFIG_REG(rxq
->id
), val
);
1027 /* Set port's receive buffer size for assigned BM pool */
1028 static inline void mvneta_bm_pool_bufsize_set(struct mvneta_port
*pp
,
1034 if (!IS_ALIGNED(buf_size
, 8)) {
1035 dev_warn(pp
->dev
->dev
.parent
,
1036 "illegal buf_size value %d, round to %d\n",
1037 buf_size
, ALIGN(buf_size
, 8));
1038 buf_size
= ALIGN(buf_size
, 8);
1041 val
= mvreg_read(pp
, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id
));
1042 val
|= buf_size
& MVNETA_PORT_POOL_BUFFER_SZ_MASK
;
1043 mvreg_write(pp
, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id
), val
);
1046 /* Configure MBUS window in order to enable access BM internal SRAM */
1047 static int mvneta_mbus_io_win_set(struct mvneta_port
*pp
, u32 base
, u32 wsize
,
1050 u32 win_enable
, win_protect
;
1053 win_enable
= mvreg_read(pp
, MVNETA_BASE_ADDR_ENABLE
);
1055 if (pp
->bm_win_id
< 0) {
1056 /* Find first not occupied window */
1057 for (i
= 0; i
< MVNETA_MAX_DECODE_WIN
; i
++) {
1058 if (win_enable
& (1 << i
)) {
1063 if (i
== MVNETA_MAX_DECODE_WIN
)
1069 mvreg_write(pp
, MVNETA_WIN_BASE(i
), 0);
1070 mvreg_write(pp
, MVNETA_WIN_SIZE(i
), 0);
1073 mvreg_write(pp
, MVNETA_WIN_REMAP(i
), 0);
1075 mvreg_write(pp
, MVNETA_WIN_BASE(i
), (base
& 0xffff0000) |
1076 (attr
<< 8) | target
);
1078 mvreg_write(pp
, MVNETA_WIN_SIZE(i
), (wsize
- 1) & 0xffff0000);
1080 win_protect
= mvreg_read(pp
, MVNETA_ACCESS_PROTECT_ENABLE
);
1081 win_protect
|= 3 << (2 * i
);
1082 mvreg_write(pp
, MVNETA_ACCESS_PROTECT_ENABLE
, win_protect
);
1084 win_enable
&= ~(1 << i
);
1085 mvreg_write(pp
, MVNETA_BASE_ADDR_ENABLE
, win_enable
);
1090 static int mvneta_bm_port_mbus_init(struct mvneta_port
*pp
)
1096 /* Get BM window information */
1097 err
= mvebu_mbus_get_io_win_info(pp
->bm_priv
->bppi_phys_addr
, &wsize
,
1104 /* Open NETA -> BM window */
1105 err
= mvneta_mbus_io_win_set(pp
, pp
->bm_priv
->bppi_phys_addr
, wsize
,
1108 netdev_info(pp
->dev
, "fail to configure mbus window to BM\n");
/* Assign and initialize pools for port. In case of failure,
 * the buffer manager will remain disabled for the current port.
 */
1117 static int mvneta_bm_port_init(struct platform_device
*pdev
,
1118 struct mvneta_port
*pp
)
1120 struct device_node
*dn
= pdev
->dev
.of_node
;
1121 u32 long_pool_id
, short_pool_id
;
1123 if (!pp
->neta_armada3700
) {
1126 ret
= mvneta_bm_port_mbus_init(pp
);
1131 if (of_property_read_u32(dn
, "bm,pool-long", &long_pool_id
)) {
1132 netdev_info(pp
->dev
, "missing long pool id\n");
1136 /* Create port's long pool depending on mtu */
1137 pp
->pool_long
= mvneta_bm_pool_use(pp
->bm_priv
, long_pool_id
,
1138 MVNETA_BM_LONG
, pp
->id
,
1139 MVNETA_RX_PKT_SIZE(pp
->dev
->mtu
));
1140 if (!pp
->pool_long
) {
1141 netdev_info(pp
->dev
, "fail to obtain long pool for port\n");
1145 pp
->pool_long
->port_map
|= 1 << pp
->id
;
1147 mvneta_bm_pool_bufsize_set(pp
, pp
->pool_long
->buf_size
,
1150 /* If short pool id is not defined, assume using single pool */
1151 if (of_property_read_u32(dn
, "bm,pool-short", &short_pool_id
))
1152 short_pool_id
= long_pool_id
;
1154 /* Create port's short pool */
1155 pp
->pool_short
= mvneta_bm_pool_use(pp
->bm_priv
, short_pool_id
,
1156 MVNETA_BM_SHORT
, pp
->id
,
1157 MVNETA_BM_SHORT_PKT_SIZE
);
1158 if (!pp
->pool_short
) {
1159 netdev_info(pp
->dev
, "fail to obtain short pool for port\n");
1160 mvneta_bm_pool_destroy(pp
->bm_priv
, pp
->pool_long
, 1 << pp
->id
);
1164 if (short_pool_id
!= long_pool_id
) {
1165 pp
->pool_short
->port_map
|= 1 << pp
->id
;
1166 mvneta_bm_pool_bufsize_set(pp
, pp
->pool_short
->buf_size
,
1167 pp
->pool_short
->id
);
1173 /* Update settings of a pool for bigger packets */
1174 static void mvneta_bm_update_mtu(struct mvneta_port
*pp
, int mtu
)
1176 struct mvneta_bm_pool
*bm_pool
= pp
->pool_long
;
1177 struct hwbm_pool
*hwbm_pool
= &bm_pool
->hwbm_pool
;
1180 /* Release all buffers from long pool */
1181 mvneta_bm_bufs_free(pp
->bm_priv
, bm_pool
, 1 << pp
->id
);
1182 if (hwbm_pool
->buf_num
) {
1183 WARN(1, "cannot free all buffers in pool %d\n",
1188 bm_pool
->pkt_size
= MVNETA_RX_PKT_SIZE(mtu
);
1189 bm_pool
->buf_size
= MVNETA_RX_BUF_SIZE(bm_pool
->pkt_size
);
1190 hwbm_pool
->frag_size
= SKB_DATA_ALIGN(sizeof(struct skb_shared_info
)) +
1191 SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool
->pkt_size
));
1193 /* Fill entire long pool */
1194 num
= hwbm_pool_add(hwbm_pool
, hwbm_pool
->size
);
1195 if (num
!= hwbm_pool
->size
) {
1196 WARN(1, "pool %d: %d of %d allocated\n",
1197 bm_pool
->id
, num
, hwbm_pool
->size
);
1200 mvneta_bm_pool_bufsize_set(pp
, bm_pool
->buf_size
, bm_pool
->id
);
1205 mvneta_bm_pool_destroy(pp
->bm_priv
, pp
->pool_long
, 1 << pp
->id
);
1206 mvneta_bm_pool_destroy(pp
->bm_priv
, pp
->pool_short
, 1 << pp
->id
);
1209 pp
->rx_offset_correction
= MVNETA_SKB_HEADROOM
;
1210 mvreg_write(pp
, MVNETA_ACC_MODE
, MVNETA_ACC_MODE_EXT1
);
1211 netdev_info(pp
->dev
, "fail to update MTU, fall back to software BM\n");
1214 /* Start the Ethernet port RX and TX activity */
1215 static void mvneta_port_up(struct mvneta_port
*pp
)
1220 /* Enable all initialized TXs. */
1222 for (queue
= 0; queue
< txq_number
; queue
++) {
1223 struct mvneta_tx_queue
*txq
= &pp
->txqs
[queue
];
1225 q_map
|= (1 << queue
);
1227 mvreg_write(pp
, MVNETA_TXQ_CMD
, q_map
);
1230 /* Enable all initialized RXQs. */
1231 for (queue
= 0; queue
< rxq_number
; queue
++) {
1232 struct mvneta_rx_queue
*rxq
= &pp
->rxqs
[queue
];
1235 q_map
|= (1 << queue
);
1237 mvreg_write(pp
, MVNETA_RXQ_CMD
, q_map
);
1240 /* Stop the Ethernet port activity */
1241 static void mvneta_port_down(struct mvneta_port
*pp
)
1246 /* Stop Rx port activity. Check port Rx activity. */
1247 val
= mvreg_read(pp
, MVNETA_RXQ_CMD
) & MVNETA_RXQ_ENABLE_MASK
;
1249 /* Issue stop command for active channels only */
1251 mvreg_write(pp
, MVNETA_RXQ_CMD
,
1252 val
<< MVNETA_RXQ_DISABLE_SHIFT
);
1254 /* Wait for all Rx activity to terminate. */
1257 if (count
++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC
) {
1258 netdev_warn(pp
->dev
,
1259 "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n",
1265 val
= mvreg_read(pp
, MVNETA_RXQ_CMD
);
1266 } while (val
& MVNETA_RXQ_ENABLE_MASK
);
1268 /* Stop Tx port activity. Check port Tx activity. Issue stop
1269 * command for active channels only
1271 val
= (mvreg_read(pp
, MVNETA_TXQ_CMD
)) & MVNETA_TXQ_ENABLE_MASK
;
1274 mvreg_write(pp
, MVNETA_TXQ_CMD
,
1275 (val
<< MVNETA_TXQ_DISABLE_SHIFT
));
1277 /* Wait for all Tx activity to terminate. */
1280 if (count
++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC
) {
1281 netdev_warn(pp
->dev
,
1282 "TIMEOUT for TX stopped status=0x%08x\n",
1288 /* Check TX Command reg that all Txqs are stopped */
1289 val
= mvreg_read(pp
, MVNETA_TXQ_CMD
);
1291 } while (val
& MVNETA_TXQ_ENABLE_MASK
);
1293 /* Double check to verify that TX FIFO is empty */
1296 if (count
++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT
) {
1297 netdev_warn(pp
->dev
,
1298 "TX FIFO empty timeout status=0x%08x\n",
1304 val
= mvreg_read(pp
, MVNETA_PORT_STATUS
);
1305 } while (!(val
& MVNETA_TX_FIFO_EMPTY
) &&
1306 (val
& MVNETA_TX_IN_PRGRS
));
1311 /* Enable the port by setting the port enable bit of the MAC control register */
1312 static void mvneta_port_enable(struct mvneta_port
*pp
)
1317 val
= mvreg_read(pp
, MVNETA_GMAC_CTRL_0
);
1318 val
|= MVNETA_GMAC0_PORT_ENABLE
;
1319 mvreg_write(pp
, MVNETA_GMAC_CTRL_0
, val
);
/* Disable the port and wait for about 200 usec before returning */
1323 static void mvneta_port_disable(struct mvneta_port
*pp
)
1327 /* Reset the Enable bit in the Serial Control Register */
1328 val
= mvreg_read(pp
, MVNETA_GMAC_CTRL_0
);
1329 val
&= ~MVNETA_GMAC0_PORT_ENABLE
;
1330 mvreg_write(pp
, MVNETA_GMAC_CTRL_0
, val
);
1335 /* Multicast tables methods */
1337 /* Set all entries in Unicast MAC Table; queue==-1 means reject all */
1338 static void mvneta_set_ucast_table(struct mvneta_port
*pp
, int queue
)
1346 val
= 0x1 | (queue
<< 1);
1347 val
|= (val
<< 24) | (val
<< 16) | (val
<< 8);
1350 for (offset
= 0; offset
<= 0xc; offset
+= 4)
1351 mvreg_write(pp
, MVNETA_DA_FILT_UCAST_BASE
+ offset
, val
);
1354 /* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
1355 static void mvneta_set_special_mcast_table(struct mvneta_port
*pp
, int queue
)
1363 val
= 0x1 | (queue
<< 1);
1364 val
|= (val
<< 24) | (val
<< 16) | (val
<< 8);
1367 for (offset
= 0; offset
<= 0xfc; offset
+= 4)
1368 mvreg_write(pp
, MVNETA_DA_FILT_SPEC_MCAST
+ offset
, val
);
1372 /* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
1373 static void mvneta_set_other_mcast_table(struct mvneta_port
*pp
, int queue
)
1379 memset(pp
->mcast_count
, 0, sizeof(pp
->mcast_count
));
1382 memset(pp
->mcast_count
, 1, sizeof(pp
->mcast_count
));
1383 val
= 0x1 | (queue
<< 1);
1384 val
|= (val
<< 24) | (val
<< 16) | (val
<< 8);
1387 for (offset
= 0; offset
<= 0xfc; offset
+= 4)
1388 mvreg_write(pp
, MVNETA_DA_FILT_OTH_MCAST
+ offset
, val
);
1391 static void mvneta_percpu_unmask_interrupt(void *arg
)
1393 struct mvneta_port
*pp
= arg
;
	/* All the queues are unmasked, but actually only the ones
	 * mapped to this CPU will be unmasked
	 */
1398 mvreg_write(pp
, MVNETA_INTR_NEW_MASK
,
1399 MVNETA_RX_INTR_MASK_ALL
|
1400 MVNETA_TX_INTR_MASK_ALL
|
1401 MVNETA_MISCINTR_INTR_MASK
);
1404 static void mvneta_percpu_mask_interrupt(void *arg
)
1406 struct mvneta_port
*pp
= arg
;
	/* All the queues are masked, but actually only the ones
	 * mapped to this CPU will be masked
	 */
1411 mvreg_write(pp
, MVNETA_INTR_NEW_MASK
, 0);
1412 mvreg_write(pp
, MVNETA_INTR_OLD_MASK
, 0);
1413 mvreg_write(pp
, MVNETA_INTR_MISC_MASK
, 0);
1416 static void mvneta_percpu_clear_intr_cause(void *arg
)
1418 struct mvneta_port
*pp
= arg
;
	/* All the queues are cleared, but actually only the ones
	 * mapped to this CPU will be cleared
	 */
1423 mvreg_write(pp
, MVNETA_INTR_NEW_CAUSE
, 0);
1424 mvreg_write(pp
, MVNETA_INTR_MISC_CAUSE
, 0);
1425 mvreg_write(pp
, MVNETA_INTR_OLD_CAUSE
, 0);
1428 /* This method sets defaults to the NETA port:
1429 * Clears interrupt Cause and Mask registers.
1430 * Clears all MAC tables.
1431 * Sets defaults to all registers.
1432 * Resets RX and TX descriptor rings.
1434 * This method can be called after mvneta_port_down() to return the port
1435 * settings to defaults.
1437 static void mvneta_defaults_set(struct mvneta_port
*pp
)
1442 int max_cpu
= num_present_cpus();
1444 /* Clear all Cause registers */
1445 on_each_cpu(mvneta_percpu_clear_intr_cause
, pp
, true);
1447 /* Mask all interrupts */
1448 on_each_cpu(mvneta_percpu_mask_interrupt
, pp
, true);
1449 mvreg_write(pp
, MVNETA_INTR_ENABLE
, 0);
1451 /* Enable MBUS Retry bit16 */
1452 mvreg_write(pp
, MVNETA_MBUS_RETRY
, 0x20);
	/* Set CPU queue access map. CPUs are assigned to the RX and
	 * TX queues modulo their number. If there is only one TX
	 * queue then it is assigned to the CPU associated to the
	 * default RX queue
	 */
1459 for_each_present_cpu(cpu
) {
1460 int rxq_map
= 0, txq_map
= 0;
1462 if (!pp
->neta_armada3700
) {
1463 for (rxq
= 0; rxq
< rxq_number
; rxq
++)
1464 if ((rxq
% max_cpu
) == cpu
)
1465 rxq_map
|= MVNETA_CPU_RXQ_ACCESS(rxq
);
1467 for (txq
= 0; txq
< txq_number
; txq
++)
1468 if ((txq
% max_cpu
) == cpu
)
1469 txq_map
|= MVNETA_CPU_TXQ_ACCESS(txq
);
			/* With only one TX queue we configure a special case
			 * which allows getting all the irqs on a single
			 * cpu
			 */
1475 if (txq_number
== 1)
1476 txq_map
= (cpu
== pp
->rxq_def
) ?
1477 MVNETA_CPU_TXQ_ACCESS(1) : 0;
1480 txq_map
= MVNETA_CPU_TXQ_ACCESS_ALL_MASK
;
1481 rxq_map
= MVNETA_CPU_RXQ_ACCESS_ALL_MASK
;
1484 mvreg_write(pp
, MVNETA_CPU_MAP(cpu
), rxq_map
| txq_map
);
1487 /* Reset RX and TX DMAs */
1488 mvreg_write(pp
, MVNETA_PORT_RX_RESET
, MVNETA_PORT_RX_DMA_RESET
);
1489 mvreg_write(pp
, MVNETA_PORT_TX_RESET
, MVNETA_PORT_TX_DMA_RESET
);
1491 /* Disable Legacy WRR, Disable EJP, Release from reset */
1492 mvreg_write(pp
, MVNETA_TXQ_CMD_1
, 0);
1493 for (queue
= 0; queue
< txq_number
; queue
++) {
1494 mvreg_write(pp
, MVETH_TXQ_TOKEN_COUNT_REG(queue
), 0);
1495 mvreg_write(pp
, MVETH_TXQ_TOKEN_CFG_REG(queue
), 0);
1498 mvreg_write(pp
, MVNETA_PORT_TX_RESET
, 0);
1499 mvreg_write(pp
, MVNETA_PORT_RX_RESET
, 0);
1501 /* Set Port Acceleration Mode */
1503 /* HW buffer management + legacy parser */
1504 val
= MVNETA_ACC_MODE_EXT2
;
1506 /* SW buffer management + legacy parser */
1507 val
= MVNETA_ACC_MODE_EXT1
;
1508 mvreg_write(pp
, MVNETA_ACC_MODE
, val
);
1511 mvreg_write(pp
, MVNETA_BM_ADDRESS
, pp
->bm_priv
->bppi_phys_addr
);
1513 /* Update val of portCfg register accordingly with all RxQueue types */
1514 val
= MVNETA_PORT_CONFIG_DEFL_VALUE(pp
->rxq_def
);
1515 mvreg_write(pp
, MVNETA_PORT_CONFIG
, val
);
1518 mvreg_write(pp
, MVNETA_PORT_CONFIG_EXTEND
, val
);
1519 mvreg_write(pp
, MVNETA_RX_MIN_FRAME_SIZE
, 64);
1521 /* Build PORT_SDMA_CONFIG_REG */
1524 /* Default burst size */
1525 val
|= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16
);
1526 val
|= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16
);
1527 val
|= MVNETA_RX_NO_DATA_SWAP
| MVNETA_TX_NO_DATA_SWAP
;
1529 #if defined(__BIG_ENDIAN)
1530 val
|= MVNETA_DESC_SWAP
;
1533 /* Assign port SDMA configuration */
1534 mvreg_write(pp
, MVNETA_SDMA_CONFIG
, val
);
1536 /* Disable PHY polling in hardware, since we're using the
1537 * kernel phylib to do this.
1539 val
= mvreg_read(pp
, MVNETA_UNIT_CONTROL
);
1540 val
&= ~MVNETA_PHY_POLLING_ENABLE
;
1541 mvreg_write(pp
, MVNETA_UNIT_CONTROL
, val
);
1543 mvneta_set_ucast_table(pp
, -1);
1544 mvneta_set_special_mcast_table(pp
, -1);
1545 mvneta_set_other_mcast_table(pp
, -1);
1547 /* Set port interrupt enable register - default enable all */
1548 mvreg_write(pp
, MVNETA_INTR_ENABLE
,
1549 (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
1550 | MVNETA_TXQ_INTR_ENABLE_ALL_MASK
));
1552 mvneta_mib_counters_clear(pp
);
1555 /* Set max sizes for tx queues */
1556 static void mvneta_txq_max_tx_size_set(struct mvneta_port
*pp
, int max_tx_size
)
1562 mtu
= max_tx_size
* 8;
1563 if (mtu
> MVNETA_TX_MTU_MAX
)
1564 mtu
= MVNETA_TX_MTU_MAX
;
1567 val
= mvreg_read(pp
, MVNETA_TX_MTU
);
1568 val
&= ~MVNETA_TX_MTU_MAX
;
1570 mvreg_write(pp
, MVNETA_TX_MTU
, val
);
	/* TX token size and all TXQs token size must be larger than MTU */
1573 val
= mvreg_read(pp
, MVNETA_TX_TOKEN_SIZE
);
1575 size
= val
& MVNETA_TX_TOKEN_SIZE_MAX
;
1578 val
&= ~MVNETA_TX_TOKEN_SIZE_MAX
;
1580 mvreg_write(pp
, MVNETA_TX_TOKEN_SIZE
, val
);
1582 for (queue
= 0; queue
< txq_number
; queue
++) {
1583 val
= mvreg_read(pp
, MVNETA_TXQ_TOKEN_SIZE_REG(queue
));
1585 size
= val
& MVNETA_TXQ_TOKEN_SIZE_MAX
;
1588 val
&= ~MVNETA_TXQ_TOKEN_SIZE_MAX
;
1590 mvreg_write(pp
, MVNETA_TXQ_TOKEN_SIZE_REG(queue
), val
);
1595 /* Set unicast address */
1596 static void mvneta_set_ucast_addr(struct mvneta_port
*pp
, u8 last_nibble
,
1599 unsigned int unicast_reg
;
1600 unsigned int tbl_offset
;
1601 unsigned int reg_offset
;
1603 /* Locate the Unicast table entry */
1604 last_nibble
= (0xf & last_nibble
);
1606 /* offset from unicast tbl base */
1607 tbl_offset
= (last_nibble
/ 4) * 4;
1609 /* offset within the above reg */
1610 reg_offset
= last_nibble
% 4;
1612 unicast_reg
= mvreg_read(pp
, (MVNETA_DA_FILT_UCAST_BASE
+ tbl_offset
));
1615 /* Clear accepts frame bit at specified unicast DA tbl entry */
1616 unicast_reg
&= ~(0xff << (8 * reg_offset
));
1618 unicast_reg
&= ~(0xff << (8 * reg_offset
));
1619 unicast_reg
|= ((0x01 | (queue
<< 1)) << (8 * reg_offset
));
1622 mvreg_write(pp
, (MVNETA_DA_FILT_UCAST_BASE
+ tbl_offset
), unicast_reg
);
1625 /* Set mac address */
1626 static void mvneta_mac_addr_set(struct mvneta_port
*pp
, unsigned char *addr
,
1633 mac_l
= (addr
[4] << 8) | (addr
[5]);
1634 mac_h
= (addr
[0] << 24) | (addr
[1] << 16) |
1635 (addr
[2] << 8) | (addr
[3] << 0);
1637 mvreg_write(pp
, MVNETA_MAC_ADDR_LOW
, mac_l
);
1638 mvreg_write(pp
, MVNETA_MAC_ADDR_HIGH
, mac_h
);
1641 /* Accept frames of this address */
1642 mvneta_set_ucast_addr(pp
, addr
[5], queue
);
/* Set the number of packets that will be received before RX interrupt
 * will be generated by HW.
 */
1648 static void mvneta_rx_pkts_coal_set(struct mvneta_port
*pp
,
1649 struct mvneta_rx_queue
*rxq
, u32 value
)
1651 mvreg_write(pp
, MVNETA_RXQ_THRESHOLD_REG(rxq
->id
),
1652 value
| MVNETA_RXQ_NON_OCCUPIED(0));
/* Set the time delay in usec before RX interrupt will be generated by
 * HW
 */
1658 static void mvneta_rx_time_coal_set(struct mvneta_port
*pp
,
1659 struct mvneta_rx_queue
*rxq
, u32 value
)
1662 unsigned long clk_rate
;
1664 clk_rate
= clk_get_rate(pp
->clk
);
1665 val
= (clk_rate
/ 1000000) * value
;
1667 mvreg_write(pp
, MVNETA_RXQ_TIME_COAL_REG(rxq
->id
), val
);
1670 /* Set threshold for TX_DONE pkts coalescing */
1671 static void mvneta_tx_done_pkts_coal_set(struct mvneta_port
*pp
,
1672 struct mvneta_tx_queue
*txq
, u32 value
)
1676 val
= mvreg_read(pp
, MVNETA_TXQ_SIZE_REG(txq
->id
));
1678 val
&= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK
;
1679 val
|= MVNETA_TXQ_SENT_THRESH_MASK(value
);
1681 mvreg_write(pp
, MVNETA_TXQ_SIZE_REG(txq
->id
), val
);
1684 /* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
1685 static void mvneta_rx_desc_fill(struct mvneta_rx_desc
*rx_desc
,
1686 u32 phys_addr
, void *virt_addr
,
1687 struct mvneta_rx_queue
*rxq
)
1691 rx_desc
->buf_phys_addr
= phys_addr
;
1692 i
= rx_desc
- rxq
->descs
;
1693 rxq
->buf_virt_addr
[i
] = virt_addr
;
1696 /* Decrement sent descriptors counter */
1697 static void mvneta_txq_sent_desc_dec(struct mvneta_port
*pp
,
1698 struct mvneta_tx_queue
*txq
,
1703 /* Only 255 TX descriptors can be updated at once */
1704 while (sent_desc
> 0xff) {
1705 val
= 0xff << MVNETA_TXQ_DEC_SENT_SHIFT
;
1706 mvreg_write(pp
, MVNETA_TXQ_UPDATE_REG(txq
->id
), val
);
1707 sent_desc
= sent_desc
- 0xff;
1710 val
= sent_desc
<< MVNETA_TXQ_DEC_SENT_SHIFT
;
1711 mvreg_write(pp
, MVNETA_TXQ_UPDATE_REG(txq
->id
), val
);
1714 /* Get number of TX descriptors already sent by HW */
1715 static int mvneta_txq_sent_desc_num_get(struct mvneta_port
*pp
,
1716 struct mvneta_tx_queue
*txq
)
1721 val
= mvreg_read(pp
, MVNETA_TXQ_STATUS_REG(txq
->id
));
1722 sent_desc
= (val
& MVNETA_TXQ_SENT_DESC_MASK
) >>
1723 MVNETA_TXQ_SENT_DESC_SHIFT
;
1728 /* Get number of sent descriptors and decrement counter.
1729 * The number of sent descriptors is returned.
1731 static int mvneta_txq_sent_desc_proc(struct mvneta_port
*pp
,
1732 struct mvneta_tx_queue
*txq
)
1736 /* Get number of sent descriptors */
1737 sent_desc
= mvneta_txq_sent_desc_num_get(pp
, txq
);
1739 /* Decrement sent descriptors counter */
1741 mvneta_txq_sent_desc_dec(pp
, txq
, sent_desc
);
/* Set TXQ descriptors fields relevant for CSUM calculation */
static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
				int ip_hdr_len, int l4_proto)
{
	u32 command;

	/* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
	 * G_L4_chk, L4_type; required only for checksum
	 * calculation
	 */
	command =  l3_offs    << MVNETA_TX_L3_OFF_SHIFT;
	command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;

	if (l3_proto == htons(ETH_P_IP))
		command |= MVNETA_TXD_IP_CSUM;
	else
		command |= MVNETA_TX_L3_IP6;

	if (l4_proto == IPPROTO_TCP)
		command |= MVNETA_TX_L4_CSUM_FULL;
	else if (l4_proto == IPPROTO_UDP)
		command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
	else
		command |= MVNETA_TX_L4_CSUM_NOT;

	return command;
}
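/* Summary of the command word built here: the L3 offset and IP header
 * length locate the headers, MVNETA_TXD_IP_CSUM/MVNETA_TX_L3_IP6 select
 * the L3 type, and the L4 bits request a full TCP/UDP checksum or none.
 */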
1775 /* Display more error info */
1776 static void mvneta_rx_error(struct mvneta_port
*pp
,
1777 struct mvneta_rx_desc
*rx_desc
)
1779 struct mvneta_pcpu_stats
*stats
= this_cpu_ptr(pp
->stats
);
1780 u32 status
= rx_desc
->status
;
1782 /* update per-cpu counter */
1783 u64_stats_update_begin(&stats
->syncp
);
1785 u64_stats_update_end(&stats
->syncp
);
1787 switch (status
& MVNETA_RXD_ERR_CODE_MASK
) {
1788 case MVNETA_RXD_ERR_CRC
:
1789 netdev_err(pp
->dev
, "bad rx status %08x (crc error), size=%d\n",
1790 status
, rx_desc
->data_size
);
1792 case MVNETA_RXD_ERR_OVERRUN
:
1793 netdev_err(pp
->dev
, "bad rx status %08x (overrun error), size=%d\n",
1794 status
, rx_desc
->data_size
);
1796 case MVNETA_RXD_ERR_LEN
:
1797 netdev_err(pp
->dev
, "bad rx status %08x (max frame length error), size=%d\n",
1798 status
, rx_desc
->data_size
);
1800 case MVNETA_RXD_ERR_RESOURCE
:
1801 netdev_err(pp
->dev
, "bad rx status %08x (resource error), size=%d\n",
1802 status
, rx_desc
->data_size
);
/* Handle RX checksum offload based on the descriptor's status */
static int mvneta_rx_csum(struct mvneta_port *pp, u32 status)
{
	if ((pp->dev->features & NETIF_F_RXCSUM) &&
	    (status & MVNETA_RXD_L3_IP4) &&
	    (status & MVNETA_RXD_L4_CSUM_OK))
		return CHECKSUM_UNNECESSARY;

	return CHECKSUM_NONE;
}
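/* Only IPv4 frames whose descriptor reports a good L4 checksum (and only
 * when the netdev has RX checksum offload enabled) are marked
 * CHECKSUM_UNNECESSARY; everything else is left for the stack to verify.
 */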
/* Return tx queue pointer (find last set bit) according to <cause> returned
 * from tx_done reg. <cause> must not be null. The return value is always a
 * valid queue for matching the first one found in <cause>.
 */
1822 static struct mvneta_tx_queue
*mvneta_tx_done_policy(struct mvneta_port
*pp
,
1825 int queue
= fls(cause
) - 1;
1827 return &pp
->txqs
[queue
];
1830 /* Free tx queue skbuffs */
1831 static void mvneta_txq_bufs_free(struct mvneta_port
*pp
,
1832 struct mvneta_tx_queue
*txq
, int num
,
1833 struct netdev_queue
*nq
, bool napi
)
1835 unsigned int bytes_compl
= 0, pkts_compl
= 0;
1836 struct xdp_frame_bulk bq
;
1839 xdp_frame_bulk_init(&bq
);
1841 rcu_read_lock(); /* need for xdp_return_frame_bulk */
1843 for (i
= 0; i
< num
; i
++) {
1844 struct mvneta_tx_buf
*buf
= &txq
->buf
[txq
->txq_get_index
];
1845 struct mvneta_tx_desc
*tx_desc
= txq
->descs
+
1848 mvneta_txq_inc_get(txq
);
1850 if (!IS_TSO_HEADER(txq
, tx_desc
->buf_phys_addr
) &&
1851 buf
->type
!= MVNETA_TYPE_XDP_TX
)
1852 dma_unmap_single(pp
->dev
->dev
.parent
,
1853 tx_desc
->buf_phys_addr
,
1854 tx_desc
->data_size
, DMA_TO_DEVICE
);
1855 if (buf
->type
== MVNETA_TYPE_SKB
&& buf
->skb
) {
1856 bytes_compl
+= buf
->skb
->len
;
1858 dev_kfree_skb_any(buf
->skb
);
1859 } else if (buf
->type
== MVNETA_TYPE_XDP_TX
||
1860 buf
->type
== MVNETA_TYPE_XDP_NDO
) {
1861 if (napi
&& buf
->type
== MVNETA_TYPE_XDP_TX
)
1862 xdp_return_frame_rx_napi(buf
->xdpf
);
1864 xdp_return_frame_bulk(buf
->xdpf
, &bq
);
1867 xdp_flush_frame_bulk(&bq
);
1871 netdev_tx_completed_queue(nq
, pkts_compl
, bytes_compl
);
1874 /* Handle end of transmission */
1875 static void mvneta_txq_done(struct mvneta_port
*pp
,
1876 struct mvneta_tx_queue
*txq
)
1878 struct netdev_queue
*nq
= netdev_get_tx_queue(pp
->dev
, txq
->id
);
1881 tx_done
= mvneta_txq_sent_desc_proc(pp
, txq
);
1885 mvneta_txq_bufs_free(pp
, txq
, tx_done
, nq
, true);
1887 txq
->count
-= tx_done
;
1889 if (netif_tx_queue_stopped(nq
)) {
1890 if (txq
->count
<= txq
->tx_wake_threshold
)
1891 netif_tx_wake_queue(nq
);
1895 /* Refill processing for SW buffer management */
1896 /* Allocate page per descriptor */
1897 static int mvneta_rx_refill(struct mvneta_port
*pp
,
1898 struct mvneta_rx_desc
*rx_desc
,
1899 struct mvneta_rx_queue
*rxq
,
1902 dma_addr_t phys_addr
;
1905 page
= page_pool_alloc_pages(rxq
->page_pool
,
1906 gfp_mask
| __GFP_NOWARN
);
1910 phys_addr
= page_pool_get_dma_addr(page
) + pp
->rx_offset_correction
;
1911 mvneta_rx_desc_fill(rx_desc
, phys_addr
, page
, rxq
);
1916 /* Handle tx checksum */
1917 static u32
mvneta_skb_tx_csum(struct mvneta_port
*pp
, struct sk_buff
*skb
)
1919 if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
1921 __be16 l3_proto
= vlan_get_protocol(skb
);
1924 if (l3_proto
== htons(ETH_P_IP
)) {
1925 struct iphdr
*ip4h
= ip_hdr(skb
);
1927 /* Calculate IPv4 checksum and L4 checksum */
1928 ip_hdr_len
= ip4h
->ihl
;
1929 l4_proto
= ip4h
->protocol
;
1930 } else if (l3_proto
== htons(ETH_P_IPV6
)) {
1931 struct ipv6hdr
*ip6h
= ipv6_hdr(skb
);
1933 /* Read l4_protocol from one of IPv6 extra headers */
1934 if (skb_network_header_len(skb
) > 0)
1935 ip_hdr_len
= (skb_network_header_len(skb
) >> 2);
1936 l4_proto
= ip6h
->nexthdr
;
1938 return MVNETA_TX_L4_CSUM_NOT
;
1940 return mvneta_txq_desc_csum(skb_network_offset(skb
),
1941 l3_proto
, ip_hdr_len
, l4_proto
);
1944 return MVNETA_TX_L4_CSUM_NOT
;
1947 /* Drop packets received by the RXQ and free buffers */
1948 static void mvneta_rxq_drop_pkts(struct mvneta_port
*pp
,
1949 struct mvneta_rx_queue
*rxq
)
1953 rx_done
= mvneta_rxq_busy_desc_num_get(pp
, rxq
);
1955 mvneta_rxq_desc_num_update(pp
, rxq
, rx_done
, rx_done
);
1958 for (i
= 0; i
< rx_done
; i
++) {
1959 struct mvneta_rx_desc
*rx_desc
=
1960 mvneta_rxq_next_desc_get(rxq
);
1961 u8 pool_id
= MVNETA_RX_GET_BM_POOL_ID(rx_desc
);
1962 struct mvneta_bm_pool
*bm_pool
;
1964 bm_pool
= &pp
->bm_priv
->bm_pools
[pool_id
];
1965 /* Return dropped buffer to the pool */
1966 mvneta_bm_pool_put_bp(pp
->bm_priv
, bm_pool
,
1967 rx_desc
->buf_phys_addr
);
1972 for (i
= 0; i
< rxq
->size
; i
++) {
1973 struct mvneta_rx_desc
*rx_desc
= rxq
->descs
+ i
;
1974 void *data
= rxq
->buf_virt_addr
[i
];
1975 if (!data
|| !(rx_desc
->buf_phys_addr
))
1978 page_pool_put_full_page(rxq
->page_pool
, data
, false);
1980 if (xdp_rxq_info_is_reg(&rxq
->xdp_rxq
))
1981 xdp_rxq_info_unreg(&rxq
->xdp_rxq
);
1982 page_pool_destroy(rxq
->page_pool
);
1983 rxq
->page_pool
= NULL
;
1987 mvneta_update_stats(struct mvneta_port
*pp
,
1988 struct mvneta_stats
*ps
)
1990 struct mvneta_pcpu_stats
*stats
= this_cpu_ptr(pp
->stats
);
1992 u64_stats_update_begin(&stats
->syncp
);
1993 stats
->es
.ps
.rx_packets
+= ps
->rx_packets
;
1994 stats
->es
.ps
.rx_bytes
+= ps
->rx_bytes
;
1996 stats
->es
.ps
.xdp_redirect
+= ps
->xdp_redirect
;
1997 stats
->es
.ps
.xdp_pass
+= ps
->xdp_pass
;
1998 stats
->es
.ps
.xdp_drop
+= ps
->xdp_drop
;
1999 u64_stats_update_end(&stats
->syncp
);
static int
mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
{
	struct mvneta_rx_desc *rx_desc;
	int curr_desc = rxq->first_to_refill;
	int i;

	for (i = 0; (i < rxq->refill_num) && (i < 64); i++) {
		rx_desc = rxq->descs + curr_desc;
		if (!(rx_desc->buf_phys_addr)) {
			if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) {
				struct mvneta_pcpu_stats *stats;

				pr_err("Can't refill queue %d. Done %d from %d\n",
				       rxq->id, i, rxq->refill_num);

				stats = this_cpu_ptr(pp->stats);
				u64_stats_update_begin(&stats->syncp);
				stats->es.refill_error++;
				u64_stats_update_end(&stats->syncp);
				break;
			}
		}
		curr_desc = MVNETA_QUEUE_NEXT_DESC(rxq, curr_desc);
	}
	rxq->refill_num -= i;
	rxq->first_to_refill = curr_desc;

	return i;
}
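/* The refill loop above is capped at 64 descriptors per call, so a single
 * NAPI pass never spends unbounded time re-populating the ring; whatever is
 * left over stays accounted in rxq->refill_num for the next invocation.
 */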
static void
mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
		    struct xdp_buff *xdp, struct skb_shared_info *sinfo,
		    int sync_len)
{
	int i;

	for (i = 0; i < sinfo->nr_frags; i++)
		page_pool_put_full_page(rxq->page_pool,
					skb_frag_page(&sinfo->frags[i]), true);
	page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data),
			   sync_len, true);
}
static int
mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
			struct xdp_frame *xdpf, bool dma_map)
{
	struct mvneta_tx_desc *tx_desc;
	struct mvneta_tx_buf *buf;
	dma_addr_t dma_addr;

	if (txq->count >= txq->tx_stop_threshold)
		return MVNETA_XDP_DROPPED;

	tx_desc = mvneta_txq_next_desc_get(txq);

	buf = &txq->buf[txq->txq_put_index];

	if (dma_map) {
		/* ndo_xdp_xmit */
		dma_addr = dma_map_single(pp->dev->dev.parent, xdpf->data,
					  xdpf->len, DMA_TO_DEVICE);
		if (dma_mapping_error(pp->dev->dev.parent, dma_addr)) {
			mvneta_txq_desc_put(txq);
			return MVNETA_XDP_DROPPED;
		}
		buf->type = MVNETA_TYPE_XDP_NDO;
	} else {
		struct page *page = virt_to_page(xdpf->data);

		dma_addr = page_pool_get_dma_addr(page) +
			   sizeof(*xdpf) + xdpf->headroom;
		dma_sync_single_for_device(pp->dev->dev.parent, dma_addr,
					   xdpf->len, DMA_BIDIRECTIONAL);
		buf->type = MVNETA_TYPE_XDP_TX;
	}
	buf->xdpf = xdpf;

	tx_desc->command = MVNETA_TXD_FLZ_DESC;
	tx_desc->buf_phys_addr = dma_addr;
	tx_desc->data_size = xdpf->len;

	mvneta_txq_inc_put(txq);
	txq->pending++;
	txq->count++;

	return MVNETA_XDP_TX;
}
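/* Two buffer flavours are handled above: frames coming back from the local
 * XDP_TX path already live in the RX page_pool and only need a DMA sync
 * (MVNETA_TYPE_XDP_TX), while frames submitted through ndo_xdp_xmit are
 * mapped from scratch with dma_map_single() (MVNETA_TYPE_XDP_NDO).
 */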
static int
mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp)
{
	struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
	struct mvneta_tx_queue *txq;
	struct netdev_queue *nq;
	struct xdp_frame *xdpf;
	int cpu;
	u32 ret;

	xdpf = xdp_convert_buff_to_frame(xdp);
	if (unlikely(!xdpf))
		return MVNETA_XDP_DROPPED;

	cpu = smp_processor_id();
	txq = &pp->txqs[cpu % txq_number];
	nq = netdev_get_tx_queue(pp->dev, txq->id);

	__netif_tx_lock(nq, cpu);
	ret = mvneta_xdp_submit_frame(pp, txq, xdpf, false);
	if (ret == MVNETA_XDP_TX) {
		u64_stats_update_begin(&stats->syncp);
		stats->es.ps.tx_bytes += xdpf->len;
		stats->es.ps.tx_packets++;
		stats->es.ps.xdp_tx++;
		u64_stats_update_end(&stats->syncp);

		mvneta_txq_pend_desc_add(pp, txq, 0);
	} else {
		u64_stats_update_begin(&stats->syncp);
		stats->es.ps.xdp_tx_err++;
		u64_stats_update_end(&stats->syncp);
	}
	__netif_tx_unlock(nq);

	return ret;
}

static int
mvneta_xdp_xmit(struct net_device *dev, int num_frame,
		struct xdp_frame **frames, u32 flags)
{
	struct mvneta_port *pp = netdev_priv(dev);
	struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
	int i, nxmit_byte = 0, nxmit = 0;
	int cpu = smp_processor_id();
	struct mvneta_tx_queue *txq;
	struct netdev_queue *nq;
	u32 ret;

	if (unlikely(test_bit(__MVNETA_DOWN, &pp->state)))
		return -ENETDOWN;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	txq = &pp->txqs[cpu % txq_number];
	nq = netdev_get_tx_queue(pp->dev, txq->id);

	__netif_tx_lock(nq, cpu);
	for (i = 0; i < num_frame; i++) {
		ret = mvneta_xdp_submit_frame(pp, txq, frames[i], true);
		if (ret != MVNETA_XDP_TX)
			break;

		nxmit_byte += frames[i]->len;
		nxmit++;
	}

	if (unlikely(flags & XDP_XMIT_FLUSH))
		mvneta_txq_pend_desc_add(pp, txq, 0);
	__netif_tx_unlock(nq);

	u64_stats_update_begin(&stats->syncp);
	stats->es.ps.tx_bytes += nxmit_byte;
	stats->es.ps.tx_packets += nxmit;
	stats->es.ps.xdp_xmit += nxmit;
	stats->es.ps.xdp_xmit_err += num_frame - nxmit;
	u64_stats_update_end(&stats->syncp);

	return nxmit;
}
static int
mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
	       struct bpf_prog *prog, struct xdp_buff *xdp,
	       u32 frame_sz, struct mvneta_stats *stats)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
	unsigned int len, data_len, sync;
	u32 ret, act;

	len = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction;
	data_len = xdp->data_end - xdp->data;
	act = bpf_prog_run_xdp(prog, xdp);

	/* Due to xdp_adjust_tail: DMA sync for_device covers max len CPU touch */
	sync = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction;
	sync = max(sync, len);

	switch (act) {
	case XDP_PASS:
		stats->xdp_pass++;
		return MVNETA_XDP_PASS;
	case XDP_REDIRECT: {
		int err;

		err = xdp_do_redirect(pp->dev, xdp, prog);
		if (unlikely(err)) {
			mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
			ret = MVNETA_XDP_DROPPED;
		} else {
			ret = MVNETA_XDP_REDIR;
			stats->xdp_redirect++;
		}
		break;
	}
	case XDP_TX:
		ret = mvneta_xdp_xmit_back(pp, xdp);
		if (ret != MVNETA_XDP_TX)
			mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(pp->dev, prog, act);
		fallthrough;
	case XDP_DROP:
		mvneta_xdp_put_buff(pp, rxq, xdp, sinfo, sync);
		ret = MVNETA_XDP_DROPPED;
		stats->xdp_drop++;
		break;
	}

	stats->rx_bytes += frame_sz + xdp->data_end - xdp->data - data_len;
	stats->rx_packets++;

	return ret;
}
static void
mvneta_swbm_rx_frame(struct mvneta_port *pp,
		     struct mvneta_rx_desc *rx_desc,
		     struct mvneta_rx_queue *rxq,
		     struct xdp_buff *xdp, int *size,
		     struct page *page)
{
	unsigned char *data = page_address(page);
	int data_len = -MVNETA_MH_SIZE, len;
	struct net_device *dev = pp->dev;
	enum dma_data_direction dma_dir;
	struct skb_shared_info *sinfo;

	if (*size > MVNETA_MAX_RX_BUF_SIZE) {
		len = MVNETA_MAX_RX_BUF_SIZE;
		data_len += len;
	} else {
		len = *size;
		data_len += len - ETH_FCS_LEN;
	}
	*size = *size - len;

	dma_dir = page_pool_get_dma_dir(rxq->page_pool);
	dma_sync_single_for_cpu(dev->dev.parent,
				rx_desc->buf_phys_addr,
				len, dma_dir);

	rx_desc->buf_phys_addr = 0;

	/* Prefetch header */
	prefetch(data);

	xdp_prepare_buff(xdp, data, pp->rx_offset_correction + MVNETA_MH_SIZE,
			 data_len, false);

	sinfo = xdp_get_shared_info_from_buff(xdp);
	sinfo->nr_frags = 0;
}
static void
mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc,
			    struct mvneta_rx_queue *rxq,
			    struct xdp_buff *xdp, int *size,
			    struct skb_shared_info *xdp_sinfo,
			    struct page *page)
{
	struct net_device *dev = pp->dev;
	enum dma_data_direction dma_dir;
	int data_len, len;

	if (*size > MVNETA_MAX_RX_BUF_SIZE) {
		len = MVNETA_MAX_RX_BUF_SIZE;
		data_len = len;
	} else {
		len = *size;
		data_len = len - ETH_FCS_LEN;
	}
	dma_dir = page_pool_get_dma_dir(rxq->page_pool);
	dma_sync_single_for_cpu(dev->dev.parent,
				rx_desc->buf_phys_addr,
				len, dma_dir);
	rx_desc->buf_phys_addr = 0;

	if (data_len > 0 && xdp_sinfo->nr_frags < MAX_SKB_FRAGS) {
		skb_frag_t *frag = &xdp_sinfo->frags[xdp_sinfo->nr_frags++];

		skb_frag_off_set(frag, pp->rx_offset_correction);
		skb_frag_size_set(frag, data_len);
		__skb_frag_set_page(frag, page);
	} else {
		page_pool_put_full_page(rxq->page_pool, page, true);
	}

	/* last fragment */
	if (len == *size) {
		struct skb_shared_info *sinfo;

		sinfo = xdp_get_shared_info_from_buff(xdp);
		sinfo->nr_frags = xdp_sinfo->nr_frags;
		memcpy(sinfo->frags, xdp_sinfo->frags,
		       sinfo->nr_frags * sizeof(skb_frag_t));
	}
	*size -= len;
}
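/* Frames spanning several RX descriptors are accumulated here as page
 * fragments in a caller-provided skb_shared_info; only once the last
 * descriptor has been seen is that array copied into the xdp_buff's own
 * shared info, so a partially received frame is never handed to XDP.
 */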
static struct sk_buff *
mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
		      struct xdp_buff *xdp, u32 desc_status)
{
	struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
	int i, num_frags = sinfo->nr_frags;
	struct sk_buff *skb;

	skb = build_skb(xdp->data_hard_start, PAGE_SIZE);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	skb_mark_for_recycle(skb);

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	skb_put(skb, xdp->data_end - xdp->data);
	skb->ip_summed = mvneta_rx_csum(pp, desc_status);

	for (i = 0; i < num_frags; i++) {
		skb_frag_t *frag = &sinfo->frags[i];

		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				skb_frag_page(frag), skb_frag_off(frag),
				skb_frag_size(frag), PAGE_SIZE);
	}

	return skb;
}
/* Main rx processing when using software buffer management */
static int mvneta_rx_swbm(struct napi_struct *napi,
			  struct mvneta_port *pp, int budget,
			  struct mvneta_rx_queue *rxq)
{
	int rx_proc = 0, rx_todo, refill, size = 0;
	struct net_device *dev = pp->dev;
	struct skb_shared_info sinfo;
	struct mvneta_stats ps = {};
	struct bpf_prog *xdp_prog;
	u32 desc_status, frame_sz;
	struct xdp_buff xdp_buf;

	xdp_init_buff(&xdp_buf, PAGE_SIZE, &rxq->xdp_rxq);
	xdp_buf.data_hard_start = NULL;

	sinfo.nr_frags = 0;

	/* Get number of received packets */
	rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);

	xdp_prog = READ_ONCE(pp->xdp_prog);

	/* Fairness NAPI loop */
	while (rx_proc < budget && rx_proc < rx_todo) {
		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
		u32 rx_status, index;
		struct sk_buff *skb;
		struct page *page;

		index = rx_desc - rxq->descs;
		page = (struct page *)rxq->buf_virt_addr[index];

		rx_status = rx_desc->status;
		rx_proc++;
		rxq->refill_num++;

		if (rx_status & MVNETA_RXD_FIRST_DESC) {
			/* Check errors only for FIRST descriptor */
			if (rx_status & MVNETA_RXD_ERR_SUMMARY) {
				mvneta_rx_error(pp, rx_desc);
				goto next;
			}

			size = rx_desc->data_size;
			frame_sz = size - ETH_FCS_LEN;
			desc_status = rx_status;

			mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf,
					     &size, page);
		} else {
			if (unlikely(!xdp_buf.data_hard_start)) {
				rx_desc->buf_phys_addr = 0;
				page_pool_put_full_page(rxq->page_pool, page,
							true);
				goto next;
			}

			mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf,
						    &size, &sinfo, page);
		} /* Middle or Last descriptor */

		if (!(rx_status & MVNETA_RXD_LAST_DESC))
			/* no last descriptor this time */
			continue;

		if (size) {
			mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);
			goto next;
		}

		if (xdp_prog &&
		    mvneta_run_xdp(pp, rxq, xdp_prog, &xdp_buf, frame_sz, &ps))
			goto next;

		skb = mvneta_swbm_build_skb(pp, rxq->page_pool, &xdp_buf, desc_status);
		if (IS_ERR(skb)) {
			struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);

			mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);

			u64_stats_update_begin(&stats->syncp);
			stats->es.skb_alloc_error++;
			stats->rx_dropped++;
			u64_stats_update_end(&stats->syncp);

			goto next;
		}

		ps.rx_bytes += skb->len;
		ps.rx_packets++;

		skb->protocol = eth_type_trans(skb, dev);
		napi_gro_receive(napi, skb);
next:
		xdp_buf.data_hard_start = NULL;
		sinfo.nr_frags = 0;
	}

	if (xdp_buf.data_hard_start)
		mvneta_xdp_put_buff(pp, rxq, &xdp_buf, &sinfo, -1);

	if (ps.xdp_redirect)
		xdp_do_flush_map();

	if (ps.rx_packets)
		mvneta_update_stats(pp, &ps);

	/* return some buffers to hardware queue, one at a time is too slow */
	refill = mvneta_rx_refill_queue(pp, rxq);

	/* Update rxq management counters */
	mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill);

	return ps.rx_packets;
}
/* Main rx processing when using hardware buffer management */
static int mvneta_rx_hwbm(struct napi_struct *napi,
			  struct mvneta_port *pp, int rx_todo,
			  struct mvneta_rx_queue *rxq)
{
	struct net_device *dev = pp->dev;
	int rx_done;
	u32 rcvd_pkts = 0;
	u32 rcvd_bytes = 0;

	/* Get number of received packets */
	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);

	if (rx_todo > rx_done)
		rx_todo = rx_done;

	rx_done = 0;

	/* Fairness NAPI loop */
	while (rx_done < rx_todo) {
		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
		struct mvneta_bm_pool *bm_pool = NULL;
		struct sk_buff *skb;
		unsigned char *data;
		dma_addr_t phys_addr;
		u32 rx_status, frag_size;
		int rx_bytes, err;
		u8 pool_id;

		rx_done++;
		rx_status = rx_desc->status;
		rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
		data = (u8 *)(uintptr_t)rx_desc->buf_cookie;
		phys_addr = rx_desc->buf_phys_addr;
		pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
		bm_pool = &pp->bm_priv->bm_pools[pool_id];

		if (!mvneta_rxq_desc_is_first_last(rx_status) ||
		    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
err_drop_frame_ret_pool:
			/* Return the buffer to the pool */
			mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
					      rx_desc->buf_phys_addr);
err_drop_frame:
			mvneta_rx_error(pp, rx_desc);
			/* leave the descriptor untouched */
			continue;
		}

		if (rx_bytes <= rx_copybreak) {
			/* better copy a small frame and not unmap the DMA region */
			skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
			if (unlikely(!skb))
				goto err_drop_frame_ret_pool;

			dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev,
						      rx_desc->buf_phys_addr,
						      MVNETA_MH_SIZE + NET_SKB_PAD,
						      rx_bytes,
						      DMA_FROM_DEVICE);
			skb_put_data(skb, data + MVNETA_MH_SIZE + NET_SKB_PAD,
				     rx_bytes);

			skb->protocol = eth_type_trans(skb, dev);
			skb->ip_summed = mvneta_rx_csum(pp, rx_status);
			napi_gro_receive(napi, skb);

			rcvd_pkts++;
			rcvd_bytes += rx_bytes;

			/* Return the buffer to the pool */
			mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
					      rx_desc->buf_phys_addr);

			/* leave the descriptor and buffer untouched */
			continue;
		}

		/* Refill processing */
		err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC);
		if (err) {
			struct mvneta_pcpu_stats *stats;

			netdev_err(dev, "Linux processing - Can't refill\n");

			stats = this_cpu_ptr(pp->stats);
			u64_stats_update_begin(&stats->syncp);
			stats->es.refill_error++;
			u64_stats_update_end(&stats->syncp);

			goto err_drop_frame_ret_pool;
		}

		frag_size = bm_pool->hwbm_pool.frag_size;

		skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);

		/* After refill old buffer has to be unmapped regardless
		 * the skb is successfully built or not.
		 */
		dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);
		if (!skb)
			goto err_drop_frame;

		rcvd_pkts++;
		rcvd_bytes += rx_bytes;

		/* Linux processing */
		skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
		skb_put(skb, rx_bytes);

		skb->protocol = eth_type_trans(skb, dev);
		skb->ip_summed = mvneta_rx_csum(pp, rx_status);

		napi_gro_receive(napi, skb);
	}

	if (rcvd_pkts) {
		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->es.ps.rx_packets += rcvd_pkts;
		stats->es.ps.rx_bytes += rcvd_bytes;
		u64_stats_update_end(&stats->syncp);
	}

	/* Update rxq management counters */
	mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);

	return rx_done;
}
static void
mvneta_tso_put_hdr(struct sk_buff *skb,
		   struct mvneta_port *pp, struct mvneta_tx_queue *txq)
{
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
	struct mvneta_tx_desc *tx_desc;

	tx_desc = mvneta_txq_next_desc_get(txq);
	tx_desc->data_size = hdr_len;
	tx_desc->command = mvneta_skb_tx_csum(pp, skb);
	tx_desc->command |= MVNETA_TXD_F_DESC;
	tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
				 txq->txq_put_index * TSO_HEADER_SIZE;
	buf->type = MVNETA_TYPE_SKB;
	buf->skb = NULL;

	mvneta_txq_inc_put(txq);
}

static int
mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
		    struct sk_buff *skb, char *data, int size,
		    bool last_tcp, bool is_last)
{
	struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
	struct mvneta_tx_desc *tx_desc;

	tx_desc = mvneta_txq_next_desc_get(txq);
	tx_desc->data_size = size;
	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
						size, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent,
				       tx_desc->buf_phys_addr))) {
		mvneta_txq_desc_put(txq);
		return -ENOMEM;
	}

	tx_desc->command = 0;
	buf->type = MVNETA_TYPE_SKB;
	buf->skb = NULL;

	if (last_tcp) {
		/* last descriptor in the TCP packet */
		tx_desc->command = MVNETA_TXD_L_DESC;

		/* last descriptor in SKB */
		if (is_last)
			buf->skb = skb;
	}
	mvneta_txq_inc_put(txq);
	return 0;
}
static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
			 struct mvneta_tx_queue *txq)
{
	int hdr_len, total_len, data_left;
	int desc_count = 0;
	struct mvneta_port *pp = netdev_priv(dev);
	struct tso_t tso;
	int i;

	/* Count needed descriptors */
	if ((txq->count + tso_count_descs(skb)) >= txq->size)
		return 0;

	if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
		pr_info("*** Is this even possible?\n");
		return 0;
	}

	/* Initialize the TSO handler, and prepare the first payload */
	hdr_len = tso_start(skb, &tso);

	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char *hdr;

		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;
		desc_count++;

		/* prepare packet headers: MAC + IP + TCP */
		hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);

		mvneta_tso_put_hdr(skb, pp, txq);

		while (data_left > 0) {
			int size;

			desc_count++;

			size = min_t(int, tso.size, data_left);

			if (mvneta_tso_put_data(dev, txq, skb,
						tso.data, size,
						size == data_left,
						total_len == 0))
				goto err_release;
			data_left -= size;

			tso_build_data(skb, &tso, size);
		}
	}

	return desc_count;

err_release:
	/* Release all used data descriptors; header descriptors must not
	 * be DMA-unmapped.
	 */
	for (i = desc_count - 1; i >= 0; i--) {
		struct mvneta_tx_desc *tx_desc = txq->descs + i;

		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
			dma_unmap_single(pp->dev->dev.parent,
					 tx_desc->buf_phys_addr,
					 tx_desc->data_size,
					 DMA_TO_DEVICE);
		mvneta_txq_desc_put(txq);
	}
	return 0;
}
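/* Each TSO segment built above consumes one header descriptor (pointing into
 * the per-queue tso_hdrs DMA area, which is why IS_TSO_HEADER() entries are
 * never unmapped on error) followed by one or more data descriptors mapped
 * straight from the skb payload.
 */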
/* Handle tx fragmentation processing */
static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
				  struct mvneta_tx_queue *txq)
{
	struct mvneta_tx_desc *tx_desc;
	int i, nr_frags = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < nr_frags; i++) {
		struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		void *addr = skb_frag_address(frag);

		tx_desc = mvneta_txq_next_desc_get(txq);
		tx_desc->data_size = skb_frag_size(frag);

		tx_desc->buf_phys_addr =
			dma_map_single(pp->dev->dev.parent, addr,
				       tx_desc->data_size, DMA_TO_DEVICE);

		if (dma_mapping_error(pp->dev->dev.parent,
				      tx_desc->buf_phys_addr)) {
			mvneta_txq_desc_put(txq);
			goto error;
		}

		if (i == nr_frags - 1) {
			/* Last descriptor */
			tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
			buf->skb = skb;
		} else {
			/* Descriptor in the middle: Not First, Not Last */
			tx_desc->command = 0;
			buf->skb = NULL;
		}
		buf->type = MVNETA_TYPE_SKB;
		mvneta_txq_inc_put(txq);
	}

	return 0;

error:
	/* Release all descriptors that were used to map fragments of
	 * this packet, as well as the corresponding DMA mappings
	 */
	for (i = i - 1; i >= 0; i--) {
		tx_desc = txq->descs + i;
		dma_unmap_single(pp->dev->dev.parent,
				 tx_desc->buf_phys_addr,
				 tx_desc->data_size,
				 DMA_TO_DEVICE);
		mvneta_txq_desc_put(txq);
	}

	return -ENOMEM;
}
/* Main tx processing */
static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	u16 txq_id = skb_get_queue_mapping(skb);
	struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
	struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
	struct mvneta_tx_desc *tx_desc;
	int len = skb->len;
	int frags = 0;
	u32 tx_cmd;

	if (!netif_running(dev))
		goto out;

	if (skb_is_gso(skb)) {
		frags = mvneta_tx_tso(skb, dev, txq);
		goto out;
	}

	frags = skb_shinfo(skb)->nr_frags + 1;

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvneta_txq_next_desc_get(txq);

	tx_cmd = mvneta_skb_tx_csum(pp, skb);

	tx_desc->data_size = skb_headlen(skb);

	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
						tx_desc->data_size,
						DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent,
				       tx_desc->buf_phys_addr))) {
		mvneta_txq_desc_put(txq);
		frags = 0;
		goto out;
	}

	buf->type = MVNETA_TYPE_SKB;

	if (frags == 1) {
		/* First and Last descriptor */
		tx_cmd |= MVNETA_TXD_FLZ_DESC;
		tx_desc->command = tx_cmd;
		buf->skb = skb;
		mvneta_txq_inc_put(txq);
	} else {
		/* First but not Last */
		tx_cmd |= MVNETA_TXD_F_DESC;
		buf->skb = NULL;
		mvneta_txq_inc_put(txq);
		tx_desc->command = tx_cmd;
		/* Continue with other skb fragments */
		if (mvneta_tx_frag_process(pp, skb, txq)) {
			dma_unmap_single(dev->dev.parent,
					 tx_desc->buf_phys_addr,
					 tx_desc->data_size,
					 DMA_TO_DEVICE);
			mvneta_txq_desc_put(txq);
			frags = 0;
			goto out;
		}
	}

out:
	if (frags > 0) {
		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);

		netdev_tx_sent_queue(nq, len);

		txq->count += frags;
		if (txq->count >= txq->tx_stop_threshold)
			netif_tx_stop_queue(nq);

		if (!netdev_xmit_more() || netif_xmit_stopped(nq) ||
		    txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
			mvneta_txq_pend_desc_add(pp, txq, frags);
		else
			txq->pending += frags;

		u64_stats_update_begin(&stats->syncp);
		stats->es.ps.tx_bytes += len;
		stats->es.ps.tx_packets++;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	return NETDEV_TX_OK;
}
/* Free tx resources, when resetting a port */
static void mvneta_txq_done_force(struct mvneta_port *pp,
				  struct mvneta_tx_queue *txq)
{
	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
	int tx_done = txq->count;

	mvneta_txq_bufs_free(pp, txq, tx_done, nq, false);

	/* reset txq */
	txq->count = 0;
	txq->txq_put_index = 0;
	txq->txq_get_index = 0;
}

/* Handle tx done - called in softirq context. The <cause_tx_done> argument
 * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL.
 */
static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
{
	struct mvneta_tx_queue *txq;
	struct netdev_queue *nq;
	int cpu = smp_processor_id();

	while (cause_tx_done) {
		txq = mvneta_tx_done_policy(pp, cause_tx_done);

		nq = netdev_get_tx_queue(pp->dev, txq->id);
		__netif_tx_lock(nq, cpu);

		if (txq->count)
			mvneta_txq_done(pp, txq);

		__netif_tx_unlock(nq);
		cause_tx_done &= ~((1 << txq->id));
	}
}
/* Compute crc8 of the specified address, using a unique algorithm,
 * according to hw spec, different than generic crc8 algorithm
 */
static int mvneta_addr_crc(unsigned char *addr)
{
	int crc = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		int j;

		crc = (crc ^ addr[i]) << 8;
		for (j = 7; j >= 0; j--) {
			if (crc & (0x100 << j))
				crc ^= 0x107 << j;
		}
	}

	return crc;
}
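/* The resulting 8-bit value is used directly as the index into the 256-entry
 * Other Multicast (OMC) filter table and the matching pp->mcast_count[] slot
 * in mvneta_mcast_addr_set() below.
 */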
/* This method controls the net device special MAC multicast support.
 * The Special Multicast Table for MAC addresses supports MAC of the form
 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
 * Table entries in the DA-Filter table. This method sets the Special
 * Multicast Table appropriate entry.
 */
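/* For example, the address 01:00:5e:00:00:2a has last_byte 0x2a, which lands
 * in SMC table register 0x2a / 4 = 10, byte lane 0x2a % 4 = 2, per the
 * tbl_offset/reg_offset arithmetic in the function below.
 */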
static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
					  unsigned char last_byte,
					  int queue)
{
	unsigned int smc_table_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Register offset from SMC table base */
	tbl_offset = (last_byte / 4);
	/* Entry offset within the above reg */
	reg_offset = last_byte % 4;

	smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
					+ tbl_offset * 4));

	if (queue == -1)
		smc_table_reg &= ~(0xff << (8 * reg_offset));
	else {
		smc_table_reg &= ~(0xff << (8 * reg_offset));
		smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
		    smc_table_reg);
}

/* This method controls the network device Other MAC multicast support.
 * The Other Multicast Table is used for multicast of another type.
 * A CRC-8 is used as an index to the Other Multicast Table entries
 * in the DA-Filter table.
 * The method gets the CRC-8 value from the calling routine and
 * sets the Other Multicast Table appropriate entry according to the
 * CRC-8 argument.
 */
static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
					unsigned char crc8,
					int queue)
{
	unsigned int omc_table_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
	reg_offset = crc8 % 4;	     /* Entry offset within the above reg */

	omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);

	if (queue == -1) {
		/* Clear accepts frame bit at specified Other DA table entry */
		omc_table_reg &= ~(0xff << (8 * reg_offset));
	} else {
		omc_table_reg &= ~(0xff << (8 * reg_offset));
		omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
}
/* The network device supports multicast using two tables:
 *    1) Special Multicast Table for MAC addresses of the form
 *       0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
 *       The MAC DA[7:0] bits are used as a pointer to the Special Multicast
 *       Table entries in the DA-Filter table.
 *    2) Other Multicast Table for multicast of another type. A CRC-8 value
 *       is used as an index to the Other Multicast Table entries in the
 *       DA-Filter table.
 */
static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
				 int queue)
{
	unsigned char crc_result = 0;

	if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
		mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
		return 0;
	}

	crc_result = mvneta_addr_crc(p_addr);
	if (queue == -1) {
		if (pp->mcast_count[crc_result] == 0) {
			netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
				    crc_result);
			return -EINVAL;
		}

		pp->mcast_count[crc_result]--;
		if (pp->mcast_count[crc_result] != 0) {
			netdev_info(pp->dev,
				    "After delete there are %d valid Mcast for crc8=0x%02x\n",
				    pp->mcast_count[crc_result], crc_result);
			return -EINVAL;
		}
	} else
		pp->mcast_count[crc_result]++;

	mvneta_set_other_mcast_addr(pp, crc_result, queue);

	return 0;
}
/* Configure Filtering mode of Ethernet port */
static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
					  int is_promisc)
{
	u32 port_cfg_reg, val;

	port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);

	val = mvreg_read(pp, MVNETA_TYPE_PRIO);

	/* Set / Clear UPM bit in port configuration register */
	if (is_promisc) {
		/* Accept all Unicast addresses */
		port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
		val |= MVNETA_FORCE_UNI;
		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
	} else {
		/* Reject all Unicast addresses */
		port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
		val &= ~MVNETA_FORCE_UNI;
	}

	mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
	mvreg_write(pp, MVNETA_TYPE_PRIO, val);
}

/* register unicast and multicast addresses */
static void mvneta_set_rx_mode(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	struct netdev_hw_addr *ha;

	if (dev->flags & IFF_PROMISC) {
		/* Accept all: Multicast + Unicast */
		mvneta_rx_unicast_promisc_set(pp, 1);
		mvneta_set_ucast_table(pp, pp->rxq_def);
		mvneta_set_special_mcast_table(pp, pp->rxq_def);
		mvneta_set_other_mcast_table(pp, pp->rxq_def);
	} else {
		/* Accept single Unicast */
		mvneta_rx_unicast_promisc_set(pp, 0);
		mvneta_set_ucast_table(pp, -1);
		mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def);

		if (dev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			mvneta_set_special_mcast_table(pp, pp->rxq_def);
			mvneta_set_other_mcast_table(pp, pp->rxq_def);
		} else {
			/* Accept only initialized multicast */
			mvneta_set_special_mcast_table(pp, -1);
			mvneta_set_other_mcast_table(pp, -1);

			if (!netdev_mc_empty(dev)) {
				netdev_for_each_mc_addr(ha, dev) {
					mvneta_mcast_addr_set(pp, ha->addr,
							      pp->rxq_def);
				}
			}
		}
	}
}
/* Interrupt handling - the callback for request_irq() */
static irqreturn_t mvneta_isr(int irq, void *dev_id)
{
	struct mvneta_port *pp = (struct mvneta_port *)dev_id;

	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	napi_schedule(&pp->napi);

	return IRQ_HANDLED;
}

/* Interrupt handling - the callback for request_percpu_irq() */
static irqreturn_t mvneta_percpu_isr(int irq, void *dev_id)
{
	struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id;

	disable_percpu_irq(port->pp->dev->irq);
	napi_schedule(&port->napi);

	return IRQ_HANDLED;
}

static void mvneta_link_change(struct mvneta_port *pp)
{
	u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);

	phylink_mac_change(pp->phylink, !!(gmac_stat & MVNETA_GMAC_LINK_UP));
}
/* NAPI handler
 * Bits 0 - 7 of the causeRxTx register indicate that packets were
 * transmitted on the corresponding TXQ (Bit 0 is for TX queue 1).
 * Bits 8 - 15 of the causeRxTx register indicate that packets were
 * received on the corresponding RXQ (Bit 8 is for RX queue 0).
 * Each CPU has its own causeRxTx register.
 */
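/* For example, a causeRxTx value of 0x0100 read in mvneta_poll() below means
 * that RX queue 0 has packets pending (bit 8); fls() on the RX byte picks the
 * highest-numbered pending RX queue to service first.
 */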
static int mvneta_poll(struct napi_struct *napi, int budget)
{
	int rx_done = 0;
	u32 cause_rx_tx;
	int rx_queue;
	struct mvneta_port *pp = netdev_priv(napi->dev);
	struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);

	if (!netif_running(pp->dev)) {
		napi_complete(napi);
		return rx_done;
	}

	/* Read cause register */
	cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE);
	if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) {
		u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);

		mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);

		if (cause_misc & (MVNETA_CAUSE_PHY_STATUS_CHANGE |
				  MVNETA_CAUSE_LINK_CHANGE))
			mvneta_link_change(pp);
	}

	/* Release Tx descriptors */
	if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
		mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
		cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
	}

	/* For the case where the last mvneta_poll did not process all
	 * RX packets
	 */
	cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx :
		port->cause_rx_tx;

	rx_queue = fls(((cause_rx_tx >> 8) & 0xff));
	if (rx_queue) {
		rx_queue = rx_queue - 1;
		if (pp->bm_priv)
			rx_done = mvneta_rx_hwbm(napi, pp, budget,
						 &pp->rxqs[rx_queue]);
		else
			rx_done = mvneta_rx_swbm(napi, pp, budget,
						 &pp->rxqs[rx_queue]);
	}

	if (rx_done < budget) {
		cause_rx_tx = 0;
		napi_complete_done(napi, rx_done);

		if (pp->neta_armada3700) {
			unsigned long flags;

			local_irq_save(flags);
			mvreg_write(pp, MVNETA_INTR_NEW_MASK,
				    MVNETA_RX_INTR_MASK(rxq_number) |
				    MVNETA_TX_INTR_MASK(txq_number) |
				    MVNETA_MISCINTR_INTR_MASK);
			local_irq_restore(flags);
		} else {
			enable_percpu_irq(pp->dev->irq, 0);
		}
	}

	if (pp->neta_armada3700)
		pp->cause_rx_tx = cause_rx_tx;
	else
		port->cause_rx_tx = cause_rx_tx;

	return rx_done;
}
static int mvneta_create_page_pool(struct mvneta_port *pp,
				   struct mvneta_rx_queue *rxq, int size)
{
	struct bpf_prog *xdp_prog = READ_ONCE(pp->xdp_prog);
	struct page_pool_params pp_params = {
		.order = 0,
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size = size,
		.nid = NUMA_NO_NODE,
		.dev = pp->dev->dev.parent,
		.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
		.offset = pp->rx_offset_correction,
		.max_len = MVNETA_MAX_RX_BUF_SIZE,
	};
	int err;

	rxq->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(rxq->page_pool)) {
		err = PTR_ERR(rxq->page_pool);
		rxq->page_pool = NULL;
		return err;
	}

	err = xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id, 0);
	if (err < 0)
		goto err_free_pp;

	err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
					 rxq->page_pool);
	if (err)
		goto err_unregister_rxq;

	return 0;

err_unregister_rxq:
	xdp_rxq_info_unreg(&rxq->xdp_rxq);
err_free_pp:
	page_pool_destroy(rxq->page_pool);
	rxq->page_pool = NULL;
	return err;
}
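/* The DMA direction chosen above matters: with an XDP program attached the
 * pool maps pages bidirectionally so XDP_TX can transmit straight out of RX
 * buffers, otherwise plain DMA_FROM_DEVICE mappings are enough.
 */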
/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
			   int num)
{
	int i, err;

	err = mvneta_create_page_pool(pp, rxq, num);
	if (err < 0)
		return err;

	for (i = 0; i < num; i++) {
		memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
		if (mvneta_rx_refill(pp, rxq->descs + i, rxq,
				     GFP_KERNEL)) {
			netdev_err(pp->dev,
				   "%s:rxq %d, %d of %d buffs filled\n",
				   __func__, rxq->id, i, num);
			break;
		}
	}

	/* Add this number of RX descriptors as non occupied (ready to
	 * get packets)
	 */
	mvneta_rxq_non_occup_desc_add(pp, rxq, i);

	return i;
}

/* Free all packets pending transmit from all TXQs and reset TX port */
static void mvneta_tx_reset(struct mvneta_port *pp)
{
	int queue;

	/* free the skb's in the tx ring */
	for (queue = 0; queue < txq_number; queue++)
		mvneta_txq_done_force(pp, &pp->txqs[queue]);

	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
}

static void mvneta_rx_reset(struct mvneta_port *pp)
{
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
}
/* Rx/Tx queue initialization/cleanup methods */

static int mvneta_rxq_sw_init(struct mvneta_port *pp,
			      struct mvneta_rx_queue *rxq)
{
	rxq->size = pp->rx_ring_size;

	/* Allocate memory for RX descriptors */
	rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
					rxq->size * MVNETA_DESC_ALIGNED_SIZE,
					&rxq->descs_phys, GFP_KERNEL);
	if (!rxq->descs)
		return -ENOMEM;

	rxq->last_desc = rxq->size - 1;

	return 0;
}

static void mvneta_rxq_hw_init(struct mvneta_port *pp,
			       struct mvneta_rx_queue *rxq)
{
	/* Set Rx descriptors queue starting address */
	mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);

	/* Set coalescing pkts and time */
	mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
	mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);

	if (!pp->bm_priv) {
		/* Set Offset */
		mvneta_rxq_offset_set(pp, rxq, 0);
		mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ?
					MVNETA_MAX_RX_BUF_SIZE :
					MVNETA_RX_BUF_SIZE(pp->pkt_size));
		mvneta_rxq_bm_disable(pp, rxq);
		mvneta_rxq_fill(pp, rxq, rxq->size);
	} else {
		/* Set Offset */
		mvneta_rxq_offset_set(pp, rxq,
				      NET_SKB_PAD - pp->rx_offset_correction);

		mvneta_rxq_bm_enable(pp, rxq);
		/* Fill RXQ with buffers from RX pool */
		mvneta_rxq_long_pool_set(pp, rxq);
		mvneta_rxq_short_pool_set(pp, rxq);
		mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size);
	}
}

/* Create a specified RX queue */
static int mvneta_rxq_init(struct mvneta_port *pp,
			   struct mvneta_rx_queue *rxq)
{
	int ret;

	ret = mvneta_rxq_sw_init(pp, rxq);
	if (ret < 0)
		return ret;

	mvneta_rxq_hw_init(pp, rxq);

	return 0;
}

/* Cleanup Rx queue */
static void mvneta_rxq_deinit(struct mvneta_port *pp,
			      struct mvneta_rx_queue *rxq)
{
	mvneta_rxq_drop_pkts(pp, rxq);

	if (rxq->descs)
		dma_free_coherent(pp->dev->dev.parent,
				  rxq->size * MVNETA_DESC_ALIGNED_SIZE,
				  rxq->descs, rxq->descs_phys);

	rxq->descs = NULL;
	rxq->last_desc = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_phys = 0;
	rxq->first_to_refill = 0;
	rxq->refill_num = 0;
}
static int mvneta_txq_sw_init(struct mvneta_port *pp,
			      struct mvneta_tx_queue *txq)
{
	int cpu;

	txq->size = pp->tx_ring_size;

	/* A queue must always have room for at least one skb.
	 * Therefore, stop the queue when the free entries reaches
	 * the maximum number of descriptors per skb.
	 */
	txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
	txq->tx_wake_threshold = txq->tx_stop_threshold / 2;

	/* Allocate memory for TX descriptors */
	txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
					txq->size * MVNETA_DESC_ALIGNED_SIZE,
					&txq->descs_phys, GFP_KERNEL);
	if (!txq->descs)
		return -ENOMEM;

	txq->last_desc = txq->size - 1;

	txq->buf = kmalloc_array(txq->size, sizeof(*txq->buf), GFP_KERNEL);
	if (!txq->buf)
		return -ENOMEM;

	/* Allocate DMA buffers for TSO MAC/IP/TCP headers */
	txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
					   txq->size * TSO_HEADER_SIZE,
					   &txq->tso_hdrs_phys, GFP_KERNEL);
	if (!txq->tso_hdrs)
		return -ENOMEM;

	/* Setup XPS mapping */
	if (pp->neta_armada3700)
		cpu = 0;
	else if (txq_number > 1)
		cpu = txq->id % num_present_cpus();
	else
		cpu = pp->rxq_def % num_present_cpus();
	cpumask_set_cpu(cpu, &txq->affinity_mask);
	netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id);

	return 0;
}

static void mvneta_txq_hw_init(struct mvneta_port *pp,
			       struct mvneta_tx_queue *txq)
{
	/* Set maximum bandwidth for enabled TXQs */
	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);

	/* Set Tx descriptors queue starting address */
	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);

	mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
}

/* Create and initialize a tx queue */
static int mvneta_txq_init(struct mvneta_port *pp,
			   struct mvneta_tx_queue *txq)
{
	int ret;

	ret = mvneta_txq_sw_init(pp, txq);
	if (ret < 0)
		return ret;

	mvneta_txq_hw_init(pp, txq);

	return 0;
}
/* Free allocated resources when mvneta_txq_init() fails to allocate memory */
static void mvneta_txq_sw_deinit(struct mvneta_port *pp,
				 struct mvneta_tx_queue *txq)
{
	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);

	kfree(txq->buf);

	if (txq->tso_hdrs)
		dma_free_coherent(pp->dev->dev.parent,
				  txq->size * TSO_HEADER_SIZE,
				  txq->tso_hdrs, txq->tso_hdrs_phys);
	if (txq->descs)
		dma_free_coherent(pp->dev->dev.parent,
				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_phys);

	netdev_tx_reset_queue(nq);

	txq->descs = NULL;
	txq->last_desc = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_phys = 0;
}

static void mvneta_txq_hw_deinit(struct mvneta_port *pp,
				 struct mvneta_tx_queue *txq)
{
	/* Set minimum bandwidth for disabled TXQs */
	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);

	/* Set Tx descriptors queue starting address and size */
	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
}

static void mvneta_txq_deinit(struct mvneta_port *pp,
			      struct mvneta_tx_queue *txq)
{
	mvneta_txq_sw_deinit(pp, txq);
	mvneta_txq_hw_deinit(pp, txq);
}

/* Cleanup all Tx queues */
static void mvneta_cleanup_txqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < txq_number; queue++)
		mvneta_txq_deinit(pp, &pp->txqs[queue]);
}

/* Cleanup all Rx queues */
static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++)
		mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
}
/* Init all Rx queues */
static int mvneta_setup_rxqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++) {
		int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);

		if (err) {
			netdev_err(pp->dev, "%s: can't create rxq=%d\n",
				   __func__, queue);
			mvneta_cleanup_rxqs(pp);
			return err;
		}
	}

	return 0;
}

/* Init all tx queues */
static int mvneta_setup_txqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < txq_number; queue++) {
		int err = mvneta_txq_init(pp, &pp->txqs[queue]);

		if (err) {
			netdev_err(pp->dev, "%s: can't create txq=%d\n",
				   __func__, queue);
			mvneta_cleanup_txqs(pp);
			return err;
		}
	}

	return 0;
}
static int mvneta_comphy_init(struct mvneta_port *pp, phy_interface_t interface)
{
	int ret;

	ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET, interface);
	if (ret)
		return ret;

	return phy_power_on(pp->comphy);
}

static int mvneta_config_interface(struct mvneta_port *pp,
				   phy_interface_t interface)
{
	int ret = 0;

	if (pp->comphy) {
		if (interface == PHY_INTERFACE_MODE_SGMII ||
		    interface == PHY_INTERFACE_MODE_1000BASEX ||
		    interface == PHY_INTERFACE_MODE_2500BASEX)
			ret = mvneta_comphy_init(pp, interface);
	} else {
		switch (interface) {
		case PHY_INTERFACE_MODE_QSGMII:
			mvreg_write(pp, MVNETA_SERDES_CFG,
				    MVNETA_QSGMII_SERDES_PROTO);
			break;

		case PHY_INTERFACE_MODE_SGMII:
		case PHY_INTERFACE_MODE_1000BASEX:
			mvreg_write(pp, MVNETA_SERDES_CFG,
				    MVNETA_SGMII_SERDES_PROTO);
			break;

		case PHY_INTERFACE_MODE_2500BASEX:
			mvreg_write(pp, MVNETA_SERDES_CFG,
				    MVNETA_HSGMII_SERDES_PROTO);
			break;

		default:
			break;
		}
	}

	pp->phy_interface = interface;

	return ret;
}
static void mvneta_start_dev(struct mvneta_port *pp)
{
	int cpu;

	WARN_ON(mvneta_config_interface(pp, pp->phy_interface));

	mvneta_max_rx_size_set(pp, pp->pkt_size);
	mvneta_txq_max_tx_size_set(pp, pp->pkt_size);

	/* start the Rx/Tx activity */
	mvneta_port_enable(pp);

	if (!pp->neta_armada3700) {
		/* Enable polling on the port */
		for_each_online_cpu(cpu) {
			struct mvneta_pcpu_port *port =
				per_cpu_ptr(pp->ports, cpu);

			napi_enable(&port->napi);
		}
	} else {
		napi_enable(&pp->napi);
	}

	/* Unmask interrupts. It has to be done from each CPU */
	on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);

	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
		    MVNETA_CAUSE_LINK_CHANGE);

	phylink_start(pp->phylink);

	/* We may have called phylink_speed_down before */
	phylink_speed_up(pp->phylink);

	netif_tx_start_all_queues(pp->dev);

	clear_bit(__MVNETA_DOWN, &pp->state);
}

static void mvneta_stop_dev(struct mvneta_port *pp)
{
	unsigned int cpu;

	set_bit(__MVNETA_DOWN, &pp->state);

	if (device_may_wakeup(&pp->dev->dev))
		phylink_speed_down(pp->phylink, false);

	phylink_stop(pp->phylink);

	if (!pp->neta_armada3700) {
		for_each_online_cpu(cpu) {
			struct mvneta_pcpu_port *port =
				per_cpu_ptr(pp->ports, cpu);

			napi_disable(&port->napi);
		}
	} else {
		napi_disable(&pp->napi);
	}

	netif_carrier_off(pp->dev);

	mvneta_port_down(pp);
	netif_tx_stop_all_queues(pp->dev);

	/* Stop the port activity */
	mvneta_port_disable(pp);

	/* Clear all ethernet port interrupts */
	on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);

	/* Mask all ethernet port interrupts */
	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);

	mvneta_tx_reset(pp);
	mvneta_rx_reset(pp);

	WARN_ON(phy_power_off(pp->comphy));
}

static void mvneta_percpu_enable(void *arg)
{
	struct mvneta_port *pp = arg;

	enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
}

static void mvneta_percpu_disable(void *arg)
{
	struct mvneta_port *pp = arg;

	disable_percpu_irq(pp->dev->irq);
}
/* Change the device mtu */
static int mvneta_change_mtu(struct net_device *dev, int mtu)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int ret;

	if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
		netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
			    mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
		mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
	}

	if (pp->xdp_prog && mtu > MVNETA_MAX_RX_BUF_SIZE) {
		netdev_info(dev, "Illegal MTU value %d for XDP mode\n", mtu);
		return -EINVAL;
	}

	dev->mtu = mtu;

	if (!netif_running(dev)) {
		if (pp->bm_priv)
			mvneta_bm_update_mtu(pp, mtu);

		netdev_update_features(dev);
		return 0;
	}

	/* The interface is running, so we have to force a
	 * reallocation of the queues
	 */
	mvneta_stop_dev(pp);
	on_each_cpu(mvneta_percpu_disable, pp, true);

	mvneta_cleanup_txqs(pp);
	mvneta_cleanup_rxqs(pp);

	if (pp->bm_priv)
		mvneta_bm_update_mtu(pp, mtu);

	pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);

	ret = mvneta_setup_rxqs(pp);
	if (ret) {
		netdev_err(dev, "unable to setup rxqs after MTU change\n");
		return ret;
	}

	ret = mvneta_setup_txqs(pp);
	if (ret) {
		netdev_err(dev, "unable to setup txqs after MTU change\n");
		return ret;
	}

	on_each_cpu(mvneta_percpu_enable, pp, true);
	mvneta_start_dev(pp);

	netdev_update_features(dev);

	return 0;
}
static netdev_features_t mvneta_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
		features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
		netdev_info(dev,
			    "Disable IP checksum for MTU greater than %dB\n",
			    pp->tx_csum_limit);
	}

	return features;
}

/* Get mac address */
static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
{
	u32 mac_addr_l, mac_addr_h;

	mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
	mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
	addr[0] = (mac_addr_h >> 24) & 0xFF;
	addr[1] = (mac_addr_h >> 16) & 0xFF;
	addr[2] = (mac_addr_h >> 8) & 0xFF;
	addr[3] = mac_addr_h & 0xFF;
	addr[4] = (mac_addr_l >> 8) & 0xFF;
	addr[5] = mac_addr_l & 0xFF;
}

/* Handle setting mac address */
static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
{
	struct mvneta_port *pp = netdev_priv(dev);
	struct sockaddr *sockaddr = addr;
	int ret;

	ret = eth_prepare_mac_addr_change(dev, addr);
	if (ret < 0)
		return ret;

	/* Remove previous address table entry */
	mvneta_mac_addr_set(pp, dev->dev_addr, -1);

	/* Set new addr in hw */
	mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def);

	eth_commit_mac_addr_change(dev, addr);

	return 0;
}
static void mvneta_validate(struct phylink_config *config,
			    unsigned long *supported,
			    struct phylink_link_state *state)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct mvneta_port *pp = netdev_priv(ndev);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	/* We only support QSGMII, SGMII, 802.3z and RGMII modes.
	 * When in 802.3z mode, we must have AN enabled:
	 * "Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ...
	 * When <PortType> = 1 (1000BASE-X) this field must be set to 1."
	 */
	if (phy_interface_mode_is_8023z(state->interface)) {
		if (!phylink_test(state->advertising, Autoneg)) {
			bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
			return;
		}
	} else if (state->interface != PHY_INTERFACE_MODE_NA &&
		   state->interface != PHY_INTERFACE_MODE_QSGMII &&
		   state->interface != PHY_INTERFACE_MODE_SGMII &&
		   !phy_interface_mode_is_rgmii(state->interface)) {
		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
		return;
	}

	/* Allow all the expected bits */
	phylink_set(mask, Autoneg);
	phylink_set_port_modes(mask);

	/* Asymmetric pause is unsupported */
	phylink_set(mask, Pause);

	/* Half-duplex at speeds higher than 100Mbit is unsupported */
	if (pp->comphy || state->interface != PHY_INTERFACE_MODE_2500BASEX) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);
	}
	if (pp->comphy || state->interface == PHY_INTERFACE_MODE_2500BASEX) {
		phylink_set(mask, 2500baseT_Full);
		phylink_set(mask, 2500baseX_Full);
	}

	if (!phy_interface_mode_is_8023z(state->interface)) {
		/* 10M and 100M are only supported in non-802.3z mode */
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 10baseT_Full);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 100baseT_Full);
	}

	bitmap_and(supported, supported, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);

	/* We can only operate at 2500BaseX or 1000BaseX. If requested
	 * to advertise both, only report advertising at 2500BaseX.
	 */
	phylink_helper_basex_speed(state);
}
static void mvneta_mac_pcs_get_state(struct phylink_config *config,
				     struct phylink_link_state *state)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct mvneta_port *pp = netdev_priv(ndev);
	u32 gmac_stat;

	gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);

	if (gmac_stat & MVNETA_GMAC_SPEED_1000)
		state->speed =
			state->interface == PHY_INTERFACE_MODE_2500BASEX ?
			SPEED_2500 : SPEED_1000;
	else if (gmac_stat & MVNETA_GMAC_SPEED_100)
		state->speed = SPEED_100;
	else
		state->speed = SPEED_10;

	state->an_complete = !!(gmac_stat & MVNETA_GMAC_AN_COMPLETE);
	state->link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
	state->duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);

	state->pause = 0;
	if (gmac_stat & MVNETA_GMAC_RX_FLOW_CTRL_ENABLE)
		state->pause |= MLO_PAUSE_RX;
	if (gmac_stat & MVNETA_GMAC_TX_FLOW_CTRL_ENABLE)
		state->pause |= MLO_PAUSE_TX;
}

static void mvneta_mac_an_restart(struct phylink_config *config)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct mvneta_port *pp = netdev_priv(ndev);
	u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);

	mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
		    gmac_an | MVNETA_GMAC_INBAND_RESTART_AN);
	mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
		    gmac_an & ~MVNETA_GMAC_INBAND_RESTART_AN);
}
static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
			      const struct phylink_link_state *state)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct mvneta_port *pp = netdev_priv(ndev);
	u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
	u32 new_ctrl4, gmac_ctrl4 = mvreg_read(pp, MVNETA_GMAC_CTRL_4);
	u32 new_clk, gmac_clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
	u32 new_an, gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);

	new_ctrl0 = gmac_ctrl0 & ~MVNETA_GMAC0_PORT_1000BASE_X;
	new_ctrl2 = gmac_ctrl2 & ~(MVNETA_GMAC2_INBAND_AN_ENABLE |
				   MVNETA_GMAC2_PORT_RESET);
	new_ctrl4 = gmac_ctrl4 & ~(MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE);
	new_clk = gmac_clk & ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
	new_an = gmac_an & ~(MVNETA_GMAC_INBAND_AN_ENABLE |
			     MVNETA_GMAC_INBAND_RESTART_AN |
			     MVNETA_GMAC_AN_SPEED_EN |
			     MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL |
			     MVNETA_GMAC_AN_FLOW_CTRL_EN |
			     MVNETA_GMAC_AN_DUPLEX_EN);

	/* Even though it might look weird, when we're configured in
	 * SGMII or QSGMII mode, the RGMII bit needs to be set.
	 */
	new_ctrl2 |= MVNETA_GMAC2_PORT_RGMII;

	if (state->interface == PHY_INTERFACE_MODE_QSGMII ||
	    state->interface == PHY_INTERFACE_MODE_SGMII ||
	    phy_interface_mode_is_8023z(state->interface))
		new_ctrl2 |= MVNETA_GMAC2_PCS_ENABLE;

	if (phylink_test(state->advertising, Pause))
		new_an |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL;

	if (!phylink_autoneg_inband(mode)) {
		/* Phy or fixed speed - nothing to do, leave the
		 * configured speed, duplex and flow control as-is.
		 */
	} else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
		/* SGMII mode receives the state from the PHY */
		new_ctrl2 |= MVNETA_GMAC2_INBAND_AN_ENABLE;
		new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
		new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
				     MVNETA_GMAC_FORCE_LINK_PASS |
				     MVNETA_GMAC_CONFIG_MII_SPEED |
				     MVNETA_GMAC_CONFIG_GMII_SPEED |
				     MVNETA_GMAC_CONFIG_FULL_DUPLEX)) |
			 MVNETA_GMAC_INBAND_AN_ENABLE |
			 MVNETA_GMAC_AN_SPEED_EN |
			 MVNETA_GMAC_AN_DUPLEX_EN;
	} else {
		/* 802.3z negotiation - only 1000base-X */
		new_ctrl0 |= MVNETA_GMAC0_PORT_1000BASE_X;
		new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
		new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
				     MVNETA_GMAC_FORCE_LINK_PASS |
				     MVNETA_GMAC_CONFIG_MII_SPEED)) |
			 MVNETA_GMAC_INBAND_AN_ENABLE |
			 MVNETA_GMAC_CONFIG_GMII_SPEED |
			 /* The MAC only supports FD mode */
			 MVNETA_GMAC_CONFIG_FULL_DUPLEX;

		if (state->pause & MLO_PAUSE_AN && state->an_enabled)
			new_an |= MVNETA_GMAC_AN_FLOW_CTRL_EN;
	}

	/* Armada 370 documentation says we can only change the port mode
	 * and in-band enable when the link is down, so force it down
	 * while making these changes. We also do this for GMAC_CTRL2
	 */
	if ((new_ctrl0 ^ gmac_ctrl0) & MVNETA_GMAC0_PORT_1000BASE_X ||
	    (new_ctrl2 ^ gmac_ctrl2) & MVNETA_GMAC2_INBAND_AN_ENABLE ||
	    (new_an ^ gmac_an) & MVNETA_GMAC_INBAND_AN_ENABLE) {
		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
			    (gmac_an & ~MVNETA_GMAC_FORCE_LINK_PASS) |
			    MVNETA_GMAC_FORCE_LINK_DOWN);
	}

	/* When at 2.5G, the link partner can send frames with shortened
	 * preambles.
	 */
	if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
		new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE;

	if (pp->phy_interface != state->interface) {
		if (pp->comphy)
			WARN_ON(phy_power_off(pp->comphy));
		WARN_ON(mvneta_config_interface(pp, state->interface));
	}

	if (new_ctrl0 != gmac_ctrl0)
		mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0);
	if (new_ctrl2 != gmac_ctrl2)
		mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2);
	if (new_ctrl4 != gmac_ctrl4)
		mvreg_write(pp, MVNETA_GMAC_CTRL_4, new_ctrl4);
	if (new_clk != gmac_clk)
		mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, new_clk);
	if (new_an != gmac_an)
		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, new_an);

	if (gmac_ctrl2 & MVNETA_GMAC2_PORT_RESET) {
		while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
			MVNETA_GMAC2_PORT_RESET) != 0)
			continue;
	}
}
static void mvneta_set_eee(struct mvneta_port *pp, bool enable)
{
	u32 lpi_ctl1;

	lpi_ctl1 = mvreg_read(pp, MVNETA_LPI_CTRL_1);
	if (enable)
		lpi_ctl1 |= MVNETA_LPI_REQUEST_ENABLE;
	else
		lpi_ctl1 &= ~MVNETA_LPI_REQUEST_ENABLE;
	mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1);
}

static void mvneta_mac_link_down(struct phylink_config *config,
				 unsigned int mode, phy_interface_t interface)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct mvneta_port *pp = netdev_priv(ndev);
	u32 val;

	mvneta_port_down(pp);

	if (!phylink_autoneg_inband(mode)) {
		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
		val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
		val |= MVNETA_GMAC_FORCE_LINK_DOWN;
		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
	}

	pp->eee_active = false;
	mvneta_set_eee(pp, false);
}
static void mvneta_mac_link_up(struct phylink_config *config,
			       struct phy_device *phy,
			       unsigned int mode, phy_interface_t interface,
			       int speed, int duplex,
			       bool tx_pause, bool rx_pause)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct mvneta_port *pp = netdev_priv(ndev);
	u32 val;

	if (!phylink_autoneg_inband(mode)) {
		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
		val &= ~(MVNETA_GMAC_FORCE_LINK_DOWN |
			 MVNETA_GMAC_CONFIG_MII_SPEED |
			 MVNETA_GMAC_CONFIG_GMII_SPEED |
			 MVNETA_GMAC_CONFIG_FLOW_CTRL |
			 MVNETA_GMAC_CONFIG_FULL_DUPLEX);
		val |= MVNETA_GMAC_FORCE_LINK_PASS;

		if (speed == SPEED_1000 || speed == SPEED_2500)
			val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
		else if (speed == SPEED_100)
			val |= MVNETA_GMAC_CONFIG_MII_SPEED;

		if (duplex == DUPLEX_FULL)
			val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;

		if (tx_pause || rx_pause)
			val |= MVNETA_GMAC_CONFIG_FLOW_CTRL;

		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
	} else {
		/* When inband doesn't cover flow control or flow control is
		 * disabled, we need to manually configure it. This bit will
		 * only have effect if MVNETA_GMAC_AN_FLOW_CTRL_EN is unset.
		 */
		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
		val &= ~MVNETA_GMAC_CONFIG_FLOW_CTRL;

		if (tx_pause || rx_pause)
			val |= MVNETA_GMAC_CONFIG_FLOW_CTRL;

		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
	}

	mvneta_port_up(pp);

	if (phy && pp->eee_enabled) {
		pp->eee_active = phy_init_eee(phy, 0) >= 0;
		mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled);
	}
}

static const struct phylink_mac_ops mvneta_phylink_ops = {
	.validate = mvneta_validate,
	.mac_pcs_get_state = mvneta_mac_pcs_get_state,
	.mac_an_restart = mvneta_mac_an_restart,
	.mac_config = mvneta_mac_config,
	.mac_link_down = mvneta_mac_link_down,
	.mac_link_up = mvneta_mac_link_up,
};
static int mvneta_mdio_probe(struct mvneta_port *pp)
{
	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
	int err = phylink_of_phy_connect(pp->phylink, pp->dn, 0);

	if (err)
		netdev_err(pp->dev, "could not attach PHY: %d\n", err);

	phylink_ethtool_get_wol(pp->phylink, &wol);
	device_set_wakeup_capable(&pp->dev->dev, !!wol.supported);

	/* PHY WoL may be enabled but device wakeup disabled */
	if (wol.supported)
		device_set_wakeup_enable(&pp->dev->dev, !!wol.wolopts);

	return err;
}

static void mvneta_mdio_remove(struct mvneta_port *pp)
{
	phylink_disconnect_phy(pp->phylink);
}
/* Electing a CPU must be done in an atomic way: it should be done
 * after or before the removal/insertion of a CPU and this function is
 * not reentrant.
 */
static void mvneta_percpu_elect(struct mvneta_port *pp)
{
	int elected_cpu = 0, max_cpu, cpu, i = 0;

	/* Use the cpu associated to the rxq when it is online, in all
	 * the other cases, use the cpu 0 which can't be offline.
	 */
	if (cpu_online(pp->rxq_def))
		elected_cpu = pp->rxq_def;

	max_cpu = num_present_cpus();

	for_each_online_cpu(cpu) {
		int rxq_map = 0, txq_map = 0;
		int rxq;

		for (rxq = 0; rxq < rxq_number; rxq++)
			if ((rxq % max_cpu) == cpu)
				rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);

		if (cpu == elected_cpu)
			/* Map the default receive queue to the elected CPU */
			rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def);

		/* We update the TX queue map only if we have one
		 * queue. In this case we associate the TX queue to
		 * the CPU bound to the default RX queue.
		 */
		if (txq_number == 1)
			txq_map = (cpu == elected_cpu) ?
				MVNETA_CPU_TXQ_ACCESS(1) : 0;
		else
			txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
				MVNETA_CPU_TXQ_ACCESS_ALL_MASK;

		mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);

		/* Update the interrupt mask on each CPU according the
		 * new mapping
		 */
		smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt,
					 pp, true);
		i++;
	}
}
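
/* CPU hotplug "online" callback: quiesce the other CPUs' NAPI contexts,
 * enable NAPI and per-CPU interrupts on the incoming CPU, re-elect the
 * CPU owning the default RX queue and unmask the port interrupts again.
 */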
static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	int other_cpu;
	struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
						  node_online);
	struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);

	/* Armada 3700's per-cpu interrupt for mvneta is broken, all interrupts
	 * are routed to CPU 0, so we don't need all the cpu-hotplug support
	 */
	if (pp->neta_armada3700)
		return 0;

	spin_lock(&pp->lock);
	/*
	 * Configuring the driver for a new CPU while the driver is
	 * stopping is racy, so just avoid it.
	 */
	if (pp->is_stopped) {
		spin_unlock(&pp->lock);
		return 0;
	}
	netif_tx_stop_all_queues(pp->dev);

	/*
	 * We have to synchronise on the napi of each CPU except the one
	 * just being woken up
	 */
	for_each_online_cpu(other_cpu) {
		if (other_cpu != cpu) {
			struct mvneta_pcpu_port *other_port =
				per_cpu_ptr(pp->ports, other_cpu);

			napi_synchronize(&other_port->napi);
		}
	}

	/* Mask all ethernet port interrupts */
	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
	napi_enable(&port->napi);

	/*
	 * Enable per-CPU interrupts on the CPU that is
	 * brought up.
	 */
	mvneta_percpu_enable(pp);

	/*
	 * Enable per-CPU interrupt on the one CPU we care
	 * about.
	 */
	mvneta_percpu_elect(pp);

	/* Unmask all ethernet port interrupts */
	on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
		    MVNETA_CAUSE_LINK_CHANGE);
	netif_tx_start_all_queues(pp->dev);
	spin_unlock(&pp->lock);
	return 0;
}
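
/* CPU hotplug "down prepare" callback: runs before the CPU goes offline so
 * its NAPI context can be drained and its per-CPU interrupts disabled.
 */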
static int mvneta_cpu_down_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
						  node_online);
	struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);

	/*
	 * Thanks to this lock we are sure that any pending cpu election is
	 * done.
	 */
	spin_lock(&pp->lock);
	/* Mask all ethernet port interrupts */
	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
	spin_unlock(&pp->lock);

	napi_synchronize(&port->napi);
	napi_disable(&port->napi);
	/* Disable per-CPU interrupts on the CPU that is brought down. */
	mvneta_percpu_disable(pp);
	return 0;
}
static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
						  node_dead);

	/* Check if a new CPU must be elected now this one is down */
	spin_lock(&pp->lock);
	mvneta_percpu_elect(pp);
	spin_unlock(&pp->lock);
	/* Unmask all ethernet port interrupts */
	on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
		    MVNETA_CAUSE_LINK_CHANGE);
	netif_tx_start_all_queues(pp->dev);
	return 0;
}
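
/* ndo_open: allocate the RX/TX queues, request the port (or per-CPU) IRQ,
 * register the CPU hotplug instances, attach the PHY and start the port.
 */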
static int mvneta_open(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int ret;

	pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);

	ret = mvneta_setup_rxqs(pp);
	if (ret)
		return ret;

	ret = mvneta_setup_txqs(pp);
	if (ret)
		goto err_cleanup_rxqs;

	/* Connect to port interrupt line */
	if (pp->neta_armada3700)
		ret = request_irq(pp->dev->irq, mvneta_isr, 0,
				  dev->name, pp);
	else
		ret = request_percpu_irq(pp->dev->irq, mvneta_percpu_isr,
					 dev->name, pp->ports);
	if (ret) {
		netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
		goto err_cleanup_txqs;
	}

	if (!pp->neta_armada3700) {
		/* Enable per-CPU interrupt on all the CPU to handle our RX
		 * queue interrupt
		 */
		on_each_cpu(mvneta_percpu_enable, pp, true);

		pp->is_stopped = false;
		/* Register a CPU notifier to handle the case where our CPU
		 * might be taken offline.
		 */
		ret = cpuhp_state_add_instance_nocalls(online_hpstate,
						       &pp->node_online);
		if (ret)
			goto err_free_irq;

		ret = cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						       &pp->node_dead);
		if (ret)
			goto err_free_online_hp;
	}

	ret = mvneta_mdio_probe(pp);
	if (ret < 0) {
		netdev_err(dev, "cannot probe MDIO bus\n");
		goto err_free_dead_hp;
	}

	mvneta_start_dev(pp);

	return 0;

err_free_dead_hp:
	if (!pp->neta_armada3700)
		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						    &pp->node_dead);
err_free_online_hp:
	if (!pp->neta_armada3700)
		cpuhp_state_remove_instance_nocalls(online_hpstate,
						    &pp->node_online);
err_free_irq:
	if (pp->neta_armada3700) {
		free_irq(pp->dev->irq, pp);
	} else {
		on_each_cpu(mvneta_percpu_disable, pp, true);
		free_percpu_irq(pp->dev->irq, pp->ports);
	}
err_cleanup_txqs:
	mvneta_cleanup_txqs(pp);
err_cleanup_rxqs:
	mvneta_cleanup_rxqs(pp);
	return ret;
}
/* Stop the port, free port interrupt line */
static int mvneta_stop(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if (!pp->neta_armada3700) {
		/* Inform that we are stopping so we don't want to setup the
		 * driver for new CPUs in the notifiers. The code of the
		 * notifier for CPU online is protected by the same spinlock,
		 * so when we get the lock, the notifier work is done.
		 */
		spin_lock(&pp->lock);
		pp->is_stopped = true;
		spin_unlock(&pp->lock);

		mvneta_stop_dev(pp);
		mvneta_mdio_remove(pp);

		cpuhp_state_remove_instance_nocalls(online_hpstate,
						    &pp->node_online);
		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						    &pp->node_dead);
		on_each_cpu(mvneta_percpu_disable, pp, true);
		free_percpu_irq(dev->irq, pp->ports);
	} else {
		mvneta_stop_dev(pp);
		mvneta_mdio_remove(pp);
		free_irq(dev->irq, pp);
	}

	mvneta_cleanup_rxqs(pp);
	mvneta_cleanup_txqs(pp);

	return 0;
}
static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mvneta_port *pp = netdev_priv(dev);

	return phylink_mii_ioctl(pp->phylink, ifr, cmd);
}
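
/* Install or remove an XDP program. When the interface is running and the
 * presence of a program changes, the port is stopped and re-opened around
 * the swap so the RX queues are set up for the right path.
 */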
static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
			    struct netlink_ext_ack *extack)
{
	bool need_update, running = netif_running(dev);
	struct mvneta_port *pp = netdev_priv(dev);
	struct bpf_prog *old_prog;

	if (prog && dev->mtu > MVNETA_MAX_RX_BUF_SIZE) {
		NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
		return -EOPNOTSUPP;
	}

	if (pp->bm_priv) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Hardware Buffer Management not supported on XDP");
		return -EOPNOTSUPP;
	}

	need_update = !!pp->xdp_prog != !!prog;
	if (running && need_update)
		mvneta_stop(dev);

	old_prog = xchg(&pp->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	if (running && need_update)
		return mvneta_open(dev);

	return 0;
}
static int mvneta_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return mvneta_xdp_setup(dev, xdp->prog, xdp->extack);
	default:
		return -EINVAL;
	}
}
/* Ethtool methods */

/* Set link ksettings (phy address, speed) for ethtools */
static int
mvneta_ethtool_set_link_ksettings(struct net_device *ndev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct mvneta_port *pp = netdev_priv(ndev);

	return phylink_ethtool_ksettings_set(pp->phylink, cmd);
}

/* Get link ksettings for ethtools */
static int
mvneta_ethtool_get_link_ksettings(struct net_device *ndev,
				  struct ethtool_link_ksettings *cmd)
{
	struct mvneta_port *pp = netdev_priv(ndev);

	return phylink_ethtool_ksettings_get(pp->phylink, cmd);
}

static int mvneta_ethtool_nway_reset(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);

	return phylink_ethtool_nway_reset(pp->phylink);
}
/* Set interrupt coalescing for ethtools */
static int
mvneta_ethtool_set_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *c,
			    struct kernel_ethtool_coalesce *kernel_coal,
			    struct netlink_ext_ack *extack)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int queue;

	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		rxq->time_coal = c->rx_coalesce_usecs;
		rxq->pkts_coal = c->rx_max_coalesced_frames;
		mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
		mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
	}

	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		txq->done_pkts_coal = c->tx_max_coalesced_frames;
		mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
	}

	return 0;
}
/* get coalescing for ethtools */
static int
mvneta_ethtool_get_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *c,
			    struct kernel_ethtool_coalesce *kernel_coal,
			    struct netlink_ext_ack *extack)
{
	struct mvneta_port *pp = netdev_priv(dev);

	c->rx_coalesce_usecs = pp->rxqs[0].time_coal;
	c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal;

	c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;
	return 0;
}
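
/* The coalesce settings above are applied to every queue but reported back
 * from queue 0 only. Illustrative only, using standard ethtool syntax:
 *
 *   ethtool -C eth0 rx-usecs 100 rx-frames 32 tx-frames 32
 *   ethtool -c eth0
 */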
static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
				       struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
		sizeof(drvinfo->bus_info));
}
static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
					 struct ethtool_ringparam *ring)
{
	struct mvneta_port *pp = netdev_priv(netdev);

	ring->rx_max_pending = MVNETA_MAX_RXD;
	ring->tx_max_pending = MVNETA_MAX_TXD;
	ring->rx_pending = pp->rx_ring_size;
	ring->tx_pending = pp->tx_ring_size;
}
static int mvneta_ethtool_set_ringparam(struct net_device *dev,
					struct ethtool_ringparam *ring)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
		return -EINVAL;
	pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
		ring->rx_pending : MVNETA_MAX_RXD;

	pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
				   MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD);
	if (pp->tx_ring_size != ring->tx_pending)
		netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
			    pp->tx_ring_size, ring->tx_pending);

	if (netif_running(dev)) {
		mvneta_stop(dev);
		if (mvneta_open(dev)) {
			netdev_err(dev,
				   "error on opening device after ring param change\n");
			return -ENOMEM;
		}
	}

	return 0;
}
static void mvneta_ethtool_get_pauseparam(struct net_device *dev,
					  struct ethtool_pauseparam *pause)
{
	struct mvneta_port *pp = netdev_priv(dev);

	phylink_ethtool_get_pauseparam(pp->phylink, pause);
}

static int mvneta_ethtool_set_pauseparam(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mvneta_port *pp = netdev_priv(dev);

	return phylink_ethtool_set_pauseparam(pp->phylink, pause);
}
static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
				       u8 *data)
{
	if (sset == ETH_SS_STATS) {
		int i;

		for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       mvneta_statistics[i].name, ETH_GSTRING_LEN);
	}
}
static void
mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp,
				 struct mvneta_ethtool_stats *es)
{
	unsigned int start;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mvneta_pcpu_stats *stats;
		u64 skb_alloc_error;
		u64 refill_error;
		u64 xdp_redirect;
		u64 xdp_xmit_err;
		u64 xdp_tx_err;
		u64 xdp_pass;
		u64 xdp_drop;
		u64 xdp_xmit;
		u64 xdp_tx;

		stats = per_cpu_ptr(pp->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			skb_alloc_error = stats->es.skb_alloc_error;
			refill_error = stats->es.refill_error;
			xdp_redirect = stats->es.ps.xdp_redirect;
			xdp_pass = stats->es.ps.xdp_pass;
			xdp_drop = stats->es.ps.xdp_drop;
			xdp_xmit = stats->es.ps.xdp_xmit;
			xdp_xmit_err = stats->es.ps.xdp_xmit_err;
			xdp_tx = stats->es.ps.xdp_tx;
			xdp_tx_err = stats->es.ps.xdp_tx_err;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		es->skb_alloc_error += skb_alloc_error;
		es->refill_error += refill_error;
		es->ps.xdp_redirect += xdp_redirect;
		es->ps.xdp_pass += xdp_pass;
		es->ps.xdp_drop += xdp_drop;
		es->ps.xdp_xmit += xdp_xmit;
		es->ps.xdp_xmit_err += xdp_xmit_err;
		es->ps.xdp_tx += xdp_tx;
		es->ps.xdp_tx_err += xdp_tx_err;
	}
}
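
/* Fold the hardware MIB counters and the software (per-CPU) counters into
 * pp->ethtool_stats[], indexed in the same order as mvneta_statistics[].
 */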
static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
{
	struct mvneta_ethtool_stats stats = {};
	const struct mvneta_statistic *s;
	void __iomem *base = pp->base;
	u32 high, low;
	u64 val;
	int i;

	mvneta_ethtool_update_pcpu_stats(pp, &stats);
	for (i = 0, s = mvneta_statistics;
	     s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
	     s++, i++) {
		switch (s->type) {
		case T_REG_32:
			val = readl_relaxed(base + s->offset);
			pp->ethtool_stats[i] += val;
			break;
		case T_REG_64:
			/* Docs say to read low 32-bit then high */
			low = readl_relaxed(base + s->offset);
			high = readl_relaxed(base + s->offset + 4);
			val = (u64)high << 32 | low;
			pp->ethtool_stats[i] += val;
			break;
		case T_SW:
			switch (s->offset) {
			case ETHTOOL_STAT_EEE_WAKEUP:
				val = phylink_get_eee_err(pp->phylink);
				pp->ethtool_stats[i] += val;
				break;
			case ETHTOOL_STAT_SKB_ALLOC_ERR:
				pp->ethtool_stats[i] = stats.skb_alloc_error;
				break;
			case ETHTOOL_STAT_REFILL_ERR:
				pp->ethtool_stats[i] = stats.refill_error;
				break;
			case ETHTOOL_XDP_REDIRECT:
				pp->ethtool_stats[i] = stats.ps.xdp_redirect;
				break;
			case ETHTOOL_XDP_PASS:
				pp->ethtool_stats[i] = stats.ps.xdp_pass;
				break;
			case ETHTOOL_XDP_DROP:
				pp->ethtool_stats[i] = stats.ps.xdp_drop;
				break;
			case ETHTOOL_XDP_TX:
				pp->ethtool_stats[i] = stats.ps.xdp_tx;
				break;
			case ETHTOOL_XDP_TX_ERR:
				pp->ethtool_stats[i] = stats.ps.xdp_tx_err;
				break;
			case ETHTOOL_XDP_XMIT:
				pp->ethtool_stats[i] = stats.ps.xdp_xmit;
				break;
			case ETHTOOL_XDP_XMIT_ERR:
				pp->ethtool_stats[i] = stats.ps.xdp_xmit_err;
				break;
			}
			break;
		}
	}
}
static void mvneta_ethtool_get_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int i;

	mvneta_ethtool_update_stats(pp);

	for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
		*data++ = pp->ethtool_stats[i];
}

static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(mvneta_statistics);
	return -EOPNOTSUPP;
}

static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev)
{
	return MVNETA_RSS_LU_TABLE_SIZE;
}

static int mvneta_ethtool_get_rxnfc(struct net_device *dev,
				    struct ethtool_rxnfc *info,
				    u32 *rules __always_unused)
{
	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = rxq_number;
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
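
/* Apply a new RSS indirection table: stop TX, mask interrupts and disable
 * NAPI, retarget the default RX queue, re-elect the owning CPU, then bring
 * NAPI and the queues back up.
 */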
static int mvneta_config_rss(struct mvneta_port *pp)
{
	int cpu;
	u32 val;

	netif_tx_stop_all_queues(pp->dev);

	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);

	if (!pp->neta_armada3700) {
		/* We have to synchronise on the napi of each CPU */
		for_each_online_cpu(cpu) {
			struct mvneta_pcpu_port *pcpu_port =
				per_cpu_ptr(pp->ports, cpu);

			napi_synchronize(&pcpu_port->napi);
			napi_disable(&pcpu_port->napi);
		}
	} else {
		napi_synchronize(&pp->napi);
		napi_disable(&pp->napi);
	}

	pp->rxq_def = pp->indir[0];

	/* Update unicast mapping */
	mvneta_set_rx_mode(pp->dev);

	/* Update val of portCfg register accordingly with all RxQueue types */
	val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
	mvreg_write(pp, MVNETA_PORT_CONFIG, val);

	/* Update the elected CPU matching the new rxq_def */
	spin_lock(&pp->lock);
	mvneta_percpu_elect(pp);
	spin_unlock(&pp->lock);

	if (!pp->neta_armada3700) {
		/* We have to synchronise on the napi of each CPU */
		for_each_online_cpu(cpu) {
			struct mvneta_pcpu_port *pcpu_port =
				per_cpu_ptr(pp->ports, cpu);

			napi_enable(&pcpu_port->napi);
		}
	} else {
		napi_enable(&pp->napi);
	}

	netif_tx_start_all_queues(pp->dev);

	return 0;
}
static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
				   const u8 *key, const u8 hfunc)
{
	struct mvneta_port *pp = netdev_priv(dev);

	/* Current code for Armada 3700 doesn't support RSS features yet */
	if (pp->neta_armada3700)
		return -EOPNOTSUPP;

	/* We require at least one supported parameter to be changed
	 * and no change in any of the unsupported parameters
	 */
	if (key ||
	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
		return -EOPNOTSUPP;

	if (!indir)
		return 0;

	memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE);

	return mvneta_config_rss(pp);
}

static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
				   u8 *hfunc)
{
	struct mvneta_port *pp = netdev_priv(dev);

	/* Current code for Armada 3700 doesn't support RSS features yet */
	if (pp->neta_armada3700)
		return -EOPNOTSUPP;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	if (!indir)
		return 0;

	memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE);

	return 0;
}
static void mvneta_ethtool_get_wol(struct net_device *dev,
				   struct ethtool_wolinfo *wol)
{
	struct mvneta_port *pp = netdev_priv(dev);

	phylink_ethtool_get_wol(pp->phylink, wol);
}

static int mvneta_ethtool_set_wol(struct net_device *dev,
				  struct ethtool_wolinfo *wol)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int ret;

	ret = phylink_ethtool_set_wol(pp->phylink, wol);
	if (!ret)
		device_set_wakeup_enable(&dev->dev, !!wol->wolopts);

	return ret;
}
static int mvneta_ethtool_get_eee(struct net_device *dev,
				  struct ethtool_eee *eee)
{
	struct mvneta_port *pp = netdev_priv(dev);
	u32 lpi_ctl0;

	lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);

	eee->eee_enabled = pp->eee_enabled;
	eee->eee_active = pp->eee_active;
	eee->tx_lpi_enabled = pp->tx_lpi_enabled;
	eee->tx_lpi_timer = (lpi_ctl0) >> 8; // * scale;

	return phylink_ethtool_get_eee(pp->phylink, eee);
}
static int mvneta_ethtool_set_eee(struct net_device *dev,
				  struct ethtool_eee *eee)
{
	struct mvneta_port *pp = netdev_priv(dev);
	u32 lpi_ctl0;

	/* The Armada 37x documents do not give limits for this other than
	 * it being an 8-bit register.
	 */
	if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255)
		return -EINVAL;

	lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
	lpi_ctl0 &= ~(0xff << 8);
	lpi_ctl0 |= eee->tx_lpi_timer << 8;
	mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0);

	pp->eee_enabled = eee->eee_enabled;
	pp->tx_lpi_enabled = eee->tx_lpi_enabled;

	mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled);

	return phylink_ethtool_set_eee(pp->phylink, eee);
}
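
/* VLAN priority to RX queue mapping used by the mqprio offload below. */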
static void mvneta_clear_rx_prio_map(struct mvneta_port *pp)
{
	mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, 0);
}

static void mvneta_setup_rx_prio_map(struct mvneta_port *pp)
{
	u32 val = 0;
	int i;

	for (i = 0; i < rxq_number; i++)
		val |= MVNETA_VLAN_PRIO_RXQ_MAP(i, pp->prio_tc_map[i]);

	mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, val);
}
static int mvneta_setup_mqprio(struct net_device *dev,
			       struct tc_mqprio_qopt *qopt)
{
	struct mvneta_port *pp = netdev_priv(dev);
	u8 num_tc;
	int i;

	qopt->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
	num_tc = qopt->num_tc;

	if (num_tc > rxq_number)
		return -EINVAL;

	if (!num_tc) {
		mvneta_clear_rx_prio_map(pp);
		netdev_reset_tc(dev);
		return 0;
	}

	memcpy(pp->prio_tc_map, qopt->prio_tc_map, sizeof(pp->prio_tc_map));

	mvneta_setup_rx_prio_map(pp);

	netdev_set_num_tc(dev, qopt->num_tc);
	for (i = 0; i < qopt->num_tc; i++)
		netdev_set_tc_queue(dev, i, qopt->count[i], qopt->offset[i]);

	return 0;
}

static int mvneta_setup_tc(struct net_device *dev, enum tc_setup_type type,
			   void *type_data)
{
	switch (type) {
	case TC_SETUP_QDISC_MQPRIO:
		return mvneta_setup_mqprio(dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
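
/* Standard net_device callbacks exposed by the driver. */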
static const struct net_device_ops mvneta_netdev_ops = {
	.ndo_open            = mvneta_open,
	.ndo_stop            = mvneta_stop,
	.ndo_start_xmit      = mvneta_tx,
	.ndo_set_rx_mode     = mvneta_set_rx_mode,
	.ndo_set_mac_address = mvneta_set_mac_addr,
	.ndo_change_mtu      = mvneta_change_mtu,
	.ndo_fix_features    = mvneta_fix_features,
	.ndo_get_stats64     = mvneta_get_stats64,
	.ndo_eth_ioctl       = mvneta_ioctl,
	.ndo_bpf             = mvneta_xdp,
	.ndo_xdp_xmit        = mvneta_xdp_xmit,
	.ndo_setup_tc        = mvneta_setup_tc,
};
static const struct ethtool_ops mvneta_eth_tool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	.nway_reset          = mvneta_ethtool_nway_reset,
	.get_link            = ethtool_op_get_link,
	.set_coalesce        = mvneta_ethtool_set_coalesce,
	.get_coalesce        = mvneta_ethtool_get_coalesce,
	.get_drvinfo         = mvneta_ethtool_get_drvinfo,
	.get_ringparam       = mvneta_ethtool_get_ringparam,
	.set_ringparam       = mvneta_ethtool_set_ringparam,
	.get_pauseparam      = mvneta_ethtool_get_pauseparam,
	.set_pauseparam      = mvneta_ethtool_set_pauseparam,
	.get_strings         = mvneta_ethtool_get_strings,
	.get_ethtool_stats   = mvneta_ethtool_get_stats,
	.get_sset_count      = mvneta_ethtool_get_sset_count,
	.get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size,
	.get_rxnfc           = mvneta_ethtool_get_rxnfc,
	.get_rxfh            = mvneta_ethtool_get_rxfh,
	.set_rxfh            = mvneta_ethtool_set_rxfh,
	.get_link_ksettings  = mvneta_ethtool_get_link_ksettings,
	.set_link_ksettings  = mvneta_ethtool_set_link_ksettings,
	.get_wol             = mvneta_ethtool_get_wol,
	.set_wol             = mvneta_ethtool_set_wol,
	.get_eee             = mvneta_ethtool_get_eee,
	.set_eee             = mvneta_ethtool_set_eee,
};
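
/* Software initialisation of the port: disable it, program the default
 * register values and allocate the per-queue bookkeeping structures.
 */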
static int mvneta_init(struct device *dev, struct mvneta_port *pp)
{
	int queue;

	mvneta_port_disable(pp);

	/* Set port default values */
	mvneta_defaults_set(pp);

	pp->txqs = devm_kcalloc(dev, txq_number, sizeof(*pp->txqs), GFP_KERNEL);
	if (!pp->txqs)
		return -ENOMEM;

	/* Initialize TX descriptor rings */
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		txq->size = pp->tx_ring_size;
		txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
	}

	pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*pp->rxqs), GFP_KERNEL);
	if (!pp->rxqs)
		return -ENOMEM;

	/* Create Rx descriptor rings */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		rxq->size = pp->rx_ring_size;
		rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
		rxq->time_coal = MVNETA_RX_COAL_USEC;
		rxq->buf_virt_addr
			= devm_kmalloc_array(pp->dev->dev.parent,
					     rxq->size,
					     sizeof(*rxq->buf_virt_addr),
					     GFP_KERNEL);
		if (!rxq->buf_virt_addr)
			return -ENOMEM;
	}

	return 0;
}
/* platform glue : initialize decoding windows */
static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
				     const struct mbus_dram_target_info *dram)
{
	u32 win_enable;
	u32 win_protect;
	int i;

	for (i = 0; i < 6; i++) {
		mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
		mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);

		if (i < 4)
			mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
	}

	win_enable = 0x3f;
	win_protect = 0;

	if (dram) {
		for (i = 0; i < dram->num_cs; i++) {
			const struct mbus_dram_window *cs = dram->cs + i;

			mvreg_write(pp, MVNETA_WIN_BASE(i),
				    (cs->base & 0xffff0000) |
				    (cs->mbus_attr << 8) |
				    dram->mbus_dram_target_id);

			mvreg_write(pp, MVNETA_WIN_SIZE(i),
				    (cs->size - 1) & 0xffff0000);

			win_enable &= ~(1 << i);
			win_protect |= 3 << (2 * i);
		}
	} else {
		/* For Armada3700 open default 4GB Mbus window, leaving
		 * arbitration of target/attribute to a different layer
		 * of configuration.
		 */
		mvreg_write(pp, MVNETA_WIN_SIZE(0), 0xffff0000);
		win_enable &= ~BIT(0);
		win_protect = 3;
	}

	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
	mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
}
/* Power up the port */
static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
{
	/* MAC Cause register should be cleared */
	mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);

	if (phy_mode != PHY_INTERFACE_MODE_QSGMII &&
	    phy_mode != PHY_INTERFACE_MODE_SGMII &&
	    !phy_interface_mode_is_8023z(phy_mode) &&
	    !phy_interface_mode_is_rgmii(phy_mode))
		return -EINVAL;

	return 0;
}
/* Device initialization routine */
static int mvneta_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *bm_node;
	struct mvneta_port *pp;
	struct net_device *dev;
	struct phylink *phylink;
	struct phy *comphy;
	char hw_mac_addr[ETH_ALEN];
	phy_interface_t phy_mode;
	const char *mac_from;
	int tx_csum_limit;
	int err;
	int cpu;

	dev = devm_alloc_etherdev_mqs(&pdev->dev, sizeof(struct mvneta_port),
				      txq_number, rxq_number);
	if (!dev)
		return -ENOMEM;

	dev->irq = irq_of_parse_and_map(dn, 0);
	if (dev->irq == 0)
		return -EINVAL;

	err = of_get_phy_mode(dn, &phy_mode);
	if (err) {
		dev_err(&pdev->dev, "incorrect phy-mode\n");
		goto err_free_irq;
	}

	comphy = devm_of_phy_get(&pdev->dev, dn, NULL);
	if (comphy == ERR_PTR(-EPROBE_DEFER)) {
		err = -EPROBE_DEFER;
		goto err_free_irq;
	} else if (IS_ERR(comphy)) {
		comphy = NULL;
	}

	pp = netdev_priv(dev);
	spin_lock_init(&pp->lock);

	pp->phylink_config.dev = &dev->dev;
	pp->phylink_config.type = PHYLINK_NETDEV;

	phylink = phylink_create(&pp->phylink_config, pdev->dev.fwnode,
				 phy_mode, &mvneta_phylink_ops);
	if (IS_ERR(phylink)) {
		err = PTR_ERR(phylink);
		goto err_free_irq;
	}

	dev->tx_queue_len = MVNETA_MAX_TXD;
	dev->watchdog_timeo = 5 * HZ;
	dev->netdev_ops = &mvneta_netdev_ops;

	dev->ethtool_ops = &mvneta_eth_tool_ops;

	pp->phylink = phylink;
	pp->comphy = comphy;
	pp->phy_interface = phy_mode;
	pp->dn = dn;

	pp->rxq_def = rxq_def;
	pp->indir[0] = rxq_def;

	/* Get special SoC configurations */
	if (of_device_is_compatible(dn, "marvell,armada-3700-neta"))
		pp->neta_armada3700 = true;

	pp->clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(pp->clk))
		pp->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pp->clk)) {
		err = PTR_ERR(pp->clk);
		goto err_free_phylink;
	}

	clk_prepare_enable(pp->clk);

	pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
	if (!IS_ERR(pp->clk_bus))
		clk_prepare_enable(pp->clk_bus);

	pp->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(pp->base)) {
		err = PTR_ERR(pp->base);
		goto err_clk;
	}

	/* Alloc per-cpu port structure */
	pp->ports = alloc_percpu(struct mvneta_pcpu_port);
	if (!pp->ports) {
		err = -ENOMEM;
		goto err_clk;
	}

	/* Alloc per-cpu stats */
	pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
	if (!pp->stats) {
		err = -ENOMEM;
		goto err_free_ports;
	}

	err = of_get_mac_address(dn, dev->dev_addr);
	if (!err) {
		mac_from = "device tree";
	} else {
		mvneta_get_mac_addr(pp, hw_mac_addr);
		if (is_valid_ether_addr(hw_mac_addr)) {
			mac_from = "hardware";
			memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
		} else {
			mac_from = "random";
			eth_hw_addr_random(dev);
		}
	}

	if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) {
		if (tx_csum_limit < 0 ||
		    tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) {
			tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
			dev_info(&pdev->dev,
				 "Wrong TX csum limit in DT, set to %dB\n",
				 MVNETA_TX_CSUM_DEF_SIZE);
		}
	} else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) {
		tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
	} else {
		tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE;
	}

	pp->tx_csum_limit = tx_csum_limit;

	pp->dram_target_info = mv_mbus_dram_info();
	/* Armada3700 requires setting default configuration of Mbus
	 * windows, however without using filled mbus_dram_target_info
	 * structure.
	 */
	if (pp->dram_target_info || pp->neta_armada3700)
		mvneta_conf_mbus_windows(pp, pp->dram_target_info);

	pp->tx_ring_size = MVNETA_MAX_TXD;
	pp->rx_ring_size = MVNETA_MAX_RXD;

	pp->dev = dev;
	SET_NETDEV_DEV(dev, &pdev->dev);

	pp->id = global_port_id++;

	/* Obtain access to BM resources if enabled and already initialized */
	bm_node = of_parse_phandle(dn, "buffer-manager", 0);
	if (bm_node) {
		pp->bm_priv = mvneta_bm_get(bm_node);
		if (pp->bm_priv) {
			err = mvneta_bm_port_init(pdev, pp);
			if (err < 0) {
				dev_info(&pdev->dev,
					 "use SW buffer management\n");
				mvneta_bm_put(pp->bm_priv);
				pp->bm_priv = NULL;
			}
		}
		/* Set RX packet offset correction for platforms, whose
		 * NET_SKB_PAD, exceeds 64B. It should be 64B for 64-bit
		 * platforms and 0B for 32-bit ones.
		 */
		pp->rx_offset_correction = max(0,
					       NET_SKB_PAD -
					       MVNETA_RX_PKT_OFFSET_CORRECTION);
	}
	of_node_put(bm_node);

	/* sw buffer management */
	if (!pp->bm_priv)
		pp->rx_offset_correction = MVNETA_SKB_HEADROOM;

	err = mvneta_init(&pdev->dev, pp);
	if (err < 0)
		goto err_netdev;

	err = mvneta_port_power_up(pp, pp->phy_interface);
	if (err < 0) {
		dev_err(&pdev->dev, "can't power up port\n");
		goto err_netdev;
	}

	/* Armada3700 network controller does not support per-cpu
	 * operation, so only single NAPI should be initialized.
	 */
	if (pp->neta_armada3700) {
		netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
	} else {
		for_each_present_cpu(cpu) {
			struct mvneta_pcpu_port *port =
				per_cpu_ptr(pp->ports, cpu);

			netif_napi_add(dev, &port->napi, mvneta_poll,
				       NAPI_POLL_WEIGHT);
			port->pp = pp;
		}
	}

	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_TSO | NETIF_F_RXCSUM;
	dev->hw_features |= dev->features;
	dev->vlan_features |= dev->features;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;

	/* MTU range: 68 - 9676 */
	dev->min_mtu = ETH_MIN_MTU;
	/* 9676 == 9700 - 20 and rounding to 8 */
	dev->max_mtu = 9676;

	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register\n");
		goto err_netdev;
	}

	netdev_info(dev, "Using %s mac address %pM\n", mac_from,
		    dev->dev_addr);

	platform_set_drvdata(pdev, pp->dev);

	return 0;

err_netdev:
	if (pp->bm_priv) {
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
				       1 << pp->id);
		mvneta_bm_put(pp->bm_priv);
	}
	free_percpu(pp->stats);
err_free_ports:
	free_percpu(pp->ports);
err_clk:
	clk_disable_unprepare(pp->clk_bus);
	clk_disable_unprepare(pp->clk);
err_free_phylink:
	if (pp->phylink)
		phylink_destroy(pp->phylink);
err_free_irq:
	irq_dispose_mapping(dev->irq);
	return err;
}
/* Device removal routine */
static int mvneta_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct mvneta_port *pp = netdev_priv(dev);

	unregister_netdev(dev);
	clk_disable_unprepare(pp->clk_bus);
	clk_disable_unprepare(pp->clk);
	free_percpu(pp->ports);
	free_percpu(pp->stats);
	irq_dispose_mapping(dev->irq);
	phylink_destroy(pp->phylink);

	if (pp->bm_priv) {
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
				       1 << pp->id);
		mvneta_bm_put(pp->bm_priv);
	}

	return 0;
}
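
/* Suspend/resume: the queues are torn down and re-initialised rather than
 * preserved across the power transition, so resume reprograms the port
 * from scratch before re-attaching the netdev.
 */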
#ifdef CONFIG_PM_SLEEP
static int mvneta_suspend(struct device *device)
{
	int queue;
	struct net_device *dev = dev_get_drvdata(device);
	struct mvneta_port *pp = netdev_priv(dev);

	if (!netif_running(dev))
		goto clean_exit;

	if (!pp->neta_armada3700) {
		spin_lock(&pp->lock);
		pp->is_stopped = true;
		spin_unlock(&pp->lock);

		cpuhp_state_remove_instance_nocalls(online_hpstate,
						    &pp->node_online);
		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						    &pp->node_dead);
	}

	mvneta_stop_dev(pp);

	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

		mvneta_rxq_drop_pkts(pp, rxq);
	}

	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];

		mvneta_txq_hw_deinit(pp, txq);
	}

clean_exit:
	netif_device_detach(dev);
	clk_disable_unprepare(pp->clk_bus);
	clk_disable_unprepare(pp->clk);

	return 0;
}
static int mvneta_resume(struct device *device)
{
	struct platform_device *pdev = to_platform_device(device);
	struct net_device *dev = dev_get_drvdata(device);
	struct mvneta_port *pp = netdev_priv(dev);
	int err, queue;

	clk_prepare_enable(pp->clk);
	if (!IS_ERR(pp->clk_bus))
		clk_prepare_enable(pp->clk_bus);
	if (pp->dram_target_info || pp->neta_armada3700)
		mvneta_conf_mbus_windows(pp, pp->dram_target_info);
	if (pp->bm_priv) {
		err = mvneta_bm_port_init(pdev, pp);
		if (err < 0) {
			dev_info(&pdev->dev, "use SW buffer management\n");
			pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
			pp->bm_priv = NULL;
		}
	}
	mvneta_defaults_set(pp);
	err = mvneta_port_power_up(pp, pp->phy_interface);
	if (err < 0) {
		dev_err(device, "can't power up port\n");
		return err;
	}

	netif_device_attach(dev);

	if (!netif_running(dev))
		return 0;

	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

		rxq->next_desc_to_proc = 0;
		mvneta_rxq_hw_init(pp, rxq);
	}

	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];

		txq->next_desc_to_proc = 0;
		mvneta_txq_hw_init(pp, txq);
	}

	if (!pp->neta_armada3700) {
		spin_lock(&pp->lock);
		pp->is_stopped = false;
		spin_unlock(&pp->lock);
		cpuhp_state_add_instance_nocalls(online_hpstate,
						 &pp->node_online);
		cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						 &pp->node_dead);
	}

	mvneta_start_dev(pp);
	mvneta_set_rx_mode(dev);

	return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(mvneta_pm_ops, mvneta_suspend, mvneta_resume);

static const struct of_device_id mvneta_match[] = {
	{ .compatible = "marvell,armada-370-neta" },
	{ .compatible = "marvell,armada-xp-neta" },
	{ .compatible = "marvell,armada-3700-neta" },
	{ }
};
MODULE_DEVICE_TABLE(of, mvneta_match);

static struct platform_driver mvneta_driver = {
	.probe = mvneta_probe,
	.remove = mvneta_remove,
	.driver = {
		.name = MVNETA_DRIVER_NAME,
		.of_match_table = mvneta_match,
		.pm = &mvneta_pm_ops,
	},
};
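
/* Module init registers the CPU hotplug states used by mvneta_open() and
 * then the platform driver; teardown happens in the reverse order.
 */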
static int __init mvneta_driver_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/mvneta:online",
				      mvneta_cpu_online,
				      mvneta_cpu_down_prepare);
	if (ret < 0)
		return ret;
	online_hpstate = ret;
	ret = cpuhp_setup_state_multi(CPUHP_NET_MVNETA_DEAD, "net/mvneta:dead",
				      NULL, mvneta_cpu_dead);
	if (ret)
		goto err_dead;

	ret = platform_driver_register(&mvneta_driver);
	if (ret)
		goto err;
	return 0;

err:
	cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
err_dead:
	cpuhp_remove_multi_state(online_hpstate);
	return ret;
}
module_init(mvneta_driver_init);

static void __exit mvneta_driver_exit(void)
{
	platform_driver_unregister(&mvneta_driver);
	cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
	cpuhp_remove_multi_state(online_hpstate);
}
module_exit(mvneta_driver_exit);
MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
MODULE_LICENSE("GPL");

module_param(rxq_number, int, 0444);
module_param(txq_number, int, 0444);

module_param(rxq_def, int, 0444);
module_param(rx_copybreak, int, 0644);