1 /*
2 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
3 *
4 * Copyright (C) 2012 Marvell
5 *
6 * Rami Rosen <rosenr@marvell.com>
7 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
8 *
9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any
11 * warranty of any kind, whether express or implied.
12 */
13
14 #include <linux/clk.h>
15 #include <linux/cpu.h>
16 #include <linux/etherdevice.h>
17 #include <linux/if_vlan.h>
18 #include <linux/inetdevice.h>
19 #include <linux/interrupt.h>
20 #include <linux/io.h>
21 #include <linux/kernel.h>
22 #include <linux/mbus.h>
23 #include <linux/module.h>
24 #include <linux/netdevice.h>
25 #include <linux/of.h>
26 #include <linux/of_address.h>
27 #include <linux/of_irq.h>
28 #include <linux/of_mdio.h>
29 #include <linux/of_net.h>
30 #include <linux/phy/phy.h>
31 #include <linux/phy.h>
32 #include <linux/phylink.h>
33 #include <linux/platform_device.h>
34 #include <linux/skbuff.h>
35 #include <net/hwbm.h>
36 #include "mvneta_bm.h"
37 #include <net/ip.h>
38 #include <net/ipv6.h>
39 #include <net/tso.h>
40 #include <net/page_pool.h>
41 #include <linux/bpf_trace.h>
42
43 /* Registers */
44 #define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2))
45 #define MVNETA_RXQ_HW_BUF_ALLOC BIT(0)
46 #define MVNETA_RXQ_SHORT_POOL_ID_SHIFT 4
47 #define MVNETA_RXQ_SHORT_POOL_ID_MASK 0x30
48 #define MVNETA_RXQ_LONG_POOL_ID_SHIFT 6
49 #define MVNETA_RXQ_LONG_POOL_ID_MASK 0xc0
50 #define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8)
51 #define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8)
52 #define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2))
53 #define MVNETA_RXQ_NON_OCCUPIED(v) ((v) << 16)
54 #define MVNETA_RXQ_BASE_ADDR_REG(q) (0x1480 + ((q) << 2))
55 #define MVNETA_RXQ_SIZE_REG(q) (0x14a0 + ((q) << 2))
56 #define MVNETA_RXQ_BUF_SIZE_SHIFT 19
57 #define MVNETA_RXQ_BUF_SIZE_MASK (0x1fff << 19)
58 #define MVNETA_RXQ_STATUS_REG(q) (0x14e0 + ((q) << 2))
59 #define MVNETA_RXQ_OCCUPIED_ALL_MASK 0x3fff
60 #define MVNETA_RXQ_STATUS_UPDATE_REG(q) (0x1500 + ((q) << 2))
61 #define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT 16
62 #define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX 255
63 #define MVNETA_PORT_POOL_BUFFER_SZ_REG(pool) (0x1700 + ((pool) << 2))
64 #define MVNETA_PORT_POOL_BUFFER_SZ_SHIFT 3
65 #define MVNETA_PORT_POOL_BUFFER_SZ_MASK 0xfff8
66 #define MVNETA_PORT_RX_RESET 0x1cc0
67 #define MVNETA_PORT_RX_DMA_RESET BIT(0)
68 #define MVNETA_PHY_ADDR 0x2000
69 #define MVNETA_PHY_ADDR_MASK 0x1f
70 #define MVNETA_MBUS_RETRY 0x2010
71 #define MVNETA_UNIT_INTR_CAUSE 0x2080
72 #define MVNETA_UNIT_CONTROL 0x20B0
73 #define MVNETA_PHY_POLLING_ENABLE BIT(1)
74 #define MVNETA_WIN_BASE(w) (0x2200 + ((w) << 3))
75 #define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3))
76 #define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2))
77 #define MVNETA_BASE_ADDR_ENABLE 0x2290
78 #define MVNETA_ACCESS_PROTECT_ENABLE 0x2294
79 #define MVNETA_PORT_CONFIG 0x2400
80 #define MVNETA_UNI_PROMISC_MODE BIT(0)
81 #define MVNETA_DEF_RXQ(q) ((q) << 1)
82 #define MVNETA_DEF_RXQ_ARP(q) ((q) << 4)
83 #define MVNETA_TX_UNSET_ERR_SUM BIT(12)
84 #define MVNETA_DEF_RXQ_TCP(q) ((q) << 16)
85 #define MVNETA_DEF_RXQ_UDP(q) ((q) << 19)
86 #define MVNETA_DEF_RXQ_BPDU(q) ((q) << 22)
87 #define MVNETA_RX_CSUM_WITH_PSEUDO_HDR BIT(25)
88 #define MVNETA_PORT_CONFIG_DEFL_VALUE(q) (MVNETA_DEF_RXQ(q) | \
89 MVNETA_DEF_RXQ_ARP(q) | \
90 MVNETA_DEF_RXQ_TCP(q) | \
91 MVNETA_DEF_RXQ_UDP(q) | \
92 MVNETA_DEF_RXQ_BPDU(q) | \
93 MVNETA_TX_UNSET_ERR_SUM | \
94 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
95 #define MVNETA_PORT_CONFIG_EXTEND 0x2404
96 #define MVNETA_MAC_ADDR_LOW 0x2414
97 #define MVNETA_MAC_ADDR_HIGH 0x2418
98 #define MVNETA_SDMA_CONFIG 0x241c
99 #define MVNETA_SDMA_BRST_SIZE_16 4
100 #define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1)
101 #define MVNETA_RX_NO_DATA_SWAP BIT(4)
102 #define MVNETA_TX_NO_DATA_SWAP BIT(5)
103 #define MVNETA_DESC_SWAP BIT(6)
104 #define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22)
105 #define MVNETA_PORT_STATUS 0x2444
106 #define MVNETA_TX_IN_PRGRS BIT(1)
107 #define MVNETA_TX_FIFO_EMPTY BIT(8)
108 #define MVNETA_RX_MIN_FRAME_SIZE 0x247c
109 #define MVNETA_SERDES_CFG 0x24A0
110 #define MVNETA_SGMII_SERDES_PROTO 0x0cc7
111 #define MVNETA_QSGMII_SERDES_PROTO 0x0667
112 #define MVNETA_TYPE_PRIO 0x24bc
113 #define MVNETA_FORCE_UNI BIT(21)
114 #define MVNETA_TXQ_CMD_1 0x24e4
115 #define MVNETA_TXQ_CMD 0x2448
116 #define MVNETA_TXQ_DISABLE_SHIFT 8
117 #define MVNETA_TXQ_ENABLE_MASK 0x000000ff
118 #define MVNETA_RX_DISCARD_FRAME_COUNT 0x2484
119 #define MVNETA_OVERRUN_FRAME_COUNT 0x2488
120 #define MVNETA_GMAC_CLOCK_DIVIDER 0x24f4
121 #define MVNETA_GMAC_1MS_CLOCK_ENABLE BIT(31)
122 #define MVNETA_ACC_MODE 0x2500
123 #define MVNETA_BM_ADDRESS 0x2504
124 #define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2))
125 #define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff
126 #define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00
127 #define MVNETA_CPU_RXQ_ACCESS(rxq) BIT(rxq)
128 #define MVNETA_CPU_TXQ_ACCESS(txq) BIT(txq + 8)
129 #define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2))
130
131 /* Exception Interrupt Port/Queue Cause register
132 *
133 * Their behavior depends on the mapping done using the PCPX2Q
134 * registers. For a given CPU, if the bit associated with a queue is
135 * not set, then a read of the register from this CPU will always
136 * return 0 and a write won't do anything.
137 */
138
139 #define MVNETA_INTR_NEW_CAUSE 0x25a0
140 #define MVNETA_INTR_NEW_MASK 0x25a4
141
142 /* bits 0..7 = TXQ SENT, one bit per queue.
143 * bits 8..15 = RXQ OCCUP, one bit per queue.
144 * bits 16..23 = RXQ FREE, one bit per queue.
145 * bit 29 = OLD_REG_SUM, see old reg ?
146 * bit 30 = TX_ERR_SUM, one bit for 4 ports
147 * bit 31 = MISC_SUM, one bit for 4 ports
148 */
149 #define MVNETA_TX_INTR_MASK(nr_txqs) (((1 << nr_txqs) - 1) << 0)
150 #define MVNETA_TX_INTR_MASK_ALL (0xff << 0)
151 #define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8)
152 #define MVNETA_RX_INTR_MASK_ALL (0xff << 8)
153 #define MVNETA_MISCINTR_INTR_MASK BIT(31)
154
155 #define MVNETA_INTR_OLD_CAUSE 0x25a8
156 #define MVNETA_INTR_OLD_MASK 0x25ac
157
158 /* Data Path Port/Queue Cause Register */
159 #define MVNETA_INTR_MISC_CAUSE 0x25b0
160 #define MVNETA_INTR_MISC_MASK 0x25b4
161
162 #define MVNETA_CAUSE_PHY_STATUS_CHANGE BIT(0)
163 #define MVNETA_CAUSE_LINK_CHANGE BIT(1)
164 #define MVNETA_CAUSE_PTP BIT(4)
165
166 #define MVNETA_CAUSE_INTERNAL_ADDR_ERR BIT(7)
167 #define MVNETA_CAUSE_RX_OVERRUN BIT(8)
168 #define MVNETA_CAUSE_RX_CRC_ERROR BIT(9)
169 #define MVNETA_CAUSE_RX_LARGE_PKT BIT(10)
170 #define MVNETA_CAUSE_TX_UNDERUN BIT(11)
171 #define MVNETA_CAUSE_PRBS_ERR BIT(12)
172 #define MVNETA_CAUSE_PSC_SYNC_CHANGE BIT(13)
173 #define MVNETA_CAUSE_SERDES_SYNC_ERR BIT(14)
174
175 #define MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT 16
176 #define MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
177 #define MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))
178
179 #define MVNETA_CAUSE_TXQ_ERROR_SHIFT 24
180 #define MVNETA_CAUSE_TXQ_ERROR_ALL_MASK (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
181 #define MVNETA_CAUSE_TXQ_ERROR_MASK(q) (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))
182
183 #define MVNETA_INTR_ENABLE 0x25b8
184 #define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00
185 #define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0x000000ff
186
187 #define MVNETA_RXQ_CMD 0x2680
188 #define MVNETA_RXQ_DISABLE_SHIFT 8
189 #define MVNETA_RXQ_ENABLE_MASK 0x000000ff
190 #define MVETH_TXQ_TOKEN_COUNT_REG(q) (0x2700 + ((q) << 4))
191 #define MVETH_TXQ_TOKEN_CFG_REG(q) (0x2704 + ((q) << 4))
192 #define MVNETA_GMAC_CTRL_0 0x2c00
193 #define MVNETA_GMAC_MAX_RX_SIZE_SHIFT 2
194 #define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc
195 #define MVNETA_GMAC0_PORT_1000BASE_X BIT(1)
196 #define MVNETA_GMAC0_PORT_ENABLE BIT(0)
197 #define MVNETA_GMAC_CTRL_2 0x2c08
198 #define MVNETA_GMAC2_INBAND_AN_ENABLE BIT(0)
199 #define MVNETA_GMAC2_PCS_ENABLE BIT(3)
200 #define MVNETA_GMAC2_PORT_RGMII BIT(4)
201 #define MVNETA_GMAC2_PORT_RESET BIT(6)
202 #define MVNETA_GMAC_STATUS 0x2c10
203 #define MVNETA_GMAC_LINK_UP BIT(0)
204 #define MVNETA_GMAC_SPEED_1000 BIT(1)
205 #define MVNETA_GMAC_SPEED_100 BIT(2)
206 #define MVNETA_GMAC_FULL_DUPLEX BIT(3)
207 #define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE BIT(4)
208 #define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE BIT(5)
209 #define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE BIT(6)
210 #define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE BIT(7)
211 #define MVNETA_GMAC_AN_COMPLETE BIT(11)
212 #define MVNETA_GMAC_SYNC_OK BIT(14)
213 #define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c
214 #define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0)
215 #define MVNETA_GMAC_FORCE_LINK_PASS BIT(1)
216 #define MVNETA_GMAC_INBAND_AN_ENABLE BIT(2)
217 #define MVNETA_GMAC_AN_BYPASS_ENABLE BIT(3)
218 #define MVNETA_GMAC_INBAND_RESTART_AN BIT(4)
219 #define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5)
220 #define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6)
221 #define MVNETA_GMAC_AN_SPEED_EN BIT(7)
222 #define MVNETA_GMAC_CONFIG_FLOW_CTRL BIT(8)
223 #define MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL BIT(9)
224 #define MVNETA_GMAC_AN_FLOW_CTRL_EN BIT(11)
225 #define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
226 #define MVNETA_GMAC_AN_DUPLEX_EN BIT(13)
227 #define MVNETA_GMAC_CTRL_4 0x2c90
228 #define MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE BIT(1)
229 #define MVNETA_MIB_COUNTERS_BASE 0x3000
230 #define MVNETA_MIB_LATE_COLLISION 0x7c
231 #define MVNETA_DA_FILT_SPEC_MCAST 0x3400
232 #define MVNETA_DA_FILT_OTH_MCAST 0x3500
233 #define MVNETA_DA_FILT_UCAST_BASE 0x3600
234 #define MVNETA_TXQ_BASE_ADDR_REG(q) (0x3c00 + ((q) << 2))
235 #define MVNETA_TXQ_SIZE_REG(q) (0x3c20 + ((q) << 2))
236 #define MVNETA_TXQ_SENT_THRESH_ALL_MASK 0x3fff0000
237 #define MVNETA_TXQ_SENT_THRESH_MASK(coal) ((coal) << 16)
238 #define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2))
239 #define MVNETA_TXQ_DEC_SENT_SHIFT 16
240 #define MVNETA_TXQ_DEC_SENT_MASK 0xff
241 #define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2))
242 #define MVNETA_TXQ_SENT_DESC_SHIFT 16
243 #define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000
244 #define MVNETA_PORT_TX_RESET 0x3cf0
245 #define MVNETA_PORT_TX_DMA_RESET BIT(0)
246 #define MVNETA_TX_MTU 0x3e0c
247 #define MVNETA_TX_TOKEN_SIZE 0x3e14
248 #define MVNETA_TX_TOKEN_SIZE_MAX 0xffffffff
249 #define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2))
250 #define MVNETA_TXQ_TOKEN_SIZE_MAX 0x7fffffff
251
252 #define MVNETA_LPI_CTRL_0 0x2cc0
253 #define MVNETA_LPI_CTRL_1 0x2cc4
254 #define MVNETA_LPI_REQUEST_ENABLE BIT(0)
255 #define MVNETA_LPI_CTRL_2 0x2cc8
256 #define MVNETA_LPI_STATUS 0x2ccc
257
258 #define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
259
260 /* Descriptor ring Macros */
261 #define MVNETA_QUEUE_NEXT_DESC(q, index) \
262 (((index) < (q)->last_desc) ? ((index) + 1) : 0)
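/* Example (assuming a 512-entry ring, so last_desc == 511):
 * MVNETA_QUEUE_NEXT_DESC(q, 10) evaluates to 11, while
 * MVNETA_QUEUE_NEXT_DESC(q, 511) wraps back to 0.
 */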
263
264 /* Various constants */
265
266 /* Coalescing */
267 #define MVNETA_TXDONE_COAL_PKTS 0 /* interrupt per packet */
268 #define MVNETA_RX_COAL_PKTS 32
269 #define MVNETA_RX_COAL_USEC 100
270
271 /* The two-byte Marvell header. It either contains a special value
272 * used by Marvell switches when a specific hardware mode is enabled
273 * (not supported by this driver) or is automatically filled with
274 * zeroes on the RX side. Since those two bytes sit in front of the
275 * Ethernet header, they automatically align the IP header on a
276 * 4-byte boundary: the hardware skips those two bytes on its
277 * own.
278 */
279 #define MVNETA_MH_SIZE 2
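/* Alignment example: ETH_HLEN (14 bytes) plus the 2-byte Marvell header
 * gives a 16-byte L2 prefix, so the IP header that follows starts on a
 * 4-byte boundary without any extra padding.
 */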
280
281 #define MVNETA_VLAN_TAG_LEN 4
282
283 #define MVNETA_TX_CSUM_DEF_SIZE 1600
284 #define MVNETA_TX_CSUM_MAX_SIZE 9800
285 #define MVNETA_ACC_MODE_EXT1 1
286 #define MVNETA_ACC_MODE_EXT2 2
287
288 #define MVNETA_MAX_DECODE_WIN 6
289
290 /* Timeout constants */
291 #define MVNETA_TX_DISABLE_TIMEOUT_MSEC 1000
292 #define MVNETA_RX_DISABLE_TIMEOUT_MSEC 1000
293 #define MVNETA_TX_FIFO_EMPTY_TIMEOUT 10000
294
295 #define MVNETA_TX_MTU_MAX 0x3ffff
296
297 /* The RSS lookup table actually has 256 entries but we do not use
298 * them yet
299 */
300 #define MVNETA_RSS_LU_TABLE_SIZE 1
301
302 /* Max number of Rx descriptors */
303 #define MVNETA_MAX_RXD 512
304
305 /* Max number of Tx descriptors */
306 #define MVNETA_MAX_TXD 1024
307
308 /* Max number of allowed TCP segments for software TSO */
309 #define MVNETA_MAX_TSO_SEGS 100
310
311 #define MVNETA_MAX_SKB_DESCS (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
312
313 /* descriptor aligned size */
314 #define MVNETA_DESC_ALIGNED_SIZE 32
315
316 /* Number of bytes to be taken into account by the HW when writing incoming
317 * data into the buffers. It is needed in case NET_SKB_PAD exceeds the maximum
318 * packet offset supported by the MVNETA_RXQ_CONFIG_REG(q) registers.
319 */
320 #define MVNETA_RX_PKT_OFFSET_CORRECTION 64
321
322 #define MVNETA_RX_PKT_SIZE(mtu) \
323 ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
324 ETH_HLEN + ETH_FCS_LEN, \
325 cache_line_size())
326
327 #define MVNETA_SKB_HEADROOM max(XDP_PACKET_HEADROOM, NET_SKB_PAD)
328 #define MVNETA_SKB_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info) + \
329 MVNETA_SKB_HEADROOM))
330 #define MVNETA_SKB_SIZE(len) (SKB_DATA_ALIGN(len) + MVNETA_SKB_PAD)
331 #define MVNETA_MAX_RX_BUF_SIZE (PAGE_SIZE - MVNETA_SKB_PAD)
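/* Sizing sketch (assuming 4 KiB pages and a 64-byte cache line):
 * MVNETA_RX_PKT_SIZE(1500) = ALIGN(1500 + 2 + 4 + 14 + 4, 64) = 1536,
 * while MVNETA_MAX_RX_BUF_SIZE leaves PAGE_SIZE minus the headroom and
 * the skb_shared_info tail for actual packet data.
 */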
332
333 #define IS_TSO_HEADER(txq, addr) \
334 ((addr >= txq->tso_hdrs_phys) && \
335 (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
336
337 #define MVNETA_RX_GET_BM_POOL_ID(rxd) \
338 (((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT)
339
340 enum {
341 ETHTOOL_STAT_EEE_WAKEUP,
342 ETHTOOL_STAT_SKB_ALLOC_ERR,
343 ETHTOOL_STAT_REFILL_ERR,
344 ETHTOOL_XDP_REDIRECT,
345 ETHTOOL_XDP_PASS,
346 ETHTOOL_XDP_DROP,
347 ETHTOOL_XDP_TX,
348 ETHTOOL_XDP_TX_ERR,
349 ETHTOOL_XDP_XMIT,
350 ETHTOOL_XDP_XMIT_ERR,
351 ETHTOOL_MAX_STATS,
352 };
353
354 struct mvneta_statistic {
355 unsigned short offset;
356 unsigned short type;
357 const char name[ETH_GSTRING_LEN];
358 };
359
360 #define T_REG_32 32
361 #define T_REG_64 64
362 #define T_SW 1
363
364 #define MVNETA_XDP_PASS 0
365 #define MVNETA_XDP_DROPPED BIT(0)
366 #define MVNETA_XDP_TX BIT(1)
367 #define MVNETA_XDP_REDIR BIT(2)
368
369 static const struct mvneta_statistic mvneta_statistics[] = {
370 { 0x3000, T_REG_64, "good_octets_received", },
371 { 0x3010, T_REG_32, "good_frames_received", },
372 { 0x3008, T_REG_32, "bad_octets_received", },
373 { 0x3014, T_REG_32, "bad_frames_received", },
374 { 0x3018, T_REG_32, "broadcast_frames_received", },
375 { 0x301c, T_REG_32, "multicast_frames_received", },
376 { 0x3050, T_REG_32, "unrec_mac_control_received", },
377 { 0x3058, T_REG_32, "good_fc_received", },
378 { 0x305c, T_REG_32, "bad_fc_received", },
379 { 0x3060, T_REG_32, "undersize_received", },
380 { 0x3064, T_REG_32, "fragments_received", },
381 { 0x3068, T_REG_32, "oversize_received", },
382 { 0x306c, T_REG_32, "jabber_received", },
383 { 0x3070, T_REG_32, "mac_receive_error", },
384 { 0x3074, T_REG_32, "bad_crc_event", },
385 { 0x3078, T_REG_32, "collision", },
386 { 0x307c, T_REG_32, "late_collision", },
387 { 0x2484, T_REG_32, "rx_discard", },
388 { 0x2488, T_REG_32, "rx_overrun", },
389 { 0x3020, T_REG_32, "frames_64_octets", },
390 { 0x3024, T_REG_32, "frames_65_to_127_octets", },
391 { 0x3028, T_REG_32, "frames_128_to_255_octets", },
392 { 0x302c, T_REG_32, "frames_256_to_511_octets", },
393 { 0x3030, T_REG_32, "frames_512_to_1023_octets", },
394 { 0x3034, T_REG_32, "frames_1024_to_max_octets", },
395 { 0x3038, T_REG_64, "good_octets_sent", },
396 { 0x3040, T_REG_32, "good_frames_sent", },
397 { 0x3044, T_REG_32, "excessive_collision", },
398 { 0x3048, T_REG_32, "multicast_frames_sent", },
399 { 0x304c, T_REG_32, "broadcast_frames_sent", },
400 { 0x3054, T_REG_32, "fc_sent", },
401 { 0x300c, T_REG_32, "internal_mac_transmit_err", },
402 { ETHTOOL_STAT_EEE_WAKEUP, T_SW, "eee_wakeup_errors", },
403 { ETHTOOL_STAT_SKB_ALLOC_ERR, T_SW, "skb_alloc_errors", },
404 { ETHTOOL_STAT_REFILL_ERR, T_SW, "refill_errors", },
405 { ETHTOOL_XDP_REDIRECT, T_SW, "rx_xdp_redirect", },
406 { ETHTOOL_XDP_PASS, T_SW, "rx_xdp_pass", },
407 { ETHTOOL_XDP_DROP, T_SW, "rx_xdp_drop", },
408 { ETHTOOL_XDP_TX, T_SW, "rx_xdp_tx", },
409 { ETHTOOL_XDP_TX_ERR, T_SW, "rx_xdp_tx_errors", },
410 { ETHTOOL_XDP_XMIT, T_SW, "tx_xdp_xmit", },
411 { ETHTOOL_XDP_XMIT_ERR, T_SW, "tx_xdp_xmit_errors", },
412 };
413
414 struct mvneta_stats {
415 u64 rx_packets;
416 u64 rx_bytes;
417 u64 tx_packets;
418 u64 tx_bytes;
419 /* xdp */
420 u64 xdp_redirect;
421 u64 xdp_pass;
422 u64 xdp_drop;
423 u64 xdp_xmit;
424 u64 xdp_xmit_err;
425 u64 xdp_tx;
426 u64 xdp_tx_err;
427 };
428
429 struct mvneta_ethtool_stats {
430 struct mvneta_stats ps;
431 u64 skb_alloc_error;
432 u64 refill_error;
433 };
434
435 struct mvneta_pcpu_stats {
436 struct u64_stats_sync syncp;
437
438 struct mvneta_ethtool_stats es;
439 u64 rx_dropped;
440 u64 rx_errors;
441 };
442
443 struct mvneta_pcpu_port {
444 /* Pointer to the shared port */
445 struct mvneta_port *pp;
446
447 /* Pointer to the CPU-local NAPI struct */
448 struct napi_struct napi;
449
450 /* Cause of the previous interrupt */
451 u32 cause_rx_tx;
452 };
453
454 struct mvneta_port {
455 u8 id;
456 struct mvneta_pcpu_port __percpu *ports;
457 struct mvneta_pcpu_stats __percpu *stats;
458
459 int pkt_size;
460 void __iomem *base;
461 struct mvneta_rx_queue *rxqs;
462 struct mvneta_tx_queue *txqs;
463 struct net_device *dev;
464 struct hlist_node node_online;
465 struct hlist_node node_dead;
466 int rxq_def;
467 /* Protect the access to the percpu interrupt registers,
468 * ensuring that the configuration remains coherent.
469 */
470 spinlock_t lock;
471 bool is_stopped;
472
473 u32 cause_rx_tx;
474 struct napi_struct napi;
475
476 struct bpf_prog *xdp_prog;
477
478 /* Core clock */
479 struct clk *clk;
480 /* AXI clock */
481 struct clk *clk_bus;
482 u8 mcast_count[256];
483 u16 tx_ring_size;
484 u16 rx_ring_size;
485
486 phy_interface_t phy_interface;
487 struct device_node *dn;
488 unsigned int tx_csum_limit;
489 struct phylink *phylink;
490 struct phylink_config phylink_config;
491 struct phy *comphy;
492
493 struct mvneta_bm *bm_priv;
494 struct mvneta_bm_pool *pool_long;
495 struct mvneta_bm_pool *pool_short;
496 int bm_win_id;
497
498 bool eee_enabled;
499 bool eee_active;
500 bool tx_lpi_enabled;
501
502 u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];
503
504 u32 indir[MVNETA_RSS_LU_TABLE_SIZE];
505
506 /* Flags for special SoC configurations */
507 bool neta_armada3700;
508 u16 rx_offset_correction;
509 const struct mbus_dram_target_info *dram_target_info;
510 };
511
512 /* The mvneta_tx_desc and mvneta_rx_desc structures describe the
513 * layout of the transmit and reception DMA descriptors, and their
514 * layout is therefore defined by the hardware design
515 */
516
517 #define MVNETA_TX_L3_OFF_SHIFT 0
518 #define MVNETA_TX_IP_HLEN_SHIFT 8
519 #define MVNETA_TX_L4_UDP BIT(16)
520 #define MVNETA_TX_L3_IP6 BIT(17)
521 #define MVNETA_TXD_IP_CSUM BIT(18)
522 #define MVNETA_TXD_Z_PAD BIT(19)
523 #define MVNETA_TXD_L_DESC BIT(20)
524 #define MVNETA_TXD_F_DESC BIT(21)
525 #define MVNETA_TXD_FLZ_DESC (MVNETA_TXD_Z_PAD | \
526 MVNETA_TXD_L_DESC | \
527 MVNETA_TXD_F_DESC)
528 #define MVNETA_TX_L4_CSUM_FULL BIT(30)
529 #define MVNETA_TX_L4_CSUM_NOT BIT(31)
530
531 #define MVNETA_RXD_ERR_CRC 0x0
532 #define MVNETA_RXD_BM_POOL_SHIFT 13
533 #define MVNETA_RXD_BM_POOL_MASK (BIT(13) | BIT(14))
534 #define MVNETA_RXD_ERR_SUMMARY BIT(16)
535 #define MVNETA_RXD_ERR_OVERRUN BIT(17)
536 #define MVNETA_RXD_ERR_LEN BIT(18)
537 #define MVNETA_RXD_ERR_RESOURCE (BIT(17) | BIT(18))
538 #define MVNETA_RXD_ERR_CODE_MASK (BIT(17) | BIT(18))
539 #define MVNETA_RXD_L3_IP4 BIT(25)
540 #define MVNETA_RXD_LAST_DESC BIT(26)
541 #define MVNETA_RXD_FIRST_DESC BIT(27)
542 #define MVNETA_RXD_FIRST_LAST_DESC (MVNETA_RXD_FIRST_DESC | \
543 MVNETA_RXD_LAST_DESC)
544 #define MVNETA_RXD_L4_CSUM_OK BIT(30)
545
546 #if defined(__LITTLE_ENDIAN)
547 struct mvneta_tx_desc {
548 u32 command; /* Options used by HW for packet transmitting.*/
549 u16 reserved1; /* csum_l4 (for future use) */
550 u16 data_size; /* Data size of transmitted packet in bytes */
551 u32 buf_phys_addr; /* Physical addr of transmitted buffer */
552 u32 reserved2; /* hw_cmd - (for future use, PMT) */
553 u32 reserved3[4]; /* Reserved - (for future use) */
554 };
555
556 struct mvneta_rx_desc {
557 u32 status; /* Info about received packet */
558 u16 reserved1; /* pnc_info - (for future use, PnC) */
559 u16 data_size; /* Size of received packet in bytes */
560
561 u32 buf_phys_addr; /* Physical address of the buffer */
562 u32 reserved2; /* pnc_flow_id (for future use, PnC) */
563
564 u32 buf_cookie; /* cookie for access to RX buffer in rx path */
565 u16 reserved3; /* prefetch_cmd, for future use */
566 u16 reserved4; /* csum_l4 - (for future use, PnC) */
567
568 u32 reserved5; /* pnc_extra PnC (for future use, PnC) */
569 u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */
570 };
571 #else
572 struct mvneta_tx_desc {
573 u16 data_size; /* Data size of transmitted packet in bytes */
574 u16 reserved1; /* csum_l4 (for future use) */
575 u32 command; /* Options used by HW for packet transmitting.*/
576 u32 reserved2; /* hw_cmd - (for future use, PMT) */
577 u32 buf_phys_addr; /* Physical addr of transmitted buffer */
578 u32 reserved3[4]; /* Reserved - (for future use) */
579 };
580
581 struct mvneta_rx_desc {
582 u16 data_size; /* Size of received packet in bytes */
583 u16 reserved1; /* pnc_info - (for future use, PnC) */
584 u32 status; /* Info about received packet */
585
586 u32 reserved2; /* pnc_flow_id (for future use, PnC) */
587 u32 buf_phys_addr; /* Physical address of the buffer */
588
589 u16 reserved4; /* csum_l4 - (for future use, PnC) */
590 u16 reserved3; /* prefetch_cmd, for future use */
591 u32 buf_cookie; /* cookie for access to RX buffer in rx path */
592
593 u32 reserved5; /* pnc_extra PnC (for future use, PnC) */
594 u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */
595 };
596 #endif
597
598 enum mvneta_tx_buf_type {
599 MVNETA_TYPE_SKB,
600 MVNETA_TYPE_XDP_TX,
601 MVNETA_TYPE_XDP_NDO,
602 };
603
604 struct mvneta_tx_buf {
605 enum mvneta_tx_buf_type type;
606 union {
607 struct xdp_frame *xdpf;
608 struct sk_buff *skb;
609 };
610 };
611
612 struct mvneta_tx_queue {
613 /* Number of this TX queue, in the range 0-7 */
614 u8 id;
615
616 /* Number of TX DMA descriptors in the descriptor ring */
617 int size;
618
619 /* Number of currently used TX DMA descriptor in the
620 * descriptor ring
621 */
622 int count;
623 int pending;
624 int tx_stop_threshold;
625 int tx_wake_threshold;
626
627 /* Array of transmitted buffers */
628 struct mvneta_tx_buf *buf;
629
630 /* Index of last TX DMA descriptor that was inserted */
631 int txq_put_index;
632
633 /* Index of the TX DMA descriptor to be cleaned up */
634 int txq_get_index;
635
636 u32 done_pkts_coal;
637
638 /* Virtual address of the TX DMA descriptors array */
639 struct mvneta_tx_desc *descs;
640
641 /* DMA address of the TX DMA descriptors array */
642 dma_addr_t descs_phys;
643
644 /* Index of the last TX DMA descriptor */
645 int last_desc;
646
647 /* Index of the next TX DMA descriptor to process */
648 int next_desc_to_proc;
649
650 /* DMA buffers for TSO headers */
651 char *tso_hdrs;
652
653 /* DMA address of TSO headers */
654 dma_addr_t tso_hdrs_phys;
655
656 /* Affinity mask for CPUs*/
657 cpumask_t affinity_mask;
658 };
659
660 struct mvneta_rx_queue {
661 /* rx queue number, in the range 0-7 */
662 u8 id;
663
664 /* num of rx descriptors in the rx descriptor ring */
665 int size;
666
667 u32 pkts_coal;
668 u32 time_coal;
669
670 /* page_pool */
671 struct page_pool *page_pool;
672 struct xdp_rxq_info xdp_rxq;
673
674 /* Virtual address of the RX buffer */
675 void **buf_virt_addr;
676
677 /* Virtual address of the RX DMA descriptors array */
678 struct mvneta_rx_desc *descs;
679
680 /* DMA address of the RX DMA descriptors array */
681 dma_addr_t descs_phys;
682
683 /* Index of the last RX DMA descriptor */
684 int last_desc;
685
686 /* Index of the next RX DMA descriptor to process */
687 int next_desc_to_proc;
688
689 /* Index of first RX DMA descriptor to refill */
690 int first_to_refill;
691 u32 refill_num;
692
693 /* pointer to uncomplete skb buffer */
694 struct sk_buff *skb;
695 int left_size;
696 };
697
698 static enum cpuhp_state online_hpstate;
699 /* The hardware supports eight (8) RX queues and eight (8) TX queues;
700 * allocate all of them by default.
701 */
702 static int rxq_number = 8;
703 static int txq_number = 8;
704
705 static int rxq_def;
706
707 static int rx_copybreak __read_mostly = 256;
708
709 /* HW BM needs each port to be identified by a unique ID */
710 static int global_port_id;
711
712 #define MVNETA_DRIVER_NAME "mvneta"
713 #define MVNETA_DRIVER_VERSION "1.0"
714
715 /* Utility/helper methods */
716
717 /* Write helper method */
718 static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
719 {
720 writel(data, pp->base + offset);
721 }
722
723 /* Read helper method */
724 static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
725 {
726 return readl(pp->base + offset);
727 }
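/* Typical usage is a read-modify-write sequence, for example (sketch):
 *
 *	u32 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
 *	val |= MVNETA_GMAC0_PORT_ENABLE;
 *	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
 *
 * which is exactly the pattern used by mvneta_port_enable() below.
 */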
728
729 /* Increment txq get counter */
730 static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
731 {
732 txq->txq_get_index++;
733 if (txq->txq_get_index == txq->size)
734 txq->txq_get_index = 0;
735 }
736
737 /* Increment txq put counter */
738 static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
739 {
740 txq->txq_put_index++;
741 if (txq->txq_put_index == txq->size)
742 txq->txq_put_index = 0;
743 }
744
745
746 /* Clear all MIB counters */
747 static void mvneta_mib_counters_clear(struct mvneta_port *pp)
748 {
749 int i;
750 u32 dummy;
751
752 /* Perform dummy reads from MIB counters */
753 for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
754 dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
755 dummy = mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
756 dummy = mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
757 }
758
759 /* Get System Network Statistics */
760 static void
761 mvneta_get_stats64(struct net_device *dev,
762 struct rtnl_link_stats64 *stats)
763 {
764 struct mvneta_port *pp = netdev_priv(dev);
765 unsigned int start;
766 int cpu;
767
768 for_each_possible_cpu(cpu) {
769 struct mvneta_pcpu_stats *cpu_stats;
770 u64 rx_packets;
771 u64 rx_bytes;
772 u64 rx_dropped;
773 u64 rx_errors;
774 u64 tx_packets;
775 u64 tx_bytes;
776
777 cpu_stats = per_cpu_ptr(pp->stats, cpu);
778 do {
779 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
780 rx_packets = cpu_stats->es.ps.rx_packets;
781 rx_bytes = cpu_stats->es.ps.rx_bytes;
782 rx_dropped = cpu_stats->rx_dropped;
783 rx_errors = cpu_stats->rx_errors;
784 tx_packets = cpu_stats->es.ps.tx_packets;
785 tx_bytes = cpu_stats->es.ps.tx_bytes;
786 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
787
788 stats->rx_packets += rx_packets;
789 stats->rx_bytes += rx_bytes;
790 stats->rx_dropped += rx_dropped;
791 stats->rx_errors += rx_errors;
792 stats->tx_packets += tx_packets;
793 stats->tx_bytes += tx_bytes;
794 }
795
796 stats->tx_dropped = dev->stats.tx_dropped;
797 }
798
799 /* Rx descriptors helper methods */
800
801 /* Check whether the RX descriptor with this status is both the first
802 * and the last descriptor of the RX packet. Each RX packet is currently
803 * received through a single RX descriptor, so an RX descriptor that
804 * does not have both its first and last bits set is an error.
805 */
806 static int mvneta_rxq_desc_is_first_last(u32 status)
807 {
808 return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
809 MVNETA_RXD_FIRST_LAST_DESC;
810 }
811
812 /* Add number of descriptors ready to receive new packets */
813 static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
814 struct mvneta_rx_queue *rxq,
815 int ndescs)
816 {
817 /* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
818 * be added at once
819 */
820 while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
821 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
822 (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
823 MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
824 ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
825 }
826
827 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
828 (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
829 }
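/* Example: refilling 300 descriptors is split into two register writes,
 * one adding 255 and a final one adding the remaining 45, because the
 * ADD_NON_OCCUPIED field only holds values up to 255.
 */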
830
831 /* Get number of RX descriptors occupied by received packets */
832 static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
833 struct mvneta_rx_queue *rxq)
834 {
835 u32 val;
836
837 val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
838 return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
839 }
840
841 /* Update the number of RX descriptors; called upon return from the RX
842 * path or from mvneta_rxq_drop_pkts().
843 */
844 static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
845 struct mvneta_rx_queue *rxq,
846 int rx_done, int rx_filled)
847 {
848 u32 val;
849
850 if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
851 val = rx_done |
852 (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
853 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
854 return;
855 }
856
857 /* Only 255 descriptors can be added at once */
858 while ((rx_done > 0) || (rx_filled > 0)) {
859 if (rx_done <= 0xff) {
860 val = rx_done;
861 rx_done = 0;
862 } else {
863 val = 0xff;
864 rx_done -= 0xff;
865 }
866 if (rx_filled <= 0xff) {
867 val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
868 rx_filled = 0;
869 } else {
870 val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
871 rx_filled -= 0xff;
872 }
873 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
874 }
875 }
876
877 /* Get pointer to next RX descriptor to be processed by SW */
878 static struct mvneta_rx_desc *
879 mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
880 {
881 int rx_desc = rxq->next_desc_to_proc;
882
883 rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
884 prefetch(rxq->descs + rxq->next_desc_to_proc);
885 return rxq->descs + rx_desc;
886 }
887
888 /* Change maximum receive size of the port. */
889 static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
890 {
891 u32 val;
892
893 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
894 val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
895 val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
896 MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
897 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
898 }
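/* Note: the hardware field is programmed as (max_rx_size - MVNETA_MH_SIZE) / 2,
 * i.e. it appears to count 2-byte units of the frame excluding the Marvell
 * header.
 */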
899
900
901 /* Set rx queue offset */
902 static void mvneta_rxq_offset_set(struct mvneta_port *pp,
903 struct mvneta_rx_queue *rxq,
904 int offset)
905 {
906 u32 val;
907
908 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
909 val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;
910
911 /* Offset is programmed in units of 8 bytes (hence the shift by 3) */
912 val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
913 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
914 }
915
916
917 /* Tx descriptors helper methods */
918
919 /* Update HW with number of TX descriptors to be sent */
920 static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
921 struct mvneta_tx_queue *txq,
922 int pend_desc)
923 {
924 u32 val;
925
926 pend_desc += txq->pending;
927
928 /* Only 255 Tx descriptors can be added at once */
929 do {
930 val = min(pend_desc, 255);
931 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
932 pend_desc -= val;
933 } while (pend_desc > 0);
934 txq->pending = 0;
935 }
936
937 /* Get pointer to next TX descriptor to be processed (send) by HW */
938 static struct mvneta_tx_desc *
939 mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
940 {
941 int tx_desc = txq->next_desc_to_proc;
942
943 txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
944 return txq->descs + tx_desc;
945 }
946
947 /* Release the last allocated TX descriptor. Useful to handle DMA
948 * mapping failures in the TX path.
949 */
950 static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
951 {
952 if (txq->next_desc_to_proc == 0)
953 txq->next_desc_to_proc = txq->last_desc - 1;
954 else
955 txq->next_desc_to_proc--;
956 }
957
958 /* Set rxq buf size */
959 static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
960 struct mvneta_rx_queue *rxq,
961 int buf_size)
962 {
963 u32 val;
964
965 val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));
966
967 val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
968 val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);
969
970 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
971 }
972
973 /* Disable buffer management (BM) */
974 static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
975 struct mvneta_rx_queue *rxq)
976 {
977 u32 val;
978
979 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
980 val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
981 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
982 }
983
984 /* Enable buffer management (BM) */
985 static void mvneta_rxq_bm_enable(struct mvneta_port *pp,
986 struct mvneta_rx_queue *rxq)
987 {
988 u32 val;
989
990 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
991 val |= MVNETA_RXQ_HW_BUF_ALLOC;
992 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
993 }
994
995 /* Notify HW about port's assignment of pool for bigger packets */
996 static void mvneta_rxq_long_pool_set(struct mvneta_port *pp,
997 struct mvneta_rx_queue *rxq)
998 {
999 u32 val;
1000
1001 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
1002 val &= ~MVNETA_RXQ_LONG_POOL_ID_MASK;
1003 val |= (pp->pool_long->id << MVNETA_RXQ_LONG_POOL_ID_SHIFT);
1004
1005 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
1006 }
1007
1008 /* Notify HW about port's assignment of pool for smaller packets */
1009 static void mvneta_rxq_short_pool_set(struct mvneta_port *pp,
1010 struct mvneta_rx_queue *rxq)
1011 {
1012 u32 val;
1013
1014 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
1015 val &= ~MVNETA_RXQ_SHORT_POOL_ID_MASK;
1016 val |= (pp->pool_short->id << MVNETA_RXQ_SHORT_POOL_ID_SHIFT);
1017
1018 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
1019 }
1020
1021 /* Set port's receive buffer size for assigned BM pool */
1022 static inline void mvneta_bm_pool_bufsize_set(struct mvneta_port *pp,
1023 int buf_size,
1024 u8 pool_id)
1025 {
1026 u32 val;
1027
1028 if (!IS_ALIGNED(buf_size, 8)) {
1029 dev_warn(pp->dev->dev.parent,
1030 "illegal buf_size value %d, round to %d\n",
1031 buf_size, ALIGN(buf_size, 8));
1032 buf_size = ALIGN(buf_size, 8);
1033 }
1034
1035 val = mvreg_read(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id));
1036 val |= buf_size & MVNETA_PORT_POOL_BUFFER_SZ_MASK;
1037 mvreg_write(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id), val);
1038 }
1039
1040 /* Configure an MBUS window in order to enable access to the BM internal SRAM */
1041 static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize,
1042 u8 target, u8 attr)
1043 {
1044 u32 win_enable, win_protect;
1045 int i;
1046
1047 win_enable = mvreg_read(pp, MVNETA_BASE_ADDR_ENABLE);
1048
1049 if (pp->bm_win_id < 0) {
1050 /* Find first not occupied window */
1051 for (i = 0; i < MVNETA_MAX_DECODE_WIN; i++) {
1052 if (win_enable & (1 << i)) {
1053 pp->bm_win_id = i;
1054 break;
1055 }
1056 }
1057 if (i == MVNETA_MAX_DECODE_WIN)
1058 return -ENOMEM;
1059 } else {
1060 i = pp->bm_win_id;
1061 }
1062
1063 mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
1064 mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
1065
1066 if (i < 4)
1067 mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
1068
1069 mvreg_write(pp, MVNETA_WIN_BASE(i), (base & 0xffff0000) |
1070 (attr << 8) | target);
1071
1072 mvreg_write(pp, MVNETA_WIN_SIZE(i), (wsize - 1) & 0xffff0000);
1073
1074 win_protect = mvreg_read(pp, MVNETA_ACCESS_PROTECT_ENABLE);
1075 win_protect |= 3 << (2 * i);
1076 mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
1077
1078 win_enable &= ~(1 << i);
1079 mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
1080
1081 return 0;
1082 }
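/* Note: in MVNETA_BASE_ADDR_ENABLE a set bit appears to mean the window is
 * still disabled, which is why a set bit is treated as "not occupied" above
 * and why the bit is cleared at the end to enable the newly programmed
 * window.
 */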
1083
1084 static int mvneta_bm_port_mbus_init(struct mvneta_port *pp)
1085 {
1086 u32 wsize;
1087 u8 target, attr;
1088 int err;
1089
1090 /* Get BM window information */
1091 err = mvebu_mbus_get_io_win_info(pp->bm_priv->bppi_phys_addr, &wsize,
1092 &target, &attr);
1093 if (err < 0)
1094 return err;
1095
1096 pp->bm_win_id = -1;
1097
1098 /* Open NETA -> BM window */
1099 err = mvneta_mbus_io_win_set(pp, pp->bm_priv->bppi_phys_addr, wsize,
1100 target, attr);
1101 if (err < 0) {
1102 netdev_info(pp->dev, "fail to configure mbus window to BM\n");
1103 return err;
1104 }
1105 return 0;
1106 }
1107
1108 /* Assign and initialize pools for the port. In case of failure, the
1109 * buffer manager will remain disabled for the current port.
1110 */
1111 static int mvneta_bm_port_init(struct platform_device *pdev,
1112 struct mvneta_port *pp)
1113 {
1114 struct device_node *dn = pdev->dev.of_node;
1115 u32 long_pool_id, short_pool_id;
1116
1117 if (!pp->neta_armada3700) {
1118 int ret;
1119
1120 ret = mvneta_bm_port_mbus_init(pp);
1121 if (ret)
1122 return ret;
1123 }
1124
1125 if (of_property_read_u32(dn, "bm,pool-long", &long_pool_id)) {
1126 netdev_info(pp->dev, "missing long pool id\n");
1127 return -EINVAL;
1128 }
1129
1130 /* Create port's long pool depending on mtu */
1131 pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id,
1132 MVNETA_BM_LONG, pp->id,
1133 MVNETA_RX_PKT_SIZE(pp->dev->mtu));
1134 if (!pp->pool_long) {
1135 netdev_info(pp->dev, "fail to obtain long pool for port\n");
1136 return -ENOMEM;
1137 }
1138
1139 pp->pool_long->port_map |= 1 << pp->id;
1140
1141 mvneta_bm_pool_bufsize_set(pp, pp->pool_long->buf_size,
1142 pp->pool_long->id);
1143
1144 /* If the short pool id is not defined, assume a single pool is used */
1145 if (of_property_read_u32(dn, "bm,pool-short", &short_pool_id))
1146 short_pool_id = long_pool_id;
1147
1148 /* Create port's short pool */
1149 pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id,
1150 MVNETA_BM_SHORT, pp->id,
1151 MVNETA_BM_SHORT_PKT_SIZE);
1152 if (!pp->pool_short) {
1153 netdev_info(pp->dev, "fail to obtain short pool for port\n");
1154 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
1155 return -ENOMEM;
1156 }
1157
1158 if (short_pool_id != long_pool_id) {
1159 pp->pool_short->port_map |= 1 << pp->id;
1160 mvneta_bm_pool_bufsize_set(pp, pp->pool_short->buf_size,
1161 pp->pool_short->id);
1162 }
1163
1164 return 0;
1165 }
1166
1167 /* Update settings of a pool for bigger packets */
1168 static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu)
1169 {
1170 struct mvneta_bm_pool *bm_pool = pp->pool_long;
1171 struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool;
1172 int num;
1173
1174 /* Release all buffers from long pool */
1175 mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id);
1176 if (hwbm_pool->buf_num) {
1177 WARN(1, "cannot free all buffers in pool %d\n",
1178 bm_pool->id);
1179 goto bm_mtu_err;
1180 }
1181
1182 bm_pool->pkt_size = MVNETA_RX_PKT_SIZE(mtu);
1183 bm_pool->buf_size = MVNETA_RX_BUF_SIZE(bm_pool->pkt_size);
1184 hwbm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
1185 SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size));
1186
1187 /* Fill entire long pool */
1188 num = hwbm_pool_add(hwbm_pool, hwbm_pool->size);
1189 if (num != hwbm_pool->size) {
1190 WARN(1, "pool %d: %d of %d allocated\n",
1191 bm_pool->id, num, hwbm_pool->size);
1192 goto bm_mtu_err;
1193 }
1194 mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id);
1195
1196 return;
1197
1198 bm_mtu_err:
1199 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
1200 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id);
1201
1202 pp->bm_priv = NULL;
1203 pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
1204 mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1);
1205 netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n");
1206 }
1207
1208 /* Start the Ethernet port RX and TX activity */
1209 static void mvneta_port_up(struct mvneta_port *pp)
1210 {
1211 int queue;
1212 u32 q_map;
1213
1214 /* Enable all initialized TXs. */
1215 q_map = 0;
1216 for (queue = 0; queue < txq_number; queue++) {
1217 struct mvneta_tx_queue *txq = &pp->txqs[queue];
1218 if (txq->descs)
1219 q_map |= (1 << queue);
1220 }
1221 mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
1222
1223 q_map = 0;
1224 /* Enable all initialized RXQs. */
1225 for (queue = 0; queue < rxq_number; queue++) {
1226 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
1227
1228 if (rxq->descs)
1229 q_map |= (1 << queue);
1230 }
1231 mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
1232 }
1233
1234 /* Stop the Ethernet port activity */
1235 static void mvneta_port_down(struct mvneta_port *pp)
1236 {
1237 u32 val;
1238 int count;
1239
1240 /* Stop Rx port activity. Check port Rx activity. */
1241 val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;
1242
1243 /* Issue stop command for active channels only */
1244 if (val != 0)
1245 mvreg_write(pp, MVNETA_RXQ_CMD,
1246 val << MVNETA_RXQ_DISABLE_SHIFT);
1247
1248 /* Wait for all Rx activity to terminate. */
1249 count = 0;
1250 do {
1251 if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
1252 netdev_warn(pp->dev,
1253 "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n",
1254 val);
1255 break;
1256 }
1257 mdelay(1);
1258
1259 val = mvreg_read(pp, MVNETA_RXQ_CMD);
1260 } while (val & MVNETA_RXQ_ENABLE_MASK);
1261
1262 /* Stop Tx port activity. Check port Tx activity. Issue stop
1263 * command for active channels only
1264 */
1265 val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;
1266
1267 if (val != 0)
1268 mvreg_write(pp, MVNETA_TXQ_CMD,
1269 (val << MVNETA_TXQ_DISABLE_SHIFT));
1270
1271 /* Wait for all Tx activity to terminate. */
1272 count = 0;
1273 do {
1274 if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
1275 netdev_warn(pp->dev,
1276 "TIMEOUT for TX stopped status=0x%08x\n",
1277 val);
1278 break;
1279 }
1280 mdelay(1);
1281
1282 /* Check TX Command reg that all Txqs are stopped */
1283 val = mvreg_read(pp, MVNETA_TXQ_CMD);
1284
1285 } while (val & MVNETA_TXQ_ENABLE_MASK);
1286
1287 /* Double check to verify that TX FIFO is empty */
1288 count = 0;
1289 do {
1290 if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
1291 netdev_warn(pp->dev,
1292 "TX FIFO empty timeout status=0x%08x\n",
1293 val);
1294 break;
1295 }
1296 mdelay(1);
1297
1298 val = mvreg_read(pp, MVNETA_PORT_STATUS);
1299 } while (!(val & MVNETA_TX_FIFO_EMPTY) &&
1300 (val & MVNETA_TX_IN_PRGRS));
1301
1302 udelay(200);
1303 }
1304
1305 /* Enable the port by setting the port enable bit of the MAC control register */
1306 static void mvneta_port_enable(struct mvneta_port *pp)
1307 {
1308 u32 val;
1309
1310 /* Enable port */
1311 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
1312 val |= MVNETA_GMAC0_PORT_ENABLE;
1313 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
1314 }
1315
1316 /* Disable the port and wait for about 200 usec before returning */
1317 static void mvneta_port_disable(struct mvneta_port *pp)
1318 {
1319 u32 val;
1320
1321 /* Reset the Enable bit in the Serial Control Register */
1322 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
1323 val &= ~MVNETA_GMAC0_PORT_ENABLE;
1324 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
1325
1326 udelay(200);
1327 }
1328
1329 /* Multicast tables methods */
1330
1331 /* Set all entries in Unicast MAC Table; queue==-1 means reject all */
1332 static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
1333 {
1334 int offset;
1335 u32 val;
1336
1337 if (queue == -1) {
1338 val = 0;
1339 } else {
1340 val = 0x1 | (queue << 1);
1341 val |= (val << 24) | (val << 16) | (val << 8);
1342 }
1343
1344 for (offset = 0; offset <= 0xc; offset += 4)
1345 mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
1346 }
1347
1348 /* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
1349 static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
1350 {
1351 int offset;
1352 u32 val;
1353
1354 if (queue == -1) {
1355 val = 0;
1356 } else {
1357 val = 0x1 | (queue << 1);
1358 val |= (val << 24) | (val << 16) | (val << 8);
1359 }
1360
1361 for (offset = 0; offset <= 0xfc; offset += 4)
1362 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
1363
1364 }
1365
1366 /* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
1367 static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
1368 {
1369 int offset;
1370 u32 val;
1371
1372 if (queue == -1) {
1373 memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
1374 val = 0;
1375 } else {
1376 memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
1377 val = 0x1 | (queue << 1);
1378 val |= (val << 24) | (val << 16) | (val << 8);
1379 }
1380
1381 for (offset = 0; offset <= 0xfc; offset += 4)
1382 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
1383 }
1384
1385 static void mvneta_percpu_unmask_interrupt(void *arg)
1386 {
1387 struct mvneta_port *pp = arg;
1388
1389 /* All the queues are unmasked, but actually only the ones
1390 * mapped to this CPU will be unmasked
1391 */
1392 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
1393 MVNETA_RX_INTR_MASK_ALL |
1394 MVNETA_TX_INTR_MASK_ALL |
1395 MVNETA_MISCINTR_INTR_MASK);
1396 }
1397
1398 static void mvneta_percpu_mask_interrupt(void *arg)
1399 {
1400 struct mvneta_port *pp = arg;
1401
1402 /* All the queues are masked, but actually only the ones
1403 * mapped to this CPU will be masked
1404 */
1405 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
1406 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
1407 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
1408 }
1409
1410 static void mvneta_percpu_clear_intr_cause(void *arg)
1411 {
1412 struct mvneta_port *pp = arg;
1413
1414 /* All the queues are cleared, but actually only the ones
1415 * mapped to this CPU will be cleared
1416 */
1417 mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
1418 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
1419 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
1420 }
1421
1422 /* This method sets defaults to the NETA port:
1423 * Clears interrupt Cause and Mask registers.
1424 * Clears all MAC tables.
1425 * Sets defaults to all registers.
1426 * Resets RX and TX descriptor rings.
1427 * Resets PHY.
1428 * This method can be called after mvneta_port_down() to return the port
1429 * settings to defaults.
1430 */
1431 static void mvneta_defaults_set(struct mvneta_port *pp)
1432 {
1433 int cpu;
1434 int queue;
1435 u32 val;
1436 int max_cpu = num_present_cpus();
1437
1438 /* Clear all Cause registers */
1439 on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
1440
1441 /* Mask all interrupts */
1442 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
1443 mvreg_write(pp, MVNETA_INTR_ENABLE, 0);
1444
1445 /* Enable MBUS Retry bit16 */
1446 mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);
1447
1448 /* Set CPU queue access map. CPUs are assigned to the RX and
1449 * TX queues modulo their number. If there is only one TX
1450 * queue then it is assigned to the CPU associated to the
1451 * default RX queue.
1452 */
1453 for_each_present_cpu(cpu) {
1454 int rxq_map = 0, txq_map = 0;
1455 int rxq, txq;
1456 if (!pp->neta_armada3700) {
1457 for (rxq = 0; rxq < rxq_number; rxq++)
1458 if ((rxq % max_cpu) == cpu)
1459 rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
1460
1461 for (txq = 0; txq < txq_number; txq++)
1462 if ((txq % max_cpu) == cpu)
1463 txq_map |= MVNETA_CPU_TXQ_ACCESS(txq);
1464
1465 /* With only one TX queue, we configure a special case
1466 * which allows all the IRQs to be handled on a single
1467 * CPU
1468 */
1469 if (txq_number == 1)
1470 txq_map = (cpu == pp->rxq_def) ?
1471 MVNETA_CPU_TXQ_ACCESS(1) : 0;
1472
1473 } else {
1474 txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
1475 rxq_map = MVNETA_CPU_RXQ_ACCESS_ALL_MASK;
1476 }
1477
1478 mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
1479 }
1480
1481 /* Reset RX and TX DMAs */
1482 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
1483 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
1484
1485 /* Disable Legacy WRR, Disable EJP, Release from reset */
1486 mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
1487 for (queue = 0; queue < txq_number; queue++) {
1488 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
1489 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
1490 }
1491
1492 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
1493 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
1494
1495 /* Set Port Acceleration Mode */
1496 if (pp->bm_priv)
1497 /* HW buffer management + legacy parser */
1498 val = MVNETA_ACC_MODE_EXT2;
1499 else
1500 /* SW buffer management + legacy parser */
1501 val = MVNETA_ACC_MODE_EXT1;
1502 mvreg_write(pp, MVNETA_ACC_MODE, val);
1503
1504 if (pp->bm_priv)
1505 mvreg_write(pp, MVNETA_BM_ADDRESS, pp->bm_priv->bppi_phys_addr);
1506
1507 /* Update val of portCfg register accordingly with all RxQueue types */
1508 val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
1509 mvreg_write(pp, MVNETA_PORT_CONFIG, val);
1510
1511 val = 0;
1512 mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
1513 mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);
1514
1515 /* Build PORT_SDMA_CONFIG_REG */
1516 val = 0;
1517
1518 /* Default burst size */
1519 val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
1520 val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
1521 val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;
1522
1523 #if defined(__BIG_ENDIAN)
1524 val |= MVNETA_DESC_SWAP;
1525 #endif
1526
1527 /* Assign port SDMA configuration */
1528 mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
1529
1530 /* Disable PHY polling in hardware, since we're using the
1531 * kernel phylib to do this.
1532 */
1533 val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
1534 val &= ~MVNETA_PHY_POLLING_ENABLE;
1535 mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
1536
1537 mvneta_set_ucast_table(pp, -1);
1538 mvneta_set_special_mcast_table(pp, -1);
1539 mvneta_set_other_mcast_table(pp, -1);
1540
1541 /* Set port interrupt enable register - default enable all */
1542 mvreg_write(pp, MVNETA_INTR_ENABLE,
1543 (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
1544 | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));
1545
1546 mvneta_mib_counters_clear(pp);
1547 }
1548
1549 /* Set max sizes for tx queues */
1550 static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
1551
1552 {
1553 u32 val, size, mtu;
1554 int queue;
1555
1556 mtu = max_tx_size * 8;
1557 if (mtu > MVNETA_TX_MTU_MAX)
1558 mtu = MVNETA_TX_MTU_MAX;
1559
1560 /* Set MTU */
1561 val = mvreg_read(pp, MVNETA_TX_MTU);
1562 val &= ~MVNETA_TX_MTU_MAX;
1563 val |= mtu;
1564 mvreg_write(pp, MVNETA_TX_MTU, val);
1565
1566 /* The TX token size and all TXQ token sizes must be larger than the MTU */
1567 val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);
1568
1569 size = val & MVNETA_TX_TOKEN_SIZE_MAX;
1570 if (size < mtu) {
1571 size = mtu;
1572 val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
1573 val |= size;
1574 mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
1575 }
1576 for (queue = 0; queue < txq_number; queue++) {
1577 val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));
1578
1579 size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
1580 if (size < mtu) {
1581 size = mtu;
1582 val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
1583 val |= size;
1584 mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
1585 }
1586 }
1587 }
1588
1589 /* Set unicast address */
1590 static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
1591 int queue)
1592 {
1593 unsigned int unicast_reg;
1594 unsigned int tbl_offset;
1595 unsigned int reg_offset;
1596
1597 /* Locate the Unicast table entry */
1598 last_nibble = (0xf & last_nibble);
1599
1600 /* offset from unicast tbl base */
1601 tbl_offset = (last_nibble / 4) * 4;
1602
1603 /* offset within the above reg */
1604 reg_offset = last_nibble % 4;
1605
1606 unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));
1607
1608 if (queue == -1) {
1609 /* Clear accepts frame bit at specified unicast DA tbl entry */
1610 unicast_reg &= ~(0xff << (8 * reg_offset));
1611 } else {
1612 unicast_reg &= ~(0xff << (8 * reg_offset));
1613 unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
1614 }
1615
1616 mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
1617 }
1618
1619 /* Set mac address */
1620 static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
1621 int queue)
1622 {
1623 unsigned int mac_h;
1624 unsigned int mac_l;
1625
1626 if (queue != -1) {
1627 mac_l = (addr[4] << 8) | (addr[5]);
1628 mac_h = (addr[0] << 24) | (addr[1] << 16) |
1629 (addr[2] << 8) | (addr[3] << 0);
1630
1631 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
1632 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
1633 }
1634
1635 /* Accept frames of this address */
1636 mvneta_set_ucast_addr(pp, addr[5], queue);
1637 }
1638
1639 /* Set the number of packets that will be received before an RX
1640 * interrupt is generated by the HW.
1641 */
1642 static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
1643 struct mvneta_rx_queue *rxq, u32 value)
1644 {
1645 mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
1646 value | MVNETA_RXQ_NON_OCCUPIED(0));
1647 }
1648
1649 /* Set the time delay in usec before an RX interrupt is generated
1650 * by the HW.
1651 */
1652 static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
1653 struct mvneta_rx_queue *rxq, u32 value)
1654 {
1655 u32 val;
1656 unsigned long clk_rate;
1657
1658 clk_rate = clk_get_rate(pp->clk);
1659 val = (clk_rate / 1000000) * value;
1660
1661 mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
1662 }
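/* Worked example (assuming a 250 MHz core clock): a requested delay of
 * 100 usec is programmed as (250000000 / 1000000) * 100 = 25000 clock
 * ticks in MVNETA_RXQ_TIME_COAL_REG.
 */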
1663
1664 /* Set threshold for TX_DONE pkts coalescing */
1665 static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
1666 struct mvneta_tx_queue *txq, u32 value)
1667 {
1668 u32 val;
1669
1670 val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));
1671
1672 val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
1673 val |= MVNETA_TXQ_SENT_THRESH_MASK(value);
1674
1675 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);
1676 }
1677
1678 /* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
1679 static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
1680 u32 phys_addr, void *virt_addr,
1681 struct mvneta_rx_queue *rxq)
1682 {
1683 int i;
1684
1685 rx_desc->buf_phys_addr = phys_addr;
1686 i = rx_desc - rxq->descs;
1687 rxq->buf_virt_addr[i] = virt_addr;
1688 }
1689
1690 /* Decrement sent descriptors counter */
1691 static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
1692 struct mvneta_tx_queue *txq,
1693 int sent_desc)
1694 {
1695 u32 val;
1696
1697 /* Only 255 TX descriptors can be updated at once */
1698 while (sent_desc > 0xff) {
1699 val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
1700 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1701 sent_desc = sent_desc - 0xff;
1702 }
1703
1704 val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
1705 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1706 }
1707
1708 /* Get number of TX descriptors already sent by HW */
1709 static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
1710 struct mvneta_tx_queue *txq)
1711 {
1712 u32 val;
1713 int sent_desc;
1714
1715 val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
1716 sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
1717 MVNETA_TXQ_SENT_DESC_SHIFT;
1718
1719 return sent_desc;
1720 }
1721
1722 /* Get number of sent descriptors and decrement counter.
1723 * The number of sent descriptors is returned.
1724 */
1725 static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
1726 struct mvneta_tx_queue *txq)
1727 {
1728 int sent_desc;
1729
1730 /* Get number of sent descriptors */
1731 sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);
1732
1733 /* Decrement sent descriptors counter */
1734 if (sent_desc)
1735 mvneta_txq_sent_desc_dec(pp, txq, sent_desc);
1736
1737 return sent_desc;
1738 }
1739
1740 /* Set TXQ descriptors fields relevant for CSUM calculation */
1741 static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
1742 int ip_hdr_len, int l4_proto)
1743 {
1744 u32 command;
1745
1746 /* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
1747 * G_L4_chk, L4_type; required only for checksum
1748 * calculation
1749 */
1750 command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
1751 command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
1752
1753 if (l3_proto == htons(ETH_P_IP))
1754 command |= MVNETA_TXD_IP_CSUM;
1755 else
1756 command |= MVNETA_TX_L3_IP6;
1757
1758 if (l4_proto == IPPROTO_TCP)
1759 command |= MVNETA_TX_L4_CSUM_FULL;
1760 else if (l4_proto == IPPROTO_UDP)
1761 command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
1762 else
1763 command |= MVNETA_TX_L4_CSUM_NOT;
1764
1765 return command;
1766 }
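/* Usage sketch: for a TCP-over-IPv4 frame the caller passes the L3 offset
 * (14 for a plain Ethernet frame), htons(ETH_P_IP), the IP header length
 * and IPPROTO_TCP; the returned command then has MVNETA_TXD_IP_CSUM and
 * MVNETA_TX_L4_CSUM_FULL set, so the hardware computes both checksums.
 */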
1767
1768
1769 /* Display more error info */
1770 static void mvneta_rx_error(struct mvneta_port *pp,
1771 struct mvneta_rx_desc *rx_desc)
1772 {
1773 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
1774 u32 status = rx_desc->status;
1775
1776 /* update per-cpu counter */
1777 u64_stats_update_begin(&stats->syncp);
1778 stats->rx_errors++;
1779 u64_stats_update_end(&stats->syncp);
1780
1781 switch (status & MVNETA_RXD_ERR_CODE_MASK) {
1782 case MVNETA_RXD_ERR_CRC:
1783 netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
1784 status, rx_desc->data_size);
1785 break;
1786 case MVNETA_RXD_ERR_OVERRUN:
1787 netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
1788 status, rx_desc->data_size);
1789 break;
1790 case MVNETA_RXD_ERR_LEN:
1791 netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
1792 status, rx_desc->data_size);
1793 break;
1794 case MVNETA_RXD_ERR_RESOURCE:
1795 netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
1796 status, rx_desc->data_size);
1797 break;
1798 }
1799 }
1800
1801 /* Handle RX checksum offload based on the descriptor's status */
1802 static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
1803 struct sk_buff *skb)
1804 {
1805 if ((pp->dev->features & NETIF_F_RXCSUM) &&
1806 (status & MVNETA_RXD_L3_IP4) &&
1807 (status & MVNETA_RXD_L4_CSUM_OK)) {
1808 skb->csum = 0;
1809 skb->ip_summed = CHECKSUM_UNNECESSARY;
1810 return;
1811 }
1812
1813 skb->ip_summed = CHECKSUM_NONE;
1814 }
1815
1816 /* Return the tx queue pointer (find last set bit) according to <cause>
1817 * returned from the tx_done reg. <cause> must not be null. The return value
1818 * is always a valid queue matching the last (highest) set bit in <cause>.
1819 */
1820 static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
1821 u32 cause)
1822 {
1823 int queue = fls(cause) - 1;
1824
1825 return &pp->txqs[queue];
1826 }
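/* Illustrative example: if <cause> is 0x05 (TXQs 0 and 2 have completed
 * work), fls(0x05) = 3, so queue 2 is handled first; once the caller
 * clears bit 2, the next call returns queue 0.
 */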
1827
1828 /* Free tx queue skbuffs */
1829 static void mvneta_txq_bufs_free(struct mvneta_port *pp,
1830 struct mvneta_tx_queue *txq, int num,
1831 struct netdev_queue *nq)
1832 {
1833 unsigned int bytes_compl = 0, pkts_compl = 0;
1834 int i;
1835
1836 for (i = 0; i < num; i++) {
1837 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_get_index];
1838 struct mvneta_tx_desc *tx_desc = txq->descs +
1839 txq->txq_get_index;
1840
1841 mvneta_txq_inc_get(txq);
1842
1843 if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr) &&
1844 buf->type != MVNETA_TYPE_XDP_TX)
1845 dma_unmap_single(pp->dev->dev.parent,
1846 tx_desc->buf_phys_addr,
1847 tx_desc->data_size, DMA_TO_DEVICE);
1848 if (buf->type == MVNETA_TYPE_SKB && buf->skb) {
1849 bytes_compl += buf->skb->len;
1850 pkts_compl++;
1851 dev_kfree_skb_any(buf->skb);
1852 } else if (buf->type == MVNETA_TYPE_XDP_TX ||
1853 buf->type == MVNETA_TYPE_XDP_NDO) {
1854 xdp_return_frame(buf->xdpf);
1855 }
1856 }
1857
1858 netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
1859 }
1860
1861 /* Handle end of transmission */
1862 static void mvneta_txq_done(struct mvneta_port *pp,
1863 struct mvneta_tx_queue *txq)
1864 {
1865 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
1866 int tx_done;
1867
1868 tx_done = mvneta_txq_sent_desc_proc(pp, txq);
1869 if (!tx_done)
1870 return;
1871
1872 mvneta_txq_bufs_free(pp, txq, tx_done, nq);
1873
1874 txq->count -= tx_done;
1875
1876 if (netif_tx_queue_stopped(nq)) {
1877 if (txq->count <= txq->tx_wake_threshold)
1878 netif_tx_wake_queue(nq);
1879 }
1880 }
1881
1882 /* Refill processing for SW buffer management */
1883 /* Allocate page per descriptor */
1884 static int mvneta_rx_refill(struct mvneta_port *pp,
1885 struct mvneta_rx_desc *rx_desc,
1886 struct mvneta_rx_queue *rxq,
1887 gfp_t gfp_mask)
1888 {
1889 dma_addr_t phys_addr;
1890 struct page *page;
1891
1892 page = page_pool_alloc_pages(rxq->page_pool,
1893 gfp_mask | __GFP_NOWARN);
1894 if (!page)
1895 return -ENOMEM;
1896
1897 phys_addr = page_pool_get_dma_addr(page) + pp->rx_offset_correction;
1898 mvneta_rx_desc_fill(rx_desc, phys_addr, page, rxq);
1899
1900 return 0;
1901 }
1902
1903 /* Handle tx checksum */
1904 static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
1905 {
1906 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1907 int ip_hdr_len = 0;
1908 __be16 l3_proto = vlan_get_protocol(skb);
1909 u8 l4_proto;
1910
1911 if (l3_proto == htons(ETH_P_IP)) {
1912 struct iphdr *ip4h = ip_hdr(skb);
1913
1914 /* Calculate IPv4 checksum and L4 checksum */
1915 ip_hdr_len = ip4h->ihl;
1916 l4_proto = ip4h->protocol;
1917 } else if (l3_proto == htons(ETH_P_IPV6)) {
1918 struct ipv6hdr *ip6h = ipv6_hdr(skb);
1919
1920 /* Read l4_protocol from one of IPv6 extra headers */
1921 if (skb_network_header_len(skb) > 0)
1922 ip_hdr_len = (skb_network_header_len(skb) >> 2);
1923 l4_proto = ip6h->nexthdr;
1924 } else
1925 return MVNETA_TX_L4_CSUM_NOT;
1926
1927 return mvneta_txq_desc_csum(skb_network_offset(skb),
1928 l3_proto, ip_hdr_len, l4_proto);
1929 }
1930
1931 return MVNETA_TX_L4_CSUM_NOT;
1932 }
1933
1934 /* Drop packets received by the RXQ and free buffers */
1935 static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
1936 struct mvneta_rx_queue *rxq)
1937 {
1938 int rx_done, i;
1939
1940 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
1941 if (rx_done)
1942 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
1943
1944 if (pp->bm_priv) {
1945 for (i = 0; i < rx_done; i++) {
1946 struct mvneta_rx_desc *rx_desc =
1947 mvneta_rxq_next_desc_get(rxq);
1948 u8 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
1949 struct mvneta_bm_pool *bm_pool;
1950
1951 bm_pool = &pp->bm_priv->bm_pools[pool_id];
1952 /* Return dropped buffer to the pool */
1953 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
1954 rx_desc->buf_phys_addr);
1955 }
1956 return;
1957 }
1958
1959 for (i = 0; i < rxq->size; i++) {
1960 struct mvneta_rx_desc *rx_desc = rxq->descs + i;
1961 void *data = rxq->buf_virt_addr[i];
1962 if (!data || !(rx_desc->buf_phys_addr))
1963 continue;
1964
1965 page_pool_put_full_page(rxq->page_pool, data, false);
1966 }
1967 if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
1968 xdp_rxq_info_unreg(&rxq->xdp_rxq);
1969 page_pool_destroy(rxq->page_pool);
1970 rxq->page_pool = NULL;
1971 }
1972
1973 static void
1974 mvneta_update_stats(struct mvneta_port *pp,
1975 struct mvneta_stats *ps)
1976 {
1977 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
1978
1979 u64_stats_update_begin(&stats->syncp);
1980 stats->es.ps.rx_packets += ps->rx_packets;
1981 stats->es.ps.rx_bytes += ps->rx_bytes;
1982 /* xdp */
1983 stats->es.ps.xdp_redirect += ps->xdp_redirect;
1984 stats->es.ps.xdp_pass += ps->xdp_pass;
1985 stats->es.ps.xdp_drop += ps->xdp_drop;
1986 u64_stats_update_end(&stats->syncp);
1987 }
1988
1989 static inline
1990 int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
1991 {
1992 struct mvneta_rx_desc *rx_desc;
1993 int curr_desc = rxq->first_to_refill;
1994 int i;
1995
1996 for (i = 0; (i < rxq->refill_num) && (i < 64); i++) {
1997 rx_desc = rxq->descs + curr_desc;
1998 if (!(rx_desc->buf_phys_addr)) {
1999 if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) {
2000 struct mvneta_pcpu_stats *stats;
2001
2002 pr_err("Can't refill queue %d. Done %d from %d\n",
2003 rxq->id, i, rxq->refill_num);
2004
2005 stats = this_cpu_ptr(pp->stats);
2006 u64_stats_update_begin(&stats->syncp);
2007 stats->es.refill_error++;
2008 u64_stats_update_end(&stats->syncp);
2009 break;
2010 }
2011 }
2012 curr_desc = MVNETA_QUEUE_NEXT_DESC(rxq, curr_desc);
2013 }
2014 rxq->refill_num -= i;
2015 rxq->first_to_refill = curr_desc;
2016
2017 return i;
2018 }
2019
2020 static int
2021 mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
2022 struct xdp_frame *xdpf, bool dma_map)
2023 {
2024 struct mvneta_tx_desc *tx_desc;
2025 struct mvneta_tx_buf *buf;
2026 dma_addr_t dma_addr;
2027
2028 if (txq->count >= txq->tx_stop_threshold)
2029 return MVNETA_XDP_DROPPED;
2030
2031 tx_desc = mvneta_txq_next_desc_get(txq);
2032
2033 buf = &txq->buf[txq->txq_put_index];
2034 if (dma_map) {
2035 /* ndo_xdp_xmit */
2036 dma_addr = dma_map_single(pp->dev->dev.parent, xdpf->data,
2037 xdpf->len, DMA_TO_DEVICE);
2038 if (dma_mapping_error(pp->dev->dev.parent, dma_addr)) {
2039 mvneta_txq_desc_put(txq);
2040 return MVNETA_XDP_DROPPED;
2041 }
2042 buf->type = MVNETA_TYPE_XDP_NDO;
2043 } else {
2044 struct page *page = virt_to_page(xdpf->data);
2045
2046 dma_addr = page_pool_get_dma_addr(page) +
2047 sizeof(*xdpf) + xdpf->headroom;
2048 dma_sync_single_for_device(pp->dev->dev.parent, dma_addr,
2049 xdpf->len, DMA_BIDIRECTIONAL);
2050 buf->type = MVNETA_TYPE_XDP_TX;
2051 }
2052 buf->xdpf = xdpf;
2053
2054 tx_desc->command = MVNETA_TXD_FLZ_DESC;
2055 tx_desc->buf_phys_addr = dma_addr;
2056 tx_desc->data_size = xdpf->len;
2057
2058 mvneta_txq_inc_put(txq);
2059 txq->pending++;
2060 txq->count++;
2061
2062 return MVNETA_XDP_TX;
2063 }
2064
2065 static int
2066 mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp)
2067 {
2068 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2069 struct mvneta_tx_queue *txq;
2070 struct netdev_queue *nq;
2071 struct xdp_frame *xdpf;
2072 int cpu;
2073 u32 ret;
2074
2075 xdpf = convert_to_xdp_frame(xdp);
2076 if (unlikely(!xdpf))
2077 return MVNETA_XDP_DROPPED;
2078
2079 cpu = smp_processor_id();
2080 txq = &pp->txqs[cpu % txq_number];
2081 nq = netdev_get_tx_queue(pp->dev, txq->id);
2082
2083 __netif_tx_lock(nq, cpu);
2084 ret = mvneta_xdp_submit_frame(pp, txq, xdpf, false);
2085 if (ret == MVNETA_XDP_TX) {
2086 u64_stats_update_begin(&stats->syncp);
2087 stats->es.ps.tx_bytes += xdpf->len;
2088 stats->es.ps.tx_packets++;
2089 stats->es.ps.xdp_tx++;
2090 u64_stats_update_end(&stats->syncp);
2091
2092 mvneta_txq_pend_desc_add(pp, txq, 0);
2093 } else {
2094 u64_stats_update_begin(&stats->syncp);
2095 stats->es.ps.xdp_tx_err++;
2096 u64_stats_update_end(&stats->syncp);
2097 }
2098 __netif_tx_unlock(nq);
2099
2100 return ret;
2101 }
2102
2103 static int
2104 mvneta_xdp_xmit(struct net_device *dev, int num_frame,
2105 struct xdp_frame **frames, u32 flags)
2106 {
2107 struct mvneta_port *pp = netdev_priv(dev);
2108 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2109 int i, nxmit_byte = 0, nxmit = num_frame;
2110 int cpu = smp_processor_id();
2111 struct mvneta_tx_queue *txq;
2112 struct netdev_queue *nq;
2113 u32 ret;
2114
2115 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
2116 return -EINVAL;
2117
2118 txq = &pp->txqs[cpu % txq_number];
2119 nq = netdev_get_tx_queue(pp->dev, txq->id);
2120
2121 __netif_tx_lock(nq, cpu);
2122 for (i = 0; i < num_frame; i++) {
2123 ret = mvneta_xdp_submit_frame(pp, txq, frames[i], true);
2124 if (ret == MVNETA_XDP_TX) {
2125 nxmit_byte += frames[i]->len;
2126 } else {
2127 xdp_return_frame_rx_napi(frames[i]);
2128 nxmit--;
2129 }
2130 }
2131
2132 if (unlikely(flags & XDP_XMIT_FLUSH))
2133 mvneta_txq_pend_desc_add(pp, txq, 0);
2134 __netif_tx_unlock(nq);
2135
2136 u64_stats_update_begin(&stats->syncp);
2137 stats->es.ps.tx_bytes += nxmit_byte;
2138 stats->es.ps.tx_packets += nxmit;
2139 stats->es.ps.xdp_xmit += nxmit;
2140 stats->es.ps.xdp_xmit_err += num_frame - nxmit;
2141 u64_stats_update_end(&stats->syncp);
2142
2143 return nxmit;
2144 }
2145
2146 static int
2147 mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
2148 struct bpf_prog *prog, struct xdp_buff *xdp,
2149 struct mvneta_stats *stats)
2150 {
2151 unsigned int len;
2152 u32 ret, act;
2153
2154 len = xdp->data_end - xdp->data_hard_start - pp->rx_offset_correction;
2155 act = bpf_prog_run_xdp(prog, xdp);
2156
2157 switch (act) {
2158 case XDP_PASS:
2159 stats->xdp_pass++;
2160 return MVNETA_XDP_PASS;
2161 case XDP_REDIRECT: {
2162 int err;
2163
2164 err = xdp_do_redirect(pp->dev, xdp, prog);
2165 if (unlikely(err)) {
2166 ret = MVNETA_XDP_DROPPED;
2167 page_pool_put_page(rxq->page_pool,
2168 virt_to_head_page(xdp->data), len,
2169 true);
2170 } else {
2171 ret = MVNETA_XDP_REDIR;
2172 stats->xdp_redirect++;
2173 }
2174 break;
2175 }
2176 case XDP_TX:
2177 ret = mvneta_xdp_xmit_back(pp, xdp);
2178 if (ret != MVNETA_XDP_TX)
2179 page_pool_put_page(rxq->page_pool,
2180 virt_to_head_page(xdp->data), len,
2181 true);
2182 break;
2183 default:
2184 bpf_warn_invalid_xdp_action(act);
2185 /* fall through */
2186 case XDP_ABORTED:
2187 trace_xdp_exception(pp->dev, prog, act);
2188 /* fall through */
2189 case XDP_DROP:
2190 page_pool_put_page(rxq->page_pool,
2191 virt_to_head_page(xdp->data), len, true);
2192 ret = MVNETA_XDP_DROPPED;
2193 stats->xdp_drop++;
2194 break;
2195 }
2196
2197 stats->rx_bytes += xdp->data_end - xdp->data;
2198 stats->rx_packets++;
2199
2200 return ret;
2201 }
2202
2203 static int
2204 mvneta_swbm_rx_frame(struct mvneta_port *pp,
2205 struct mvneta_rx_desc *rx_desc,
2206 struct mvneta_rx_queue *rxq,
2207 struct xdp_buff *xdp,
2208 struct bpf_prog *xdp_prog,
2209 struct page *page,
2210 struct mvneta_stats *stats)
2211 {
2212 unsigned char *data = page_address(page);
2213 int data_len = -MVNETA_MH_SIZE, len;
2214 struct net_device *dev = pp->dev;
2215 enum dma_data_direction dma_dir;
2216 int ret = 0;
2217
2218 if (MVNETA_SKB_SIZE(rx_desc->data_size) > PAGE_SIZE) {
2219 len = MVNETA_MAX_RX_BUF_SIZE;
2220 data_len += len;
2221 } else {
2222 len = rx_desc->data_size;
2223 data_len += len - ETH_FCS_LEN;
2224 }
2225
2226 dma_dir = page_pool_get_dma_dir(rxq->page_pool);
2227 dma_sync_single_for_cpu(dev->dev.parent,
2228 rx_desc->buf_phys_addr,
2229 len, dma_dir);
2230
2231 /* Prefetch header */
2232 prefetch(data);
2233
2234 xdp->data_hard_start = data;
2235 xdp->data = data + pp->rx_offset_correction + MVNETA_MH_SIZE;
2236 xdp->data_end = xdp->data + data_len;
2237 xdp_set_data_meta_invalid(xdp);
2238
2239 if (xdp_prog) {
2240 ret = mvneta_run_xdp(pp, rxq, xdp_prog, xdp, stats);
2241 if (ret)
2242 goto out;
2243 }
2244
2245 rxq->skb = build_skb(xdp->data_hard_start, PAGE_SIZE);
2246 if (unlikely(!rxq->skb)) {
2247 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2248
2249 netdev_err(dev, "Can't allocate skb on queue %d\n", rxq->id);
2250
2251 u64_stats_update_begin(&stats->syncp);
2252 stats->es.skb_alloc_error++;
2253 stats->rx_dropped++;
2254 u64_stats_update_end(&stats->syncp);
2255
2256 return -ENOMEM;
2257 }
2258 page_pool_release_page(rxq->page_pool, page);
2259
2260 skb_reserve(rxq->skb,
2261 xdp->data - xdp->data_hard_start);
2262 skb_put(rxq->skb, xdp->data_end - xdp->data);
2263 mvneta_rx_csum(pp, rx_desc->status, rxq->skb);
2264
2265 rxq->left_size = rx_desc->data_size - len;
2266
2267 out:
2268 rx_desc->buf_phys_addr = 0;
2269
2270 return ret;
2271 }
2272
2273 static void
2274 mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
2275 struct mvneta_rx_desc *rx_desc,
2276 struct mvneta_rx_queue *rxq,
2277 struct page *page)
2278 {
2279 struct net_device *dev = pp->dev;
2280 enum dma_data_direction dma_dir;
2281 int data_len, len;
2282
2283 if (rxq->left_size > MVNETA_MAX_RX_BUF_SIZE) {
2284 len = MVNETA_MAX_RX_BUF_SIZE;
2285 data_len = len;
2286 } else {
2287 len = rxq->left_size;
2288 data_len = len - ETH_FCS_LEN;
2289 }
2290 dma_dir = page_pool_get_dma_dir(rxq->page_pool);
2291 dma_sync_single_for_cpu(dev->dev.parent,
2292 rx_desc->buf_phys_addr,
2293 len, dma_dir);
2294 if (data_len > 0) {
2295 /* refill descriptor with new buffer later */
2296 skb_add_rx_frag(rxq->skb,
2297 skb_shinfo(rxq->skb)->nr_frags,
2298 page, pp->rx_offset_correction, data_len,
2299 PAGE_SIZE);
2300 }
2301 page_pool_release_page(rxq->page_pool, page);
2302 rx_desc->buf_phys_addr = 0;
2303 rxq->left_size -= len;
2304 }
2305
2306 /* Main rx processing when using software buffer management */
2307 static int mvneta_rx_swbm(struct napi_struct *napi,
2308 struct mvneta_port *pp, int budget,
2309 struct mvneta_rx_queue *rxq)
2310 {
2311 int rx_proc = 0, rx_todo, refill;
2312 struct net_device *dev = pp->dev;
2313 struct mvneta_stats ps = {};
2314 struct bpf_prog *xdp_prog;
2315 struct xdp_buff xdp_buf;
2316
2317 /* Get number of received packets */
2318 rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq);
2319
2320 rcu_read_lock();
2321 xdp_prog = READ_ONCE(pp->xdp_prog);
2322 xdp_buf.rxq = &rxq->xdp_rxq;
2323
2324 /* Fairness NAPI loop */
2325 while (rx_proc < budget && rx_proc < rx_todo) {
2326 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
2327 u32 rx_status, index;
2328 struct page *page;
2329
2330 index = rx_desc - rxq->descs;
2331 page = (struct page *)rxq->buf_virt_addr[index];
2332
2333 rx_status = rx_desc->status;
2334 rx_proc++;
2335 rxq->refill_num++;
2336
2337 if (rx_status & MVNETA_RXD_FIRST_DESC) {
2338 int err;
2339
2340 /* Check errors only for FIRST descriptor */
2341 if (rx_status & MVNETA_RXD_ERR_SUMMARY) {
2342 mvneta_rx_error(pp, rx_desc);
2343 /* leave the descriptor untouched */
2344 continue;
2345 }
2346
2347 err = mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf,
2348 xdp_prog, page, &ps);
2349 if (err)
2350 continue;
2351 } else {
2352 if (unlikely(!rxq->skb)) {
2353 pr_debug("no skb for rx_status 0x%x\n",
2354 rx_status);
2355 continue;
2356 }
2357 mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, page);
2358 } /* Middle or Last descriptor */
2359
2360 if (!(rx_status & MVNETA_RXD_LAST_DESC))
2361 /* no last descriptor this time */
2362 continue;
2363
2364 if (rxq->left_size) {
2365 pr_err("get last desc, but left_size (%d) != 0\n",
2366 rxq->left_size);
2367 dev_kfree_skb_any(rxq->skb);
2368 rxq->left_size = 0;
2369 rxq->skb = NULL;
2370 continue;
2371 }
2372
2373 ps.rx_bytes += rxq->skb->len;
2374 ps.rx_packets++;
2375
2376 /* Linux processing */
2377 rxq->skb->protocol = eth_type_trans(rxq->skb, dev);
2378
2379 napi_gro_receive(napi, rxq->skb);
2380
2381 /* clear the incomplete skb pointer in the queue */
2382 rxq->skb = NULL;
2383 }
2384 rcu_read_unlock();
2385
2386 if (ps.xdp_redirect)
2387 xdp_do_flush_map();
2388
2389 if (ps.rx_packets)
2390 mvneta_update_stats(pp, &ps);
2391
2392 /* return some buffers to the hardware queue; refilling one at a time is too slow */
2393 refill = mvneta_rx_refill_queue(pp, rxq);
2394
2395 /* Update rxq management counters */
2396 mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill);
2397
2398 return ps.rx_packets;
2399 }
2400
2401 /* Main rx processing when using hardware buffer management */
2402 static int mvneta_rx_hwbm(struct napi_struct *napi,
2403 struct mvneta_port *pp, int rx_todo,
2404 struct mvneta_rx_queue *rxq)
2405 {
2406 struct net_device *dev = pp->dev;
2407 int rx_done;
2408 u32 rcvd_pkts = 0;
2409 u32 rcvd_bytes = 0;
2410
2411 /* Get number of received packets */
2412 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
2413
2414 if (rx_todo > rx_done)
2415 rx_todo = rx_done;
2416
2417 rx_done = 0;
2418
2419 /* Fairness NAPI loop */
2420 while (rx_done < rx_todo) {
2421 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
2422 struct mvneta_bm_pool *bm_pool = NULL;
2423 struct sk_buff *skb;
2424 unsigned char *data;
2425 dma_addr_t phys_addr;
2426 u32 rx_status, frag_size;
2427 int rx_bytes, err;
2428 u8 pool_id;
2429
2430 rx_done++;
2431 rx_status = rx_desc->status;
2432 rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
2433 data = (u8 *)(uintptr_t)rx_desc->buf_cookie;
2434 phys_addr = rx_desc->buf_phys_addr;
2435 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
2436 bm_pool = &pp->bm_priv->bm_pools[pool_id];
2437
2438 if (!mvneta_rxq_desc_is_first_last(rx_status) ||
2439 (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
2440 err_drop_frame_ret_pool:
2441 /* Return the buffer to the pool */
2442 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
2443 rx_desc->buf_phys_addr);
2444 err_drop_frame:
2445 mvneta_rx_error(pp, rx_desc);
2446 /* leave the descriptor untouched */
2447 continue;
2448 }
2449
2450 if (rx_bytes <= rx_copybreak) {
2451 /* better copy a small frame and not unmap the DMA region */
2452 skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
2453 if (unlikely(!skb))
2454 goto err_drop_frame_ret_pool;
2455
2456 dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev,
2457 rx_desc->buf_phys_addr,
2458 MVNETA_MH_SIZE + NET_SKB_PAD,
2459 rx_bytes,
2460 DMA_FROM_DEVICE);
2461 skb_put_data(skb, data + MVNETA_MH_SIZE + NET_SKB_PAD,
2462 rx_bytes);
2463
2464 skb->protocol = eth_type_trans(skb, dev);
2465 mvneta_rx_csum(pp, rx_status, skb);
2466 napi_gro_receive(napi, skb);
2467
2468 rcvd_pkts++;
2469 rcvd_bytes += rx_bytes;
2470
2471 /* Return the buffer to the pool */
2472 mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
2473 rx_desc->buf_phys_addr);
2474
2475 /* leave the descriptor and buffer untouched */
2476 continue;
2477 }
2478
2479 /* Refill processing */
2480 err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC);
2481 if (err) {
2482 struct mvneta_pcpu_stats *stats;
2483
2484 netdev_err(dev, "Linux processing - Can't refill\n");
2485
2486 stats = this_cpu_ptr(pp->stats);
2487 u64_stats_update_begin(&stats->syncp);
2488 stats->es.refill_error++;
2489 u64_stats_update_end(&stats->syncp);
2490
2491 goto err_drop_frame_ret_pool;
2492 }
2493
2494 frag_size = bm_pool->hwbm_pool.frag_size;
2495
2496 skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);
2497
2498 /* After the refill, the old buffer has to be unmapped regardless of
2499 * whether the skb was successfully built or not.
2500 */
2501 dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr,
2502 bm_pool->buf_size, DMA_FROM_DEVICE);
2503 if (!skb)
2504 goto err_drop_frame;
2505
2506 rcvd_pkts++;
2507 rcvd_bytes += rx_bytes;
2508
2509 /* Linux processing */
2510 skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
2511 skb_put(skb, rx_bytes);
2512
2513 skb->protocol = eth_type_trans(skb, dev);
2514
2515 mvneta_rx_csum(pp, rx_status, skb);
2516
2517 napi_gro_receive(napi, skb);
2518 }
2519
2520 if (rcvd_pkts) {
2521 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2522
2523 u64_stats_update_begin(&stats->syncp);
2524 stats->es.ps.rx_packets += rcvd_pkts;
2525 stats->es.ps.rx_bytes += rcvd_bytes;
2526 u64_stats_update_end(&stats->syncp);
2527 }
2528
2529 /* Update rxq management counters */
2530 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
2531
2532 return rx_done;
2533 }
2534
2535 static inline void
2536 mvneta_tso_put_hdr(struct sk_buff *skb,
2537 struct mvneta_port *pp, struct mvneta_tx_queue *txq)
2538 {
2539 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2540 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
2541 struct mvneta_tx_desc *tx_desc;
2542
2543 tx_desc = mvneta_txq_next_desc_get(txq);
2544 tx_desc->data_size = hdr_len;
2545 tx_desc->command = mvneta_skb_tx_csum(pp, skb);
2546 tx_desc->command |= MVNETA_TXD_F_DESC;
2547 tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
2548 txq->txq_put_index * TSO_HEADER_SIZE;
2549 buf->type = MVNETA_TYPE_SKB;
2550 buf->skb = NULL;
2551
2552 mvneta_txq_inc_put(txq);
2553 }
2554
2555 static inline int
2556 mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
2557 struct sk_buff *skb, char *data, int size,
2558 bool last_tcp, bool is_last)
2559 {
2560 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
2561 struct mvneta_tx_desc *tx_desc;
2562
2563 tx_desc = mvneta_txq_next_desc_get(txq);
2564 tx_desc->data_size = size;
2565 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
2566 size, DMA_TO_DEVICE);
2567 if (unlikely(dma_mapping_error(dev->dev.parent,
2568 tx_desc->buf_phys_addr))) {
2569 mvneta_txq_desc_put(txq);
2570 return -ENOMEM;
2571 }
2572
2573 tx_desc->command = 0;
2574 buf->type = MVNETA_TYPE_SKB;
2575 buf->skb = NULL;
2576
2577 if (last_tcp) {
2578 /* last descriptor in the TCP packet */
2579 tx_desc->command = MVNETA_TXD_L_DESC;
2580
2581 /* last descriptor in SKB */
2582 if (is_last)
2583 buf->skb = skb;
2584 }
2585 mvneta_txq_inc_put(txq);
2586 return 0;
2587 }
2588
2589 static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
2590 struct mvneta_tx_queue *txq)
2591 {
2592 int total_len, data_left;
2593 int desc_count = 0;
2594 struct mvneta_port *pp = netdev_priv(dev);
2595 struct tso_t tso;
2596 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2597 int i;
2598
2599 /* Count needed descriptors */
2600 if ((txq->count + tso_count_descs(skb)) >= txq->size)
2601 return 0;
2602
2603 if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
2604 pr_info("*** Is this even possible???!?!?\n");
2605 return 0;
2606 }
2607
2608 /* Initialize the TSO handler, and prepare the first payload */
2609 tso_start(skb, &tso);
2610
2611 total_len = skb->len - hdr_len;
2612 while (total_len > 0) {
2613 char *hdr;
2614
2615 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
2616 total_len -= data_left;
2617 desc_count++;
2618
2619 /* prepare packet headers: MAC + IP + TCP */
2620 hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
2621 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
2622
2623 mvneta_tso_put_hdr(skb, pp, txq);
2624
2625 while (data_left > 0) {
2626 int size;
2627 desc_count++;
2628
2629 size = min_t(int, tso.size, data_left);
2630
2631 if (mvneta_tso_put_data(dev, txq, skb,
2632 tso.data, size,
2633 size == data_left,
2634 total_len == 0))
2635 goto err_release;
2636 data_left -= size;
2637
2638 tso_build_data(skb, &tso, size);
2639 }
2640 }
2641
2642 return desc_count;
2643
2644 err_release:
2645 /* Release all used data descriptors; header descriptors must not
2646 * be DMA-unmapped.
2647 */
2648 for (i = desc_count - 1; i >= 0; i--) {
2649 struct mvneta_tx_desc *tx_desc = txq->descs + i;
2650 if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
2651 dma_unmap_single(pp->dev->dev.parent,
2652 tx_desc->buf_phys_addr,
2653 tx_desc->data_size,
2654 DMA_TO_DEVICE);
2655 mvneta_txq_desc_put(txq);
2656 }
2657 return 0;
2658 }
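/* For each TSO segment the loop above emits one header descriptor (built
 * in the per-queue tso_hdrs DMA area) followed by one or more data
 * descriptors, the last of which carries MVNETA_TXD_L_DESC. For example,
 * a 3000-byte payload with gso_size 1400 splits into segments of 1400,
 * 1400 and 200 bytes and thus needs at least six descriptors.
 */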
2659
2660 /* Handle tx fragmentation processing */
2661 static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
2662 struct mvneta_tx_queue *txq)
2663 {
2664 struct mvneta_tx_desc *tx_desc;
2665 int i, nr_frags = skb_shinfo(skb)->nr_frags;
2666
2667 for (i = 0; i < nr_frags; i++) {
2668 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
2669 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2670 void *addr = skb_frag_address(frag);
2671
2672 tx_desc = mvneta_txq_next_desc_get(txq);
2673 tx_desc->data_size = skb_frag_size(frag);
2674
2675 tx_desc->buf_phys_addr =
2676 dma_map_single(pp->dev->dev.parent, addr,
2677 tx_desc->data_size, DMA_TO_DEVICE);
2678
2679 if (dma_mapping_error(pp->dev->dev.parent,
2680 tx_desc->buf_phys_addr)) {
2681 mvneta_txq_desc_put(txq);
2682 goto error;
2683 }
2684
2685 if (i == nr_frags - 1) {
2686 /* Last descriptor */
2687 tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
2688 buf->skb = skb;
2689 } else {
2690 /* Descriptor in the middle: Not First, Not Last */
2691 tx_desc->command = 0;
2692 buf->skb = NULL;
2693 }
2694 buf->type = MVNETA_TYPE_SKB;
2695 mvneta_txq_inc_put(txq);
2696 }
2697
2698 return 0;
2699
2700 error:
2701 /* Release all descriptors that were used to map fragments of
2702 * this packet, as well as the corresponding DMA mappings
2703 */
2704 for (i = i - 1; i >= 0; i--) {
2705 tx_desc = txq->descs + i;
2706 dma_unmap_single(pp->dev->dev.parent,
2707 tx_desc->buf_phys_addr,
2708 tx_desc->data_size,
2709 DMA_TO_DEVICE);
2710 mvneta_txq_desc_put(txq);
2711 }
2712
2713 return -ENOMEM;
2714 }
2715
2716 /* Main tx processing */
2717 static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
2718 {
2719 struct mvneta_port *pp = netdev_priv(dev);
2720 u16 txq_id = skb_get_queue_mapping(skb);
2721 struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
2722 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
2723 struct mvneta_tx_desc *tx_desc;
2724 int len = skb->len;
2725 int frags = 0;
2726 u32 tx_cmd;
2727
2728 if (!netif_running(dev))
2729 goto out;
2730
2731 if (skb_is_gso(skb)) {
2732 frags = mvneta_tx_tso(skb, dev, txq);
2733 goto out;
2734 }
2735
2736 frags = skb_shinfo(skb)->nr_frags + 1;
2737
2738 /* Get a descriptor for the first part of the packet */
2739 tx_desc = mvneta_txq_next_desc_get(txq);
2740
2741 tx_cmd = mvneta_skb_tx_csum(pp, skb);
2742
2743 tx_desc->data_size = skb_headlen(skb);
2744
2745 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
2746 tx_desc->data_size,
2747 DMA_TO_DEVICE);
2748 if (unlikely(dma_mapping_error(dev->dev.parent,
2749 tx_desc->buf_phys_addr))) {
2750 mvneta_txq_desc_put(txq);
2751 frags = 0;
2752 goto out;
2753 }
2754
2755 buf->type = MVNETA_TYPE_SKB;
2756 if (frags == 1) {
2757 /* First and Last descriptor */
2758 tx_cmd |= MVNETA_TXD_FLZ_DESC;
2759 tx_desc->command = tx_cmd;
2760 buf->skb = skb;
2761 mvneta_txq_inc_put(txq);
2762 } else {
2763 /* First but not Last */
2764 tx_cmd |= MVNETA_TXD_F_DESC;
2765 buf->skb = NULL;
2766 mvneta_txq_inc_put(txq);
2767 tx_desc->command = tx_cmd;
2768 /* Continue with other skb fragments */
2769 if (mvneta_tx_frag_process(pp, skb, txq)) {
2770 dma_unmap_single(dev->dev.parent,
2771 tx_desc->buf_phys_addr,
2772 tx_desc->data_size,
2773 DMA_TO_DEVICE);
2774 mvneta_txq_desc_put(txq);
2775 frags = 0;
2776 goto out;
2777 }
2778 }
2779
2780 out:
2781 if (frags > 0) {
2782 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
2783 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
2784
2785 netdev_tx_sent_queue(nq, len);
2786
2787 txq->count += frags;
2788 if (txq->count >= txq->tx_stop_threshold)
2789 netif_tx_stop_queue(nq);
2790
2791 if (!netdev_xmit_more() || netif_xmit_stopped(nq) ||
2792 txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
2793 mvneta_txq_pend_desc_add(pp, txq, frags);
2794 else
2795 txq->pending += frags;
2796
2797 u64_stats_update_begin(&stats->syncp);
2798 stats->es.ps.tx_bytes += len;
2799 stats->es.ps.tx_packets++;
2800 u64_stats_update_end(&stats->syncp);
2801 } else {
2802 dev->stats.tx_dropped++;
2803 dev_kfree_skb_any(skb);
2804 }
2805
2806 return NETDEV_TX_OK;
2807 }
2808
2809
2810 /* Free tx resources when resetting a port */
2811 static void mvneta_txq_done_force(struct mvneta_port *pp,
2812 struct mvneta_tx_queue *txq)
2813
2814 {
2815 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
2816 int tx_done = txq->count;
2817
2818 mvneta_txq_bufs_free(pp, txq, tx_done, nq);
2819
2820 /* reset txq */
2821 txq->count = 0;
2822 txq->txq_put_index = 0;
2823 txq->txq_get_index = 0;
2824 }
2825
2826 /* Handle tx done - called in softirq context. The <cause_tx_done> argument
2827 * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL.
2828 */
2829 static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
2830 {
2831 struct mvneta_tx_queue *txq;
2832 struct netdev_queue *nq;
2833 int cpu = smp_processor_id();
2834
2835 while (cause_tx_done) {
2836 txq = mvneta_tx_done_policy(pp, cause_tx_done);
2837
2838 nq = netdev_get_tx_queue(pp->dev, txq->id);
2839 __netif_tx_lock(nq, cpu);
2840
2841 if (txq->count)
2842 mvneta_txq_done(pp, txq);
2843
2844 __netif_tx_unlock(nq);
2845 cause_tx_done &= ~((1 << txq->id));
2846 }
2847 }
2848
2849 /* Compute the CRC-8 of the specified address, using a dedicated algorithm
2850 * defined by the hw spec; it differs from the generic crc8 algorithm.
2851 */
2852 static int mvneta_addr_crc(unsigned char *addr)
2853 {
2854 int crc = 0;
2855 int i;
2856
2857 for (i = 0; i < ETH_ALEN; i++) {
2858 int j;
2859
2860 crc = (crc ^ addr[i]) << 8;
2861 for (j = 7; j >= 0; j--) {
2862 if (crc & (0x100 << j))
2863 crc ^= 0x107 << j;
2864 }
2865 }
2866
2867 return crc;
2868 }
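/* The loop above implements a bitwise CRC-8 with polynomial
 * x^8 + x^2 + x + 1 (0x107), initial value 0 and no final XOR; the result
 * is used below as the index into the Other Multicast Table.
 */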
2869
2870 /* This method controls the net device special MAC multicast support.
2871 * The Special Multicast Table for MAC addresses supports MAC of the form
2872 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
2873 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
2874 * Table entries in the DA-Filter table. This method sets the appropriate
2875 * Special Multicast Table entry.
2876 */
2877 static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
2878 unsigned char last_byte,
2879 int queue)
2880 {
2881 unsigned int smc_table_reg;
2882 unsigned int tbl_offset;
2883 unsigned int reg_offset;
2884
2885 /* Register offset from SMC table base */
2886 tbl_offset = (last_byte / 4);
2887 /* Entry offset within the above reg */
2888 reg_offset = last_byte % 4;
2889
2890 smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
2891 + tbl_offset * 4));
2892
2893 if (queue == -1)
2894 smc_table_reg &= ~(0xff << (8 * reg_offset));
2895 else {
2896 smc_table_reg &= ~(0xff << (8 * reg_offset));
2897 smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
2898 }
2899
2900 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
2901 smc_table_reg);
2902 }
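/* Worked example (illustrative address): for 01:00:5e:00:00:42 the last
 * byte is 0x42 = 66, so tbl_offset = 66 / 4 = 16 (the 17th 32-bit SMC
 * register) and reg_offset = 66 % 4 = 2. With queue = 1 the selected
 * byte is programmed to 0x01 | (1 << 1) = 0x03: accept the frame and
 * steer it to RXQ 1.
 */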
2903
2904 /* This method controls the network device Other MAC multicast support.
2905 * The Other Multicast Table is used for multicast of another type.
2906 * A CRC-8 is used as an index to the Other Multicast Table entries
2907 * in the DA-Filter table.
2908 * The method gets the CRC-8 value from the calling routine and
2909 * sets the appropriate Other Multicast Table entry according to the
2910 * specified CRC-8.
2911 */
2912 static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
2913 unsigned char crc8,
2914 int queue)
2915 {
2916 unsigned int omc_table_reg;
2917 unsigned int tbl_offset;
2918 unsigned int reg_offset;
2919
2920 tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
2921 reg_offset = crc8 % 4; /* Entry offset within the above reg */
2922
2923 omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);
2924
2925 if (queue == -1) {
2926 /* Clear accepts frame bit at specified Other DA table entry */
2927 omc_table_reg &= ~(0xff << (8 * reg_offset));
2928 } else {
2929 omc_table_reg &= ~(0xff << (8 * reg_offset));
2930 omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
2931 }
2932
2933 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
2934 }
2935
2936 /* The network device supports multicast using two tables:
2937 * 1) Special Multicast Table for MAC addresses of the form
2938 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
2939 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
2940 * Table entries in the DA-Filter table.
2941 * 2) Other Multicast Table for multicast of another type. A CRC-8 value
2942 * is used as an index to the Other Multicast Table entries in the
2943 * DA-Filter table.
2944 */
2945 static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
2946 int queue)
2947 {
2948 unsigned char crc_result = 0;
2949
2950 if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
2951 mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
2952 return 0;
2953 }
2954
2955 crc_result = mvneta_addr_crc(p_addr);
2956 if (queue == -1) {
2957 if (pp->mcast_count[crc_result] == 0) {
2958 netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
2959 crc_result);
2960 return -EINVAL;
2961 }
2962
2963 pp->mcast_count[crc_result]--;
2964 if (pp->mcast_count[crc_result] != 0) {
2965 netdev_info(pp->dev,
2966 "After delete there are %d valid Mcast for crc8=0x%02x\n",
2967 pp->mcast_count[crc_result], crc_result);
2968 return -EINVAL;
2969 }
2970 } else
2971 pp->mcast_count[crc_result]++;
2972
2973 mvneta_set_other_mcast_addr(pp, crc_result, queue);
2974
2975 return 0;
2976 }
2977
2978 /* Configure the filtering mode of the Ethernet port */
2979 static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
2980 int is_promisc)
2981 {
2982 u32 port_cfg_reg, val;
2983
2984 port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);
2985
2986 val = mvreg_read(pp, MVNETA_TYPE_PRIO);
2987
2988 /* Set / Clear UPM bit in port configuration register */
2989 if (is_promisc) {
2990 /* Accept all Unicast addresses */
2991 port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
2992 val |= MVNETA_FORCE_UNI;
2993 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
2994 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
2995 } else {
2996 /* Reject all Unicast addresses */
2997 port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
2998 val &= ~MVNETA_FORCE_UNI;
2999 }
3000
3001 mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
3002 mvreg_write(pp, MVNETA_TYPE_PRIO, val);
3003 }
3004
3005 /* register unicast and multicast addresses */
3006 static void mvneta_set_rx_mode(struct net_device *dev)
3007 {
3008 struct mvneta_port *pp = netdev_priv(dev);
3009 struct netdev_hw_addr *ha;
3010
3011 if (dev->flags & IFF_PROMISC) {
3012 /* Accept all: Multicast + Unicast */
3013 mvneta_rx_unicast_promisc_set(pp, 1);
3014 mvneta_set_ucast_table(pp, pp->rxq_def);
3015 mvneta_set_special_mcast_table(pp, pp->rxq_def);
3016 mvneta_set_other_mcast_table(pp, pp->rxq_def);
3017 } else {
3018 /* Accept single Unicast */
3019 mvneta_rx_unicast_promisc_set(pp, 0);
3020 mvneta_set_ucast_table(pp, -1);
3021 mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def);
3022
3023 if (dev->flags & IFF_ALLMULTI) {
3024 /* Accept all multicast */
3025 mvneta_set_special_mcast_table(pp, pp->rxq_def);
3026 mvneta_set_other_mcast_table(pp, pp->rxq_def);
3027 } else {
3028 /* Accept only initialized multicast */
3029 mvneta_set_special_mcast_table(pp, -1);
3030 mvneta_set_other_mcast_table(pp, -1);
3031
3032 if (!netdev_mc_empty(dev)) {
3033 netdev_for_each_mc_addr(ha, dev) {
3034 mvneta_mcast_addr_set(pp, ha->addr,
3035 pp->rxq_def);
3036 }
3037 }
3038 }
3039 }
3040 }
3041
3042 /* Interrupt handling - the callback for request_irq() */
3043 static irqreturn_t mvneta_isr(int irq, void *dev_id)
3044 {
3045 struct mvneta_port *pp = (struct mvneta_port *)dev_id;
3046
3047 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
3048 napi_schedule(&pp->napi);
3049
3050 return IRQ_HANDLED;
3051 }
3052
3053 /* Interrupt handling - the callback for request_percpu_irq() */
3054 static irqreturn_t mvneta_percpu_isr(int irq, void *dev_id)
3055 {
3056 struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id;
3057
3058 disable_percpu_irq(port->pp->dev->irq);
3059 napi_schedule(&port->napi);
3060
3061 return IRQ_HANDLED;
3062 }
3063
3064 static void mvneta_link_change(struct mvneta_port *pp)
3065 {
3066 u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
3067
3068 phylink_mac_change(pp->phylink, !!(gmac_stat & MVNETA_GMAC_LINK_UP));
3069 }
3070
3071 /* NAPI handler
3072 * Bits 0 - 7 of the causeRxTx register indicate that packets were
3073 * transmitted on the corresponding TXQ (Bit 0 is for TX queue 1).
3074 * Bits 8 - 15 of the causeRxTx register indicate that packets were
3075 * received on the corresponding RXQ (Bit 8 is for RX queue 0).
3076 * Each CPU has its own causeRxTx register.
3077 */
3078 static int mvneta_poll(struct napi_struct *napi, int budget)
3079 {
3080 int rx_done = 0;
3081 u32 cause_rx_tx;
3082 int rx_queue;
3083 struct mvneta_port *pp = netdev_priv(napi->dev);
3084 struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
3085
3086 if (!netif_running(pp->dev)) {
3087 napi_complete(napi);
3088 return rx_done;
3089 }
3090
3091 /* Read cause register */
3092 cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE);
3093 if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) {
3094 u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);
3095
3096 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
3097
3098 if (cause_misc & (MVNETA_CAUSE_PHY_STATUS_CHANGE |
3099 MVNETA_CAUSE_LINK_CHANGE))
3100 mvneta_link_change(pp);
3101 }
3102
3103 /* Release Tx descriptors */
3104 if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
3105 mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
3106 cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
3107 }
3108
3109 /* For the case where the last mvneta_poll did not process all
3110 * RX packets
3111 */
3112 cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx :
3113 port->cause_rx_tx;
3114
3115 rx_queue = fls(((cause_rx_tx >> 8) & 0xff));
3116 if (rx_queue) {
3117 rx_queue = rx_queue - 1;
3118 if (pp->bm_priv)
3119 rx_done = mvneta_rx_hwbm(napi, pp, budget,
3120 &pp->rxqs[rx_queue]);
3121 else
3122 rx_done = mvneta_rx_swbm(napi, pp, budget,
3123 &pp->rxqs[rx_queue]);
3124 }
3125
3126 if (rx_done < budget) {
3127 cause_rx_tx = 0;
3128 napi_complete_done(napi, rx_done);
3129
3130 if (pp->neta_armada3700) {
3131 unsigned long flags;
3132
3133 local_irq_save(flags);
3134 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
3135 MVNETA_RX_INTR_MASK(rxq_number) |
3136 MVNETA_TX_INTR_MASK(txq_number) |
3137 MVNETA_MISCINTR_INTR_MASK);
3138 local_irq_restore(flags);
3139 } else {
3140 enable_percpu_irq(pp->dev->irq, 0);
3141 }
3142 }
3143
3144 if (pp->neta_armada3700)
3145 pp->cause_rx_tx = cause_rx_tx;
3146 else
3147 port->cause_rx_tx = cause_rx_tx;
3148
3149 return rx_done;
3150 }
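/* Illustrative decode of the cause register handled above (hypothetical
 * value): with cause_rx_tx = 0x0300, RX bits 8 and 9 are set, so
 * fls((0x0300 >> 8) & 0xff) = 2 and only RX queue 1 (the highest pending
 * queue) is polled this pass; the TX bits 0-7 are clear, so no TX
 * completion work is done.
 */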
3151
3152 static int mvneta_create_page_pool(struct mvneta_port *pp,
3153 struct mvneta_rx_queue *rxq, int size)
3154 {
3155 struct bpf_prog *xdp_prog = READ_ONCE(pp->xdp_prog);
3156 struct page_pool_params pp_params = {
3157 .order = 0,
3158 .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
3159 .pool_size = size,
3160 .nid = NUMA_NO_NODE,
3161 .dev = pp->dev->dev.parent,
3162 .dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
3163 .offset = pp->rx_offset_correction,
3164 .max_len = MVNETA_MAX_RX_BUF_SIZE,
3165 };
3166 int err;
3167
3168 rxq->page_pool = page_pool_create(&pp_params);
3169 if (IS_ERR(rxq->page_pool)) {
3170 err = PTR_ERR(rxq->page_pool);
3171 rxq->page_pool = NULL;
3172 return err;
3173 }
3174
3175 err = xdp_rxq_info_reg(&rxq->xdp_rxq, pp->dev, rxq->id);
3176 if (err < 0)
3177 goto err_free_pp;
3178
3179 err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
3180 rxq->page_pool);
3181 if (err)
3182 goto err_unregister_rxq;
3183
3184 return 0;
3185
3186 err_unregister_rxq:
3187 xdp_rxq_info_unreg(&rxq->xdp_rxq);
3188 err_free_pp:
3189 page_pool_destroy(rxq->page_pool);
3190 rxq->page_pool = NULL;
3191 return err;
3192 }
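/* Note on the pool parameters above: dma_dir is DMA_BIDIRECTIONAL only
 * while an XDP program is attached, because XDP_TX transmits directly out
 * of the receive pages; without XDP the pages only ever move from the
 * device to the CPU, so DMA_FROM_DEVICE is enough.
 */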
3193
3194 /* Handle rxq fill: allocates rxq buffers; called when initializing a port */
3195 static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
3196 int num)
3197 {
3198 int i, err;
3199
3200 err = mvneta_create_page_pool(pp, rxq, num);
3201 if (err < 0)
3202 return err;
3203
3204 for (i = 0; i < num; i++) {
3205 memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
3206 if (mvneta_rx_refill(pp, rxq->descs + i, rxq,
3207 GFP_KERNEL) != 0) {
3208 netdev_err(pp->dev,
3209 "%s:rxq %d, %d of %d buffs filled\n",
3210 __func__, rxq->id, i, num);
3211 break;
3212 }
3213 }
3214
3215 /* Add this number of RX descriptors as non occupied (ready to
3216 * get packets)
3217 */
3218 mvneta_rxq_non_occup_desc_add(pp, rxq, i);
3219
3220 return i;
3221 }
3222
3223 /* Free all packets pending transmit from all TXQs and reset TX port */
3224 static void mvneta_tx_reset(struct mvneta_port *pp)
3225 {
3226 int queue;
3227
3228 /* free the skb's in the tx ring */
3229 for (queue = 0; queue < txq_number; queue++)
3230 mvneta_txq_done_force(pp, &pp->txqs[queue]);
3231
3232 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
3233 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
3234 }
3235
3236 static void mvneta_rx_reset(struct mvneta_port *pp)
3237 {
3238 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
3239 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
3240 }
3241
3242 /* Rx/Tx queue initialization/cleanup methods */
3243
3244 static int mvneta_rxq_sw_init(struct mvneta_port *pp,
3245 struct mvneta_rx_queue *rxq)
3246 {
3247 rxq->size = pp->rx_ring_size;
3248
3249 /* Allocate memory for RX descriptors */
3250 rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
3251 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
3252 &rxq->descs_phys, GFP_KERNEL);
3253 if (!rxq->descs)
3254 return -ENOMEM;
3255
3256 rxq->last_desc = rxq->size - 1;
3257
3258 return 0;
3259 }
3260
3261 static void mvneta_rxq_hw_init(struct mvneta_port *pp,
3262 struct mvneta_rx_queue *rxq)
3263 {
3264 /* Set Rx descriptors queue starting address */
3265 mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
3266 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
3267
3268 /* Set coalescing pkts and time */
3269 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
3270 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
3271
3272 if (!pp->bm_priv) {
3273 /* Set Offset */
3274 mvneta_rxq_offset_set(pp, rxq, 0);
3275 mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ?
3276 MVNETA_MAX_RX_BUF_SIZE :
3277 MVNETA_RX_BUF_SIZE(pp->pkt_size));
3278 mvneta_rxq_bm_disable(pp, rxq);
3279 mvneta_rxq_fill(pp, rxq, rxq->size);
3280 } else {
3281 /* Set Offset */
3282 mvneta_rxq_offset_set(pp, rxq,
3283 NET_SKB_PAD - pp->rx_offset_correction);
3284
3285 mvneta_rxq_bm_enable(pp, rxq);
3286 /* Fill RXQ with buffers from RX pool */
3287 mvneta_rxq_long_pool_set(pp, rxq);
3288 mvneta_rxq_short_pool_set(pp, rxq);
3289 mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size);
3290 }
3291 }
3292
3293 /* Create a specified RX queue */
3294 static int mvneta_rxq_init(struct mvneta_port *pp,
3295 struct mvneta_rx_queue *rxq)
3296
3297 {
3298 int ret;
3299
3300 ret = mvneta_rxq_sw_init(pp, rxq);
3301 if (ret < 0)
3302 return ret;
3303
3304 mvneta_rxq_hw_init(pp, rxq);
3305
3306 return 0;
3307 }
3308
3309 /* Cleanup Rx queue */
3310 static void mvneta_rxq_deinit(struct mvneta_port *pp,
3311 struct mvneta_rx_queue *rxq)
3312 {
3313 mvneta_rxq_drop_pkts(pp, rxq);
3314
3315 if (rxq->skb)
3316 dev_kfree_skb_any(rxq->skb);
3317
3318 if (rxq->descs)
3319 dma_free_coherent(pp->dev->dev.parent,
3320 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
3321 rxq->descs,
3322 rxq->descs_phys);
3323
3324 rxq->descs = NULL;
3325 rxq->last_desc = 0;
3326 rxq->next_desc_to_proc = 0;
3327 rxq->descs_phys = 0;
3328 rxq->first_to_refill = 0;
3329 rxq->refill_num = 0;
3330 rxq->skb = NULL;
3331 rxq->left_size = 0;
3332 }
3333
3334 static int mvneta_txq_sw_init(struct mvneta_port *pp,
3335 struct mvneta_tx_queue *txq)
3336 {
3337 int cpu;
3338
3339 txq->size = pp->tx_ring_size;
3340
3341 /* A queue must always have room for at least one skb.
3342 * Therefore, stop the queue when the number of free entries drops to
3343 * the maximum number of descriptors per skb.
3344 */
3345 txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
3346 txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
3347
3348 /* Allocate memory for TX descriptors */
3349 txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
3350 txq->size * MVNETA_DESC_ALIGNED_SIZE,
3351 &txq->descs_phys, GFP_KERNEL);
3352 if (!txq->descs)
3353 return -ENOMEM;
3354
3355 txq->last_desc = txq->size - 1;
3356
3357 txq->buf = kmalloc_array(txq->size, sizeof(*txq->buf), GFP_KERNEL);
3358 if (!txq->buf) {
3359 dma_free_coherent(pp->dev->dev.parent,
3360 txq->size * MVNETA_DESC_ALIGNED_SIZE,
3361 txq->descs, txq->descs_phys);
3362 return -ENOMEM;
3363 }
3364
3365 /* Allocate DMA buffers for TSO MAC/IP/TCP headers */
3366 txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
3367 txq->size * TSO_HEADER_SIZE,
3368 &txq->tso_hdrs_phys, GFP_KERNEL);
3369 if (!txq->tso_hdrs) {
3370 kfree(txq->buf);
3371 dma_free_coherent(pp->dev->dev.parent,
3372 txq->size * MVNETA_DESC_ALIGNED_SIZE,
3373 txq->descs, txq->descs_phys);
3374 return -ENOMEM;
3375 }
3376
3377 /* Setup XPS mapping */
3378 if (txq_number > 1)
3379 cpu = txq->id % num_present_cpus();
3380 else
3381 cpu = pp->rxq_def % num_present_cpus();
3382 cpumask_set_cpu(cpu, &txq->affinity_mask);
3383 netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id);
3384
3385 return 0;
3386 }
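/* The two thresholds above implement simple hysteresis: the queue is
 * stopped once fewer than MVNETA_MAX_SKB_DESCS free slots remain (so a
 * worst-case skb always fits) and is woken again only when the occupancy
 * drops back to half the stop threshold, avoiding rapid stop/wake cycles.
 */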
3387
3388 static void mvneta_txq_hw_init(struct mvneta_port *pp,
3389 struct mvneta_tx_queue *txq)
3390 {
3391 /* Set maximum bandwidth for enabled TXQs */
3392 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
3393 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
3394
3395 /* Set Tx descriptors queue starting address */
3396 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
3397 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
3398
3399 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
3400 }
3401
3402 /* Create and initialize a tx queue */
3403 static int mvneta_txq_init(struct mvneta_port *pp,
3404 struct mvneta_tx_queue *txq)
3405 {
3406 int ret;
3407
3408 ret = mvneta_txq_sw_init(pp, txq);
3409 if (ret < 0)
3410 return ret;
3411
3412 mvneta_txq_hw_init(pp, txq);
3413
3414 return 0;
3415 }
3416
3417 /* Free allocated resources when mvneta_txq_init() fails to allocate memory */
3418 static void mvneta_txq_sw_deinit(struct mvneta_port *pp,
3419 struct mvneta_tx_queue *txq)
3420 {
3421 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
3422
3423 kfree(txq->buf);
3424
3425 if (txq->tso_hdrs)
3426 dma_free_coherent(pp->dev->dev.parent,
3427 txq->size * TSO_HEADER_SIZE,
3428 txq->tso_hdrs, txq->tso_hdrs_phys);
3429 if (txq->descs)
3430 dma_free_coherent(pp->dev->dev.parent,
3431 txq->size * MVNETA_DESC_ALIGNED_SIZE,
3432 txq->descs, txq->descs_phys);
3433
3434 netdev_tx_reset_queue(nq);
3435
3436 txq->descs = NULL;
3437 txq->last_desc = 0;
3438 txq->next_desc_to_proc = 0;
3439 txq->descs_phys = 0;
3440 }
3441
3442 static void mvneta_txq_hw_deinit(struct mvneta_port *pp,
3443 struct mvneta_tx_queue *txq)
3444 {
3445 /* Set minimum bandwidth for disabled TXQs */
3446 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
3447 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
3448
3449 /* Set Tx descriptors queue starting address and size */
3450 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
3451 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
3452 }
3453
3454 static void mvneta_txq_deinit(struct mvneta_port *pp,
3455 struct mvneta_tx_queue *txq)
3456 {
3457 mvneta_txq_sw_deinit(pp, txq);
3458 mvneta_txq_hw_deinit(pp, txq);
3459 }
3460
3461 /* Cleanup all Tx queues */
3462 static void mvneta_cleanup_txqs(struct mvneta_port *pp)
3463 {
3464 int queue;
3465
3466 for (queue = 0; queue < txq_number; queue++)
3467 mvneta_txq_deinit(pp, &pp->txqs[queue]);
3468 }
3469
3470 /* Cleanup all Rx queues */
3471 static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
3472 {
3473 int queue;
3474
3475 for (queue = 0; queue < rxq_number; queue++)
3476 mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
3477 }
3478
3479
3480 /* Init all Rx queues */
3481 static int mvneta_setup_rxqs(struct mvneta_port *pp)
3482 {
3483 int queue;
3484
3485 for (queue = 0; queue < rxq_number; queue++) {
3486 int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
3487
3488 if (err) {
3489 netdev_err(pp->dev, "%s: can't create rxq=%d\n",
3490 __func__, queue);
3491 mvneta_cleanup_rxqs(pp);
3492 return err;
3493 }
3494 }
3495
3496 return 0;
3497 }
3498
3499 /* Init all tx queues */
3500 static int mvneta_setup_txqs(struct mvneta_port *pp)
3501 {
3502 int queue;
3503
3504 for (queue = 0; queue < txq_number; queue++) {
3505 int err = mvneta_txq_init(pp, &pp->txqs[queue]);
3506 if (err) {
3507 netdev_err(pp->dev, "%s: can't create txq=%d\n",
3508 __func__, queue);
3509 mvneta_cleanup_txqs(pp);
3510 return err;
3511 }
3512 }
3513
3514 return 0;
3515 }
3516
3517 static int mvneta_comphy_init(struct mvneta_port *pp)
3518 {
3519 int ret;
3520
3521 if (!pp->comphy)
3522 return 0;
3523
3524 ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET,
3525 pp->phy_interface);
3526 if (ret)
3527 return ret;
3528
3529 return phy_power_on(pp->comphy);
3530 }
3531
3532 static void mvneta_start_dev(struct mvneta_port *pp)
3533 {
3534 int cpu;
3535
3536 WARN_ON(mvneta_comphy_init(pp));
3537
3538 mvneta_max_rx_size_set(pp, pp->pkt_size);
3539 mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
3540
3541 /* start the Rx/Tx activity */
3542 mvneta_port_enable(pp);
3543
3544 if (!pp->neta_armada3700) {
3545 /* Enable polling on the port */
3546 for_each_online_cpu(cpu) {
3547 struct mvneta_pcpu_port *port =
3548 per_cpu_ptr(pp->ports, cpu);
3549
3550 napi_enable(&port->napi);
3551 }
3552 } else {
3553 napi_enable(&pp->napi);
3554 }
3555
3556 /* Unmask interrupts. It has to be done from each CPU */
3557 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
3558
3559 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
3560 MVNETA_CAUSE_PHY_STATUS_CHANGE |
3561 MVNETA_CAUSE_LINK_CHANGE);
3562
3563 phylink_start(pp->phylink);
3564 netif_tx_start_all_queues(pp->dev);
3565 }
3566
3567 static void mvneta_stop_dev(struct mvneta_port *pp)
3568 {
3569 unsigned int cpu;
3570
3571 phylink_stop(pp->phylink);
3572
3573 if (!pp->neta_armada3700) {
3574 for_each_online_cpu(cpu) {
3575 struct mvneta_pcpu_port *port =
3576 per_cpu_ptr(pp->ports, cpu);
3577
3578 napi_disable(&port->napi);
3579 }
3580 } else {
3581 napi_disable(&pp->napi);
3582 }
3583
3584 netif_carrier_off(pp->dev);
3585
3586 mvneta_port_down(pp);
3587 netif_tx_stop_all_queues(pp->dev);
3588
3589 /* Stop the port activity */
3590 mvneta_port_disable(pp);
3591
3592 /* Clear all ethernet port interrupts */
3593 on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
3594
3595 /* Mask all ethernet port interrupts */
3596 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3597
3598 mvneta_tx_reset(pp);
3599 mvneta_rx_reset(pp);
3600
3601 WARN_ON(phy_power_off(pp->comphy));
3602 }
3603
3604 static void mvneta_percpu_enable(void *arg)
3605 {
3606 struct mvneta_port *pp = arg;
3607
3608 enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
3609 }
3610
3611 static void mvneta_percpu_disable(void *arg)
3612 {
3613 struct mvneta_port *pp = arg;
3614
3615 disable_percpu_irq(pp->dev->irq);
3616 }
3617
3618 /* Change the device mtu */
3619 static int mvneta_change_mtu(struct net_device *dev, int mtu)
3620 {
3621 struct mvneta_port *pp = netdev_priv(dev);
3622 int ret;
3623
3624 if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
3625 netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
3626 mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
3627 mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
3628 }
3629
3630 if (pp->xdp_prog && mtu > MVNETA_MAX_RX_BUF_SIZE) {
3631 netdev_info(dev, "Illegal MTU value %d for XDP mode\n", mtu);
3632 return -EINVAL;
3633 }
3634
3635 dev->mtu = mtu;
3636
3637 if (!netif_running(dev)) {
3638 if (pp->bm_priv)
3639 mvneta_bm_update_mtu(pp, mtu);
3640
3641 netdev_update_features(dev);
3642 return 0;
3643 }
3644
3645 /* The interface is running, so we have to force a
3646 * reallocation of the queues
3647 */
3648 mvneta_stop_dev(pp);
3649 on_each_cpu(mvneta_percpu_disable, pp, true);
3650
3651 mvneta_cleanup_txqs(pp);
3652 mvneta_cleanup_rxqs(pp);
3653
3654 if (pp->bm_priv)
3655 mvneta_bm_update_mtu(pp, mtu);
3656
3657 pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
3658
3659 ret = mvneta_setup_rxqs(pp);
3660 if (ret) {
3661 netdev_err(dev, "unable to setup rxqs after MTU change\n");
3662 return ret;
3663 }
3664
3665 ret = mvneta_setup_txqs(pp);
3666 if (ret) {
3667 netdev_err(dev, "unable to setup txqs after MTU change\n");
3668 return ret;
3669 }
3670
3671 on_each_cpu(mvneta_percpu_enable, pp, true);
3672 mvneta_start_dev(pp);
3673
3674 netdev_update_features(dev);
3675
3676 return 0;
3677 }
3678
3679 static netdev_features_t mvneta_fix_features(struct net_device *dev,
3680 netdev_features_t features)
3681 {
3682 struct mvneta_port *pp = netdev_priv(dev);
3683
3684 if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
3685 features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
3686 netdev_info(dev,
3687 "Disable IP checksum for MTU greater than %dB\n",
3688 pp->tx_csum_limit);
3689 }
3690
3691 return features;
3692 }
3693
3694 /* Get mac address */
3695 static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
3696 {
3697 u32 mac_addr_l, mac_addr_h;
3698
3699 mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
3700 mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
3701 addr[0] = (mac_addr_h >> 24) & 0xFF;
3702 addr[1] = (mac_addr_h >> 16) & 0xFF;
3703 addr[2] = (mac_addr_h >> 8) & 0xFF;
3704 addr[3] = mac_addr_h & 0xFF;
3705 addr[4] = (mac_addr_l >> 8) & 0xFF;
3706 addr[5] = mac_addr_l & 0xFF;
3707 }
3708
3709 /* Handle setting mac address */
3710 static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
3711 {
3712 struct mvneta_port *pp = netdev_priv(dev);
3713 struct sockaddr *sockaddr = addr;
3714 int ret;
3715
3716 ret = eth_prepare_mac_addr_change(dev, addr);
3717 if (ret < 0)
3718 return ret;
3719 /* Remove previous address table entry */
3720 mvneta_mac_addr_set(pp, dev->dev_addr, -1);
3721
3722 /* Set new addr in hw */
3723 mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def);
3724
3725 eth_commit_mac_addr_change(dev, addr);
3726 return 0;
3727 }
3728
3729 static void mvneta_validate(struct phylink_config *config,
3730 unsigned long *supported,
3731 struct phylink_link_state *state)
3732 {
3733 struct net_device *ndev = to_net_dev(config->dev);
3734 struct mvneta_port *pp = netdev_priv(ndev);
3735 __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
3736
3737 /* We only support QSGMII, SGMII, 802.3z and RGMII modes */
3738 if (state->interface != PHY_INTERFACE_MODE_NA &&
3739 state->interface != PHY_INTERFACE_MODE_QSGMII &&
3740 state->interface != PHY_INTERFACE_MODE_SGMII &&
3741 !phy_interface_mode_is_8023z(state->interface) &&
3742 !phy_interface_mode_is_rgmii(state->interface)) {
3743 bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
3744 return;
3745 }
3746
3747 /* Allow all the expected bits */
3748 phylink_set(mask, Autoneg);
3749 phylink_set_port_modes(mask);
3750
3751 /* Asymmetric pause is unsupported */
3752 phylink_set(mask, Pause);
3753
3754 /* Half-duplex at speeds higher than 100Mbit is unsupported */
3755 if (pp->comphy || state->interface != PHY_INTERFACE_MODE_2500BASEX) {
3756 phylink_set(mask, 1000baseT_Full);
3757 phylink_set(mask, 1000baseX_Full);
3758 }
3759 if (pp->comphy || state->interface == PHY_INTERFACE_MODE_2500BASEX) {
3760 phylink_set(mask, 2500baseT_Full);
3761 phylink_set(mask, 2500baseX_Full);
3762 }
3763
3764 if (!phy_interface_mode_is_8023z(state->interface)) {
3765 /* 10M and 100M are only supported in non-802.3z mode */
3766 phylink_set(mask, 10baseT_Half);
3767 phylink_set(mask, 10baseT_Full);
3768 phylink_set(mask, 100baseT_Half);
3769 phylink_set(mask, 100baseT_Full);
3770 }
3771
3772 bitmap_and(supported, supported, mask,
3773 __ETHTOOL_LINK_MODE_MASK_NBITS);
3774 bitmap_and(state->advertising, state->advertising, mask,
3775 __ETHTOOL_LINK_MODE_MASK_NBITS);
3776
3777 /* We can only operate at 2500BaseX or 1000BaseX. If requested
3778 * to advertise both, only report advertising at 2500BaseX.
3779 */
3780 phylink_helper_basex_speed(state);
3781 }
3782
3783 static void mvneta_mac_pcs_get_state(struct phylink_config *config,
3784 struct phylink_link_state *state)
3785 {
3786 struct net_device *ndev = to_net_dev(config->dev);
3787 struct mvneta_port *pp = netdev_priv(ndev);
3788 u32 gmac_stat;
3789
3790 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
3791
3792 if (gmac_stat & MVNETA_GMAC_SPEED_1000)
3793 state->speed =
3794 state->interface == PHY_INTERFACE_MODE_2500BASEX ?
3795 SPEED_2500 : SPEED_1000;
3796 else if (gmac_stat & MVNETA_GMAC_SPEED_100)
3797 state->speed = SPEED_100;
3798 else
3799 state->speed = SPEED_10;
3800
3801 state->an_complete = !!(gmac_stat & MVNETA_GMAC_AN_COMPLETE);
3802 state->link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
3803 state->duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
3804
3805 state->pause = 0;
3806 if (gmac_stat & MVNETA_GMAC_RX_FLOW_CTRL_ENABLE)
3807 state->pause |= MLO_PAUSE_RX;
3808 if (gmac_stat & MVNETA_GMAC_TX_FLOW_CTRL_ENABLE)
3809 state->pause |= MLO_PAUSE_TX;
3810 }
3811
3812 static void mvneta_mac_an_restart(struct phylink_config *config)
3813 {
3814 struct net_device *ndev = to_net_dev(config->dev);
3815 struct mvneta_port *pp = netdev_priv(ndev);
3816 u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
3817
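/* Pulse the restart bit: set it to kick off a new in-band
 * autonegotiation cycle, then clear it again.
 */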
3818 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
3819 gmac_an | MVNETA_GMAC_INBAND_RESTART_AN);
3820 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
3821 gmac_an & ~MVNETA_GMAC_INBAND_RESTART_AN);
3822 }
3823
3824 static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
3825 const struct phylink_link_state *state)
3826 {
3827 struct net_device *ndev = to_net_dev(config->dev);
3828 struct mvneta_port *pp = netdev_priv(ndev);
3829 u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
3830 u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
3831 u32 new_ctrl4, gmac_ctrl4 = mvreg_read(pp, MVNETA_GMAC_CTRL_4);
3832 u32 new_clk, gmac_clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
3833 u32 new_an, gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
3834
3835 new_ctrl0 = gmac_ctrl0 & ~MVNETA_GMAC0_PORT_1000BASE_X;
3836 new_ctrl2 = gmac_ctrl2 & ~(MVNETA_GMAC2_INBAND_AN_ENABLE |
3837 MVNETA_GMAC2_PORT_RESET);
3838 new_ctrl4 = gmac_ctrl4 & ~(MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE);
3839 new_clk = gmac_clk & ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
3840 new_an = gmac_an & ~(MVNETA_GMAC_INBAND_AN_ENABLE |
3841 MVNETA_GMAC_INBAND_RESTART_AN |
3842 MVNETA_GMAC_AN_SPEED_EN |
3843 MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL |
3844 MVNETA_GMAC_AN_FLOW_CTRL_EN |
3845 MVNETA_GMAC_AN_DUPLEX_EN);
3846
3847 /* Even though it might look weird, when we're configured in
3848 * SGMII or QSGMII mode, the RGMII bit needs to be set.
3849 */
3850 new_ctrl2 |= MVNETA_GMAC2_PORT_RGMII;
3851
3852 if (state->interface == PHY_INTERFACE_MODE_QSGMII ||
3853 state->interface == PHY_INTERFACE_MODE_SGMII ||
3854 phy_interface_mode_is_8023z(state->interface))
3855 new_ctrl2 |= MVNETA_GMAC2_PCS_ENABLE;
3856
3857 if (phylink_test(state->advertising, Pause))
3858 new_an |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL;
3859
3860 if (!phylink_autoneg_inband(mode)) {
3861 /* Phy or fixed speed - nothing to do, leave the
3862 * configured speed, duplex and flow control as-is.
3863 */
3864 } else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
3865 /* SGMII mode receives the state from the PHY */
3866 new_ctrl2 |= MVNETA_GMAC2_INBAND_AN_ENABLE;
3867 new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
3868 new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
3869 MVNETA_GMAC_FORCE_LINK_PASS |
3870 MVNETA_GMAC_CONFIG_MII_SPEED |
3871 MVNETA_GMAC_CONFIG_GMII_SPEED |
3872 MVNETA_GMAC_CONFIG_FULL_DUPLEX)) |
3873 MVNETA_GMAC_INBAND_AN_ENABLE |
3874 MVNETA_GMAC_AN_SPEED_EN |
3875 MVNETA_GMAC_AN_DUPLEX_EN;
3876 } else {
3877 /* 802.3z negotiation - only 1000base-X */
3878 new_ctrl0 |= MVNETA_GMAC0_PORT_1000BASE_X;
3879 new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
3880 new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
3881 MVNETA_GMAC_FORCE_LINK_PASS |
3882 MVNETA_GMAC_CONFIG_MII_SPEED)) |
3883 MVNETA_GMAC_INBAND_AN_ENABLE |
3884 MVNETA_GMAC_CONFIG_GMII_SPEED |
3885 /* The MAC only supports FD mode */
3886 MVNETA_GMAC_CONFIG_FULL_DUPLEX;
3887
3888 if (state->pause & MLO_PAUSE_AN && state->an_enabled)
3889 new_an |= MVNETA_GMAC_AN_FLOW_CTRL_EN;
3890 }
3891
3892 /* Armada 370 documentation says we can only change the port mode
3893 * and in-band enable when the link is down, so force it down
3894 * while making these changes. We also do this for GMAC_CTRL2. */
3895 if ((new_ctrl0 ^ gmac_ctrl0) & MVNETA_GMAC0_PORT_1000BASE_X ||
3896 (new_ctrl2 ^ gmac_ctrl2) & MVNETA_GMAC2_INBAND_AN_ENABLE ||
3897 (new_an ^ gmac_an) & MVNETA_GMAC_INBAND_AN_ENABLE) {
3898 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
3899 (gmac_an & ~MVNETA_GMAC_FORCE_LINK_PASS) |
3900 MVNETA_GMAC_FORCE_LINK_DOWN);
3901 }
3902
3903
3904 /* When at 2.5G, the link partner can send frames with shortened
3905 * preambles.
3906 */
3907 if (state->speed == SPEED_2500)
3908 new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE;
3909
3910 if (pp->comphy && pp->phy_interface != state->interface &&
3911 (state->interface == PHY_INTERFACE_MODE_SGMII ||
3912 state->interface == PHY_INTERFACE_MODE_1000BASEX ||
3913 state->interface == PHY_INTERFACE_MODE_2500BASEX)) {
3914 pp->phy_interface = state->interface;
3915
3916 WARN_ON(phy_power_off(pp->comphy));
3917 WARN_ON(mvneta_comphy_init(pp));
3918 }
3919
3920 if (new_ctrl0 != gmac_ctrl0)
3921 mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0);
3922 if (new_ctrl2 != gmac_ctrl2)
3923 mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2);
3924 if (new_ctrl4 != gmac_ctrl4)
3925 mvreg_write(pp, MVNETA_GMAC_CTRL_4, new_ctrl4);
3926 if (new_clk != gmac_clk)
3927 mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, new_clk);
3928 if (new_an != gmac_an)
3929 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, new_an);
3930
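/* PORT_RESET was masked out of new_ctrl2 above; if the port was in
 * reset on entry, poll until the hardware reports the reset bit as
 * cleared before returning.
 */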
3931 if (gmac_ctrl2 & MVNETA_GMAC2_PORT_RESET) {
3932 while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
3933 MVNETA_GMAC2_PORT_RESET) != 0)
3934 continue;
3935 }
3936 }
3937
3938 static void mvneta_set_eee(struct mvneta_port *pp, bool enable)
3939 {
3940 u32 lpi_ctl1;
3941
3942 lpi_ctl1 = mvreg_read(pp, MVNETA_LPI_CTRL_1);
3943 if (enable)
3944 lpi_ctl1 |= MVNETA_LPI_REQUEST_ENABLE;
3945 else
3946 lpi_ctl1 &= ~MVNETA_LPI_REQUEST_ENABLE;
3947 mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1);
3948 }
3949
3950 static void mvneta_mac_link_down(struct phylink_config *config,
3951 unsigned int mode, phy_interface_t interface)
3952 {
3953 struct net_device *ndev = to_net_dev(config->dev);
3954 struct mvneta_port *pp = netdev_priv(ndev);
3955 u32 val;
3956
3957 mvneta_port_down(pp);
3958
3959 if (!phylink_autoneg_inband(mode)) {
3960 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
3961 val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
3962 val |= MVNETA_GMAC_FORCE_LINK_DOWN;
3963 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
3964 }
3965
3966 pp->eee_active = false;
3967 mvneta_set_eee(pp, false);
3968 }
3969
3970 static void mvneta_mac_link_up(struct phylink_config *config,
3971 struct phy_device *phy,
3972 unsigned int mode, phy_interface_t interface,
3973 int speed, int duplex,
3974 bool tx_pause, bool rx_pause)
3975 {
3976 struct net_device *ndev = to_net_dev(config->dev);
3977 struct mvneta_port *pp = netdev_priv(ndev);
3978 u32 val;
3979
3980 if (!phylink_autoneg_inband(mode)) {
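/* PHY or fixed-link mode: program the resolved speed, duplex and
 * pause settings directly and force the link up.
 */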
3981 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
3982 val &= ~(MVNETA_GMAC_FORCE_LINK_DOWN |
3983 MVNETA_GMAC_CONFIG_MII_SPEED |
3984 MVNETA_GMAC_CONFIG_GMII_SPEED |
3985 MVNETA_GMAC_CONFIG_FLOW_CTRL |
3986 MVNETA_GMAC_CONFIG_FULL_DUPLEX);
3987 val |= MVNETA_GMAC_FORCE_LINK_PASS;
3988
3989 if (speed == SPEED_1000 || speed == SPEED_2500)
3990 val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
3991 else if (speed == SPEED_100)
3992 val |= MVNETA_GMAC_CONFIG_MII_SPEED;
3993
3994 if (duplex == DUPLEX_FULL)
3995 val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
3996
3997 if (tx_pause || rx_pause)
3998 val |= MVNETA_GMAC_CONFIG_FLOW_CTRL;
3999
4000 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
4001 } else {
4002 /* When inband doesn't cover flow control or flow control is
4003 * disabled, we need to manually configure it. This bit will
4004 * only have effect if MVNETA_GMAC_AN_FLOW_CTRL_EN is unset.
4005 */
4006 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
4007 val &= ~MVNETA_GMAC_CONFIG_FLOW_CTRL;
4008
4009 if (tx_pause || rx_pause)
4010 val |= MVNETA_GMAC_CONFIG_FLOW_CTRL;
4011
4012 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
4013 }
4014
4015 mvneta_port_up(pp);
4016
4017 if (phy && pp->eee_enabled) {
4018 pp->eee_active = phy_init_eee(phy, 0) >= 0;
4019 mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled);
4020 }
4021 }
4022
4023 static const struct phylink_mac_ops mvneta_phylink_ops = {
4024 .validate = mvneta_validate,
4025 .mac_pcs_get_state = mvneta_mac_pcs_get_state,
4026 .mac_an_restart = mvneta_mac_an_restart,
4027 .mac_config = mvneta_mac_config,
4028 .mac_link_down = mvneta_mac_link_down,
4029 .mac_link_up = mvneta_mac_link_up,
4030 };
4031
4032 static int mvneta_mdio_probe(struct mvneta_port *pp)
4033 {
4034 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
4035 int err = phylink_of_phy_connect(pp->phylink, pp->dn, 0);
4036
4037 if (err)
4038 netdev_err(pp->dev, "could not attach PHY: %d\n", err);
4039
4040 phylink_ethtool_get_wol(pp->phylink, &wol);
4041 device_set_wakeup_capable(&pp->dev->dev, !!wol.supported);
4042
4043 return err;
4044 }
4045
4046 static void mvneta_mdio_remove(struct mvneta_port *pp)
4047 {
4048 phylink_disconnect_phy(pp->phylink);
4049 }
4050
4051 /* Electing a CPU must be done in an atomic way: it must not run
4052 * concurrently with the removal/insertion of a CPU, and this function
4053 * is not reentrant.
4054 */
4055 static void mvneta_percpu_elect(struct mvneta_port *pp)
4056 {
4057 int elected_cpu = 0, max_cpu, cpu, i = 0;
4058
4059 /* Use the CPU associated with the rxq when it is online; in all
4060 * other cases, use CPU 0, which can't be offline.
4061 */
4062 if (cpu_online(pp->rxq_def))
4063 elected_cpu = pp->rxq_def;
4064
4065 max_cpu = num_present_cpus();
4066
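/* Spread the RX queues across the CPUs (rxq N is handled by CPU
 * N modulo the number of present CPUs) and make sure the default
 * RX queue is always mapped to the elected CPU.
 */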
4067 for_each_online_cpu(cpu) {
4068 int rxq_map = 0, txq_map = 0;
4069 int rxq;
4070
4071 for (rxq = 0; rxq < rxq_number; rxq++)
4072 if ((rxq % max_cpu) == cpu)
4073 rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
4074
4075 if (cpu == elected_cpu)
4076 /* Map the default receive queue to the
4077 * elected CPU
4078 */
4079 rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def);
4080
4081 /* We update the TX queue map only if we have one
4082 * queue. In this case we associate the TX queue with
4083 * the CPU bound to the default RX queue.
4084 */
4085 if (txq_number == 1)
4086 txq_map = (cpu == elected_cpu) ?
4087 MVNETA_CPU_TXQ_ACCESS(1) : 0;
4088 else
4089 txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
4090 MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
4091
4092 mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
4093
4094 /* Update the interrupt mask on each CPU according to the
4095 * new mapping
4096 */
4097 smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt,
4098 pp, true);
4099 i++;
4100
4101 }
4102 }
4103
4104 static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
4105 {
4106 int other_cpu;
4107 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
4108 node_online);
4109 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
4110
4111
4112 spin_lock(&pp->lock);
4113 /*
4114 * Configuring the driver for a new CPU while the driver is
4115 * stopping is racy, so just avoid it.
4116 */
4117 if (pp->is_stopped) {
4118 spin_unlock(&pp->lock);
4119 return 0;
4120 }
4121 netif_tx_stop_all_queues(pp->dev);
4122
4123 /*
4124 * We have to synchronise on the napi of each CPU except the one
4125 * just being woken up.
4126 */
4127 for_each_online_cpu(other_cpu) {
4128 if (other_cpu != cpu) {
4129 struct mvneta_pcpu_port *other_port =
4130 per_cpu_ptr(pp->ports, other_cpu);
4131
4132 napi_synchronize(&other_port->napi);
4133 }
4134 }
4135
4136 /* Mask all ethernet port interrupts */
4137 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
4138 napi_enable(&port->napi);
4139
4140 /*
4141 * Enable per-CPU interrupts on the CPU that is
4142 * brought up.
4143 */
4144 mvneta_percpu_enable(pp);
4145
4146 /*
4147 * Elect the CPU that will handle the default RX queue and update
4148 * the RXQ-to-CPU mapping accordingly.
4149 */
4150 mvneta_percpu_elect(pp);
4151
4152 /* Unmask all ethernet port interrupts */
4153 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
4154 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
4155 MVNETA_CAUSE_PHY_STATUS_CHANGE |
4156 MVNETA_CAUSE_LINK_CHANGE);
4157 netif_tx_start_all_queues(pp->dev);
4158 spin_unlock(&pp->lock);
4159 return 0;
4160 }
4161
4162 static int mvneta_cpu_down_prepare(unsigned int cpu, struct hlist_node *node)
4163 {
4164 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
4165 node_online);
4166 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
4167
4168 /*
4169 * Taking this lock guarantees that any pending CPU election has
4170 * completed.
4171 */
4172 spin_lock(&pp->lock);
4173 /* Mask all ethernet port interrupts */
4174 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
4175 spin_unlock(&pp->lock);
4176
4177 napi_synchronize(&port->napi);
4178 napi_disable(&port->napi);
4179 /* Disable per-CPU interrupts on the CPU that is brought down. */
4180 mvneta_percpu_disable(pp);
4181 return 0;
4182 }
4183
4184 static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node)
4185 {
4186 struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
4187 node_dead);
4188
4189 /* Check if a new CPU must be elected now this one is down */
4190 spin_lock(&pp->lock);
4191 mvneta_percpu_elect(pp);
4192 spin_unlock(&pp->lock);
4193 /* Unmask all ethernet port interrupts */
4194 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
4195 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
4196 MVNETA_CAUSE_PHY_STATUS_CHANGE |
4197 MVNETA_CAUSE_LINK_CHANGE);
4198 netif_tx_start_all_queues(pp->dev);
4199 return 0;
4200 }
4201
4202 static int mvneta_open(struct net_device *dev)
4203 {
4204 struct mvneta_port *pp = netdev_priv(dev);
4205 int ret;
4206
4207 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
4208
4209 ret = mvneta_setup_rxqs(pp);
4210 if (ret)
4211 return ret;
4212
4213 ret = mvneta_setup_txqs(pp);
4214 if (ret)
4215 goto err_cleanup_rxqs;
4216
4217 /* Connect to port interrupt line */
4218 if (pp->neta_armada3700)
4219 ret = request_irq(pp->dev->irq, mvneta_isr, 0,
4220 dev->name, pp);
4221 else
4222 ret = request_percpu_irq(pp->dev->irq, mvneta_percpu_isr,
4223 dev->name, pp->ports);
4224 if (ret) {
4225 netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
4226 goto err_cleanup_txqs;
4227 }
4228
4229 if (!pp->neta_armada3700) {
4230 /* Enable the per-CPU interrupt on all CPUs to handle our RX
4231 * queue interrupts.
4232 */
4233 on_each_cpu(mvneta_percpu_enable, pp, true);
4234
4235 pp->is_stopped = false;
4236 /* Register a CPU notifier to handle the case where our CPU
4237 * might be taken offline.
4238 */
4239 ret = cpuhp_state_add_instance_nocalls(online_hpstate,
4240 &pp->node_online);
4241 if (ret)
4242 goto err_free_irq;
4243
4244 ret = cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
4245 &pp->node_dead);
4246 if (ret)
4247 goto err_free_online_hp;
4248 }
4249
4250 ret = mvneta_mdio_probe(pp);
4251 if (ret < 0) {
4252 netdev_err(dev, "cannot probe MDIO bus\n");
4253 goto err_free_dead_hp;
4254 }
4255
4256 mvneta_start_dev(pp);
4257
4258 return 0;
4259
4260 err_free_dead_hp:
4261 if (!pp->neta_armada3700)
4262 cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
4263 &pp->node_dead);
4264 err_free_online_hp:
4265 if (!pp->neta_armada3700)
4266 cpuhp_state_remove_instance_nocalls(online_hpstate,
4267 &pp->node_online);
4268 err_free_irq:
4269 if (pp->neta_armada3700) {
4270 free_irq(pp->dev->irq, pp);
4271 } else {
4272 on_each_cpu(mvneta_percpu_disable, pp, true);
4273 free_percpu_irq(pp->dev->irq, pp->ports);
4274 }
4275 err_cleanup_txqs:
4276 mvneta_cleanup_txqs(pp);
4277 err_cleanup_rxqs:
4278 mvneta_cleanup_rxqs(pp);
4279 return ret;
4280 }
4281
4282 /* Stop the port, free port interrupt line */
4283 static int mvneta_stop(struct net_device *dev)
4284 {
4285 struct mvneta_port *pp = netdev_priv(dev);
4286
4287 if (!pp->neta_armada3700) {
4288 /* Inform the notifiers that we are stopping, so that they don't set
4289 * up the driver for new CPUs. The code of the
4290 * notifier for CPU online is protected by the same spinlock,
4291 * so when we get the lock, the notifier work is done.
4292 */
4293 spin_lock(&pp->lock);
4294 pp->is_stopped = true;
4295 spin_unlock(&pp->lock);
4296
4297 mvneta_stop_dev(pp);
4298 mvneta_mdio_remove(pp);
4299
4300 cpuhp_state_remove_instance_nocalls(online_hpstate,
4301 &pp->node_online);
4302 cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
4303 &pp->node_dead);
4304 on_each_cpu(mvneta_percpu_disable, pp, true);
4305 free_percpu_irq(dev->irq, pp->ports);
4306 } else {
4307 mvneta_stop_dev(pp);
4308 mvneta_mdio_remove(pp);
4309 free_irq(dev->irq, pp);
4310 }
4311
4312 mvneta_cleanup_rxqs(pp);
4313 mvneta_cleanup_txqs(pp);
4314
4315 return 0;
4316 }
4317
4318 static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4319 {
4320 struct mvneta_port *pp = netdev_priv(dev);
4321
4322 return phylink_mii_ioctl(pp->phylink, ifr, cmd);
4323 }
4324
4325 static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
4326 struct netlink_ext_ack *extack)
4327 {
4328 bool need_update, running = netif_running(dev);
4329 struct mvneta_port *pp = netdev_priv(dev);
4330 struct bpf_prog *old_prog;
4331
4332 if (prog && dev->mtu > MVNETA_MAX_RX_BUF_SIZE) {
4333 NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported on XDP");
4334 return -EOPNOTSUPP;
4335 }
4336
4337 if (pp->bm_priv) {
4338 NL_SET_ERR_MSG_MOD(extack,
4339 "Hardware Buffer Management not supported on XDP");
4340 return -EOPNOTSUPP;
4341 }
4342
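/* A full stop/start of the interface is only needed when switching
 * between having an XDP program and not having one; replacing one
 * program with another is done with a simple pointer exchange below.
 */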
4343 need_update = !!pp->xdp_prog != !!prog;
4344 if (running && need_update)
4345 mvneta_stop(dev);
4346
4347 old_prog = xchg(&pp->xdp_prog, prog);
4348 if (old_prog)
4349 bpf_prog_put(old_prog);
4350
4351 if (running && need_update)
4352 return mvneta_open(dev);
4353
4354 return 0;
4355 }
4356
4357 static int mvneta_xdp(struct net_device *dev, struct netdev_bpf *xdp)
4358 {
4359 struct mvneta_port *pp = netdev_priv(dev);
4360
4361 switch (xdp->command) {
4362 case XDP_SETUP_PROG:
4363 return mvneta_xdp_setup(dev, xdp->prog, xdp->extack);
4364 case XDP_QUERY_PROG:
4365 xdp->prog_id = pp->xdp_prog ? pp->xdp_prog->aux->id : 0;
4366 return 0;
4367 default:
4368 return -EINVAL;
4369 }
4370 }
4371
4372 /* Ethtool methods */
4373
4374 /* Set link ksettings (phy address, speed) for ethtool */
4375 static int
4376 mvneta_ethtool_set_link_ksettings(struct net_device *ndev,
4377 const struct ethtool_link_ksettings *cmd)
4378 {
4379 struct mvneta_port *pp = netdev_priv(ndev);
4380
4381 return phylink_ethtool_ksettings_set(pp->phylink, cmd);
4382 }
4383
4384 /* Get link ksettings for ethtool */
4385 static int
4386 mvneta_ethtool_get_link_ksettings(struct net_device *ndev,
4387 struct ethtool_link_ksettings *cmd)
4388 {
4389 struct mvneta_port *pp = netdev_priv(ndev);
4390
4391 return phylink_ethtool_ksettings_get(pp->phylink, cmd);
4392 }
4393
4394 static int mvneta_ethtool_nway_reset(struct net_device *dev)
4395 {
4396 struct mvneta_port *pp = netdev_priv(dev);
4397
4398 return phylink_ethtool_nway_reset(pp->phylink);
4399 }
4400
4401 /* Set interrupt coalescing for ethtool */
4402 static int mvneta_ethtool_set_coalesce(struct net_device *dev,
4403 struct ethtool_coalesce *c)
4404 {
4405 struct mvneta_port *pp = netdev_priv(dev);
4406 int queue;
4407
4408 for (queue = 0; queue < rxq_number; queue++) {
4409 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
4410 rxq->time_coal = c->rx_coalesce_usecs;
4411 rxq->pkts_coal = c->rx_max_coalesced_frames;
4412 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
4413 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
4414 }
4415
4416 for (queue = 0; queue < txq_number; queue++) {
4417 struct mvneta_tx_queue *txq = &pp->txqs[queue];
4418 txq->done_pkts_coal = c->tx_max_coalesced_frames;
4419 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
4420 }
4421
4422 return 0;
4423 }
4424
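/* A minimal usage sketch (the interface name eth0 is hypothetical):
 *   ethtool -C eth0 rx-usecs 100 rx-frames 32 tx-frames 16
 * applies the same coalescing values to every RX and TX queue, since
 * the handler above programs each queue from the single global value.
 */
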
4425 /* Get coalescing for ethtool */
4426 static int mvneta_ethtool_get_coalesce(struct net_device *dev,
4427 struct ethtool_coalesce *c)
4428 {
4429 struct mvneta_port *pp = netdev_priv(dev);
4430
4431 c->rx_coalesce_usecs = pp->rxqs[0].time_coal;
4432 c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal;
4433
4434 c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;
4435 return 0;
4436 }
4437
4438
4439 static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
4440 struct ethtool_drvinfo *drvinfo)
4441 {
4442 strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
4443 sizeof(drvinfo->driver));
4444 strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
4445 sizeof(drvinfo->version));
4446 strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
4447 sizeof(drvinfo->bus_info));
4448 }
4449
4450
4451 static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
4452 struct ethtool_ringparam *ring)
4453 {
4454 struct mvneta_port *pp = netdev_priv(netdev);
4455
4456 ring->rx_max_pending = MVNETA_MAX_RXD;
4457 ring->tx_max_pending = MVNETA_MAX_TXD;
4458 ring->rx_pending = pp->rx_ring_size;
4459 ring->tx_pending = pp->tx_ring_size;
4460 }
4461
4462 static int mvneta_ethtool_set_ringparam(struct net_device *dev,
4463 struct ethtool_ringparam *ring)
4464 {
4465 struct mvneta_port *pp = netdev_priv(dev);
4466
4467 if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
4468 return -EINVAL;
4469 pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
4470 ring->rx_pending : MVNETA_MAX_RXD;
4471
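/* Clamp the requested TX ring size between MVNETA_MAX_SKB_DESCS * 2
 * (room for two worst-case skbs) and the hardware maximum, warning if
 * the request had to be adjusted.
 */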
4472 pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
4473 MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD);
4474 if (pp->tx_ring_size != ring->tx_pending)
4475 netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
4476 pp->tx_ring_size, ring->tx_pending);
4477
4478 if (netif_running(dev)) {
4479 mvneta_stop(dev);
4480 if (mvneta_open(dev)) {
4481 netdev_err(dev,
4482 "error on opening device after ring param change\n");
4483 return -ENOMEM;
4484 }
4485 }
4486
4487 return 0;
4488 }
4489
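/* A minimal usage sketch (the interface name eth0 is hypothetical):
 *   ethtool -G eth0 rx 128 tx 1024
 * Changing either ring size restarts the interface if it is running.
 */
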
4490 static void mvneta_ethtool_get_pauseparam(struct net_device *dev,
4491 struct ethtool_pauseparam *pause)
4492 {
4493 struct mvneta_port *pp = netdev_priv(dev);
4494
4495 phylink_ethtool_get_pauseparam(pp->phylink, pause);
4496 }
4497
4498 static int mvneta_ethtool_set_pauseparam(struct net_device *dev,
4499 struct ethtool_pauseparam *pause)
4500 {
4501 struct mvneta_port *pp = netdev_priv(dev);
4502
4503 return phylink_ethtool_set_pauseparam(pp->phylink, pause);
4504 }
4505
4506 static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
4507 u8 *data)
4508 {
4509 if (sset == ETH_SS_STATS) {
4510 int i;
4511
4512 for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
4513 memcpy(data + i * ETH_GSTRING_LEN,
4514 mvneta_statistics[i].name, ETH_GSTRING_LEN);
4515 }
4516 }
4517
4518 static void
4519 mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp,
4520 struct mvneta_ethtool_stats *es)
4521 {
4522 unsigned int start;
4523 int cpu;
4524
4525 for_each_possible_cpu(cpu) {
4526 struct mvneta_pcpu_stats *stats;
4527 u64 skb_alloc_error;
4528 u64 refill_error;
4529 u64 xdp_redirect;
4530 u64 xdp_xmit_err;
4531 u64 xdp_tx_err;
4532 u64 xdp_pass;
4533 u64 xdp_drop;
4534 u64 xdp_xmit;
4535 u64 xdp_tx;
4536
4537 stats = per_cpu_ptr(pp->stats, cpu);
4538 do {
4539 start = u64_stats_fetch_begin_irq(&stats->syncp);
4540 skb_alloc_error = stats->es.skb_alloc_error;
4541 refill_error = stats->es.refill_error;
4542 xdp_redirect = stats->es.ps.xdp_redirect;
4543 xdp_pass = stats->es.ps.xdp_pass;
4544 xdp_drop = stats->es.ps.xdp_drop;
4545 xdp_xmit = stats->es.ps.xdp_xmit;
4546 xdp_xmit_err = stats->es.ps.xdp_xmit_err;
4547 xdp_tx = stats->es.ps.xdp_tx;
4548 xdp_tx_err = stats->es.ps.xdp_tx_err;
4549 } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
4550
4551 es->skb_alloc_error += skb_alloc_error;
4552 es->refill_error += refill_error;
4553 es->ps.xdp_redirect += xdp_redirect;
4554 es->ps.xdp_pass += xdp_pass;
4555 es->ps.xdp_drop += xdp_drop;
4556 es->ps.xdp_xmit += xdp_xmit;
4557 es->ps.xdp_xmit_err += xdp_xmit_err;
4558 es->ps.xdp_tx += xdp_tx;
4559 es->ps.xdp_tx_err += xdp_tx_err;
4560 }
4561 }
4562
4563 static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
4564 {
4565 struct mvneta_ethtool_stats stats = {};
4566 const struct mvneta_statistic *s;
4567 void __iomem *base = pp->base;
4568 u32 high, low;
4569 u64 val;
4570 int i;
4571
4572 mvneta_ethtool_update_pcpu_stats(pp, &stats);
4573 for (i = 0, s = mvneta_statistics;
4574 s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
4575 s++, i++) {
4576 switch (s->type) {
4577 case T_REG_32:
4578 val = readl_relaxed(base + s->offset);
4579 pp->ethtool_stats[i] += val;
4580 break;
4581 case T_REG_64:
4582 /* Docs say to read the low 32 bits, then the high 32 bits */
4583 low = readl_relaxed(base + s->offset);
4584 high = readl_relaxed(base + s->offset + 4);
4585 val = (u64)high << 32 | low;
4586 pp->ethtool_stats[i] += val;
4587 break;
4588 case T_SW:
4589 switch (s->offset) {
4590 case ETHTOOL_STAT_EEE_WAKEUP:
4591 val = phylink_get_eee_err(pp->phylink);
4592 pp->ethtool_stats[i] += val;
4593 break;
4594 case ETHTOOL_STAT_SKB_ALLOC_ERR:
4595 pp->ethtool_stats[i] = stats.skb_alloc_error;
4596 break;
4597 case ETHTOOL_STAT_REFILL_ERR:
4598 pp->ethtool_stats[i] = stats.refill_error;
4599 break;
4600 case ETHTOOL_XDP_REDIRECT:
4601 pp->ethtool_stats[i] = stats.ps.xdp_redirect;
4602 break;
4603 case ETHTOOL_XDP_PASS:
4604 pp->ethtool_stats[i] = stats.ps.xdp_pass;
4605 break;
4606 case ETHTOOL_XDP_DROP:
4607 pp->ethtool_stats[i] = stats.ps.xdp_drop;
4608 break;
4609 case ETHTOOL_XDP_TX:
4610 pp->ethtool_stats[i] = stats.ps.xdp_tx;
4611 break;
4612 case ETHTOOL_XDP_TX_ERR:
4613 pp->ethtool_stats[i] = stats.ps.xdp_tx_err;
4614 break;
4615 case ETHTOOL_XDP_XMIT:
4616 pp->ethtool_stats[i] = stats.ps.xdp_xmit;
4617 break;
4618 case ETHTOOL_XDP_XMIT_ERR:
4619 pp->ethtool_stats[i] = stats.ps.xdp_xmit_err;
4620 break;
4621 }
4622 break;
4623 }
4624 }
4625 }
4626
4627 static void mvneta_ethtool_get_stats(struct net_device *dev,
4628 struct ethtool_stats *stats, u64 *data)
4629 {
4630 struct mvneta_port *pp = netdev_priv(dev);
4631 int i;
4632
4633 mvneta_ethtool_update_stats(pp);
4634
4635 for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
4636 *data++ = pp->ethtool_stats[i];
4637 }
4638
4639 static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
4640 {
4641 if (sset == ETH_SS_STATS)
4642 return ARRAY_SIZE(mvneta_statistics);
4643 return -EOPNOTSUPP;
4644 }
4645
4646 static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev)
4647 {
4648 return MVNETA_RSS_LU_TABLE_SIZE;
4649 }
4650
4651 static int mvneta_ethtool_get_rxnfc(struct net_device *dev,
4652 struct ethtool_rxnfc *info,
4653 u32 *rules __always_unused)
4654 {
4655 switch (info->cmd) {
4656 case ETHTOOL_GRXRINGS:
4657 info->data = rxq_number;
4658 return 0;
4659 case ETHTOOL_GRXFH:
4660 return -EOPNOTSUPP;
4661 default:
4662 return -EOPNOTSUPP;
4663 }
4664 }
4665
4666 static int mvneta_config_rss(struct mvneta_port *pp)
4667 {
4668 int cpu;
4669 u32 val;
4670
4671 netif_tx_stop_all_queues(pp->dev);
4672
4673 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
4674
4675 if (!pp->neta_armada3700) {
4676 /* We have to synchronise on the napi of each CPU */
4677 for_each_online_cpu(cpu) {
4678 struct mvneta_pcpu_port *pcpu_port =
4679 per_cpu_ptr(pp->ports, cpu);
4680
4681 napi_synchronize(&pcpu_port->napi);
4682 napi_disable(&pcpu_port->napi);
4683 }
4684 } else {
4685 napi_synchronize(&pp->napi);
4686 napi_disable(&pp->napi);
4687 }
4688
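/* The first entry of the indirection table selects the default RXQ;
 * the CPU election and unicast mapping below are refreshed to match.
 */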
4689 pp->rxq_def = pp->indir[0];
4690
4691 /* Update unicast mapping */
4692 mvneta_set_rx_mode(pp->dev);
4693
4694 /* Update the portCfg register value according to all RxQueue types */
4695 val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
4696 mvreg_write(pp, MVNETA_PORT_CONFIG, val);
4697
4698 /* Update the elected CPU matching the new rxq_def */
4699 spin_lock(&pp->lock);
4700 mvneta_percpu_elect(pp);
4701 spin_unlock(&pp->lock);
4702
4703 if (!pp->neta_armada3700) {
4704 /* We have to synchronise on the napi of each CPU */
4705 for_each_online_cpu(cpu) {
4706 struct mvneta_pcpu_port *pcpu_port =
4707 per_cpu_ptr(pp->ports, cpu);
4708
4709 napi_enable(&pcpu_port->napi);
4710 }
4711 } else {
4712 napi_enable(&pp->napi);
4713 }
4714
4715 netif_tx_start_all_queues(pp->dev);
4716
4717 return 0;
4718 }
4719
4720 static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
4721 const u8 *key, const u8 hfunc)
4722 {
4723 struct mvneta_port *pp = netdev_priv(dev);
4724
4725 /* Current code for Armada 3700 doesn't support RSS features yet */
4726 if (pp->neta_armada3700)
4727 return -EOPNOTSUPP;
4728
4729 /* We require at least one supported parameter to be changed
4730 * and no change in any of the unsupported parameters
4731 */
4732 if (key ||
4733 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
4734 return -EOPNOTSUPP;
4735
4736 if (!indir)
4737 return 0;
4738
4739 memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE);
4740
4741 return mvneta_config_rss(pp);
4742 }
4743
4744 static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
4745 u8 *hfunc)
4746 {
4747 struct mvneta_port *pp = netdev_priv(dev);
4748
4749 /* Current code for Armada 3700 doesn't support RSS features yet */
4750 if (pp->neta_armada3700)
4751 return -EOPNOTSUPP;
4752
4753 if (hfunc)
4754 *hfunc = ETH_RSS_HASH_TOP;
4755
4756 if (!indir)
4757 return 0;
4758
4759 memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE);
4760
4761 return 0;
4762 }
4763
4764 static void mvneta_ethtool_get_wol(struct net_device *dev,
4765 struct ethtool_wolinfo *wol)
4766 {
4767 struct mvneta_port *pp = netdev_priv(dev);
4768
4769 phylink_ethtool_get_wol(pp->phylink, wol);
4770 }
4771
4772 static int mvneta_ethtool_set_wol(struct net_device *dev,
4773 struct ethtool_wolinfo *wol)
4774 {
4775 struct mvneta_port *pp = netdev_priv(dev);
4776 int ret;
4777
4778 ret = phylink_ethtool_set_wol(pp->phylink, wol);
4779 if (!ret)
4780 device_set_wakeup_enable(&dev->dev, !!wol->wolopts);
4781
4782 return ret;
4783 }
4784
4785 static int mvneta_ethtool_get_eee(struct net_device *dev,
4786 struct ethtool_eee *eee)
4787 {
4788 struct mvneta_port *pp = netdev_priv(dev);
4789 u32 lpi_ctl0;
4790
4791 lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
4792
4793 eee->eee_enabled = pp->eee_enabled;
4794 eee->eee_active = pp->eee_active;
4795 eee->tx_lpi_enabled = pp->tx_lpi_enabled;
4796 eee->tx_lpi_timer = (lpi_ctl0) >> 8; // * scale;
4797
4798 return phylink_ethtool_get_eee(pp->phylink, eee);
4799 }
4800
4801 static int mvneta_ethtool_set_eee(struct net_device *dev,
4802 struct ethtool_eee *eee)
4803 {
4804 struct mvneta_port *pp = netdev_priv(dev);
4805 u32 lpi_ctl0;
4806
4807 /* The Armada 37x documents do not give limits for this other than
4808 * it being an 8-bit register. */
4809 if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255)
4810 return -EINVAL;
4811
4812 lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
4813 lpi_ctl0 &= ~(0xff << 8);
4814 lpi_ctl0 |= eee->tx_lpi_timer << 8;
4815 mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0);
4816
4817 pp->eee_enabled = eee->eee_enabled;
4818 pp->tx_lpi_enabled = eee->tx_lpi_enabled;
4819
4820 mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled);
4821
4822 return phylink_ethtool_set_eee(pp->phylink, eee);
4823 }
4824
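/* A minimal usage sketch (the interface name eth0 is hypothetical):
 *   ethtool --set-eee eth0 eee on tx-lpi on tx-timer 200
 * lands in the handler above, which stores the timer value in the
 * 8-bit field of MVNETA_LPI_CTRL_0 and rejects values above 255 when
 * tx-lpi is enabled.
 */
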
4825 static const struct net_device_ops mvneta_netdev_ops = {
4826 .ndo_open = mvneta_open,
4827 .ndo_stop = mvneta_stop,
4828 .ndo_start_xmit = mvneta_tx,
4829 .ndo_set_rx_mode = mvneta_set_rx_mode,
4830 .ndo_set_mac_address = mvneta_set_mac_addr,
4831 .ndo_change_mtu = mvneta_change_mtu,
4832 .ndo_fix_features = mvneta_fix_features,
4833 .ndo_get_stats64 = mvneta_get_stats64,
4834 .ndo_do_ioctl = mvneta_ioctl,
4835 .ndo_bpf = mvneta_xdp,
4836 .ndo_xdp_xmit = mvneta_xdp_xmit,
4837 };
4838
4839 static const struct ethtool_ops mvneta_eth_tool_ops = {
4840 .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
4841 ETHTOOL_COALESCE_MAX_FRAMES,
4842 .nway_reset = mvneta_ethtool_nway_reset,
4843 .get_link = ethtool_op_get_link,
4844 .set_coalesce = mvneta_ethtool_set_coalesce,
4845 .get_coalesce = mvneta_ethtool_get_coalesce,
4846 .get_drvinfo = mvneta_ethtool_get_drvinfo,
4847 .get_ringparam = mvneta_ethtool_get_ringparam,
4848 .set_ringparam = mvneta_ethtool_set_ringparam,
4849 .get_pauseparam = mvneta_ethtool_get_pauseparam,
4850 .set_pauseparam = mvneta_ethtool_set_pauseparam,
4851 .get_strings = mvneta_ethtool_get_strings,
4852 .get_ethtool_stats = mvneta_ethtool_get_stats,
4853 .get_sset_count = mvneta_ethtool_get_sset_count,
4854 .get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size,
4855 .get_rxnfc = mvneta_ethtool_get_rxnfc,
4856 .get_rxfh = mvneta_ethtool_get_rxfh,
4857 .set_rxfh = mvneta_ethtool_set_rxfh,
4858 .get_link_ksettings = mvneta_ethtool_get_link_ksettings,
4859 .set_link_ksettings = mvneta_ethtool_set_link_ksettings,
4860 .get_wol = mvneta_ethtool_get_wol,
4861 .set_wol = mvneta_ethtool_set_wol,
4862 .get_eee = mvneta_ethtool_get_eee,
4863 .set_eee = mvneta_ethtool_set_eee,
4864 };
4865
4866 /* Initialize hw */
4867 static int mvneta_init(struct device *dev, struct mvneta_port *pp)
4868 {
4869 int queue;
4870
4871 /* Disable port */
4872 mvneta_port_disable(pp);
4873
4874 /* Set port default values */
4875 mvneta_defaults_set(pp);
4876
4877 pp->txqs = devm_kcalloc(dev, txq_number, sizeof(*pp->txqs), GFP_KERNEL);
4878 if (!pp->txqs)
4879 return -ENOMEM;
4880
4881 /* Initialize TX descriptor rings */
4882 for (queue = 0; queue < txq_number; queue++) {
4883 struct mvneta_tx_queue *txq = &pp->txqs[queue];
4884 txq->id = queue;
4885 txq->size = pp->tx_ring_size;
4886 txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
4887 }
4888
4889 pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*pp->rxqs), GFP_KERNEL);
4890 if (!pp->rxqs)
4891 return -ENOMEM;
4892
4893 /* Create Rx descriptor rings */
4894 for (queue = 0; queue < rxq_number; queue++) {
4895 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
4896 rxq->id = queue;
4897 rxq->size = pp->rx_ring_size;
4898 rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
4899 rxq->time_coal = MVNETA_RX_COAL_USEC;
4900 rxq->buf_virt_addr
4901 = devm_kmalloc_array(pp->dev->dev.parent,
4902 rxq->size,
4903 sizeof(*rxq->buf_virt_addr),
4904 GFP_KERNEL);
4905 if (!rxq->buf_virt_addr)
4906 return -ENOMEM;
4907 }
4908
4909 return 0;
4910 }
4911
4912 /* Platform glue: initialize decoding windows */
4913 static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
4914 const struct mbus_dram_target_info *dram)
4915 {
4916 u32 win_enable;
4917 u32 win_protect;
4918 int i;
4919
4920 for (i = 0; i < 6; i++) {
4921 mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
4922 mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
4923
4924 if (i < 4)
4925 mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
4926 }
4927
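/* Start with all six window bits set; each window configured below
 * clears its bit before the mask is written to MVNETA_BASE_ADDR_ENABLE.
 */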
4928 win_enable = 0x3f;
4929 win_protect = 0;
4930
4931 if (dram) {
4932 for (i = 0; i < dram->num_cs; i++) {
4933 const struct mbus_dram_window *cs = dram->cs + i;
4934
4935 mvreg_write(pp, MVNETA_WIN_BASE(i),
4936 (cs->base & 0xffff0000) |
4937 (cs->mbus_attr << 8) |
4938 dram->mbus_dram_target_id);
4939
4940 mvreg_write(pp, MVNETA_WIN_SIZE(i),
4941 (cs->size - 1) & 0xffff0000);
4942
4943 win_enable &= ~(1 << i);
4944 win_protect |= 3 << (2 * i);
4945 }
4946 } else {
4947 /* For Armada3700, open the default 4GB Mbus window, leaving
4948 * arbitration of target/attribute to a different layer
4949 * of configuration.
4950 */
4951 mvreg_write(pp, MVNETA_WIN_SIZE(0), 0xffff0000);
4952 win_enable &= ~BIT(0);
4953 win_protect = 3;
4954 }
4955
4956 mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
4957 mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
4958 }
4959
4960 /* Power up the port */
4961 static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
4962 {
4963 /* MAC Cause register should be cleared */
4964 mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
4965
4966 if (phy_mode == PHY_INTERFACE_MODE_QSGMII)
4967 mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
4968 else if (phy_mode == PHY_INTERFACE_MODE_SGMII ||
4969 phy_interface_mode_is_8023z(phy_mode))
4970 mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
4971 else if (!phy_interface_mode_is_rgmii(phy_mode))
4972 return -EINVAL;
4973
4974 return 0;
4975 }
4976
4977 /* Device initialization routine */
4978 static int mvneta_probe(struct platform_device *pdev)
4979 {
4980 struct device_node *dn = pdev->dev.of_node;
4981 struct device_node *bm_node;
4982 struct mvneta_port *pp;
4983 struct net_device *dev;
4984 struct phylink *phylink;
4985 struct phy *comphy;
4986 const char *dt_mac_addr;
4987 char hw_mac_addr[ETH_ALEN];
4988 phy_interface_t phy_mode;
4989 const char *mac_from;
4990 int tx_csum_limit;
4991 int err;
4992 int cpu;
4993
4994 dev = devm_alloc_etherdev_mqs(&pdev->dev, sizeof(struct mvneta_port),
4995 txq_number, rxq_number);
4996 if (!dev)
4997 return -ENOMEM;
4998
4999 dev->irq = irq_of_parse_and_map(dn, 0);
5000 if (dev->irq == 0)
5001 return -EINVAL;
5002
5003 err = of_get_phy_mode(dn, &phy_mode);
5004 if (err) {
5005 dev_err(&pdev->dev, "incorrect phy-mode\n");
5006 goto err_free_irq;
5007 }
5008
5009 comphy = devm_of_phy_get(&pdev->dev, dn, NULL);
5010 if (comphy == ERR_PTR(-EPROBE_DEFER)) {
5011 err = -EPROBE_DEFER;
5012 goto err_free_irq;
5013 } else if (IS_ERR(comphy)) {
5014 comphy = NULL;
5015 }
5016
5017 pp = netdev_priv(dev);
5018 spin_lock_init(&pp->lock);
5019
5020 pp->phylink_config.dev = &dev->dev;
5021 pp->phylink_config.type = PHYLINK_NETDEV;
5022
5023 phylink = phylink_create(&pp->phylink_config, pdev->dev.fwnode,
5024 phy_mode, &mvneta_phylink_ops);
5025 if (IS_ERR(phylink)) {
5026 err = PTR_ERR(phylink);
5027 goto err_free_irq;
5028 }
5029
5030 dev->tx_queue_len = MVNETA_MAX_TXD;
5031 dev->watchdog_timeo = 5 * HZ;
5032 dev->netdev_ops = &mvneta_netdev_ops;
5033
5034 dev->ethtool_ops = &mvneta_eth_tool_ops;
5035
5036 pp->phylink = phylink;
5037 pp->comphy = comphy;
5038 pp->phy_interface = phy_mode;
5039 pp->dn = dn;
5040
5041 pp->rxq_def = rxq_def;
5042 pp->indir[0] = rxq_def;
5043
5044 /* Get special SoC configurations */
5045 if (of_device_is_compatible(dn, "marvell,armada-3700-neta"))
5046 pp->neta_armada3700 = true;
5047
5048 pp->clk = devm_clk_get(&pdev->dev, "core");
5049 if (IS_ERR(pp->clk))
5050 pp->clk = devm_clk_get(&pdev->dev, NULL);
5051 if (IS_ERR(pp->clk)) {
5052 err = PTR_ERR(pp->clk);
5053 goto err_free_phylink;
5054 }
5055
5056 clk_prepare_enable(pp->clk);
5057
5058 pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
5059 if (!IS_ERR(pp->clk_bus))
5060 clk_prepare_enable(pp->clk_bus);
5061
5062 pp->base = devm_platform_ioremap_resource(pdev, 0);
5063 if (IS_ERR(pp->base)) {
5064 err = PTR_ERR(pp->base);
5065 goto err_clk;
5066 }
5067
5068 /* Alloc per-cpu port structure */
5069 pp->ports = alloc_percpu(struct mvneta_pcpu_port);
5070 if (!pp->ports) {
5071 err = -ENOMEM;
5072 goto err_clk;
5073 }
5074
5075 /* Alloc per-cpu stats */
5076 pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
5077 if (!pp->stats) {
5078 err = -ENOMEM;
5079 goto err_free_ports;
5080 }
5081
5082 dt_mac_addr = of_get_mac_address(dn);
5083 if (!IS_ERR(dt_mac_addr)) {
5084 mac_from = "device tree";
5085 ether_addr_copy(dev->dev_addr, dt_mac_addr);
5086 } else {
5087 mvneta_get_mac_addr(pp, hw_mac_addr);
5088 if (is_valid_ether_addr(hw_mac_addr)) {
5089 mac_from = "hardware";
5090 memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
5091 } else {
5092 mac_from = "random";
5093 eth_hw_addr_random(dev);
5094 }
5095 }
5096
5097 if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) {
5098 if (tx_csum_limit < 0 ||
5099 tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) {
5100 tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
5101 dev_info(&pdev->dev,
5102 "Wrong TX csum limit in DT, set to %dB\n",
5103 MVNETA_TX_CSUM_DEF_SIZE);
5104 }
5105 } else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) {
5106 tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
5107 } else {
5108 tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE;
5109 }
5110
5111 pp->tx_csum_limit = tx_csum_limit;
5112
5113 pp->dram_target_info = mv_mbus_dram_info();
5114 /* Armada3700 requires setting the default Mbus window
5115 * configuration, but without using a filled mbus_dram_target_info
5116 * structure.
5117 */
5118 if (pp->dram_target_info || pp->neta_armada3700)
5119 mvneta_conf_mbus_windows(pp, pp->dram_target_info);
5120
5121 pp->tx_ring_size = MVNETA_MAX_TXD;
5122 pp->rx_ring_size = MVNETA_MAX_RXD;
5123
5124 pp->dev = dev;
5125 SET_NETDEV_DEV(dev, &pdev->dev);
5126
5127 pp->id = global_port_id++;
5128
5129 /* Obtain access to BM resources if enabled and already initialized */
5130 bm_node = of_parse_phandle(dn, "buffer-manager", 0);
5131 if (bm_node) {
5132 pp->bm_priv = mvneta_bm_get(bm_node);
5133 if (pp->bm_priv) {
5134 err = mvneta_bm_port_init(pdev, pp);
5135 if (err < 0) {
5136 dev_info(&pdev->dev,
5137 "use SW buffer management\n");
5138 mvneta_bm_put(pp->bm_priv);
5139 pp->bm_priv = NULL;
5140 }
5141 }
5142 /* Set RX packet offset correction for platforms whose
5143 * NET_SKB_PAD exceeds 64B. It should be 64B for 64-bit
5144 * platforms and 0B for 32-bit ones.
5145 */
5146 pp->rx_offset_correction = max(0,
5147 NET_SKB_PAD -
5148 MVNETA_RX_PKT_OFFSET_CORRECTION);
5149 }
5150 of_node_put(bm_node);
5151
5152 /* sw buffer management */
5153 if (!pp->bm_priv)
5154 pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
5155
5156 err = mvneta_init(&pdev->dev, pp);
5157 if (err < 0)
5158 goto err_netdev;
5159
5160 err = mvneta_port_power_up(pp, phy_mode);
5161 if (err < 0) {
5162 dev_err(&pdev->dev, "can't power up port\n");
5163 goto err_netdev;
5164 }
5165
5166 /* Armada3700 network controller does not support per-cpu
5167 * operation, so only a single NAPI instance should be initialized.
5168 */
5169 if (pp->neta_armada3700) {
5170 netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
5171 } else {
5172 for_each_present_cpu(cpu) {
5173 struct mvneta_pcpu_port *port =
5174 per_cpu_ptr(pp->ports, cpu);
5175
5176 netif_napi_add(dev, &port->napi, mvneta_poll,
5177 NAPI_POLL_WEIGHT);
5178 port->pp = pp;
5179 }
5180 }
5181
5182 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
5183 NETIF_F_TSO | NETIF_F_RXCSUM;
5184 dev->hw_features |= dev->features;
5185 dev->vlan_features |= dev->features;
5186 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
5187 dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;
5188
5189 /* MTU range: 68 - 9676 */
5190 dev->min_mtu = ETH_MIN_MTU;
5191 /* 9676 == 9700 - 20 and rounding to 8 */
5192 dev->max_mtu = 9676;
5193
5194 err = register_netdev(dev);
5195 if (err < 0) {
5196 dev_err(&pdev->dev, "failed to register\n");
5197 goto err_netdev;
5198 }
5199
5200 netdev_info(dev, "Using %s mac address %pM\n", mac_from,
5201 dev->dev_addr);
5202
5203 platform_set_drvdata(pdev, pp->dev);
5204
5205 return 0;
5206
5207 err_netdev:
5208 if (pp->bm_priv) {
5209 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
5210 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
5211 1 << pp->id);
5212 mvneta_bm_put(pp->bm_priv);
5213 }
5214 free_percpu(pp->stats);
5215 err_free_ports:
5216 free_percpu(pp->ports);
5217 err_clk:
5218 clk_disable_unprepare(pp->clk_bus);
5219 clk_disable_unprepare(pp->clk);
5220 err_free_phylink:
5221 if (pp->phylink)
5222 phylink_destroy(pp->phylink);
5223 err_free_irq:
5224 irq_dispose_mapping(dev->irq);
5225 return err;
5226 }
5227
5228 /* Device removal routine */
5229 static int mvneta_remove(struct platform_device *pdev)
5230 {
5231 struct net_device *dev = platform_get_drvdata(pdev);
5232 struct mvneta_port *pp = netdev_priv(dev);
5233
5234 unregister_netdev(dev);
5235 clk_disable_unprepare(pp->clk_bus);
5236 clk_disable_unprepare(pp->clk);
5237 free_percpu(pp->ports);
5238 free_percpu(pp->stats);
5239 irq_dispose_mapping(dev->irq);
5240 phylink_destroy(pp->phylink);
5241
5242 if (pp->bm_priv) {
5243 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
5244 mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
5245 1 << pp->id);
5246 mvneta_bm_put(pp->bm_priv);
5247 }
5248
5249 return 0;
5250 }
5251
5252 #ifdef CONFIG_PM_SLEEP
5253 static int mvneta_suspend(struct device *device)
5254 {
5255 int queue;
5256 struct net_device *dev = dev_get_drvdata(device);
5257 struct mvneta_port *pp = netdev_priv(dev);
5258
5259 if (!netif_running(dev))
5260 goto clean_exit;
5261
5262 if (!pp->neta_armada3700) {
5263 spin_lock(&pp->lock);
5264 pp->is_stopped = true;
5265 spin_unlock(&pp->lock);
5266
5267 cpuhp_state_remove_instance_nocalls(online_hpstate,
5268 &pp->node_online);
5269 cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
5270 &pp->node_dead);
5271 }
5272
5273 rtnl_lock();
5274 mvneta_stop_dev(pp);
5275 rtnl_unlock();
5276
5277 for (queue = 0; queue < rxq_number; queue++) {
5278 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
5279
5280 mvneta_rxq_drop_pkts(pp, rxq);
5281 }
5282
5283 for (queue = 0; queue < txq_number; queue++) {
5284 struct mvneta_tx_queue *txq = &pp->txqs[queue];
5285
5286 mvneta_txq_hw_deinit(pp, txq);
5287 }
5288
5289 clean_exit:
5290 netif_device_detach(dev);
5291 clk_disable_unprepare(pp->clk_bus);
5292 clk_disable_unprepare(pp->clk);
5293
5294 return 0;
5295 }
5296
5297 static int mvneta_resume(struct device *device)
5298 {
5299 struct platform_device *pdev = to_platform_device(device);
5300 struct net_device *dev = dev_get_drvdata(device);
5301 struct mvneta_port *pp = netdev_priv(dev);
5302 int err, queue;
5303
5304 clk_prepare_enable(pp->clk);
5305 if (!IS_ERR(pp->clk_bus))
5306 clk_prepare_enable(pp->clk_bus);
5307 if (pp->dram_target_info || pp->neta_armada3700)
5308 mvneta_conf_mbus_windows(pp, pp->dram_target_info);
5309 if (pp->bm_priv) {
5310 err = mvneta_bm_port_init(pdev, pp);
5311 if (err < 0) {
5312 dev_info(&pdev->dev, "use SW buffer management\n");
5313 pp->rx_offset_correction = MVNETA_SKB_HEADROOM;
5314 pp->bm_priv = NULL;
5315 }
5316 }
5317 mvneta_defaults_set(pp);
5318 err = mvneta_port_power_up(pp, pp->phy_interface);
5319 if (err < 0) {
5320 dev_err(device, "can't power up port\n");
5321 return err;
5322 }
5323
5324 netif_device_attach(dev);
5325
5326 if (!netif_running(dev))
5327 return 0;
5328
5329 for (queue = 0; queue < rxq_number; queue++) {
5330 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
5331
5332 rxq->next_desc_to_proc = 0;
5333 mvneta_rxq_hw_init(pp, rxq);
5334 }
5335
5336 for (queue = 0; queue < txq_number; queue++) {
5337 struct mvneta_tx_queue *txq = &pp->txqs[queue];
5338
5339 txq->next_desc_to_proc = 0;
5340 mvneta_txq_hw_init(pp, txq);
5341 }
5342
5343 if (!pp->neta_armada3700) {
5344 spin_lock(&pp->lock);
5345 pp->is_stopped = false;
5346 spin_unlock(&pp->lock);
5347 cpuhp_state_add_instance_nocalls(online_hpstate,
5348 &pp->node_online);
5349 cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
5350 &pp->node_dead);
5351 }
5352
5353 rtnl_lock();
5354 mvneta_start_dev(pp);
5355 rtnl_unlock();
5356 mvneta_set_rx_mode(dev);
5357
5358 return 0;
5359 }
5360 #endif
5361
5362 static SIMPLE_DEV_PM_OPS(mvneta_pm_ops, mvneta_suspend, mvneta_resume);
5363
5364 static const struct of_device_id mvneta_match[] = {
5365 { .compatible = "marvell,armada-370-neta" },
5366 { .compatible = "marvell,armada-xp-neta" },
5367 { .compatible = "marvell,armada-3700-neta" },
5368 { }
5369 };
5370 MODULE_DEVICE_TABLE(of, mvneta_match);
5371
5372 static struct platform_driver mvneta_driver = {
5373 .probe = mvneta_probe,
5374 .remove = mvneta_remove,
5375 .driver = {
5376 .name = MVNETA_DRIVER_NAME,
5377 .of_match_table = mvneta_match,
5378 .pm = &mvneta_pm_ops,
5379 },
5380 };
5381
5382 static int __init mvneta_driver_init(void)
5383 {
5384 int ret;
5385
5386 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/mvneta:online",
5387 mvneta_cpu_online,
5388 mvneta_cpu_down_prepare);
5389 if (ret < 0)
5390 goto out;
5391 online_hpstate = ret;
5392 ret = cpuhp_setup_state_multi(CPUHP_NET_MVNETA_DEAD, "net/mvneta:dead",
5393 NULL, mvneta_cpu_dead);
5394 if (ret)
5395 goto err_dead;
5396
5397 ret = platform_driver_register(&mvneta_driver);
5398 if (ret)
5399 goto err;
5400 return 0;
5401
5402 err:
5403 cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
5404 err_dead:
5405 cpuhp_remove_multi_state(online_hpstate);
5406 out:
5407 return ret;
5408 }
5409 module_init(mvneta_driver_init);
5410
5411 static void __exit mvneta_driver_exit(void)
5412 {
5413 platform_driver_unregister(&mvneta_driver);
5414 cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
5415 cpuhp_remove_multi_state(online_hpstate);
5416 }
5417 module_exit(mvneta_driver_exit);
5418
5419 MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
5420 MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
5421 MODULE_LICENSE("GPL");
5422
5423 module_param(rxq_number, int, 0444);
5424 module_param(txq_number, int, 0444);
5425
5426 module_param(rxq_def, int, 0444);
5427 module_param(rx_copybreak, int, 0644);