/*
 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
 *
 * Copyright (C) 2012 Marvell
 *
 * Rami Rosen <rosenr@marvell.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/inetdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>

#define MVNETA_RXQ_CONFIG_REG(q)		(0x1400 + ((q) << 2))
#define      MVNETA_RXQ_HW_BUF_ALLOC		BIT(0)
#define      MVNETA_RXQ_PKT_OFFSET_ALL_MASK	(0xf << 8)
#define      MVNETA_RXQ_PKT_OFFSET_MASK(offs)	((offs) << 8)
#define MVNETA_RXQ_THRESHOLD_REG(q)		(0x14c0 + ((q) << 2))
#define      MVNETA_RXQ_NON_OCCUPIED(v)		((v) << 16)
#define MVNETA_RXQ_BASE_ADDR_REG(q)		(0x1480 + ((q) << 2))
#define MVNETA_RXQ_SIZE_REG(q)			(0x14a0 + ((q) << 2))
#define      MVNETA_RXQ_BUF_SIZE_SHIFT		19
#define      MVNETA_RXQ_BUF_SIZE_MASK		(0x1fff << 19)
#define MVNETA_RXQ_STATUS_REG(q)		(0x14e0 + ((q) << 2))
#define      MVNETA_RXQ_OCCUPIED_ALL_MASK	0x3fff
#define MVNETA_RXQ_STATUS_UPDATE_REG(q)		(0x1500 + ((q) << 2))
#define      MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT	16
#define      MVNETA_RXQ_ADD_NON_OCCUPIED_MAX	255
#define MVNETA_PORT_RX_RESET			0x1cc0
#define      MVNETA_PORT_RX_DMA_RESET		BIT(0)
#define MVNETA_PHY_ADDR				0x2000
#define      MVNETA_PHY_ADDR_MASK		0x1f
#define MVNETA_MBUS_RETRY			0x2010
#define MVNETA_UNIT_INTR_CAUSE			0x2080
#define MVNETA_UNIT_CONTROL			0x20B0
#define      MVNETA_PHY_POLLING_ENABLE		BIT(1)
#define MVNETA_WIN_BASE(w)			(0x2200 + ((w) << 3))
#define MVNETA_WIN_SIZE(w)			(0x2204 + ((w) << 3))
#define MVNETA_WIN_REMAP(w)			(0x2280 + ((w) << 2))
#define MVNETA_BASE_ADDR_ENABLE			0x2290
#define MVNETA_ACCESS_PROTECT_ENABLE		0x2294
#define MVNETA_PORT_CONFIG			0x2400
#define      MVNETA_UNI_PROMISC_MODE		BIT(0)
#define      MVNETA_DEF_RXQ(q)			((q) << 1)
#define      MVNETA_DEF_RXQ_ARP(q)		((q) << 4)
#define      MVNETA_TX_UNSET_ERR_SUM		BIT(12)
#define      MVNETA_DEF_RXQ_TCP(q)		((q) << 16)
#define      MVNETA_DEF_RXQ_UDP(q)		((q) << 19)
#define      MVNETA_DEF_RXQ_BPDU(q)		((q) << 22)
#define      MVNETA_RX_CSUM_WITH_PSEUDO_HDR	BIT(25)
#define      MVNETA_PORT_CONFIG_DEFL_VALUE(q)	(MVNETA_DEF_RXQ(q)       | \
						 MVNETA_DEF_RXQ_ARP(q)	 | \
						 MVNETA_DEF_RXQ_TCP(q)	 | \
						 MVNETA_DEF_RXQ_UDP(q)	 | \
						 MVNETA_DEF_RXQ_BPDU(q)	 | \
						 MVNETA_TX_UNSET_ERR_SUM | \
						 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
#define MVNETA_PORT_CONFIG_EXTEND		0x2404
#define MVNETA_MAC_ADDR_LOW			0x2414
#define MVNETA_MAC_ADDR_HIGH			0x2418
#define MVNETA_SDMA_CONFIG			0x241c
#define      MVNETA_SDMA_BRST_SIZE_16		4
#define      MVNETA_RX_BRST_SZ_MASK(burst)	((burst) << 1)
#define      MVNETA_RX_NO_DATA_SWAP		BIT(4)
#define      MVNETA_TX_NO_DATA_SWAP		BIT(5)
#define      MVNETA_DESC_SWAP			BIT(6)
#define      MVNETA_TX_BRST_SZ_MASK(burst)	((burst) << 22)
#define MVNETA_PORT_STATUS			0x2444
#define      MVNETA_TX_IN_PRGRS			BIT(1)
#define      MVNETA_TX_FIFO_EMPTY		BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE		0x247c
#define MVNETA_SERDES_CFG			0x24A0
#define      MVNETA_SGMII_SERDES_PROTO		0x0cc7
#define      MVNETA_QSGMII_SERDES_PROTO		0x0667
#define MVNETA_TYPE_PRIO			0x24bc
#define      MVNETA_FORCE_UNI			BIT(21)
#define MVNETA_TXQ_CMD_1			0x24e4
#define MVNETA_TXQ_CMD				0x2448
#define      MVNETA_TXQ_DISABLE_SHIFT		8
#define      MVNETA_TXQ_ENABLE_MASK		0x000000ff
#define MVNETA_RX_DISCARD_FRAME_COUNT		0x2484
#define MVNETA_OVERRUN_FRAME_COUNT		0x2488
#define MVNETA_GMAC_CLOCK_DIVIDER		0x24f4
#define      MVNETA_GMAC_1MS_CLOCK_ENABLE	BIT(31)
#define MVNETA_ACC_MODE				0x2500
#define MVNETA_CPU_MAP(cpu)			(0x2540 + ((cpu) << 2))
#define      MVNETA_CPU_RXQ_ACCESS_ALL_MASK	0x000000ff
#define      MVNETA_CPU_TXQ_ACCESS_ALL_MASK	0x0000ff00
#define      MVNETA_CPU_RXQ_ACCESS(rxq)		BIT(rxq)
#define      MVNETA_CPU_TXQ_ACCESS(txq)		BIT(txq + 8)
#define MVNETA_RXQ_TIME_COAL_REG(q)		(0x2580 + ((q) << 2))

/* Exception Interrupt Port/Queue Cause register
 *
 * Their behavior depends on the mapping done using the PCPX2Q
 * registers. For a given CPU, if the bit associated with a queue is not
 * set, then a read of this register from that CPU will always return
 * 0 and a write won't do anything.
 */

#define MVNETA_INTR_NEW_CAUSE			0x25a0
#define MVNETA_INTR_NEW_MASK			0x25a4

/* bits  0..7  = TXQ SENT, one bit per queue.
 * bits  8..15 = RXQ OCCUP, one bit per queue.
 * bits 16..23 = RXQ FREE, one bit per queue.
 * bit  29 = OLD_REG_SUM, see old reg ?
 * bit  30 = TX_ERR_SUM, one bit for 4 ports
 * bit  31 = MISC_SUM,   one bit for 4 ports
 */
#define      MVNETA_TX_INTR_MASK(nr_txqs)	(((1 << nr_txqs) - 1) << 0)
#define      MVNETA_TX_INTR_MASK_ALL		(0xff << 0)
#define      MVNETA_RX_INTR_MASK(nr_rxqs)	(((1 << nr_rxqs) - 1) << 8)
#define      MVNETA_RX_INTR_MASK_ALL		(0xff << 8)
#define      MVNETA_MISCINTR_INTR_MASK		BIT(31)

#define MVNETA_INTR_OLD_CAUSE			0x25a8
#define MVNETA_INTR_OLD_MASK			0x25ac

/* Data Path Port/Queue Cause Register */
#define MVNETA_INTR_MISC_CAUSE			0x25b0
#define MVNETA_INTR_MISC_MASK			0x25b4

#define      MVNETA_CAUSE_PHY_STATUS_CHANGE	BIT(0)
#define      MVNETA_CAUSE_LINK_CHANGE		BIT(1)
#define      MVNETA_CAUSE_PTP			BIT(4)

#define      MVNETA_CAUSE_INTERNAL_ADDR_ERR	BIT(7)
#define      MVNETA_CAUSE_RX_OVERRUN		BIT(8)
#define      MVNETA_CAUSE_RX_CRC_ERROR		BIT(9)
#define      MVNETA_CAUSE_RX_LARGE_PKT		BIT(10)
#define      MVNETA_CAUSE_TX_UNDERUN		BIT(11)
#define      MVNETA_CAUSE_PRBS_ERR		BIT(12)
#define      MVNETA_CAUSE_PSC_SYNC_CHANGE	BIT(13)
#define      MVNETA_CAUSE_SERDES_SYNC_ERR	BIT(14)

#define      MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT	16
#define      MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK   (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
#define      MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))

#define      MVNETA_CAUSE_TXQ_ERROR_SHIFT	24
#define      MVNETA_CAUSE_TXQ_ERROR_ALL_MASK	(0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
#define      MVNETA_CAUSE_TXQ_ERROR_MASK(q)	(1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))

#define MVNETA_INTR_ENABLE			0x25b8
#define      MVNETA_TXQ_INTR_ENABLE_ALL_MASK	0x0000ff00
#define      MVNETA_RXQ_INTR_ENABLE_ALL_MASK	0x000000ff

#define MVNETA_RXQ_CMD				0x2680
#define      MVNETA_RXQ_DISABLE_SHIFT		8
#define      MVNETA_RXQ_ENABLE_MASK		0x000000ff
#define MVETH_TXQ_TOKEN_COUNT_REG(q)		(0x2700 + ((q) << 4))
#define MVETH_TXQ_TOKEN_CFG_REG(q)		(0x2704 + ((q) << 4))
#define MVNETA_GMAC_CTRL_0			0x2c00
#define      MVNETA_GMAC_MAX_RX_SIZE_SHIFT	2
#define      MVNETA_GMAC_MAX_RX_SIZE_MASK	0x7ffc
#define      MVNETA_GMAC0_PORT_ENABLE		BIT(0)
#define MVNETA_GMAC_CTRL_2			0x2c08
#define      MVNETA_GMAC2_INBAND_AN_ENABLE	BIT(0)
#define      MVNETA_GMAC2_PCS_ENABLE		BIT(3)
#define      MVNETA_GMAC2_PORT_RGMII		BIT(4)
#define      MVNETA_GMAC2_PORT_RESET		BIT(6)
#define MVNETA_GMAC_STATUS			0x2c10
#define      MVNETA_GMAC_LINK_UP		BIT(0)
#define      MVNETA_GMAC_SPEED_1000		BIT(1)
#define      MVNETA_GMAC_SPEED_100		BIT(2)
#define      MVNETA_GMAC_FULL_DUPLEX		BIT(3)
#define      MVNETA_GMAC_RX_FLOW_CTRL_ENABLE	BIT(4)
#define      MVNETA_GMAC_TX_FLOW_CTRL_ENABLE	BIT(5)
#define      MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE	BIT(6)
#define      MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE	BIT(7)
#define MVNETA_GMAC_AUTONEG_CONFIG		0x2c0c
#define      MVNETA_GMAC_FORCE_LINK_DOWN	BIT(0)
#define      MVNETA_GMAC_FORCE_LINK_PASS	BIT(1)
#define      MVNETA_GMAC_INBAND_AN_ENABLE	BIT(2)
#define      MVNETA_GMAC_CONFIG_MII_SPEED	BIT(5)
#define      MVNETA_GMAC_CONFIG_GMII_SPEED	BIT(6)
#define      MVNETA_GMAC_AN_SPEED_EN		BIT(7)
#define      MVNETA_GMAC_AN_FLOW_CTRL_EN	BIT(11)
#define      MVNETA_GMAC_CONFIG_FULL_DUPLEX	BIT(12)
#define      MVNETA_GMAC_AN_DUPLEX_EN		BIT(13)
#define MVNETA_MIB_COUNTERS_BASE		0x3000
#define      MVNETA_MIB_LATE_COLLISION		0x7c
#define MVNETA_DA_FILT_SPEC_MCAST		0x3400
#define MVNETA_DA_FILT_OTH_MCAST		0x3500
#define MVNETA_DA_FILT_UCAST_BASE		0x3600
#define MVNETA_TXQ_BASE_ADDR_REG(q)		(0x3c00 + ((q) << 2))
#define MVNETA_TXQ_SIZE_REG(q)			(0x3c20 + ((q) << 2))
#define      MVNETA_TXQ_SENT_THRESH_ALL_MASK	0x3fff0000
#define      MVNETA_TXQ_SENT_THRESH_MASK(coal)	((coal) << 16)
#define MVNETA_TXQ_UPDATE_REG(q)		(0x3c60 + ((q) << 2))
#define      MVNETA_TXQ_DEC_SENT_SHIFT		16
#define MVNETA_TXQ_STATUS_REG(q)		(0x3c40 + ((q) << 2))
#define      MVNETA_TXQ_SENT_DESC_SHIFT		16
#define      MVNETA_TXQ_SENT_DESC_MASK		0x3fff0000
#define MVNETA_PORT_TX_RESET			0x3cf0
#define      MVNETA_PORT_TX_DMA_RESET		BIT(0)
#define MVNETA_TX_MTU				0x3e0c
#define MVNETA_TX_TOKEN_SIZE			0x3e14
#define      MVNETA_TX_TOKEN_SIZE_MAX		0xffffffff
#define MVNETA_TXQ_TOKEN_SIZE_REG(q)		(0x3e40 + ((q) << 2))
#define      MVNETA_TXQ_TOKEN_SIZE_MAX		0x7fffffff

#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK	0xff

/* Descriptor ring Macros */
#define MVNETA_QUEUE_NEXT_DESC(q, index)	\
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)

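/* For example, with a ring of 128 descriptors (last_desc == 127), the walk
 * order produced by this macro is 126 -> 127 -> 0 -> 1 -> ..., i.e. the
 * index simply wraps back to the start of the ring.
 */
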
/* Various constants */

#define MVNETA_TXDONE_COAL_PKTS		1
#define MVNETA_RX_COAL_PKTS		32
#define MVNETA_RX_COAL_USEC		100

/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically by zeroes on
 * the RX side. Those two bytes being at the front of the Ethernet
 * header, they allow the IP header to be aligned on a 4-byte
 * boundary automatically: the hardware skips those two bytes on its
 * own.
 */
#define MVNETA_MH_SIZE			2

#define MVNETA_VLAN_TAG_LEN		4

#define MVNETA_CPU_D_CACHE_LINE_SIZE	32
#define MVNETA_TX_CSUM_DEF_SIZE		1600
#define MVNETA_TX_CSUM_MAX_SIZE		9800
#define MVNETA_ACC_MODE_EXT		1

/* Timeout constants */
#define MVNETA_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_RX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_TX_FIFO_EMPTY_TIMEOUT	10000

#define MVNETA_TX_MTU_MAX		0x3ffff

/* The RSS lookup table actually has 256 entries but we do not use
 * them yet
 */
#define MVNETA_RSS_LU_TABLE_SIZE	1

/* TSO header size */
#define TSO_HEADER_SIZE			128

/* Max number of Rx descriptors */
#define MVNETA_MAX_RXD			128

/* Max number of Tx descriptors */
#define MVNETA_MAX_TXD			532

/* Max number of allowed TCP segments for software TSO */
#define MVNETA_MAX_TSO_SEGS		100

#define MVNETA_MAX_SKB_DESCS		(MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

/* descriptor aligned size */
#define MVNETA_DESC_ALIGNED_SIZE	32

#define MVNETA_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN,			      \
	      MVNETA_CPU_D_CACHE_LINE_SIZE)

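/* Worked example: for the standard MTU of 1500 bytes,
 * MVNETA_RX_PKT_SIZE(1500) = ALIGN(1500 + 2 + 4 + 14 + 4, 32)
 *                          = ALIGN(1524, 32) = 1536 bytes per RX packet.
 */
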
#define IS_TSO_HEADER(txq, addr) \
	((addr >= txq->tso_hdrs_phys) && \
	 (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))

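/* Note: TSO headers are built in a per-queue coherent buffer
 * (txq->tso_hdrs / txq->tso_hdrs_phys) rather than being DMA-mapped per
 * packet, so completion and error paths use this test to skip
 * dma_unmap_single() for descriptors that point into that region.
 */
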
#define MVNETA_RX_BUF_SIZE(pkt_size)   ((pkt_size) + NET_SKB_PAD)

struct mvneta_statistic {
	unsigned short offset;
	unsigned short type;	/* counter width: T_REG_32 or T_REG_64 */
	const char name[ETH_GSTRING_LEN];
};

static const struct mvneta_statistic mvneta_statistics[] = {
	{ 0x3000, T_REG_64, "good_octets_received", },
	{ 0x3010, T_REG_32, "good_frames_received", },
	{ 0x3008, T_REG_32, "bad_octets_received", },
	{ 0x3014, T_REG_32, "bad_frames_received", },
	{ 0x3018, T_REG_32, "broadcast_frames_received", },
	{ 0x301c, T_REG_32, "multicast_frames_received", },
	{ 0x3050, T_REG_32, "unrec_mac_control_received", },
	{ 0x3058, T_REG_32, "good_fc_received", },
	{ 0x305c, T_REG_32, "bad_fc_received", },
	{ 0x3060, T_REG_32, "undersize_received", },
	{ 0x3064, T_REG_32, "fragments_received", },
	{ 0x3068, T_REG_32, "oversize_received", },
	{ 0x306c, T_REG_32, "jabber_received", },
	{ 0x3070, T_REG_32, "mac_receive_error", },
	{ 0x3074, T_REG_32, "bad_crc_event", },
	{ 0x3078, T_REG_32, "collision", },
	{ 0x307c, T_REG_32, "late_collision", },
	{ 0x2484, T_REG_32, "rx_discard", },
	{ 0x2488, T_REG_32, "rx_overrun", },
	{ 0x3020, T_REG_32, "frames_64_octets", },
	{ 0x3024, T_REG_32, "frames_65_to_127_octets", },
	{ 0x3028, T_REG_32, "frames_128_to_255_octets", },
	{ 0x302c, T_REG_32, "frames_256_to_511_octets", },
	{ 0x3030, T_REG_32, "frames_512_to_1023_octets", },
	{ 0x3034, T_REG_32, "frames_1024_to_max_octets", },
	{ 0x3038, T_REG_64, "good_octets_sent", },
	{ 0x3040, T_REG_32, "good_frames_sent", },
	{ 0x3044, T_REG_32, "excessive_collision", },
	{ 0x3048, T_REG_32, "multicast_frames_sent", },
	{ 0x304c, T_REG_32, "broadcast_frames_sent", },
	{ 0x3054, T_REG_32, "fc_sent", },
	{ 0x300c, T_REG_32, "internal_mac_transmit_err", },
};

struct mvneta_pcpu_stats {
	struct	u64_stats_sync syncp;
	u64	rx_packets;
	u64	rx_bytes;
	u64	tx_packets;
	u64	tx_bytes;
};

struct mvneta_pcpu_port {
	/* Pointer to the shared port */
	struct mvneta_port	*pp;

	/* Pointer to the CPU-local NAPI struct */
	struct napi_struct	napi;

	/* Cause of the previous interrupt */
	u32			cause_rx_tx;
};

struct mvneta_port {
	struct mvneta_pcpu_port __percpu	*ports;
	struct mvneta_pcpu_stats __percpu	*stats;

	int pkt_size;
	unsigned int frag_size;
	void __iomem *base;
	struct mvneta_rx_queue *rxqs;
	struct mvneta_tx_queue *txqs;
	struct net_device *dev;
	struct notifier_block cpu_notifier;
	int rxq_def;

	/* Core clock */
	struct clk *clk;
	u8 mcast_count[256];

	struct mii_bus *mii_bus;
	struct phy_device *phy_dev;
	phy_interface_t phy_interface;
	struct device_node *phy_node;

	unsigned int tx_csum_limit;
	unsigned int use_inband_status:1;

	u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];

	u32 indir[MVNETA_RSS_LU_TABLE_SIZE];
};

/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVNETA_TX_L3_OFF_SHIFT	0
#define MVNETA_TX_IP_HLEN_SHIFT	8
#define MVNETA_TX_L4_UDP	BIT(16)
#define MVNETA_TX_L3_IP6	BIT(17)
#define MVNETA_TXD_IP_CSUM	BIT(18)
#define MVNETA_TXD_Z_PAD	BIT(19)
#define MVNETA_TXD_L_DESC	BIT(20)
#define MVNETA_TXD_F_DESC	BIT(21)
#define MVNETA_TXD_FLZ_DESC	(MVNETA_TXD_Z_PAD  | \
				 MVNETA_TXD_L_DESC | \
				 MVNETA_TXD_F_DESC)
#define MVNETA_TX_L4_CSUM_FULL	BIT(30)
#define MVNETA_TX_L4_CSUM_NOT	BIT(31)

#define MVNETA_RXD_ERR_CRC		0x0
#define MVNETA_RXD_ERR_SUMMARY		BIT(16)
#define MVNETA_RXD_ERR_OVERRUN		BIT(17)
#define MVNETA_RXD_ERR_LEN		BIT(18)
#define MVNETA_RXD_ERR_RESOURCE		(BIT(17) | BIT(18))
#define MVNETA_RXD_ERR_CODE_MASK	(BIT(17) | BIT(18))
#define MVNETA_RXD_L3_IP4		BIT(25)
#define MVNETA_RXD_FIRST_LAST_DESC	(BIT(26) | BIT(27))
#define MVNETA_RXD_L4_CSUM_OK		BIT(30)

#if defined(__LITTLE_ENDIAN)
struct mvneta_tx_desc {
	u32  command;		/* Options used by HW for packet transmitting.*/
	u16  reserverd1;	/* csum_l4 (for future use)		*/
	u16  data_size;		/* Data size of transmitted packet in bytes */
	u32  buf_phys_addr;	/* Physical addr of transmitted buffer	*/
	u32  reserved2;		/* hw_cmd - (for future use, PMT)	*/
	u32  reserved3[4];	/* Reserved - (for future use)		*/
};

struct mvneta_rx_desc {
	u32  status;		/* Info about received packet		*/
	u16  reserved1;		/* pnc_info - (for future use, PnC)	*/
	u16  data_size;		/* Size of received packet in bytes	*/

	u32  buf_phys_addr;	/* Physical address of the buffer	*/
	u32  reserved2;		/* pnc_flow_id  (for future use, PnC)	*/

	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */
	u16  reserved3;		/* prefetch_cmd, for future use		*/
	u16  reserved4;		/* csum_l4 - (for future use, PnC)	*/

	u32  reserved5;		/* pnc_extra PnC (for future use, PnC)	*/
	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF)	*/
};
#else
struct mvneta_tx_desc {
	u16  data_size;		/* Data size of transmitted packet in bytes */
	u16  reserverd1;	/* csum_l4 (for future use)		*/
	u32  command;		/* Options used by HW for packet transmitting.*/
	u32  reserved2;		/* hw_cmd - (for future use, PMT)	*/
	u32  buf_phys_addr;	/* Physical addr of transmitted buffer	*/
	u32  reserved3[4];	/* Reserved - (for future use)		*/
};

struct mvneta_rx_desc {
	u16  data_size;		/* Size of received packet in bytes	*/
	u16  reserved1;		/* pnc_info - (for future use, PnC)	*/
	u32  status;		/* Info about received packet		*/

	u32  reserved2;		/* pnc_flow_id  (for future use, PnC)	*/
	u32  buf_phys_addr;	/* Physical address of the buffer	*/

	u16  reserved4;		/* csum_l4 - (for future use, PnC)	*/
	u16  reserved3;		/* prefetch_cmd, for future use		*/
	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */

	u32  reserved5;		/* pnc_extra PnC (for future use, PnC)	*/
	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF)	*/
};
#endif

struct mvneta_tx_queue {
	/* Number of this TX queue, in the range 0-7 */
	u8 id;

	/* Number of TX DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used TX DMA descriptors in the
	 * descriptor ring
	 */
	int count;
	int tx_stop_threshold;
	int tx_wake_threshold;

	/* Array of transmitted skb */
	struct sk_buff **tx_skb;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;

	u32 done_pkts_coal;

	/* Virtual address of the TX DMA descriptors array */
	struct mvneta_tx_desc *descs;

	/* DMA address of the TX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last TX DMA descriptor */
	int last_desc;

	/* Index of the next TX DMA descriptor to process */
	int next_desc_to_proc;

	/* DMA buffers for TSO headers */
	char *tso_hdrs;

	/* DMA address of TSO headers */
	dma_addr_t tso_hdrs_phys;

	/* Affinity mask for CPUs */
	cpumask_t affinity_mask;
};

struct mvneta_rx_queue {
	/* rx queue number, in the range 0-7 */
	u8 id;

	/* num of rx descriptors in the rx descriptor ring */
	int size;

	/* counter of times when mvneta_refill() failed */
	int missed;

	u32 pkts_coal;
	u32 time_coal;

	/* Virtual address of the RX DMA descriptors array */
	struct mvneta_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;
};

/* The hardware supports eight (8) rx queues, but we are only allowing
 * the first one to be used. Therefore, let's just allocate one queue.
 */
static int rxq_number = 8;
static int txq_number = 8;

static int rx_copybreak __read_mostly = 256;

#define MVNETA_DRIVER_NAME "mvneta"
#define MVNETA_DRIVER_VERSION "1.0"

/* Utility/helper methods */

/* Write helper method */
static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
{
	writel(data, pp->base + offset);
}

/* Read helper method */
static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
{
	return readl(pp->base + offset);
}

/* Increment txq get counter */
static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
{
	txq->txq_get_index++;
	if (txq->txq_get_index == txq->size)
		txq->txq_get_index = 0;
}

/* Increment txq put counter */
static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
{
	txq->txq_put_index++;
	if (txq->txq_put_index == txq->size)
		txq->txq_put_index = 0;
}

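/* Note: txq_put_index and txq_get_index are the two ends of the tx_skb[]
 * ring bookkeeping: the put index advances as descriptors are queued for
 * transmission, the get index as they are released on completion, and both
 * wrap at txq->size (see the two helpers above).
 */
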
/* Clear all MIB counters */
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{
	int i;
	u32 dummy;

	/* Perform dummy reads from MIB counters */
	for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
		dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
	dummy = mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
	dummy = mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
}

/* Get System Network Statistics */
struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
					     struct rtnl_link_stats64 *stats)
{
	struct mvneta_port *pp = netdev_priv(dev);
	unsigned int start;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct mvneta_pcpu_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 tx_packets;
		u64 tx_bytes;

		cpu_stats = per_cpu_ptr(pp->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rx_packets = cpu_stats->rx_packets;
			rx_bytes   = cpu_stats->rx_bytes;
			tx_packets = cpu_stats->tx_packets;
			tx_bytes   = cpu_stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes   += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes   += tx_bytes;
	}

	stats->rx_errors	= dev->stats.rx_errors;
	stats->rx_dropped	= dev->stats.rx_dropped;

	stats->tx_dropped	= dev->stats.tx_dropped;

	return stats;
}

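/* Note: the u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() loop
 * above re-reads a CPU's counters until it gets a snapshot that was not
 * concurrently updated, which is what makes the 64-bit counters safe to
 * read on 32-bit systems.
 */
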
/* Rx descriptors helper methods */

/* Checks whether the RX descriptor having this status is both the first
 * and the last descriptor for the RX packet. Each RX packet is currently
 * received through a single RX descriptor, so not having each RX
 * descriptor with its first and last bits set is an error
 */
static int mvneta_rxq_desc_is_first_last(u32 status)
{
	return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
		MVNETA_RXD_FIRST_LAST_DESC;
}

/* Add number of descriptors ready to receive new packets */
static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
					  struct mvneta_rx_queue *rxq,
					  int ndescs)
{
	/* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
	 * be added at once
	 */
	while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
			    (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
			     MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
		ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
	}

	mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
		    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
}

/* Get number of RX descriptors occupied by received packets */
static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
					struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
	return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
}

/* Update num of rx desc called upon return from rx path or
 * from mvneta_rxq_drop_pkts().
 */
static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
				       struct mvneta_rx_queue *rxq,
				       int rx_done, int rx_filled)
{
	u32 val;

	if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
		val = rx_done |
		  (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
		return;
	}

	/* Only 255 descriptors can be added at once */
	while ((rx_done > 0) || (rx_filled > 0)) {
		if (rx_done <= 0xff) {
			val = rx_done;
			rx_done = 0;
		} else {
			val = 0xff;
			rx_done -= 0xff;
		}
		if (rx_filled <= 0xff) {
			val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled = 0;
		} else {
			val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled -= 0xff;
		}
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
	}
}

/* Get pointer to next RX descriptor to be processed by SW */
static struct mvneta_rx_desc *
mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}

/* Change maximum receive size of the port. */
static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
	val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
		MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}

/* Set rx queue offset */
static void mvneta_rxq_offset_set(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq,
				  int offset)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;

	val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Tx descriptors helper methods */

/* Update HW with number of TX descriptors to be sent */
static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int pend_desc)
{
	u32 val;

	/* Only 255 descriptors can be added at once; assume the caller
	 * processes TX descriptors in quanta less than 256
	 */
	val = pend_desc;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get pointer to next TX descriptor to be processed (send) by HW */
static struct mvneta_tx_desc *
mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Release the last allocated TX descriptor. Useful to handle DMA
 * mapping failures in the TX path.
 */
static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
{
	if (txq->next_desc_to_proc == 0)
		txq->next_desc_to_proc = txq->last_desc - 1;
	else
		txq->next_desc_to_proc--;
}

/* Set rxq buf size */
static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq,
				    int buf_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));

	val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
	val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
}

/* Disable buffer management (BM) */
static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
				  struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
	val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
	mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{
	int queue;
	u32 q_map;

	/* Enable all initialized TXs. */
	q_map = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		if (txq->descs != NULL)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);

	/* Enable all initialized RXQs. */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

		if (rxq->descs != NULL)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
}

/* Stop the Ethernet port activity */
static void mvneta_port_down(struct mvneta_port *pp)
{
	u32 val;
	int count;

	/* Stop Rx port activity. Check port Rx activity. */
	val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;

	/* Issue stop command for active channels only */
	if (val != 0)
		mvreg_write(pp, MVNETA_RXQ_CMD,
			    val << MVNETA_RXQ_DISABLE_SHIFT);

	/* Wait for all Rx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_RXQ_CMD);
	} while (val & 0xff);

	/* Stop Tx port activity. Check port Tx activity. Issue stop
	 * command for active channels only
	 */
	val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;

	if (val != 0)
		mvreg_write(pp, MVNETA_TXQ_CMD,
			    (val << MVNETA_TXQ_DISABLE_SHIFT));

	/* Wait for all Tx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for TX stopped status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		/* Check TX Command reg that all Txqs are stopped */
		val = mvreg_read(pp, MVNETA_TXQ_CMD);

	} while (val & 0xff);

	/* Double check to verify that TX FIFO is empty */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
			netdev_warn(pp->dev,
				    "TX FIFO empty timeout status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_PORT_STATUS);
	} while (!(val & MVNETA_TX_FIFO_EMPTY) &&
		 (val & MVNETA_TX_IN_PRGRS));

	udelay(200);
}

/* Enable the port by setting the port enable bit of the MAC control register */
static void mvneta_port_enable(struct mvneta_port *pp)
{
	u32 val;

	/* Enable the port */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val |= MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}

/* Disable the port and wait for about 200 usec before returning */
static void mvneta_port_disable(struct mvneta_port *pp)
{
	u32 val;

	/* Reset the Enable bit in the Serial Control Register */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);

	udelay(200);
}

/* Multicast tables methods */

/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
}

/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
}

/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
		val = 0;
	} else {
		memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
}

static void mvneta_set_autoneg(struct mvneta_port *pp, int enable)
{
	u32 val;

	if (enable) {
		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
		val &= ~(MVNETA_GMAC_FORCE_LINK_PASS |
			 MVNETA_GMAC_FORCE_LINK_DOWN |
			 MVNETA_GMAC_AN_FLOW_CTRL_EN);
		val |= MVNETA_GMAC_INBAND_AN_ENABLE |
		       MVNETA_GMAC_AN_SPEED_EN |
		       MVNETA_GMAC_AN_DUPLEX_EN;
		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);

		val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
		val |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
		mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);

		val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
		val |= MVNETA_GMAC2_INBAND_AN_ENABLE;
		mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
	} else {
		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
		val &= ~(MVNETA_GMAC_INBAND_AN_ENABLE |
			 MVNETA_GMAC_AN_SPEED_EN |
			 MVNETA_GMAC_AN_DUPLEX_EN);
		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);

		val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
		val &= ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
		mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);

		val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
		val &= ~MVNETA_GMAC2_INBAND_AN_ENABLE;
		mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
	}
}

/* This method sets defaults to the NETA port:
 *	Clears interrupt Cause and Mask registers.
 *	Clears all MAC tables.
 *	Sets defaults to all registers.
 *	Resets RX and TX descriptor rings.
 *
 * This method can be called after mvneta_port_down() to return the port
 *	settings to defaults.
 */
static void mvneta_defaults_set(struct mvneta_port *pp)
{
	int cpu;
	int queue;
	u32 val;
	int max_cpu = num_present_cpus();

	/* Clear all Cause registers */
	mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);

	/* Mask all interrupts */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_ENABLE, 0);

	/* Enable MBUS Retry bit16 */
	mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);

	/* Set CPU queue access map. CPUs are assigned to the RX and
	 * TX queues modulo their number. If there is only one TX
	 * queue then it is assigned to the CPU associated to the
	 * default RX queue.
	 */
	for_each_present_cpu(cpu) {
		int rxq_map = 0, txq_map = 0;
		int rxq, txq;

		for (rxq = 0; rxq < rxq_number; rxq++)
			if ((rxq % max_cpu) == cpu)
				rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);

		for (txq = 0; txq < txq_number; txq++)
			if ((txq % max_cpu) == cpu)
				txq_map |= MVNETA_CPU_TXQ_ACCESS(txq);

		/* With only one TX queue we configure a special case
		 * which will allow to get all the irq on a single
		 * CPU.
		 */
		if (txq_number == 1)
			txq_map = (cpu == pp->rxq_def) ?
				MVNETA_CPU_TXQ_ACCESS(1) : 0;

		mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
	}

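	/* Example of the mapping above: with 2 present CPUs and 8 RX/TX
	 * queues, CPU0 is granted access to queues 0, 2, 4 and 6, and CPU1
	 * to queues 1, 3, 5 and 7; with txq_number == 1, only the CPU owning
	 * the default RX queue gets a TX queue bit.
	 */
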
	/* Reset RX and TX DMAs */
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
	for (queue = 0; queue < txq_number; queue++) {
		mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
		mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
	}

	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);

	/* Set Port Acceleration Mode */
	val = MVNETA_ACC_MODE_EXT;
	mvreg_write(pp, MVNETA_ACC_MODE, val);

	/* Update val of portCfg register accordingly with all RxQueue types */
	val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
	mvreg_write(pp, MVNETA_PORT_CONFIG, val);

	val = 0;
	mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
	mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);

	/* Build PORT_SDMA_CONFIG_REG */
	val = 0;

	/* Default burst size */
	val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;

#if defined(__BIG_ENDIAN)
	val |= MVNETA_DESC_SWAP;
#endif

	/* Assign port SDMA configuration */
	mvreg_write(pp, MVNETA_SDMA_CONFIG, val);

	/* Disable PHY polling in hardware, since we're using the
	 * kernel phylib to do this.
	 */
	val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
	val &= ~MVNETA_PHY_POLLING_ENABLE;
	mvreg_write(pp, MVNETA_UNIT_CONTROL, val);

	mvneta_set_autoneg(pp, pp->use_inband_status);
	mvneta_set_ucast_table(pp, -1);
	mvneta_set_special_mcast_table(pp, -1);
	mvneta_set_other_mcast_table(pp, -1);

	/* Set port interrupt enable register - default enable all */
	mvreg_write(pp, MVNETA_INTR_ENABLE,
		    (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
		     | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));

	mvneta_mib_counters_clear(pp);
}

/* Set max sizes for tx queues */
static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
{
	u32 val, size, mtu;
	int queue;

	mtu = max_tx_size * 8;
	if (mtu > MVNETA_TX_MTU_MAX)
		mtu = MVNETA_TX_MTU_MAX;

	/* Set MTU */
	val = mvreg_read(pp, MVNETA_TX_MTU);
	val &= ~MVNETA_TX_MTU_MAX;
	val |= mtu;
	mvreg_write(pp, MVNETA_TX_MTU, val);

	/* TX token size and all TXQs token size must be larger than the MTU */
	val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);

	size = val & MVNETA_TX_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
		val |= size;
		mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
	}
	for (queue = 0; queue < txq_number; queue++) {
		val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));

		size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
		if (size < mtu) {
			size = mtu;
			val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
		}
	}
}

/* Set unicast address */
static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
				  int queue)
{
	unsigned int unicast_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Locate the Unicast table entry */
	last_nibble = (0xf & last_nibble);

	/* offset from unicast tbl base */
	tbl_offset = (last_nibble / 4) * 4;

	/* offset within the above reg */
	reg_offset = last_nibble % 4;

	unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));

	if (queue == -1) {
		/* Clear accepts frame bit at specified unicast DA tbl entry */
		unicast_reg &= ~(0xff << (8 * reg_offset));
	} else {
		unicast_reg &= ~(0xff << (8 * reg_offset));
		unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
}

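/* Worked example for the table indexing above: for a MAC whose last nibble
 * is 0xb, tbl_offset = (11 / 4) * 4 = 8 and reg_offset = 11 % 4 = 3, so the
 * entry lives in byte 3 of the register at MVNETA_DA_FILT_UCAST_BASE + 8;
 * each entry byte is 0x1 | (queue << 1), i.e. an accept bit plus the RX queue.
 */
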
/* Set mac address */
static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
				int queue)
{
	unsigned int mac_h;
	unsigned int mac_l;

	if (queue != -1) {
		mac_l = (addr[4] << 8) | (addr[5]);
		mac_h = (addr[0] << 24) | (addr[1] << 16) |
			(addr[2] << 8) | (addr[3] << 0);

		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
	}

	/* Accept frames of this address */
	mvneta_set_ucast_addr(pp, addr[5], queue);
}

/* Set the number of packets that will be received before RX interrupt
 * will be generated by HW.
 */
static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{
	mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
		    value | MVNETA_RXQ_NON_OCCUPIED(0));
	rxq->pkts_coal = value;
}

/* Set the time delay in usec before RX interrupt will be generated by
 * HW.
 */
static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq, u32 value)
{
	u32 val;
	unsigned long clk_rate;

	clk_rate = clk_get_rate(pp->clk);
	val = (clk_rate / 1000000) * value;

	mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
	rxq->time_coal = value;
}

/* Set threshold for TX_DONE pkts coalescing */
static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
					 struct mvneta_tx_queue *txq, u32 value)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));

	val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
	val |= MVNETA_TXQ_SENT_THRESH_MASK(value);

	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);

	txq->done_pkts_coal = value;
}

/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
				u32 phys_addr, u32 cookie)
{
	rx_desc->buf_cookie = cookie;
	rx_desc->buf_phys_addr = phys_addr;
}

/* Decrement sent descriptors counter */
static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int sent_desc)
{
	u32 val;

	/* Only 255 TX descriptors can be updated at once */
	while (sent_desc > 0xff) {
		val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
		sent_desc = sent_desc - 0xff;
	}

	val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get number of TX descriptors already sent by HW */
static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
					struct mvneta_tx_queue *txq)
{
	u32 val;
	int sent_desc;

	val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
	sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
		MVNETA_TXQ_SENT_DESC_SHIFT;

	return sent_desc;
}

/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 */
static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq)
{
	int sent_desc;

	/* Get number of sent descriptors */
	sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);

	/* Decrement sent descriptors counter */
	if (sent_desc)
		mvneta_txq_sent_desc_dec(pp, txq, sent_desc);

	return sent_desc;
}

/* Set TXQ descriptors fields relevant for CSUM calculation */
static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
				int ip_hdr_len, int l4_proto)
{
	u32 command;

	/* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
	 * G_L4_chk, L4_type; required only for checksum
	 * calculation
	 */
	command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
	command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;

	if (l3_proto == htons(ETH_P_IP))
		command |= MVNETA_TXD_IP_CSUM;
	else
		command |= MVNETA_TX_L3_IP6;

	if (l4_proto == IPPROTO_TCP)
		command |= MVNETA_TX_L4_CSUM_FULL;
	else if (l4_proto == IPPROTO_UDP)
		command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
	else
		command |= MVNETA_TX_L4_CSUM_NOT;

	return command;
}

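/* Example: for an untagged IPv4/TCP frame the caller passes l3_offs = 14
 * (the Ethernet header) and ip_hdr_len = 5 (IHL in 32-bit words), so the
 * returned command word is
 * 14 | (5 << 8) | MVNETA_TXD_IP_CSUM | MVNETA_TX_L4_CSUM_FULL.
 */
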
/* Display more error info */
static void mvneta_rx_error(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc)
{
	u32 status = rx_desc->status;

	if (!mvneta_rxq_desc_is_first_last(status)) {
		netdev_err(pp->dev,
			   "bad rx status %08x (buffer oversize), size=%d\n",
			   status, rx_desc->data_size);
		return;
	}

	switch (status & MVNETA_RXD_ERR_CODE_MASK) {
	case MVNETA_RXD_ERR_CRC:
		netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_OVERRUN:
		netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_LEN:
		netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_RESOURCE:
		netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	}
}

/* Handle RX checksum offload based on the descriptor's status */
static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
			   struct sk_buff *skb)
{
	if ((status & MVNETA_RXD_L3_IP4) &&
	    (status & MVNETA_RXD_L4_CSUM_OK)) {
		skb->csum = 0;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		return;
	}

	skb->ip_summed = CHECKSUM_NONE;
}

/* Return tx queue pointer (find last set bit) according to <cause> returned
 * from the tx_done reg. <cause> must not be null. The return value is always a
 * valid queue for matching the first one found in <cause>.
 */
static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
						     u32 cause)
{
	int queue = fls(cause) - 1;

	return &pp->txqs[queue];
}

/* Free tx queue skbuffs */
static void mvneta_txq_bufs_free(struct mvneta_port *pp,
				 struct mvneta_tx_queue *txq, int num)
{
	int i;

	for (i = 0; i < num; i++) {
		struct mvneta_tx_desc *tx_desc = txq->descs +
			txq->txq_get_index;
		struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];

		mvneta_txq_inc_get(txq);

		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
			dma_unmap_single(pp->dev->dev.parent,
					 tx_desc->buf_phys_addr,
					 tx_desc->data_size, DMA_TO_DEVICE);
		if (!skb)
			continue;
		dev_kfree_skb_any(skb);
	}
}

/* Handle end of transmission */
static void mvneta_txq_done(struct mvneta_port *pp,
			    struct mvneta_tx_queue *txq)
{
	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
	int tx_done;

	tx_done = mvneta_txq_sent_desc_proc(pp, txq);
	if (!tx_done)
		return;

	mvneta_txq_bufs_free(pp, txq, tx_done);

	txq->count -= tx_done;

	if (netif_tx_queue_stopped(nq)) {
		if (txq->count <= txq->tx_wake_threshold)
			netif_tx_wake_queue(nq);
	}
}

static void *mvneta_frag_alloc(const struct mvneta_port *pp)
{
	if (likely(pp->frag_size <= PAGE_SIZE))
		return netdev_alloc_frag(pp->frag_size);
	else
		return kmalloc(pp->frag_size, GFP_ATOMIC);
}

static void mvneta_frag_free(const struct mvneta_port *pp, void *data)
{
	if (likely(pp->frag_size <= PAGE_SIZE))
		skb_free_frag(data);
	else
		kfree(data);
}

/* Refill processing */
static int mvneta_rx_refill(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc)
{
	dma_addr_t phys_addr;
	void *data;

	data = mvneta_frag_alloc(pp);
	if (!data)
		return -ENOMEM;

	phys_addr = dma_map_single(pp->dev->dev.parent, data,
				   MVNETA_RX_BUF_SIZE(pp->pkt_size),
				   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
		mvneta_frag_free(pp, data);
		return -ENOMEM;
	}

	mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)data);
	return 0;
}

/* Handle tx checksum */
static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int ip_hdr_len = 0;
		__be16 l3_proto = vlan_get_protocol(skb);
		u8 l4_proto;

		if (l3_proto == htons(ETH_P_IP)) {
			struct iphdr *ip4h = ip_hdr(skb);

			/* Calculate IPv4 checksum and L4 checksum */
			ip_hdr_len = ip4h->ihl;
			l4_proto = ip4h->protocol;
		} else if (l3_proto == htons(ETH_P_IPV6)) {
			struct ipv6hdr *ip6h = ipv6_hdr(skb);

			/* Read l4_protocol from one of IPv6 extra headers */
			if (skb_network_header_len(skb) > 0)
				ip_hdr_len = (skb_network_header_len(skb) >> 2);
			l4_proto = ip6h->nexthdr;
		} else
			return MVNETA_TX_L4_CSUM_NOT;

		return mvneta_txq_desc_csum(skb_network_offset(skb),
					    l3_proto, ip_hdr_len, l4_proto);
	}

	return MVNETA_TX_L4_CSUM_NOT;
}

/* Drop packets received by the RXQ and free buffers */
static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
				 struct mvneta_rx_queue *rxq)
{
	int rx_done, i;

	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
	for (i = 0; i < rxq->size; i++) {
		struct mvneta_rx_desc *rx_desc = rxq->descs + i;
		void *data = (void *)rx_desc->buf_cookie;

		dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
				 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
		mvneta_frag_free(pp, data);
	}

	if (rx_done)
		mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
}

/* Main rx processing */
static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
		     struct mvneta_rx_queue *rxq)
{
	struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
	struct net_device *dev = pp->dev;
	int rx_done;
	u32 rcvd_pkts = 0;
	u32 rcvd_bytes = 0;

	/* Get number of received packets */
	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);

	if (rx_todo > rx_done)
		rx_todo = rx_done;

	rx_done = 0;

	/* Fairness NAPI loop */
	while (rx_done < rx_todo) {
		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
		struct sk_buff *skb;
		unsigned char *data;
		dma_addr_t phys_addr;
		u32 rx_status;
		int rx_bytes, err;

		rx_done++;
		rx_status = rx_desc->status;
		rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
		data = (unsigned char *)rx_desc->buf_cookie;
		phys_addr = rx_desc->buf_phys_addr;

		if (!mvneta_rxq_desc_is_first_last(rx_status) ||
		    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
err_drop_frame:
			dev->stats.rx_errors++;
			mvneta_rx_error(pp, rx_desc);
			/* leave the descriptor untouched */
			continue;
		}

		if (rx_bytes <= rx_copybreak) {
			/* better copy a small frame and not unmap the DMA region */
			skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
			if (unlikely(!skb))
				goto err_drop_frame;

			dma_sync_single_range_for_cpu(dev->dev.parent,
						      rx_desc->buf_phys_addr,
						      MVNETA_MH_SIZE + NET_SKB_PAD,
						      rx_bytes,
						      DMA_FROM_DEVICE);
			memcpy(skb_put(skb, rx_bytes),
			       data + MVNETA_MH_SIZE + NET_SKB_PAD,
			       rx_bytes);

			skb->protocol = eth_type_trans(skb, dev);
			mvneta_rx_csum(pp, rx_status, skb);
			napi_gro_receive(&port->napi, skb);

			rcvd_pkts++;
			rcvd_bytes += rx_bytes;

			/* leave the descriptor and buffer untouched */
			continue;
		}

		/* Refill processing */
		err = mvneta_rx_refill(pp, rx_desc);
		if (err) {
			netdev_err(dev, "Linux processing - Can't refill\n");
			rxq->missed++;
			goto err_drop_frame;
		}

		skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size);

		/* After refill old buffer has to be unmapped regardless
		 * the skb is successfully built or not.
		 */
		dma_unmap_single(dev->dev.parent, phys_addr,
				 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);

		if (!skb)
			goto err_drop_frame;

		rcvd_pkts++;
		rcvd_bytes += rx_bytes;

		/* Linux processing */
		skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
		skb_put(skb, rx_bytes);

		skb->protocol = eth_type_trans(skb, dev);

		mvneta_rx_csum(pp, rx_status, skb);

		napi_gro_receive(&port->napi, skb);
	}

	if (rcvd_pkts) {
		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets += rcvd_pkts;
		stats->rx_bytes   += rcvd_bytes;
		u64_stats_update_end(&stats->syncp);
	}

	/* Update rxq management counters */
	mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);

	return rx_done;
}

static inline void
mvneta_tso_put_hdr(struct sk_buff *skb,
		   struct mvneta_port *pp, struct mvneta_tx_queue *txq)
{
	struct mvneta_tx_desc *tx_desc;
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

	txq->tx_skb[txq->txq_put_index] = NULL;
	tx_desc = mvneta_txq_next_desc_get(txq);
	tx_desc->data_size = hdr_len;
	tx_desc->command = mvneta_skb_tx_csum(pp, skb);
	tx_desc->command |= MVNETA_TXD_F_DESC;
	tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
				 txq->txq_put_index * TSO_HEADER_SIZE;
	mvneta_txq_inc_put(txq);
}

static inline int
mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
		    struct sk_buff *skb, char *data, int size,
		    bool last_tcp, bool is_last)
{
	struct mvneta_tx_desc *tx_desc;

	tx_desc = mvneta_txq_next_desc_get(txq);
	tx_desc->data_size = size;
	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
						size, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent,
		     tx_desc->buf_phys_addr))) {
		mvneta_txq_desc_put(txq);
		return -ENOMEM;
	}

	tx_desc->command = 0;
	txq->tx_skb[txq->txq_put_index] = NULL;

	if (last_tcp) {
		/* last descriptor in the TCP packet */
		tx_desc->command = MVNETA_TXD_L_DESC;

		/* last descriptor in SKB */
		if (is_last)
			txq->tx_skb[txq->txq_put_index] = skb;
	}
	mvneta_txq_inc_put(txq);
	return 0;
}

static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
			 struct mvneta_tx_queue *txq)
{
	int total_len, data_left;
	int desc_count = 0;
	struct mvneta_port *pp = netdev_priv(dev);
	struct tso_t tso;
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int i;

	/* Count needed descriptors */
	if ((txq->count + tso_count_descs(skb)) >= txq->size)
		return 0;

	if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
		pr_info("*** Is this even possible???!?!?\n");
		return 0;
	}

	/* Initialize the TSO handler, and prepare the first payload */
	tso_start(skb, &tso);

	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char *hdr;

		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;
		desc_count++;

		/* prepare packet headers: MAC + IP + TCP */
		hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);

		mvneta_tso_put_hdr(skb, pp, txq);

		while (data_left > 0) {
			int size;
			desc_count++;

			size = min_t(int, tso.size, data_left);

			if (mvneta_tso_put_data(dev, txq, skb,
						tso.data, size,
						size == data_left,
						total_len == 0))
				goto err_release;
			data_left -= size;

			tso_build_data(skb, &tso, size);
		}
	}

	return desc_count;

err_release:
	/* Release all used data descriptors; header descriptors must not
	 * be DMA-unmapped.
	 */
	for (i = desc_count - 1; i >= 0; i--) {
		struct mvneta_tx_desc *tx_desc = txq->descs + i;
		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
			dma_unmap_single(pp->dev->dev.parent,
					 tx_desc->buf_phys_addr,
					 tx_desc->data_size,
					 DMA_TO_DEVICE);
		mvneta_txq_desc_put(txq);
	}
	return 0;
}

/* Handle tx fragmentation processing */
static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
				  struct mvneta_tx_queue *txq)
{
	struct mvneta_tx_desc *tx_desc;
	int i, nr_frags = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		void *addr = page_address(frag->page.p) + frag->page_offset;

		tx_desc = mvneta_txq_next_desc_get(txq);
		tx_desc->data_size = frag->size;

		tx_desc->buf_phys_addr =
			dma_map_single(pp->dev->dev.parent, addr,
				       tx_desc->data_size, DMA_TO_DEVICE);

		if (dma_mapping_error(pp->dev->dev.parent,
				      tx_desc->buf_phys_addr)) {
			mvneta_txq_desc_put(txq);
			goto error;
		}

		if (i == nr_frags - 1) {
			/* Last descriptor */
			tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
			txq->tx_skb[txq->txq_put_index] = skb;
		} else {
			/* Descriptor in the middle: Not First, Not Last */
			tx_desc->command = 0;
			txq->tx_skb[txq->txq_put_index] = NULL;
		}
		mvneta_txq_inc_put(txq);
	}

	return 0;

error:
	/* Release all descriptors that were used to map fragments of
	 * this packet, as well as the corresponding DMA mappings
	 */
	for (i = i - 1; i >= 0; i--) {
		tx_desc = txq->descs + i;
		dma_unmap_single(pp->dev->dev.parent,
				 tx_desc->buf_phys_addr,
				 tx_desc->data_size,
				 DMA_TO_DEVICE);
		mvneta_txq_desc_put(txq);
	}

	return -ENOMEM;
}

/* Main tx processing */
static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	u16 txq_id = skb_get_queue_mapping(skb);
	struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
	struct mvneta_tx_desc *tx_desc;
	int len = skb->len;
	int frags = 0;
	u32 tx_cmd;

	if (!netif_running(dev))
		goto out;

	if (skb_is_gso(skb)) {
		frags = mvneta_tx_tso(skb, dev, txq);
		goto out;
	}

	frags = skb_shinfo(skb)->nr_frags + 1;

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvneta_txq_next_desc_get(txq);

	tx_cmd = mvneta_skb_tx_csum(pp, skb);

	tx_desc->data_size = skb_headlen(skb);

	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
						tx_desc->data_size,
						DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent,
				       tx_desc->buf_phys_addr))) {
		mvneta_txq_desc_put(txq);
		frags = 0;
		goto out;
	}

	if (frags == 1) {
		/* First and Last descriptor */
		tx_cmd |= MVNETA_TXD_FLZ_DESC;
		tx_desc->command = tx_cmd;
		txq->tx_skb[txq->txq_put_index] = skb;
		mvneta_txq_inc_put(txq);
	} else {
		/* First but not Last */
		tx_cmd |= MVNETA_TXD_F_DESC;
		txq->tx_skb[txq->txq_put_index] = NULL;
		mvneta_txq_inc_put(txq);
		tx_desc->command = tx_cmd;
		/* Continue with other skb fragments */
		if (mvneta_tx_frag_process(pp, skb, txq)) {
			dma_unmap_single(dev->dev.parent,
					 tx_desc->buf_phys_addr,
					 tx_desc->data_size,
					 DMA_TO_DEVICE);
			mvneta_txq_desc_put(txq);
			frags = 0;
			goto out;
		}
	}

out:
	if (frags > 0) {
		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);

		txq->count += frags;
		mvneta_txq_pend_desc_add(pp, txq, frags);

		if (txq->count >= txq->tx_stop_threshold)
			netif_tx_stop_queue(nq);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes  += len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	return NETDEV_TX_OK;
}

/* Free tx resources, when resetting a port */
static void mvneta_txq_done_force(struct mvneta_port *pp,
				  struct mvneta_tx_queue *txq)
{
	int tx_done = txq->count;

	mvneta_txq_bufs_free(pp, txq, tx_done);

	/* reset txq */
	txq->count = 0;
	txq->txq_put_index = 0;
	txq->txq_get_index = 0;
}

/* Handle tx done - called in softirq context. The <cause_tx_done> argument
 * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL.
 */
static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
{
	struct mvneta_tx_queue *txq;
	struct netdev_queue *nq;

	while (cause_tx_done) {
		txq = mvneta_tx_done_policy(pp, cause_tx_done);

		nq = netdev_get_tx_queue(pp->dev, txq->id);
		__netif_tx_lock(nq, smp_processor_id());

		if (txq->count)
			mvneta_txq_done(pp, txq);

		__netif_tx_unlock(nq);
		cause_tx_done &= ~((1 << txq->id));
	}
}

/* Compute crc8 of the specified address, using a unique algorithm per the
 * HW spec, different from the generic crc8 algorithm
 */
static int mvneta_addr_crc(unsigned char *addr)
{
	int crc = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		int j;

		crc = (crc ^ addr[i]) << 8;
		for (j = 7; j >= 0; j--) {
			if (crc & (0x100 << j))
				crc ^= 0x107 << j;
		}
	}

	return crc;
}

/* This method controls the net device special MAC multicast support.
 * The Special Multicast Table for MAC addresses supports MAC of the form
 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
 * Table entries in the DA-Filter table. This method sets the Special
 * Multicast Table appropriate entry.
 */
static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
					  unsigned char last_byte,
					  int queue)
{
	unsigned int smc_table_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Register offset from SMC table base */
	tbl_offset = (last_byte / 4);
	/* Entry offset within the above reg */
	reg_offset = last_byte % 4;

	smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
					+ tbl_offset * 4));

	if (queue == -1)
		smc_table_reg &= ~(0xff << (8 * reg_offset));
	else {
		smc_table_reg &= ~(0xff << (8 * reg_offset));
		smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
		    smc_table_reg);
}

/* This method controls the network device Other MAC multicast support.
 * The Other Multicast Table is used for multicast of another type.
 * A CRC-8 is used as an index to the Other Multicast Table entries
 * in the DA-Filter table.
 * The method gets the CRC-8 value from the calling routine and
 * sets the Other Multicast Table appropriate entry according to the
 * specified CRC-8 value.
 */
static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
					unsigned char crc8,
					int queue)
{
	unsigned int omc_table_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
	reg_offset = crc8 % 4;	     /* Entry offset within the above reg   */

	omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);

	if (queue == -1) {
		/* Clear accepts frame bit at specified Other DA table entry */
		omc_table_reg &= ~(0xff << (8 * reg_offset));
	} else {
		omc_table_reg &= ~(0xff << (8 * reg_offset));
		omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
}

/* The network device supports multicast using two tables:
 * 1) Special Multicast Table for MAC addresses of the form
 *    0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
 *    The MAC DA[7:0] bits are used as a pointer to the Special Multicast
 *    Table entries in the DA-Filter table.
 * 2) Other Multicast Table for multicast of another type. A CRC-8 value
 *    is used as an index to the Other Multicast Table entries in the
 *    DA-Filter table.
 */
static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
                                 int queue)
{
    unsigned char crc_result = 0;

    if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
        mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
        return 0;
    }

    crc_result = mvneta_addr_crc(p_addr);
    if (queue == -1) {
        if (pp->mcast_count[crc_result] == 0) {
            netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
                        crc_result);
            return -EINVAL;
        }

        pp->mcast_count[crc_result]--;
        if (pp->mcast_count[crc_result] != 0) {
            netdev_info(pp->dev,
                        "After delete there are %d valid Mcast for crc8=0x%02x\n",
                        pp->mcast_count[crc_result], crc_result);
            return -EINVAL;
        }
    } else {
        pp->mcast_count[crc_result]++;
    }

    mvneta_set_other_mcast_addr(pp, crc_result, queue);

    return 0;
}
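/* Example: 01:00:5e:00:00:16 matches the special prefix, so it is programmed
 * into Special Multicast entry 0x16; 01:00:5e:01:02:03 does not, so its CRC-8
 * selects an Other Multicast entry, and mcast_count[] tracks how many
 * addresses currently share that entry.
 */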
/* Configure Filtering mode of Ethernet port */
static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
                                          int is_promisc)
{
    u32 port_cfg_reg, val;

    port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);

    val = mvreg_read(pp, MVNETA_TYPE_PRIO);

    /* Set / Clear UPM bit in port configuration register */
    if (is_promisc) {
        /* Accept all Unicast addresses */
        port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
        val |= MVNETA_FORCE_UNI;
        mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
        mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
    } else {
        /* Reject all Unicast addresses */
        port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
        val &= ~MVNETA_FORCE_UNI;
    }

    mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
    mvreg_write(pp, MVNETA_TYPE_PRIO, val);
}
/* register unicast and multicast addresses */
static void mvneta_set_rx_mode(struct net_device *dev)
{
    struct mvneta_port *pp = netdev_priv(dev);
    struct netdev_hw_addr *ha;

    if (dev->flags & IFF_PROMISC) {
        /* Accept all: Multicast + Unicast */
        mvneta_rx_unicast_promisc_set(pp, 1);
        mvneta_set_ucast_table(pp, pp->rxq_def);
        mvneta_set_special_mcast_table(pp, pp->rxq_def);
        mvneta_set_other_mcast_table(pp, pp->rxq_def);
    } else {
        /* Accept single Unicast */
        mvneta_rx_unicast_promisc_set(pp, 0);
        mvneta_set_ucast_table(pp, -1);
        mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def);

        if (dev->flags & IFF_ALLMULTI) {
            /* Accept all multicast */
            mvneta_set_special_mcast_table(pp, pp->rxq_def);
            mvneta_set_other_mcast_table(pp, pp->rxq_def);
        } else {
            /* Accept only initialized multicast */
            mvneta_set_special_mcast_table(pp, -1);
            mvneta_set_other_mcast_table(pp, -1);

            if (!netdev_mc_empty(dev)) {
                netdev_for_each_mc_addr(ha, dev) {
                    mvneta_mcast_addr_set(pp, ha->addr,
                                          pp->rxq_def);
                }
            }
        }
    }
}
/* Interrupt handling - the callback for request_irq() */
static irqreturn_t mvneta_isr(int irq, void *dev_id)
{
    struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id;

    disable_percpu_irq(port->pp->dev->irq);
    napi_schedule(&port->napi);

    return IRQ_HANDLED;
}
static int mvneta_fixed_link_update(struct mvneta_port *pp,
                                    struct phy_device *phy)
{
    struct fixed_phy_status status;
    struct fixed_phy_status changed = {};
    u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);

    status.link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
    if (gmac_stat & MVNETA_GMAC_SPEED_1000)
        status.speed = SPEED_1000;
    else if (gmac_stat & MVNETA_GMAC_SPEED_100)
        status.speed = SPEED_100;
    else
        status.speed = SPEED_10;
    status.duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);
    changed.link = 1;
    changed.speed = 1;
    changed.duplex = 1;
    fixed_phy_update_state(phy, &status, &changed);

    return 0;
}
/* NAPI handler
 * Bits 0 - 7 of the causeRxTx register indicate that packets were
 * transmitted on the corresponding TXQ (Bit 0 is for TX queue 1).
 * Bits 8 - 15 of the causeRxTx register indicate that packets were
 * received on the corresponding RXQ (Bit 8 is for RX queue 0).
 * Each CPU has its own causeRxTx register
 */
static int mvneta_poll(struct napi_struct *napi, int budget)
{
    int rx_done = 0;
    u32 cause_rx_tx;
    int rx_queue;
    struct mvneta_port *pp = netdev_priv(napi->dev);
    struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);

    if (!netif_running(pp->dev)) {
        napi_complete(&port->napi);
        return rx_done;
    }

    /* Read cause register */
    cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE);
    if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) {
        u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);

        mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
        if (pp->use_inband_status && (cause_misc &
                (MVNETA_CAUSE_PHY_STATUS_CHANGE |
                 MVNETA_CAUSE_LINK_CHANGE |
                 MVNETA_CAUSE_PSC_SYNC_CHANGE))) {
            mvneta_fixed_link_update(pp, pp->phy_dev);
        }
    }

    /* Release Tx descriptors */
    if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
        mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
        cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
    }

    /* For the case where the last mvneta_poll did not process all
     * RX packets
     */
    rx_queue = fls(((cause_rx_tx >> 8) & 0xff));

    cause_rx_tx |= port->cause_rx_tx;

    if (rx_queue) {
        rx_queue = rx_queue - 1;
        rx_done = mvneta_rx(pp, budget, &pp->rxqs[rx_queue]);
    }

    budget -= rx_done;

    if (budget > 0) {
        cause_rx_tx = 0;
        napi_complete(&port->napi);
        enable_percpu_irq(pp->dev->irq, 0);
    }

    port->cause_rx_tx = cause_rx_tx;

    return rx_done;
}
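/* Note on the budget handling above: when the budget is exhausted, the
 * remaining cause bits are kept in port->cause_rx_tx so the next poll on
 * this CPU resumes where this one stopped; the per-CPU interrupt is only
 * re-enabled once napi_complete() has run in the budget > 0 branch.
 */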
/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
                           int num)
{
    int i;

    for (i = 0; i < num; i++) {
        memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
        if (mvneta_rx_refill(pp, rxq->descs + i) != 0) {
            netdev_err(pp->dev, "%s:rxq %d, %d of %d buffs filled\n",
                       __func__, rxq->id, i, num);
            break;
        }
    }

    /* Add this number of RX descriptors as non occupied (ready to
     * get packets)
     */
    mvneta_rxq_non_occup_desc_add(pp, rxq, i);

    return i;
}
/* Free all packets pending transmit from all TXQs and reset TX port */
static void mvneta_tx_reset(struct mvneta_port *pp)
{
    int queue;

    /* free the skb's in the tx ring */
    for (queue = 0; queue < txq_number; queue++)
        mvneta_txq_done_force(pp, &pp->txqs[queue]);

    mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
    mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
}

static void mvneta_rx_reset(struct mvneta_port *pp)
{
    mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
    mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
}
/* Rx/Tx queue initialization/cleanup methods */

/* Create a specified RX queue */
static int mvneta_rxq_init(struct mvneta_port *pp,
                           struct mvneta_rx_queue *rxq)
{
    rxq->size = pp->rx_ring_size;

    /* Allocate memory for RX descriptors */
    rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
                                    rxq->size * MVNETA_DESC_ALIGNED_SIZE,
                                    &rxq->descs_phys, GFP_KERNEL);
    if (rxq->descs == NULL)
        return -ENOMEM;

    /* Make sure descriptor address is cache line size aligned */
    BUG_ON(rxq->descs !=
           PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));

    rxq->last_desc = rxq->size - 1;

    /* Set Rx descriptors queue starting address */
    mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
    mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);

    /* Set packet offset */
    mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD);

    /* Set coalescing pkts and time */
    mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
    mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);

    /* Fill RXQ with buffers from RX pool */
    mvneta_rxq_buf_size_set(pp, rxq, MVNETA_RX_BUF_SIZE(pp->pkt_size));
    mvneta_rxq_bm_disable(pp, rxq);
    mvneta_rxq_fill(pp, rxq, rxq->size);

    return 0;
}
/* Cleanup Rx queue */
static void mvneta_rxq_deinit(struct mvneta_port *pp,
                              struct mvneta_rx_queue *rxq)
{
    mvneta_rxq_drop_pkts(pp, rxq);

    if (rxq->descs)
        dma_free_coherent(pp->dev->dev.parent,
                          rxq->size * MVNETA_DESC_ALIGNED_SIZE,
                          rxq->descs, rxq->descs_phys);

    rxq->descs             = NULL;
    rxq->last_desc         = 0;
    rxq->next_desc_to_proc = 0;
    rxq->descs_phys        = 0;
}
/* Create and initialize a tx queue */
static int mvneta_txq_init(struct mvneta_port *pp,
                           struct mvneta_tx_queue *txq)
{
    int cpu;

    txq->size = pp->tx_ring_size;

    /* A queue must always have room for at least one skb.
     * Therefore, stop the queue when the free entries reaches
     * the maximum number of descriptors per skb.
     */
    txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
    txq->tx_wake_threshold = txq->tx_stop_threshold / 2;

    /* Allocate memory for TX descriptors */
    txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
                                    txq->size * MVNETA_DESC_ALIGNED_SIZE,
                                    &txq->descs_phys, GFP_KERNEL);
    if (txq->descs == NULL)
        return -ENOMEM;

    /* Make sure descriptor address is cache line size aligned */
    BUG_ON(txq->descs !=
           PTR_ALIGN(txq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));

    txq->last_desc = txq->size - 1;

    /* Set maximum bandwidth for enabled TXQs */
    mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
    mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);

    /* Set Tx descriptors queue starting address */
    mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
    mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);

    txq->tx_skb = kmalloc(txq->size * sizeof(*txq->tx_skb), GFP_KERNEL);
    if (txq->tx_skb == NULL) {
        dma_free_coherent(pp->dev->dev.parent,
                          txq->size * MVNETA_DESC_ALIGNED_SIZE,
                          txq->descs, txq->descs_phys);
        return -ENOMEM;
    }

    /* Allocate DMA buffers for TSO MAC/IP/TCP headers */
    txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
                                       txq->size * TSO_HEADER_SIZE,
                                       &txq->tso_hdrs_phys, GFP_KERNEL);
    if (txq->tso_hdrs == NULL) {
        kfree(txq->tx_skb);
        dma_free_coherent(pp->dev->dev.parent,
                          txq->size * MVNETA_DESC_ALIGNED_SIZE,
                          txq->descs, txq->descs_phys);
        return -ENOMEM;
    }
    mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);

    /* Setup XPS mapping */
    if (txq_number > 1)
        cpu = txq->id % num_present_cpus();
    else
        cpu = pp->rxq_def % num_present_cpus();
    cpumask_set_cpu(cpu, &txq->affinity_mask);
    netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id);

    return 0;
}
/* Free allocated resources when mvneta_txq_init() fails to allocate memory */
static void mvneta_txq_deinit(struct mvneta_port *pp,
                              struct mvneta_tx_queue *txq)
{
    kfree(txq->tx_skb);

    if (txq->tso_hdrs)
        dma_free_coherent(pp->dev->dev.parent,
                          txq->size * TSO_HEADER_SIZE,
                          txq->tso_hdrs, txq->tso_hdrs_phys);
    if (txq->descs)
        dma_free_coherent(pp->dev->dev.parent,
                          txq->size * MVNETA_DESC_ALIGNED_SIZE,
                          txq->descs, txq->descs_phys);

    txq->descs             = NULL;
    txq->last_desc         = 0;
    txq->next_desc_to_proc = 0;
    txq->descs_phys        = 0;

    /* Set minimum bandwidth for disabled TXQs */
    mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
    mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);

    /* Set Tx descriptors queue starting address and size */
    mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
    mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
}
/* Cleanup all Tx queues */
static void mvneta_cleanup_txqs(struct mvneta_port *pp)
{
    int queue;

    for (queue = 0; queue < txq_number; queue++)
        mvneta_txq_deinit(pp, &pp->txqs[queue]);
}

/* Cleanup all Rx queues */
static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
{
    int queue;

    for (queue = 0; queue < rxq_number; queue++)
        mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
}

/* Init all Rx queues */
static int mvneta_setup_rxqs(struct mvneta_port *pp)
{
    int queue;

    for (queue = 0; queue < rxq_number; queue++) {
        int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);

        if (err) {
            netdev_err(pp->dev, "%s: can't create rxq=%d\n",
                       __func__, queue);
            mvneta_cleanup_rxqs(pp);
            return err;
        }
    }

    return 0;
}

/* Init all tx queues */
static int mvneta_setup_txqs(struct mvneta_port *pp)
{
    int queue;

    for (queue = 0; queue < txq_number; queue++) {
        int err = mvneta_txq_init(pp, &pp->txqs[queue]);

        if (err) {
            netdev_err(pp->dev, "%s: can't create txq=%d\n",
                       __func__, queue);
            mvneta_cleanup_txqs(pp);
            return err;
        }
    }

    return 0;
}
static void mvneta_percpu_unmask_interrupt(void *arg)
{
    struct mvneta_port *pp = arg;

    /* All the queues are unmasked, but actually only the ones
     * mapped to this CPU will be unmasked
     */
    mvreg_write(pp, MVNETA_INTR_NEW_MASK,
                MVNETA_RX_INTR_MASK_ALL |
                MVNETA_TX_INTR_MASK_ALL |
                MVNETA_MISCINTR_INTR_MASK);
}

static void mvneta_percpu_mask_interrupt(void *arg)
{
    struct mvneta_port *pp = arg;

    /* All the queues are masked, but actually only the ones
     * mapped to this CPU will be masked
     */
    mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
    mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
    mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
}
static void mvneta_start_dev(struct mvneta_port *pp)
{
    int cpu;

    mvneta_max_rx_size_set(pp, pp->pkt_size);
    mvneta_txq_max_tx_size_set(pp, pp->pkt_size);

    /* start the Rx/Tx activity */
    mvneta_port_enable(pp);

    /* Enable polling on the port */
    for_each_online_cpu(cpu) {
        struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);

        napi_enable(&port->napi);
    }

    /* Unmask interrupts. It has to be done from each CPU */
    for_each_online_cpu(cpu)
        smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt,
                                 pp, true);

    mvreg_write(pp, MVNETA_INTR_MISC_MASK,
                MVNETA_CAUSE_PHY_STATUS_CHANGE |
                MVNETA_CAUSE_LINK_CHANGE |
                MVNETA_CAUSE_PSC_SYNC_CHANGE);

    phy_start(pp->phy_dev);
    netif_tx_start_all_queues(pp->dev);
}
static void mvneta_stop_dev(struct mvneta_port *pp)
{
    unsigned int cpu;

    phy_stop(pp->phy_dev);

    for_each_online_cpu(cpu) {
        struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);

        napi_disable(&port->napi);
    }

    netif_carrier_off(pp->dev);

    mvneta_port_down(pp);
    netif_tx_stop_all_queues(pp->dev);

    /* Stop the port activity */
    mvneta_port_disable(pp);

    /* Clear all ethernet port interrupts */
    mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
    mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);

    /* Mask all ethernet port interrupts */
    mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
    mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
    mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);

    mvneta_tx_reset(pp);
    mvneta_rx_reset(pp);
}
/* Return positive if MTU is valid */
static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
{
    if (mtu < 68) {
        netdev_err(dev, "cannot change mtu to less than 68\n");
        return -EINVAL;
    }

    /* 9676 == 9700 - 20 and rounding to 8 */
    if (mtu > 9676) {
        netdev_info(dev, "Illegal MTU value %d, round to 9676\n", mtu);
        mtu = 9676;
    }

    if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
        netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
                    mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
        mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
    }

    return mtu;
}
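/* The 9676 limit above presumably comes from the controller's maximum frame
 * size of 9700 bytes minus roughly 20 bytes of Ethernet/Marvell header
 * overhead, further constrained so that MVNETA_RX_PKT_SIZE() stays 8-byte
 * aligned; treat the exact arithmetic as informational only.
 */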
/* Change the device mtu */
static int mvneta_change_mtu(struct net_device *dev, int mtu)
{
    struct mvneta_port *pp = netdev_priv(dev);
    int ret;

    mtu = mvneta_check_mtu_valid(dev, mtu);
    if (mtu < 0)
        return -EINVAL;

    dev->mtu = mtu;

    if (!netif_running(dev)) {
        netdev_update_features(dev);
        return 0;
    }

    /* The interface is running, so we have to force a
     * reallocation of the queues
     */
    mvneta_stop_dev(pp);

    mvneta_cleanup_txqs(pp);
    mvneta_cleanup_rxqs(pp);

    pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
    pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

    ret = mvneta_setup_rxqs(pp);
    if (ret) {
        netdev_err(dev, "unable to setup rxqs after MTU change\n");
        return ret;
    }

    ret = mvneta_setup_txqs(pp);
    if (ret) {
        netdev_err(dev, "unable to setup txqs after MTU change\n");
        return ret;
    }

    mvneta_start_dev(pp);
    mvneta_port_up(pp);

    netdev_update_features(dev);

    return 0;
}
static netdev_features_t mvneta_fix_features(struct net_device *dev,
                                             netdev_features_t features)
{
    struct mvneta_port *pp = netdev_priv(dev);

    if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
        features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
        netdev_info(dev,
                    "Disable IP checksum for MTU greater than %dB\n",
                    pp->tx_csum_limit);
    }

    return features;
}
/* Get mac address */
static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
{
    u32 mac_addr_l, mac_addr_h;

    mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
    mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
    addr[0] = (mac_addr_h >> 24) & 0xFF;
    addr[1] = (mac_addr_h >> 16) & 0xFF;
    addr[2] = (mac_addr_h >> 8) & 0xFF;
    addr[3] = mac_addr_h & 0xFF;
    addr[4] = (mac_addr_l >> 8) & 0xFF;
    addr[5] = mac_addr_l & 0xFF;
}
/* Handle setting mac address */
static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
{
    struct mvneta_port *pp = netdev_priv(dev);
    struct sockaddr *sockaddr = addr;
    int ret;

    ret = eth_prepare_mac_addr_change(dev, addr);
    if (ret < 0)
        return ret;
    /* Remove previous address table entry */
    mvneta_mac_addr_set(pp, dev->dev_addr, -1);

    /* Set new addr in hw */
    mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def);

    eth_commit_mac_addr_change(dev, addr);
    return 0;
}
static void mvneta_adjust_link(struct net_device *ndev)
{
    struct mvneta_port *pp = netdev_priv(ndev);
    struct phy_device *phydev = pp->phy_dev;
    int status_change = 0;

    if (phydev->link) {
        if ((pp->speed != phydev->speed) ||
            (pp->duplex != phydev->duplex)) {
            u32 val;

            val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
            val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
                     MVNETA_GMAC_CONFIG_GMII_SPEED |
                     MVNETA_GMAC_CONFIG_FULL_DUPLEX);

            if (phydev->duplex)
                val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;

            if (phydev->speed == SPEED_1000)
                val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
            else if (phydev->speed == SPEED_100)
                val |= MVNETA_GMAC_CONFIG_MII_SPEED;

            mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);

            pp->duplex = phydev->duplex;
            pp->speed = phydev->speed;
        }
    }

    if (phydev->link != pp->link) {
        if (!phydev->link) {
            pp->duplex = -1;
            pp->speed = 0;
        }

        pp->link = phydev->link;
        status_change = 1;
    }

    if (status_change) {
        if (phydev->link) {
            if (!pp->use_inband_status) {
                u32 val = mvreg_read(pp,
                                     MVNETA_GMAC_AUTONEG_CONFIG);
                val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
                val |= MVNETA_GMAC_FORCE_LINK_PASS;
                mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
                            val);
            }
            mvneta_port_up(pp);
        } else {
            if (!pp->use_inband_status) {
                u32 val = mvreg_read(pp,
                                     MVNETA_GMAC_AUTONEG_CONFIG);
                val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
                val |= MVNETA_GMAC_FORCE_LINK_DOWN;
                mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
                            val);
            }
            mvneta_port_down(pp);
        }
        phy_print_status(phydev);
    }
}
static int mvneta_mdio_probe(struct mvneta_port *pp)
{
    struct phy_device *phy_dev;

    phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0,
                             pp->phy_interface);
    if (!phy_dev) {
        netdev_err(pp->dev, "could not find the PHY\n");
        return -ENODEV;
    }

    phy_dev->supported &= PHY_GBIT_FEATURES;
    phy_dev->advertising = phy_dev->supported;

    pp->phy_dev = phy_dev;
    pp->link    = 0;
    pp->duplex  = 0;
    pp->speed   = 0;

    return 0;
}

static void mvneta_mdio_remove(struct mvneta_port *pp)
{
    phy_disconnect(pp->phy_dev);

    pp->phy_dev = NULL;
}
static void mvneta_percpu_enable(void *arg)
{
    struct mvneta_port *pp = arg;

    enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
}

static void mvneta_percpu_disable(void *arg)
{
    struct mvneta_port *pp = arg;

    disable_percpu_irq(pp->dev->irq);
}
static void mvneta_percpu_elect(struct mvneta_port *pp)
{
    int elected_cpu = 0, max_cpu, cpu, i = 0;

    /* Use the cpu associated to the rxq when it is online, in all
     * the other cases, use the cpu 0 which can't be offline.
     */
    if (cpu_online(pp->rxq_def))
        elected_cpu = pp->rxq_def;

    max_cpu = num_present_cpus();

    for_each_online_cpu(cpu) {
        int rxq_map = 0, txq_map = 0;
        int rxq;

        for (rxq = 0; rxq < rxq_number; rxq++)
            if ((rxq % max_cpu) == cpu)
                rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);

        if (cpu == elected_cpu)
            /* Map the default receive queue to the elected CPU */
            rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def);

        /* We update the TX queue map only if we have one
         * queue. In this case we associate the TX queue to
         * the CPU bound to the default RX queue
         */
        if (txq_number == 1)
            txq_map = (cpu == elected_cpu) ?
                MVNETA_CPU_TXQ_ACCESS(1) : 0;
        else
            txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
                MVNETA_CPU_TXQ_ACCESS_ALL_MASK;

        mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);

        /* Update the interrupt mask on each CPU according the
         * new mapping
         */
        smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt,
                                 pp, true);
        i++;
    }
}
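/* Example of the static distribution above: with 4 CPUs and 8 RX queues,
 * CPU0 is given queues 0 and 4, CPU1 queues 1 and 5, and so on, while the
 * elected CPU additionally owns rxq_def (and the single TX queue when
 * txq_number == 1).
 */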
static int mvneta_percpu_notifier(struct notifier_block *nfb,
                                  unsigned long action, void *hcpu)
{
    struct mvneta_port *pp = container_of(nfb, struct mvneta_port,
                                          cpu_notifier);
    int cpu = (unsigned long)hcpu, other_cpu;
    struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);

    switch (action) {
    case CPU_ONLINE:
    case CPU_ONLINE_FROZEN:
        netif_tx_stop_all_queues(pp->dev);

        /* We have to synchronise on the napi of each CPU
         * except the one just being woken up
         */
        for_each_online_cpu(other_cpu) {
            if (other_cpu != cpu) {
                struct mvneta_pcpu_port *other_port =
                    per_cpu_ptr(pp->ports, other_cpu);

                napi_synchronize(&other_port->napi);
            }
        }

        /* Mask all ethernet port interrupts */
        mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
        mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
        mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
        napi_enable(&port->napi);

        /* Enable per-CPU interrupts on the CPU that is
         * brought up.
         */
        smp_call_function_single(cpu, mvneta_percpu_enable,
                                 pp, true);

        /* Enable per-CPU interrupt on the one CPU we care
         * about.
         */
        mvneta_percpu_elect(pp);

        /* Unmask all ethernet port interrupts, as this
         * notifier is called for each CPU then the CPU to
         * Queue mapping is applied
         */
        mvreg_write(pp, MVNETA_INTR_NEW_MASK,
                    MVNETA_RX_INTR_MASK(rxq_number) |
                    MVNETA_TX_INTR_MASK(txq_number) |
                    MVNETA_MISCINTR_INTR_MASK);
        mvreg_write(pp, MVNETA_INTR_MISC_MASK,
                    MVNETA_CAUSE_PHY_STATUS_CHANGE |
                    MVNETA_CAUSE_LINK_CHANGE |
                    MVNETA_CAUSE_PSC_SYNC_CHANGE);
        netif_tx_start_all_queues(pp->dev);
        break;
    case CPU_DOWN_PREPARE:
    case CPU_DOWN_PREPARE_FROZEN:
        netif_tx_stop_all_queues(pp->dev);
        /* Mask all ethernet port interrupts */
        mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
        mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
        mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);

        napi_synchronize(&port->napi);
        napi_disable(&port->napi);
        /* Disable per-CPU interrupts on the CPU that is
         * brought down.
         */
        smp_call_function_single(cpu, mvneta_percpu_disable,
                                 pp, true);
        break;
    case CPU_DEAD:
    case CPU_DEAD_FROZEN:
        /* Check if a new CPU must be elected now this one is down */
        mvneta_percpu_elect(pp);
        /* Unmask all ethernet port interrupts */
        mvreg_write(pp, MVNETA_INTR_NEW_MASK,
                    MVNETA_RX_INTR_MASK(rxq_number) |
                    MVNETA_TX_INTR_MASK(txq_number) |
                    MVNETA_MISCINTR_INTR_MASK);
        mvreg_write(pp, MVNETA_INTR_MISC_MASK,
                    MVNETA_CAUSE_PHY_STATUS_CHANGE |
                    MVNETA_CAUSE_LINK_CHANGE |
                    MVNETA_CAUSE_PSC_SYNC_CHANGE);
        netif_tx_start_all_queues(pp->dev);
        break;
    }

    return NOTIFY_OK;
}
static int mvneta_open(struct net_device *dev)
{
    struct mvneta_port *pp = netdev_priv(dev);
    int ret, cpu;

    pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
    pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

    ret = mvneta_setup_rxqs(pp);
    if (ret)
        return ret;

    ret = mvneta_setup_txqs(pp);
    if (ret)
        goto err_cleanup_rxqs;

    /* Connect to port interrupt line */
    ret = request_percpu_irq(pp->dev->irq, mvneta_isr,
                             MVNETA_DRIVER_NAME, pp->ports);
    if (ret) {
        netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
        goto err_cleanup_txqs;
    }

    /* Even though the documentation says that request_percpu_irq
     * doesn't enable the interrupts automatically, it actually
     * does so on the local CPU.
     *
     * Make sure it's disabled.
     */
    mvneta_percpu_disable(pp);

    /* Enable per-CPU interrupt on all the CPU to handle our RX
     * queue interrupts
     */
    for_each_online_cpu(cpu)
        smp_call_function_single(cpu, mvneta_percpu_enable,
                                 pp, true);

    /* Register a CPU notifier to handle the case where our CPU
     * might be taken offline.
     */
    register_cpu_notifier(&pp->cpu_notifier);

    /* In default link is down */
    netif_carrier_off(pp->dev);

    ret = mvneta_mdio_probe(pp);
    if (ret < 0) {
        netdev_err(dev, "cannot probe MDIO bus\n");
        goto err_free_irq;
    }

    mvneta_start_dev(pp);

    return 0;

err_free_irq:
    free_percpu_irq(pp->dev->irq, pp->ports);
err_cleanup_txqs:
    mvneta_cleanup_txqs(pp);
err_cleanup_rxqs:
    mvneta_cleanup_rxqs(pp);
    return ret;
}
/* Stop the port, free port interrupt line */
static int mvneta_stop(struct net_device *dev)
{
    struct mvneta_port *pp = netdev_priv(dev);

    mvneta_stop_dev(pp);
    mvneta_mdio_remove(pp);
    unregister_cpu_notifier(&pp->cpu_notifier);
    on_each_cpu(mvneta_percpu_disable, pp, true);
    free_percpu_irq(dev->irq, pp->ports);
    mvneta_cleanup_rxqs(pp);
    mvneta_cleanup_txqs(pp);

    return 0;
}
static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
    struct mvneta_port *pp = netdev_priv(dev);

    if (!pp->phy_dev)
        return -ENOTSUPP;

    return phy_mii_ioctl(pp->phy_dev, ifr, cmd);
}
/* Ethtool methods */

/* Get settings (phy address, speed) for ethtools */
int mvneta_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
    struct mvneta_port *pp = netdev_priv(dev);

    if (!pp->phy_dev)
        return -ENODEV;

    return phy_ethtool_gset(pp->phy_dev, cmd);
}
/* Set settings (phy address, speed) for ethtools */
int mvneta_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
    struct mvneta_port *pp = netdev_priv(dev);
    struct phy_device *phydev = pp->phy_dev;

    if (!phydev)
        return -ENODEV;

    if ((cmd->autoneg == AUTONEG_ENABLE) != pp->use_inband_status) {
        u32 val;

        mvneta_set_autoneg(pp, cmd->autoneg == AUTONEG_ENABLE);

        if (cmd->autoneg == AUTONEG_DISABLE) {
            val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
            val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
                     MVNETA_GMAC_CONFIG_GMII_SPEED |
                     MVNETA_GMAC_CONFIG_FULL_DUPLEX);

            if (phydev->duplex)
                val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;

            if (phydev->speed == SPEED_1000)
                val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
            else if (phydev->speed == SPEED_100)
                val |= MVNETA_GMAC_CONFIG_MII_SPEED;

            mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
        }

        pp->use_inband_status = (cmd->autoneg == AUTONEG_ENABLE);
        netdev_info(pp->dev, "autoneg status set to %i\n",
                    pp->use_inband_status);

        if (netif_running(dev)) {
            mvneta_port_down(pp);
            mvneta_port_up(pp);
        }
    }

    return phy_ethtool_sset(pp->phy_dev, cmd);
}
/* Set interrupt coalescing for ethtools */
static int mvneta_ethtool_set_coalesce(struct net_device *dev,
                                       struct ethtool_coalesce *c)
{
    struct mvneta_port *pp = netdev_priv(dev);
    int queue;

    for (queue = 0; queue < rxq_number; queue++) {
        struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

        rxq->time_coal = c->rx_coalesce_usecs;
        rxq->pkts_coal = c->rx_max_coalesced_frames;
        mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
        mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
    }

    for (queue = 0; queue < txq_number; queue++) {
        struct mvneta_tx_queue *txq = &pp->txqs[queue];

        txq->done_pkts_coal = c->tx_max_coalesced_frames;
        mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
    }

    return 0;
}
/* get coalescing for ethtools */
static int mvneta_ethtool_get_coalesce(struct net_device *dev,
                                       struct ethtool_coalesce *c)
{
    struct mvneta_port *pp = netdev_priv(dev);

    c->rx_coalesce_usecs       = pp->rxqs[0].time_coal;
    c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal;

    c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;
    return 0;
}
static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
                                       struct ethtool_drvinfo *drvinfo)
{
    strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
            sizeof(drvinfo->driver));
    strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
            sizeof(drvinfo->version));
    strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
            sizeof(drvinfo->bus_info));
}

static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
                                         struct ethtool_ringparam *ring)
{
    struct mvneta_port *pp = netdev_priv(netdev);

    ring->rx_max_pending = MVNETA_MAX_RXD;
    ring->tx_max_pending = MVNETA_MAX_TXD;
    ring->rx_pending = pp->rx_ring_size;
    ring->tx_pending = pp->tx_ring_size;
}
static int mvneta_ethtool_set_ringparam(struct net_device *dev,
                                        struct ethtool_ringparam *ring)
{
    struct mvneta_port *pp = netdev_priv(dev);

    if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
        return -EINVAL;
    pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
        ring->rx_pending : MVNETA_MAX_RXD;

    pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
                               MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD);
    if (pp->tx_ring_size != ring->tx_pending)
        netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
                    pp->tx_ring_size, ring->tx_pending);

    if (netif_running(dev)) {
        mvneta_stop(dev);
        if (mvneta_open(dev)) {
            netdev_err(dev,
                       "error on opening device after ring param change\n");
            return -ENOMEM;
        }
    }

    return 0;
}
static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
                                       u8 *data)
{
    if (sset == ETH_SS_STATS) {
        int i;

        for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
            memcpy(data + i * ETH_GSTRING_LEN,
                   mvneta_statistics[i].name, ETH_GSTRING_LEN);
    }
}

static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
{
    const struct mvneta_statistic *s;
    void __iomem *base = pp->base;
    u32 high, low, val;
    u64 val64;
    int i;

    for (i = 0, s = mvneta_statistics;
         s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
         s++, i++) {
        switch (s->type) {
        case T_REG_32:
            val = readl_relaxed(base + s->offset);
            pp->ethtool_stats[i] += val;
            break;
        case T_REG_64:
            /* Docs say to read low 32-bit then high */
            low = readl_relaxed(base + s->offset);
            high = readl_relaxed(base + s->offset + 4);
            val64 = (u64)high << 32 | low;
            pp->ethtool_stats[i] += val64;
            break;
        }
    }
}

static void mvneta_ethtool_get_stats(struct net_device *dev,
                                     struct ethtool_stats *stats, u64 *data)
{
    struct mvneta_port *pp = netdev_priv(dev);
    int i;

    mvneta_ethtool_update_stats(pp);

    for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
        *data++ = pp->ethtool_stats[i];
}

static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
{
    if (sset == ETH_SS_STATS)
        return ARRAY_SIZE(mvneta_statistics);
    return -EOPNOTSUPP;
}
static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev)
{
    return MVNETA_RSS_LU_TABLE_SIZE;
}

static int mvneta_ethtool_get_rxnfc(struct net_device *dev,
                                    struct ethtool_rxnfc *info,
                                    u32 *rules __always_unused)
{
    switch (info->cmd) {
    case ETHTOOL_GRXRINGS:
        info->data = rxq_number;
        return 0;
    default:
        return -EOPNOTSUPP;
    }
}
static int mvneta_config_rss(struct mvneta_port *pp)
{
    int cpu;
    u32 val;

    netif_tx_stop_all_queues(pp->dev);

    for_each_online_cpu(cpu)
        smp_call_function_single(cpu, mvneta_percpu_mask_interrupt,
                                 pp, true);

    /* We have to synchronise on the napi of each CPU */
    for_each_online_cpu(cpu) {
        struct mvneta_pcpu_port *pcpu_port =
            per_cpu_ptr(pp->ports, cpu);

        napi_synchronize(&pcpu_port->napi);
        napi_disable(&pcpu_port->napi);
    }

    pp->rxq_def = pp->indir[0];

    /* Update unicast mapping */
    mvneta_set_rx_mode(pp->dev);

    /* Update val of portCfg register accordingly with all RxQueue types */
    val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
    mvreg_write(pp, MVNETA_PORT_CONFIG, val);

    /* Update the elected CPU matching the new rxq_def */
    mvneta_percpu_elect(pp);

    /* We have to synchronise on the napi of each CPU */
    for_each_online_cpu(cpu) {
        struct mvneta_pcpu_port *pcpu_port =
            per_cpu_ptr(pp->ports, cpu);

        napi_enable(&pcpu_port->napi);
    }

    netif_tx_start_all_queues(pp->dev);

    return 0;
}
static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
                                   const u8 *key, const u8 hfunc)
{
    struct mvneta_port *pp = netdev_priv(dev);

    /* We require at least one supported parameter to be changed
     * and no change in any of the unsupported parameters
     */
    if (key ||
        (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
        return -EOPNOTSUPP;

    if (!indir)
        return 0;

    memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE);

    return mvneta_config_rss(pp);
}

static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
                                   u8 *hfunc)
{
    struct mvneta_port *pp = netdev_priv(dev);

    if (hfunc)
        *hfunc = ETH_RSS_HASH_TOP;

    if (!indir)
        return 0;

    memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE);

    return 0;
}
static const struct net_device_ops mvneta_netdev_ops = {
    .ndo_open            = mvneta_open,
    .ndo_stop            = mvneta_stop,
    .ndo_start_xmit      = mvneta_tx,
    .ndo_set_rx_mode     = mvneta_set_rx_mode,
    .ndo_set_mac_address = mvneta_set_mac_addr,
    .ndo_change_mtu      = mvneta_change_mtu,
    .ndo_fix_features    = mvneta_fix_features,
    .ndo_get_stats64     = mvneta_get_stats64,
    .ndo_do_ioctl        = mvneta_ioctl,
};

const struct ethtool_ops mvneta_eth_tool_ops = {
    .get_link            = ethtool_op_get_link,
    .get_settings        = mvneta_ethtool_get_settings,
    .set_settings        = mvneta_ethtool_set_settings,
    .set_coalesce        = mvneta_ethtool_set_coalesce,
    .get_coalesce        = mvneta_ethtool_get_coalesce,
    .get_drvinfo         = mvneta_ethtool_get_drvinfo,
    .get_ringparam       = mvneta_ethtool_get_ringparam,
    .set_ringparam       = mvneta_ethtool_set_ringparam,
    .get_strings         = mvneta_ethtool_get_strings,
    .get_ethtool_stats   = mvneta_ethtool_get_stats,
    .get_sset_count      = mvneta_ethtool_get_sset_count,
    .get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size,
    .get_rxnfc           = mvneta_ethtool_get_rxnfc,
    .get_rxfh            = mvneta_ethtool_get_rxfh,
    .set_rxfh            = mvneta_ethtool_set_rxfh,
};
static int mvneta_init(struct device *dev, struct mvneta_port *pp)
{
    int queue;

    /* Disable port */
    mvneta_port_disable(pp);

    /* Set port default values */
    mvneta_defaults_set(pp);

    pp->txqs = devm_kcalloc(dev, txq_number, sizeof(struct mvneta_tx_queue),
                            GFP_KERNEL);
    if (!pp->txqs)
        return -ENOMEM;

    /* Initialize TX descriptor rings */
    for (queue = 0; queue < txq_number; queue++) {
        struct mvneta_tx_queue *txq = &pp->txqs[queue];

        txq->id = queue;
        txq->size = pp->tx_ring_size;
        txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
    }

    pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(struct mvneta_rx_queue),
                            GFP_KERNEL);
    if (!pp->rxqs)
        return -ENOMEM;

    /* Create Rx descriptor rings */
    for (queue = 0; queue < rxq_number; queue++) {
        struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

        rxq->id = queue;
        rxq->size = pp->rx_ring_size;
        rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
        rxq->time_coal = MVNETA_RX_COAL_USEC;
    }

    return 0;
}
/* platform glue : initialize decoding windows */
static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
                                     const struct mbus_dram_target_info *dram)
{
    u32 win_enable;
    u32 win_protect;
    int i;

    for (i = 0; i < 6; i++) {
        mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
        mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);

        if (i < 4)
            mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
    }

    win_enable = 0x3f;
    win_protect = 0;

    for (i = 0; i < dram->num_cs; i++) {
        const struct mbus_dram_window *cs = dram->cs + i;

        mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) |
                    (cs->mbus_attr << 8) | dram->mbus_dram_target_id);

        mvreg_write(pp, MVNETA_WIN_SIZE(i),
                    (cs->size - 1) & 0xffff0000);

        win_enable &= ~(1 << i);
        win_protect |= 3 << (2 * i);
    }

    mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
    mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
}
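/* In the two writes above, a cleared bit in MVNETA_BASE_ADDR_ENABLE appears
 * to enable the corresponding decoding window, while each 2-bit field set to
 * 3 in the protection register grants full read/write access; this reading of
 * the register semantics is inferred from the code, not from the datasheet.
 */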
/* Power up the port */
static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
{
    u32 ctrl;

    /* MAC Cause register should be cleared */
    mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);

    ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2);

    /* Even though it might look weird, when we're configured in
     * SGMII or QSGMII mode, the RGMII bit needs to be set.
     */
    switch (phy_mode) {
    case PHY_INTERFACE_MODE_QSGMII:
        mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
        ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
        break;
    case PHY_INTERFACE_MODE_SGMII:
        mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
        ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
        break;
    case PHY_INTERFACE_MODE_RGMII:
    case PHY_INTERFACE_MODE_RGMII_ID:
        ctrl |= MVNETA_GMAC2_PORT_RGMII;
        break;
    default:
        return -EINVAL;
    }

    /* Cancel Port Reset */
    ctrl &= ~MVNETA_GMAC2_PORT_RESET;
    mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl);

    while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
            MVNETA_GMAC2_PORT_RESET) != 0)
        continue;

    return 0;
}
/* Device initialization routine */
static int mvneta_probe(struct platform_device *pdev)
{
    const struct mbus_dram_target_info *dram_target_info;
    struct resource *res;
    struct device_node *dn = pdev->dev.of_node;
    struct device_node *phy_node;
    struct mvneta_port *pp;
    struct net_device *dev;
    const char *dt_mac_addr;
    char hw_mac_addr[ETH_ALEN];
    const char *mac_from;
    const char *managed;
    int tx_csum_limit;
    int phy_mode;
    int err;
    int cpu;

    dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);
    if (!dev)
        return -ENOMEM;

    dev->irq = irq_of_parse_and_map(dn, 0);
    if (dev->irq == 0) {
        err = -EINVAL;
        goto err_free_netdev;
    }

    phy_node = of_parse_phandle(dn, "phy", 0);
    if (!phy_node) {
        if (!of_phy_is_fixed_link(dn)) {
            dev_err(&pdev->dev, "no PHY specified\n");
            err = -ENODEV;
            goto err_free_irq;
        }

        err = of_phy_register_fixed_link(dn);
        if (err < 0) {
            dev_err(&pdev->dev, "cannot register fixed PHY\n");
            goto err_free_irq;
        }

        /* In the case of a fixed PHY, the DT node associated
         * to the PHY is the Ethernet MAC DT node.
         */
        phy_node = of_node_get(dn);
    }

    phy_mode = of_get_phy_mode(dn);
    if (phy_mode < 0) {
        dev_err(&pdev->dev, "incorrect phy-mode\n");
        err = -EINVAL;
        goto err_put_phy_node;
    }

    dev->tx_queue_len = MVNETA_MAX_TXD;
    dev->watchdog_timeo = 5 * HZ;
    dev->netdev_ops = &mvneta_netdev_ops;

    dev->ethtool_ops = &mvneta_eth_tool_ops;

    pp = netdev_priv(dev);
    pp->phy_node = phy_node;
    pp->phy_interface = phy_mode;

    err = of_property_read_string(dn, "managed", &managed);
    pp->use_inband_status = (err == 0 &&
                             strcmp(managed, "in-band-status") == 0);
    pp->cpu_notifier.notifier_call = mvneta_percpu_notifier;

    pp->rxq_def = rxq_def;

    pp->indir[0] = rxq_def;

    pp->clk = devm_clk_get(&pdev->dev, "core");
    if (IS_ERR(pp->clk))
        pp->clk = devm_clk_get(&pdev->dev, NULL);
    if (IS_ERR(pp->clk)) {
        err = PTR_ERR(pp->clk);
        goto err_put_phy_node;
    }

    clk_prepare_enable(pp->clk);

    pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
    if (!IS_ERR(pp->clk_bus))
        clk_prepare_enable(pp->clk_bus);

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    pp->base = devm_ioremap_resource(&pdev->dev, res);
    if (IS_ERR(pp->base)) {
        err = PTR_ERR(pp->base);
        goto err_clk;
    }

    /* Alloc per-cpu port structure */
    pp->ports = alloc_percpu(struct mvneta_pcpu_port);
    if (!pp->ports) {
        err = -ENOMEM;
        goto err_clk;
    }

    /* Alloc per-cpu stats */
    pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
    if (!pp->stats) {
        err = -ENOMEM;
        goto err_free_ports;
    }

    dt_mac_addr = of_get_mac_address(dn);
    if (dt_mac_addr) {
        mac_from = "device tree";
        memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN);
    } else {
        mvneta_get_mac_addr(pp, hw_mac_addr);
        if (is_valid_ether_addr(hw_mac_addr)) {
            mac_from = "hardware";
            memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
        } else {
            mac_from = "random";
            eth_hw_addr_random(dev);
        }
    }

    if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) {
        if (tx_csum_limit < 0 ||
            tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) {
            tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
            dev_info(&pdev->dev,
                     "Wrong TX csum limit in DT, set to %dB\n",
                     MVNETA_TX_CSUM_DEF_SIZE);
        }
    } else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) {
        tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
    } else {
        tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE;
    }

    pp->tx_csum_limit = tx_csum_limit;

    pp->tx_ring_size = MVNETA_MAX_TXD;
    pp->rx_ring_size = MVNETA_MAX_RXD;

    pp->dev = dev;
    SET_NETDEV_DEV(dev, &pdev->dev);

    err = mvneta_init(&pdev->dev, pp);
    if (err < 0)
        goto err_free_stats;

    err = mvneta_port_power_up(pp, phy_mode);
    if (err < 0) {
        dev_err(&pdev->dev, "can't power up port\n");
        goto err_free_stats;
    }

    dram_target_info = mv_mbus_dram_info();
    if (dram_target_info)
        mvneta_conf_mbus_windows(pp, dram_target_info);

    for_each_present_cpu(cpu) {
        struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);

        netif_napi_add(dev, &port->napi, mvneta_poll, NAPI_POLL_WEIGHT);
        port->pp = pp;
    }

    dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
    dev->hw_features |= dev->features;
    dev->vlan_features |= dev->features;
    dev->priv_flags |= IFF_UNICAST_FLT;
    dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;

    err = register_netdev(dev);
    if (err < 0) {
        dev_err(&pdev->dev, "failed to register\n");
        goto err_free_stats;
    }

    netdev_info(dev, "Using %s mac address %pM\n", mac_from,
                dev->dev_addr);

    platform_set_drvdata(pdev, pp->dev);

    if (pp->use_inband_status) {
        struct phy_device *phy = of_phy_find_device(dn);

        mvneta_fixed_link_update(pp, phy);

        put_device(&phy->mdio.dev);
    }

    return 0;

err_free_stats:
    free_percpu(pp->stats);
err_free_ports:
    free_percpu(pp->ports);
err_clk:
    clk_disable_unprepare(pp->clk_bus);
    clk_disable_unprepare(pp->clk);
err_put_phy_node:
    of_node_put(phy_node);
err_free_irq:
    irq_dispose_mapping(dev->irq);
err_free_netdev:
    free_netdev(dev);
    return err;
}
/* Device removal routine */
static int mvneta_remove(struct platform_device *pdev)
{
    struct net_device *dev = platform_get_drvdata(pdev);
    struct mvneta_port *pp = netdev_priv(dev);

    unregister_netdev(dev);
    clk_disable_unprepare(pp->clk_bus);
    clk_disable_unprepare(pp->clk);
    free_percpu(pp->ports);
    free_percpu(pp->stats);
    irq_dispose_mapping(dev->irq);
    of_node_put(pp->phy_node);
    free_netdev(dev);

    return 0;
}
static const struct of_device_id mvneta_match[] = {
    { .compatible = "marvell,armada-370-neta" },
    { .compatible = "marvell,armada-xp-neta" },
    { }
};
MODULE_DEVICE_TABLE(of, mvneta_match);

static struct platform_driver mvneta_driver = {
    .probe = mvneta_probe,
    .remove = mvneta_remove,
    .driver = {
        .name = MVNETA_DRIVER_NAME,
        .of_match_table = mvneta_match,
    },
};

module_platform_driver(mvneta_driver);

MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
MODULE_LICENSE("GPL");

module_param(rxq_number, int, S_IRUGO);
module_param(txq_number, int, S_IRUGO);

module_param(rxq_def, int, S_IRUGO);
module_param(rx_copybreak, int, S_IRUGO | S_IWUSR);