drivers/net/ethernet/calxeda/xgmac.c
1/*
2 * Copyright 2010-2011 Calxeda, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#include <linux/module.h>
17#include <linux/init.h>
18#include <linux/kernel.h>
19#include <linux/circ_buf.h>
20#include <linux/interrupt.h>
21#include <linux/etherdevice.h>
22#include <linux/platform_device.h>
23#include <linux/skbuff.h>
24#include <linux/ethtool.h>
25#include <linux/if.h>
26#include <linux/crc32.h>
27#include <linux/dma-mapping.h>
28#include <linux/slab.h>
29
30/* XGMAC Register definitions */
31#define XGMAC_CONTROL 0x00000000 /* MAC Configuration */
32#define XGMAC_FRAME_FILTER 0x00000004 /* MAC Frame Filter */
33#define XGMAC_FLOW_CTRL 0x00000018 /* MAC Flow Control */
34#define XGMAC_VLAN_TAG 0x0000001C /* VLAN Tags */
35#define XGMAC_VERSION 0x00000020 /* Version */
36#define XGMAC_VLAN_INCL 0x00000024 /* VLAN tag for tx frames */
37#define XGMAC_LPI_CTRL 0x00000028 /* LPI Control and Status */
38#define XGMAC_LPI_TIMER 0x0000002C /* LPI Timers Control */
39#define XGMAC_TX_PACE 0x00000030 /* Transmit Pace and Stretch */
40#define XGMAC_VLAN_HASH 0x00000034 /* VLAN Hash Table */
41#define XGMAC_DEBUG 0x00000038 /* Debug */
42#define XGMAC_INT_STAT 0x0000003C /* Interrupt and Control */
43#define XGMAC_ADDR_HIGH(reg) (0x00000040 + ((reg) * 8))
44#define XGMAC_ADDR_LOW(reg) (0x00000044 + ((reg) * 8))
45#define XGMAC_HASH(n) (0x00000300 + (n) * 4) /* HASH table regs */
46#define XGMAC_NUM_HASH 16
47#define XGMAC_OMR 0x00000400
48#define XGMAC_REMOTE_WAKE 0x00000700 /* Remote Wake-Up Frm Filter */
49#define XGMAC_PMT 0x00000704 /* PMT Control and Status */
50#define XGMAC_MMC_CTRL 0x00000800 /* XGMAC MMC Control */
51#define XGMAC_MMC_INTR_RX 0x00000804 /* Receive Interrupt */
52#define XGMAC_MMC_INTR_TX 0x00000808 /* Transmit Interrupt */
53#define XGMAC_MMC_INTR_MASK_RX 0x0000080c /* Receive Interrupt Mask */
54#define XGMAC_MMC_INTR_MASK_TX 0x00000810 /* Transmit Interrupt Mask */
55
56/* Hardware TX Statistics Counters */
57#define XGMAC_MMC_TXOCTET_GB_LO 0x00000814
58#define XGMAC_MMC_TXOCTET_GB_HI 0x00000818
59#define XGMAC_MMC_TXFRAME_GB_LO 0x0000081C
60#define XGMAC_MMC_TXFRAME_GB_HI 0x00000820
61#define XGMAC_MMC_TXBCFRAME_G 0x00000824
62#define XGMAC_MMC_TXMCFRAME_G 0x0000082C
63#define XGMAC_MMC_TXUCFRAME_GB 0x00000864
64#define XGMAC_MMC_TXMCFRAME_GB 0x0000086C
65#define XGMAC_MMC_TXBCFRAME_GB 0x00000874
66#define XGMAC_MMC_TXUNDERFLOW 0x0000087C
67#define XGMAC_MMC_TXOCTET_G_LO 0x00000884
68#define XGMAC_MMC_TXOCTET_G_HI 0x00000888
69#define XGMAC_MMC_TXFRAME_G_LO 0x0000088C
70#define XGMAC_MMC_TXFRAME_G_HI 0x00000890
71#define XGMAC_MMC_TXPAUSEFRAME 0x00000894
72#define XGMAC_MMC_TXVLANFRAME 0x0000089C
73
74/* Hardware RX Statistics Counters */
75#define XGMAC_MMC_RXFRAME_GB_LO 0x00000900
76#define XGMAC_MMC_RXFRAME_GB_HI 0x00000904
77#define XGMAC_MMC_RXOCTET_GB_LO 0x00000908
78#define XGMAC_MMC_RXOCTET_GB_HI 0x0000090C
79#define XGMAC_MMC_RXOCTET_G_LO 0x00000910
80#define XGMAC_MMC_RXOCTET_G_HI 0x00000914
81#define XGMAC_MMC_RXBCFRAME_G 0x00000918
82#define XGMAC_MMC_RXMCFRAME_G 0x00000920
83#define XGMAC_MMC_RXCRCERR 0x00000928
84#define XGMAC_MMC_RXRUNT 0x00000930
85#define XGMAC_MMC_RXJABBER 0x00000934
86#define XGMAC_MMC_RXUCFRAME_G 0x00000970
87#define XGMAC_MMC_RXLENGTHERR 0x00000978
88#define XGMAC_MMC_RXPAUSEFRAME 0x00000988
89#define XGMAC_MMC_RXOVERFLOW 0x00000990
90#define XGMAC_MMC_RXVLANFRAME 0x00000998
91#define XGMAC_MMC_RXWATCHDOG 0x000009a0
92
93/* DMA Control and Status Registers */
94#define XGMAC_DMA_BUS_MODE 0x00000f00 /* Bus Mode */
95#define XGMAC_DMA_TX_POLL 0x00000f04 /* Transmit Poll Demand */
96#define XGMAC_DMA_RX_POLL 0x00000f08 /* Received Poll Demand */
97#define XGMAC_DMA_RX_BASE_ADDR 0x00000f0c /* Receive List Base */
98#define XGMAC_DMA_TX_BASE_ADDR 0x00000f10 /* Transmit List Base */
99#define XGMAC_DMA_STATUS 0x00000f14 /* Status Register */
100#define XGMAC_DMA_CONTROL 0x00000f18 /* Ctrl (Operational Mode) */
101#define XGMAC_DMA_INTR_ENA 0x00000f1c /* Interrupt Enable */
102#define XGMAC_DMA_MISS_FRAME_CTR 0x00000f20 /* Missed Frame Counter */
103#define XGMAC_DMA_RI_WDOG_TIMER 0x00000f24 /* RX Intr Watchdog Timer */
104#define XGMAC_DMA_AXI_BUS 0x00000f28 /* AXI Bus Mode */
105#define XGMAC_DMA_AXI_STATUS 0x00000f2C /* AXI Status */
106#define XGMAC_DMA_HW_FEATURE 0x00000f58 /* Enabled Hardware Features */
107
108#define XGMAC_ADDR_AE 0x80000000
109#define XGMAC_MAX_FILTER_ADDR 31
110
111/* PMT Control and Status */
112#define XGMAC_PMT_POINTER_RESET 0x80000000
113#define XGMAC_PMT_GLBL_UNICAST 0x00000200
114#define XGMAC_PMT_WAKEUP_RX_FRM 0x00000040
115#define XGMAC_PMT_MAGIC_PKT 0x00000020
116#define XGMAC_PMT_WAKEUP_FRM_EN 0x00000004
117#define XGMAC_PMT_MAGIC_PKT_EN 0x00000002
118#define XGMAC_PMT_POWERDOWN 0x00000001
119
120#define XGMAC_CONTROL_SPD 0x40000000 /* Speed control */
121#define XGMAC_CONTROL_SPD_MASK 0x60000000
122#define XGMAC_CONTROL_SPD_1G 0x60000000
123#define XGMAC_CONTROL_SPD_2_5G 0x40000000
124#define XGMAC_CONTROL_SPD_10G 0x00000000
125#define XGMAC_CONTROL_SARC 0x10000000 /* Source Addr Insert/Replace */
126#define XGMAC_CONTROL_SARK_MASK 0x18000000
127#define XGMAC_CONTROL_CAR 0x04000000 /* CRC Addition/Replacement */
128#define XGMAC_CONTROL_CAR_MASK 0x06000000
129#define XGMAC_CONTROL_DP 0x01000000 /* Disable Padding */
130#define XGMAC_CONTROL_WD 0x00800000 /* Disable Watchdog on rx */
131#define XGMAC_CONTROL_JD 0x00400000 /* Jabber disable */
132#define XGMAC_CONTROL_JE 0x00100000 /* Jumbo frame */
133#define XGMAC_CONTROL_LM 0x00001000 /* Loop-back mode */
134#define XGMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */
135#define XGMAC_CONTROL_ACS 0x00000080 /* Automatic Pad/FCS Strip */
136#define XGMAC_CONTROL_DDIC 0x00000010 /* Disable Deficit Idle Count */
137#define XGMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */
138#define XGMAC_CONTROL_RE 0x00000004 /* Receiver Enable */
139
140/* XGMAC Frame Filter defines */
141#define XGMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */
142#define XGMAC_FRAME_FILTER_HUC 0x00000002 /* Hash Unicast */
143#define XGMAC_FRAME_FILTER_HMC 0x00000004 /* Hash Multicast */
144#define XGMAC_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */
145#define XGMAC_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */
146#define XGMAC_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */
147#define XGMAC_FRAME_FILTER_SAIF 0x00000100 /* Inverse Filtering */
148#define XGMAC_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */
149#define XGMAC_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */
150#define XGMAC_FRAME_FILTER_VHF 0x00000800 /* VLAN Hash Filter */
151#define XGMAC_FRAME_FILTER_VPF 0x00001000 /* VLAN Perfect Filter */
152#define XGMAC_FRAME_FILTER_RA 0x80000000 /* Receive all mode */
153
154/* XGMAC FLOW CTRL defines */
155#define XGMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */
156#define XGMAC_FLOW_CTRL_PT_SHIFT 16
157#define XGMAC_FLOW_CTRL_DZQP 0x00000080 /* Disable Zero-Quanta Phase */
158#define XGMAC_FLOW_CTRL_PLT 0x00000020 /* Pause Low Threshold */
159#define XGMAC_FLOW_CTRL_PLT_MASK 0x00000030 /* PLT MASK */
160#define XGMAC_FLOW_CTRL_UP 0x00000008 /* Unicast Pause Frame Detect */
161#define XGMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */
162#define XGMAC_FLOW_CTRL_TFE 0x00000002 /* Tx Flow Control Enable */
163#define XGMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */
164
165/* XGMAC_INT_STAT reg */
166#define XGMAC_INT_STAT_PMT 0x0080 /* PMT Interrupt Status */
167#define XGMAC_INT_STAT_LPI 0x0040 /* LPI Interrupt Status */
168
169/* DMA Bus Mode register defines */
170#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */
171#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */
172#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */
173#define DMA_BUS_MODE_ATDS 0x00000080 /* Alternate Descriptor Size */
174
175/* Programmable burst length */
176#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */
177#define DMA_BUS_MODE_PBL_SHIFT 8
178#define DMA_BUS_MODE_FB 0x00010000 /* Fixed burst */
179#define DMA_BUS_MODE_RPBL_MASK 0x003e0000 /* Rx-Programmable Burst Len */
180#define DMA_BUS_MODE_RPBL_SHIFT 17
181#define DMA_BUS_MODE_USP 0x00800000
182#define DMA_BUS_MODE_8PBL 0x01000000
183#define DMA_BUS_MODE_AAL 0x02000000
184
185/* DMA Bus Mode register defines */
186#define DMA_BUS_PR_RATIO_MASK 0x0000c000 /* Rx/Tx priority ratio */
187#define DMA_BUS_PR_RATIO_SHIFT 14
188#define DMA_BUS_FB 0x00010000 /* Fixed Burst */
189
190/* DMA Control register defines */
191#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */
192#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */
193#define DMA_CONTROL_DFF 0x01000000 /* Disable flush of rx frames */
194#define DMA_CONTROL_OSF 0x00000004 /* Operate on 2nd tx frame */
195
196/* DMA Normal interrupt */
197#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */
198#define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */
199#define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */
200#define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */
201#define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */
202#define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */
203#define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */
204#define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */
205#define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */
206#define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */
207#define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */
208#define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */
209#define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavail */
210#define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */
211#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */
212
213#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
214 DMA_INTR_ENA_TUE | DMA_INTR_ENA_TIE)
215
216#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
217 DMA_INTR_ENA_RWE | DMA_INTR_ENA_RSE | \
218 DMA_INTR_ENA_RUE | DMA_INTR_ENA_UNE | \
219 DMA_INTR_ENA_OVE | DMA_INTR_ENA_TJE | \
220 DMA_INTR_ENA_TSE)
221
222/* DMA default interrupt mask */
223#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
224
225/* DMA Status register defines */
226#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */
227#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int */
228#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */
229#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */
230#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */
231#define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */
232#define DMA_STATUS_TS_SHIFT 20
233#define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */
234#define DMA_STATUS_RS_SHIFT 17
235#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */
236#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */
237#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */
238#define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */
239#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */
240#define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */
241#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */
242#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */
243#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */
244#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */
245#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */
246#define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */
247#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavail */
248#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */
249#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
250
251/* Common MAC defines */
252#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */
253#define MAC_ENABLE_RX 0x00000004 /* Receiver Enable */
254
255/* XGMAC Operation Mode Register */
256#define XGMAC_OMR_TSF 0x00200000 /* TX FIFO Store and Forward */
257#define XGMAC_OMR_FTF 0x00100000 /* Flush Transmit FIFO */
258#define XGMAC_OMR_TTC 0x00020000 /* Transmit Threshold Ctrl */
259#define XGMAC_OMR_TTC_MASK 0x00030000
260#define XGMAC_OMR_RFD 0x00006000 /* FC Deactivation Threshold */
261#define XGMAC_OMR_RFD_MASK 0x00007000 /* FC Deact Threshold MASK */
262#define XGMAC_OMR_RFA 0x00000600 /* FC Activation Threshold */
263#define XGMAC_OMR_RFA_MASK 0x00000E00 /* FC Act Threshold MASK */
264#define XGMAC_OMR_EFC 0x00000100 /* Enable Hardware FC */
265#define XGMAC_OMR_FEF 0x00000080 /* Forward Error Frames */
266#define XGMAC_OMR_DT 0x00000040 /* Drop TCP/IP csum Errors */
267#define XGMAC_OMR_RSF 0x00000020 /* RX FIFO Store and Forward */
268#define XGMAC_OMR_RTC_256 0x00000018 /* RX Threshold Ctrl */
269#define XGMAC_OMR_RTC_MASK 0x00000018 /* RX Threshold Ctrl MASK */
270
271/* XGMAC HW Features Register */
272#define DMA_HW_FEAT_TXCOESEL 0x00010000 /* TX Checksum offload */
273
274#define XGMAC_MMC_CTRL_CNT_FRZ 0x00000008
275
276/* XGMAC Descriptor Defines */
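/* A descriptor's two buffer length fields are 13 bits wide (see
 * DESC_BUFFER1_SZ_MASK), so a single buffer is capped just below 8 KiB;
 * larger frames are split across both buffer pointers. */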
277#define MAX_DESC_BUF_SZ (0x2000 - 8)
278
279#define RXDESC_EXT_STATUS 0x00000001
280#define RXDESC_CRC_ERR 0x00000002
281#define RXDESC_RX_ERR 0x00000008
282#define RXDESC_RX_WDOG 0x00000010
283#define RXDESC_FRAME_TYPE 0x00000020
284#define RXDESC_GIANT_FRAME 0x00000080
285#define RXDESC_LAST_SEG 0x00000100
286#define RXDESC_FIRST_SEG 0x00000200
287#define RXDESC_VLAN_FRAME 0x00000400
288#define RXDESC_OVERFLOW_ERR 0x00000800
289#define RXDESC_LENGTH_ERR 0x00001000
290#define RXDESC_SA_FILTER_FAIL 0x00002000
291#define RXDESC_DESCRIPTOR_ERR 0x00004000
292#define RXDESC_ERROR_SUMMARY 0x00008000
293#define RXDESC_FRAME_LEN_OFFSET 16
294#define RXDESC_FRAME_LEN_MASK 0x3fff0000
295#define RXDESC_DA_FILTER_FAIL 0x40000000
296
297#define RXDESC1_END_RING 0x00008000
298
299#define RXDESC_IP_PAYLOAD_MASK 0x00000003
300#define RXDESC_IP_PAYLOAD_UDP 0x00000001
301#define RXDESC_IP_PAYLOAD_TCP 0x00000002
302#define RXDESC_IP_PAYLOAD_ICMP 0x00000003
303#define RXDESC_IP_HEADER_ERR 0x00000008
304#define RXDESC_IP_PAYLOAD_ERR 0x00000010
305#define RXDESC_IPV4_PACKET 0x00000040
306#define RXDESC_IPV6_PACKET 0x00000080
307#define TXDESC_UNDERFLOW_ERR 0x00000001
308#define TXDESC_JABBER_TIMEOUT 0x00000002
309#define TXDESC_LOCAL_FAULT 0x00000004
310#define TXDESC_REMOTE_FAULT 0x00000008
311#define TXDESC_VLAN_FRAME 0x00000010
312#define TXDESC_FRAME_FLUSHED 0x00000020
313#define TXDESC_IP_HEADER_ERR 0x00000040
314#define TXDESC_PAYLOAD_CSUM_ERR 0x00000080
315#define TXDESC_ERROR_SUMMARY 0x00008000
316#define TXDESC_SA_CTRL_INSERT 0x00040000
317#define TXDESC_SA_CTRL_REPLACE 0x00080000
318#define TXDESC_2ND_ADDR_CHAINED 0x00100000
319#define TXDESC_END_RING 0x00200000
320#define TXDESC_CSUM_IP 0x00400000
321#define TXDESC_CSUM_IP_PAYLD 0x00800000
322#define TXDESC_CSUM_ALL 0x00C00000
323#define TXDESC_CRC_EN_REPLACE 0x01000000
324#define TXDESC_CRC_EN_APPEND 0x02000000
325#define TXDESC_DISABLE_PAD 0x04000000
326#define TXDESC_FIRST_SEG 0x10000000
327#define TXDESC_LAST_SEG 0x20000000
328#define TXDESC_INTERRUPT 0x40000000
329
330#define DESC_OWN 0x80000000
331#define DESC_BUFFER1_SZ_MASK 0x00001fff
332#define DESC_BUFFER2_SZ_MASK 0x1fff0000
333#define DESC_BUFFER2_SZ_OFFSET 16
334
335struct xgmac_dma_desc {
336 __le32 flags;
337 __le32 buf_size;
338 __le32 buf1_addr; /* Buffer 1 Address Pointer */
339 __le32 buf2_addr; /* Buffer 2 Address Pointer */
340 __le32 ext_status;
341 __le32 res[3];
342};
343
344struct xgmac_extra_stats {
345 /* Transmit errors */
346 unsigned long tx_jabber;
347 unsigned long tx_frame_flushed;
348 unsigned long tx_payload_error;
349 unsigned long tx_ip_header_error;
350 unsigned long tx_local_fault;
351 unsigned long tx_remote_fault;
352 /* Receive errors */
353 unsigned long rx_watchdog;
354 unsigned long rx_da_filter_fail;
355 unsigned long rx_sa_filter_fail;
356 unsigned long rx_payload_error;
357 unsigned long rx_ip_header_error;
358 /* Tx/Rx IRQ errors */
359 unsigned long tx_undeflow;
360 unsigned long tx_process_stopped;
361 unsigned long rx_buf_unav;
362 unsigned long rx_process_stopped;
363 unsigned long tx_early;
364 unsigned long fatal_bus_error;
365};
366
367struct xgmac_priv {
368 struct xgmac_dma_desc *dma_rx;
369 struct sk_buff **rx_skbuff;
370 unsigned int rx_tail;
371 unsigned int rx_head;
372
373 struct xgmac_dma_desc *dma_tx;
374 struct sk_buff **tx_skbuff;
375 unsigned int tx_head;
376 unsigned int tx_tail;
377 int tx_irq_cnt;
378
379 void __iomem *base;
380 unsigned int dma_buf_sz;
381 dma_addr_t dma_rx_phy;
382 dma_addr_t dma_tx_phy;
383
384 struct net_device *dev;
385 struct device *device;
386 struct napi_struct napi;
387
388 struct xgmac_extra_stats xstats;
389
390 spinlock_t stats_lock;
391 int pmt_irq;
392 char rx_pause;
393 char tx_pause;
394 int wolopts;
395};
396
397/* XGMAC Configuration Settings */
398#define MAX_MTU 9000
399#define PAUSE_TIME 0x400
400
401#define DMA_RX_RING_SZ 256
402#define DMA_TX_RING_SZ 128
403/* minimum number of free TX descriptors required to wake up TX process */
404#define TX_THRESH (DMA_TX_RING_SZ/4)
405
406/* DMA descriptor ring helpers */
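/* Ring indices wrap with a power-of-two mask, so DMA_RX_RING_SZ and
 * DMA_TX_RING_SZ must be powers of two for these helpers (and for
 * CIRC_SPACE/CIRC_CNT) to behave. */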
407#define dma_ring_incr(n, s) (((n) + 1) & ((s) - 1))
408#define dma_ring_space(h, t, s) CIRC_SPACE(h, t, s)
409#define dma_ring_cnt(h, t, s) CIRC_CNT(h, t, s)
410
411/* XGMAC Descriptor Access Helpers */
412static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
413{
414 if (buf_sz > MAX_DESC_BUF_SZ)
415 p->buf_size = cpu_to_le32(MAX_DESC_BUF_SZ |
416 (buf_sz - MAX_DESC_BUF_SZ) << DESC_BUFFER2_SZ_OFFSET);
417 else
418 p->buf_size = cpu_to_le32(buf_sz);
419}
420
421static inline int desc_get_buf_len(struct xgmac_dma_desc *p)
422{
423 u32 len = le32_to_cpu(p->buf_size);
424 return (len & DESC_BUFFER1_SZ_MASK) +
425 ((len & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET);
426}
427
428static inline void desc_init_rx_desc(struct xgmac_dma_desc *p, int ring_size,
429 int buf_sz)
430{
431 struct xgmac_dma_desc *end = p + ring_size - 1;
432
433 memset(p, 0, sizeof(*p) * ring_size);
434
435 for (; p <= end; p++)
436 desc_set_buf_len(p, buf_sz);
437
438 end->buf_size |= cpu_to_le32(RXDESC1_END_RING);
439}
440
441static inline void desc_init_tx_desc(struct xgmac_dma_desc *p, u32 ring_size)
442{
443 memset(p, 0, sizeof(*p) * ring_size);
444 p[ring_size - 1].flags = cpu_to_le32(TXDESC_END_RING);
445}
446
447static inline int desc_get_owner(struct xgmac_dma_desc *p)
448{
449 return le32_to_cpu(p->flags) & DESC_OWN;
450}
451
452static inline void desc_set_rx_owner(struct xgmac_dma_desc *p)
453{
454 /* Clear all fields and set the owner */
455 p->flags = cpu_to_le32(DESC_OWN);
456}
457
458static inline void desc_set_tx_owner(struct xgmac_dma_desc *p, u32 flags)
459{
460 u32 tmpflags = le32_to_cpu(p->flags);
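 /* Keep only the end-of-ring marker; every other flag bit is rewritten. */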
461 tmpflags &= TXDESC_END_RING;
462 tmpflags |= flags | DESC_OWN;
463 p->flags = cpu_to_le32(tmpflags);
464}
465
466static inline int desc_get_tx_ls(struct xgmac_dma_desc *p)
467{
468 return le32_to_cpu(p->flags) & TXDESC_LAST_SEG;
469}
470
471static inline u32 desc_get_buf_addr(struct xgmac_dma_desc *p)
472{
473 return le32_to_cpu(p->buf1_addr);
474}
475
476static inline void desc_set_buf_addr(struct xgmac_dma_desc *p,
477 u32 paddr, int len)
478{
479 p->buf1_addr = cpu_to_le32(paddr);
480 if (len > MAX_DESC_BUF_SZ)
481 p->buf2_addr = cpu_to_le32(paddr + MAX_DESC_BUF_SZ);
482}
483
484static inline void desc_set_buf_addr_and_size(struct xgmac_dma_desc *p,
485 u32 paddr, int len)
486{
487 desc_set_buf_len(p, len);
488 desc_set_buf_addr(p, paddr, len);
489}
490
491static inline int desc_get_rx_frame_len(struct xgmac_dma_desc *p)
492{
493 u32 data = le32_to_cpu(p->flags);
494 u32 len = (data & RXDESC_FRAME_LEN_MASK) >> RXDESC_FRAME_LEN_OFFSET;
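 /* The reported length for Ethernet-type frames still includes the 4-byte FCS, so it is stripped here. */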
495 if (data & RXDESC_FRAME_TYPE)
496 len -= ETH_FCS_LEN;
497
498 return len;
499}
500
501static void xgmac_dma_flush_tx_fifo(void __iomem *ioaddr)
502{
503 int timeout = 1000;
504 u32 reg = readl(ioaddr + XGMAC_OMR);
505 writel(reg | XGMAC_OMR_FTF, ioaddr + XGMAC_OMR);
506
507 while ((timeout-- > 0) && readl(ioaddr + XGMAC_OMR) & XGMAC_OMR_FTF)
508 udelay(1);
509}
510
511static int desc_get_tx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
512{
513 struct xgmac_extra_stats *x = &priv->xstats;
514 u32 status = le32_to_cpu(p->flags);
515
516 if (!(status & TXDESC_ERROR_SUMMARY))
517 return 0;
518
519 netdev_dbg(priv->dev, "tx desc error = 0x%08x\n", status);
520 if (status & TXDESC_JABBER_TIMEOUT)
521 x->tx_jabber++;
522 if (status & TXDESC_FRAME_FLUSHED)
523 x->tx_frame_flushed++;
524 if (status & TXDESC_UNDERFLOW_ERR)
525 xgmac_dma_flush_tx_fifo(priv->base);
526 if (status & TXDESC_IP_HEADER_ERR)
527 x->tx_ip_header_error++;
528 if (status & TXDESC_LOCAL_FAULT)
529 x->tx_local_fault++;
530 if (status & TXDESC_REMOTE_FAULT)
531 x->tx_remote_fault++;
532 if (status & TXDESC_PAYLOAD_CSUM_ERR)
533 x->tx_payload_error++;
534
535 return -1;
536}
537
538static int desc_get_rx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
539{
540 struct xgmac_extra_stats *x = &priv->xstats;
541 int ret = CHECKSUM_UNNECESSARY;
542 u32 status = le32_to_cpu(p->flags);
543 u32 ext_status = le32_to_cpu(p->ext_status);
544
545 if (status & RXDESC_DA_FILTER_FAIL) {
546 netdev_dbg(priv->dev, "XGMAC RX : Dest Address filter fail\n");
547 x->rx_da_filter_fail++;
548 return -1;
549 }
550
551 /* Check if packet has checksum already */
552 if ((status & RXDESC_FRAME_TYPE) && (status & RXDESC_EXT_STATUS) &&
553 !(ext_status & RXDESC_IP_PAYLOAD_MASK))
554 ret = CHECKSUM_NONE;
555
556 netdev_dbg(priv->dev, "rx status - frame type=%d, csum = %d, ext stat %08x\n",
557 (status & RXDESC_FRAME_TYPE) ? 1 : 0, ret, ext_status);
558
559 if (!(status & RXDESC_ERROR_SUMMARY))
560 return ret;
561
562 /* Handle any errors */
563 if (status & (RXDESC_DESCRIPTOR_ERR | RXDESC_OVERFLOW_ERR |
564 RXDESC_GIANT_FRAME | RXDESC_LENGTH_ERR | RXDESC_CRC_ERR))
565 return -1;
566
567 if (status & RXDESC_EXT_STATUS) {
568 if (ext_status & RXDESC_IP_HEADER_ERR)
569 x->rx_ip_header_error++;
570 if (ext_status & RXDESC_IP_PAYLOAD_ERR)
571 x->rx_payload_error++;
572 netdev_dbg(priv->dev, "IP checksum error - stat %08x\n",
573 ext_status);
574 return CHECKSUM_NONE;
575 }
576
577 return ret;
578}
579
580static inline void xgmac_mac_enable(void __iomem *ioaddr)
581{
582 u32 value = readl(ioaddr + XGMAC_CONTROL);
583 value |= MAC_ENABLE_RX | MAC_ENABLE_TX;
584 writel(value, ioaddr + XGMAC_CONTROL);
585
586 value = readl(ioaddr + XGMAC_DMA_CONTROL);
587 value |= DMA_CONTROL_ST | DMA_CONTROL_SR;
588 writel(value, ioaddr + XGMAC_DMA_CONTROL);
589}
590
591static inline void xgmac_mac_disable(void __iomem *ioaddr)
592{
593 u32 value = readl(ioaddr + XGMAC_DMA_CONTROL);
594 value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR);
595 writel(value, ioaddr + XGMAC_DMA_CONTROL);
596
597 value = readl(ioaddr + XGMAC_CONTROL);
598 value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX);
599 writel(value, ioaddr + XGMAC_CONTROL);
600}
601
602static void xgmac_set_mac_addr(void __iomem *ioaddr, unsigned char *addr,
603 int num)
604{
605 u32 data;
606
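 /* Address slot 0 is always active; the extra perfect-filter slots need the address-enable (AE) bit set to take effect. */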
607 data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0);
608 writel(data, ioaddr + XGMAC_ADDR_HIGH(num));
609 data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
610 writel(data, ioaddr + XGMAC_ADDR_LOW(num));
611}
612
613static void xgmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
614 int num)
615{
616 u32 hi_addr, lo_addr;
617
618 /* Read the MAC address from the hardware */
619 hi_addr = readl(ioaddr + XGMAC_ADDR_HIGH(num));
620 lo_addr = readl(ioaddr + XGMAC_ADDR_LOW(num));
621
622 /* Extract the MAC address from the high and low words */
623 addr[0] = lo_addr & 0xff;
624 addr[1] = (lo_addr >> 8) & 0xff;
625 addr[2] = (lo_addr >> 16) & 0xff;
626 addr[3] = (lo_addr >> 24) & 0xff;
627 addr[4] = hi_addr & 0xff;
628 addr[5] = (hi_addr >> 8) & 0xff;
629}
630
631static int xgmac_set_flow_ctrl(struct xgmac_priv *priv, int rx, int tx)
632{
633 u32 reg;
634 unsigned int flow = 0;
635
636 priv->rx_pause = rx;
637 priv->tx_pause = tx;
638
639 if (rx || tx) {
640 if (rx)
641 flow |= XGMAC_FLOW_CTRL_RFE;
642 if (tx)
643 flow |= XGMAC_FLOW_CTRL_TFE;
644
645 flow |= XGMAC_FLOW_CTRL_PLT | XGMAC_FLOW_CTRL_UP;
646 flow |= (PAUSE_TIME << XGMAC_FLOW_CTRL_PT_SHIFT);
647
648 writel(flow, priv->base + XGMAC_FLOW_CTRL);
649
650 reg = readl(priv->base + XGMAC_OMR);
651 reg |= XGMAC_OMR_EFC;
652 writel(reg, priv->base + XGMAC_OMR);
653 } else {
654 writel(0, priv->base + XGMAC_FLOW_CTRL);
655
656 reg = readl(priv->base + XGMAC_OMR);
657 reg &= ~XGMAC_OMR_EFC;
658 writel(reg, priv->base + XGMAC_OMR);
659 }
660
661 return 0;
662}
663
664static void xgmac_rx_refill(struct xgmac_priv *priv)
665{
666 struct xgmac_dma_desc *p;
667 dma_addr_t paddr;
668
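 /* Refill the ring, but always leave at least one descriptor unowned so the ring never becomes completely full. */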
669 while (dma_ring_space(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ) > 1) {
670 int entry = priv->rx_head;
671 struct sk_buff *skb;
672
673 p = priv->dma_rx + entry;
674
675 if (priv->rx_skbuff[entry] == NULL) {
676 skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz);
677 if (unlikely(skb == NULL))
678 break;
679
680 priv->rx_skbuff[entry] = skb;
681 paddr = dma_map_single(priv->device, skb->data,
682 priv->dma_buf_sz, DMA_FROM_DEVICE);
683 desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
684 }
685
686 netdev_dbg(priv->dev, "rx ring: head %d, tail %d\n",
687 priv->rx_head, priv->rx_tail);
688
689 priv->rx_head = dma_ring_incr(priv->rx_head, DMA_RX_RING_SZ);
690 desc_set_rx_owner(p);
691 }
692}
693
694/**
 695 * xgmac_dma_desc_rings_init - init the RX/TX descriptor rings
696 * @dev: net device structure
697 * Description: this function initializes the DMA RX/TX descriptors
698 * and allocates the socket buffers.
699 */
700static int xgmac_dma_desc_rings_init(struct net_device *dev)
701{
702 struct xgmac_priv *priv = netdev_priv(dev);
703 unsigned int bfsize;
704
705 /* Set the Buffer size according to the MTU;
706 * indeed, in case of jumbo we need to bump-up the buffer sizes.
707 */
708 bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN + 64,
709 64);
710
711 netdev_dbg(priv->dev, "mtu [%d] bfsize [%d]\n", dev->mtu, bfsize);
712
713 priv->rx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_RX_RING_SZ,
714 GFP_KERNEL);
715 if (!priv->rx_skbuff)
716 return -ENOMEM;
717
718 priv->dma_rx = dma_alloc_coherent(priv->device,
719 DMA_RX_RING_SZ *
720 sizeof(struct xgmac_dma_desc),
721 &priv->dma_rx_phy,
722 GFP_KERNEL);
723 if (!priv->dma_rx)
724 goto err_dma_rx;
725
726 priv->tx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_TX_RING_SZ,
727 GFP_KERNEL);
728 if (!priv->tx_skbuff)
729 goto err_tx_skb;
730
731 priv->dma_tx = dma_alloc_coherent(priv->device,
732 DMA_TX_RING_SZ *
733 sizeof(struct xgmac_dma_desc),
734 &priv->dma_tx_phy,
735 GFP_KERNEL);
736 if (!priv->dma_tx)
737 goto err_dma_tx;
738
739 netdev_dbg(priv->dev, "DMA desc rings: virt addr (Rx %p, "
740 "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n",
741 priv->dma_rx, priv->dma_tx,
742 (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy);
743
744 priv->rx_tail = 0;
745 priv->rx_head = 0;
746 priv->dma_buf_sz = bfsize;
747 desc_init_rx_desc(priv->dma_rx, DMA_RX_RING_SZ, priv->dma_buf_sz);
748 xgmac_rx_refill(priv);
749
750 priv->tx_tail = 0;
751 priv->tx_head = 0;
752 desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ);
753
754 writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR);
755 writel(priv->dma_rx_phy, priv->base + XGMAC_DMA_RX_BASE_ADDR);
756
757 return 0;
758
759err_dma_tx:
760 kfree(priv->tx_skbuff);
761err_tx_skb:
762 dma_free_coherent(priv->device,
763 DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc),
764 priv->dma_rx, priv->dma_rx_phy);
765err_dma_rx:
766 kfree(priv->rx_skbuff);
767 return -ENOMEM;
768}
769
770static void xgmac_free_rx_skbufs(struct xgmac_priv *priv)
771{
772 int i;
773 struct xgmac_dma_desc *p;
774
775 if (!priv->rx_skbuff)
776 return;
777
778 for (i = 0; i < DMA_RX_RING_SZ; i++) {
779 if (priv->rx_skbuff[i] == NULL)
780 continue;
781
782 p = priv->dma_rx + i;
783 dma_unmap_single(priv->device, desc_get_buf_addr(p),
784 priv->dma_buf_sz, DMA_FROM_DEVICE);
785 dev_kfree_skb_any(priv->rx_skbuff[i]);
786 priv->rx_skbuff[i] = NULL;
787 }
788}
789
790static void xgmac_free_tx_skbufs(struct xgmac_priv *priv)
791{
792 int i, f;
793 struct xgmac_dma_desc *p;
794
795 if (!priv->tx_skbuff)
796 return;
797
798 for (i = 0; i < DMA_TX_RING_SZ; i++) {
799 struct sk_buff *skb = priv->tx_skbuff[i];
800 if (skb == NULL)
801 continue;
802
803 priv->tx_skbuff[i] = NULL;
804 p = priv->dma_tx + i;
805 dma_unmap_single(priv->device, desc_get_buf_addr(p),
806 desc_get_buf_len(p), DMA_TO_DEVICE);
807 /* fragment buffers occupy the descriptors that follow the head */
808 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
809 p = priv->dma_tx + ++i;
810 dma_unmap_page(priv->device, desc_get_buf_addr(p),
811 desc_get_buf_len(p), DMA_TO_DEVICE);
812 }
813 dev_kfree_skb_any(skb);
814 }
815}
816
817static void xgmac_free_dma_desc_rings(struct xgmac_priv *priv)
818{
819 /* Release the DMA TX/RX socket buffers */
820 xgmac_free_rx_skbufs(priv);
821 xgmac_free_tx_skbufs(priv);
822
823 /* Free the consistent memory allocated for descriptor rings */
824 if (priv->dma_tx) {
825 dma_free_coherent(priv->device,
826 DMA_TX_RING_SZ * sizeof(struct xgmac_dma_desc),
827 priv->dma_tx, priv->dma_tx_phy);
828 priv->dma_tx = NULL;
829 }
830 if (priv->dma_rx) {
831 dma_free_coherent(priv->device,
832 DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc),
833 priv->dma_rx, priv->dma_rx_phy);
834 priv->dma_rx = NULL;
835 }
836 kfree(priv->rx_skbuff);
837 priv->rx_skbuff = NULL;
838 kfree(priv->tx_skbuff);
839 priv->tx_skbuff = NULL;
840}
841
842/**
 843 * xgmac_tx_complete:
844 * @priv: private driver structure
845 * Description: it reclaims resources after transmission completes.
846 */
847static void xgmac_tx_complete(struct xgmac_priv *priv)
848{
849 int i;
850
851 while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) {
852 unsigned int entry = priv->tx_tail;
853 struct sk_buff *skb = priv->tx_skbuff[entry];
854 struct xgmac_dma_desc *p = priv->dma_tx + entry;
855
856 /* Check if the descriptor is owned by the DMA. */
857 if (desc_get_owner(p))
858 break;
859
860 /* Verify tx error by looking at the last segment */
861 if (desc_get_tx_ls(p))
862 desc_get_tx_status(priv, p);
863
864 netdev_dbg(priv->dev, "tx ring: curr %d, dirty %d\n",
865 priv->tx_head, priv->tx_tail);
866
867 dma_unmap_single(priv->device, desc_get_buf_addr(p),
868 desc_get_buf_len(p), DMA_TO_DEVICE);
869
870 priv->tx_skbuff[entry] = NULL;
871 priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);
872
873 if (!skb) {
874 continue;
875 }
876
877 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
878 /* unmap the fragment descriptor at the current tail, then advance */
879 p = priv->dma_tx + priv->tx_tail;
880 dma_unmap_page(priv->device, desc_get_buf_addr(p),
881 desc_get_buf_len(p), DMA_TO_DEVICE);
882 priv->tx_tail = dma_ring_incr(priv->tx_tail,
883 DMA_TX_RING_SZ);
884 }
885
886 dev_kfree_skb(skb);
887 }
888
889 if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) >
890 MAX_SKB_FRAGS)
891 netif_wake_queue(priv->dev);
892}
893
894/**
895 * xgmac_tx_err:
896 * @priv: pointer to the private device structure
897 * Description: it cleans the descriptors and restarts the transmission
898 * in case of errors.
899 */
900static void xgmac_tx_err(struct xgmac_priv *priv)
901{
902 u32 reg, value, inten;
903
904 netif_stop_queue(priv->dev);
905
906 inten = readl(priv->base + XGMAC_DMA_INTR_ENA);
907 writel(0, priv->base + XGMAC_DMA_INTR_ENA);
908
909 reg = readl(priv->base + XGMAC_DMA_CONTROL);
910 writel(reg & ~DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
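 /* Poll the transmit process state (DMA_STATUS_TS field) until it reports stopped (0) or suspended (6). */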
911 do {
912 value = readl(priv->base + XGMAC_DMA_STATUS) & 0x700000;
913 } while (value && (value != 0x600000));
914
915 xgmac_free_tx_skbufs(priv);
916 desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ);
917 priv->tx_tail = 0;
918 priv->tx_head = 0;
919 writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR);
920 writel(reg | DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
921
922 writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS,
923 priv->base + XGMAC_DMA_STATUS);
924 writel(inten, priv->base + XGMAC_DMA_INTR_ENA);
925
926 netif_wake_queue(priv->dev);
927}
928
929static int xgmac_hw_init(struct net_device *dev)
930{
931 u32 value, ctrl;
932 int limit;
933 struct xgmac_priv *priv = netdev_priv(dev);
934 void __iomem *ioaddr = priv->base;
935
936 /* Save the ctrl register value */
937 ctrl = readl(ioaddr + XGMAC_CONTROL) & XGMAC_CONTROL_SPD_MASK;
938
939 /* SW reset */
940 value = DMA_BUS_MODE_SFT_RESET;
941 writel(value, ioaddr + XGMAC_DMA_BUS_MODE);
942 limit = 15000;
943 while (limit-- &&
944 (readl(ioaddr + XGMAC_DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
945 cpu_relax();
946 if (limit < 0)
947 return -EBUSY;
948
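 /* Burst length of 16 beats for both TX and RX, fixed and address-aligned bursts, and the alternate (enhanced) descriptor size. */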
949 value = (0x10 << DMA_BUS_MODE_PBL_SHIFT) |
950 (0x10 << DMA_BUS_MODE_RPBL_SHIFT) |
951 DMA_BUS_MODE_FB | DMA_BUS_MODE_ATDS | DMA_BUS_MODE_AAL;
952 writel(value, ioaddr + XGMAC_DMA_BUS_MODE);
953
954 /* Enable interrupts */
955 writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
956 writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
957
958 /* XGMAC requires AXI bus init. This is a 'magic number' for now */
959 writel(0x0077000E, ioaddr + XGMAC_DMA_AXI_BUS);
960
961 ctrl |= XGMAC_CONTROL_DDIC | XGMAC_CONTROL_JE | XGMAC_CONTROL_ACS |
962 XGMAC_CONTROL_CAR;
963 if (dev->features & NETIF_F_RXCSUM)
964 ctrl |= XGMAC_CONTROL_IPC;
965 writel(ctrl, ioaddr + XGMAC_CONTROL);
966
967 writel(DMA_CONTROL_OSF, ioaddr + XGMAC_DMA_CONTROL);
968
969 /* Set the HW DMA mode and the COE */
970 writel(XGMAC_OMR_TSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA |
971 XGMAC_OMR_RTC_256,
972 ioaddr + XGMAC_OMR);
973
974 /* Reset the MMC counters */
975 writel(1, ioaddr + XGMAC_MMC_CTRL);
976 return 0;
977}
978
979/**
980 * xgmac_open - open entry point of the driver
981 * @dev : pointer to the device structure.
982 * Description:
983 * This function is the open entry point of the driver.
984 * Return value:
985 * 0 on success and an appropriate (-)ve integer as defined in errno.h
986 * file on failure.
987 */
988static int xgmac_open(struct net_device *dev)
989{
990 int ret;
991 struct xgmac_priv *priv = netdev_priv(dev);
992 void __iomem *ioaddr = priv->base;
993
 994 /* Check that the MAC address is valid. If it is not, a random
 995 * address is generated so the device can still be brought up.
 996 * The user may set an address later with, e.g.:
 997 * ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx */
998 if (!is_valid_ether_addr(dev->dev_addr)) {
999 eth_hw_addr_random(dev);
1000 netdev_dbg(priv->dev, "generated random MAC address %pM\n",
1001 dev->dev_addr);
1002 }
1003
1004 memset(&priv->xstats, 0, sizeof(struct xgmac_extra_stats));
1005
1006 /* Initialize the XGMAC and descriptors */
1007 xgmac_hw_init(dev);
1008 xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);
1009 xgmac_set_flow_ctrl(priv, priv->rx_pause, priv->tx_pause);
1010
1011 ret = xgmac_dma_desc_rings_init(dev);
1012 if (ret < 0)
1013 return ret;
1014
1015 /* Enable the MAC Rx/Tx */
1016 xgmac_mac_enable(ioaddr);
1017
1018 napi_enable(&priv->napi);
1019 netif_start_queue(dev);
1020
1021 return 0;
1022}
1023
1024/**
1025 * xgmac_stop - close entry point of the driver
1026 * @dev : device pointer.
1027 * Description:
1028 * This is the stop entry point of the driver.
1029 */
1030static int xgmac_stop(struct net_device *dev)
1031{
1032 struct xgmac_priv *priv = netdev_priv(dev);
1033
1034 netif_stop_queue(dev);
1035
1036 if (readl(priv->base + XGMAC_DMA_INTR_ENA))
1037 napi_disable(&priv->napi);
1038
1039 writel(0, priv->base + XGMAC_DMA_INTR_ENA);
1040
1041 /* Disable the MAC core */
1042 xgmac_mac_disable(priv->base);
1043
1044 /* Release and free the Rx/Tx resources */
1045 xgmac_free_dma_desc_rings(priv);
1046
1047 return 0;
1048}
1049
1050/**
1051 * xgmac_xmit:
1052 * @skb : the socket buffer
1053 * @dev : device pointer
1054 * Description : Tx entry point of the driver.
1055 */
1056static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
1057{
1058 struct xgmac_priv *priv = netdev_priv(dev);
1059 unsigned int entry;
1060 int i;
1061 u32 irq_flag;
1062 int nfrags = skb_shinfo(skb)->nr_frags;
1063 struct xgmac_dma_desc *desc, *first;
1064 unsigned int desc_flags;
1065 unsigned int len;
1066 dma_addr_t paddr;
1067
1068 priv->tx_irq_cnt = (priv->tx_irq_cnt + 1) & (DMA_TX_RING_SZ/4 - 1);
1069 irq_flag = priv->tx_irq_cnt ? 0 : TXDESC_INTERRUPT;
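 /* Request a TX completion interrupt only once every DMA_TX_RING_SZ/4 frames; completed descriptors are reclaimed in batches from the NAPI poll. */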
1070
1071 desc_flags = (skb->ip_summed == CHECKSUM_PARTIAL) ?
1072 TXDESC_CSUM_ALL : 0;
1073 entry = priv->tx_head;
1074 desc = priv->dma_tx + entry;
1075 first = desc;
1076
1077 len = skb_headlen(skb);
1078 paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE);
1079 if (dma_mapping_error(priv->device, paddr)) {
1080 dev_kfree_skb(skb);
1081 return NETDEV_TX_OK;
1082 }
1083 priv->tx_skbuff[entry] = skb;
1084 desc_set_buf_addr_and_size(desc, paddr, len);
1085
1086 for (i = 0; i < nfrags; i++) {
1087 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1088
1089 len = frag->size;
1090
1091 paddr = skb_frag_dma_map(priv->device, frag, 0, len,
1092 DMA_TO_DEVICE);
1093 if (dma_mapping_error(priv->device, paddr)) {
1094 dev_kfree_skb(skb);
1095 return NETDEV_TX_OK;
1096 }
1097
1098 entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
1099 desc = priv->dma_tx + entry;
1100 priv->tx_skbuff[entry] = NULL;
1101
1102 desc_set_buf_addr_and_size(desc, paddr, len);
1103 if (i < (nfrags - 1))
1104 desc_set_tx_owner(desc, desc_flags);
1105 }
1106
1107 /* Interrupt on completion only for the last segment */
1108 if (desc != first)
1109 desc_set_tx_owner(desc, desc_flags |
1110 TXDESC_LAST_SEG | irq_flag);
1111 else
1112 desc_flags |= TXDESC_LAST_SEG | irq_flag;
1113
1114 /* Set owner on first desc last to avoid race condition */
1115 wmb();
1116 desc_set_tx_owner(first, desc_flags | TXDESC_FIRST_SEG);
1117
1118 priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ);
1119
1120 writel(1, priv->base + XGMAC_DMA_TX_POLL);
1121 if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) <
1122 MAX_SKB_FRAGS)
1123 netif_stop_queue(dev);
1124
1125 return NETDEV_TX_OK;
1126}
1127
1128static int xgmac_rx(struct xgmac_priv *priv, int limit)
1129{
1130 unsigned int entry;
1131 unsigned int count = 0;
1132 struct xgmac_dma_desc *p;
1133
1134 while (count < limit) {
1135 int ip_checksum;
1136 struct sk_buff *skb;
1137 int frame_len;
1138
1139 entry = priv->rx_tail;
1140 p = priv->dma_rx + entry;
1141 if (desc_get_owner(p))
1142 break;
1143
1144 count++;
1145 priv->rx_tail = dma_ring_incr(priv->rx_tail, DMA_RX_RING_SZ);
1146
1147 /* read the status of the incoming frame */
1148 ip_checksum = desc_get_rx_status(priv, p);
1149 if (ip_checksum < 0)
1150 continue;
1151
1152 skb = priv->rx_skbuff[entry];
1153 if (unlikely(!skb)) {
1154 netdev_err(priv->dev, "Inconsistent Rx descriptor chain\n");
1155 break;
1156 }
1157 priv->rx_skbuff[entry] = NULL;
1158
1159 frame_len = desc_get_rx_frame_len(p);
1160 netdev_dbg(priv->dev, "RX frame size %d, COE status: %d\n",
1161 frame_len, ip_checksum);
1162
1163 skb_put(skb, frame_len);
1164 dma_unmap_single(priv->device, desc_get_buf_addr(p),
1165 frame_len, DMA_FROM_DEVICE);
1166
1167 skb->protocol = eth_type_trans(skb, priv->dev);
1168 skb->ip_summed = ip_checksum;
1169 if (ip_checksum == CHECKSUM_NONE)
1170 netif_receive_skb(skb);
1171 else
1172 napi_gro_receive(&priv->napi, skb);
1173 }
1174
1175 xgmac_rx_refill(priv);
1176
1177 return count;
1178}
1179
1180/**
1181 * xgmac_poll - xgmac poll method (NAPI)
1182 * @napi : pointer to the napi structure.
1183 * @budget : maximum number of packets that the current CPU can receive from
1184 * all interfaces.
1185 * Description :
1186 * This function implements the reception process.
1187 * It also runs the TX completion handling.
1188 */
1189static int xgmac_poll(struct napi_struct *napi, int budget)
1190{
1191 struct xgmac_priv *priv = container_of(napi,
1192 struct xgmac_priv, napi);
1193 int work_done = 0;
1194
1195 xgmac_tx_complete(priv);
1196 work_done = xgmac_rx(priv, budget);
1197
1198 if (work_done < budget) {
1199 napi_complete(napi);
1200 __raw_writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
1201 }
1202 return work_done;
1203}
1204
1205/**
1206 * xgmac_tx_timeout
1207 * @dev : Pointer to net device structure
1208 * Description: this function is called when a packet transmission fails to
1209 * complete within a reasonable time. The driver will mark the error in the
1210 * netdev structure and arrange for the device to be reset to a sane state
1211 * in order to transmit a new packet.
1212 */
1213static void xgmac_tx_timeout(struct net_device *dev)
1214{
1215 struct xgmac_priv *priv = netdev_priv(dev);
1216
1217 /* Clear Tx resources and restart transmitting again */
1218 xgmac_tx_err(priv);
1219}
1220
1221/**
1222 * xgmac_set_rx_mode - entry point for multicast addressing
1223 * @dev : pointer to the device structure
1224 * Description:
1225 * This function is a driver entry point which gets called by the kernel
1226 * whenever multicast addresses must be enabled/disabled.
1227 * Return value:
1228 * void.
1229 */
1230static void xgmac_set_rx_mode(struct net_device *dev)
1231{
1232 int i;
1233 struct xgmac_priv *priv = netdev_priv(dev);
1234 void __iomem *ioaddr = priv->base;
1235 unsigned int value = 0;
1236 u32 hash_filter[XGMAC_NUM_HASH];
1237 int reg = 1;
1238 struct netdev_hw_addr *ha;
1239 bool use_hash = false;
1240
1241 netdev_dbg(priv->dev, "# mcasts %d, # unicast %d\n",
1242 netdev_mc_count(dev), netdev_uc_count(dev));
1243
1244 if (dev->flags & IFF_PROMISC) {
1245 writel(XGMAC_FRAME_FILTER_PR, ioaddr + XGMAC_FRAME_FILTER);
1246 return;
1247 }
1248
1249 memset(hash_filter, 0, sizeof(hash_filter));
1250
1251 if (netdev_uc_count(dev) > XGMAC_MAX_FILTER_ADDR) {
1252 use_hash = true;
1253 value |= XGMAC_FRAME_FILTER_HUC | XGMAC_FRAME_FILTER_HPF;
1254 }
1255 netdev_for_each_uc_addr(ha, dev) {
1256 if (use_hash) {
1257 u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23;
1258
1259 /* The most significant 4 bits determine the register to
1260 * use (H/L) while the other 5 bits determine the bit
1261 * within the register. */
1262 hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
1263 } else {
1264 xgmac_set_mac_addr(ioaddr, ha->addr, reg);
1265 reg++;
1266 }
1267 }
1268
1269 if (dev->flags & IFF_ALLMULTI) {
1270 value |= XGMAC_FRAME_FILTER_PM;
1271 goto out;
1272 }
1273
1274 if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) {
1275 use_hash = true;
1276 value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF;
1277 }
1278 netdev_for_each_mc_addr(ha, dev) {
1279 if (use_hash) {
1280 u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23;
1281
1282 /* The most significant 4 bits determine the register to
1283 * use (H/L) while the other 5 bits determine the bit
1284 * within the register. */
1285 hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
1286 } else {
1287 xgmac_set_mac_addr(ioaddr, ha->addr, reg);
1288 reg++;
1289 }
1290 }
1291
1292out:
1293 for (i = 0; i < XGMAC_NUM_HASH; i++)
1294 writel(hash_filter[i], ioaddr + XGMAC_HASH(i));
1295
1296 writel(value, ioaddr + XGMAC_FRAME_FILTER);
1297}
1298
1299/**
1300 * xgmac_change_mtu - entry point to change MTU size for the device.
1301 * @dev : device pointer.
1302 * @new_mtu : the new MTU size for the device.
1303 * Description: the Maximum Transfer Unit (MTU) is used by the network layer
1304 * to drive packet transmission. Ethernet has an MTU of 1500 octets
1305 * (ETH_DATA_LEN). This value can be changed with ifconfig.
1306 * Return value:
1307 * 0 on success and an appropriate (-)ve integer as defined in errno.h
1308 * file on failure.
1309 */
1310static int xgmac_change_mtu(struct net_device *dev, int new_mtu)
1311{
1312 struct xgmac_priv *priv = netdev_priv(dev);
1313 int old_mtu;
1314
1315 if ((new_mtu < 46) || (new_mtu > MAX_MTU)) {
1316 netdev_err(priv->dev, "invalid MTU, max MTU is: %d\n", MAX_MTU);
1317 return -EINVAL;
1318 }
1319
1320 old_mtu = dev->mtu;
1321 dev->mtu = new_mtu;
1322
1323 /* return early if the buffer sizes will not change */
1324 if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
1325 return 0;
1326 if (old_mtu == new_mtu)
1327 return 0;
1328
1329 /* Stop everything, get ready to change the MTU */
1330 if (!netif_running(dev))
1331 return 0;
1332
1333 /* Bring the interface down and then back up */
1334 xgmac_stop(dev);
1335 return xgmac_open(dev);
1336}
1337
1338static irqreturn_t xgmac_pmt_interrupt(int irq, void *dev_id)
1339{
1340 u32 intr_status;
1341 struct net_device *dev = (struct net_device *)dev_id;
1342 struct xgmac_priv *priv = netdev_priv(dev);
1343 void __iomem *ioaddr = priv->base;
1344
1345 intr_status = __raw_readl(ioaddr + XGMAC_INT_STAT);
1346 if (intr_status & XGMAC_INT_STAT_PMT) {
1347 netdev_dbg(priv->dev, "received Magic frame\n");
1348 /* clear the PMT bits 5 and 6 by reading the PMT */
1349 readl(ioaddr + XGMAC_PMT);
1350 }
1351 return IRQ_HANDLED;
1352}
1353
1354static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
1355{
1356 u32 intr_status;
1357 bool tx_err = false;
1358 struct net_device *dev = (struct net_device *)dev_id;
1359 struct xgmac_priv *priv = netdev_priv(dev);
1360 struct xgmac_extra_stats *x = &priv->xstats;
1361
1362 /* read the status register (CSR5) */
1363 intr_status = __raw_readl(priv->base + XGMAC_DMA_STATUS);
1364 intr_status &= __raw_readl(priv->base + XGMAC_DMA_INTR_ENA);
1365 __raw_writel(intr_status, priv->base + XGMAC_DMA_STATUS);
1366
1367 /* It displays the DMA process states (CSR5 register) */
1368 /* ABNORMAL interrupts */
1369 if (unlikely(intr_status & DMA_STATUS_AIS)) {
1370 if (intr_status & DMA_STATUS_TJT) {
1371 netdev_err(priv->dev, "transmit jabber\n");
1372 x->tx_jabber++;
1373 }
1374 if (intr_status & DMA_STATUS_RU)
1375 x->rx_buf_unav++;
1376 if (intr_status & DMA_STATUS_RPS) {
1377 netdev_err(priv->dev, "receive process stopped\n");
1378 x->rx_process_stopped++;
1379 }
1380 if (intr_status & DMA_STATUS_ETI) {
1381 netdev_err(priv->dev, "transmit early interrupt\n");
1382 x->tx_early++;
1383 }
1384 if (intr_status & DMA_STATUS_TPS) {
1385 netdev_err(priv->dev, "transmit process stopped\n");
1386 x->tx_process_stopped++;
1387 tx_err = true;
1388 }
1389 if (intr_status & DMA_STATUS_FBI) {
1390 netdev_err(priv->dev, "fatal bus error\n");
1391 x->fatal_bus_error++;
1392 tx_err = true;
1393 }
1394
1395 if (tx_err)
1396 xgmac_tx_err(priv);
1397 }
1398
1399 /* TX/RX NORMAL interrupts */
1400 if (intr_status & (DMA_STATUS_RI | DMA_STATUS_TU | DMA_STATUS_TI)) {
1401 __raw_writel(DMA_INTR_ABNORMAL, priv->base + XGMAC_DMA_INTR_ENA);
1402 napi_schedule(&priv->napi);
1403 }
1404
1405 return IRQ_HANDLED;
1406}
1407
1408#ifdef CONFIG_NET_POLL_CONTROLLER
1409/* Polling receive - used by NETCONSOLE and other diagnostic tools
1410 * to allow network I/O with interrupts disabled. */
1411static void xgmac_poll_controller(struct net_device *dev)
1412{
1413 disable_irq(dev->irq);
1414 xgmac_interrupt(dev->irq, dev);
1415 enable_irq(dev->irq);
1416}
1417#endif
1418
1419static struct rtnl_link_stats64 *
1420xgmac_get_stats64(struct net_device *dev,
1421 struct rtnl_link_stats64 *storage)
1422{
1423 struct xgmac_priv *priv = netdev_priv(dev);
1424 void __iomem *base = priv->base;
1425 u32 count;
1426
1427 spin_lock_bh(&priv->stats_lock);
1428 writel(XGMAC_MMC_CTRL_CNT_FRZ, base + XGMAC_MMC_CTRL);
1429
1430 storage->rx_bytes = readl(base + XGMAC_MMC_RXOCTET_G_LO);
1431 storage->rx_bytes |= (u64)(readl(base + XGMAC_MMC_RXOCTET_G_HI)) << 32;
1432
1433 storage->rx_packets = readl(base + XGMAC_MMC_RXFRAME_GB_LO);
1434 storage->multicast = readl(base + XGMAC_MMC_RXMCFRAME_G);
1435 storage->rx_crc_errors = readl(base + XGMAC_MMC_RXCRCERR);
1436 storage->rx_length_errors = readl(base + XGMAC_MMC_RXLENGTHERR);
1437 storage->rx_missed_errors = readl(base + XGMAC_MMC_RXOVERFLOW);
1438
1439 storage->tx_bytes = readl(base + XGMAC_MMC_TXOCTET_G_LO);
1440 storage->tx_bytes |= (u64)(readl(base + XGMAC_MMC_TXOCTET_G_HI)) << 32;
1441
1442 count = readl(base + XGMAC_MMC_TXFRAME_GB_LO);
1443 storage->tx_errors = count - readl(base + XGMAC_MMC_TXFRAME_G_LO);
1444 storage->tx_packets = count;
1445 storage->tx_fifo_errors = readl(base + XGMAC_MMC_TXUNDERFLOW);
1446
1447 writel(0, base + XGMAC_MMC_CTRL);
1448 spin_unlock_bh(&priv->stats_lock);
1449 return storage;
1450}
1451
1452static int xgmac_set_mac_address(struct net_device *dev, void *p)
1453{
1454 struct xgmac_priv *priv = netdev_priv(dev);
1455 void __iomem *ioaddr = priv->base;
1456 struct sockaddr *addr = p;
1457
1458 if (!is_valid_ether_addr(addr->sa_data))
1459 return -EADDRNOTAVAIL;
1460
1461 dev->addr_assign_type &= ~NET_ADDR_RANDOM;
1462 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1463
1464 xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);
1465
1466 return 0;
1467}
1468
1469static int xgmac_set_features(struct net_device *dev, netdev_features_t features)
1470{
1471 u32 ctrl;
1472 struct xgmac_priv *priv = netdev_priv(dev);
1473 void __iomem *ioaddr = priv->base;
1474 u32 changed = dev->features ^ features;
1475
1476 if (!(changed & NETIF_F_RXCSUM))
1477 return 0;
1478
1479 ctrl = readl(ioaddr + XGMAC_CONTROL);
1480 if (features & NETIF_F_RXCSUM)
1481 ctrl |= XGMAC_CONTROL_IPC;
1482 else
1483 ctrl &= ~XGMAC_CONTROL_IPC;
1484 writel(ctrl, ioaddr + XGMAC_CONTROL);
1485
1486 return 0;
1487}
1488
1489static const struct net_device_ops xgmac_netdev_ops = {
1490 .ndo_open = xgmac_open,
1491 .ndo_start_xmit = xgmac_xmit,
1492 .ndo_stop = xgmac_stop,
1493 .ndo_change_mtu = xgmac_change_mtu,
1494 .ndo_set_rx_mode = xgmac_set_rx_mode,
1495 .ndo_tx_timeout = xgmac_tx_timeout,
1496 .ndo_get_stats64 = xgmac_get_stats64,
1497#ifdef CONFIG_NET_POLL_CONTROLLER
1498 .ndo_poll_controller = xgmac_poll_controller,
1499#endif
1500 .ndo_set_mac_address = xgmac_set_mac_address,
1501 .ndo_set_features = xgmac_set_features,
1502};
1503
1504static int xgmac_ethtool_getsettings(struct net_device *dev,
1505 struct ethtool_cmd *cmd)
1506{
1507 cmd->autoneg = 0;
1508 cmd->duplex = DUPLEX_FULL;
1509 ethtool_cmd_speed_set(cmd, 10000);
1510 cmd->supported = 0;
1511 cmd->advertising = 0;
1512 cmd->transceiver = XCVR_INTERNAL;
1513 return 0;
1514}
1515
1516static void xgmac_get_pauseparam(struct net_device *netdev,
1517 struct ethtool_pauseparam *pause)
1518{
1519 struct xgmac_priv *priv = netdev_priv(netdev);
1520
1521 pause->rx_pause = priv->rx_pause;
1522 pause->tx_pause = priv->tx_pause;
1523}
1524
1525static int xgmac_set_pauseparam(struct net_device *netdev,
1526 struct ethtool_pauseparam *pause)
1527{
1528 struct xgmac_priv *priv = netdev_priv(netdev);
1529
1530 if (pause->autoneg)
1531 return -EINVAL;
1532
1533 return xgmac_set_flow_ctrl(priv, pause->rx_pause, pause->tx_pause);
1534}
1535
1536struct xgmac_stats {
1537 char stat_string[ETH_GSTRING_LEN];
1538 int stat_offset;
1539 bool is_reg;
1540};
1541
1542#define XGMAC_STAT(m) \
1543 { #m, offsetof(struct xgmac_priv, xstats.m), false }
1544#define XGMAC_HW_STAT(m, reg_offset) \
1545 { #m, reg_offset, true }
1546
1547static const struct xgmac_stats xgmac_gstrings_stats[] = {
1548 XGMAC_STAT(tx_frame_flushed),
1549 XGMAC_STAT(tx_payload_error),
1550 XGMAC_STAT(tx_ip_header_error),
1551 XGMAC_STAT(tx_local_fault),
1552 XGMAC_STAT(tx_remote_fault),
1553 XGMAC_STAT(tx_early),
1554 XGMAC_STAT(tx_process_stopped),
1555 XGMAC_STAT(tx_jabber),
1556 XGMAC_STAT(rx_buf_unav),
1557 XGMAC_STAT(rx_process_stopped),
1558 XGMAC_STAT(rx_payload_error),
1559 XGMAC_STAT(rx_ip_header_error),
1560 XGMAC_STAT(rx_da_filter_fail),
1561 XGMAC_STAT(rx_sa_filter_fail),
1562 XGMAC_STAT(fatal_bus_error),
1563 XGMAC_HW_STAT(rx_watchdog, XGMAC_MMC_RXWATCHDOG),
1564 XGMAC_HW_STAT(tx_vlan, XGMAC_MMC_TXVLANFRAME),
1565 XGMAC_HW_STAT(rx_vlan, XGMAC_MMC_RXVLANFRAME),
1566 XGMAC_HW_STAT(tx_pause, XGMAC_MMC_TXPAUSEFRAME),
1567 XGMAC_HW_STAT(rx_pause, XGMAC_MMC_RXPAUSEFRAME),
1568};
1569#define XGMAC_STATS_LEN ARRAY_SIZE(xgmac_gstrings_stats)
1570
1571static void xgmac_get_ethtool_stats(struct net_device *dev,
1572 struct ethtool_stats *dummy,
1573 u64 *data)
1574{
1575 struct xgmac_priv *priv = netdev_priv(dev);
1576 void *p = priv;
1577 int i;
1578
1579 for (i = 0; i < XGMAC_STATS_LEN; i++) {
1580 if (xgmac_gstrings_stats[i].is_reg)
1581 *data++ = readl(priv->base +
1582 xgmac_gstrings_stats[i].stat_offset);
1583 else
1584 *data++ = *(u32 *)(p +
1585 xgmac_gstrings_stats[i].stat_offset);
1586 }
1587}
1588
1589static int xgmac_get_sset_count(struct net_device *netdev, int sset)
1590{
1591 switch (sset) {
1592 case ETH_SS_STATS:
1593 return XGMAC_STATS_LEN;
1594 default:
1595 return -EINVAL;
1596 }
1597}
1598
1599static void xgmac_get_strings(struct net_device *dev, u32 stringset,
1600 u8 *data)
1601{
1602 int i;
1603 u8 *p = data;
1604
1605 switch (stringset) {
1606 case ETH_SS_STATS:
1607 for (i = 0; i < XGMAC_STATS_LEN; i++) {
1608 memcpy(p, xgmac_gstrings_stats[i].stat_string,
1609 ETH_GSTRING_LEN);
1610 p += ETH_GSTRING_LEN;
1611 }
1612 break;
1613 default:
1614 WARN_ON(1);
1615 break;
1616 }
1617}
1618
1619static void xgmac_get_wol(struct net_device *dev,
1620 struct ethtool_wolinfo *wol)
1621{
1622 struct xgmac_priv *priv = netdev_priv(dev);
1623
1624 if (device_can_wakeup(priv->device)) {
1625 wol->supported = WAKE_MAGIC | WAKE_UCAST;
1626 wol->wolopts = priv->wolopts;
1627 }
1628}
1629
1630static int xgmac_set_wol(struct net_device *dev,
1631 struct ethtool_wolinfo *wol)
1632{
1633 struct xgmac_priv *priv = netdev_priv(dev);
1634 u32 support = WAKE_MAGIC | WAKE_UCAST;
1635
1636 if (!device_can_wakeup(priv->device))
1637 return -ENOTSUPP;
1638
1639 if (wol->wolopts & ~support)
1640 return -EINVAL;
1641
1642 priv->wolopts = wol->wolopts;
1643
1644 if (wol->wolopts) {
1645 device_set_wakeup_enable(priv->device, 1);
1646 enable_irq_wake(dev->irq);
1647 } else {
1648 device_set_wakeup_enable(priv->device, 0);
1649 disable_irq_wake(dev->irq);
1650 }
1651
1652 return 0;
1653}
1654
1655static const struct ethtool_ops xgmac_ethtool_ops = {
1656 .get_settings = xgmac_ethtool_getsettings,
1657 .get_link = ethtool_op_get_link,
1658 .get_pauseparam = xgmac_get_pauseparam,
1659 .set_pauseparam = xgmac_set_pauseparam,
1660 .get_ethtool_stats = xgmac_get_ethtool_stats,
1661 .get_strings = xgmac_get_strings,
1662 .get_wol = xgmac_get_wol,
1663 .set_wol = xgmac_set_wol,
1664 .get_sset_count = xgmac_get_sset_count,
1665};
1666
1667/**
1668 * xgmac_probe
1669 * @pdev: platform device pointer
1670 * Description: the driver is initialized through platform_device.
1671 */
1672static int xgmac_probe(struct platform_device *pdev)
1673{
1674 int ret = 0;
1675 struct resource *res;
1676 struct net_device *ndev = NULL;
1677 struct xgmac_priv *priv = NULL;
1678 u32 uid;
1679
1680 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1681 if (!res)
1682 return -ENODEV;
1683
1684 if (!request_mem_region(res->start, resource_size(res), pdev->name))
1685 return -EBUSY;
1686
1687 ndev = alloc_etherdev(sizeof(struct xgmac_priv));
1688 if (!ndev) {
1689 ret = -ENOMEM;
1690 goto err_alloc;
1691 }
1692
1693 SET_NETDEV_DEV(ndev, &pdev->dev);
1694 priv = netdev_priv(ndev);
1695 platform_set_drvdata(pdev, ndev);
1696 ether_setup(ndev);
1697 ndev->netdev_ops = &xgmac_netdev_ops;
1698 SET_ETHTOOL_OPS(ndev, &xgmac_ethtool_ops);
1699 spin_lock_init(&priv->stats_lock);
1700
1701 priv->device = &pdev->dev;
1702 priv->dev = ndev;
1703 priv->rx_pause = 1;
1704 priv->tx_pause = 1;
1705
1706 priv->base = ioremap(res->start, resource_size(res));
1707 if (!priv->base) {
1708 netdev_err(ndev, "ioremap failed\n");
1709 ret = -ENOMEM;
1710 goto err_io;
1711 }
1712
1713 uid = readl(priv->base + XGMAC_VERSION);
1714 netdev_info(ndev, "h/w version is 0x%x\n", uid);
1715
1716 writel(0, priv->base + XGMAC_DMA_INTR_ENA);
1717 ndev->irq = platform_get_irq(pdev, 0);
1718 if (ndev->irq == -ENXIO) {
1719 netdev_err(ndev, "No irq resource\n");
1720 ret = ndev->irq;
1721 goto err_irq;
1722 }
1723
1724 ret = request_irq(ndev->irq, xgmac_interrupt, 0,
1725 dev_name(&pdev->dev), ndev);
1726 if (ret < 0) {
1727 netdev_err(ndev, "Could not request irq %d - ret %d)\n",
1728 ndev->irq, ret);
1729 goto err_irq;
1730 }
1731
1732 priv->pmt_irq = platform_get_irq(pdev, 1);
1733 if (priv->pmt_irq == -ENXIO) {
1734 netdev_err(ndev, "No pmt irq resource\n");
1735 ret = priv->pmt_irq;
1736 goto err_pmt_irq;
1737 }
1738
1739 ret = request_irq(priv->pmt_irq, xgmac_pmt_interrupt, 0,
1740 dev_name(&pdev->dev), ndev);
1741 if (ret < 0) {
1742 netdev_err(ndev, "Could not request irq %d - ret %d)\n",
1743 priv->pmt_irq, ret);
1744 goto err_pmt_irq;
1745 }
1746
1747 device_set_wakeup_capable(&pdev->dev, 1);
1748 if (device_can_wakeup(priv->device))
1749 priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
1750
1751 ndev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;
1752 if (readl(priv->base + XGMAC_DMA_HW_FEATURE) & DMA_HW_FEAT_TXCOESEL)
1753 ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1754 NETIF_F_RXCSUM;
1755 ndev->features |= ndev->hw_features;
1756 ndev->priv_flags |= IFF_UNICAST_FLT;
1757
1758 /* Get the MAC address */
1759 xgmac_get_mac_addr(priv->base, ndev->dev_addr, 0);
1760 if (!is_valid_ether_addr(ndev->dev_addr))
1761 netdev_warn(ndev, "MAC address %pM not valid",
1762 ndev->dev_addr);
1763
1764 netif_napi_add(ndev, &priv->napi, xgmac_poll, 64);
1765 ret = register_netdev(ndev);
1766 if (ret)
1767 goto err_reg;
1768
1769 return 0;
1770
1771err_reg:
1772 netif_napi_del(&priv->napi);
1773 free_irq(priv->pmt_irq, ndev);
1774err_pmt_irq:
1775 free_irq(ndev->irq, ndev);
1776err_irq:
1777 iounmap(priv->base);
1778err_io:
1779 free_netdev(ndev);
1780err_alloc:
1781 release_mem_region(res->start, resource_size(res));
1782 platform_set_drvdata(pdev, NULL);
1783 return ret;
1784}
1785
1786/**
1787 * xgmac_dvr_remove
1788 * @pdev: platform device pointer
1789 * Description: this function resets the TX/RX processes, disables the MAC RX/TX
1790 * changes the link status, releases the DMA descriptor rings,
1791 * unregisters the MDIO bus and unmaps the allocated memory.
1792 */
1793static int xgmac_remove(struct platform_device *pdev)
1794{
1795 struct net_device *ndev = platform_get_drvdata(pdev);
1796 struct xgmac_priv *priv = netdev_priv(ndev);
1797 struct resource *res;
1798
1799 xgmac_mac_disable(priv->base);
1800
1801 /* Free the IRQ lines */
1802 free_irq(ndev->irq, ndev);
1803 free_irq(priv->pmt_irq, ndev);
1804
1805 platform_set_drvdata(pdev, NULL);
1806 unregister_netdev(ndev);
1807 netif_napi_del(&priv->napi);
1808
1809 iounmap(priv->base);
1810 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1811 release_mem_region(res->start, resource_size(res));
1812
1813 free_netdev(ndev);
1814
1815 return 0;
1816}
1817
1818#ifdef CONFIG_PM_SLEEP
1819static void xgmac_pmt(void __iomem *ioaddr, unsigned long mode)
1820{
1821 unsigned int pmt = 0;
1822
1823 if (mode & WAKE_MAGIC)
1824 pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT;
1825 if (mode & WAKE_UCAST)
1826 pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST;
1827
1828 writel(pmt, ioaddr + XGMAC_PMT);
1829}
1830
1831static int xgmac_suspend(struct device *dev)
1832{
1833 struct net_device *ndev = platform_get_drvdata(to_platform_device(dev));
1834 struct xgmac_priv *priv = netdev_priv(ndev);
1835 u32 value;
1836
1837 if (!ndev || !netif_running(ndev))
1838 return 0;
1839
1840 netif_device_detach(ndev);
1841 napi_disable(&priv->napi);
1842 writel(0, priv->base + XGMAC_DMA_INTR_ENA);
1843
1844 if (device_may_wakeup(priv->device)) {
1845 /* Stop TX/RX DMA Only */
1846 value = readl(priv->base + XGMAC_DMA_CONTROL);
1847 value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR);
1848 writel(value, priv->base + XGMAC_DMA_CONTROL);
1849
1850 xgmac_pmt(priv->base, priv->wolopts);
1851 } else
1852 xgmac_mac_disable(priv->base);
1853
1854 return 0;
1855}
1856
1857static int xgmac_resume(struct device *dev)
1858{
1859 struct net_device *ndev = platform_get_drvdata(to_platform_device(dev));
1860 struct xgmac_priv *priv = netdev_priv(ndev);
1861 void __iomem *ioaddr = priv->base;
1862
1863 if (!netif_running(ndev))
1864 return 0;
1865
1866 xgmac_pmt(ioaddr, 0);
1867
1868 /* Enable the MAC and DMA */
1869 xgmac_mac_enable(ioaddr);
1870 writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
1871 writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
1872
1873 netif_device_attach(ndev);
1874 napi_enable(&priv->napi);
1875
1876 return 0;
1877}
1878
1879static SIMPLE_DEV_PM_OPS(xgmac_pm_ops, xgmac_suspend, xgmac_resume);
1880#define XGMAC_PM_OPS (&xgmac_pm_ops)
1881#else
1882#define XGMAC_PM_OPS NULL
1883#endif /* CONFIG_PM_SLEEP */
1884
1885static const struct of_device_id xgmac_of_match[] = {
1886 { .compatible = "calxeda,hb-xgmac", },
1887 {},
1888};
1889MODULE_DEVICE_TABLE(of, xgmac_of_match);
1890
1891static struct platform_driver xgmac_driver = {
1892 .driver = {
1893 .name = "calxedaxgmac",
1894 .of_match_table = xgmac_of_match,
1895 },
1896 .probe = xgmac_probe,
1897 .remove = xgmac_remove,
1898 .driver.pm = XGMAC_PM_OPS,
1899};
1900
1901module_platform_driver(xgmac_driver);
1902
1903MODULE_AUTHOR("Calxeda, Inc.");
1904MODULE_DESCRIPTION("Calxeda 10G XGMAC driver");
1905MODULE_LICENSE("GPL v2");