1/*
2 * Copyright 2010-2011 Calxeda, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#include <linux/module.h>
17#include <linux/init.h>
18#include <linux/kernel.h>
19#include <linux/circ_buf.h>
20#include <linux/interrupt.h>
21#include <linux/etherdevice.h>
22#include <linux/platform_device.h>
23#include <linux/skbuff.h>
24#include <linux/ethtool.h>
25#include <linux/if.h>
26#include <linux/crc32.h>
27#include <linux/dma-mapping.h>
28#include <linux/slab.h>
29
30/* XGMAC Register definitions */
31#define XGMAC_CONTROL 0x00000000 /* MAC Configuration */
32#define XGMAC_FRAME_FILTER 0x00000004 /* MAC Frame Filter */
33#define XGMAC_FLOW_CTRL 0x00000018 /* MAC Flow Control */
34#define XGMAC_VLAN_TAG 0x0000001C /* VLAN Tags */
35#define XGMAC_VERSION 0x00000020 /* Version */
36#define XGMAC_VLAN_INCL 0x00000024 /* VLAN tag for tx frames */
37#define XGMAC_LPI_CTRL 0x00000028 /* LPI Control and Status */
38#define XGMAC_LPI_TIMER 0x0000002C /* LPI Timers Control */
39#define XGMAC_TX_PACE 0x00000030 /* Transmit Pace and Stretch */
40#define XGMAC_VLAN_HASH 0x00000034 /* VLAN Hash Table */
41#define XGMAC_DEBUG 0x00000038 /* Debug */
42#define XGMAC_INT_STAT 0x0000003C /* Interrupt and Control */
43#define XGMAC_ADDR_HIGH(reg) (0x00000040 + ((reg) * 8))
44#define XGMAC_ADDR_LOW(reg) (0x00000044 + ((reg) * 8))
45#define XGMAC_HASH(n) (0x00000300 + (n) * 4) /* HASH table regs */
46#define XGMAC_NUM_HASH 16
47#define XGMAC_OMR 0x00000400
48#define XGMAC_REMOTE_WAKE 0x00000700 /* Remote Wake-Up Frm Filter */
49#define XGMAC_PMT 0x00000704 /* PMT Control and Status */
50#define XGMAC_MMC_CTRL 0x00000800 /* XGMAC MMC Control */
51#define XGMAC_MMC_INTR_RX 0x00000804 /* Receive Interrupt */
52#define XGMAC_MMC_INTR_TX 0x00000808 /* Transmit Interrupt */
53#define XGMAC_MMC_INTR_MASK_RX 0x0000080c /* Receive Interrupt Mask */
54#define XGMAC_MMC_INTR_MASK_TX 0x00000810 /* Transmit Interrupt Mask */
55
56/* Hardware TX Statistics Counters */
57#define XGMAC_MMC_TXOCTET_GB_LO 0x00000814
58#define XGMAC_MMC_TXOCTET_GB_HI 0x00000818
59#define XGMAC_MMC_TXFRAME_GB_LO 0x0000081C
60#define XGMAC_MMC_TXFRAME_GB_HI 0x00000820
61#define XGMAC_MMC_TXBCFRAME_G 0x00000824
62#define XGMAC_MMC_TXMCFRAME_G 0x0000082C
63#define XGMAC_MMC_TXUCFRAME_GB 0x00000864
64#define XGMAC_MMC_TXMCFRAME_GB 0x0000086C
65#define XGMAC_MMC_TXBCFRAME_GB 0x00000874
66#define XGMAC_MMC_TXUNDERFLOW 0x0000087C
67#define XGMAC_MMC_TXOCTET_G_LO 0x00000884
68#define XGMAC_MMC_TXOCTET_G_HI 0x00000888
69#define XGMAC_MMC_TXFRAME_G_LO 0x0000088C
70#define XGMAC_MMC_TXFRAME_G_HI 0x00000890
71#define XGMAC_MMC_TXPAUSEFRAME 0x00000894
72#define XGMAC_MMC_TXVLANFRAME 0x0000089C
73
74/* Hardware RX Statistics Counters */
75#define XGMAC_MMC_RXFRAME_GB_LO 0x00000900
76#define XGMAC_MMC_RXFRAME_GB_HI 0x00000904
77#define XGMAC_MMC_RXOCTET_GB_LO 0x00000908
78#define XGMAC_MMC_RXOCTET_GB_HI 0x0000090C
79#define XGMAC_MMC_RXOCTET_G_LO 0x00000910
80#define XGMAC_MMC_RXOCTET_G_HI 0x00000914
81#define XGMAC_MMC_RXBCFRAME_G 0x00000918
82#define XGMAC_MMC_RXMCFRAME_G 0x00000920
83#define XGMAC_MMC_RXCRCERR 0x00000928
84#define XGMAC_MMC_RXRUNT 0x00000930
85#define XGMAC_MMC_RXJABBER 0x00000934
86#define XGMAC_MMC_RXUCFRAME_G 0x00000970
87#define XGMAC_MMC_RXLENGTHERR 0x00000978
88#define XGMAC_MMC_RXPAUSEFRAME 0x00000988
89#define XGMAC_MMC_RXOVERFLOW 0x00000990
90#define XGMAC_MMC_RXVLANFRAME 0x00000998
91#define XGMAC_MMC_RXWATCHDOG 0x000009a0
92
93/* DMA Control and Status Registers */
94#define XGMAC_DMA_BUS_MODE 0x00000f00 /* Bus Mode */
95#define XGMAC_DMA_TX_POLL 0x00000f04 /* Transmit Poll Demand */
96#define XGMAC_DMA_RX_POLL 0x00000f08 /* Received Poll Demand */
97#define XGMAC_DMA_RX_BASE_ADDR 0x00000f0c /* Receive List Base */
98#define XGMAC_DMA_TX_BASE_ADDR 0x00000f10 /* Transmit List Base */
99#define XGMAC_DMA_STATUS 0x00000f14 /* Status Register */
100#define XGMAC_DMA_CONTROL 0x00000f18 /* Ctrl (Operational Mode) */
101#define XGMAC_DMA_INTR_ENA 0x00000f1c /* Interrupt Enable */
102#define XGMAC_DMA_MISS_FRAME_CTR 0x00000f20 /* Missed Frame Counter */
103#define XGMAC_DMA_RI_WDOG_TIMER 0x00000f24 /* RX Intr Watchdog Timer */
104#define XGMAC_DMA_AXI_BUS 0x00000f28 /* AXI Bus Mode */
105#define XGMAC_DMA_AXI_STATUS 0x00000f2C /* AXI Status */
106#define XGMAC_DMA_HW_FEATURE 0x00000f58 /* Enabled Hardware Features */
107
108#define XGMAC_ADDR_AE 0x80000000
109#define XGMAC_MAX_FILTER_ADDR 31
110
111/* PMT Control and Status */
112#define XGMAC_PMT_POINTER_RESET 0x80000000
113#define XGMAC_PMT_GLBL_UNICAST 0x00000200
114#define XGMAC_PMT_WAKEUP_RX_FRM 0x00000040
115#define XGMAC_PMT_MAGIC_PKT 0x00000020
116#define XGMAC_PMT_WAKEUP_FRM_EN 0x00000004
117#define XGMAC_PMT_MAGIC_PKT_EN 0x00000002
118#define XGMAC_PMT_POWERDOWN 0x00000001
119
120#define XGMAC_CONTROL_SPD 0x40000000 /* Speed control */
121#define XGMAC_CONTROL_SPD_MASK 0x60000000
122#define XGMAC_CONTROL_SPD_1G 0x60000000
123#define XGMAC_CONTROL_SPD_2_5G 0x40000000
124#define XGMAC_CONTROL_SPD_10G 0x00000000
125#define XGMAC_CONTROL_SARC 0x10000000 /* Source Addr Insert/Replace */
126#define XGMAC_CONTROL_SARK_MASK 0x18000000
127#define XGMAC_CONTROL_CAR 0x04000000 /* CRC Addition/Replacement */
128#define XGMAC_CONTROL_CAR_MASK 0x06000000
129#define XGMAC_CONTROL_DP 0x01000000 /* Disable Padding */
130#define XGMAC_CONTROL_WD 0x00800000 /* Disable Watchdog on rx */
131#define XGMAC_CONTROL_JD 0x00400000 /* Jabber disable */
132#define XGMAC_CONTROL_JE 0x00100000 /* Jumbo frame */
133#define XGMAC_CONTROL_LM 0x00001000 /* Loop-back mode */
134#define XGMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */
135#define XGMAC_CONTROL_ACS 0x00000080 /* Automatic Pad/FCS Strip */
136#define XGMAC_CONTROL_DDIC 0x00000010 /* Disable Deficit Idle Count */
137#define XGMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */
138#define XGMAC_CONTROL_RE 0x00000004 /* Receiver Enable */
139
140/* XGMAC Frame Filter defines */
141#define XGMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */
142#define XGMAC_FRAME_FILTER_HUC 0x00000002 /* Hash Unicast */
143#define XGMAC_FRAME_FILTER_HMC 0x00000004 /* Hash Multicast */
144#define XGMAC_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */
145#define XGMAC_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */
146#define XGMAC_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */
147#define XGMAC_FRAME_FILTER_SAIF 0x00000100 /* Inverse Filtering */
148#define XGMAC_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */
149#define XGMAC_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */
150#define XGMAC_FRAME_FILTER_VHF 0x00000800 /* VLAN Hash Filter */
151#define XGMAC_FRAME_FILTER_VPF 0x00001000 /* VLAN Perfect Filter */
152#define XGMAC_FRAME_FILTER_RA 0x80000000 /* Receive all mode */
153
154/* XGMAC FLOW CTRL defines */
155#define XGMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */
156#define XGMAC_FLOW_CTRL_PT_SHIFT 16
157#define XGMAC_FLOW_CTRL_DZQP 0x00000080 /* Disable Zero-Quanta Phase */
158#define XGMAC_FLOW_CTRL_PLT 0x00000020 /* Pause Low Threshold */
159#define XGMAC_FLOW_CTRL_PLT_MASK 0x00000030 /* PLT MASK */
160#define XGMAC_FLOW_CTRL_UP 0x00000008 /* Unicast Pause Frame Detect */
161#define XGMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */
162#define XGMAC_FLOW_CTRL_TFE 0x00000002 /* Tx Flow Control Enable */
163#define XGMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */
164
165/* XGMAC_INT_STAT reg */
166#define XGMAC_INT_STAT_PMTIM 0x00800000 /* PMT Interrupt Mask */
167#define XGMAC_INT_STAT_PMT 0x0080 /* PMT Interrupt Status */
168#define XGMAC_INT_STAT_LPI 0x0040 /* LPI Interrupt Status */
169
170/* DMA Bus Mode register defines */
171#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */
172#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */
173#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */
174#define DMA_BUS_MODE_ATDS 0x00000080 /* Alternate Descriptor Size */
175
176/* Programmable burst length */
177#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */
178#define DMA_BUS_MODE_PBL_SHIFT 8
179#define DMA_BUS_MODE_FB 0x00010000 /* Fixed burst */
180#define DMA_BUS_MODE_RPBL_MASK 0x003e0000 /* Rx-Programmable Burst Len */
181#define DMA_BUS_MODE_RPBL_SHIFT 17
182#define DMA_BUS_MODE_USP 0x00800000
183#define DMA_BUS_MODE_8PBL 0x01000000
184#define DMA_BUS_MODE_AAL 0x02000000
185
186/* DMA Bus Mode register defines */
187#define DMA_BUS_PR_RATIO_MASK 0x0000c000 /* Rx/Tx priority ratio */
188#define DMA_BUS_PR_RATIO_SHIFT 14
189#define DMA_BUS_FB 0x00010000 /* Fixed Burst */
190
191/* DMA Control register defines */
192#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */
193#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */
194#define DMA_CONTROL_DFF 0x01000000 /* Disable flush of rx frames */
195#define DMA_CONTROL_OSF 0x00000004 /* Operate on 2nd tx frame */
196
197/* DMA Normal interrupt */
198#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */
199#define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */
200#define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */
201#define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */
202#define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */
203#define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */
204#define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */
205#define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */
206#define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */
207#define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */
208#define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */
209#define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */
210#define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavail */
211#define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */
212#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */
213
214#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
215 DMA_INTR_ENA_TUE | DMA_INTR_ENA_TIE)
216
217#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
218 DMA_INTR_ENA_RWE | DMA_INTR_ENA_RSE | \
219 DMA_INTR_ENA_RUE | DMA_INTR_ENA_UNE | \
220 DMA_INTR_ENA_OVE | DMA_INTR_ENA_TJE | \
221 DMA_INTR_ENA_TSE)
222
223/* DMA default interrupt mask */
224#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
225
226/* DMA Status register defines */
227#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */
228#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int */
229#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */
230#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */
231#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */
232#define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */
233#define DMA_STATUS_TS_SHIFT 20
234#define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */
235#define DMA_STATUS_RS_SHIFT 17
236#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */
237#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */
238#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */
239#define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */
240#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */
241#define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */
242#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */
243#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */
244#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */
245#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */
246#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */
247#define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */
248#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavail */
249#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */
250#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
251
252/* Common MAC defines */
253#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */
254#define MAC_ENABLE_RX 0x00000004 /* Receiver Enable */
255
256/* XGMAC Operation Mode Register */
257#define XGMAC_OMR_TSF 0x00200000 /* TX FIFO Store and Forward */
258#define XGMAC_OMR_FTF 0x00100000 /* Flush Transmit FIFO */
259#define XGMAC_OMR_TTC 0x00020000 /* Transmit Threshold Ctrl */
260#define XGMAC_OMR_TTC_MASK 0x00030000
261#define XGMAC_OMR_RFD 0x00006000 /* FC Deactivation Threshold */
262#define XGMAC_OMR_RFD_MASK 0x00007000 /* FC Deact Threshold MASK */
263#define XGMAC_OMR_RFA 0x00000600 /* FC Activation Threshold */
264#define XGMAC_OMR_RFA_MASK 0x00000E00 /* FC Act Threshold MASK */
265#define XGMAC_OMR_EFC 0x00000100 /* Enable Hardware FC */
266#define XGMAC_OMR_FEF 0x00000080 /* Forward Error Frames */
267#define XGMAC_OMR_DT 0x00000040 /* Drop TCP/IP csum Errors */
268#define XGMAC_OMR_RSF 0x00000020 /* RX FIFO Store and Forward */
269#define XGMAC_OMR_RTC_256 0x00000018 /* RX Threshold Ctrl */
270#define XGMAC_OMR_RTC_MASK 0x00000018 /* RX Threshold Ctrl MASK */
271
272/* XGMAC HW Features Register */
273#define DMA_HW_FEAT_TXCOESEL 0x00010000 /* TX Checksum offload */
274
275#define XGMAC_MMC_CTRL_CNT_FRZ 0x00000008
276
277/* XGMAC Descriptor Defines */
278#define MAX_DESC_BUF_SZ (0x2000 - 8)
279
280#define RXDESC_EXT_STATUS 0x00000001
281#define RXDESC_CRC_ERR 0x00000002
282#define RXDESC_RX_ERR 0x00000008
283#define RXDESC_RX_WDOG 0x00000010
284#define RXDESC_FRAME_TYPE 0x00000020
285#define RXDESC_GIANT_FRAME 0x00000080
286#define RXDESC_LAST_SEG 0x00000100
287#define RXDESC_FIRST_SEG 0x00000200
288#define RXDESC_VLAN_FRAME 0x00000400
289#define RXDESC_OVERFLOW_ERR 0x00000800
290#define RXDESC_LENGTH_ERR 0x00001000
291#define RXDESC_SA_FILTER_FAIL 0x00002000
292#define RXDESC_DESCRIPTOR_ERR 0x00004000
293#define RXDESC_ERROR_SUMMARY 0x00008000
294#define RXDESC_FRAME_LEN_OFFSET 16
295#define RXDESC_FRAME_LEN_MASK 0x3fff0000
296#define RXDESC_DA_FILTER_FAIL 0x40000000
297
298#define RXDESC1_END_RING 0x00008000
299
300#define RXDESC_IP_PAYLOAD_MASK 0x00000003
301#define RXDESC_IP_PAYLOAD_UDP 0x00000001
302#define RXDESC_IP_PAYLOAD_TCP 0x00000002
303#define RXDESC_IP_PAYLOAD_ICMP 0x00000003
304#define RXDESC_IP_HEADER_ERR 0x00000008
305#define RXDESC_IP_PAYLOAD_ERR 0x00000010
306#define RXDESC_IPV4_PACKET 0x00000040
307#define RXDESC_IPV6_PACKET 0x00000080
308#define TXDESC_UNDERFLOW_ERR 0x00000001
309#define TXDESC_JABBER_TIMEOUT 0x00000002
310#define TXDESC_LOCAL_FAULT 0x00000004
311#define TXDESC_REMOTE_FAULT 0x00000008
312#define TXDESC_VLAN_FRAME 0x00000010
313#define TXDESC_FRAME_FLUSHED 0x00000020
314#define TXDESC_IP_HEADER_ERR 0x00000040
315#define TXDESC_PAYLOAD_CSUM_ERR 0x00000080
316#define TXDESC_ERROR_SUMMARY 0x00008000
317#define TXDESC_SA_CTRL_INSERT 0x00040000
318#define TXDESC_SA_CTRL_REPLACE 0x00080000
319#define TXDESC_2ND_ADDR_CHAINED 0x00100000
320#define TXDESC_END_RING 0x00200000
321#define TXDESC_CSUM_IP 0x00400000
322#define TXDESC_CSUM_IP_PAYLD 0x00800000
323#define TXDESC_CSUM_ALL 0x00C00000
324#define TXDESC_CRC_EN_REPLACE 0x01000000
325#define TXDESC_CRC_EN_APPEND 0x02000000
326#define TXDESC_DISABLE_PAD 0x04000000
327#define TXDESC_FIRST_SEG 0x10000000
328#define TXDESC_LAST_SEG 0x20000000
329#define TXDESC_INTERRUPT 0x40000000
330
331#define DESC_OWN 0x80000000
332#define DESC_BUFFER1_SZ_MASK 0x00001fff
333#define DESC_BUFFER2_SZ_MASK 0x1fff0000
334#define DESC_BUFFER2_SZ_OFFSET 16
335
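/* One hardware DMA descriptor (eight 32-bit words). The ext_status/res words
 * appear to correspond to the alternate descriptor size enabled with
 * DMA_BUS_MODE_ATDS in xgmac_hw_init(); buf1_addr/buf2_addr are the two
 * buffer pointers sized by desc_set_buf_len(). */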
336struct xgmac_dma_desc {
337 __le32 flags;
338 __le32 buf_size;
339 __le32 buf1_addr; /* Buffer 1 Address Pointer */
340 __le32 buf2_addr; /* Buffer 2 Address Pointer */
341 __le32 ext_status;
342 __le32 res[3];
343};
344
345struct xgmac_extra_stats {
346 /* Transmit errors */
347 unsigned long tx_jabber;
348 unsigned long tx_frame_flushed;
349 unsigned long tx_payload_error;
350 unsigned long tx_ip_header_error;
351 unsigned long tx_local_fault;
352 unsigned long tx_remote_fault;
353 /* Receive errors */
354 unsigned long rx_watchdog;
355 unsigned long rx_da_filter_fail;
356 unsigned long rx_payload_error;
357 unsigned long rx_ip_header_error;
358 /* Tx/Rx IRQ errors */
359 unsigned long tx_process_stopped;
360 unsigned long rx_buf_unav;
361 unsigned long rx_process_stopped;
362 unsigned long tx_early;
363 unsigned long fatal_bus_error;
364};
365
366struct xgmac_priv {
367 struct xgmac_dma_desc *dma_rx;
368 struct sk_buff **rx_skbuff;
369 unsigned int rx_tail;
370 unsigned int rx_head;
371
372 struct xgmac_dma_desc *dma_tx;
373 struct sk_buff **tx_skbuff;
374 unsigned int tx_head;
375 unsigned int tx_tail;
376 int tx_irq_cnt;
377
378 void __iomem *base;
379 unsigned int dma_buf_sz;
380 dma_addr_t dma_rx_phy;
381 dma_addr_t dma_tx_phy;
382
383 struct net_device *dev;
384 struct device *device;
385 struct napi_struct napi;
386
387 struct xgmac_extra_stats xstats;
388
389 spinlock_t stats_lock;
390 int pmt_irq;
391 char rx_pause;
392 char tx_pause;
393 int wolopts;
394 struct work_struct tx_timeout_work;
395};
396
397/* XGMAC Configuration Settings */
398#define MAX_MTU 9000
399#define PAUSE_TIME 0x400
400
401#define DMA_RX_RING_SZ 256
402#define DMA_TX_RING_SZ 128
403/* minimum number of free TX descriptors required to wake up TX process */
404#define TX_THRESH (DMA_TX_RING_SZ/4)
405
406/* DMA descriptor ring helpers */
407#define dma_ring_incr(n, s) (((n) + 1) & ((s) - 1))
408#define dma_ring_space(h, t, s) CIRC_SPACE(h, t, s)
409#define dma_ring_cnt(h, t, s) CIRC_CNT(h, t, s)
410
411#define tx_dma_ring_space(p) \
412 dma_ring_space((p)->tx_head, (p)->tx_tail, DMA_TX_RING_SZ)
413
414/* XGMAC Descriptor Access Helpers */
415static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
416{
417 if (buf_sz > MAX_DESC_BUF_SZ)
418 p->buf_size = cpu_to_le32(MAX_DESC_BUF_SZ |
419 (buf_sz - MAX_DESC_BUF_SZ) << DESC_BUFFER2_SZ_OFFSET);
420 else
421 p->buf_size = cpu_to_le32(buf_sz);
422}
423
424static inline int desc_get_buf_len(struct xgmac_dma_desc *p)
425{
426 u32 len = le32_to_cpu(p->buf_size);
427 return (len & DESC_BUFFER1_SZ_MASK) +
428 ((len & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET);
429}
430
431static inline void desc_init_rx_desc(struct xgmac_dma_desc *p, int ring_size,
432 int buf_sz)
433{
434 struct xgmac_dma_desc *end = p + ring_size - 1;
435
436 memset(p, 0, sizeof(*p) * ring_size);
437
438 for (; p <= end; p++)
439 desc_set_buf_len(p, buf_sz);
440
441 end->buf_size |= cpu_to_le32(RXDESC1_END_RING);
442}
443
444static inline void desc_init_tx_desc(struct xgmac_dma_desc *p, u32 ring_size)
445{
446 memset(p, 0, sizeof(*p) * ring_size);
447 p[ring_size - 1].flags = cpu_to_le32(TXDESC_END_RING);
448}
449
450static inline int desc_get_owner(struct xgmac_dma_desc *p)
451{
452 return le32_to_cpu(p->flags) & DESC_OWN;
453}
454
455static inline void desc_set_rx_owner(struct xgmac_dma_desc *p)
456{
457 /* Clear all fields and set the owner */
458 p->flags = cpu_to_le32(DESC_OWN);
459}
460
461static inline void desc_set_tx_owner(struct xgmac_dma_desc *p, u32 flags)
462{
463 u32 tmpflags = le32_to_cpu(p->flags);
464 tmpflags &= TXDESC_END_RING;
465 tmpflags |= flags | DESC_OWN;
466 p->flags = cpu_to_le32(tmpflags);
467}
468
469static inline void desc_clear_tx_owner(struct xgmac_dma_desc *p)
470{
471 u32 tmpflags = le32_to_cpu(p->flags);
472 tmpflags &= TXDESC_END_RING;
473 p->flags = cpu_to_le32(tmpflags);
474}
475
476static inline int desc_get_tx_ls(struct xgmac_dma_desc *p)
477{
478 return le32_to_cpu(p->flags) & TXDESC_LAST_SEG;
479}
480
481static inline int desc_get_tx_fs(struct xgmac_dma_desc *p)
482{
483 return le32_to_cpu(p->flags) & TXDESC_FIRST_SEG;
484}
485
486static inline u32 desc_get_buf_addr(struct xgmac_dma_desc *p)
487{
488 return le32_to_cpu(p->buf1_addr);
489}
490
491static inline void desc_set_buf_addr(struct xgmac_dma_desc *p,
492 u32 paddr, int len)
493{
494 p->buf1_addr = cpu_to_le32(paddr);
495 if (len > MAX_DESC_BUF_SZ)
496 p->buf2_addr = cpu_to_le32(paddr + MAX_DESC_BUF_SZ);
497}
498
499static inline void desc_set_buf_addr_and_size(struct xgmac_dma_desc *p,
500 u32 paddr, int len)
501{
502 desc_set_buf_len(p, len);
503 desc_set_buf_addr(p, paddr, len);
504}
505
506static inline int desc_get_rx_frame_len(struct xgmac_dma_desc *p)
507{
508 u32 data = le32_to_cpu(p->flags);
509 u32 len = (data & RXDESC_FRAME_LEN_MASK) >> RXDESC_FRAME_LEN_OFFSET;
510 if (data & RXDESC_FRAME_TYPE)
511 len -= ETH_FCS_LEN;
512
513 return len;
514}
515
516static void xgmac_dma_flush_tx_fifo(void __iomem *ioaddr)
517{
518 int timeout = 1000;
519 u32 reg = readl(ioaddr + XGMAC_OMR);
520 writel(reg | XGMAC_OMR_FTF, ioaddr + XGMAC_OMR);
521
522 while ((timeout-- > 0) && readl(ioaddr + XGMAC_OMR) & XGMAC_OMR_FTF)
523 udelay(1);
524}
525
526static int desc_get_tx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
527{
528 struct xgmac_extra_stats *x = &priv->xstats;
529 u32 status = le32_to_cpu(p->flags);
530
531 if (!(status & TXDESC_ERROR_SUMMARY))
532 return 0;
533
534 netdev_dbg(priv->dev, "tx desc error = 0x%08x\n", status);
535 if (status & TXDESC_JABBER_TIMEOUT)
536 x->tx_jabber++;
537 if (status & TXDESC_FRAME_FLUSHED)
538 x->tx_frame_flushed++;
539 if (status & TXDESC_UNDERFLOW_ERR)
540 xgmac_dma_flush_tx_fifo(priv->base);
541 if (status & TXDESC_IP_HEADER_ERR)
542 x->tx_ip_header_error++;
543 if (status & TXDESC_LOCAL_FAULT)
544 x->tx_local_fault++;
545 if (status & TXDESC_REMOTE_FAULT)
546 x->tx_remote_fault++;
547 if (status & TXDESC_PAYLOAD_CSUM_ERR)
548 x->tx_payload_error++;
549
550 return -1;
551}
552
553static int desc_get_rx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
554{
555 struct xgmac_extra_stats *x = &priv->xstats;
556 int ret = CHECKSUM_UNNECESSARY;
557 u32 status = le32_to_cpu(p->flags);
558 u32 ext_status = le32_to_cpu(p->ext_status);
559
560 if (status & RXDESC_DA_FILTER_FAIL) {
561 netdev_dbg(priv->dev, "XGMAC RX : Dest Address filter fail\n");
562 x->rx_da_filter_fail++;
563 return -1;
564 }
565
566 /* All frames should fit into a single buffer */
567 if (!(status & RXDESC_FIRST_SEG) || !(status & RXDESC_LAST_SEG))
568 return -1;
569
570 /* Check if packet has checksum already */
571 if ((status & RXDESC_FRAME_TYPE) && (status & RXDESC_EXT_STATUS) &&
572 !(ext_status & RXDESC_IP_PAYLOAD_MASK))
573 ret = CHECKSUM_NONE;
574
575 netdev_dbg(priv->dev, "rx status - frame type=%d, csum = %d, ext stat %08x\n",
576 (status & RXDESC_FRAME_TYPE) ? 1 : 0, ret, ext_status);
577
578 if (!(status & RXDESC_ERROR_SUMMARY))
579 return ret;
580
581 /* Handle any errors */
582 if (status & (RXDESC_DESCRIPTOR_ERR | RXDESC_OVERFLOW_ERR |
583 RXDESC_GIANT_FRAME | RXDESC_LENGTH_ERR | RXDESC_CRC_ERR))
584 return -1;
585
586 if (status & RXDESC_EXT_STATUS) {
587 if (ext_status & RXDESC_IP_HEADER_ERR)
588 x->rx_ip_header_error++;
589 if (ext_status & RXDESC_IP_PAYLOAD_ERR)
590 x->rx_payload_error++;
591 netdev_dbg(priv->dev, "IP checksum error - stat %08x\n",
592 ext_status);
593 return CHECKSUM_NONE;
594 }
595
596 return ret;
597}
598
599static inline void xgmac_mac_enable(void __iomem *ioaddr)
600{
601 u32 value = readl(ioaddr + XGMAC_CONTROL);
602 value |= MAC_ENABLE_RX | MAC_ENABLE_TX;
603 writel(value, ioaddr + XGMAC_CONTROL);
604
605 value = readl(ioaddr + XGMAC_DMA_CONTROL);
606 value |= DMA_CONTROL_ST | DMA_CONTROL_SR;
607 writel(value, ioaddr + XGMAC_DMA_CONTROL);
608}
609
610static inline void xgmac_mac_disable(void __iomem *ioaddr)
611{
612 u32 value = readl(ioaddr + XGMAC_DMA_CONTROL);
613 value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR);
614 writel(value, ioaddr + XGMAC_DMA_CONTROL);
615
616 value = readl(ioaddr + XGMAC_CONTROL);
617 value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX);
618 writel(value, ioaddr + XGMAC_CONTROL);
619}
620
621static void xgmac_set_mac_addr(void __iomem *ioaddr, unsigned char *addr,
622 int num)
623{
624 u32 data;
625
626 if (addr) {
627 data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0);
628 writel(data, ioaddr + XGMAC_ADDR_HIGH(num));
629 data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
630 writel(data, ioaddr + XGMAC_ADDR_LOW(num));
631 } else {
632 writel(0, ioaddr + XGMAC_ADDR_HIGH(num));
633 writel(0, ioaddr + XGMAC_ADDR_LOW(num));
634 }
635}
636
637static void xgmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
638 int num)
639{
640 u32 hi_addr, lo_addr;
641
642 /* Read the MAC address from the hardware */
643 hi_addr = readl(ioaddr + XGMAC_ADDR_HIGH(num));
644 lo_addr = readl(ioaddr + XGMAC_ADDR_LOW(num));
645
646 /* Extract the MAC address from the high and low words */
647 addr[0] = lo_addr & 0xff;
648 addr[1] = (lo_addr >> 8) & 0xff;
649 addr[2] = (lo_addr >> 16) & 0xff;
650 addr[3] = (lo_addr >> 24) & 0xff;
651 addr[4] = hi_addr & 0xff;
652 addr[5] = (hi_addr >> 8) & 0xff;
653}
654
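/* Program pause-frame handling: enable RX/TX flow control in XGMAC_FLOW_CTRL
 * with PAUSE_TIME in the upper 16 bits, and toggle hardware FIFO-based flow
 * control (XGMAC_OMR_EFC) to match. */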
655static int xgmac_set_flow_ctrl(struct xgmac_priv *priv, int rx, int tx)
656{
657 u32 reg;
658 unsigned int flow = 0;
659
660 priv->rx_pause = rx;
661 priv->tx_pause = tx;
662
663 if (rx || tx) {
664 if (rx)
665 flow |= XGMAC_FLOW_CTRL_RFE;
666 if (tx)
667 flow |= XGMAC_FLOW_CTRL_TFE;
668
669 flow |= XGMAC_FLOW_CTRL_PLT | XGMAC_FLOW_CTRL_UP;
670 flow |= (PAUSE_TIME << XGMAC_FLOW_CTRL_PT_SHIFT);
671
672 writel(flow, priv->base + XGMAC_FLOW_CTRL);
673
674 reg = readl(priv->base + XGMAC_OMR);
675 reg |= XGMAC_OMR_EFC;
676 writel(reg, priv->base + XGMAC_OMR);
677 } else {
678 writel(0, priv->base + XGMAC_FLOW_CTRL);
679
680 reg = readl(priv->base + XGMAC_OMR);
681 reg &= ~XGMAC_OMR_EFC;
682 writel(reg, priv->base + XGMAC_OMR);
683 }
684
685 return 0;
686}
687
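/* Top up the RX ring: allocate and map a fresh skb for every free slot,
 * then hand each descriptor back to the DMA engine by setting DESC_OWN. */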
688static void xgmac_rx_refill(struct xgmac_priv *priv)
689{
690 struct xgmac_dma_desc *p;
691 dma_addr_t paddr;
692 int bufsz = priv->dev->mtu + ETH_HLEN + ETH_FCS_LEN;
693
694 while (dma_ring_space(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ) > 1) {
695 int entry = priv->rx_head;
696 struct sk_buff *skb;
697
698 p = priv->dma_rx + entry;
699
700 if (priv->rx_skbuff[entry] == NULL) {
701 skb = netdev_alloc_skb_ip_align(priv->dev, bufsz);
702 if (unlikely(skb == NULL))
703 break;
704
705 paddr = dma_map_single(priv->device, skb->data,
706 priv->dma_buf_sz - NET_IP_ALIGN,
707 DMA_FROM_DEVICE);
708 if (dma_mapping_error(priv->device, paddr)) {
709 dev_kfree_skb_any(skb);
710 break;
711 }
712 priv->rx_skbuff[entry] = skb;
713 desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
714 }
715
716 netdev_dbg(priv->dev, "rx ring: head %d, tail %d\n",
717 priv->rx_head, priv->rx_tail);
718
719 priv->rx_head = dma_ring_incr(priv->rx_head, DMA_RX_RING_SZ);
720 desc_set_rx_owner(p);
721 }
722}
723
724/**
725 * xgmac_dma_desc_rings_init - init the RX/TX descriptor rings
726 * @dev: net device structure
727 * Description: this function initializes the DMA RX/TX descriptors
728 * and allocates the socket buffers.
729 */
730static int xgmac_dma_desc_rings_init(struct net_device *dev)
731{
732 struct xgmac_priv *priv = netdev_priv(dev);
733 unsigned int bfsize;
734
735 /* Set the Buffer size according to the MTU;
736 * The total buffer size including any IP offset must be a multiple
737 * of 8 bytes.
738 */
739 bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN, 8);
740
741 netdev_dbg(priv->dev, "mtu [%d] bfsize [%d]\n", dev->mtu, bfsize);
742
743 priv->rx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_RX_RING_SZ,
744 GFP_KERNEL);
745 if (!priv->rx_skbuff)
746 return -ENOMEM;
747
748 priv->dma_rx = dma_alloc_coherent(priv->device,
749 DMA_RX_RING_SZ *
750 sizeof(struct xgmac_dma_desc),
751 &priv->dma_rx_phy,
752 GFP_KERNEL);
753 if (!priv->dma_rx)
754 goto err_dma_rx;
755
756 priv->tx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_TX_RING_SZ,
757 GFP_KERNEL);
758 if (!priv->tx_skbuff)
759 goto err_tx_skb;
760
761 priv->dma_tx = dma_alloc_coherent(priv->device,
762 DMA_TX_RING_SZ *
763 sizeof(struct xgmac_dma_desc),
764 &priv->dma_tx_phy,
765 GFP_KERNEL);
766 if (!priv->dma_tx)
767 goto err_dma_tx;
768
769 netdev_dbg(priv->dev, "DMA desc rings: virt addr (Rx %p, "
770 "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n",
771 priv->dma_rx, priv->dma_tx,
772 (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy);
773
774 priv->rx_tail = 0;
775 priv->rx_head = 0;
776 priv->dma_buf_sz = bfsize;
777 desc_init_rx_desc(priv->dma_rx, DMA_RX_RING_SZ, priv->dma_buf_sz);
778 xgmac_rx_refill(priv);
779
780 priv->tx_tail = 0;
781 priv->tx_head = 0;
782 desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ);
783
784 writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR);
785 writel(priv->dma_rx_phy, priv->base + XGMAC_DMA_RX_BASE_ADDR);
786
787 return 0;
788
789err_dma_tx:
790 kfree(priv->tx_skbuff);
791err_tx_skb:
792 dma_free_coherent(priv->device,
793 DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc),
794 priv->dma_rx, priv->dma_rx_phy);
795err_dma_rx:
796 kfree(priv->rx_skbuff);
797 return -ENOMEM;
798}
799
800static void xgmac_free_rx_skbufs(struct xgmac_priv *priv)
801{
802 int i;
803 struct xgmac_dma_desc *p;
804
805 if (!priv->rx_skbuff)
806 return;
807
808 for (i = 0; i < DMA_RX_RING_SZ; i++) {
809 struct sk_buff *skb = priv->rx_skbuff[i];
810 if (skb == NULL)
811 continue;
812
813 p = priv->dma_rx + i;
814 dma_unmap_single(priv->device, desc_get_buf_addr(p),
815 priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE);
816 dev_kfree_skb_any(skb);
817 priv->rx_skbuff[i] = NULL;
818 }
819}
820
821static void xgmac_free_tx_skbufs(struct xgmac_priv *priv)
822{
823 int i;
824 struct xgmac_dma_desc *p;
825
826 if (!priv->tx_skbuff)
827 return;
828
829 for (i = 0; i < DMA_TX_RING_SZ; i++) {
830 if (priv->tx_skbuff[i] == NULL)
831 continue;
832
833 p = priv->dma_tx + i;
834 if (desc_get_tx_fs(p))
835 dma_unmap_single(priv->device, desc_get_buf_addr(p),
836 desc_get_buf_len(p), DMA_TO_DEVICE);
837 else
838 dma_unmap_page(priv->device, desc_get_buf_addr(p),
839 desc_get_buf_len(p), DMA_TO_DEVICE);
840
841 if (desc_get_tx_ls(p))
842 dev_kfree_skb_any(priv->tx_skbuff[i]);
843 priv->tx_skbuff[i] = NULL;
844 }
845}
846
847static void xgmac_free_dma_desc_rings(struct xgmac_priv *priv)
848{
849 /* Release the DMA TX/RX socket buffers */
850 xgmac_free_rx_skbufs(priv);
851 xgmac_free_tx_skbufs(priv);
852
853 /* Free the consistent memory allocated for descriptor rings */
854 if (priv->dma_tx) {
855 dma_free_coherent(priv->device,
856 DMA_TX_RING_SZ * sizeof(struct xgmac_dma_desc),
857 priv->dma_tx, priv->dma_tx_phy);
858 priv->dma_tx = NULL;
859 }
860 if (priv->dma_rx) {
861 dma_free_coherent(priv->device,
862 DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc),
863 priv->dma_rx, priv->dma_rx_phy);
864 priv->dma_rx = NULL;
865 }
866 kfree(priv->rx_skbuff);
867 priv->rx_skbuff = NULL;
868 kfree(priv->tx_skbuff);
869 priv->tx_skbuff = NULL;
870}
871
872/**
873 * xgmac_tx_complete:
874 * @priv: private driver structure
875 * Description: it reclaims resources after transmission completes.
876 */
877static void xgmac_tx_complete(struct xgmac_priv *priv)
878{
879 while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) {
880 unsigned int entry = priv->tx_tail;
881 struct sk_buff *skb = priv->tx_skbuff[entry];
882 struct xgmac_dma_desc *p = priv->dma_tx + entry;
883
884 /* Check if the descriptor is owned by the DMA. */
885 if (desc_get_owner(p))
886 break;
887
887
888 netdev_dbg(priv->dev, "tx ring: curr %d, dirty %d\n",
889 priv->tx_head, priv->tx_tail);
890
891 if (desc_get_tx_fs(p))
892 dma_unmap_single(priv->device, desc_get_buf_addr(p),
893 desc_get_buf_len(p), DMA_TO_DEVICE);
894 else
895 dma_unmap_page(priv->device, desc_get_buf_addr(p),
896 desc_get_buf_len(p), DMA_TO_DEVICE);
897
898 /* Check tx error on the last segment */
899 if (desc_get_tx_ls(p)) {
900 desc_get_tx_status(priv, p);
901 dev_kfree_skb(skb);
902 }
903
904 priv->tx_skbuff[entry] = NULL;
905 priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);
906 }
907
908 /* Ensure tx_tail is visible to xgmac_xmit */
909 smp_mb();
910 if (unlikely(netif_queue_stopped(priv->dev) &&
911 (tx_dma_ring_space(priv) > MAX_SKB_FRAGS)))
912 netif_wake_queue(priv->dev);
913}
914
915static void xgmac_tx_timeout_work(struct work_struct *work)
916{
917 u32 reg, value;
918 struct xgmac_priv *priv =
919 container_of(work, struct xgmac_priv, tx_timeout_work);
920
921 napi_disable(&priv->napi);
922
923 writel(0, priv->base + XGMAC_DMA_INTR_ENA);
924
925 netif_tx_lock(priv->dev);
926
927 reg = readl(priv->base + XGMAC_DMA_CONTROL);
928 writel(reg & ~DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
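	/* Wait for the transmit DMA state machine (the DMA_STATUS_TS field,
	 * mask 0x700000) to leave its running states before tearing the ring
	 * down; 0x600000 is presumably the suspended state. */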
929 do {
930 value = readl(priv->base + XGMAC_DMA_STATUS) & 0x700000;
931 } while (value && (value != 0x600000));
932
933 xgmac_free_tx_skbufs(priv);
934 desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ);
935 priv->tx_tail = 0;
936 priv->tx_head = 0;
937 writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR);
938 writel(reg | DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
939
940 writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS,
941 priv->base + XGMAC_DMA_STATUS);
942
943 netif_tx_unlock(priv->dev);
944 netif_wake_queue(priv->dev);
945
946 napi_enable(&priv->napi);
947
948 /* Enable interrupts */
949 writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_STATUS);
950 writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
951}
952
953static int xgmac_hw_init(struct net_device *dev)
954{
955 u32 value, ctrl;
956 int limit;
957 struct xgmac_priv *priv = netdev_priv(dev);
958 void __iomem *ioaddr = priv->base;
959
960 /* Save the ctrl register value */
961 ctrl = readl(ioaddr + XGMAC_CONTROL) & XGMAC_CONTROL_SPD_MASK;
962
963 /* SW reset */
964 value = DMA_BUS_MODE_SFT_RESET;
965 writel(value, ioaddr + XGMAC_DMA_BUS_MODE);
966 limit = 15000;
967 while (limit-- &&
968 (readl(ioaddr + XGMAC_DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
969 cpu_relax();
970 if (limit < 0)
971 return -EBUSY;
972
973 value = (0x10 << DMA_BUS_MODE_PBL_SHIFT) |
974 (0x10 << DMA_BUS_MODE_RPBL_SHIFT) |
975 DMA_BUS_MODE_FB | DMA_BUS_MODE_ATDS | DMA_BUS_MODE_AAL;
976 writel(value, ioaddr + XGMAC_DMA_BUS_MODE);
977
978 writel(0, ioaddr + XGMAC_DMA_INTR_ENA);
979
980 /* Mask power mgt interrupt */
981 writel(XGMAC_INT_STAT_PMTIM, ioaddr + XGMAC_INT_STAT);
982
983 /* XGMAC requires AXI bus init. This is a 'magic number' for now */
984 writel(0x0077000E, ioaddr + XGMAC_DMA_AXI_BUS);
985
986 ctrl |= XGMAC_CONTROL_DDIC | XGMAC_CONTROL_JE | XGMAC_CONTROL_ACS |
987 XGMAC_CONTROL_CAR;
988 if (dev->features & NETIF_F_RXCSUM)
989 ctrl |= XGMAC_CONTROL_IPC;
990 writel(ctrl, ioaddr + XGMAC_CONTROL);
991
992 writel(DMA_CONTROL_OSF, ioaddr + XGMAC_DMA_CONTROL);
993
994 /* Set the HW DMA mode and the COE */
995 writel(XGMAC_OMR_TSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA |
996 XGMAC_OMR_RTC_256,
997 ioaddr + XGMAC_OMR);
998
999 /* Reset the MMC counters */
1000 writel(1, ioaddr + XGMAC_MMC_CTRL);
1001 return 0;
1002}
1003
1004/**
1005 * xgmac_open - open entry point of the driver
1006 * @dev : pointer to the device structure.
1007 * Description:
1008 * This function is the open entry point of the driver.
1009 * Return value:
1010 * 0 on success and an appropriate (-)ve integer as defined in errno.h
1011 * file on failure.
1012 */
1013static int xgmac_open(struct net_device *dev)
1014{
1015 int ret;
1016 struct xgmac_priv *priv = netdev_priv(dev);
1017 void __iomem *ioaddr = priv->base;
1018
1019 /* Check that the MAC address is valid. If it's not, fall back to a
1020 * randomly generated one instead of refusing to bring the device up.
1021 * A fixed address can still be set with:
1022 * ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx */
1023 if (!is_valid_ether_addr(dev->dev_addr)) {
1024 eth_hw_addr_random(dev);
1025 netdev_dbg(priv->dev, "generated random MAC address %pM\n",
1026 dev->dev_addr);
1027 }
1028
1029 memset(&priv->xstats, 0, sizeof(struct xgmac_extra_stats));
1030
1031 /* Initialize the XGMAC and descriptors */
1032 xgmac_hw_init(dev);
1033 xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);
1034 xgmac_set_flow_ctrl(priv, priv->rx_pause, priv->tx_pause);
1035
1036 ret = xgmac_dma_desc_rings_init(dev);
1037 if (ret < 0)
1038 return ret;
1039
1040 /* Enable the MAC Rx/Tx */
1041 xgmac_mac_enable(ioaddr);
1042
1043 napi_enable(&priv->napi);
1044 netif_start_queue(dev);
1045
1046 /* Enable interrupts */
1047 writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
1048 writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
1049
1050 return 0;
1051}
1052
1053/**
1054 * xgmac_stop - close entry point of the driver
1055 * @dev : device pointer.
1056 * Description:
1057 * This is the stop entry point of the driver.
1058 */
1059static int xgmac_stop(struct net_device *dev)
1060{
1061 struct xgmac_priv *priv = netdev_priv(dev);
1062
1063 netif_stop_queue(dev);
1064
1065 if (readl(priv->base + XGMAC_DMA_INTR_ENA))
1066 napi_disable(&priv->napi);
1067
1068 writel(0, priv->base + XGMAC_DMA_INTR_ENA);
1069
1070 /* Disable the MAC core */
1071 xgmac_mac_disable(priv->base);
1072
1073 /* Release and free the Rx/Tx resources */
1074 xgmac_free_dma_desc_rings(priv);
1075
1076 return 0;
1077}
1078
1079/**
1080 * xgmac_xmit:
1081 * @skb : the socket buffer
1082 * @dev : device pointer
1083 * Description : Tx entry point of the driver.
1084 */
1085static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
1086{
1087 struct xgmac_priv *priv = netdev_priv(dev);
1088 unsigned int entry;
1089 int i;
1090 u32 irq_flag;
1091 int nfrags = skb_shinfo(skb)->nr_frags;
1092 struct xgmac_dma_desc *desc, *first;
1093 unsigned int desc_flags;
1094 unsigned int len;
1095 dma_addr_t paddr;
1096
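	/* Throttle TX completion interrupts: request one only every
	 * DMA_TX_RING_SZ/4 packets; the remaining descriptors are reclaimed
	 * the next time the NAPI poll runs. */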
1097 priv->tx_irq_cnt = (priv->tx_irq_cnt + 1) & (DMA_TX_RING_SZ/4 - 1);
1098 irq_flag = priv->tx_irq_cnt ? 0 : TXDESC_INTERRUPT;
1099
1100 desc_flags = (skb->ip_summed == CHECKSUM_PARTIAL) ?
1101 TXDESC_CSUM_ALL : 0;
1102 entry = priv->tx_head;
1103 desc = priv->dma_tx + entry;
1104 first = desc;
1105
1106 len = skb_headlen(skb);
1107 paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE);
1108 if (dma_mapping_error(priv->device, paddr)) {
1109 dev_kfree_skb(skb);
1110 return NETDEV_TX_OK;
1111 }
1112 priv->tx_skbuff[entry] = skb;
1113 desc_set_buf_addr_and_size(desc, paddr, len);
1114
1115 for (i = 0; i < nfrags; i++) {
1116 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1117
1118 len = frag->size;
1119
1120 paddr = skb_frag_dma_map(priv->device, frag, 0, len,
1121 DMA_TO_DEVICE);
1122 if (dma_mapping_error(priv->device, paddr))
1123 goto dma_err;
1124
1125 entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
1126 desc = priv->dma_tx + entry;
1127 priv->tx_skbuff[entry] = skb;
1128
1129 desc_set_buf_addr_and_size(desc, paddr, len);
1130 if (i < (nfrags - 1))
1131 desc_set_tx_owner(desc, desc_flags);
1132 }
1133
1134 /* Interrupt on completion only for the last segment */
1135 if (desc != first)
1136 desc_set_tx_owner(desc, desc_flags |
1137 TXDESC_LAST_SEG | irq_flag);
1138 else
1139 desc_flags |= TXDESC_LAST_SEG | irq_flag;
1140
1141 /* Set owner on first desc last to avoid race condition */
1142 wmb();
1143 desc_set_tx_owner(first, desc_flags | TXDESC_FIRST_SEG);
1144
1145 writel(1, priv->base + XGMAC_DMA_TX_POLL);
1146
1147 priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ);
1148
1149 /* Ensure tx_head update is visible to tx completion */
1150 smp_mb();
1151 if (unlikely(tx_dma_ring_space(priv) <= MAX_SKB_FRAGS)) {
1152 netif_stop_queue(dev);
1153 /* Ensure netif_stop_queue is visible to tx completion */
1154 smp_mb();
1155 if (tx_dma_ring_space(priv) > MAX_SKB_FRAGS)
1156 netif_start_queue(dev);
1157 }
1158 return NETDEV_TX_OK;
1159
1160dma_err:
1161 entry = priv->tx_head;
1162 for ( ; i > 0; i--) {
1163 entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
1164 desc = priv->dma_tx + entry;
1165 priv->tx_skbuff[entry] = NULL;
1166 dma_unmap_page(priv->device, desc_get_buf_addr(desc),
1167 desc_get_buf_len(desc), DMA_TO_DEVICE);
1168 desc_clear_tx_owner(desc);
1169 }
1170 desc = first;
1171 dma_unmap_single(priv->device, desc_get_buf_addr(desc),
1172 desc_get_buf_len(desc), DMA_TO_DEVICE);
1173 dev_kfree_skb(skb);
1174 return NETDEV_TX_OK;
1175}
1176
1177static int xgmac_rx(struct xgmac_priv *priv, int limit)
1178{
1179 unsigned int entry;
1180 unsigned int count = 0;
1181 struct xgmac_dma_desc *p;
1182
1183 while (count < limit) {
1184 int ip_checksum;
1185 struct sk_buff *skb;
1186 int frame_len;
1187
1188 if (!dma_ring_cnt(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ))
1189 break;
1190
1191 entry = priv->rx_tail;
1192 p = priv->dma_rx + entry;
1193 if (desc_get_owner(p))
1194 break;
1195
1196 count++;
1197 priv->rx_tail = dma_ring_incr(priv->rx_tail, DMA_RX_RING_SZ);
1198
1199 /* read the status of the incoming frame */
1200 ip_checksum = desc_get_rx_status(priv, p);
1201 if (ip_checksum < 0)
1202 continue;
1203
1204 skb = priv->rx_skbuff[entry];
1205 if (unlikely(!skb)) {
1206 netdev_err(priv->dev, "Inconsistent Rx descriptor chain\n");
1207 break;
1208 }
1209 priv->rx_skbuff[entry] = NULL;
1210
1211 frame_len = desc_get_rx_frame_len(p);
1212 netdev_dbg(priv->dev, "RX frame size %d, COE status: %d\n",
1213 frame_len, ip_checksum);
1214
1215 skb_put(skb, frame_len);
1216 dma_unmap_single(priv->device, desc_get_buf_addr(p),
1217 priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE);
1218
1219 skb->protocol = eth_type_trans(skb, priv->dev);
1220 skb->ip_summed = ip_checksum;
1221 if (ip_checksum == CHECKSUM_NONE)
1222 netif_receive_skb(skb);
1223 else
1224 napi_gro_receive(&priv->napi, skb);
1225 }
1226
1227 xgmac_rx_refill(priv);
1228
1229 return count;
1230}
1231
1232/**
1233 * xgmac_poll - xgmac poll method (NAPI)
1234 * @napi : pointer to the napi structure.
1235 * @budget : maximum number of packets that the current CPU can receive from
1236 * all interfaces.
1237 * Description :
1238 * This function implements the reception process.
1239 * Also it runs the TX completion thread
1240 */
1241static int xgmac_poll(struct napi_struct *napi, int budget)
1242{
1243 struct xgmac_priv *priv = container_of(napi,
1244 struct xgmac_priv, napi);
1245 int work_done = 0;
1246
1247 xgmac_tx_complete(priv);
1248 work_done = xgmac_rx(priv, budget);
1249
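	/* If the budget was not exhausted, all pending work is done: complete
	 * NAPI and re-enable the DMA interrupts masked in xgmac_interrupt(). */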
1250 if (work_done < budget) {
1251 napi_complete(napi);
1252 __raw_writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
1253 }
1254 return work_done;
1255}
1256
1257/**
1258 * xgmac_tx_timeout
1259 * @dev : Pointer to net device structure
1260 * Description: this function is called when a packet transmission fails to
1261 * complete within a reasonable time frame. The driver will mark the error in the
1262 * netdev structure and arrange for the device to be reset to a sane state
1263 * in order to transmit a new packet.
1264 */
1265static void xgmac_tx_timeout(struct net_device *dev)
1266{
1267 struct xgmac_priv *priv = netdev_priv(dev);
1268 schedule_work(&priv->tx_timeout_work);
1269}
1270
1271/**
1272 * xgmac_set_rx_mode - entry point for multicast addressing
1273 * @dev : pointer to the device structure
1274 * Description:
1275 * This function is a driver entry point which gets called by the kernel
1276 * whenever multicast addresses must be enabled/disabled.
1277 * Return value:
1278 * void.
1279 */
1280static void xgmac_set_rx_mode(struct net_device *dev)
1281{
1282 int i;
1283 struct xgmac_priv *priv = netdev_priv(dev);
1284 void __iomem *ioaddr = priv->base;
1285 unsigned int value = 0;
1286 u32 hash_filter[XGMAC_NUM_HASH];
1287 int reg = 1;
1288 struct netdev_hw_addr *ha;
1289 bool use_hash = false;
1290
1291 netdev_dbg(priv->dev, "# mcasts %d, # unicast %d\n",
1292 netdev_mc_count(dev), netdev_uc_count(dev));
1293
1294 if (dev->flags & IFF_PROMISC) {
1295 writel(XGMAC_FRAME_FILTER_PR, ioaddr + XGMAC_FRAME_FILTER);
1296 return;
1297 }
1298
1299 memset(hash_filter, 0, sizeof(hash_filter));
1300
1301 if (netdev_uc_count(dev) > XGMAC_MAX_FILTER_ADDR) {
1302 use_hash = true;
1303 value |= XGMAC_FRAME_FILTER_HUC | XGMAC_FRAME_FILTER_HPF;
1304 }
1305 netdev_for_each_uc_addr(ha, dev) {
1306 if (use_hash) {
1307 u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23;
1308
1309 /* The most significant 4 bits determine the register to
1310 * use (H/L) while the other 5 bits determine the bit
1311 * within the register. */
1312 hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
1313 } else {
1314 xgmac_set_mac_addr(ioaddr, ha->addr, reg);
1315 reg++;
1316 }
1317 }
1318
1319 if (dev->flags & IFF_ALLMULTI) {
1320 value |= XGMAC_FRAME_FILTER_PM;
1321 goto out;
1322 }
1323
1324 if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) {
1325 use_hash = true;
1326 value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF;
1327 } else {
1328 use_hash = false;
1329 }
1330 netdev_for_each_mc_addr(ha, dev) {
1331 if (use_hash) {
1332 u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23;
1333
1334 /* The most significant 4 bits determine the register to
1335 * use (H/L) while the other 5 bits determine the bit
1336 * within the register. */
1337 hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
1338 } else {
1339 xgmac_set_mac_addr(ioaddr, ha->addr, reg);
1340 reg++;
1341 }
1342 }
1343
1344out:
1345 for (i = reg; i < XGMAC_MAX_FILTER_ADDR; i++)
1346 xgmac_set_mac_addr(ioaddr, NULL, i);
1347 for (i = 0; i < XGMAC_NUM_HASH; i++)
1348 writel(hash_filter[i], ioaddr + XGMAC_HASH(i));
1349
1350 writel(value, ioaddr + XGMAC_FRAME_FILTER);
1351}
1352
1353/**
1354 * xgmac_change_mtu - entry point to change MTU size for the device.
1355 * @dev : device pointer.
1356 * @new_mtu : the new MTU size for the device.
1357 * Description: the Maximum Transfer Unit (MTU) is used by the network layer
1358 * to drive packet transmission. Ethernet has an MTU of 1500 octets
1359 * (ETH_DATA_LEN). This value can be changed with ifconfig.
1360 * Return value:
1361 * 0 on success and an appropriate (-)ve integer as defined in errno.h
1362 * file on failure.
1363 */
1364static int xgmac_change_mtu(struct net_device *dev, int new_mtu)
1365{
1366 struct xgmac_priv *priv = netdev_priv(dev);
1367 int old_mtu;
1368
1369 if ((new_mtu < 46) || (new_mtu > MAX_MTU)) {
1370 netdev_err(priv->dev, "invalid MTU, max MTU is: %d\n", MAX_MTU);
1371 return -EINVAL;
1372 }
1373
1374 old_mtu = dev->mtu;
1375 dev->mtu = new_mtu;
1376
1377 /* return early if the buffer sizes will not change */
1378 if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
1379 return 0;
1380 if (old_mtu == new_mtu)
1381 return 0;
1382
1383 /* Stop everything, get ready to change the MTU */
1384 if (!netif_running(dev))
1385 return 0;
1386
1387 /* Bring the interface down and then back up */
1388 xgmac_stop(dev);
1389 return xgmac_open(dev);
1390}
1391
1392static irqreturn_t xgmac_pmt_interrupt(int irq, void *dev_id)
1393{
1394 u32 intr_status;
1395 struct net_device *dev = (struct net_device *)dev_id;
1396 struct xgmac_priv *priv = netdev_priv(dev);
1397 void __iomem *ioaddr = priv->base;
1398
1399 intr_status = __raw_readl(ioaddr + XGMAC_INT_STAT);
1400 if (intr_status & XGMAC_INT_STAT_PMT) {
1401 netdev_dbg(priv->dev, "received Magic frame\n");
1402 /* clear the PMT bits 5 and 6 by reading the PMT */
1403 readl(ioaddr + XGMAC_PMT);
1404 }
1405 return IRQ_HANDLED;
1406}
1407
1408static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
1409{
1410 u32 intr_status;
1411 struct net_device *dev = (struct net_device *)dev_id;
1412 struct xgmac_priv *priv = netdev_priv(dev);
1413 struct xgmac_extra_stats *x = &priv->xstats;
1414
1415 /* read the status register (CSR5) */
1416 intr_status = __raw_readl(priv->base + XGMAC_DMA_STATUS);
1417 intr_status &= __raw_readl(priv->base + XGMAC_DMA_INTR_ENA);
1418 __raw_writel(intr_status, priv->base + XGMAC_DMA_STATUS);
1419
1420 /* It displays the DMA process states (CSR5 register) */
1421 /* ABNORMAL interrupts */
1422 if (unlikely(intr_status & DMA_STATUS_AIS)) {
1423 if (intr_status & DMA_STATUS_TJT) {
1424 netdev_err(priv->dev, "transmit jabber\n");
1425 x->tx_jabber++;
1426 }
1427 if (intr_status & DMA_STATUS_RU)
1428 x->rx_buf_unav++;
1429 if (intr_status & DMA_STATUS_RPS) {
1430 netdev_err(priv->dev, "receive process stopped\n");
1431 x->rx_process_stopped++;
1432 }
1433 if (intr_status & DMA_STATUS_ETI) {
1434 netdev_err(priv->dev, "transmit early interrupt\n");
1435 x->tx_early++;
1436 }
1437 if (intr_status & DMA_STATUS_TPS) {
1438 netdev_err(priv->dev, "transmit process stopped\n");
1439 x->tx_process_stopped++;
1440 schedule_work(&priv->tx_timeout_work);
1441 }
1442 if (intr_status & DMA_STATUS_FBI) {
1443 netdev_err(priv->dev, "fatal bus error\n");
1444 x->fatal_bus_error++;
1445 }
1446 }
1447
1448 /* TX/RX NORMAL interrupts */
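	/* Mask the normal RX/TX interrupt sources while NAPI polls; abnormal
	 * interrupts stay enabled, and xgmac_poll() restores the full mask
	 * when it completes within budget. */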
1449 if (intr_status & (DMA_STATUS_RI | DMA_STATUS_TU | DMA_STATUS_TI)) {
1450 __raw_writel(DMA_INTR_ABNORMAL, priv->base + XGMAC_DMA_INTR_ENA);
1451 napi_schedule(&priv->napi);
1452 }
1453
1454 return IRQ_HANDLED;
1455}
1456
1457#ifdef CONFIG_NET_POLL_CONTROLLER
1458/* Polling receive - used by NETCONSOLE and other diagnostic tools
1459 * to allow network I/O with interrupts disabled. */
1460static void xgmac_poll_controller(struct net_device *dev)
1461{
1462 disable_irq(dev->irq);
1463 xgmac_interrupt(dev->irq, dev);
1464 enable_irq(dev->irq);
1465}
1466#endif
1467
1468static struct rtnl_link_stats64 *
1469xgmac_get_stats64(struct net_device *dev,
1470 struct rtnl_link_stats64 *storage)
1471{
1472 struct xgmac_priv *priv = netdev_priv(dev);
1473 void __iomem *base = priv->base;
1474 u32 count;
1475
1476 spin_lock_bh(&priv->stats_lock);
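	/* Freeze the MMC counters while reading so the split LO/HI octet
	 * counters stay consistent; they are unfrozen by the writel(0) below. */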
1477 writel(XGMAC_MMC_CTRL_CNT_FRZ, base + XGMAC_MMC_CTRL);
1478
1479 storage->rx_bytes = readl(base + XGMAC_MMC_RXOCTET_G_LO);
1480 storage->rx_bytes |= (u64)(readl(base + XGMAC_MMC_RXOCTET_G_HI)) << 32;
1481
1482 storage->rx_packets = readl(base + XGMAC_MMC_RXFRAME_GB_LO);
1483 storage->multicast = readl(base + XGMAC_MMC_RXMCFRAME_G);
1484 storage->rx_crc_errors = readl(base + XGMAC_MMC_RXCRCERR);
1485 storage->rx_length_errors = readl(base + XGMAC_MMC_RXLENGTHERR);
1486 storage->rx_missed_errors = readl(base + XGMAC_MMC_RXOVERFLOW);
1487
1488 storage->tx_bytes = readl(base + XGMAC_MMC_TXOCTET_G_LO);
1489 storage->tx_bytes |= (u64)(readl(base + XGMAC_MMC_TXOCTET_G_HI)) << 32;
1490
1491 count = readl(base + XGMAC_MMC_TXFRAME_GB_LO);
1492 storage->tx_errors = count - readl(base + XGMAC_MMC_TXFRAME_G_LO);
1493 storage->tx_packets = count;
1494 storage->tx_fifo_errors = readl(base + XGMAC_MMC_TXUNDERFLOW);
1495
1496 writel(0, base + XGMAC_MMC_CTRL);
1497 spin_unlock_bh(&priv->stats_lock);
1498 return storage;
1499}
1500
1501static int xgmac_set_mac_address(struct net_device *dev, void *p)
1502{
1503 struct xgmac_priv *priv = netdev_priv(dev);
1504 void __iomem *ioaddr = priv->base;
1505 struct sockaddr *addr = p;
1506
1507 if (!is_valid_ether_addr(addr->sa_data))
1508 return -EADDRNOTAVAIL;
1509
1510 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1511
1512 xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);
1513
1514 return 0;
1515}
1516
1517static int xgmac_set_features(struct net_device *dev, netdev_features_t features)
1518{
1519 u32 ctrl;
1520 struct xgmac_priv *priv = netdev_priv(dev);
1521 void __iomem *ioaddr = priv->base;
1522 netdev_features_t changed = dev->features ^ features;
1523
1524 if (!(changed & NETIF_F_RXCSUM))
1525 return 0;
1526
1527 ctrl = readl(ioaddr + XGMAC_CONTROL);
1528 if (features & NETIF_F_RXCSUM)
1529 ctrl |= XGMAC_CONTROL_IPC;
1530 else
1531 ctrl &= ~XGMAC_CONTROL_IPC;
1532 writel(ctrl, ioaddr + XGMAC_CONTROL);
1533
1534 return 0;
1535}
1536
1537static const struct net_device_ops xgmac_netdev_ops = {
1538 .ndo_open = xgmac_open,
1539 .ndo_start_xmit = xgmac_xmit,
1540 .ndo_stop = xgmac_stop,
1541 .ndo_change_mtu = xgmac_change_mtu,
1542 .ndo_set_rx_mode = xgmac_set_rx_mode,
1543 .ndo_tx_timeout = xgmac_tx_timeout,
1544 .ndo_get_stats64 = xgmac_get_stats64,
1545#ifdef CONFIG_NET_POLL_CONTROLLER
1546 .ndo_poll_controller = xgmac_poll_controller,
1547#endif
1548 .ndo_set_mac_address = xgmac_set_mac_address,
1549 .ndo_set_features = xgmac_set_features,
1550};
1551
1552static int xgmac_ethtool_getsettings(struct net_device *dev,
1553 struct ethtool_cmd *cmd)
1554{
1555 cmd->autoneg = 0;
1556 cmd->duplex = DUPLEX_FULL;
1557 ethtool_cmd_speed_set(cmd, 10000);
1558 cmd->supported = 0;
1559 cmd->advertising = 0;
1560 cmd->transceiver = XCVR_INTERNAL;
1561 return 0;
1562}
1563
1564static void xgmac_get_pauseparam(struct net_device *netdev,
1565 struct ethtool_pauseparam *pause)
1566{
1567 struct xgmac_priv *priv = netdev_priv(netdev);
1568
1569 pause->rx_pause = priv->rx_pause;
1570 pause->tx_pause = priv->tx_pause;
1571}
1572
1573static int xgmac_set_pauseparam(struct net_device *netdev,
1574 struct ethtool_pauseparam *pause)
1575{
1576 struct xgmac_priv *priv = netdev_priv(netdev);
1577
1578 if (pause->autoneg)
1579 return -EINVAL;
1580
1581 return xgmac_set_flow_ctrl(priv, pause->rx_pause, pause->tx_pause);
1582}
1583
1584struct xgmac_stats {
1585 char stat_string[ETH_GSTRING_LEN];
1586 int stat_offset;
1587 bool is_reg;
1588};
1589
1590#define XGMAC_STAT(m) \
1591 { #m, offsetof(struct xgmac_priv, xstats.m), false }
1592#define XGMAC_HW_STAT(m, reg_offset) \
1593 { #m, reg_offset, true }
1594
1595static const struct xgmac_stats xgmac_gstrings_stats[] = {
1596 XGMAC_STAT(tx_frame_flushed),
1597 XGMAC_STAT(tx_payload_error),
1598 XGMAC_STAT(tx_ip_header_error),
1599 XGMAC_STAT(tx_local_fault),
1600 XGMAC_STAT(tx_remote_fault),
1601 XGMAC_STAT(tx_early),
1602 XGMAC_STAT(tx_process_stopped),
1603 XGMAC_STAT(tx_jabber),
1604 XGMAC_STAT(rx_buf_unav),
1605 XGMAC_STAT(rx_process_stopped),
1606 XGMAC_STAT(rx_payload_error),
1607 XGMAC_STAT(rx_ip_header_error),
1608 XGMAC_STAT(rx_da_filter_fail),
1609 XGMAC_STAT(fatal_bus_error),
1610 XGMAC_HW_STAT(rx_watchdog, XGMAC_MMC_RXWATCHDOG),
1611 XGMAC_HW_STAT(tx_vlan, XGMAC_MMC_TXVLANFRAME),
1612 XGMAC_HW_STAT(rx_vlan, XGMAC_MMC_RXVLANFRAME),
1613 XGMAC_HW_STAT(tx_pause, XGMAC_MMC_TXPAUSEFRAME),
1614 XGMAC_HW_STAT(rx_pause, XGMAC_MMC_RXPAUSEFRAME),
1615};
1616#define XGMAC_STATS_LEN ARRAY_SIZE(xgmac_gstrings_stats)
1617
1618static void xgmac_get_ethtool_stats(struct net_device *dev,
1619 struct ethtool_stats *dummy,
1620 u64 *data)
1621{
1622 struct xgmac_priv *priv = netdev_priv(dev);
1623 void *p = priv;
1624 int i;
1625
1626 for (i = 0; i < XGMAC_STATS_LEN; i++) {
1627 if (xgmac_gstrings_stats[i].is_reg)
1628 *data++ = readl(priv->base +
1629 xgmac_gstrings_stats[i].stat_offset);
1630 else
1631 *data++ = *(u32 *)(p +
1632 xgmac_gstrings_stats[i].stat_offset);
1633 }
1634}
1635
1636static int xgmac_get_sset_count(struct net_device *netdev, int sset)
1637{
1638 switch (sset) {
1639 case ETH_SS_STATS:
1640 return XGMAC_STATS_LEN;
1641 default:
1642 return -EINVAL;
1643 }
1644}
1645
1646static void xgmac_get_strings(struct net_device *dev, u32 stringset,
1647 u8 *data)
1648{
1649 int i;
1650 u8 *p = data;
1651
1652 switch (stringset) {
1653 case ETH_SS_STATS:
1654 for (i = 0; i < XGMAC_STATS_LEN; i++) {
1655 memcpy(p, xgmac_gstrings_stats[i].stat_string,
1656 ETH_GSTRING_LEN);
1657 p += ETH_GSTRING_LEN;
1658 }
1659 break;
1660 default:
1661 WARN_ON(1);
1662 break;
1663 }
1664}
1665
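/* Wake-on-LAN: magic packet and unicast wake frames are supported when
 * the platform device is wake-up capable; set_wol arms or disarms IRQ
 * wake accordingly.
 */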
1666static void xgmac_get_wol(struct net_device *dev,
1667 struct ethtool_wolinfo *wol)
1668{
1669 struct xgmac_priv *priv = netdev_priv(dev);
1670
1671 if (device_can_wakeup(priv->device)) {
1672 wol->supported = WAKE_MAGIC | WAKE_UCAST;
1673 wol->wolopts = priv->wolopts;
1674 }
1675}
1676
1677static int xgmac_set_wol(struct net_device *dev,
1678 struct ethtool_wolinfo *wol)
1679{
1680 struct xgmac_priv *priv = netdev_priv(dev);
1681 u32 support = WAKE_MAGIC | WAKE_UCAST;
1682
1683 if (!device_can_wakeup(priv->device))
1684 return -ENOTSUPP;
1685
1686 if (wol->wolopts & ~support)
1687 return -EINVAL;
1688
1689 priv->wolopts = wol->wolopts;
1690
1691 if (wol->wolopts) {
1692 device_set_wakeup_enable(priv->device, 1);
1693 enable_irq_wake(dev->irq);
1694 } else {
1695 device_set_wakeup_enable(priv->device, 0);
1696 disable_irq_wake(dev->irq);
1697 }
1698
1699 return 0;
1700}
1701
1702static const struct ethtool_ops xgmac_ethtool_ops = {
1703 .get_settings = xgmac_ethtool_getsettings,
1704 .get_link = ethtool_op_get_link,
1705 .get_pauseparam = xgmac_get_pauseparam,
1706 .set_pauseparam = xgmac_set_pauseparam,
1707 .get_ethtool_stats = xgmac_get_ethtool_stats,
1708 .get_strings = xgmac_get_strings,
1709 .get_wol = xgmac_get_wol,
1710 .set_wol = xgmac_set_wol,
1711 .get_sset_count = xgmac_get_sset_count,
1712};
1713
1714/**
1715 * xgmac_probe
1716 * @pdev: platform device pointer
1717 * Description: the driver is initialized through platform_device; it maps the device registers, requests the IRQ lines and registers the net_device.
1718 */
1719static int xgmac_probe(struct platform_device *pdev)
1720{
1721 int ret = 0;
1722 struct resource *res;
1723 struct net_device *ndev = NULL;
1724 struct xgmac_priv *priv = NULL;
1725 u32 uid;
1726
1727 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1728 if (!res)
1729 return -ENODEV;
1730
1731 if (!request_mem_region(res->start, resource_size(res), pdev->name))
1732 return -EBUSY;
1733
1734 ndev = alloc_etherdev(sizeof(struct xgmac_priv));
1735 if (!ndev) {
1736 ret = -ENOMEM;
1737 goto err_alloc;
1738 }
1739
1740 SET_NETDEV_DEV(ndev, &pdev->dev);
1741 priv = netdev_priv(ndev);
1742 platform_set_drvdata(pdev, ndev);
1743 ether_setup(ndev);
1744 ndev->netdev_ops = &xgmac_netdev_ops;
1745 SET_ETHTOOL_OPS(ndev, &xgmac_ethtool_ops);
1746 spin_lock_init(&priv->stats_lock);
1747	INIT_WORK(&priv->tx_timeout_work, xgmac_tx_timeout_work);
1748
1749 priv->device = &pdev->dev;
1750 priv->dev = ndev;
1751 priv->rx_pause = 1;
1752 priv->tx_pause = 1;
1753
1754 priv->base = ioremap(res->start, resource_size(res));
1755 if (!priv->base) {
1756 netdev_err(ndev, "ioremap failed\n");
1757 ret = -ENOMEM;
1758 goto err_io;
1759 }
1760
1761 uid = readl(priv->base + XGMAC_VERSION);
1762 netdev_info(ndev, "h/w version is 0x%x\n", uid);
1763
1764 writel(0, priv->base + XGMAC_DMA_INTR_ENA);
1765 ndev->irq = platform_get_irq(pdev, 0);
1766 if (ndev->irq == -ENXIO) {
1767 netdev_err(ndev, "No irq resource\n");
1768 ret = ndev->irq;
1769 goto err_irq;
1770 }
1771
1772 ret = request_irq(ndev->irq, xgmac_interrupt, 0,
1773 dev_name(&pdev->dev), ndev);
1774 if (ret < 0) {
1775		netdev_err(ndev, "Could not request irq %d - ret %d\n",
1776 ndev->irq, ret);
1777 goto err_irq;
1778 }
1779
1780 priv->pmt_irq = platform_get_irq(pdev, 1);
1781 if (priv->pmt_irq == -ENXIO) {
1782 netdev_err(ndev, "No pmt irq resource\n");
1783 ret = priv->pmt_irq;
1784 goto err_pmt_irq;
1785 }
1786
1787 ret = request_irq(priv->pmt_irq, xgmac_pmt_interrupt, 0,
1788 dev_name(&pdev->dev), ndev);
1789 if (ret < 0) {
1790		netdev_err(ndev, "Could not request irq %d - ret %d\n",
1791 priv->pmt_irq, ret);
1792 goto err_pmt_irq;
1793 }
1794
1795 device_set_wakeup_capable(&pdev->dev, 1);
1796 if (device_can_wakeup(priv->device))
1797 priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
1798
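	/* SG and high-DMA are always advertised; checksum offloads are added
	 * only when the DMA hardware feature register reports a TX checksum
	 * engine (DMA_HW_FEAT_TXCOESEL).
	 */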
1799	ndev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA;
1800 if (readl(priv->base + XGMAC_DMA_HW_FEATURE) & DMA_HW_FEAT_TXCOESEL)
1801 ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1802 NETIF_F_RXCSUM;
1803 ndev->features |= ndev->hw_features;
1804 ndev->priv_flags |= IFF_UNICAST_FLT;
1805
1806 /* Get the MAC address */
1807 xgmac_get_mac_addr(priv->base, ndev->dev_addr, 0);
1808 if (!is_valid_ether_addr(ndev->dev_addr))
1809 netdev_warn(ndev, "MAC address %pM not valid",
1810 ndev->dev_addr);
1811
1812 netif_napi_add(ndev, &priv->napi, xgmac_poll, 64);
1813 ret = register_netdev(ndev);
1814 if (ret)
1815 goto err_reg;
1816
1817 return 0;
1818
1819err_reg:
1820 netif_napi_del(&priv->napi);
1821 free_irq(priv->pmt_irq, ndev);
1822err_pmt_irq:
1823 free_irq(ndev->irq, ndev);
1824err_irq:
1825 iounmap(priv->base);
1826err_io:
1827 free_netdev(ndev);
1828err_alloc:
1829 release_mem_region(res->start, resource_size(res));
1830 return ret;
1831}
1832
1833/**
1834 * xgmac_remove
1835 * @pdev: platform device pointer
1836 * Description: this function disables the MAC, frees the IRQ lines,
1837 * unregisters the network device, releases the DMA resources and
1838 * unmaps the allocated memory.
1839 */
1840static int xgmac_remove(struct platform_device *pdev)
1841{
1842 struct net_device *ndev = platform_get_drvdata(pdev);
1843 struct xgmac_priv *priv = netdev_priv(ndev);
1844 struct resource *res;
1845
1846 xgmac_mac_disable(priv->base);
1847
1848 /* Free the IRQ lines */
1849 free_irq(ndev->irq, ndev);
1850 free_irq(priv->pmt_irq, ndev);
1851
1852 unregister_netdev(ndev);
1853 netif_napi_del(&priv->napi);
1854
1855 iounmap(priv->base);
1856 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1857 release_mem_region(res->start, resource_size(res));
1858
1859 free_netdev(ndev);
1860
1861 return 0;
1862}
1863
1864#ifdef CONFIG_PM_SLEEP
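/* Program the PMT register: power-down plus the selected wake-up event(s)
 * (magic packet and/or global unicast). Writing 0 on resume clears the
 * power-down state.
 */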
1865static void xgmac_pmt(void __iomem *ioaddr, unsigned long mode)
1866{
1867 unsigned int pmt = 0;
1868
1869 if (mode & WAKE_MAGIC)
1870		pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT_EN;
1871 if (mode & WAKE_UCAST)
1872 pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST;
1873
1874 writel(pmt, ioaddr + XGMAC_PMT);
1875}
1876
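/* On suspend with wake-up enabled, only the TX/RX DMA engines are stopped
 * and the PMT is armed so the MAC can still match wake frames; otherwise
 * the whole MAC is disabled.
 */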
1877static int xgmac_suspend(struct device *dev)
1878{
1879 struct net_device *ndev = platform_get_drvdata(to_platform_device(dev));
1880 struct xgmac_priv *priv = netdev_priv(ndev);
1881 u32 value;
1882
1883 if (!ndev || !netif_running(ndev))
1884 return 0;
1885
1886 netif_device_detach(ndev);
1887 napi_disable(&priv->napi);
1888 writel(0, priv->base + XGMAC_DMA_INTR_ENA);
1889
1890 if (device_may_wakeup(priv->device)) {
1891 /* Stop TX/RX DMA Only */
1892 value = readl(priv->base + XGMAC_DMA_CONTROL);
1893 value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR);
1894 writel(value, priv->base + XGMAC_DMA_CONTROL);
1895
1896 xgmac_pmt(priv->base, priv->wolopts);
1897 } else
1898 xgmac_mac_disable(priv->base);
1899
1900 return 0;
1901}
1902
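/* On resume, clear the PMT power-down state, re-enable the MAC, ack and
 * re-enable the DMA interrupts, then reattach the device and restart NAPI.
 */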
1903static int xgmac_resume(struct device *dev)
1904{
1905 struct net_device *ndev = platform_get_drvdata(to_platform_device(dev));
1906 struct xgmac_priv *priv = netdev_priv(ndev);
1907 void __iomem *ioaddr = priv->base;
1908
1909 if (!netif_running(ndev))
1910 return 0;
1911
1912 xgmac_pmt(ioaddr, 0);
1913
1914 /* Enable the MAC and DMA */
1915 xgmac_mac_enable(ioaddr);
1916 writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
1917 writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
1918
1919 netif_device_attach(ndev);
1920 napi_enable(&priv->napi);
1921
1922 return 0;
1923}
1924#endif /* CONFIG_PM_SLEEP */
1925
1926static SIMPLE_DEV_PM_OPS(xgmac_pm_ops, xgmac_suspend, xgmac_resume);
1927
1928static const struct of_device_id xgmac_of_match[] = {
1929 { .compatible = "calxeda,hb-xgmac", },
1930 {},
1931};
1932MODULE_DEVICE_TABLE(of, xgmac_of_match);
1933
1934static struct platform_driver xgmac_driver = {
1935 .driver = {
1936 .name = "calxedaxgmac",
1937 .of_match_table = xgmac_of_match,
1938 },
1939 .probe = xgmac_probe,
1940 .remove = xgmac_remove,
1941	.driver.pm = &xgmac_pm_ops,
1942};
1943
1944module_platform_driver(xgmac_driver);
1945
1946MODULE_AUTHOR("Calxeda, Inc.");
1947MODULE_DESCRIPTION("Calxeda 10G XGMAC driver");
1948MODULE_LICENSE("GPL v2");