/*
 * mv643xx_eth.h - private definitions for the Marvell MV643XX
 * Gigabit Ethernet driver.
 */
1 | #ifndef __MV643XX_ETH_H__ |
2 | #define __MV643XX_ETH_H__ | |
3 | ||
1da177e4 LT |
4 | #include <linux/module.h> |
5 | #include <linux/kernel.h> | |
6 | #include <linux/spinlock.h> | |
7 | #include <linux/workqueue.h> | |
c28a4f89 | 8 | #include <linux/mii.h> |
1da177e4 LT |
9 | |
10 | #include <linux/mv643xx.h> | |
11 | ||
471a5671 DJ |
12 | #include <asm/dma-mapping.h> |
13 | ||
/* Checksum offload for Tx works for most packets, but
 * fails if previous packet sent did not use hw csum
 */
#define MV643XX_CHECKSUM_OFFLOAD_TX
#define MV643XX_NAPI
#define MV643XX_TX_FAST_REFILL
#undef MV643XX_COAL

/*
 * Number of RX / TX descriptors on RX / TX rings.
 * Note that allocating RX descriptors is done by allocating the RX
 * ring AND a preallocated RX buffers (skb's) for each descriptor.
 * The TX descriptors only allocate the TX descriptors ring,
 * with no pre-allocated TX buffers (skb's are allocated by higher layers).
 */

/* Default TX ring size is 1000 descriptors */
#define MV643XX_DEFAULT_TX_QUEUE_SIZE 1000

/* Default RX ring size is 400 descriptors */
#define MV643XX_DEFAULT_RX_QUEUE_SIZE 400

#define MV643XX_TX_COAL 100
#ifdef MV643XX_COAL
#define MV643XX_RX_COAL 100
#endif
#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
#define MAX_DESCS_PER_SKB	(MAX_SKB_FRAGS + 1)
#else
#define MAX_DESCS_PER_SKB	1
#endif

#define ETH_VLAN_HLEN		4
#define ETH_FCS_LEN		4
#define ETH_HW_IP_ALIGN		2	/* hw aligns IP header */
#define ETH_WRAPPER_LEN		(ETH_HW_IP_ALIGN + ETH_HLEN + \
					ETH_VLAN_HLEN + ETH_FCS_LEN)
#define ETH_RX_SKB_SIZE		(dev->mtu + ETH_WRAPPER_LEN + dma_get_cache_alignment())

#define ETH_RX_QUEUES_ENABLED	(1 << 0)	/* use only Q0 for receive */
#define ETH_TX_QUEUES_ENABLED	(1 << 0)	/* use only Q0 for transmit */

#define ETH_INT_CAUSE_RX_DONE	(ETH_RX_QUEUES_ENABLED << 2)
#define ETH_INT_CAUSE_RX_ERROR	(ETH_RX_QUEUES_ENABLED << 9)
#define ETH_INT_CAUSE_RX	(ETH_INT_CAUSE_RX_DONE | ETH_INT_CAUSE_RX_ERROR)
#define ETH_INT_CAUSE_EXT	0x00000002
#define ETH_INT_UNMASK_ALL	(ETH_INT_CAUSE_RX | ETH_INT_CAUSE_EXT)

#define ETH_INT_CAUSE_TX_DONE	(ETH_TX_QUEUES_ENABLED << 0)
#define ETH_INT_CAUSE_TX_ERROR	(ETH_TX_QUEUES_ENABLED << 8)
#define ETH_INT_CAUSE_TX	(ETH_INT_CAUSE_TX_DONE | ETH_INT_CAUSE_TX_ERROR)
#define ETH_INT_CAUSE_PHY	0x00010000
#define ETH_INT_CAUSE_STATE	0x00100000
#define ETH_INT_UNMASK_ALL_EXT	(ETH_INT_CAUSE_TX | ETH_INT_CAUSE_PHY | \
					ETH_INT_CAUSE_STATE)

#define ETH_INT_MASK_ALL	0x00000000
#define ETH_INT_MASK_ALL_EXT	0x00000000

#define PHY_WAIT_ITERATIONS	1000	/* 1000 iterations * 10uS = 10mS max */
#define PHY_WAIT_MICRO_SECONDS	10
/* Buffer offset from buffer pointer */
#define RX_BUF_OFFSET				0x2

/* Gigabit Ethernet Unit Global Registers */

/* MIB Counters register definitions */
#define ETH_MIB_GOOD_OCTETS_RECEIVED_LOW	0x0
#define ETH_MIB_GOOD_OCTETS_RECEIVED_HIGH	0x4
#define ETH_MIB_BAD_OCTETS_RECEIVED		0x8
#define ETH_MIB_INTERNAL_MAC_TRANSMIT_ERR	0xc
#define ETH_MIB_GOOD_FRAMES_RECEIVED		0x10
#define ETH_MIB_BAD_FRAMES_RECEIVED		0x14
#define ETH_MIB_BROADCAST_FRAMES_RECEIVED	0x18
#define ETH_MIB_MULTICAST_FRAMES_RECEIVED	0x1c
#define ETH_MIB_FRAMES_64_OCTETS		0x20
#define ETH_MIB_FRAMES_65_TO_127_OCTETS		0x24
#define ETH_MIB_FRAMES_128_TO_255_OCTETS	0x28
#define ETH_MIB_FRAMES_256_TO_511_OCTETS	0x2c
#define ETH_MIB_FRAMES_512_TO_1023_OCTETS	0x30
#define ETH_MIB_FRAMES_1024_TO_MAX_OCTETS	0x34
#define ETH_MIB_GOOD_OCTETS_SENT_LOW		0x38
#define ETH_MIB_GOOD_OCTETS_SENT_HIGH		0x3c
#define ETH_MIB_GOOD_FRAMES_SENT		0x40
#define ETH_MIB_EXCESSIVE_COLLISION		0x44
#define ETH_MIB_MULTICAST_FRAMES_SENT		0x48
#define ETH_MIB_BROADCAST_FRAMES_SENT		0x4c
#define ETH_MIB_UNREC_MAC_CONTROL_RECEIVED	0x50
#define ETH_MIB_FC_SENT				0x54
#define ETH_MIB_GOOD_FC_RECEIVED		0x58
#define ETH_MIB_BAD_FC_RECEIVED			0x5c
#define ETH_MIB_UNDERSIZE_RECEIVED		0x60
#define ETH_MIB_FRAGMENTS_RECEIVED		0x64
#define ETH_MIB_OVERSIZE_RECEIVED		0x68
#define ETH_MIB_JABBER_RECEIVED			0x6c
#define ETH_MIB_MAC_RECEIVE_ERROR		0x70
#define ETH_MIB_BAD_CRC_EVENT			0x74
#define ETH_MIB_COLLISION			0x78
#define ETH_MIB_LATE_COLLISION			0x7c
/* Port serial status reg (PSR) */
#define ETH_INTERFACE_PCM			0x00000001
#define ETH_LINK_IS_UP				0x00000002
#define ETH_PORT_AT_FULL_DUPLEX			0x00000004
#define ETH_RX_FLOW_CTRL_ENABLED		0x00000008
#define ETH_GMII_SPEED_1000			0x00000010
#define ETH_MII_SPEED_100			0x00000020
#define ETH_TX_IN_PROGRESS			0x00000080
#define ETH_BYPASS_ACTIVE			0x00000100
#define ETH_PORT_AT_PARTITION_STATE		0x00000200
#define ETH_PORT_TX_FIFO_EMPTY			0x00000400

/* SMI reg */
#define ETH_SMI_BUSY		0x10000000	/* 0 - Write, 1 - Read	*/
#define ETH_SMI_READ_VALID	0x08000000	/* 0 - Write, 1 - Read	*/
#define ETH_SMI_OPCODE_WRITE	0		/* Completion of Read	*/
#define ETH_SMI_OPCODE_READ	0x04000000	/* Operation is in progress */

/* Interrupt Cause Register Bit Definitions */

/* SDMA command status fields macros */

/* Tx & Rx descriptors status */
#define ETH_ERROR_SUMMARY			0x00000001

/* Tx & Rx descriptors command */
#define ETH_BUFFER_OWNED_BY_DMA			0x80000000

/* Tx descriptors status */
#define ETH_LC_ERROR				0
#define ETH_UR_ERROR				0x00000002
#define ETH_RL_ERROR				0x00000004
#define ETH_LLC_SNAP_FORMAT			0x00000200

/* Rx descriptors status */
#define ETH_OVERRUN_ERROR			0x00000002
#define ETH_MAX_FRAME_LENGTH_ERROR		0x00000004
#define ETH_RESOURCE_ERROR			0x00000006
#define ETH_VLAN_TAGGED				0x00080000
#define ETH_BPDU_FRAME				0x00100000
#define ETH_UDP_FRAME_OVER_IP_V_4		0x00200000
#define ETH_OTHER_FRAME_TYPE			0x00400000
#define ETH_LAYER_2_IS_ETH_V_2			0x00800000
#define ETH_FRAME_TYPE_IP_V_4			0x01000000
#define ETH_FRAME_HEADER_OK			0x02000000
#define ETH_RX_LAST_DESC			0x04000000
#define ETH_RX_FIRST_DESC			0x08000000
#define ETH_UNKNOWN_DESTINATION_ADDR		0x10000000
#define ETH_RX_ENABLE_INTERRUPT			0x20000000
#define ETH_LAYER_4_CHECKSUM_OK			0x40000000

/* Rx descriptors byte count */
#define ETH_FRAME_FRAGMENTED			0x00000004

/* Tx descriptors command */
#define ETH_LAYER_4_CHECKSUM_FIRST_DESC		0x00000400
#define ETH_FRAME_SET_TO_VLAN			0x00008000
#define ETH_UDP_FRAME				0x00010000
#define ETH_GEN_TCP_UDP_CHECKSUM		0x00020000
#define ETH_GEN_IP_V_4_CHECKSUM			0x00040000
#define ETH_ZERO_PADDING			0x00080000
#define ETH_TX_LAST_DESC			0x00100000
#define ETH_TX_FIRST_DESC			0x00200000
#define ETH_GEN_CRC				0x00400000
#define ETH_TX_ENABLE_INTERRUPT			0x00800000
#define ETH_AUTO_MODE				0x40000000

#define ETH_TX_IHL_SHIFT			11
/* typedefs */

/* Return status of the port's Rx/Tx resource-handling routines. */
typedef enum _eth_func_ret_status {
	ETH_OK,			/* Returned as expected.		*/
	ETH_ERROR,		/* Fundamental error.			*/
	ETH_RETRY,		/* Could not process request. Try later.*/
	ETH_END_OF_JOB,		/* Ring has nothing to process.		*/
	ETH_QUEUE_FULL,		/* Ring resource error.			*/
	ETH_QUEUE_LAST_RESOURCE	/* Ring resources about to exhaust.	*/
} ETH_FUNC_RET_STATUS;
/* DMA target interface selector for descriptor buffers. */
typedef enum _eth_target {
	ETH_TARGET_DRAM,
	ETH_TARGET_DEVICE,
	ETH_TARGET_CBS,
	ETH_TARGET_PCI0,
	ETH_TARGET_PCI1
} ETH_TARGET;
204 | /* These are for big-endian machines. Little endian needs different | |
205 | * definitions. | |
206 | */ | |
207 | #if defined(__BIG_ENDIAN) | |
208 | struct eth_rx_desc { | |
209 | u16 byte_cnt; /* Descriptor buffer byte count */ | |
210 | u16 buf_size; /* Buffer size */ | |
211 | u32 cmd_sts; /* Descriptor command status */ | |
212 | u32 next_desc_ptr; /* Next descriptor pointer */ | |
213 | u32 buf_ptr; /* Descriptor buffer pointer */ | |
214 | }; | |
215 | ||
216 | struct eth_tx_desc { | |
217 | u16 byte_cnt; /* buffer byte count */ | |
218 | u16 l4i_chk; /* CPU provided TCP checksum */ | |
219 | u32 cmd_sts; /* Command/status field */ | |
220 | u32 next_desc_ptr; /* Pointer to next descriptor */ | |
221 | u32 buf_ptr; /* pointer to buffer for this descriptor*/ | |
222 | }; | |
223 | ||
224 | #elif defined(__LITTLE_ENDIAN) | |
225 | struct eth_rx_desc { | |
226 | u32 cmd_sts; /* Descriptor command status */ | |
227 | u16 buf_size; /* Buffer size */ | |
228 | u16 byte_cnt; /* Descriptor buffer byte count */ | |
229 | u32 buf_ptr; /* Descriptor buffer pointer */ | |
230 | u32 next_desc_ptr; /* Next descriptor pointer */ | |
231 | }; | |
232 | ||
233 | struct eth_tx_desc { | |
234 | u32 cmd_sts; /* Command/status field */ | |
235 | u16 l4i_chk; /* CPU provided TCP checksum */ | |
236 | u16 byte_cnt; /* buffer byte count */ | |
237 | u32 buf_ptr; /* pointer to buffer for this descriptor*/ | |
238 | u32 next_desc_ptr; /* Pointer to next descriptor */ | |
239 | }; | |
240 | #else | |
241 | #error One of __BIG_ENDIAN or __LITTLE_ENDIAN must be defined | |
242 | #endif | |
243 | ||
244 | /* Unified struct for Rx and Tx operations. The user is not required to */ | |
245 | /* be familier with neither Tx nor Rx descriptors. */ | |
246 | struct pkt_info { | |
247 | unsigned short byte_cnt; /* Descriptor buffer byte count */ | |
248 | unsigned short l4i_chk; /* Tx CPU provided TCP Checksum */ | |
249 | unsigned int cmd_sts; /* Descriptor command status */ | |
250 | dma_addr_t buf_ptr; /* Descriptor buffer pointer */ | |
251 | struct sk_buff *return_info; /* User resource return information */ | |
252 | }; | |
253 | ||
47bdd718 | 254 | /* Ethernet port specific information */ |
1da177e4 LT |
255 | |
256 | struct mv643xx_mib_counters { | |
257 | u64 good_octets_received; | |
258 | u32 bad_octets_received; | |
259 | u32 internal_mac_transmit_err; | |
260 | u32 good_frames_received; | |
261 | u32 bad_frames_received; | |
262 | u32 broadcast_frames_received; | |
263 | u32 multicast_frames_received; | |
264 | u32 frames_64_octets; | |
265 | u32 frames_65_to_127_octets; | |
266 | u32 frames_128_to_255_octets; | |
267 | u32 frames_256_to_511_octets; | |
268 | u32 frames_512_to_1023_octets; | |
269 | u32 frames_1024_to_max_octets; | |
270 | u64 good_octets_sent; | |
271 | u32 good_frames_sent; | |
272 | u32 excessive_collision; | |
273 | u32 multicast_frames_sent; | |
274 | u32 broadcast_frames_sent; | |
275 | u32 unrec_mac_control_received; | |
276 | u32 fc_sent; | |
277 | u32 good_fc_received; | |
278 | u32 bad_fc_received; | |
279 | u32 undersize_received; | |
280 | u32 fragments_received; | |
281 | u32 oversize_received; | |
282 | u32 jabber_received; | |
283 | u32 mac_receive_error; | |
284 | u32 bad_crc_event; | |
285 | u32 collision; | |
286 | u32 late_collision; | |
287 | }; | |
288 | ||
289 | struct mv643xx_private { | |
290 | int port_num; /* User Ethernet port number */ | |
1da177e4 LT |
291 | |
292 | u32 rx_sram_addr; /* Base address of rx sram area */ | |
293 | u32 rx_sram_size; /* Size of rx sram area */ | |
294 | u32 tx_sram_addr; /* Base address of tx sram area */ | |
295 | u32 tx_sram_size; /* Size of tx sram area */ | |
296 | ||
297 | int rx_resource_err; /* Rx ring resource error flag */ | |
1da177e4 LT |
298 | |
299 | /* Tx/Rx rings managment indexes fields. For driver use */ | |
300 | ||
301 | /* Next available and first returning Rx resource */ | |
302 | int rx_curr_desc_q, rx_used_desc_q; | |
303 | ||
304 | /* Next available and first returning Tx resource */ | |
305 | int tx_curr_desc_q, tx_used_desc_q; | |
1da177e4 LT |
306 | |
307 | #ifdef MV643XX_TX_FAST_REFILL | |
308 | u32 tx_clean_threshold; | |
309 | #endif | |
310 | ||
311 | struct eth_rx_desc *p_rx_desc_area; | |
312 | dma_addr_t rx_desc_dma; | |
c8aaea25 | 313 | int rx_desc_area_size; |
1da177e4 LT |
314 | struct sk_buff **rx_skb; |
315 | ||
316 | struct eth_tx_desc *p_tx_desc_area; | |
317 | dma_addr_t tx_desc_dma; | |
c8aaea25 | 318 | int tx_desc_area_size; |
1da177e4 LT |
319 | struct sk_buff **tx_skb; |
320 | ||
321 | struct work_struct tx_timeout_task; | |
322 | ||
bea3348e SH |
323 | struct net_device *dev; |
324 | struct napi_struct napi; | |
1da177e4 LT |
325 | struct net_device_stats stats; |
326 | struct mv643xx_mib_counters mib_counters; | |
327 | spinlock_t lock; | |
328 | /* Size of Tx Ring per queue */ | |
c8aaea25 | 329 | int tx_ring_size; |
f98e36f1 | 330 | /* Number of tx descriptors in use */ |
c8aaea25 | 331 | int tx_desc_count; |
1da177e4 | 332 | /* Size of Rx Ring per queue */ |
c8aaea25 | 333 | int rx_ring_size; |
f98e36f1 | 334 | /* Number of rx descriptors in use */ |
c8aaea25 | 335 | int rx_desc_count; |
1da177e4 | 336 | |
1da177e4 LT |
337 | /* |
338 | * Used in case RX Ring is empty, which can be caused when | |
339 | * system does not have resources (skb's) | |
340 | */ | |
341 | struct timer_list timeout; | |
1da177e4 LT |
342 | |
343 | u32 rx_int_coal; | |
344 | u32 tx_int_coal; | |
c28a4f89 | 345 | struct mii_if_info mii; |
1da177e4 LT |
346 | }; |
347 | ||
1da177e4 LT |
348 | /* Port operation control routines */ |
349 | static void eth_port_init(struct mv643xx_private *mp); | |
350 | static void eth_port_reset(unsigned int eth_port_num); | |
ed9b5d45 | 351 | static void eth_port_start(struct net_device *dev); |
1da177e4 | 352 | |
1da177e4 LT |
353 | /* PHY and MIB routines */ |
354 | static void ethernet_phy_reset(unsigned int eth_port_num); | |
355 | ||
356 | static void eth_port_write_smi_reg(unsigned int eth_port_num, | |
357 | unsigned int phy_reg, unsigned int value); | |
358 | ||
359 | static void eth_port_read_smi_reg(unsigned int eth_port_num, | |
360 | unsigned int phy_reg, unsigned int *value); | |
361 | ||
362 | static void eth_clear_mib_counters(unsigned int eth_port_num); | |
363 | ||
364 | /* Port data flow control routines */ | |
1da177e4 LT |
365 | static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp, |
366 | struct pkt_info *p_pkt_info); | |
367 | static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp, | |
368 | struct pkt_info *p_pkt_info); | |
369 | ||
370 | #endif /* __MV643XX_ETH_H__ */ |