]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/net/s2io.c
S2io: Fix IOMMU overflow checking.
[mirror_ubuntu-bionic-kernel.git] / drivers / net / s2io.c
CommitLineData
1da177e4 1/************************************************************************
776bd20f 2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
0c61ed5f 3 * Copyright(c) 2002-2007 Neterion Inc.
1da177e4
LT
4
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
12 *
13 * Credits:
20346722
K
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
1da177e4
LT
18 * questions regaring the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
20346722 21 * Francois Romieu : For pointing out all code part that were
1da177e4 22 * deprecated and also styling related comments.
20346722 23 * Grant Grundler : For helping me get rid of some Architecture
1da177e4
LT
24 * dependent code.
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
20346722 26 *
1da177e4
LT
27 * The module loadable parameters that are supported by the driver and a brief
28 * explaination of all the variables.
9dc737a7 29 *
20346722
K
30 * rx_ring_num : This can be used to program the number of receive rings used
31 * in the driver.
9dc737a7
AR
32 * rx_ring_sz: This defines the number of receive blocks each ring can have.
33 * This is also an array of size 8.
da6971d8 34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
6d517a27 35 * values are 1, 2.
1da177e4 36 * tx_fifo_num: This defines the number of Tx FIFOs thats used int the driver.
20346722 37 * tx_fifo_len: This too is an array of 8. Each element defines the number of
1da177e4 38 * Tx descriptors that can be associated with each corresponding FIFO.
9dc737a7 39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
8abc4d5b 40 * 2(MSI_X). Default value is '2(MSI_X)'
43b7c451 41 * lro_enable: Specifies whether to enable Large Receive Offload (LRO) or not.
9dc737a7
AR
42 * Possible values '1' for enable '0' for disable. Default is '0'
43 * lro_max_pkts: This parameter defines maximum number of packets can be
44 * aggregated as a single large packet
926930b2
SS
45 * napi: This parameter used to enable/disable NAPI (polling Rx)
46 * Possible values '1' for enable and '0' for disable. Default is '1'
47 * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
48 * Possible values '1' for enable and '0' for disable. Default is '0'
49 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
50 * Possible values '1' for enable , '0' for disable.
51 * Default is '2' - which means disable in promisc mode
52 * and enable in non-promiscuous mode.
3a3d5756
SH
53 * multiq: This parameter used to enable/disable MULTIQUEUE support.
54 * Possible values '1' for enable and '0' for disable. Default is '0'
1da177e4
LT
55 ************************************************************************/
56
1da177e4
LT
57#include <linux/module.h>
58#include <linux/types.h>
59#include <linux/errno.h>
60#include <linux/ioport.h>
61#include <linux/pci.h>
1e7f0bd8 62#include <linux/dma-mapping.h>
1da177e4
LT
63#include <linux/kernel.h>
64#include <linux/netdevice.h>
65#include <linux/etherdevice.h>
66#include <linux/skbuff.h>
67#include <linux/init.h>
68#include <linux/delay.h>
69#include <linux/stddef.h>
70#include <linux/ioctl.h>
71#include <linux/timex.h>
1da177e4 72#include <linux/ethtool.h>
1da177e4 73#include <linux/workqueue.h>
be3a6b02 74#include <linux/if_vlan.h>
7d3d0439
RA
75#include <linux/ip.h>
76#include <linux/tcp.h>
77#include <net/tcp.h>
1da177e4 78
1da177e4
LT
79#include <asm/system.h>
80#include <asm/uaccess.h>
20346722 81#include <asm/io.h>
fe931395 82#include <asm/div64.h>
330ce0de 83#include <asm/irq.h>
1da177e4
LT
84
85/* local include */
86#include "s2io.h"
87#include "s2io-regs.h"
88
0b5923cd 89#define DRV_VERSION "2.0.26.24"
6c1792f4 90
1da177e4 91/* S2io Driver name & version. */
20346722 92static char s2io_driver_name[] = "Neterion";
6c1792f4 93static char s2io_driver_version[] = DRV_VERSION;
1da177e4 94
6d517a27
VP
95static int rxd_size[2] = {32,48};
96static int rxd_count[2] = {127,85};
da6971d8 97
1ee6dd77 98static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
5e25b9dd
K
99{
100 int ret;
101
102 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
103 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
104
105 return ret;
106}
107
20346722 108/*
1da177e4
LT
109 * Cards with following subsystem_id have a link state indication
110 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
111 * macro below identifies these cards given the subsystem_id.
112 */
541ae68f
K
113#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
114 (dev_type == XFRAME_I_DEVICE) ? \
115 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
116 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
1da177e4
LT
117
118#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
119 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
1da177e4 120
92b84437
SS
121static inline int is_s2io_card_up(const struct s2io_nic * sp)
122{
123 return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
124}
125
1da177e4
LT
126/* Ethtool related variables and Macros. */
127static char s2io_gstrings[][ETH_GSTRING_LEN] = {
128 "Register test\t(offline)",
129 "Eeprom test\t(offline)",
130 "Link test\t(online)",
131 "RLDRAM test\t(offline)",
132 "BIST Test\t(offline)"
133};
134
fa1f0cb3 135static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
1da177e4
LT
136 {"tmac_frms"},
137 {"tmac_data_octets"},
138 {"tmac_drop_frms"},
139 {"tmac_mcst_frms"},
140 {"tmac_bcst_frms"},
141 {"tmac_pause_ctrl_frms"},
bd1034f0
AR
142 {"tmac_ttl_octets"},
143 {"tmac_ucst_frms"},
144 {"tmac_nucst_frms"},
1da177e4 145 {"tmac_any_err_frms"},
bd1034f0 146 {"tmac_ttl_less_fb_octets"},
1da177e4
LT
147 {"tmac_vld_ip_octets"},
148 {"tmac_vld_ip"},
149 {"tmac_drop_ip"},
150 {"tmac_icmp"},
151 {"tmac_rst_tcp"},
152 {"tmac_tcp"},
153 {"tmac_udp"},
154 {"rmac_vld_frms"},
155 {"rmac_data_octets"},
156 {"rmac_fcs_err_frms"},
157 {"rmac_drop_frms"},
158 {"rmac_vld_mcst_frms"},
159 {"rmac_vld_bcst_frms"},
160 {"rmac_in_rng_len_err_frms"},
bd1034f0 161 {"rmac_out_rng_len_err_frms"},
1da177e4
LT
162 {"rmac_long_frms"},
163 {"rmac_pause_ctrl_frms"},
bd1034f0
AR
164 {"rmac_unsup_ctrl_frms"},
165 {"rmac_ttl_octets"},
166 {"rmac_accepted_ucst_frms"},
167 {"rmac_accepted_nucst_frms"},
1da177e4 168 {"rmac_discarded_frms"},
bd1034f0
AR
169 {"rmac_drop_events"},
170 {"rmac_ttl_less_fb_octets"},
171 {"rmac_ttl_frms"},
1da177e4
LT
172 {"rmac_usized_frms"},
173 {"rmac_osized_frms"},
174 {"rmac_frag_frms"},
175 {"rmac_jabber_frms"},
bd1034f0
AR
176 {"rmac_ttl_64_frms"},
177 {"rmac_ttl_65_127_frms"},
178 {"rmac_ttl_128_255_frms"},
179 {"rmac_ttl_256_511_frms"},
180 {"rmac_ttl_512_1023_frms"},
181 {"rmac_ttl_1024_1518_frms"},
1da177e4
LT
182 {"rmac_ip"},
183 {"rmac_ip_octets"},
184 {"rmac_hdr_err_ip"},
185 {"rmac_drop_ip"},
186 {"rmac_icmp"},
187 {"rmac_tcp"},
188 {"rmac_udp"},
189 {"rmac_err_drp_udp"},
bd1034f0
AR
190 {"rmac_xgmii_err_sym"},
191 {"rmac_frms_q0"},
192 {"rmac_frms_q1"},
193 {"rmac_frms_q2"},
194 {"rmac_frms_q3"},
195 {"rmac_frms_q4"},
196 {"rmac_frms_q5"},
197 {"rmac_frms_q6"},
198 {"rmac_frms_q7"},
199 {"rmac_full_q0"},
200 {"rmac_full_q1"},
201 {"rmac_full_q2"},
202 {"rmac_full_q3"},
203 {"rmac_full_q4"},
204 {"rmac_full_q5"},
205 {"rmac_full_q6"},
206 {"rmac_full_q7"},
1da177e4 207 {"rmac_pause_cnt"},
bd1034f0
AR
208 {"rmac_xgmii_data_err_cnt"},
209 {"rmac_xgmii_ctrl_err_cnt"},
1da177e4
LT
210 {"rmac_accepted_ip"},
211 {"rmac_err_tcp"},
bd1034f0
AR
212 {"rd_req_cnt"},
213 {"new_rd_req_cnt"},
214 {"new_rd_req_rtry_cnt"},
215 {"rd_rtry_cnt"},
216 {"wr_rtry_rd_ack_cnt"},
217 {"wr_req_cnt"},
218 {"new_wr_req_cnt"},
219 {"new_wr_req_rtry_cnt"},
220 {"wr_rtry_cnt"},
221 {"wr_disc_cnt"},
222 {"rd_rtry_wr_ack_cnt"},
223 {"txp_wr_cnt"},
224 {"txd_rd_cnt"},
225 {"txd_wr_cnt"},
226 {"rxd_rd_cnt"},
227 {"rxd_wr_cnt"},
228 {"txf_rd_cnt"},
fa1f0cb3
SS
229 {"rxf_wr_cnt"}
230};
231
232static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
bd1034f0
AR
233 {"rmac_ttl_1519_4095_frms"},
234 {"rmac_ttl_4096_8191_frms"},
235 {"rmac_ttl_8192_max_frms"},
236 {"rmac_ttl_gt_max_frms"},
237 {"rmac_osized_alt_frms"},
238 {"rmac_jabber_alt_frms"},
239 {"rmac_gt_max_alt_frms"},
240 {"rmac_vlan_frms"},
241 {"rmac_len_discard"},
242 {"rmac_fcs_discard"},
243 {"rmac_pf_discard"},
244 {"rmac_da_discard"},
245 {"rmac_red_discard"},
246 {"rmac_rts_discard"},
247 {"rmac_ingm_full_discard"},
fa1f0cb3
SS
248 {"link_fault_cnt"}
249};
250
251static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
7ba013ac
K
252 {"\n DRIVER STATISTICS"},
253 {"single_bit_ecc_errs"},
254 {"double_bit_ecc_errs"},
bd1034f0
AR
255 {"parity_err_cnt"},
256 {"serious_err_cnt"},
257 {"soft_reset_cnt"},
258 {"fifo_full_cnt"},
8116f3cf
SS
259 {"ring_0_full_cnt"},
260 {"ring_1_full_cnt"},
261 {"ring_2_full_cnt"},
262 {"ring_3_full_cnt"},
263 {"ring_4_full_cnt"},
264 {"ring_5_full_cnt"},
265 {"ring_6_full_cnt"},
266 {"ring_7_full_cnt"},
43b7c451
SH
267 {"alarm_transceiver_temp_high"},
268 {"alarm_transceiver_temp_low"},
269 {"alarm_laser_bias_current_high"},
270 {"alarm_laser_bias_current_low"},
271 {"alarm_laser_output_power_high"},
272 {"alarm_laser_output_power_low"},
273 {"warn_transceiver_temp_high"},
274 {"warn_transceiver_temp_low"},
275 {"warn_laser_bias_current_high"},
276 {"warn_laser_bias_current_low"},
277 {"warn_laser_output_power_high"},
278 {"warn_laser_output_power_low"},
279 {"lro_aggregated_pkts"},
280 {"lro_flush_both_count"},
281 {"lro_out_of_sequence_pkts"},
282 {"lro_flush_due_to_max_pkts"},
283 {"lro_avg_aggr_pkts"},
284 {"mem_alloc_fail_cnt"},
285 {"pci_map_fail_cnt"},
286 {"watchdog_timer_cnt"},
287 {"mem_allocated"},
288 {"mem_freed"},
289 {"link_up_cnt"},
290 {"link_down_cnt"},
291 {"link_up_time"},
292 {"link_down_time"},
293 {"tx_tcode_buf_abort_cnt"},
294 {"tx_tcode_desc_abort_cnt"},
295 {"tx_tcode_parity_err_cnt"},
296 {"tx_tcode_link_loss_cnt"},
297 {"tx_tcode_list_proc_err_cnt"},
298 {"rx_tcode_parity_err_cnt"},
299 {"rx_tcode_abort_cnt"},
300 {"rx_tcode_parity_abort_cnt"},
301 {"rx_tcode_rda_fail_cnt"},
302 {"rx_tcode_unkn_prot_cnt"},
303 {"rx_tcode_fcs_err_cnt"},
304 {"rx_tcode_buf_size_err_cnt"},
305 {"rx_tcode_rxd_corrupt_cnt"},
306 {"rx_tcode_unkn_err_cnt"},
8116f3cf
SS
307 {"tda_err_cnt"},
308 {"pfc_err_cnt"},
309 {"pcc_err_cnt"},
310 {"tti_err_cnt"},
311 {"tpa_err_cnt"},
312 {"sm_err_cnt"},
313 {"lso_err_cnt"},
314 {"mac_tmac_err_cnt"},
315 {"mac_rmac_err_cnt"},
316 {"xgxs_txgxs_err_cnt"},
317 {"xgxs_rxgxs_err_cnt"},
318 {"rc_err_cnt"},
319 {"prc_pcix_err_cnt"},
320 {"rpa_err_cnt"},
321 {"rda_err_cnt"},
322 {"rti_err_cnt"},
323 {"mc_err_cnt"}
1da177e4
LT
324};
325
4c3616cd
AMR
326#define S2IO_XENA_STAT_LEN ARRAY_SIZE(ethtool_xena_stats_keys)
327#define S2IO_ENHANCED_STAT_LEN ARRAY_SIZE(ethtool_enhanced_stats_keys)
328#define S2IO_DRIVER_STAT_LEN ARRAY_SIZE(ethtool_driver_stats_keys)
fa1f0cb3
SS
329
330#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN )
331#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN )
332
333#define XFRAME_I_STAT_STRINGS_LEN ( XFRAME_I_STAT_LEN * ETH_GSTRING_LEN )
334#define XFRAME_II_STAT_STRINGS_LEN ( XFRAME_II_STAT_LEN * ETH_GSTRING_LEN )
1da177e4 335
4c3616cd 336#define S2IO_TEST_LEN ARRAY_SIZE(s2io_gstrings)
1da177e4
LT
337#define S2IO_STRINGS_LEN S2IO_TEST_LEN * ETH_GSTRING_LEN
338
25fff88e
K
/*
 * S2IO_TIMER_CONF - initialize @timer with @handle/@arg and arm it to
 * fire @exp jiffies from now.
 *
 * Wrapped in do { } while (0) so the macro expands as a single statement;
 * the previous bare multi-statement form would silently misbehave when
 * used as the body of an un-braced if/else.  Callers still terminate the
 * invocation with a semicolon, so this change is source-compatible.
 */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)	\
	do {						\
		init_timer(&timer);			\
		timer.function = handle;		\
		timer.data = (unsigned long) arg;	\
		mod_timer(&timer, (jiffies + exp));	\
	} while (0)
2fd37688
SS
345/* copy mac addr to def_mac_addr array */
346static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
347{
348 sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
349 sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
350 sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
351 sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
352 sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
353 sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
354}
be3a6b02
K
355/* Add the vlan */
356static void s2io_vlan_rx_register(struct net_device *dev,
357 struct vlan_group *grp)
358{
2fda096d 359 int i;
1ee6dd77 360 struct s2io_nic *nic = dev->priv;
2fda096d
SR
361 unsigned long flags[MAX_TX_FIFOS];
362 struct mac_info *mac_control = &nic->mac_control;
363 struct config_param *config = &nic->config;
364
365 for (i = 0; i < config->tx_fifo_num; i++)
366 spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags[i]);
be3a6b02 367
be3a6b02 368 nic->vlgrp = grp;
2fda096d
SR
369 for (i = config->tx_fifo_num - 1; i >= 0; i--)
370 spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock,
371 flags[i]);
be3a6b02
K
372}
373
926930b2 374/* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
7b490343 375static int vlan_strip_flag;
926930b2 376
cdb5bf02
SH
377/* Unregister the vlan */
378static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
379{
380 int i;
381 struct s2io_nic *nic = dev->priv;
382 unsigned long flags[MAX_TX_FIFOS];
383 struct mac_info *mac_control = &nic->mac_control;
384 struct config_param *config = &nic->config;
385
386 for (i = 0; i < config->tx_fifo_num; i++)
387 spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags[i]);
388
389 if (nic->vlgrp)
390 vlan_group_set_device(nic->vlgrp, vid, NULL);
391
392 for (i = config->tx_fifo_num - 1; i >= 0; i--)
393 spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock,
394 flags[i]);
395}
396
20346722 397/*
1da177e4
LT
398 * Constants to be programmed into the Xena's registers, to configure
399 * the XAUI.
400 */
401
1da177e4 402#define END_SIGN 0x0
f71e1309 403static const u64 herc_act_dtx_cfg[] = {
541ae68f 404 /* Set address */
e960fc5c 405 0x8000051536750000ULL, 0x80000515367500E0ULL,
541ae68f 406 /* Write data */
e960fc5c 407 0x8000051536750004ULL, 0x80000515367500E4ULL,
541ae68f
K
408 /* Set address */
409 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
410 /* Write data */
411 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
412 /* Set address */
e960fc5c 413 0x801205150D440000ULL, 0x801205150D4400E0ULL,
414 /* Write data */
415 0x801205150D440004ULL, 0x801205150D4400E4ULL,
416 /* Set address */
541ae68f
K
417 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
418 /* Write data */
419 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
420 /* Done */
421 END_SIGN
422};
423
f71e1309 424static const u64 xena_dtx_cfg[] = {
c92ca04b 425 /* Set address */
1da177e4 426 0x8000051500000000ULL, 0x80000515000000E0ULL,
c92ca04b
AR
427 /* Write data */
428 0x80000515D9350004ULL, 0x80000515D93500E4ULL,
429 /* Set address */
430 0x8001051500000000ULL, 0x80010515000000E0ULL,
431 /* Write data */
432 0x80010515001E0004ULL, 0x80010515001E00E4ULL,
433 /* Set address */
1da177e4 434 0x8002051500000000ULL, 0x80020515000000E0ULL,
c92ca04b
AR
435 /* Write data */
436 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
1da177e4
LT
437 END_SIGN
438};
439
20346722 440/*
1da177e4
LT
441 * Constants for Fixing the MacAddress problem seen mostly on
442 * Alpha machines.
443 */
f71e1309 444static const u64 fix_mac[] = {
1da177e4
LT
445 0x0060000000000000ULL, 0x0060600000000000ULL,
446 0x0040600000000000ULL, 0x0000600000000000ULL,
447 0x0020600000000000ULL, 0x0060600000000000ULL,
448 0x0020600000000000ULL, 0x0060600000000000ULL,
449 0x0020600000000000ULL, 0x0060600000000000ULL,
450 0x0020600000000000ULL, 0x0060600000000000ULL,
451 0x0020600000000000ULL, 0x0060600000000000ULL,
452 0x0020600000000000ULL, 0x0060600000000000ULL,
453 0x0020600000000000ULL, 0x0060600000000000ULL,
454 0x0020600000000000ULL, 0x0060600000000000ULL,
455 0x0020600000000000ULL, 0x0060600000000000ULL,
456 0x0020600000000000ULL, 0x0060600000000000ULL,
457 0x0020600000000000ULL, 0x0000600000000000ULL,
458 0x0040600000000000ULL, 0x0060600000000000ULL,
459 END_SIGN
460};
461
b41477f3
AR
462MODULE_LICENSE("GPL");
463MODULE_VERSION(DRV_VERSION);
464
465
1da177e4 466/* Module Loadable parameters. */
6cfc482b 467S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
b41477f3 468S2IO_PARM_INT(rx_ring_num, 1);
3a3d5756 469S2IO_PARM_INT(multiq, 0);
b41477f3
AR
470S2IO_PARM_INT(rx_ring_mode, 1);
471S2IO_PARM_INT(use_continuous_tx_intrs, 1);
472S2IO_PARM_INT(rmac_pause_time, 0x100);
473S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
474S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
475S2IO_PARM_INT(shared_splits, 0);
476S2IO_PARM_INT(tmac_util_period, 5);
477S2IO_PARM_INT(rmac_util_period, 5);
b41477f3 478S2IO_PARM_INT(l3l4hdr_size, 128);
6cfc482b
SH
479/* 0 is no steering, 1 is Priority steering, 2 is Default steering */
480S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
303bcb4b 481/* Frequency of Rx desc syncs expressed as power of 2 */
b41477f3 482S2IO_PARM_INT(rxsync_frequency, 3);
eccb8628 483/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
8abc4d5b 484S2IO_PARM_INT(intr_type, 2);
7d3d0439 485/* Large receive offload feature */
43b7c451
SH
486static unsigned int lro_enable;
487module_param_named(lro, lro_enable, uint, 0);
488
7d3d0439
RA
489/* Max pkts to be aggregated by LRO at one time. If not specified,
490 * aggregation happens until we hit max IP pkt size(64K)
491 */
b41477f3 492S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
b41477f3 493S2IO_PARM_INT(indicate_max_pkts, 0);
db874e65
SS
494
495S2IO_PARM_INT(napi, 1);
496S2IO_PARM_INT(ufo, 0);
926930b2 497S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
b41477f3
AR
498
499static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
500 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
501static unsigned int rx_ring_sz[MAX_RX_RINGS] =
502 {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
503static unsigned int rts_frm_len[MAX_RX_RINGS] =
504 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
505
506module_param_array(tx_fifo_len, uint, NULL, 0);
507module_param_array(rx_ring_sz, uint, NULL, 0);
508module_param_array(rts_frm_len, uint, NULL, 0);
1da177e4 509
20346722 510/*
1da177e4 511 * S2IO device table.
20346722 512 * This table lists all the devices that this driver supports.
1da177e4
LT
513 */
514static struct pci_device_id s2io_tbl[] __devinitdata = {
515 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
516 PCI_ANY_ID, PCI_ANY_ID},
517 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
518 PCI_ANY_ID, PCI_ANY_ID},
519 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
20346722
K
520 PCI_ANY_ID, PCI_ANY_ID},
521 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
522 PCI_ANY_ID, PCI_ANY_ID},
1da177e4
LT
523 {0,}
524};
525
526MODULE_DEVICE_TABLE(pci, s2io_tbl);
527
d796fdb7
LV
528static struct pci_error_handlers s2io_err_handler = {
529 .error_detected = s2io_io_error_detected,
530 .slot_reset = s2io_io_slot_reset,
531 .resume = s2io_io_resume,
532};
533
1da177e4
LT
534static struct pci_driver s2io_driver = {
535 .name = "S2IO",
536 .id_table = s2io_tbl,
537 .probe = s2io_init_nic,
538 .remove = __devexit_p(s2io_rem_nic),
d796fdb7 539 .err_handler = &s2io_err_handler,
1da177e4
LT
540};
541
542/* A simplifier macro used both by init and free shared_mem Fns(). */
543#define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
544
3a3d5756
SH
545/* netqueue manipulation helper functions */
546static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
547{
548 int i;
3a3d5756
SH
549 if (sp->config.multiq) {
550 for (i = 0; i < sp->config.tx_fifo_num; i++)
551 netif_stop_subqueue(sp->dev, i);
b19fa1fa 552 } else {
3a3d5756
SH
553 for (i = 0; i < sp->config.tx_fifo_num; i++)
554 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
555 netif_stop_queue(sp->dev);
556 }
557}
558
559static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
560{
3a3d5756
SH
561 if (sp->config.multiq)
562 netif_stop_subqueue(sp->dev, fifo_no);
b19fa1fa 563 else {
3a3d5756
SH
564 sp->mac_control.fifos[fifo_no].queue_state =
565 FIFO_QUEUE_STOP;
566 netif_stop_queue(sp->dev);
567 }
568}
569
570static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
571{
572 int i;
3a3d5756
SH
573 if (sp->config.multiq) {
574 for (i = 0; i < sp->config.tx_fifo_num; i++)
575 netif_start_subqueue(sp->dev, i);
b19fa1fa 576 } else {
3a3d5756
SH
577 for (i = 0; i < sp->config.tx_fifo_num; i++)
578 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
579 netif_start_queue(sp->dev);
580 }
581}
582
583static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
584{
3a3d5756
SH
585 if (sp->config.multiq)
586 netif_start_subqueue(sp->dev, fifo_no);
b19fa1fa 587 else {
3a3d5756
SH
588 sp->mac_control.fifos[fifo_no].queue_state =
589 FIFO_QUEUE_START;
590 netif_start_queue(sp->dev);
591 }
592}
593
594static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
595{
596 int i;
3a3d5756
SH
597 if (sp->config.multiq) {
598 for (i = 0; i < sp->config.tx_fifo_num; i++)
599 netif_wake_subqueue(sp->dev, i);
b19fa1fa 600 } else {
3a3d5756
SH
601 for (i = 0; i < sp->config.tx_fifo_num; i++)
602 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
603 netif_wake_queue(sp->dev);
604 }
605}
606
607static inline void s2io_wake_tx_queue(
608 struct fifo_info *fifo, int cnt, u8 multiq)
609{
610
3a3d5756
SH
611 if (multiq) {
612 if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
613 netif_wake_subqueue(fifo->dev, fifo->fifo_no);
b19fa1fa 614 } else if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
3a3d5756
SH
615 if (netif_queue_stopped(fifo->dev)) {
616 fifo->queue_state = FIFO_QUEUE_START;
617 netif_wake_queue(fifo->dev);
618 }
619 }
620}
621
1da177e4
LT
/**
 * init_shared_mem - Allocation and Initialization of Memory
 * @nic: Device private variable.
 * Description: The function allocates all the memory areas shared
 * between the NIC and the driver. This includes Tx descriptors,
 * Rx descriptors and the statistics block.
 *
 * Return: SUCCESS on success; -EINVAL for bad fifo configuration,
 * -ENOMEM / FAILURE on allocation or configuration errors.  On any
 * failure the caller is expected to invoke free_shared_mem(), which
 * releases whatever was allocated up to the failure point — that is
 * why partial state is left behind here instead of being unwound.
 */

static int init_shared_mem(struct s2io_nic *nic)
{
	u32 size;
	void *tmp_v_addr, *tmp_v_addr_next;
	dma_addr_t tmp_p_addr, tmp_p_addr_next;
	struct RxD_block *pre_rxd_blk = NULL;
	int i, j, blk_cnt;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;
	unsigned long tmp;
	struct buffAdd *ba;

	struct mac_info *mac_control;
	struct config_param *config;
	/* Running total of bytes allocated; folded into sw_stat at the end. */
	unsigned long long mem_allocated = 0;

	mac_control = &nic->mac_control;
	config = &nic->config;


	/* Allocation and initialization of TXDLs in FIOFs */
	/* First pass: total TxD count across all fifos must not exceed
	 * the hardware limit. */
	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		size += config->tx_cfg[i].fifo_len;
	}
	if (size > MAX_AVAILABLE_TXDS) {
		DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
		DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
		return -EINVAL;
	}

	/* Second pass: each individual fifo length must be at least 2. */
	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		size = config->tx_cfg[i].fifo_len;
		/*
		 * Legal values are from 2 to 8192
		 */
		if (size < 2) {
			DBG_PRINT(ERR_DBG, "s2io: Invalid fifo len (%d)", size);
			DBG_PRINT(ERR_DBG, "for fifo %d\n", i);
			DBG_PRINT(ERR_DBG, "s2io: Legal values for fifo len"
				"are 2 to 8192\n");
			return -EINVAL;
		}
	}

	/* Size of one Tx descriptor list and how many lists fit per page. */
	lst_size = (sizeof(struct TxD) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	/* Per-fifo bookkeeping array: one list_info_hold per descriptor
	 * list in the fifo. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		int fifo_len = config->tx_cfg[i].fifo_len;
		int list_holder_size = fifo_len * sizeof(struct list_info_hold);
		mac_control->fifos[i].list_info = kzalloc(list_holder_size,
							  GFP_KERNEL);
		if (!mac_control->fifos[i].list_info) {
			DBG_PRINT(INFO_DBG,
				  "Malloc failed for list_info\n");
			return -ENOMEM;
		}
		mem_allocated += list_holder_size;
	}
	/* Allocate DMA-coherent pages holding the TxD lists and point each
	 * list_info slot at its virtual/physical address inside the page. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
						lst_per_page);
		mac_control->fifos[i].tx_curr_put_info.offset = 0;
		mac_control->fifos[i].tx_curr_put_info.fifo_len =
		    config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].tx_curr_get_info.offset = 0;
		mac_control->fifos[i].tx_curr_get_info.fifo_len =
		    config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].fifo_no = i;
		mac_control->fifos[i].nic = nic;
		mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
		mac_control->fifos[i].dev = dev;

		for (j = 0; j < page_num; j++) {
			int k = 0;
			dma_addr_t tmp_p;
			void *tmp_v;
			tmp_v = pci_alloc_consistent(nic->pdev,
						     PAGE_SIZE, &tmp_p);
			if (!tmp_v) {
				DBG_PRINT(INFO_DBG,
					  "pci_alloc_consistent ");
				DBG_PRINT(INFO_DBG, "failed for TxDL\n");
				return -ENOMEM;
			}
			/* If we got a zero DMA address(can happen on
			 * certain platforms like PPC), reallocate.
			 * Store virtual address of page we don't want,
			 * to be freed later.
			 */
			if (!tmp_p) {
				mac_control->zerodma_virt_addr = tmp_v;
				DBG_PRINT(INIT_DBG,
				"%s: Zero DMA address for TxDL. ", dev->name);
				DBG_PRINT(INIT_DBG,
				"Virtual address %p\n", tmp_v);
				tmp_v = pci_alloc_consistent(nic->pdev,
						     PAGE_SIZE, &tmp_p);
				if (!tmp_v) {
					DBG_PRINT(INFO_DBG,
					  "pci_alloc_consistent ");
					DBG_PRINT(INFO_DBG, "failed for TxDL\n");
					return -ENOMEM;
				}
				mem_allocated += PAGE_SIZE;
			}
			/* Carve the page into lst_per_page descriptor lists,
			 * stopping early at the fifo's configured length. */
			while (k < lst_per_page) {
				int l = (j * lst_per_page) + k;
				if (l == config->tx_cfg[i].fifo_len)
					break;
				mac_control->fifos[i].list_info[l].list_virt_addr =
				    tmp_v + (k * lst_size);
				mac_control->fifos[i].list_info[l].list_phy_addr =
				    tmp_p + (k * lst_size);
				k++;
			}
		}
	}

	/* Per-fifo scratch array used by the UFO (UDP fragmentation
	 * offload) transmit path; one u64 per descriptor list. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		size = config->tx_cfg[i].fifo_len;
		mac_control->fifos[i].ufo_in_band_v
			= kcalloc(size, sizeof(u64), GFP_KERNEL);
		if (!mac_control->fifos[i].ufo_in_band_v)
			return -ENOMEM;
		mem_allocated += (size * sizeof(u64));
	}

	/* Allocation and initialization of RXDs in Rings */
	size = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		/* Each Rx block holds rxd_count[mode] descriptors plus one
		 * link descriptor; num_rxd must be an exact multiple. */
		if (config->rx_cfg[i].num_rxd %
		    (rxd_count[nic->rxd_mode] + 1)) {
			DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
			DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
				  i);
			DBG_PRINT(ERR_DBG, "RxDs per Block");
			return FAILURE;
		}
		size += config->rx_cfg[i].num_rxd;
		mac_control->rings[i].block_count =
			config->rx_cfg[i].num_rxd /
			(rxd_count[nic->rxd_mode] + 1 );
		mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
			mac_control->rings[i].block_count;
	}
	/* NOTE(review): this total byte size is computed but not used below;
	 * presumably historical — confirm before relying on it. */
	if (nic->rxd_mode == RXD_MODE_1)
		size = (size * (sizeof(struct RxD1)));
	else
		size = (size * (sizeof(struct RxD3)));

	for (i = 0; i < config->rx_ring_num; i++) {
		mac_control->rings[i].rx_curr_get_info.block_index = 0;
		mac_control->rings[i].rx_curr_get_info.offset = 0;
		mac_control->rings[i].rx_curr_get_info.ring_len =
		    config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].rx_curr_put_info.block_index = 0;
		mac_control->rings[i].rx_curr_put_info.offset = 0;
		mac_control->rings[i].rx_curr_put_info.ring_len =
		    config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].nic = nic;
		mac_control->rings[i].ring_no = i;
		mac_control->rings[i].lro = lro_enable;

		blk_cnt = config->rx_cfg[i].num_rxd /
			(rxd_count[nic->rxd_mode] + 1);
		/* Allocating all the Rx blocks */
		for (j = 0; j < blk_cnt; j++) {
			struct rx_block_info *rx_blocks;
			int l;

			rx_blocks = &mac_control->rings[i].rx_blocks[j];
			size = SIZE_OF_BLOCK; /* size is always page size */
			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
							  &tmp_p_addr);
			if (tmp_v_addr == NULL) {
				/*
				 * In case of failure, free_shared_mem()
				 * is called, which should free any
				 * memory that was alloced till the
				 * failure happened.
				 */
				rx_blocks->block_virt_addr = tmp_v_addr;
				return -ENOMEM;
			}
			mem_allocated += size;
			memset(tmp_v_addr, 0, size);
			rx_blocks->block_virt_addr = tmp_v_addr;
			rx_blocks->block_dma_addr = tmp_p_addr;
			/* Per-descriptor lookup table mapping each RxD to its
			 * virtual/DMA address within the block. */
			rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
						  rxd_count[nic->rxd_mode],
						  GFP_KERNEL);
			if (!rx_blocks->rxds)
				return -ENOMEM;
			mem_allocated +=
			(sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
			for (l=0; l<rxd_count[nic->rxd_mode];l++) {
				rx_blocks->rxds[l].virt_addr =
					rx_blocks->block_virt_addr +
					(rxd_size[nic->rxd_mode] * l);
				rx_blocks->rxds[l].dma_addr =
					rx_blocks->block_dma_addr +
					(rxd_size[nic->rxd_mode] * l);
			}
		}
		/* Interlinking all Rx Blocks */
		/* Each block's trailing link descriptor points (virtually and
		 * physically) at the next block, wrapping modulo blk_cnt so
		 * the ring is circular. */
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr =
				mac_control->rings[i].rx_blocks[j].block_virt_addr;
			tmp_v_addr_next =
				mac_control->rings[i].rx_blocks[(j + 1) %
					blk_cnt].block_virt_addr;
			tmp_p_addr =
				mac_control->rings[i].rx_blocks[j].block_dma_addr;
			tmp_p_addr_next =
				mac_control->rings[i].rx_blocks[(j + 1) %
					blk_cnt].block_dma_addr;

			pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
			pre_rxd_blk->reserved_2_pNext_RxD_block =
				(unsigned long) tmp_v_addr_next;
			pre_rxd_blk->pNext_RxD_Blk_physical =
				(u64) tmp_p_addr_next;
		}
	}
	if (nic->rxd_mode == RXD_MODE_3B) {
		/*
		 * Allocation of Storages for buffer addresses in 2BUFF mode
		 * and the buffers as well.
		 */
		for (i = 0; i < config->rx_ring_num; i++) {
			blk_cnt = config->rx_cfg[i].num_rxd /
			   (rxd_count[nic->rxd_mode]+ 1);
			mac_control->rings[i].ba =
				kmalloc((sizeof(struct buffAdd *) * blk_cnt),
				     GFP_KERNEL);
			if (!mac_control->rings[i].ba)
				return -ENOMEM;
			mem_allocated +=(sizeof(struct buffAdd *) * blk_cnt);
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				mac_control->rings[i].ba[j] =
					kmalloc((sizeof(struct buffAdd) *
						(rxd_count[nic->rxd_mode] + 1)),
						GFP_KERNEL);
				if (!mac_control->rings[i].ba[j])
					return -ENOMEM;
				mem_allocated += (sizeof(struct buffAdd) * \
					(rxd_count[nic->rxd_mode] + 1));
				while (k != rxd_count[nic->rxd_mode]) {
					ba = &mac_control->rings[i].ba[j][k];

					/* Over-allocate by ALIGN_SIZE so the
					 * working pointers ba_0/ba_1 can be
					 * rounded to an aligned boundary; the
					 * *_org pointers keep the address to
					 * pass to kfree() later. */
					ba->ba_0_org = (void *) kmalloc
					    (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
					if (!ba->ba_0_org)
						return -ENOMEM;
					mem_allocated +=
						(BUF0_LEN + ALIGN_SIZE);
					tmp = (unsigned long)ba->ba_0_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long) ALIGN_SIZE);
					ba->ba_0 = (void *) tmp;

					ba->ba_1_org = (void *) kmalloc
					    (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
					if (!ba->ba_1_org)
						return -ENOMEM;
					mem_allocated
						+= (BUF1_LEN + ALIGN_SIZE);
					tmp = (unsigned long) ba->ba_1_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long) ALIGN_SIZE);
					ba->ba_1 = (void *) tmp;
					k++;
				}
			}
		}
	}

	/* Allocation and initialization of Statistics block */
	size = sizeof(struct stat_block);
	mac_control->stats_mem = pci_alloc_consistent
	    (nic->pdev, size, &mac_control->stats_mem_phy);

	if (!mac_control->stats_mem) {
		/*
		 * In case of failure, free_shared_mem() is called, which
		 * should free any memory that was alloced till the
		 * failure happened.
		 */
		return -ENOMEM;
	}
	mem_allocated += size;
	mac_control->stats_mem_sz = size;

	tmp_v_addr = mac_control->stats_mem;
	mac_control->stats_info = (struct stat_block *) tmp_v_addr;
	memset(tmp_v_addr, 0, size);
	DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
		  (unsigned long long) tmp_p_addr);
	/* Publish the grand total into the driver's software statistics. */
	mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
	return SUCCESS;
}
935
20346722
K
936/**
937 * free_shared_mem - Free the allocated Memory
1da177e4
LT
938 * @nic: Device private variable.
939 * Description: This function is to free all memory locations allocated by
940 * the init_shared_mem() function and return it to the kernel.
941 */
942
943static void free_shared_mem(struct s2io_nic *nic)
944{
945 int i, j, blk_cnt, size;
946 void *tmp_v_addr;
947 dma_addr_t tmp_p_addr;
1ee6dd77 948 struct mac_info *mac_control;
1da177e4
LT
949 struct config_param *config;
950 int lst_size, lst_per_page;
8910b49f 951 struct net_device *dev;
491976b2 952 int page_num = 0;
1da177e4
LT
953
954 if (!nic)
955 return;
956
8910b49f
MG
957 dev = nic->dev;
958
1da177e4
LT
959 mac_control = &nic->mac_control;
960 config = &nic->config;
961
1ee6dd77 962 lst_size = (sizeof(struct TxD) * config->max_txds);
1da177e4
LT
963 lst_per_page = PAGE_SIZE / lst_size;
964
965 for (i = 0; i < config->tx_fifo_num; i++) {
491976b2
SH
966 page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
967 lst_per_page);
1da177e4
LT
968 for (j = 0; j < page_num; j++) {
969 int mem_blks = (j * lst_per_page);
776bd20f 970 if (!mac_control->fifos[i].list_info)
6aa20a22 971 return;
776bd20f 972 if (!mac_control->fifos[i].list_info[mem_blks].
973 list_virt_addr)
1da177e4
LT
974 break;
975 pci_free_consistent(nic->pdev, PAGE_SIZE,
20346722
K
976 mac_control->fifos[i].
977 list_info[mem_blks].
1da177e4 978 list_virt_addr,
20346722
K
979 mac_control->fifos[i].
980 list_info[mem_blks].
1da177e4 981 list_phy_addr);
8a4bdbaa 982 nic->mac_control.stats_info->sw_stat.mem_freed
491976b2 983 += PAGE_SIZE;
1da177e4 984 }
776bd20f 985 /* If we got a zero DMA address during allocation,
986 * free the page now
987 */
988 if (mac_control->zerodma_virt_addr) {
989 pci_free_consistent(nic->pdev, PAGE_SIZE,
990 mac_control->zerodma_virt_addr,
991 (dma_addr_t)0);
6aa20a22 992 DBG_PRINT(INIT_DBG,
6b4d617d
AM
993 "%s: Freeing TxDL with zero DMA addr. ",
994 dev->name);
995 DBG_PRINT(INIT_DBG, "Virtual address %p\n",
996 mac_control->zerodma_virt_addr);
8a4bdbaa 997 nic->mac_control.stats_info->sw_stat.mem_freed
491976b2 998 += PAGE_SIZE;
776bd20f 999 }
20346722 1000 kfree(mac_control->fifos[i].list_info);
8a4bdbaa 1001 nic->mac_control.stats_info->sw_stat.mem_freed +=
491976b2 1002 (nic->config.tx_cfg[i].fifo_len *sizeof(struct list_info_hold));
1da177e4
LT
1003 }
1004
1da177e4 1005 size = SIZE_OF_BLOCK;
1da177e4 1006 for (i = 0; i < config->rx_ring_num; i++) {
20346722 1007 blk_cnt = mac_control->rings[i].block_count;
1da177e4 1008 for (j = 0; j < blk_cnt; j++) {
20346722
K
1009 tmp_v_addr = mac_control->rings[i].rx_blocks[j].
1010 block_virt_addr;
1011 tmp_p_addr = mac_control->rings[i].rx_blocks[j].
1012 block_dma_addr;
1da177e4
LT
1013 if (tmp_v_addr == NULL)
1014 break;
1015 pci_free_consistent(nic->pdev, size,
1016 tmp_v_addr, tmp_p_addr);
491976b2 1017 nic->mac_control.stats_info->sw_stat.mem_freed += size;
da6971d8 1018 kfree(mac_control->rings[i].rx_blocks[j].rxds);
8a4bdbaa 1019 nic->mac_control.stats_info->sw_stat.mem_freed +=
491976b2 1020 ( sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
1da177e4
LT
1021 }
1022 }
1023
6d517a27 1024 if (nic->rxd_mode == RXD_MODE_3B) {
da6971d8
AR
1025 /* Freeing buffer storage addresses in 2BUFF mode. */
1026 for (i = 0; i < config->rx_ring_num; i++) {
1027 blk_cnt = config->rx_cfg[i].num_rxd /
1028 (rxd_count[nic->rxd_mode] + 1);
1029 for (j = 0; j < blk_cnt; j++) {
1030 int k = 0;
1031 if (!mac_control->rings[i].ba[j])
1032 continue;
1033 while (k != rxd_count[nic->rxd_mode]) {
1ee6dd77 1034 struct buffAdd *ba =
da6971d8
AR
1035 &mac_control->rings[i].ba[j][k];
1036 kfree(ba->ba_0_org);
491976b2
SH
1037 nic->mac_control.stats_info->sw_stat.\
1038 mem_freed += (BUF0_LEN + ALIGN_SIZE);
da6971d8 1039 kfree(ba->ba_1_org);
491976b2
SH
1040 nic->mac_control.stats_info->sw_stat.\
1041 mem_freed += (BUF1_LEN + ALIGN_SIZE);
da6971d8
AR
1042 k++;
1043 }
1044 kfree(mac_control->rings[i].ba[j]);
9caab458
SS
1045 nic->mac_control.stats_info->sw_stat.mem_freed +=
1046 (sizeof(struct buffAdd) *
1047 (rxd_count[nic->rxd_mode] + 1));
1da177e4 1048 }
da6971d8 1049 kfree(mac_control->rings[i].ba);
8a4bdbaa 1050 nic->mac_control.stats_info->sw_stat.mem_freed +=
491976b2 1051 (sizeof(struct buffAdd *) * blk_cnt);
1da177e4 1052 }
1da177e4 1053 }
1da177e4 1054
2fda096d
SR
1055 for (i = 0; i < nic->config.tx_fifo_num; i++) {
1056 if (mac_control->fifos[i].ufo_in_band_v) {
1057 nic->mac_control.stats_info->sw_stat.mem_freed
1058 += (config->tx_cfg[i].fifo_len * sizeof(u64));
1059 kfree(mac_control->fifos[i].ufo_in_band_v);
1060 }
1061 }
1062
1da177e4 1063 if (mac_control->stats_mem) {
2fda096d
SR
1064 nic->mac_control.stats_info->sw_stat.mem_freed +=
1065 mac_control->stats_mem_sz;
1da177e4
LT
1066 pci_free_consistent(nic->pdev,
1067 mac_control->stats_mem_sz,
1068 mac_control->stats_mem,
1069 mac_control->stats_mem_phy);
491976b2 1070 }
1da177e4
LT
1071}
1072
541ae68f
K
1073/**
1074 * s2io_verify_pci_mode -
1075 */
1076
1ee6dd77 1077static int s2io_verify_pci_mode(struct s2io_nic *nic)
541ae68f 1078{
1ee6dd77 1079 struct XENA_dev_config __iomem *bar0 = nic->bar0;
541ae68f
K
1080 register u64 val64 = 0;
1081 int mode;
1082
1083 val64 = readq(&bar0->pci_mode);
1084 mode = (u8)GET_PCI_MODE(val64);
1085
1086 if ( val64 & PCI_MODE_UNKNOWN_MODE)
1087 return -1; /* Unknown PCI mode */
1088 return mode;
1089}
1090
c92ca04b
AR
1091#define NEC_VENID 0x1033
1092#define NEC_DEVID 0x0125
1093static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1094{
1095 struct pci_dev *tdev = NULL;
26d36b64
AC
1096 while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
1097 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
7ad62dbc 1098 if (tdev->bus == s2io_pdev->bus->parent) {
26d36b64 1099 pci_dev_put(tdev);
c92ca04b 1100 return 1;
7ad62dbc 1101 }
c92ca04b
AR
1102 }
1103 }
1104 return 0;
1105}
541ae68f 1106
7b32a312 1107static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
541ae68f
K
1108/**
1109 * s2io_print_pci_mode -
1110 */
1ee6dd77 1111static int s2io_print_pci_mode(struct s2io_nic *nic)
541ae68f 1112{
1ee6dd77 1113 struct XENA_dev_config __iomem *bar0 = nic->bar0;
541ae68f
K
1114 register u64 val64 = 0;
1115 int mode;
1116 struct config_param *config = &nic->config;
1117
1118 val64 = readq(&bar0->pci_mode);
1119 mode = (u8)GET_PCI_MODE(val64);
1120
1121 if ( val64 & PCI_MODE_UNKNOWN_MODE)
1122 return -1; /* Unknown PCI mode */
1123
c92ca04b
AR
1124 config->bus_speed = bus_speed[mode];
1125
1126 if (s2io_on_nec_bridge(nic->pdev)) {
1127 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1128 nic->dev->name);
1129 return mode;
1130 }
1131
541ae68f
K
1132 if (val64 & PCI_MODE_32_BITS) {
1133 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
1134 } else {
1135 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
1136 }
1137
1138 switch(mode) {
1139 case PCI_MODE_PCI_33:
1140 DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
541ae68f
K
1141 break;
1142 case PCI_MODE_PCI_66:
1143 DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
541ae68f
K
1144 break;
1145 case PCI_MODE_PCIX_M1_66:
1146 DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
541ae68f
K
1147 break;
1148 case PCI_MODE_PCIX_M1_100:
1149 DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
541ae68f
K
1150 break;
1151 case PCI_MODE_PCIX_M1_133:
1152 DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
541ae68f
K
1153 break;
1154 case PCI_MODE_PCIX_M2_66:
1155 DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
541ae68f
K
1156 break;
1157 case PCI_MODE_PCIX_M2_100:
1158 DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
541ae68f
K
1159 break;
1160 case PCI_MODE_PCIX_M2_133:
1161 DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
541ae68f
K
1162 break;
1163 default:
1164 return -1; /* Unsupported bus speed */
1165 }
1166
1167 return mode;
1168}
1169
b7c5678f
RV
1170/**
1171 * init_tti - Initialization transmit traffic interrupt scheme
1172 * @nic: device private variable
1173 * @link: link status (UP/DOWN) used to enable/disable continuous
1174 * transmit interrupts
1175 * Description: The function configures transmit traffic interrupts
1176 * Return Value: SUCCESS on success and
1177 * '-1' on failure
1178 */
1179
0d66afe7 1180static int init_tti(struct s2io_nic *nic, int link)
b7c5678f
RV
1181{
1182 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1183 register u64 val64 = 0;
1184 int i;
1185 struct config_param *config;
1186
1187 config = &nic->config;
1188
1189 for (i = 0; i < config->tx_fifo_num; i++) {
1190 /*
1191 * TTI Initialization. Default Tx timer gets us about
1192 * 250 interrupts per sec. Continuous interrupts are enabled
1193 * by default.
1194 */
1195 if (nic->device_type == XFRAME_II_DEVICE) {
1196 int count = (nic->config.bus_speed * 125)/2;
1197 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1198 } else
1199 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1200
1201 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1202 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1203 TTI_DATA1_MEM_TX_URNG_C(0x30) |
1204 TTI_DATA1_MEM_TX_TIMER_AC_EN;
ac731ab6
SH
1205 if (i == 0)
1206 if (use_continuous_tx_intrs && (link == LINK_UP))
1207 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
b7c5678f
RV
1208 writeq(val64, &bar0->tti_data1_mem);
1209
ac731ab6
SH
1210 if (nic->config.intr_type == MSI_X) {
1211 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1212 TTI_DATA2_MEM_TX_UFC_B(0x100) |
1213 TTI_DATA2_MEM_TX_UFC_C(0x200) |
1214 TTI_DATA2_MEM_TX_UFC_D(0x300);
1215 } else {
1216 if ((nic->config.tx_steering_type ==
1217 TX_DEFAULT_STEERING) &&
1218 (config->tx_fifo_num > 1) &&
1219 (i >= nic->udp_fifo_idx) &&
1220 (i < (nic->udp_fifo_idx +
1221 nic->total_udp_fifos)))
1222 val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
1223 TTI_DATA2_MEM_TX_UFC_B(0x80) |
1224 TTI_DATA2_MEM_TX_UFC_C(0x100) |
1225 TTI_DATA2_MEM_TX_UFC_D(0x120);
1226 else
1227 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1228 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1229 TTI_DATA2_MEM_TX_UFC_C(0x40) |
1230 TTI_DATA2_MEM_TX_UFC_D(0x80);
1231 }
b7c5678f
RV
1232
1233 writeq(val64, &bar0->tti_data2_mem);
1234
1235 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD |
1236 TTI_CMD_MEM_OFFSET(i);
1237 writeq(val64, &bar0->tti_command_mem);
1238
1239 if (wait_for_cmd_complete(&bar0->tti_command_mem,
1240 TTI_CMD_MEM_STROBE_NEW_CMD, S2IO_BIT_RESET) != SUCCESS)
1241 return FAILURE;
1242 }
1243
1244 return SUCCESS;
1245}
1246
20346722
K
1247/**
1248 * init_nic - Initialization of hardware
b7c5678f 1249 * @nic: device private variable
20346722
K
1250 * Description: The function sequentially configures every block
1251 * of the H/W from their reset values.
1252 * Return Value: SUCCESS on success and
1da177e4
LT
1253 * '-1' on failure (endian settings incorrect).
1254 */
1255
1256static int init_nic(struct s2io_nic *nic)
1257{
1ee6dd77 1258 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1da177e4
LT
1259 struct net_device *dev = nic->dev;
1260 register u64 val64 = 0;
1261 void __iomem *add;
1262 u32 time;
1263 int i, j;
1ee6dd77 1264 struct mac_info *mac_control;
1da177e4 1265 struct config_param *config;
c92ca04b 1266 int dtx_cnt = 0;
1da177e4 1267 unsigned long long mem_share;
20346722 1268 int mem_size;
1da177e4
LT
1269
1270 mac_control = &nic->mac_control;
1271 config = &nic->config;
1272
5e25b9dd 1273 /* to set the swapper controle on the card */
20346722 1274 if(s2io_set_swapper(nic)) {
1da177e4 1275 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
9f74ffde 1276 return -EIO;
1da177e4
LT
1277 }
1278
541ae68f
K
1279 /*
1280 * Herc requires EOI to be removed from reset before XGXS, so..
1281 */
1282 if (nic->device_type & XFRAME_II_DEVICE) {
1283 val64 = 0xA500000000ULL;
1284 writeq(val64, &bar0->sw_reset);
1285 msleep(500);
1286 val64 = readq(&bar0->sw_reset);
1287 }
1288
1da177e4
LT
1289 /* Remove XGXS from reset state */
1290 val64 = 0;
1291 writeq(val64, &bar0->sw_reset);
1da177e4 1292 msleep(500);
20346722 1293 val64 = readq(&bar0->sw_reset);
1da177e4 1294
7962024e
SH
1295 /* Ensure that it's safe to access registers by checking
1296 * RIC_RUNNING bit is reset. Check is valid only for XframeII.
1297 */
1298 if (nic->device_type == XFRAME_II_DEVICE) {
1299 for (i = 0; i < 50; i++) {
1300 val64 = readq(&bar0->adapter_status);
1301 if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
1302 break;
1303 msleep(10);
1304 }
1305 if (i == 50)
1306 return -ENODEV;
1307 }
1308
1da177e4
LT
1309 /* Enable Receiving broadcasts */
1310 add = &bar0->mac_cfg;
1311 val64 = readq(&bar0->mac_cfg);
1312 val64 |= MAC_RMAC_BCAST_ENABLE;
1313 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1314 writel((u32) val64, add);
1315 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1316 writel((u32) (val64 >> 32), (add + 4));
1317
1318 /* Read registers in all blocks */
1319 val64 = readq(&bar0->mac_int_mask);
1320 val64 = readq(&bar0->mc_int_mask);
1321 val64 = readq(&bar0->xgxs_int_mask);
1322
1323 /* Set MTU */
1324 val64 = dev->mtu;
1325 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1326
541ae68f
K
1327 if (nic->device_type & XFRAME_II_DEVICE) {
1328 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
303bcb4b 1329 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1da177e4 1330 &bar0->dtx_control, UF);
541ae68f
K
1331 if (dtx_cnt & 0x1)
1332 msleep(1); /* Necessary!! */
1da177e4
LT
1333 dtx_cnt++;
1334 }
541ae68f 1335 } else {
c92ca04b
AR
1336 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1337 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1338 &bar0->dtx_control, UF);
1339 val64 = readq(&bar0->dtx_control);
1340 dtx_cnt++;
1da177e4
LT
1341 }
1342 }
1343
1344 /* Tx DMA Initialization */
1345 val64 = 0;
1346 writeq(val64, &bar0->tx_fifo_partition_0);
1347 writeq(val64, &bar0->tx_fifo_partition_1);
1348 writeq(val64, &bar0->tx_fifo_partition_2);
1349 writeq(val64, &bar0->tx_fifo_partition_3);
1350
1351
1352 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1353 val64 |=
b7c5678f 1354 vBIT(config->tx_cfg[i].fifo_len - 1, ((j * 32) + 19),
1da177e4 1355 13) | vBIT(config->tx_cfg[i].fifo_priority,
b7c5678f 1356 ((j * 32) + 5), 3);
1da177e4
LT
1357
1358 if (i == (config->tx_fifo_num - 1)) {
1359 if (i % 2 == 0)
1360 i++;
1361 }
1362
1363 switch (i) {
1364 case 1:
1365 writeq(val64, &bar0->tx_fifo_partition_0);
1366 val64 = 0;
b7c5678f 1367 j = 0;
1da177e4
LT
1368 break;
1369 case 3:
1370 writeq(val64, &bar0->tx_fifo_partition_1);
1371 val64 = 0;
b7c5678f 1372 j = 0;
1da177e4
LT
1373 break;
1374 case 5:
1375 writeq(val64, &bar0->tx_fifo_partition_2);
1376 val64 = 0;
b7c5678f 1377 j = 0;
1da177e4
LT
1378 break;
1379 case 7:
1380 writeq(val64, &bar0->tx_fifo_partition_3);
b7c5678f
RV
1381 val64 = 0;
1382 j = 0;
1383 break;
1384 default:
1385 j++;
1da177e4
LT
1386 break;
1387 }
1388 }
1389
5e25b9dd
K
1390 /*
1391 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1392 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1393 */
541ae68f 1394 if ((nic->device_type == XFRAME_I_DEVICE) &&
44c10138 1395 (nic->pdev->revision < 4))
5e25b9dd
K
1396 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1397
1da177e4
LT
1398 val64 = readq(&bar0->tx_fifo_partition_0);
1399 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1400 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1401
20346722
K
1402 /*
1403 * Initialization of Tx_PA_CONFIG register to ignore packet
1da177e4
LT
1404 * integrity checking.
1405 */
1406 val64 = readq(&bar0->tx_pa_cfg);
1407 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1408 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1409 writeq(val64, &bar0->tx_pa_cfg);
1410
1411 /* Rx DMA intialization. */
1412 val64 = 0;
1413 for (i = 0; i < config->rx_ring_num; i++) {
1414 val64 |=
1415 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1416 3);
1417 }
1418 writeq(val64, &bar0->rx_queue_priority);
1419
20346722
K
1420 /*
1421 * Allocating equal share of memory to all the
1da177e4
LT
1422 * configured Rings.
1423 */
1424 val64 = 0;
541ae68f
K
1425 if (nic->device_type & XFRAME_II_DEVICE)
1426 mem_size = 32;
1427 else
1428 mem_size = 64;
1429
1da177e4
LT
1430 for (i = 0; i < config->rx_ring_num; i++) {
1431 switch (i) {
1432 case 0:
20346722
K
1433 mem_share = (mem_size / config->rx_ring_num +
1434 mem_size % config->rx_ring_num);
1da177e4
LT
1435 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1436 continue;
1437 case 1:
20346722 1438 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1439 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1440 continue;
1441 case 2:
20346722 1442 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1443 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1444 continue;
1445 case 3:
20346722 1446 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1447 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1448 continue;
1449 case 4:
20346722 1450 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1451 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1452 continue;
1453 case 5:
20346722 1454 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1455 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1456 continue;
1457 case 6:
20346722 1458 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1459 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1460 continue;
1461 case 7:
20346722 1462 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1463 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1464 continue;
1465 }
1466 }
1467 writeq(val64, &bar0->rx_queue_cfg);
1468
20346722 1469 /*
5e25b9dd 1470 * Filling Tx round robin registers
b7c5678f 1471 * as per the number of FIFOs for equal scheduling priority
1da177e4 1472 */
5e25b9dd
K
1473 switch (config->tx_fifo_num) {
1474 case 1:
b7c5678f 1475 val64 = 0x0;
5e25b9dd
K
1476 writeq(val64, &bar0->tx_w_round_robin_0);
1477 writeq(val64, &bar0->tx_w_round_robin_1);
1478 writeq(val64, &bar0->tx_w_round_robin_2);
1479 writeq(val64, &bar0->tx_w_round_robin_3);
1480 writeq(val64, &bar0->tx_w_round_robin_4);
1481 break;
1482 case 2:
b7c5678f 1483 val64 = 0x0001000100010001ULL;
5e25b9dd 1484 writeq(val64, &bar0->tx_w_round_robin_0);
5e25b9dd 1485 writeq(val64, &bar0->tx_w_round_robin_1);
5e25b9dd 1486 writeq(val64, &bar0->tx_w_round_robin_2);
5e25b9dd 1487 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1488 val64 = 0x0001000100000000ULL;
5e25b9dd
K
1489 writeq(val64, &bar0->tx_w_round_robin_4);
1490 break;
1491 case 3:
b7c5678f 1492 val64 = 0x0001020001020001ULL;
5e25b9dd 1493 writeq(val64, &bar0->tx_w_round_robin_0);
b7c5678f 1494 val64 = 0x0200010200010200ULL;
5e25b9dd 1495 writeq(val64, &bar0->tx_w_round_robin_1);
b7c5678f 1496 val64 = 0x0102000102000102ULL;
5e25b9dd 1497 writeq(val64, &bar0->tx_w_round_robin_2);
b7c5678f 1498 val64 = 0x0001020001020001ULL;
5e25b9dd 1499 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1500 val64 = 0x0200010200000000ULL;
5e25b9dd
K
1501 writeq(val64, &bar0->tx_w_round_robin_4);
1502 break;
1503 case 4:
b7c5678f 1504 val64 = 0x0001020300010203ULL;
5e25b9dd 1505 writeq(val64, &bar0->tx_w_round_robin_0);
5e25b9dd 1506 writeq(val64, &bar0->tx_w_round_robin_1);
5e25b9dd 1507 writeq(val64, &bar0->tx_w_round_robin_2);
5e25b9dd 1508 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1509 val64 = 0x0001020300000000ULL;
5e25b9dd
K
1510 writeq(val64, &bar0->tx_w_round_robin_4);
1511 break;
1512 case 5:
b7c5678f 1513 val64 = 0x0001020304000102ULL;
5e25b9dd 1514 writeq(val64, &bar0->tx_w_round_robin_0);
b7c5678f 1515 val64 = 0x0304000102030400ULL;
5e25b9dd 1516 writeq(val64, &bar0->tx_w_round_robin_1);
b7c5678f 1517 val64 = 0x0102030400010203ULL;
5e25b9dd 1518 writeq(val64, &bar0->tx_w_round_robin_2);
b7c5678f 1519 val64 = 0x0400010203040001ULL;
5e25b9dd 1520 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1521 val64 = 0x0203040000000000ULL;
5e25b9dd
K
1522 writeq(val64, &bar0->tx_w_round_robin_4);
1523 break;
1524 case 6:
b7c5678f 1525 val64 = 0x0001020304050001ULL;
5e25b9dd 1526 writeq(val64, &bar0->tx_w_round_robin_0);
b7c5678f 1527 val64 = 0x0203040500010203ULL;
5e25b9dd 1528 writeq(val64, &bar0->tx_w_round_robin_1);
b7c5678f 1529 val64 = 0x0405000102030405ULL;
5e25b9dd 1530 writeq(val64, &bar0->tx_w_round_robin_2);
b7c5678f 1531 val64 = 0x0001020304050001ULL;
5e25b9dd 1532 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1533 val64 = 0x0203040500000000ULL;
5e25b9dd
K
1534 writeq(val64, &bar0->tx_w_round_robin_4);
1535 break;
1536 case 7:
b7c5678f 1537 val64 = 0x0001020304050600ULL;
5e25b9dd 1538 writeq(val64, &bar0->tx_w_round_robin_0);
b7c5678f 1539 val64 = 0x0102030405060001ULL;
5e25b9dd 1540 writeq(val64, &bar0->tx_w_round_robin_1);
b7c5678f 1541 val64 = 0x0203040506000102ULL;
5e25b9dd 1542 writeq(val64, &bar0->tx_w_round_robin_2);
b7c5678f 1543 val64 = 0x0304050600010203ULL;
5e25b9dd 1544 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1545 val64 = 0x0405060000000000ULL;
5e25b9dd
K
1546 writeq(val64, &bar0->tx_w_round_robin_4);
1547 break;
1548 case 8:
b7c5678f 1549 val64 = 0x0001020304050607ULL;
5e25b9dd 1550 writeq(val64, &bar0->tx_w_round_robin_0);
5e25b9dd 1551 writeq(val64, &bar0->tx_w_round_robin_1);
5e25b9dd 1552 writeq(val64, &bar0->tx_w_round_robin_2);
5e25b9dd 1553 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1554 val64 = 0x0001020300000000ULL;
5e25b9dd
K
1555 writeq(val64, &bar0->tx_w_round_robin_4);
1556 break;
1557 }
1558
b41477f3 1559 /* Enable all configured Tx FIFO partitions */
5d3213cc
AR
1560 val64 = readq(&bar0->tx_fifo_partition_0);
1561 val64 |= (TX_FIFO_PARTITION_EN);
1562 writeq(val64, &bar0->tx_fifo_partition_0);
1563
5e25b9dd 1564 /* Filling the Rx round robin registers as per the
0425b46a
SH
1565 * number of Rings and steering based on QoS with
1566 * equal priority.
1567 */
5e25b9dd
K
1568 switch (config->rx_ring_num) {
1569 case 1:
0425b46a
SH
1570 val64 = 0x0;
1571 writeq(val64, &bar0->rx_w_round_robin_0);
1572 writeq(val64, &bar0->rx_w_round_robin_1);
1573 writeq(val64, &bar0->rx_w_round_robin_2);
1574 writeq(val64, &bar0->rx_w_round_robin_3);
1575 writeq(val64, &bar0->rx_w_round_robin_4);
1576
5e25b9dd
K
1577 val64 = 0x8080808080808080ULL;
1578 writeq(val64, &bar0->rts_qos_steering);
1579 break;
1580 case 2:
0425b46a 1581 val64 = 0x0001000100010001ULL;
5e25b9dd 1582 writeq(val64, &bar0->rx_w_round_robin_0);
5e25b9dd 1583 writeq(val64, &bar0->rx_w_round_robin_1);
5e25b9dd 1584 writeq(val64, &bar0->rx_w_round_robin_2);
5e25b9dd 1585 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1586 val64 = 0x0001000100000000ULL;
5e25b9dd
K
1587 writeq(val64, &bar0->rx_w_round_robin_4);
1588
1589 val64 = 0x8080808040404040ULL;
1590 writeq(val64, &bar0->rts_qos_steering);
1591 break;
1592 case 3:
0425b46a 1593 val64 = 0x0001020001020001ULL;
5e25b9dd 1594 writeq(val64, &bar0->rx_w_round_robin_0);
0425b46a 1595 val64 = 0x0200010200010200ULL;
5e25b9dd 1596 writeq(val64, &bar0->rx_w_round_robin_1);
0425b46a 1597 val64 = 0x0102000102000102ULL;
5e25b9dd 1598 writeq(val64, &bar0->rx_w_round_robin_2);
0425b46a 1599 val64 = 0x0001020001020001ULL;
5e25b9dd 1600 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1601 val64 = 0x0200010200000000ULL;
5e25b9dd
K
1602 writeq(val64, &bar0->rx_w_round_robin_4);
1603
1604 val64 = 0x8080804040402020ULL;
1605 writeq(val64, &bar0->rts_qos_steering);
1606 break;
1607 case 4:
0425b46a 1608 val64 = 0x0001020300010203ULL;
5e25b9dd 1609 writeq(val64, &bar0->rx_w_round_robin_0);
5e25b9dd 1610 writeq(val64, &bar0->rx_w_round_robin_1);
5e25b9dd 1611 writeq(val64, &bar0->rx_w_round_robin_2);
5e25b9dd 1612 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1613 val64 = 0x0001020300000000ULL;
5e25b9dd
K
1614 writeq(val64, &bar0->rx_w_round_robin_4);
1615
1616 val64 = 0x8080404020201010ULL;
1617 writeq(val64, &bar0->rts_qos_steering);
1618 break;
1619 case 5:
0425b46a 1620 val64 = 0x0001020304000102ULL;
5e25b9dd 1621 writeq(val64, &bar0->rx_w_round_robin_0);
0425b46a 1622 val64 = 0x0304000102030400ULL;
5e25b9dd 1623 writeq(val64, &bar0->rx_w_round_robin_1);
0425b46a 1624 val64 = 0x0102030400010203ULL;
5e25b9dd 1625 writeq(val64, &bar0->rx_w_round_robin_2);
0425b46a 1626 val64 = 0x0400010203040001ULL;
5e25b9dd 1627 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1628 val64 = 0x0203040000000000ULL;
5e25b9dd
K
1629 writeq(val64, &bar0->rx_w_round_robin_4);
1630
1631 val64 = 0x8080404020201008ULL;
1632 writeq(val64, &bar0->rts_qos_steering);
1633 break;
1634 case 6:
0425b46a 1635 val64 = 0x0001020304050001ULL;
5e25b9dd 1636 writeq(val64, &bar0->rx_w_round_robin_0);
0425b46a 1637 val64 = 0x0203040500010203ULL;
5e25b9dd 1638 writeq(val64, &bar0->rx_w_round_robin_1);
0425b46a 1639 val64 = 0x0405000102030405ULL;
5e25b9dd 1640 writeq(val64, &bar0->rx_w_round_robin_2);
0425b46a 1641 val64 = 0x0001020304050001ULL;
5e25b9dd 1642 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1643 val64 = 0x0203040500000000ULL;
5e25b9dd
K
1644 writeq(val64, &bar0->rx_w_round_robin_4);
1645
1646 val64 = 0x8080404020100804ULL;
1647 writeq(val64, &bar0->rts_qos_steering);
1648 break;
1649 case 7:
0425b46a 1650 val64 = 0x0001020304050600ULL;
5e25b9dd 1651 writeq(val64, &bar0->rx_w_round_robin_0);
0425b46a 1652 val64 = 0x0102030405060001ULL;
5e25b9dd 1653 writeq(val64, &bar0->rx_w_round_robin_1);
0425b46a 1654 val64 = 0x0203040506000102ULL;
5e25b9dd 1655 writeq(val64, &bar0->rx_w_round_robin_2);
0425b46a 1656 val64 = 0x0304050600010203ULL;
5e25b9dd 1657 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1658 val64 = 0x0405060000000000ULL;
5e25b9dd
K
1659 writeq(val64, &bar0->rx_w_round_robin_4);
1660
1661 val64 = 0x8080402010080402ULL;
1662 writeq(val64, &bar0->rts_qos_steering);
1663 break;
1664 case 8:
0425b46a 1665 val64 = 0x0001020304050607ULL;
5e25b9dd 1666 writeq(val64, &bar0->rx_w_round_robin_0);
5e25b9dd 1667 writeq(val64, &bar0->rx_w_round_robin_1);
5e25b9dd 1668 writeq(val64, &bar0->rx_w_round_robin_2);
5e25b9dd 1669 writeq(val64, &bar0->rx_w_round_robin_3);
0425b46a 1670 val64 = 0x0001020300000000ULL;
5e25b9dd
K
1671 writeq(val64, &bar0->rx_w_round_robin_4);
1672
1673 val64 = 0x8040201008040201ULL;
1674 writeq(val64, &bar0->rts_qos_steering);
1675 break;
1676 }
1da177e4
LT
1677
1678 /* UDP Fix */
1679 val64 = 0;
20346722 1680 for (i = 0; i < 8; i++)
1da177e4
LT
1681 writeq(val64, &bar0->rts_frm_len_n[i]);
1682
5e25b9dd
K
1683 /* Set the default rts frame length for the rings configured */
1684 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1685 for (i = 0 ; i < config->rx_ring_num ; i++)
1686 writeq(val64, &bar0->rts_frm_len_n[i]);
1687
1688 /* Set the frame length for the configured rings
1689 * desired by the user
1690 */
1691 for (i = 0; i < config->rx_ring_num; i++) {
1692 /* If rts_frm_len[i] == 0 then it is assumed that user not
1693 * specified frame length steering.
1694 * If the user provides the frame length then program
1695 * the rts_frm_len register for those values or else
1696 * leave it as it is.
1697 */
1698 if (rts_frm_len[i] != 0) {
1699 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1700 &bar0->rts_frm_len_n[i]);
1701 }
1702 }
8a4bdbaa 1703
9fc93a41
SS
1704 /* Disable differentiated services steering logic */
1705 for (i = 0; i < 64; i++) {
1706 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1707 DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
1708 dev->name);
1709 DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
9f74ffde 1710 return -ENODEV;
9fc93a41
SS
1711 }
1712 }
1713
20346722 1714 /* Program statistics memory */
1da177e4 1715 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1da177e4 1716
541ae68f
K
1717 if (nic->device_type == XFRAME_II_DEVICE) {
1718 val64 = STAT_BC(0x320);
1719 writeq(val64, &bar0->stat_byte_cnt);
1720 }
1721
20346722 1722 /*
1da177e4
LT
1723 * Initializing the sampling rate for the device to calculate the
1724 * bandwidth utilization.
1725 */
1726 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1727 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1728 writeq(val64, &bar0->mac_link_util);
1729
20346722
K
1730 /*
1731 * Initializing the Transmit and Receive Traffic Interrupt
1da177e4
LT
1732 * Scheme.
1733 */
1da177e4 1734
b7c5678f
RV
1735 /* Initialize TTI */
1736 if (SUCCESS != init_tti(nic, nic->last_link_state))
1737 return -ENODEV;
1da177e4 1738
8a4bdbaa
SS
1739 /* RTI Initialization */
1740 if (nic->device_type == XFRAME_II_DEVICE) {
541ae68f 1741 /*
8a4bdbaa
SS
1742 * Programmed to generate Apprx 500 Intrs per
1743 * second
1744 */
1745 int count = (nic->config.bus_speed * 125)/4;
1746 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1747 } else
1748 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1749 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1750 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1751 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1752
1753 writeq(val64, &bar0->rti_data1_mem);
1754
1755 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1756 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1757 if (nic->config.intr_type == MSI_X)
1758 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1759 RTI_DATA2_MEM_RX_UFC_D(0x40));
1760 else
1761 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1762 RTI_DATA2_MEM_RX_UFC_D(0x80));
1763 writeq(val64, &bar0->rti_data2_mem);
1da177e4 1764
8a4bdbaa
SS
1765 for (i = 0; i < config->rx_ring_num; i++) {
1766 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1767 | RTI_CMD_MEM_OFFSET(i);
1768 writeq(val64, &bar0->rti_command_mem);
1da177e4 1769
8a4bdbaa
SS
1770 /*
1771 * Once the operation completes, the Strobe bit of the
1772 * command register will be reset. We poll for this
1773 * particular condition. We wait for a maximum of 500ms
1774 * for the operation to complete, if it's not complete
1775 * by then we return error.
1776 */
1777 time = 0;
1778 while (TRUE) {
1779 val64 = readq(&bar0->rti_command_mem);
1780 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1781 break;
b6e3f982 1782
8a4bdbaa
SS
1783 if (time > 10) {
1784 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1785 dev->name);
9f74ffde 1786 return -ENODEV;
b6e3f982 1787 }
8a4bdbaa
SS
1788 time++;
1789 msleep(50);
1da177e4 1790 }
1da177e4
LT
1791 }
1792
20346722
K
1793 /*
1794 * Initializing proper values as Pause threshold into all
1da177e4
LT
1795 * the 8 Queues on Rx side.
1796 */
1797 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1798 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1799
1800 /* Disable RMAC PAD STRIPPING */
509a2671 1801 add = &bar0->mac_cfg;
1da177e4
LT
1802 val64 = readq(&bar0->mac_cfg);
1803 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1804 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1805 writel((u32) (val64), add);
1806 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1807 writel((u32) (val64 >> 32), (add + 4));
1808 val64 = readq(&bar0->mac_cfg);
1809
7d3d0439
RA
1810 /* Enable FCS stripping by adapter */
1811 add = &bar0->mac_cfg;
1812 val64 = readq(&bar0->mac_cfg);
1813 val64 |= MAC_CFG_RMAC_STRIP_FCS;
1814 if (nic->device_type == XFRAME_II_DEVICE)
1815 writeq(val64, &bar0->mac_cfg);
1816 else {
1817 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1818 writel((u32) (val64), add);
1819 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1820 writel((u32) (val64 >> 32), (add + 4));
1821 }
1822
20346722
K
1823 /*
1824 * Set the time value to be inserted in the pause frame
1da177e4
LT
1825 * generated by xena.
1826 */
1827 val64 = readq(&bar0->rmac_pause_cfg);
1828 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1829 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1830 writeq(val64, &bar0->rmac_pause_cfg);
1831
20346722 1832 /*
1da177e4
LT
1833 * Set the Threshold Limit for Generating the pause frame
1834 * If the amount of data in any Queue exceeds ratio of
1835 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1836 * pause frame is generated
1837 */
1838 val64 = 0;
1839 for (i = 0; i < 4; i++) {
1840 val64 |=
1841 (((u64) 0xFF00 | nic->mac_control.
1842 mc_pause_threshold_q0q3)
1843 << (i * 2 * 8));
1844 }
1845 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1846
1847 val64 = 0;
1848 for (i = 0; i < 4; i++) {
1849 val64 |=
1850 (((u64) 0xFF00 | nic->mac_control.
1851 mc_pause_threshold_q4q7)
1852 << (i * 2 * 8));
1853 }
1854 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1855
20346722
K
1856 /*
1857 * TxDMA will stop Read request if the number of read split has
1da177e4
LT
1858 * exceeded the limit pointed by shared_splits
1859 */
1860 val64 = readq(&bar0->pic_control);
1861 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1862 writeq(val64, &bar0->pic_control);
1863
863c11a9
AR
1864 if (nic->config.bus_speed == 266) {
1865 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1866 writeq(0x0, &bar0->read_retry_delay);
1867 writeq(0x0, &bar0->write_retry_delay);
1868 }
1869
541ae68f
K
1870 /*
1871 * Programming the Herc to split every write transaction
1872 * that does not start on an ADB to reduce disconnects.
1873 */
1874 if (nic->device_type == XFRAME_II_DEVICE) {
19a60522
SS
1875 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1876 MISC_LINK_STABILITY_PRD(3);
863c11a9
AR
1877 writeq(val64, &bar0->misc_control);
1878 val64 = readq(&bar0->pic_control2);
b7b5a128 1879 val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
863c11a9 1880 writeq(val64, &bar0->pic_control2);
541ae68f 1881 }
c92ca04b
AR
1882 if (strstr(nic->product_name, "CX4")) {
1883 val64 = TMAC_AVG_IPG(0x17);
1884 writeq(val64, &bar0->tmac_avg_ipg);
a371a07d
K
1885 }
1886
1da177e4
LT
1887 return SUCCESS;
1888}
a371a07d
K
1889#define LINK_UP_DOWN_INTERRUPT 1
1890#define MAC_RMAC_ERR_TIMER 2
1891
1ee6dd77 1892static int s2io_link_fault_indication(struct s2io_nic *nic)
a371a07d 1893{
eaae7f72 1894 if (nic->config.intr_type != INTA)
cc6e7c44 1895 return MAC_RMAC_ERR_TIMER;
a371a07d
K
1896 if (nic->device_type == XFRAME_II_DEVICE)
1897 return LINK_UP_DOWN_INTERRUPT;
1898 else
1899 return MAC_RMAC_ERR_TIMER;
1900}
8116f3cf 1901
9caab458
SS
1902/**
1903 * do_s2io_write_bits - update alarm bits in alarm register
1904 * @value: alarm bits
1905 * @flag: interrupt status
1906 * @addr: address value
1907 * Description: update alarm bits in alarm register
1908 * Return Value:
1909 * NONE.
1910 */
1911static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1912{
1913 u64 temp64;
1914
1915 temp64 = readq(addr);
1916
1917 if(flag == ENABLE_INTRS)
1918 temp64 &= ~((u64) value);
1919 else
1920 temp64 |= ((u64) value);
1921 writeq(temp64, addr);
1922}
1da177e4 1923
43b7c451 1924static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
9caab458
SS
1925{
1926 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1927 register u64 gen_int_mask = 0;
1928
1929 if (mask & TX_DMA_INTR) {
1930
1931 gen_int_mask |= TXDMA_INT_M;
1932
1933 do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
1934 TXDMA_PCC_INT | TXDMA_TTI_INT |
1935 TXDMA_LSO_INT | TXDMA_TPA_INT |
1936 TXDMA_SM_INT, flag, &bar0->txdma_int_mask);
1937
1938 do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
1939 PFC_MISC_0_ERR | PFC_MISC_1_ERR |
1940 PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
1941 &bar0->pfc_err_mask);
1942
1943 do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
1944 TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
1945 TDA_PCIX_ERR, flag, &bar0->tda_err_mask);
1946
1947 do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
1948 PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
1949 PCC_N_SERR | PCC_6_COF_OV_ERR |
1950 PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
1951 PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
1952 PCC_TXB_ECC_SG_ERR, flag, &bar0->pcc_err_mask);
1953
1954 do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
1955 TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);
1956
1957 do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
1958 LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
1959 LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
1960 flag, &bar0->lso_err_mask);
1961
1962 do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
1963 flag, &bar0->tpa_err_mask);
1964
1965 do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
1966
1967 }
1968
1969 if (mask & TX_MAC_INTR) {
1970 gen_int_mask |= TXMAC_INT_M;
1971 do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
1972 &bar0->mac_int_mask);
1973 do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
1974 TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
1975 TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
1976 flag, &bar0->mac_tmac_err_mask);
1977 }
1978
1979 if (mask & TX_XGXS_INTR) {
1980 gen_int_mask |= TXXGXS_INT_M;
1981 do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
1982 &bar0->xgxs_int_mask);
1983 do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
1984 TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
1985 flag, &bar0->xgxs_txgxs_err_mask);
1986 }
1987
1988 if (mask & RX_DMA_INTR) {
1989 gen_int_mask |= RXDMA_INT_M;
1990 do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
1991 RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
1992 flag, &bar0->rxdma_int_mask);
1993 do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
1994 RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
1995 RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
1996 RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
1997 do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
1998 PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
1999 PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
2000 &bar0->prc_pcix_err_mask);
2001 do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
2002 RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
2003 &bar0->rpa_err_mask);
2004 do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
2005 RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
2006 RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
2007 RDA_FRM_ECC_SG_ERR | RDA_MISC_ERR|RDA_PCIX_ERR,
2008 flag, &bar0->rda_err_mask);
2009 do_s2io_write_bits(RTI_SM_ERR_ALARM |
2010 RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
2011 flag, &bar0->rti_err_mask);
2012 }
2013
2014 if (mask & RX_MAC_INTR) {
2015 gen_int_mask |= RXMAC_INT_M;
2016 do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
2017 &bar0->mac_int_mask);
2018 do_s2io_write_bits(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
2019 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
2020 RMAC_DOUBLE_ECC_ERR |
2021 RMAC_LINK_STATE_CHANGE_INT,
2022 flag, &bar0->mac_rmac_err_mask);
2023 }
2024
2025 if (mask & RX_XGXS_INTR)
2026 {
2027 gen_int_mask |= RXXGXS_INT_M;
2028 do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
2029 &bar0->xgxs_int_mask);
2030 do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
2031 &bar0->xgxs_rxgxs_err_mask);
2032 }
2033
2034 if (mask & MC_INTR) {
2035 gen_int_mask |= MC_INT_M;
2036 do_s2io_write_bits(MC_INT_MASK_MC_INT, flag, &bar0->mc_int_mask);
2037 do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
2038 MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
2039 &bar0->mc_err_mask);
2040 }
2041 nic->general_int_mask = gen_int_mask;
2042
2043 /* Remove this line when alarm interrupts are enabled */
2044 nic->general_int_mask = 0;
2045}
20346722
K
/**
 * en_dis_able_nic_intrs - Enable or Disable the interrupts
 * @nic: device private variable,
 * @mask: A mask indicating which Intr block must be modified and,
 * @flag: A flag indicating whether to enable or disable the Intrs.
 * Description: This function will either disable or enable the interrupts
 * depending on the flag argument. The mask argument can be used to
 * enable/disable any Intr block.
 * Return Value: NONE.
 */

static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 temp64 = 0, intr_mask = 0;

	/* Start from the alarm mask computed by en_dis_err_alarms() */
	intr_mask = nic->general_int_mask;

	/* Top level interrupt classification */
	/* PIC Interrupts */
	if (mask & TX_PIC_INTR) {
		/* Enable PIC Intrs in the general intr mask register */
		intr_mask |= TXPIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/*
			 * If Hercules adapter enable GPIO otherwise
			 * disable all PCIX, Flash, MDIO, IIC and GPIO
			 * interrupts for now.
			 * TODO
			 */
			if (s2io_link_fault_indication(nic) ==
					LINK_UP_DOWN_INTERRUPT ) {
				do_s2io_write_bits(PIC_INT_GPIO, flag,
						&bar0->pic_int_mask);
				do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
						&bar0->gpio_int_mask);
			} else
				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable PIC Intrs in the general
			 * intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
		}
	}

	/* Tx traffic interrupts */
	if (mask & TX_TRAFFIC_INTR) {
		intr_mask |= TXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/*
			 * Enable all the Tx side interrupts
			 * writing 0 Enables all 64 TX interrupt levels
			 */
			writeq(0x0, &bar0->tx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Tx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
		}
	}

	/* Rx traffic interrupts */
	if (mask & RX_TRAFFIC_INTR) {
		intr_mask |= RXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/* writing 0 Enables all 8 RX interrupt levels */
			writeq(0x0, &bar0->rx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Rx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
		}
	}

	/*
	 * Finally fold the accumulated top-level bits into the general
	 * interrupt mask: clear them to enable, or mask everything off.
	 */
	temp64 = readq(&bar0->general_int_mask);
	if (flag == ENABLE_INTRS)
		temp64 &= ~((u64) intr_mask);
	else
		temp64 = DISABLE_ALL_INTRS;
	writeq(temp64, &bar0->general_int_mask);

	/* Cache the value the hardware actually latched */
	nic->general_int_mask = readq(&bar0->general_int_mask);
}
2135
19a60522
SS
2136/**
2137 * verify_pcc_quiescent- Checks for PCC quiescent state
2138 * Return: 1 If PCC is quiescence
2139 * 0 If PCC is not quiescence
2140 */
1ee6dd77 2141static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
20346722 2142{
19a60522 2143 int ret = 0, herc;
1ee6dd77 2144 struct XENA_dev_config __iomem *bar0 = sp->bar0;
19a60522 2145 u64 val64 = readq(&bar0->adapter_status);
8a4bdbaa 2146
19a60522 2147 herc = (sp->device_type == XFRAME_II_DEVICE);
20346722
K
2148
2149 if (flag == FALSE) {
44c10138 2150 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
19a60522 2151 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
5e25b9dd 2152 ret = 1;
19a60522
SS
2153 } else {
2154 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
5e25b9dd 2155 ret = 1;
20346722
K
2156 }
2157 } else {
44c10138 2158 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
5e25b9dd 2159 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
19a60522 2160 ADAPTER_STATUS_RMAC_PCC_IDLE))
5e25b9dd 2161 ret = 1;
5e25b9dd
K
2162 } else {
2163 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
19a60522 2164 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
5e25b9dd 2165 ret = 1;
20346722
K
2166 }
2167 }
2168
2169 return ret;
2170}
/**
 * verify_xena_quiescence - Checks whether the H/W is ready
 * @sp: device private variable.
 * Description: Returns whether the H/W is ready to go or not, by
 * checking every readiness/quiescence bit in the adapter_status
 * register in turn and logging the first subsystem that is not ready.
 * Return: 1 If xena is quiescence
 * 0 If Xena is not quiescence
 */

static int verify_xena_quiescence(struct s2io_nic *sp)
{
	int mode;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = readq(&bar0->adapter_status);
	mode = s2io_verify_pci_mode(sp);

	/* Every DMA/MAC/memory subsystem must report ready */
	if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
		DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
		DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
		DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
		DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
		DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
		DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
		DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
		return 0;
	}
	if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
		DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
		return 0;
	}

	/*
	 * In PCI 33 mode, the P_PLL is not used, and therefore,
	 * the P_PLL_LOCK bit in the adapter_status register will
	 * not be asserted.
	 */
	if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
		sp->device_type == XFRAME_II_DEVICE && mode !=
		PCI_MODE_PCI_33) {
		DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
		return 0;
	}
	/* All RC_PRC (per-ring prefetch) units must be quiescent */
	if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
			ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
		DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
		return 0;
	}
	return 1;
}
2239
2240/**
2241 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
2242 * @sp: Pointer to device specifc structure
20346722 2243 * Description :
1da177e4
LT
2244 * New procedure to clear mac address reading problems on Alpha platforms
2245 *
2246 */
2247
1ee6dd77 2248static void fix_mac_address(struct s2io_nic * sp)
1da177e4 2249{
1ee6dd77 2250 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
2251 u64 val64;
2252 int i = 0;
2253
2254 while (fix_mac[i] != END_SIGN) {
2255 writeq(fix_mac[i++], &bar0->gpio_control);
20346722 2256 udelay(10);
1da177e4
LT
2257 val64 = readq(&bar0->gpio_control);
2258 }
2259}
2260
/**
 * start_nic - Turns the device on
 * @nic : device private variable.
 * Description:
 * This function actually turns the device on. Before this function is
 * called, all Registers are configured from their reset states
 * and shared memory is allocated but the NIC is still quiescent. On
 * calling this function, the device interrupts are cleared and the NIC is
 * literally switched on by writing into the adapter control register.
 * Return Value:
 * SUCCESS on success and FAILURE (-1) if the adapter is not quiescent.
 */

static int start_nic(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	u16 subid, i;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* PRC Initialization and configuration */
	for (i = 0; i < config->rx_ring_num; i++) {
		/* Point each ring's prefetch unit at its first Rx block */
		writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->rxd_mode == RXD_MODE_1)
			val64 |= PRC_CTRL_RC_ENABLED;
		else
			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		if (nic->device_type == XFRAME_II_DEVICE)
			val64 |= PRC_CTRL_GROUP_READS;
		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
		writeq(val64, &bar0->prc_ctrl_n[i]);
	}

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
		writeq(val64, &bar0->rx_pa_cfg);
	}

	/* Honour the module parameter that turns off VLAN tag stripping */
	if (vlan_tag_strip == 0) {
		val64 = readq(&bar0->rx_pa_cfg);
		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
		writeq(val64, &bar0->rx_pa_cfg);
		vlan_strip_flag = 0;
	}

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/* Enabling ECC Protection. */
	/* NOTE(review): the code CLEARS ADAPTER_ECC_EN here while the
	 * comment says "enabling" — presumably the bit is active-low
	 * (an ECC-disable bit); confirm against the Xframe register spec. */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
		DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Dont see link state interrupts initally on some switches,
		 * so directly scheduling the link state task here.
		 */
		schedule_work(&nic->set_link_task);
	}
	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	return SUCCESS;
}
fed5eccd
AR
2379/**
2380 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2381 */
1ee6dd77
RB
2382static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \
2383 TxD *txdlp, int get_off)
fed5eccd 2384{
1ee6dd77 2385 struct s2io_nic *nic = fifo_data->nic;
fed5eccd 2386 struct sk_buff *skb;
1ee6dd77 2387 struct TxD *txds;
fed5eccd
AR
2388 u16 j, frg_cnt;
2389
2390 txds = txdlp;
2fda096d 2391 if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
fed5eccd
AR
2392 pci_unmap_single(nic->pdev, (dma_addr_t)
2393 txds->Buffer_Pointer, sizeof(u64),
2394 PCI_DMA_TODEVICE);
2395 txds++;
2396 }
2397
2398 skb = (struct sk_buff *) ((unsigned long)
2399 txds->Host_Control);
2400 if (!skb) {
1ee6dd77 2401 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
fed5eccd
AR
2402 return NULL;
2403 }
2404 pci_unmap_single(nic->pdev, (dma_addr_t)
2405 txds->Buffer_Pointer,
2406 skb->len - skb->data_len,
2407 PCI_DMA_TODEVICE);
2408 frg_cnt = skb_shinfo(skb)->nr_frags;
2409 if (frg_cnt) {
2410 txds++;
2411 for (j = 0; j < frg_cnt; j++, txds++) {
2412 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2413 if (!txds->Buffer_Pointer)
2414 break;
6aa20a22 2415 pci_unmap_page(nic->pdev, (dma_addr_t)
fed5eccd
AR
2416 txds->Buffer_Pointer,
2417 frag->size, PCI_DMA_TODEVICE);
2418 }
2419 }
1ee6dd77 2420 memset(txdlp,0, (sizeof(struct TxD) * fifo_data->max_txds));
fed5eccd
AR
2421 return(skb);
2422}
1da177e4 2423
20346722
K
2424/**
2425 * free_tx_buffers - Free all queued Tx buffers
1da177e4 2426 * @nic : device private variable.
20346722 2427 * Description:
1da177e4 2428 * Free all queued Tx buffers.
20346722 2429 * Return Value: void
1da177e4
LT
2430*/
2431
2432static void free_tx_buffers(struct s2io_nic *nic)
2433{
2434 struct net_device *dev = nic->dev;
2435 struct sk_buff *skb;
1ee6dd77 2436 struct TxD *txdp;
1da177e4 2437 int i, j;
1ee6dd77 2438 struct mac_info *mac_control;
1da177e4 2439 struct config_param *config;
fed5eccd 2440 int cnt = 0;
1da177e4
LT
2441
2442 mac_control = &nic->mac_control;
2443 config = &nic->config;
2444
2445 for (i = 0; i < config->tx_fifo_num; i++) {
2fda096d
SR
2446 unsigned long flags;
2447 spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags);
b35b3b49 2448 for (j = 0; j < config->tx_cfg[i].fifo_len; j++) {
491976b2
SH
2449 txdp = (struct TxD *) \
2450 mac_control->fifos[i].list_info[j].list_virt_addr;
fed5eccd
AR
2451 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2452 if (skb) {
8a4bdbaa 2453 nic->mac_control.stats_info->sw_stat.mem_freed
491976b2 2454 += skb->truesize;
fed5eccd
AR
2455 dev_kfree_skb(skb);
2456 cnt++;
1da177e4 2457 }
1da177e4
LT
2458 }
2459 DBG_PRINT(INTR_DBG,
2460 "%s:forcibly freeing %d skbs on FIFO%d\n",
2461 dev->name, cnt, i);
20346722
K
2462 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2463 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2fda096d 2464 spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock, flags);
1da177e4
LT
2465 }
2466}
2467
20346722
K
2468/**
2469 * stop_nic - To stop the nic
1da177e4 2470 * @nic ; device private variable.
20346722
K
2471 * Description:
2472 * This function does exactly the opposite of what the start_nic()
1da177e4
LT
2473 * function does. This function is called to stop the device.
2474 * Return Value:
2475 * void.
2476 */
2477
2478static void stop_nic(struct s2io_nic *nic)
2479{
1ee6dd77 2480 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1da177e4 2481 register u64 val64 = 0;
5d3213cc 2482 u16 interruptible;
1ee6dd77 2483 struct mac_info *mac_control;
1da177e4
LT
2484 struct config_param *config;
2485
2486 mac_control = &nic->mac_control;
2487 config = &nic->config;
2488
2489 /* Disable all interrupts */
9caab458 2490 en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
e960fc5c 2491 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
9caab458 2492 interruptible |= TX_PIC_INTR;
1da177e4
LT
2493 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2494
5d3213cc
AR
2495 /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2496 val64 = readq(&bar0->adapter_control);
2497 val64 &= ~(ADAPTER_CNTL_EN);
2498 writeq(val64, &bar0->adapter_control);
1da177e4
LT
2499}
2500
20346722
K
2501/**
2502 * fill_rx_buffers - Allocates the Rx side skbs
0425b46a 2503 * @ring_info: per ring structure
3f78d885
SH
2504 * @from_card_up: If this is true, we will map the buffer to get
2505 * the dma address for buf0 and buf1 to give it to the card.
2506 * Else we will sync the already mapped buffer to give it to the card.
20346722 2507 * Description:
1da177e4
LT
2508 * The function allocates Rx side skbs and puts the physical
2509 * address of these buffers into the RxD buffer pointers, so that the NIC
2510 * can DMA the received frame into these locations.
2511 * The NIC supports 3 receive modes, viz
2512 * 1. single buffer,
2513 * 2. three buffer and
2514 * 3. Five buffer modes.
20346722
K
2515 * Each mode defines how many fragments the received frame will be split
2516 * up into by the NIC. The frame is split into L3 header, L4 Header,
1da177e4
LT
2517 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2518 * is split into 3 fragments. As of now only single buffer mode is
2519 * supported.
2520 * Return Value:
2521 * SUCCESS on success or an appropriate -ve value on failure.
2522 */
2523
3f78d885 2524static int fill_rx_buffers(struct ring_info *ring, int from_card_up)
1da177e4 2525{
1da177e4 2526 struct sk_buff *skb;
1ee6dd77 2527 struct RxD_t *rxdp;
0425b46a 2528 int off, size, block_no, block_no1;
1da177e4 2529 u32 alloc_tab = 0;
20346722 2530 u32 alloc_cnt;
20346722 2531 u64 tmp;
1ee6dd77 2532 struct buffAdd *ba;
1ee6dd77 2533 struct RxD_t *first_rxdp = NULL;
363dc367 2534 u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
0425b46a 2535 int rxd_index = 0;
6d517a27
VP
2536 struct RxD1 *rxdp1;
2537 struct RxD3 *rxdp3;
0425b46a 2538 struct swStat *stats = &ring->nic->mac_control.stats_info->sw_stat;
1da177e4 2539
0425b46a 2540 alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;
1da177e4 2541
0425b46a 2542 block_no1 = ring->rx_curr_get_info.block_index;
1da177e4 2543 while (alloc_tab < alloc_cnt) {
0425b46a 2544 block_no = ring->rx_curr_put_info.block_index;
1da177e4 2545
0425b46a
SH
2546 off = ring->rx_curr_put_info.offset;
2547
2548 rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;
2549
2550 rxd_index = off + 1;
2551 if (block_no)
2552 rxd_index += (block_no * ring->rxd_count);
da6971d8 2553
7d2e3cb7 2554 if ((block_no == block_no1) &&
0425b46a
SH
2555 (off == ring->rx_curr_get_info.offset) &&
2556 (rxdp->Host_Control)) {
da6971d8 2557 DBG_PRINT(INTR_DBG, "%s: Get and Put",
0425b46a 2558 ring->dev->name);
1da177e4
LT
2559 DBG_PRINT(INTR_DBG, " info equated\n");
2560 goto end;
2561 }
0425b46a
SH
2562 if (off && (off == ring->rxd_count)) {
2563 ring->rx_curr_put_info.block_index++;
2564 if (ring->rx_curr_put_info.block_index ==
2565 ring->block_count)
2566 ring->rx_curr_put_info.block_index = 0;
2567 block_no = ring->rx_curr_put_info.block_index;
2568 off = 0;
2569 ring->rx_curr_put_info.offset = off;
2570 rxdp = ring->rx_blocks[block_no].block_virt_addr;
1da177e4 2571 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
0425b46a
SH
2572 ring->dev->name, rxdp);
2573
1da177e4 2574 }
c9fcbf47 2575
da6971d8 2576 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
0425b46a 2577 ((ring->rxd_mode == RXD_MODE_3B) &&
b7b5a128 2578 (rxdp->Control_2 & s2BIT(0)))) {
0425b46a 2579 ring->rx_curr_put_info.offset = off;
1da177e4
LT
2580 goto end;
2581 }
da6971d8 2582 /* calculate size of skb based on ring mode */
0425b46a 2583 size = ring->mtu + HEADER_ETHERNET_II_802_3_SIZE +
da6971d8 2584 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
0425b46a 2585 if (ring->rxd_mode == RXD_MODE_1)
da6971d8 2586 size += NET_IP_ALIGN;
da6971d8 2587 else
0425b46a 2588 size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;
1da177e4 2589
da6971d8
AR
2590 /* allocate skb */
2591 skb = dev_alloc_skb(size);
2592 if(!skb) {
0425b46a 2593 DBG_PRINT(INFO_DBG, "%s: Out of ", ring->dev->name);
0c61ed5f 2594 DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
303bcb4b
K
2595 if (first_rxdp) {
2596 wmb();
2597 first_rxdp->Control_1 |= RXD_OWN_XENA;
2598 }
0425b46a 2599 stats->mem_alloc_fail_cnt++;
7d2e3cb7 2600
da6971d8
AR
2601 return -ENOMEM ;
2602 }
0425b46a
SH
2603 stats->mem_allocated += skb->truesize;
2604
2605 if (ring->rxd_mode == RXD_MODE_1) {
da6971d8 2606 /* 1 buffer mode - normal operation mode */
6d517a27 2607 rxdp1 = (struct RxD1*)rxdp;
1ee6dd77 2608 memset(rxdp, 0, sizeof(struct RxD1));
da6971d8 2609 skb_reserve(skb, NET_IP_ALIGN);
6d517a27 2610 rxdp1->Buffer0_ptr = pci_map_single
0425b46a 2611 (ring->pdev, skb->data, size - NET_IP_ALIGN,
863c11a9 2612 PCI_DMA_FROMDEVICE);
64c42f69 2613 if(pci_dma_mapping_error(rxdp1->Buffer0_ptr))
491abf25
VP
2614 goto pci_map_failed;
2615
8a4bdbaa 2616 rxdp->Control_2 =
491976b2 2617 SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
0425b46a
SH
2618 rxdp->Host_Control = (unsigned long) (skb);
2619 } else if (ring->rxd_mode == RXD_MODE_3B) {
da6971d8 2620 /*
6d517a27
VP
2621 * 2 buffer mode -
2622 * 2 buffer mode provides 128
da6971d8 2623 * byte aligned receive buffers.
da6971d8
AR
2624 */
2625
6d517a27 2626 rxdp3 = (struct RxD3*)rxdp;
491976b2 2627 /* save buffer pointers to avoid frequent dma mapping */
6d517a27
VP
2628 Buffer0_ptr = rxdp3->Buffer0_ptr;
2629 Buffer1_ptr = rxdp3->Buffer1_ptr;
1ee6dd77 2630 memset(rxdp, 0, sizeof(struct RxD3));
363dc367 2631 /* restore the buffer pointers for dma sync*/
6d517a27
VP
2632 rxdp3->Buffer0_ptr = Buffer0_ptr;
2633 rxdp3->Buffer1_ptr = Buffer1_ptr;
363dc367 2634
0425b46a 2635 ba = &ring->ba[block_no][off];
da6971d8
AR
2636 skb_reserve(skb, BUF0_LEN);
2637 tmp = (u64)(unsigned long) skb->data;
2638 tmp += ALIGN_SIZE;
2639 tmp &= ~ALIGN_SIZE;
2640 skb->data = (void *) (unsigned long)tmp;
27a884dc 2641 skb_reset_tail_pointer(skb);
da6971d8 2642
3f78d885 2643 if (from_card_up) {
6d517a27 2644 rxdp3->Buffer0_ptr =
0425b46a
SH
2645 pci_map_single(ring->pdev, ba->ba_0,
2646 BUF0_LEN, PCI_DMA_FROMDEVICE);
3f78d885
SH
2647 if (pci_dma_mapping_error(rxdp3->Buffer0_ptr))
2648 goto pci_map_failed;
2649 } else
0425b46a 2650 pci_dma_sync_single_for_device(ring->pdev,
6d517a27 2651 (dma_addr_t) rxdp3->Buffer0_ptr,
75c30b13 2652 BUF0_LEN, PCI_DMA_FROMDEVICE);
491abf25 2653
da6971d8 2654 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
0425b46a 2655 if (ring->rxd_mode == RXD_MODE_3B) {
da6971d8
AR
2656 /* Two buffer mode */
2657
2658 /*
6aa20a22 2659 * Buffer2 will have L3/L4 header plus
da6971d8
AR
2660 * L4 payload
2661 */
6d517a27 2662 rxdp3->Buffer2_ptr = pci_map_single
0425b46a 2663 (ring->pdev, skb->data, ring->mtu + 4,
da6971d8
AR
2664 PCI_DMA_FROMDEVICE);
2665
64c42f69 2666 if (pci_dma_mapping_error(rxdp3->Buffer2_ptr))
491abf25
VP
2667 goto pci_map_failed;
2668
3f78d885 2669 if (from_card_up) {
0425b46a
SH
2670 rxdp3->Buffer1_ptr =
2671 pci_map_single(ring->pdev,
75c30b13
AR
2672 ba->ba_1, BUF1_LEN,
2673 PCI_DMA_FROMDEVICE);
0425b46a 2674
3f78d885
SH
2675 if (pci_dma_mapping_error
2676 (rxdp3->Buffer1_ptr)) {
2677 pci_unmap_single
2678 (ring->pdev,
2679 (dma_addr_t)(unsigned long)
2680 skb->data,
2681 ring->mtu + 4,
2682 PCI_DMA_FROMDEVICE);
2683 goto pci_map_failed;
2684 }
75c30b13 2685 }
da6971d8
AR
2686 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2687 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
0425b46a 2688 (ring->mtu + 4);
da6971d8 2689 }
b7b5a128 2690 rxdp->Control_2 |= s2BIT(0);
0425b46a 2691 rxdp->Host_Control = (unsigned long) (skb);
1da177e4 2692 }
303bcb4b
K
2693 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2694 rxdp->Control_1 |= RXD_OWN_XENA;
1da177e4 2695 off++;
0425b46a 2696 if (off == (ring->rxd_count + 1))
da6971d8 2697 off = 0;
0425b46a 2698 ring->rx_curr_put_info.offset = off;
20346722 2699
da6971d8 2700 rxdp->Control_2 |= SET_RXD_MARKER;
303bcb4b
K
2701 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2702 if (first_rxdp) {
2703 wmb();
2704 first_rxdp->Control_1 |= RXD_OWN_XENA;
2705 }
2706 first_rxdp = rxdp;
2707 }
0425b46a 2708 ring->rx_bufs_left += 1;
1da177e4
LT
2709 alloc_tab++;
2710 }
2711
2712 end:
303bcb4b
K
2713 /* Transfer ownership of first descriptor to adapter just before
2714 * exiting. Before that, use memory barrier so that ownership
2715 * and other fields are seen by adapter correctly.
2716 */
2717 if (first_rxdp) {
2718 wmb();
2719 first_rxdp->Control_1 |= RXD_OWN_XENA;
2720 }
2721
1da177e4 2722 return SUCCESS;
491abf25
VP
2723pci_map_failed:
2724 stats->pci_map_fail_cnt++;
2725 stats->mem_freed += skb->truesize;
2726 dev_kfree_skb_irq(skb);
2727 return -ENOMEM;
1da177e4
LT
2728}
2729
da6971d8
AR
/*
 * free_rxd_blk - unmap and free every skb of one Rx descriptor block.
 * @sp: device private structure.
 * @ring_no: index of the ring that owns the block.
 * @blk: index of the block inside that ring.
 *
 * Walks all descriptors of the block, unmaps the DMA buffers according
 * to the configured receive descriptor mode (1-buffer or 3B), releases
 * the attached skbs and zeroes the descriptors.
 */
static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
{
	struct net_device *dev = sp->dev;
	int j;
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	struct mac_info *mac_control;
	struct buffAdd *ba;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;

	mac_control = &sp->mac_control;
	for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
		rxdp = mac_control->rings[ring_no].
				rx_blocks[blk].rxds[j].virt_addr;
		/* The skb pointer is stashed in Host_Control at fill time. */
		skb = (struct sk_buff *)
			((unsigned long) rxdp->Host_Control);
		if (!skb) {
			/* Descriptor never filled - nothing to release. */
			continue;
		}
		if (sp->rxd_mode == RXD_MODE_1) {
			/* One-buffer mode: a single mapping covers the frame. */
			rxdp1 = (struct RxD1*)rxdp;
			pci_unmap_single(sp->pdev, (dma_addr_t)
				rxdp1->Buffer0_ptr,
				dev->mtu +
				HEADER_ETHERNET_II_802_3_SIZE
				+ HEADER_802_2_SIZE +
				HEADER_SNAP_SIZE,
				PCI_DMA_FROMDEVICE);
			memset(rxdp, 0, sizeof(struct RxD1));
		} else if(sp->rxd_mode == RXD_MODE_3B) {
			/* Two-buffer (3B) mode: header + payload mappings. */
			rxdp3 = (struct RxD3*)rxdp;
			/* NOTE(review): 'ba' is looked up but never used in
			 * this branch - presumably a leftover; confirm. */
			ba = &mac_control->rings[ring_no].
				ba[blk][j];
			pci_unmap_single(sp->pdev, (dma_addr_t)
				rxdp3->Buffer0_ptr,
				BUF0_LEN,
				PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev, (dma_addr_t)
				rxdp3->Buffer1_ptr,
				BUF1_LEN,
				PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev, (dma_addr_t)
				rxdp3->Buffer2_ptr,
				dev->mtu + 4,
				PCI_DMA_FROMDEVICE);
			memset(rxdp, 0, sizeof(struct RxD3));
		}
		/* Account the freed memory before handing the skb back. */
		sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
		dev_kfree_skb(skb);
		mac_control->rings[ring_no].rx_bufs_left -= 1;
	}
}
2783
1da177e4 2784/**
20346722 2785 * free_rx_buffers - Frees all Rx buffers
1da177e4 2786 * @sp: device private variable.
20346722 2787 * Description:
1da177e4
LT
2788 * This function will free all Rx buffers allocated by host.
2789 * Return Value:
2790 * NONE.
2791 */
2792
2793static void free_rx_buffers(struct s2io_nic *sp)
2794{
2795 struct net_device *dev = sp->dev;
da6971d8 2796 int i, blk = 0, buf_cnt = 0;
1ee6dd77 2797 struct mac_info *mac_control;
1da177e4 2798 struct config_param *config;
1da177e4
LT
2799
2800 mac_control = &sp->mac_control;
2801 config = &sp->config;
2802
2803 for (i = 0; i < config->rx_ring_num; i++) {
da6971d8
AR
2804 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2805 free_rxd_blk(sp,i,blk);
1da177e4 2806
20346722
K
2807 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2808 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2809 mac_control->rings[i].rx_curr_put_info.offset = 0;
2810 mac_control->rings[i].rx_curr_get_info.offset = 0;
0425b46a 2811 mac_control->rings[i].rx_bufs_left = 0;
1da177e4
LT
2812 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2813 dev->name, buf_cnt, i);
2814 }
2815}
2816
f61e0a35
SH
2817static int s2io_chk_rx_buffers(struct ring_info *ring)
2818{
3f78d885 2819 if (fill_rx_buffers(ring, 0) == -ENOMEM) {
f61e0a35
SH
2820 DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name);
2821 DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
2822 }
2823 return 0;
2824}
2825
1da177e4
LT
2826/**
2827 * s2io_poll - Rx interrupt handler for NAPI support
bea3348e 2828 * @napi : pointer to the napi structure.
20346722 2829 * @budget : The number of packets that were budgeted to be processed
1da177e4
LT
2830 * during one pass through the 'Poll" function.
2831 * Description:
2832 * Comes into picture only if NAPI support has been incorporated. It does
2833 * the same thing that rx_intr_handler does, but not in a interrupt context
2834 * also It will process only a given number of packets.
2835 * Return value:
2836 * 0 on success and 1 if there are No Rx packets to be processed.
2837 */
2838
f61e0a35 2839static int s2io_poll_msix(struct napi_struct *napi, int budget)
1da177e4 2840{
f61e0a35
SH
2841 struct ring_info *ring = container_of(napi, struct ring_info, napi);
2842 struct net_device *dev = ring->dev;
1da177e4 2843 struct config_param *config;
f61e0a35
SH
2844 struct mac_info *mac_control;
2845 int pkts_processed = 0;
1a79d1c3
AV
2846 u8 __iomem *addr = NULL;
2847 u8 val8 = 0;
f61e0a35 2848 struct s2io_nic *nic = dev->priv;
1ee6dd77 2849 struct XENA_dev_config __iomem *bar0 = nic->bar0;
f61e0a35 2850 int budget_org = budget;
1da177e4 2851
1da177e4 2852 config = &nic->config;
f61e0a35 2853 mac_control = &nic->mac_control;
1da177e4 2854
f61e0a35
SH
2855 if (unlikely(!is_s2io_card_up(nic)))
2856 return 0;
1da177e4 2857
f61e0a35
SH
2858 pkts_processed = rx_intr_handler(ring, budget);
2859 s2io_chk_rx_buffers(ring);
1da177e4 2860
f61e0a35
SH
2861 if (pkts_processed < budget_org) {
2862 netif_rx_complete(dev, napi);
2863 /*Re Enable MSI-Rx Vector*/
1a79d1c3 2864 addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
f61e0a35
SH
2865 addr += 7 - ring->ring_no;
2866 val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
2867 writeb(val8, addr);
2868 val8 = readb(addr);
1da177e4 2869 }
f61e0a35
SH
2870 return pkts_processed;
2871}
2872static int s2io_poll_inta(struct napi_struct *napi, int budget)
2873{
2874 struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
2875 struct ring_info *ring;
2876 struct net_device *dev = nic->dev;
2877 struct config_param *config;
2878 struct mac_info *mac_control;
2879 int pkts_processed = 0;
2880 int ring_pkts_processed, i;
2881 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2882 int budget_org = budget;
1da177e4 2883
f61e0a35
SH
2884 config = &nic->config;
2885 mac_control = &nic->mac_control;
1da177e4 2886
f61e0a35
SH
2887 if (unlikely(!is_s2io_card_up(nic)))
2888 return 0;
1da177e4 2889
1da177e4 2890 for (i = 0; i < config->rx_ring_num; i++) {
f61e0a35
SH
2891 ring = &mac_control->rings[i];
2892 ring_pkts_processed = rx_intr_handler(ring, budget);
2893 s2io_chk_rx_buffers(ring);
2894 pkts_processed += ring_pkts_processed;
2895 budget -= ring_pkts_processed;
2896 if (budget <= 0)
1da177e4 2897 break;
1da177e4 2898 }
f61e0a35
SH
2899 if (pkts_processed < budget_org) {
2900 netif_rx_complete(dev, napi);
2901 /* Re enable the Rx interrupts for the ring */
2902 writeq(0, &bar0->rx_traffic_mask);
2903 readl(&bar0->rx_traffic_mask);
2904 }
2905 return pkts_processed;
1da177e4 2906}
20346722 2907
b41477f3 2908#ifdef CONFIG_NET_POLL_CONTROLLER
612eff0e 2909/**
b41477f3 2910 * s2io_netpoll - netpoll event handler entry point
612eff0e
BH
2911 * @dev : pointer to the device structure.
2912 * Description:
b41477f3
AR
2913 * This function will be called by upper layer to check for events on the
2914 * interface in situations where interrupts are disabled. It is used for
2915 * specific in-kernel networking tasks, such as remote consoles and kernel
2916 * debugging over the network (example netdump in RedHat).
612eff0e 2917 */
612eff0e
BH
2918static void s2io_netpoll(struct net_device *dev)
2919{
1ee6dd77
RB
2920 struct s2io_nic *nic = dev->priv;
2921 struct mac_info *mac_control;
612eff0e 2922 struct config_param *config;
1ee6dd77 2923 struct XENA_dev_config __iomem *bar0 = nic->bar0;
b41477f3 2924 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
612eff0e
BH
2925 int i;
2926
d796fdb7
LV
2927 if (pci_channel_offline(nic->pdev))
2928 return;
2929
612eff0e
BH
2930 disable_irq(dev->irq);
2931
612eff0e
BH
2932 mac_control = &nic->mac_control;
2933 config = &nic->config;
2934
612eff0e 2935 writeq(val64, &bar0->rx_traffic_int);
b41477f3
AR
2936 writeq(val64, &bar0->tx_traffic_int);
2937
6aa20a22 2938 /* we need to free up the transmitted skbufs or else netpoll will
b41477f3
AR
2939 * run out of skbs and will fail and eventually netpoll application such
2940 * as netdump will fail.
2941 */
2942 for (i = 0; i < config->tx_fifo_num; i++)
2943 tx_intr_handler(&mac_control->fifos[i]);
612eff0e 2944
b41477f3 2945 /* check for received packet and indicate up to network */
612eff0e 2946 for (i = 0; i < config->rx_ring_num; i++)
f61e0a35 2947 rx_intr_handler(&mac_control->rings[i], 0);
612eff0e
BH
2948
2949 for (i = 0; i < config->rx_ring_num; i++) {
3f78d885 2950 if (fill_rx_buffers(&mac_control->rings[i], 0) == -ENOMEM) {
0c61ed5f
RV
2951 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
2952 DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
612eff0e
BH
2953 break;
2954 }
2955 }
612eff0e
BH
2956 enable_irq(dev->irq);
2957 return;
2958}
2959#endif
2960
20346722 2961/**
1da177e4 2962 * rx_intr_handler - Rx interrupt handler
f61e0a35
SH
2963 * @ring_info: per ring structure.
2964 * @budget: budget for napi processing.
20346722
K
2965 * Description:
2966 * If the interrupt is because of a received frame or if the
1da177e4 2967 * receive ring contains fresh as yet un-processed frames,this function is
20346722
K
2968 * called. It picks out the RxD at which place the last Rx processing had
2969 * stopped and sends the skb to the OSM's Rx handler and then increments
1da177e4
LT
2970 * the offset.
2971 * Return Value:
f61e0a35 2972 * No. of napi packets processed.
1da177e4 2973 */
static int rx_intr_handler(struct ring_info *ring_data, int budget)
{
	int get_block, put_block;
	struct rx_curr_get_info get_info, put_info;
	struct RxD_t *rxdp;
	struct sk_buff *skb;
	int pkt_cnt = 0, napi_pkts = 0;
	int i;
	struct RxD1* rxdp1;
	struct RxD3* rxdp3;

	/* Snapshot the consumer (get) and producer (put) positions. */
	get_info = ring_data->rx_curr_get_info;
	get_block = get_info.block_index;
	memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
	put_block = put_info.block_index;
	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;

	/* Walk descriptors the adapter has handed back to the host. */
	while (RXD_IS_UP2DT(rxdp)) {
		/*
		 * If your are next to put index then it's
		 * FIFO full condition
		 */
		if ((get_block == put_block) &&
			(get_info.offset + 1) == put_info.offset) {
			DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
				ring_data->dev->name);
			break;
		}
		/* The skb pointer was stashed in Host_Control at fill time. */
		skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
		if (skb == NULL) {
			DBG_PRINT(ERR_DBG, "%s: The skb is ",
				ring_data->dev->name);
			DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
			return 0;
		}
		if (ring_data->rxd_mode == RXD_MODE_1) {
			/* One-buffer mode: unmap the whole frame buffer. */
			rxdp1 = (struct RxD1*)rxdp;
			pci_unmap_single(ring_data->pdev, (dma_addr_t)
				rxdp1->Buffer0_ptr,
				ring_data->mtu +
				HEADER_ETHERNET_II_802_3_SIZE +
				HEADER_802_2_SIZE +
				HEADER_SNAP_SIZE,
				PCI_DMA_FROMDEVICE);
		} else if (ring_data->rxd_mode == RXD_MODE_3B) {
			/* 3B mode: sync the header buffer for CPU access and
			 * unmap the payload buffer. */
			rxdp3 = (struct RxD3*)rxdp;
			pci_dma_sync_single_for_cpu(ring_data->pdev, (dma_addr_t)
				rxdp3->Buffer0_ptr,
				BUF0_LEN, PCI_DMA_FROMDEVICE);
			pci_unmap_single(ring_data->pdev, (dma_addr_t)
				rxdp3->Buffer2_ptr,
				ring_data->mtu + 4,
				PCI_DMA_FROMDEVICE);
		}
		prefetch(skb->data);
		/* Hand the frame to the OS-specific receive path. */
		rx_osm_handler(ring_data, rxdp);
		get_info.offset++;
		ring_data->rx_curr_get_info.offset = get_info.offset;
		rxdp = ring_data->rx_blocks[get_block].
				rxds[get_info.offset].virt_addr;
		/* Wrap to the next block when this one is consumed. */
		if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
			get_info.offset = 0;
			ring_data->rx_curr_get_info.offset = get_info.offset;
			get_block++;
			if (get_block == ring_data->block_count)
				get_block = 0;
			ring_data->rx_curr_get_info.block_index = get_block;
			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
		}

		/* Budget accounting applies only when NAPI is in use. */
		if (ring_data->nic->config.napi) {
			budget--;
			napi_pkts++;
			if (!budget)
				break;
		}
		pkt_cnt++;
		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
			break;
	}
	if (ring_data->lro) {
		/* Clear all LRO sessions before exiting */
		for (i=0; i<MAX_LRO_SESSIONS; i++) {
			struct lro *lro = &ring_data->lro0_n[i];
			if (lro->in_use) {
				update_L3L4_header(ring_data->nic, lro);
				queue_rx_frame(lro->parent, lro->vlan_tag);
				clear_lro_session(lro);
			}
		}
	}
	return(napi_pkts);
}
20346722
K
3067
3068/**
1da177e4
LT
3069 * tx_intr_handler - Transmit interrupt handler
3070 * @nic : device private variable
20346722
K
3071 * Description:
3072 * If an interrupt was raised to indicate DMA complete of the
3073 * Tx packet, this function is called. It identifies the last TxD
3074 * whose buffer was freed and frees all skbs whose data have already
1da177e4
LT
3075 * DMA'ed into the NICs internal memory.
3076 * Return Value:
3077 * NONE
3078 */
3079
static void tx_intr_handler(struct fifo_info *fifo_data)
{
	struct s2io_nic *nic = fifo_data->nic;
	struct tx_curr_get_info get_info, put_info;
	struct sk_buff *skb = NULL;
	struct TxD *txdlp;
	int pkt_cnt = 0;
	unsigned long flags = 0;
	u8 err_mask;

	/* Bail out if another context already holds the fifo lock. */
	if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
		return;

	get_info = fifo_data->tx_curr_get_info;
	memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
	txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
		list_virt_addr;
	/* Reclaim descriptors the NIC has finished DMA'ing from. */
	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
		(get_info.offset != put_info.offset) &&
		(txdlp->Host_Control)) {
		/* Check for TxD errors */
		if (txdlp->Control_1 & TXD_T_CODE) {
			unsigned long long err;
			err = txdlp->Control_1 & TXD_T_CODE;
			if (err & 0x1) {
				nic->mac_control.stats_info->sw_stat.
						parity_err_cnt++;
			}

			/* update t_code statistics */
			err_mask = err >> 48;
			switch(err_mask) {
			case 2:
				nic->mac_control.stats_info->sw_stat.
						tx_buf_abort_cnt++;
				break;

			case 3:
				nic->mac_control.stats_info->sw_stat.
						tx_desc_abort_cnt++;
				break;

			case 7:
				nic->mac_control.stats_info->sw_stat.
						tx_parity_err_cnt++;
				break;

			case 10:
				nic->mac_control.stats_info->sw_stat.
						tx_link_loss_cnt++;
				break;

			case 15:
				nic->mac_control.stats_info->sw_stat.
						tx_list_proc_err_cnt++;
				break;
			}
		}

		/* Detach and unmap the skb that owned this descriptor list. */
		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
		if (skb == NULL) {
			/* Must drop the lock before returning early. */
			spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
			DBG_PRINT(ERR_DBG, "%s: Null skb ",
			__FUNCTION__);
			DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
			return;
		}
		pkt_cnt++;

		/* Updating the statistics block */
		nic->stats.tx_bytes += skb->len;
		nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
		dev_kfree_skb_irq(skb);

		/* Advance the get pointer, wrapping at fifo_len + 1. */
		get_info.offset++;
		if (get_info.offset == get_info.fifo_len + 1)
			get_info.offset = 0;
		txdlp = (struct TxD *) fifo_data->list_info
			[get_info.offset].list_virt_addr;
		fifo_data->tx_curr_get_info.offset =
			get_info.offset;
	}

	/* Wake the queue if we freed enough descriptors. */
	s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);

	spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
}
3167
bd1034f0
AR
3168/**
3169 * s2io_mdio_write - Function to write in to MDIO registers
3170 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3171 * @addr : address value
3172 * @value : data value
3173 * @dev : pointer to net_device structure
3174 * Description:
3175 * This function is used to write values to the MDIO registers
3176 * NONE
3177 */
3178static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
3179{
3180 u64 val64 = 0x0;
1ee6dd77
RB
3181 struct s2io_nic *sp = dev->priv;
3182 struct XENA_dev_config __iomem *bar0 = sp->bar0;
bd1034f0
AR
3183
3184 //address transaction
3185 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3186 | MDIO_MMD_DEV_ADDR(mmd_type)
3187 | MDIO_MMS_PRT_ADDR(0x0);
3188 writeq(val64, &bar0->mdio_control);
3189 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3190 writeq(val64, &bar0->mdio_control);
3191 udelay(100);
3192
3193 //Data transaction
3194 val64 = 0x0;
3195 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3196 | MDIO_MMD_DEV_ADDR(mmd_type)
3197 | MDIO_MMS_PRT_ADDR(0x0)
3198 | MDIO_MDIO_DATA(value)
3199 | MDIO_OP(MDIO_OP_WRITE_TRANS);
3200 writeq(val64, &bar0->mdio_control);
3201 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3202 writeq(val64, &bar0->mdio_control);
3203 udelay(100);
3204
3205 val64 = 0x0;
3206 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3207 | MDIO_MMD_DEV_ADDR(mmd_type)
3208 | MDIO_MMS_PRT_ADDR(0x0)
3209 | MDIO_OP(MDIO_OP_READ_TRANS);
3210 writeq(val64, &bar0->mdio_control);
3211 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3212 writeq(val64, &bar0->mdio_control);
3213 udelay(100);
3214
3215}
3216
3217/**
3218 * s2io_mdio_read - Function to read from the MDIO registers
3219 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3220 * @addr : address value
3221 * @dev : pointer to net_device structure
3222 * Description:
3223 * This function is used to read values from the MDIO registers
3224 * NONE
3225 */
3226static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3227{
3228 u64 val64 = 0x0;
3229 u64 rval64 = 0x0;
1ee6dd77
RB
3230 struct s2io_nic *sp = dev->priv;
3231 struct XENA_dev_config __iomem *bar0 = sp->bar0;
bd1034f0
AR
3232
3233 /* address transaction */
3234 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3235 | MDIO_MMD_DEV_ADDR(mmd_type)
3236 | MDIO_MMS_PRT_ADDR(0x0);
3237 writeq(val64, &bar0->mdio_control);
3238 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3239 writeq(val64, &bar0->mdio_control);
3240 udelay(100);
3241
3242 /* Data transaction */
3243 val64 = 0x0;
3244 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3245 | MDIO_MMD_DEV_ADDR(mmd_type)
3246 | MDIO_MMS_PRT_ADDR(0x0)
3247 | MDIO_OP(MDIO_OP_READ_TRANS);
3248 writeq(val64, &bar0->mdio_control);
3249 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3250 writeq(val64, &bar0->mdio_control);
3251 udelay(100);
3252
3253 /* Read the value from regs */
3254 rval64 = readq(&bar0->mdio_control);
3255 rval64 = rval64 & 0xFFFF0000;
3256 rval64 = rval64 >> 16;
3257 return rval64;
3258}
3259/**
3260 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
3261 * @counter : counter value to be updated
3262 * @flag : flag to indicate the status
3263 * @type : counter type
3264 * Description:
3265 * This function is to check the status of the xpak counters value
3266 * NONE
3267 */
3268
3269static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
3270{
3271 u64 mask = 0x3;
3272 u64 val64;
3273 int i;
3274 for(i = 0; i <index; i++)
3275 mask = mask << 0x2;
3276
3277 if(flag > 0)
3278 {
3279 *counter = *counter + 1;
3280 val64 = *regs_stat & mask;
3281 val64 = val64 >> (index * 0x2);
3282 val64 = val64 + 1;
3283 if(val64 == 3)
3284 {
3285 switch(type)
3286 {
3287 case 1:
3288 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3289 "service. Excessive temperatures may "
3290 "result in premature transceiver "
3291 "failure \n");
3292 break;
3293 case 2:
3294 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3295 "service Excessive bias currents may "
3296 "indicate imminent laser diode "
3297 "failure \n");
3298 break;
3299 case 3:
3300 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3301 "service Excessive laser output "
3302 "power may saturate far-end "
3303 "receiver\n");
3304 break;
3305 default:
3306 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
3307 "type \n");
3308 }
3309 val64 = 0x0;
3310 }
3311 val64 = val64 << (index * 0x2);
3312 *regs_stat = (*regs_stat & (~mask)) | (val64);
3313
3314 } else {
3315 *regs_stat = *regs_stat & (~mask);
3316 }
3317}
3318
3319/**
3320 * s2io_updt_xpak_counter - Function to update the xpak counters
3321 * @dev : pointer to net_device struct
3322 * Description:
3323 * This function is to update the status of the xpak counters value
3324 * NONE
3325 */
static void s2io_updt_xpak_counter(struct net_device *dev)
{
	u16 flag  = 0x0;
	u16 type  = 0x0;
	u16 val16 = 0x0;
	u64 val64 = 0x0;
	u64 addr  = 0x0;

	struct s2io_nic *sp = dev->priv;
	struct stat_block *stat_info = sp->mac_control.stats_info;

	/* Check the communication with the MDIO slave */
	addr = 0x0000;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
	if((val64 == 0xFFFF) || (val64 == 0x0000))
	{
		DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
			  "Returned %llx\n", (unsigned long long)val64);
		return;
	}

	/* Check for the expected value of 2040 at PMA address 0x0000 */
	if(val64 != 0x2040)
	{
		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
		DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
			  (unsigned long long)val64);
		return;
	}

	/* Loading the DOM register to MDIO register */
	addr = 0xA100;
	s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	/* Reading the Alarm flags */
	addr = 0xA070;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	/* Bit 7: transceiver temperature high alarm (type 1). */
	flag = CHECKBIT(val64, 0x7);
	type = 1;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
			      &stat_info->xpak_stat.xpak_regs_stat,
			      0x0, flag, type);

	if(CHECKBIT(val64, 0x6))
		stat_info->xpak_stat.alarm_transceiver_temp_low++;

	/* Bit 3: laser bias current high alarm (type 2). */
	flag = CHECKBIT(val64, 0x3);
	type = 2;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
			      &stat_info->xpak_stat.xpak_regs_stat,
			      0x2, flag, type);

	if(CHECKBIT(val64, 0x2))
		stat_info->xpak_stat.alarm_laser_bias_current_low++;

	/* Bit 1: laser output power high alarm (type 3). */
	flag = CHECKBIT(val64, 0x1);
	type = 3;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
			      &stat_info->xpak_stat.xpak_regs_stat,
			      0x4, flag, type);

	if(CHECKBIT(val64, 0x0))
		stat_info->xpak_stat.alarm_laser_output_power_low++;

	/* Reading the Warning flags */
	addr = 0xA074;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	if(CHECKBIT(val64, 0x7))
		stat_info->xpak_stat.warn_transceiver_temp_high++;

	if(CHECKBIT(val64, 0x6))
		stat_info->xpak_stat.warn_transceiver_temp_low++;

	if(CHECKBIT(val64, 0x3))
		stat_info->xpak_stat.warn_laser_bias_current_high++;

	if(CHECKBIT(val64, 0x2))
		stat_info->xpak_stat.warn_laser_bias_current_low++;

	if(CHECKBIT(val64, 0x1))
		stat_info->xpak_stat.warn_laser_output_power_high++;

	if(CHECKBIT(val64, 0x0))
		stat_info->xpak_stat.warn_laser_output_power_low++;
}
3417
20346722 3418/**
1da177e4 3419 * wait_for_cmd_complete - waits for a command to complete.
20346722 3420 * @sp : private member of the device structure, which is a pointer to the
1da177e4 3421 * s2io_nic structure.
20346722
K
3422 * Description: Function that waits for a command to Write into RMAC
3423 * ADDR DATA registers to be completed and returns either success or
3424 * error depending on whether the command was complete or not.
1da177e4
LT
3425 * Return value:
3426 * SUCCESS on success and FAILURE on failure.
3427 */
3428
9fc93a41
SS
3429static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3430 int bit_state)
1da177e4 3431{
9fc93a41 3432 int ret = FAILURE, cnt = 0, delay = 1;
1da177e4
LT
3433 u64 val64;
3434
9fc93a41
SS
3435 if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3436 return FAILURE;
3437
3438 do {
c92ca04b 3439 val64 = readq(addr);
9fc93a41
SS
3440 if (bit_state == S2IO_BIT_RESET) {
3441 if (!(val64 & busy_bit)) {
3442 ret = SUCCESS;
3443 break;
3444 }
3445 } else {
3446 if (!(val64 & busy_bit)) {
3447 ret = SUCCESS;
3448 break;
3449 }
1da177e4 3450 }
c92ca04b
AR
3451
3452 if(in_interrupt())
9fc93a41 3453 mdelay(delay);
c92ca04b 3454 else
9fc93a41 3455 msleep(delay);
c92ca04b 3456
9fc93a41
SS
3457 if (++cnt >= 10)
3458 delay = 50;
3459 } while (cnt < 20);
1da177e4
LT
3460 return ret;
3461}
19a60522
SS
3462/*
3463 * check_pci_device_id - Checks if the device id is supported
3464 * @id : device id
3465 * Description: Function to check if the pci device id is supported by driver.
3466 * Return value: Actual device id if supported else PCI_ANY_ID
3467 */
3468static u16 check_pci_device_id(u16 id)
3469{
3470 switch (id) {
3471 case PCI_DEVICE_ID_HERC_WIN:
3472 case PCI_DEVICE_ID_HERC_UNI:
3473 return XFRAME_II_DEVICE;
3474 case PCI_DEVICE_ID_S2IO_UNI:
3475 case PCI_DEVICE_ID_S2IO_WIN:
3476 return XFRAME_I_DEVICE;
3477 default:
3478 return PCI_ANY_ID;
3479 }
3480}
1da177e4 3481
20346722
K
3482/**
3483 * s2io_reset - Resets the card.
1da177e4
LT
3484 * @sp : private member of the device structure.
3485 * Description: Function to Reset the card. This function then also
20346722 3486 * restores the previously saved PCI configuration space registers as
1da177e4
LT
3487 * the card reset also resets the configuration space.
3488 * Return value:
3489 * void.
3490 */
3491
static void s2io_reset(struct s2io_nic * sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	u16 subid, pci_cmd;
	int i;
	u16 val16;
	unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
	unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;

	DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
			__FUNCTION__, sp->dev->name);

	/* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));

	/* Issue the global software reset. */
	val64 = SW_RESET_ALL;
	writeq(val64, &bar0->sw_reset);
	/* CX4 boards need extra settle time after reset. */
	if (strstr(sp->product_name, "CX4")) {
		msleep(750);
	}
	msleep(250);
	for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {

		/* Restore the PCI state saved during initialization. */
		pci_restore_state(sp->pdev);
		/* Re-read the device id (config offset 0x2) to confirm the
		 * device came back; retry until it answers. */
		pci_read_config_word(sp->pdev, 0x2, &val16);
		if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
			break;
		msleep(200);
	}

	if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
		DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
	}

	/* Put the saved PCI-X command register back. */
	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);

	s2io_init_pci(sp);

	/* Set swapper to enable I/O register access */
	s2io_set_swapper(sp);

	/* restore mac_addr entries */
	do_s2io_restore_unicast_mc(sp);

	/* Restore the MSIX table entries from local variables */
	restore_xmsi_data(sp);

	/* Clear certain PCI/PCI-X fields after reset */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Clear "detected parity error" bit */
		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);

		/* Clearing PCIX Ecc status register */
		pci_write_config_dword(sp->pdev, 0x68, 0x7C);

		/* Clearing PCI_STATUS error reflected here */
		writeq(s2BIT(62), &bar0->txpic_int_reg);
	}

	/* Reset device statistics maintained by OS */
	memset(&sp->stats, 0, sizeof (struct net_device_stats));

	/* Snapshot the counters that must survive the stats reset. */
	up_cnt = sp->mac_control.stats_info->sw_stat.link_up_cnt;
	down_cnt = sp->mac_control.stats_info->sw_stat.link_down_cnt;
	up_time = sp->mac_control.stats_info->sw_stat.link_up_time;
	down_time = sp->mac_control.stats_info->sw_stat.link_down_time;
	reset_cnt = sp->mac_control.stats_info->sw_stat.soft_reset_cnt;
	mem_alloc_cnt = sp->mac_control.stats_info->sw_stat.mem_allocated;
	mem_free_cnt = sp->mac_control.stats_info->sw_stat.mem_freed;
	watchdog_cnt = sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt;
	/* save link up/down time/cnt, reset/memory/watchdog cnt */
	memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
	/* restore link up/down time/cnt, reset/memory/watchdog cnt */
	sp->mac_control.stats_info->sw_stat.link_up_cnt = up_cnt;
	sp->mac_control.stats_info->sw_stat.link_down_cnt = down_cnt;
	sp->mac_control.stats_info->sw_stat.link_up_time = up_time;
	sp->mac_control.stats_info->sw_stat.link_down_time = down_time;
	sp->mac_control.stats_info->sw_stat.soft_reset_cnt = reset_cnt;
	sp->mac_control.stats_info->sw_stat.mem_allocated = mem_alloc_cnt;
	sp->mac_control.stats_info->sw_stat.mem_freed = mem_free_cnt;
	sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt = watchdog_cnt;

	/* SXE-002: Configure link and activity LED to turn it off */
	subid = sp->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (sp->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	/*
	 * Clear spurious ECC interrupts that would have occured on
	 * XFRAME II cards after reset.
	 */
	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = readq(&bar0->pcc_err_reg);
		writeq(val64, &bar0->pcc_err_reg);
	}

	sp->device_enabled_once = FALSE;
}
3598
3599/**
20346722
K
3600 * s2io_set_swapper - to set the swapper controle on the card
3601 * @sp : private member of the device structure,
1da177e4 3602 * pointer to the s2io_nic structure.
20346722 3603 * Description: Function to set the swapper control on the card
1da177e4
LT
3604 * correctly depending on the 'endianness' of the system.
3605 * Return value:
3606 * SUCCESS on success and FAILURE on failure.
3607 */
3608
/**
 * s2io_set_swapper - program byte-swapper control to match host endianness.
 * @sp: private member of the device structure, pointer to s2io_nic.
 *
 * Tries candidate swapper settings until the PIF feed-back register reads
 * back the known pattern 0x0123456789ABCDEF, then separately verifies the
 * XMSI path, and finally programs the full per-path swap-enable mask.
 * Return value: SUCCESS on success, FAILURE if no setting produces a
 * correct feed-back read.
 */
static int s2io_set_swapper(struct s2io_nic * sp)
{
	struct net_device *dev = sp->dev;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64, valt, valr;

	/*
	 * Set proper endian settings and verify the same by reading
	 * the PIF Feed-back register.
	 */

	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		/* Feed-back pattern wrong: probe the four FE/SE combinations
		 * for the PIF read path until one reads back correctly. */
		int i = 0;
		u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
				0x8100008181000081ULL, /* FE=1, SE=0 */
				0x4200004242000042ULL, /* FE=0, SE=1 */
				0};		       /* FE=0, SE=0 */

		while(i<4) {
			writeq(value[i], &bar0->swapper_ctrl);
			val64 = readq(&bar0->pif_rd_swapper_fb);
			if (val64 == 0x0123456789ABCDEFULL)
				break;
			i++;
		}
		if (i == 4) {
			DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
				dev->name);
			DBG_PRINT(ERR_DBG, "feedback read %llx\n",
				(unsigned long long) val64);
			return FAILURE;
		}
		/* Remember the working read-path setting; it is OR-ed into
		 * the write-path probes below. */
		valr = value[i];
	} else {
		/* Read path already correct; keep the current setting. */
		valr = readq(&bar0->swapper_ctrl);
	}

	/* Verify the XMSI write path with the same known pattern. */
	valt = 0x0123456789ABCDEFULL;
	writeq(valt, &bar0->xmsi_address);
	val64 = readq(&bar0->xmsi_address);

	if(val64 != valt) {
		/* Probe the four FE/SE combinations for the XMSI path,
		 * preserving the working read-path bits in valr. */
		int i = 0;
		u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
				0x0081810000818100ULL, /* FE=1, SE=0 */
				0x0042420000424200ULL, /* FE=0, SE=1 */
				0};		       /* FE=0, SE=0 */

		while(i<4) {
			writeq((value[i] | valr), &bar0->swapper_ctrl);
			writeq(valt, &bar0->xmsi_address);
			val64 = readq(&bar0->xmsi_address);
			if(val64 == valt)
				break;
			i++;
		}
		if(i == 4) {
			unsigned long long x = val64;
			DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
			DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
			return FAILURE;
		}
	}
	/* Keep only the upper control bits discovered above; the per-path
	 * enables are rebuilt explicitly below. */
	val64 = readq(&bar0->swapper_ctrl);
	val64 &= 0xFFFF000000000000ULL;

#ifdef __BIG_ENDIAN
	/*
	 * The device by default set to a big endian format, so a
	 * big endian driver need not set anything.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		 SWAPPER_CTRL_TXP_SE |
		 SWAPPER_CTRL_TXD_R_FE |
		 SWAPPER_CTRL_TXD_W_FE |
		 SWAPPER_CTRL_TXF_R_FE |
		 SWAPPER_CTRL_RXD_R_FE |
		 SWAPPER_CTRL_RXD_W_FE |
		 SWAPPER_CTRL_RXF_W_FE |
		 SWAPPER_CTRL_XMSI_FE |
		 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
	/* XMSI swapping only needed when interrupts go through INTA */
	if (sp->config.intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#else
	/*
	 * Initially we enable all bits to make it accessible by the
	 * driver, then we selectively enable only those bits that
	 * we want to set.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		 SWAPPER_CTRL_TXP_SE |
		 SWAPPER_CTRL_TXD_R_FE |
		 SWAPPER_CTRL_TXD_R_SE |
		 SWAPPER_CTRL_TXD_W_FE |
		 SWAPPER_CTRL_TXD_W_SE |
		 SWAPPER_CTRL_TXF_R_FE |
		 SWAPPER_CTRL_RXD_R_FE |
		 SWAPPER_CTRL_RXD_R_SE |
		 SWAPPER_CTRL_RXD_W_FE |
		 SWAPPER_CTRL_RXD_W_SE |
		 SWAPPER_CTRL_RXF_W_FE |
		 SWAPPER_CTRL_XMSI_FE |
		 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
	/* XMSI swapping only needed when interrupts go through INTA */
	if (sp->config.intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#endif
	val64 = readq(&bar0->swapper_ctrl);

	/*
	 * Verifying if endian settings are accurate by reading a
	 * feedback register.
	 */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		/* Endian settings are incorrect, calls for another dekko. */
		DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
			  dev->name);
		DBG_PRINT(ERR_DBG, "feedback read %llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	return SUCCESS;
}
3736
1ee6dd77 3737static int wait_for_msix_trans(struct s2io_nic *nic, int i)
cc6e7c44 3738{
1ee6dd77 3739 struct XENA_dev_config __iomem *bar0 = nic->bar0;
cc6e7c44
RA
3740 u64 val64;
3741 int ret = 0, cnt = 0;
3742
3743 do {
3744 val64 = readq(&bar0->xmsi_access);
b7b5a128 3745 if (!(val64 & s2BIT(15)))
cc6e7c44
RA
3746 break;
3747 mdelay(1);
3748 cnt++;
3749 } while(cnt < 5);
3750 if (cnt == 5) {
3751 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3752 ret = 1;
3753 }
3754
3755 return ret;
3756}
3757
/*
 * restore_xmsi_data - replay saved MSI-X address/data pairs into the NIC.
 * @nic: device private structure.
 *
 * Writes each previously stored XMSI address/data pair back through the
 * indirect xmsi_access interface, e.g. after a chip reset. Xframe I has
 * no MSI-X support, so the function is a no-op there.
 */
static void restore_xmsi_data(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 val64;
	int i, msix_index;


	if (nic->device_type == XFRAME_I_DEVICE)
		return;

	for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
		/* Entry 0 is the alarm vector at hardware index 0; entries
		 * 1..n map to hardware indices 1, 9, 17, ... (stride 8),
		 * matching the layout set up in s2io_enable_msi_x(). */
		msix_index = (i) ? ((i-1) * 8 + 1): 0;
		writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
		writeq(nic->msix_info[i].data, &bar0->xmsi_data);
		/* s2BIT(7) selects a write, s2BIT(15) starts the access,
		 * and bits 26..31 carry the target XMSI index. */
		val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
		writeq(val64, &bar0->xmsi_access);
		if (wait_for_msix_trans(nic, msix_index)) {
			/* Best-effort: log and continue with the next entry */
			DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
			continue;
		}
	}
}
3780
/*
 * store_xmsi_data - read back and cache the NIC's MSI-X address/data pairs.
 * @nic: device private structure.
 *
 * Reads each XMSI entry through the indirect xmsi_access interface and
 * saves non-zero address/data pairs into nic->msix_info[], so they can be
 * re-programmed later by restore_xmsi_data(). No-op on Xframe I, which
 * lacks MSI-X.
 */
static void store_xmsi_data(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 val64, addr, data;
	int i, msix_index;

	if (nic->device_type == XFRAME_I_DEVICE)
		return;

	/* Store and display */
	for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
		/* Same index mapping as restore_xmsi_data(): entry 0 is the
		 * alarm vector, others stride by 8 starting at 1. */
		msix_index = (i) ? ((i-1) * 8 + 1): 0;
		/* s2BIT(15) starts a read access of the given index */
		val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
		writeq(val64, &bar0->xmsi_access);
		if (wait_for_msix_trans(nic, msix_index)) {
			DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
			continue;
		}
		addr = readq(&bar0->xmsi_address);
		data = readq(&bar0->xmsi_data);
		/* Only cache entries the hardware actually populated */
		if (addr && data) {
			nic->msix_info[i].addr = addr;
			nic->msix_info[i].data = data;
		}
	}
}
3807
/*
 * s2io_enable_msi_x - allocate and enable MSI-X vectors for the NIC.
 * @nic: device private structure.
 *
 * Allocates the kernel msix_entry table and the driver's parallel
 * s2io_msix_entry table, assigns entry 0 to the alarm/TX path and one
 * entry per RX ring (hardware indices 1, 9, 17, ... — stride 8), programs
 * the rx_mat steering register, and calls pci_enable_msix(). Memory
 * accounting is tracked in sw_stat throughout.
 * Returns 0 on success, -ENOMEM on allocation or pci_enable_msix() failure
 * (both tables are freed and NULLed on failure).
 */
static int s2io_enable_msi_x(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 rx_mat;
	u16 msi_control; /* Temp variable */
	int ret, i, j, msix_indx = 1;

	nic->entries = kmalloc(nic->num_entries * sizeof(struct msix_entry),
			       GFP_KERNEL);
	if (!nic->entries) {
		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
			__FUNCTION__);
		nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
		return -ENOMEM;
	}
	nic->mac_control.stats_info->sw_stat.mem_allocated
		+= (nic->num_entries * sizeof(struct msix_entry));

	memset(nic->entries, 0, nic->num_entries * sizeof(struct msix_entry));

	nic->s2io_entries =
		kmalloc(nic->num_entries * sizeof(struct s2io_msix_entry),
				GFP_KERNEL);
	if (!nic->s2io_entries) {
		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
			__FUNCTION__);
		nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
		kfree(nic->entries);
		nic->mac_control.stats_info->sw_stat.mem_freed
			+= (nic->num_entries * sizeof(struct msix_entry));
		return -ENOMEM;
	}
	nic->mac_control.stats_info->sw_stat.mem_allocated
		+= (nic->num_entries * sizeof(struct s2io_msix_entry));
	memset(nic->s2io_entries, 0,
		nic->num_entries * sizeof(struct s2io_msix_entry));

	/* Entry 0 is the alarm vector; its argument is the fifo array so
	 * the fifo handler can service TX completions too. */
	nic->entries[0].entry = 0;
	nic->s2io_entries[0].entry = 0;
	nic->s2io_entries[0].in_use = MSIX_FLG;
	nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
	nic->s2io_entries[0].arg = &nic->mac_control.fifos;

	/* Remaining entries use hardware indices 1, 9, 17, ... (stride 8);
	 * restore/store_xmsi_data() rely on this same mapping. */
	for (i = 1; i < nic->num_entries; i++) {
		nic->entries[i].entry = ((i - 1) * 8) + 1;
		nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
		nic->s2io_entries[i].arg = NULL;
		nic->s2io_entries[i].in_use = 0;
	}

	/* Steer each RX ring to its MSI-X index via the rx_mat register */
	rx_mat = readq(&bar0->rx_mat);
	for (j = 0; j < nic->config.rx_ring_num; j++) {
		rx_mat |= RX_MAT_SET(j, msix_indx);
		nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
		nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
		nic->s2io_entries[j+1].in_use = MSIX_FLG;
		msix_indx += 8;
	}
	writeq(rx_mat, &bar0->rx_mat);
	readq(&bar0->rx_mat); /* read back to flush the posted write */

	ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries);
	/* We fail init if error or we get less vectors than min required */
	if (ret) {
		DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
		kfree(nic->entries);
		nic->mac_control.stats_info->sw_stat.mem_freed
			+= (nic->num_entries * sizeof(struct msix_entry));
		kfree(nic->s2io_entries);
		nic->mac_control.stats_info->sw_stat.mem_freed
			+= (nic->num_entries * sizeof(struct s2io_msix_entry));
		nic->entries = NULL;
		nic->s2io_entries = NULL;
		return -ENOMEM;
	}

	/*
	 * To enable MSI-X, MSI also needs to be enabled, due to a bug
	 * in the herc NIC. (Temp change, needs to be removed later)
	 */
	pci_read_config_word(nic->pdev, 0x42, &msi_control);
	msi_control |= 0x1; /* Enable MSI */
	pci_write_config_word(nic->pdev, 0x42, msi_control);

	return 0;
}
3894
8abc4d5b 3895/* Handle software interrupt used during MSI(X) test */
33390a70 3896static irqreturn_t s2io_test_intr(int irq, void *dev_id)
8abc4d5b
SS
3897{
3898 struct s2io_nic *sp = dev_id;
3899
3900 sp->msi_detected = 1;
3901 wake_up(&sp->msi_wait);
3902
3903 return IRQ_HANDLED;
3904}
3905
/* Test interrupt path by forcing a software IRQ.
 *
 * Registers s2io_test_intr() on MSI-X vector 1, programs the scheduled
 * interrupt timer to fire one shot routed to MSI index 1, and waits up to
 * HZ/10 for the handler to set sp->msi_detected. The original
 * scheduled_int_ctrl value is restored before returning.
 * Returns 0 if the interrupt was delivered, -EOPNOTSUPP if it was not,
 * or the request_irq() error code.
 */
static int s2io_test_msi(struct s2io_nic *sp)
{
	struct pci_dev *pdev = sp->pdev;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	int err;
	u64 val64, saved64;

	err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
			sp->name, sp);
	if (err) {
		DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
		       sp->dev->name, pci_name(pdev), pdev->irq);
		return err;
	}

	init_waitqueue_head (&sp->msi_wait);
	sp->msi_detected = 0;

	/* Save the register so it can be restored, then arm a one-shot
	 * timer interrupt steered to MSI index 1. */
	saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
	val64 |= SCHED_INT_CTRL_ONE_SHOT;
	val64 |= SCHED_INT_CTRL_TIMER_EN;
	val64 |= SCHED_INT_CTRL_INT2MSI(1);
	writeq(val64, &bar0->scheduled_int_ctrl);

	wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);

	if (!sp->msi_detected) {
		/* MSI(X) test failed, go back to INTx mode */
		DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
			"using MSI(X) during test\n", sp->dev->name,
			pci_name(pdev));

		err = -EOPNOTSUPP;
	}

	free_irq(sp->entries[1].vector, sp);

	writeq(saved64, &bar0->scheduled_int_ctrl);

	return err;
}
18b2b7bd
SH
3948
3949static void remove_msix_isr(struct s2io_nic *sp)
3950{
3951 int i;
3952 u16 msi_control;
3953
f61e0a35 3954 for (i = 0; i < sp->num_entries; i++) {
18b2b7bd
SH
3955 if (sp->s2io_entries[i].in_use ==
3956 MSIX_REGISTERED_SUCCESS) {
3957 int vector = sp->entries[i].vector;
3958 void *arg = sp->s2io_entries[i].arg;
3959 free_irq(vector, arg);
3960 }
3961 }
3962
3963 kfree(sp->entries);
3964 kfree(sp->s2io_entries);
3965 sp->entries = NULL;
3966 sp->s2io_entries = NULL;
3967
3968 pci_read_config_word(sp->pdev, 0x42, &msi_control);
3969 msi_control &= 0xFFFE; /* Disable MSI */
3970 pci_write_config_word(sp->pdev, 0x42, msi_control);
3971
3972 pci_disable_msix(sp->pdev);
3973}
3974
3975static void remove_inta_isr(struct s2io_nic *sp)
3976{
3977 struct net_device *dev = sp->dev;
3978
3979 free_irq(sp->pdev->irq, dev);
3980}
3981
1da177e4
LT
3982/* ********************************************************* *
3983 * Functions defined below concern the OS part of the driver *
3984 * ********************************************************* */
3985
20346722 3986/**
1da177e4
LT
3987 * s2io_open - open entry point of the driver
3988 * @dev : pointer to the device structure.
3989 * Description:
3990 * This function is the open entry point of the driver. It mainly calls a
3991 * function to allocate Rx buffers and inserts them into the buffer
20346722 3992 * descriptors and then enables the Rx part of the NIC.
1da177e4
LT
3993 * Return value:
3994 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3995 * file on failure.
3996 */
3997
static int s2io_open(struct net_device *dev)
{
	struct s2io_nic *sp = dev->priv;
	int err = 0;

	/*
	 * Make sure you have link off by default every time
	 * Nic is initialized
	 */
	netif_carrier_off(dev);
	sp->last_link_state = 0;

	/* Initialize H/W and enable interrupts */
	err = s2io_card_up(sp);
	if (err) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
			  dev->name);
		goto hw_init_failed;
	}

	/* Program the primary unicast MAC address into the hardware */
	if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
		DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
		s2io_card_down(sp);
		err = -ENODEV;
		goto hw_init_failed;
	}
	s2io_start_all_tx_queue(sp);
	return 0;

hw_init_failed:
	/* On failure release the MSI-X tables that s2io_card_up() may have
	 * allocated, keeping the sw_stat memory accounting balanced. */
	if (sp->config.intr_type == MSI_X) {
		if (sp->entries) {
			kfree(sp->entries);
			sp->mac_control.stats_info->sw_stat.mem_freed
			+= (sp->num_entries * sizeof(struct msix_entry));
		}
		if (sp->s2io_entries) {
			kfree(sp->s2io_entries);
			sp->mac_control.stats_info->sw_stat.mem_freed
			+= (sp->num_entries * sizeof(struct s2io_msix_entry));
		}
	}
	return err;
}
4042
4043/**
4044 * s2io_close -close entry point of the driver
4045 * @dev : device pointer.
4046 * Description:
4047 * This is the stop entry point of the driver. It needs to undo exactly
4048 * whatever was done by the open entry point,thus it's usually referred to
4049 * as the close function.Among other things this function mainly stops the
4050 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
4051 * Return value:
4052 * 0 on success and an appropriate (-)ve integer as defined in errno.h
4053 * file on failure.
4054 */
4055
ac1f60db 4056static int s2io_close(struct net_device *dev)
1da177e4 4057{
1ee6dd77 4058 struct s2io_nic *sp = dev->priv;
faa4f796
SH
4059 struct config_param *config = &sp->config;
4060 u64 tmp64;
4061 int offset;
cc6e7c44 4062
9f74ffde
SH
4063 /* Return if the device is already closed *
4064 * Can happen when s2io_card_up failed in change_mtu *
4065 */
4066 if (!is_s2io_card_up(sp))
4067 return 0;
4068
3a3d5756 4069 s2io_stop_all_tx_queue(sp);
faa4f796
SH
4070 /* delete all populated mac entries */
4071 for (offset = 1; offset < config->max_mc_addr; offset++) {
4072 tmp64 = do_s2io_read_unicast_mc(sp, offset);
4073 if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
4074 do_s2io_delete_unicast_mc(sp, tmp64);
4075 }
4076
e6a8fee2 4077 s2io_card_down(sp);
cc6e7c44 4078
1da177e4
LT
4079 return 0;
4080}
4081
4082/**
4083 * s2io_xmit - Tx entry point of te driver
4084 * @skb : the socket buffer containing the Tx data.
4085 * @dev : device pointer.
4086 * Description :
4087 * This function is the Tx entry point of the driver. S2IO NIC supports
4088 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
4089 * NOTE: when device cant queue the pkt,just the trans_start variable will
4090 * not be upadted.
4091 * Return value:
4092 * 0 on success & 1 on failure.
4093 */
4094
ac1f60db 4095static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
1da177e4 4096{
1ee6dd77 4097 struct s2io_nic *sp = dev->priv;
1da177e4
LT
4098 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
4099 register u64 val64;
1ee6dd77
RB
4100 struct TxD *txdp;
4101 struct TxFIFO_element __iomem *tx_fifo;
2fda096d 4102 unsigned long flags = 0;
be3a6b02 4103 u16 vlan_tag = 0;
2fda096d 4104 struct fifo_info *fifo = NULL;
1ee6dd77 4105 struct mac_info *mac_control;
1da177e4 4106 struct config_param *config;
6cfc482b 4107 int do_spin_lock = 1;
75c30b13 4108 int offload_type;
6cfc482b 4109 int enable_per_list_interrupt = 0;
491abf25 4110 struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
1da177e4
LT
4111
4112 mac_control = &sp->mac_control;
4113 config = &sp->config;
4114
20346722 4115 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
491976b2
SH
4116
4117 if (unlikely(skb->len <= 0)) {
4118 DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
4119 dev_kfree_skb_any(skb);
4120 return 0;
2fda096d 4121 }
491976b2 4122
92b84437 4123 if (!is_s2io_card_up(sp)) {
20346722 4124 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
1da177e4 4125 dev->name);
20346722
K
4126 dev_kfree_skb(skb);
4127 return 0;
1da177e4
LT
4128 }
4129
4130 queue = 0;
3a3d5756 4131 if (sp->vlgrp && vlan_tx_tag_present(skb))
be3a6b02 4132 vlan_tag = vlan_tx_tag_get(skb);
6cfc482b
SH
4133 if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
4134 if (skb->protocol == htons(ETH_P_IP)) {
4135 struct iphdr *ip;
4136 struct tcphdr *th;
4137 ip = ip_hdr(skb);
4138
4139 if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) {
4140 th = (struct tcphdr *)(((unsigned char *)ip) +
4141 ip->ihl*4);
4142
4143 if (ip->protocol == IPPROTO_TCP) {
4144 queue_len = sp->total_tcp_fifos;
4145 queue = (ntohs(th->source) +
4146 ntohs(th->dest)) &
4147 sp->fifo_selector[queue_len - 1];
4148 if (queue >= queue_len)
4149 queue = queue_len - 1;
4150 } else if (ip->protocol == IPPROTO_UDP) {
4151 queue_len = sp->total_udp_fifos;
4152 queue = (ntohs(th->source) +
4153 ntohs(th->dest)) &
4154 sp->fifo_selector[queue_len - 1];
4155 if (queue >= queue_len)
4156 queue = queue_len - 1;
4157 queue += sp->udp_fifo_idx;
4158 if (skb->len > 1024)
4159 enable_per_list_interrupt = 1;
4160 do_spin_lock = 0;
4161 }
4162 }
4163 }
4164 } else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
4165 /* get fifo number based on skb->priority value */
4166 queue = config->fifo_mapping
4167 [skb->priority & (MAX_TX_FIFOS - 1)];
4168 fifo = &mac_control->fifos[queue];
3a3d5756 4169
6cfc482b
SH
4170 if (do_spin_lock)
4171 spin_lock_irqsave(&fifo->tx_lock, flags);
4172 else {
4173 if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
4174 return NETDEV_TX_LOCKED;
4175 }
be3a6b02 4176
3a3d5756
SH
4177 if (sp->config.multiq) {
4178 if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
4179 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4180 return NETDEV_TX_BUSY;
4181 }
b19fa1fa 4182 } else if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
3a3d5756
SH
4183 if (netif_queue_stopped(dev)) {
4184 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4185 return NETDEV_TX_BUSY;
4186 }
4187 }
4188
2fda096d
SR
4189 put_off = (u16) fifo->tx_curr_put_info.offset;
4190 get_off = (u16) fifo->tx_curr_get_info.offset;
4191 txdp = (struct TxD *) fifo->list_info[put_off].list_virt_addr;
20346722 4192
2fda096d 4193 queue_len = fifo->tx_curr_put_info.fifo_len + 1;
1da177e4 4194 /* Avoid "put" pointer going beyond "get" pointer */
863c11a9
AR
4195 if (txdp->Host_Control ||
4196 ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
776bd20f 4197 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
3a3d5756 4198 s2io_stop_tx_queue(sp, fifo->fifo_no);
1da177e4 4199 dev_kfree_skb(skb);
2fda096d 4200 spin_unlock_irqrestore(&fifo->tx_lock, flags);
1da177e4
LT
4201 return 0;
4202 }
0b1f7ebe 4203
75c30b13 4204 offload_type = s2io_offload_type(skb);
75c30b13 4205 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
1da177e4 4206 txdp->Control_1 |= TXD_TCP_LSO_EN;
75c30b13 4207 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
1da177e4 4208 }
84fa7933 4209 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1da177e4
LT
4210 txdp->Control_2 |=
4211 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
4212 TXD_TX_CKO_UDP_EN);
4213 }
fed5eccd
AR
4214 txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
4215 txdp->Control_1 |= TXD_LIST_OWN_XENA;
2fda096d 4216 txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
6cfc482b
SH
4217 if (enable_per_list_interrupt)
4218 if (put_off & (queue_len >> 5))
4219 txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
3a3d5756 4220 if (vlan_tag) {
be3a6b02
K
4221 txdp->Control_2 |= TXD_VLAN_ENABLE;
4222 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
4223 }
4224
fed5eccd 4225 frg_len = skb->len - skb->data_len;
75c30b13 4226 if (offload_type == SKB_GSO_UDP) {
fed5eccd
AR
4227 int ufo_size;
4228
75c30b13 4229 ufo_size = s2io_udp_mss(skb);
fed5eccd
AR
4230 ufo_size &= ~7;
4231 txdp->Control_1 |= TXD_UFO_EN;
4232 txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
4233 txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
4234#ifdef __BIG_ENDIAN
3459feb8 4235 /* both variants do cpu_to_be64(be32_to_cpu(...)) */
2fda096d 4236 fifo->ufo_in_band_v[put_off] =
3459feb8 4237 (__force u64)skb_shinfo(skb)->ip6_frag_id;
fed5eccd 4238#else
2fda096d 4239 fifo->ufo_in_band_v[put_off] =
3459feb8 4240 (__force u64)skb_shinfo(skb)->ip6_frag_id << 32;
fed5eccd 4241#endif
2fda096d 4242 txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v;
fed5eccd 4243 txdp->Buffer_Pointer = pci_map_single(sp->pdev,
2fda096d 4244 fifo->ufo_in_band_v,
fed5eccd 4245 sizeof(u64), PCI_DMA_TODEVICE);
64c42f69 4246 if (pci_dma_mapping_error(txdp->Buffer_Pointer))
491abf25 4247 goto pci_map_failed;
fed5eccd 4248 txdp++;
fed5eccd 4249 }
1da177e4 4250
fed5eccd
AR
4251 txdp->Buffer_Pointer = pci_map_single
4252 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
64c42f69 4253 if (pci_dma_mapping_error(txdp->Buffer_Pointer))
491abf25
VP
4254 goto pci_map_failed;
4255
fed5eccd
AR
4256 txdp->Host_Control = (unsigned long) skb;
4257 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
75c30b13 4258 if (offload_type == SKB_GSO_UDP)
fed5eccd
AR
4259 txdp->Control_1 |= TXD_UFO_EN;
4260
4261 frg_cnt = skb_shinfo(skb)->nr_frags;
1da177e4
LT
4262 /* For fragmented SKB. */
4263 for (i = 0; i < frg_cnt; i++) {
4264 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
0b1f7ebe
K
4265 /* A '0' length fragment will be ignored */
4266 if (!frag->size)
4267 continue;
1da177e4
LT
4268 txdp++;
4269 txdp->Buffer_Pointer = (u64) pci_map_page
4270 (sp->pdev, frag->page, frag->page_offset,
4271 frag->size, PCI_DMA_TODEVICE);
efd51b5c 4272 txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
75c30b13 4273 if (offload_type == SKB_GSO_UDP)
fed5eccd 4274 txdp->Control_1 |= TXD_UFO_EN;
1da177e4
LT
4275 }
4276 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
4277
75c30b13 4278 if (offload_type == SKB_GSO_UDP)
fed5eccd
AR
4279 frg_cnt++; /* as Txd0 was used for inband header */
4280
1da177e4 4281 tx_fifo = mac_control->tx_FIFO_start[queue];
2fda096d 4282 val64 = fifo->list_info[put_off].list_phy_addr;
1da177e4
LT
4283 writeq(val64, &tx_fifo->TxDL_Pointer);
4284
4285 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
4286 TX_FIFO_LAST_LIST);
75c30b13 4287 if (offload_type)
fed5eccd 4288 val64 |= TX_FIFO_SPECIAL_FUNC;
75c30b13 4289
1da177e4
LT
4290 writeq(val64, &tx_fifo->List_Control);
4291
303bcb4b
K
4292 mmiowb();
4293
1da177e4 4294 put_off++;
2fda096d 4295 if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
863c11a9 4296 put_off = 0;
2fda096d 4297 fifo->tx_curr_put_info.offset = put_off;
1da177e4
LT
4298
4299 /* Avoid "put" pointer going beyond "get" pointer */
863c11a9 4300 if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
bd1034f0 4301 sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
1da177e4
LT
4302 DBG_PRINT(TX_DBG,
4303 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4304 put_off, get_off);
3a3d5756 4305 s2io_stop_tx_queue(sp, fifo->fifo_no);
1da177e4 4306 }
491976b2 4307 mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
1da177e4 4308 dev->trans_start = jiffies;
2fda096d 4309 spin_unlock_irqrestore(&fifo->tx_lock, flags);
1da177e4 4310
f6f4bfa3
SH
4311 if (sp->config.intr_type == MSI_X)
4312 tx_intr_handler(fifo);
4313
491abf25
VP
4314 return 0;
4315pci_map_failed:
4316 stats->pci_map_fail_cnt++;
3a3d5756 4317 s2io_stop_tx_queue(sp, fifo->fifo_no);
491abf25
VP
4318 stats->mem_freed += skb->truesize;
4319 dev_kfree_skb(skb);
2fda096d 4320 spin_unlock_irqrestore(&fifo->tx_lock, flags);
1da177e4
LT
4321 return 0;
4322}
4323
25fff88e
K
4324static void
4325s2io_alarm_handle(unsigned long data)
4326{
1ee6dd77 4327 struct s2io_nic *sp = (struct s2io_nic *)data;
8116f3cf 4328 struct net_device *dev = sp->dev;
25fff88e 4329
8116f3cf 4330 s2io_handle_errors(dev);
25fff88e
K
4331 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4332}
4333
/*
 * s2io_msix_ring_handle - per-RX-ring MSI-X interrupt handler.
 * @irq:    vector number (unused).
 * @dev_id: the ring_info this vector was registered for.
 *
 * In NAPI mode, masks this ring's further interrupts via xmsi_mask_reg and
 * schedules the ring's NAPI context; otherwise processes the ring inline
 * and replenishes its RX buffers.
 */
static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
{
	struct ring_info *ring = (struct ring_info *)dev_id;
	struct s2io_nic *sp = ring->nic;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	struct net_device *dev = sp->dev;

	if (unlikely(!is_s2io_card_up(sp)))
		return IRQ_HANDLED;

	if (sp->config.napi) {
		u8 __iomem *addr = NULL;
		u8 val8 = 0;

		/* Byte-address this ring's slot in the mask register
		 * (byte 7 - ring_no); ring 0 writes 0x7f, others 0xff. */
		addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
		addr += (7 - ring->ring_no);
		val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
		writeb(val8, addr);
		val8 = readb(addr); /* flush the posted write */
		netif_rx_schedule(dev, &ring->napi);
	} else {
		rx_intr_handler(ring, 0);
		s2io_chk_rx_buffers(ring);
	}

	return IRQ_HANDLED;
}
4361
/*
 * s2io_msix_fifo_handle - MSI-X alarm/TX vector handler.
 * @irq:    vector number (unused).
 * @dev_id: pointer to the fifo_info array (entry 0's arg).
 *
 * Acks TX traffic interrupts, runs the TX completion handler for every
 * fifo, then restores the general interrupt mask.
 */
static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
{
	int i;
	struct fifo_info *fifos = (struct fifo_info *)dev_id;
	struct s2io_nic *sp = fifos->nic;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	struct config_param *config = &sp->config;
	u64 reason;

	if (unlikely(!is_s2io_card_up(sp)))
		return IRQ_NONE;

	reason = readq(&bar0->general_int_status);
	if (unlikely(reason == S2IO_MINUS_ONE))
		/* Nothing much can be done. Get out */
		return IRQ_HANDLED;

	/* Mask all interrupts while the fifos are serviced */
	writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);

	if (reason & GEN_INTR_TXTRAFFIC)
		writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);

	for (i = 0; i < config->tx_fifo_num; i++)
		tx_intr_handler(&fifos[i]);

	/* Restore the driver's interrupt mask and flush */
	writeq(sp->general_int_mask, &bar0->general_int_mask);
	readl(&bar0->general_int_status);

	return IRQ_HANDLED;
}
ac731ab6 4392
/*
 * s2io_txpic_intr_handle - service GPIO link up/down interrupts.
 * @sp: device private structure.
 *
 * Decodes pic_int_status/gpio_int_reg: on a simultaneous up+down
 * indication clears both and re-arms; on link-up enables the adapter and
 * LED and swaps the mask to watch for link-down; on link-down reports the
 * link loss, turns the LED off and swaps the mask to watch for link-up.
 */
static void s2io_txpic_intr_handle(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;

	val64 = readq(&bar0->pic_int_status);
	if (val64 & PIC_INT_GPIO) {
		val64 = readq(&bar0->gpio_int_reg);
		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
		    (val64 & GPIO_INT_REG_LINK_UP)) {
			/*
			 * This is unstable state so clear both up/down
			 * interrupt and adapter to re-evaluate the link state.
			 */
			val64 |= GPIO_INT_REG_LINK_DOWN;
			val64 |= GPIO_INT_REG_LINK_UP;
			writeq(val64, &bar0->gpio_int_reg);
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~(GPIO_INT_MASK_LINK_UP |
				   GPIO_INT_MASK_LINK_DOWN);
			writeq(val64, &bar0->gpio_int_mask);
		}
		else if (val64 & GPIO_INT_REG_LINK_UP) {
			val64 = readq(&bar0->adapter_status);
			/* Enable Adapter */
			val64 = readq(&bar0->adapter_control);
			val64 |= ADAPTER_CNTL_EN;
			writeq(val64, &bar0->adapter_control);
			val64 |= ADAPTER_LED_ON;
			writeq(val64, &bar0->adapter_control);
			if (!sp->device_enabled_once)
				sp->device_enabled_once = 1;

			s2io_link(sp, LINK_UP);
			/*
			 * unmask link down interrupt and mask link-up
			 * intr
			 */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_DOWN;
			val64 |= GPIO_INT_MASK_LINK_UP;
			writeq(val64, &bar0->gpio_int_mask);

		}else if (val64 & GPIO_INT_REG_LINK_DOWN) {
			val64 = readq(&bar0->adapter_status);
			s2io_link(sp, LINK_DOWN);
			/* Link is down so unmaks link up interrupt */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_UP;
			val64 |= GPIO_INT_MASK_LINK_DOWN;
			writeq(val64, &bar0->gpio_int_mask);

			/* turn off LED */
			val64 = readq(&bar0->adapter_control);
			val64 = val64 &(~ADAPTER_LED_ON);
			writeq(val64, &bar0->adapter_control);
		}
	}
	/* Final read flushes the mask updates above */
	val64 = readq(&bar0->gpio_int_mask);
}
4453
8116f3cf
SS
4454/**
4455 * do_s2io_chk_alarm_bit - Check for alarm and incrment the counter
4456 * @value: alarm bits
4457 * @addr: address value
4458 * @cnt: counter variable
4459 * Description: Check for alarm and increment the counter
4460 * Return Value:
4461 * 1 - if alarm bit set
4462 * 0 - if alarm bit is not set
4463 */
43b7c451 4464static int do_s2io_chk_alarm_bit(u64 value, void __iomem * addr,
8116f3cf
SS
4465 unsigned long long *cnt)
4466{
4467 u64 val64;
4468 val64 = readq(addr);
4469 if ( val64 & value ) {
4470 writeq(val64, addr);
4471 (*cnt)++;
4472 return 1;
4473 }
4474 return 0;
4475
4476}
4477
4478/**
4479 * s2io_handle_errors - Xframe error indication handler
4480 * @nic: device private variable
4481 * Description: Handle alarms such as loss of link, single or
4482 * double ECC errors, critical and serious errors.
4483 * Return Value:
4484 * NONE
4485 */
4486static void s2io_handle_errors(void * dev_id)
4487{
4488 struct net_device *dev = (struct net_device *) dev_id;
4489 struct s2io_nic *sp = dev->priv;
4490 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4491 u64 temp64 = 0,val64=0;
4492 int i = 0;
4493
4494 struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4495 struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4496
92b84437 4497 if (!is_s2io_card_up(sp))
8116f3cf
SS
4498 return;
4499
4500 if (pci_channel_offline(sp->pdev))
4501 return;
4502
4503 memset(&sw_stat->ring_full_cnt, 0,
4504 sizeof(sw_stat->ring_full_cnt));
4505
4506 /* Handling the XPAK counters update */
4507 if(stats->xpak_timer_count < 72000) {
4508 /* waiting for an hour */
4509 stats->xpak_timer_count++;
4510 } else {
4511 s2io_updt_xpak_counter(dev);
4512 /* reset the count to zero */
4513 stats->xpak_timer_count = 0;
4514 }
4515
4516 /* Handling link status change error Intr */
4517 if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4518 val64 = readq(&bar0->mac_rmac_err_reg);
4519 writeq(val64, &bar0->mac_rmac_err_reg);
4520 if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4521 schedule_work(&sp->set_link_task);
4522 }
4523
4524 /* In case of a serious error, the device will be Reset. */
4525 if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4526 &sw_stat->serious_err_cnt))
4527 goto reset;
4528
4529 /* Check for data parity error */
4530 if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4531 &sw_stat->parity_err_cnt))
4532 goto reset;
4533
4534 /* Check for ring full counter */
4535 if (sp->device_type == XFRAME_II_DEVICE) {
4536 val64 = readq(&bar0->ring_bump_counter1);
4537 for (i=0; i<4; i++) {
4538 temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
4539 temp64 >>= 64 - ((i+1)*16);
4540 sw_stat->ring_full_cnt[i] += temp64;
4541 }
4542
4543 val64 = readq(&bar0->ring_bump_counter2);
4544 for (i=0; i<4; i++) {
4545 temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
4546 temp64 >>= 64 - ((i+1)*16);
4547 sw_stat->ring_full_cnt[i+4] += temp64;
4548 }
4549 }
4550
4551 val64 = readq(&bar0->txdma_int_status);
4552 /*check for pfc_err*/
4553 if (val64 & TXDMA_PFC_INT) {
4554 if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM|
4555 PFC_MISC_0_ERR | PFC_MISC_1_ERR|
4556 PFC_PCIX_ERR, &bar0->pfc_err_reg,
4557 &sw_stat->pfc_err_cnt))
4558 goto reset;
4559 do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR, &bar0->pfc_err_reg,
4560 &sw_stat->pfc_err_cnt);
4561 }
4562
4563 /*check for tda_err*/
4564 if (val64 & TXDMA_TDA_INT) {
4565 if(do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
4566 TDA_SM1_ERR_ALARM, &bar0->tda_err_reg,
4567 &sw_stat->tda_err_cnt))
4568 goto reset;
4569 do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4570 &bar0->tda_err_reg, &sw_stat->tda_err_cnt);
4571 }
4572 /*check for pcc_err*/
4573 if (val64 & TXDMA_PCC_INT) {
4574 if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM
4575 | PCC_N_SERR | PCC_6_COF_OV_ERR
4576 | PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR
4577 | PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR
4578 | PCC_TXB_ECC_DB_ERR, &bar0->pcc_err_reg,
4579 &sw_stat->pcc_err_cnt))
4580 goto reset;
4581 do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4582 &bar0->pcc_err_reg, &sw_stat->pcc_err_cnt);
4583 }
4584
4585 /*check for tti_err*/
4586 if (val64 & TXDMA_TTI_INT) {
4587 if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM, &bar0->tti_err_reg,
4588 &sw_stat->tti_err_cnt))
4589 goto reset;
4590 do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4591 &bar0->tti_err_reg, &sw_stat->tti_err_cnt);
4592 }
4593
4594 /*check for lso_err*/
4595 if (val64 & TXDMA_LSO_INT) {
4596 if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT
4597 | LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4598 &bar0->lso_err_reg, &sw_stat->lso_err_cnt))
4599 goto reset;
4600 do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4601 &bar0->lso_err_reg, &sw_stat->lso_err_cnt);
4602 }
4603
4604 /*check for tpa_err*/
4605 if (val64 & TXDMA_TPA_INT) {
4606 if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM, &bar0->tpa_err_reg,
4607 &sw_stat->tpa_err_cnt))
4608 goto reset;
4609 do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP, &bar0->tpa_err_reg,
4610 &sw_stat->tpa_err_cnt);
4611 }
4612
4613 /*check for sm_err*/
4614 if (val64 & TXDMA_SM_INT) {
4615 if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM, &bar0->sm_err_reg,
4616 &sw_stat->sm_err_cnt))
4617 goto reset;
4618 }
4619
4620 val64 = readq(&bar0->mac_int_status);
4621 if (val64 & MAC_INT_STATUS_TMAC_INT) {
4622 if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4623 &bar0->mac_tmac_err_reg,
4624 &sw_stat->mac_tmac_err_cnt))
4625 goto reset;
4626 do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR
4627 | TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
4628 &bar0->mac_tmac_err_reg,
4629 &sw_stat->mac_tmac_err_cnt);
4630 }
4631
4632 val64 = readq(&bar0->xgxs_int_status);
4633 if (val64 & XGXS_INT_STATUS_TXGXS) {
4634 if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4635 &bar0->xgxs_txgxs_err_reg,
4636 &sw_stat->xgxs_txgxs_err_cnt))
4637 goto reset;
4638 do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4639 &bar0->xgxs_txgxs_err_reg,
4640 &sw_stat->xgxs_txgxs_err_cnt);
4641 }
4642
4643 val64 = readq(&bar0->rxdma_int_status);
4644 if (val64 & RXDMA_INT_RC_INT_M) {
4645 if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR
4646 | RC_PRCn_SM_ERR_ALARM |RC_FTC_SM_ERR_ALARM,
4647 &bar0->rc_err_reg, &sw_stat->rc_err_cnt))
4648 goto reset;
4649 do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR
4650 | RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4651 &sw_stat->rc_err_cnt);
4652 if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn
4653 | PRC_PCI_AB_F_WR_Rn, &bar0->prc_pcix_err_reg,
4654 &sw_stat->prc_pcix_err_cnt))
4655 goto reset;
4656 do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn | PRC_PCI_DP_WR_Rn
4657 | PRC_PCI_DP_F_WR_Rn, &bar0->prc_pcix_err_reg,
4658 &sw_stat->prc_pcix_err_cnt);
4659 }
4660
4661 if (val64 & RXDMA_INT_RPA_INT_M) {
4662 if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4663 &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt))
4664 goto reset;
4665 do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4666 &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt);
4667 }
4668
4669 if (val64 & RXDMA_INT_RDA_INT_M) {
4670 if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR
4671 | RDA_FRM_ECC_DB_N_AERR | RDA_SM1_ERR_ALARM
4672 | RDA_SM0_ERR_ALARM | RDA_RXD_ECC_DB_SERR,
4673 &bar0->rda_err_reg, &sw_stat->rda_err_cnt))
4674 goto reset;
4675 do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR | RDA_FRM_ECC_SG_ERR
4676 | RDA_MISC_ERR | RDA_PCIX_ERR,
4677 &bar0->rda_err_reg, &sw_stat->rda_err_cnt);
4678 }
4679
4680 if (val64 & RXDMA_INT_RTI_INT_M) {
4681 if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM, &bar0->rti_err_reg,
4682 &sw_stat->rti_err_cnt))
4683 goto reset;
4684 do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4685 &bar0->rti_err_reg, &sw_stat->rti_err_cnt);
4686 }
4687
4688 val64 = readq(&bar0->mac_int_status);
4689 if (val64 & MAC_INT_STATUS_RMAC_INT) {
4690 if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4691 &bar0->mac_rmac_err_reg,
4692 &sw_stat->mac_rmac_err_cnt))
4693 goto reset;
4694 do_s2io_chk_alarm_bit(RMAC_UNUSED_INT|RMAC_SINGLE_ECC_ERR|
4695 RMAC_DOUBLE_ECC_ERR, &bar0->mac_rmac_err_reg,
4696 &sw_stat->mac_rmac_err_cnt);
4697 }
4698
4699 val64 = readq(&bar0->xgxs_int_status);
4700 if (val64 & XGXS_INT_STATUS_RXGXS) {
4701 if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4702 &bar0->xgxs_rxgxs_err_reg,
4703 &sw_stat->xgxs_rxgxs_err_cnt))
4704 goto reset;
4705 }
4706
4707 val64 = readq(&bar0->mc_int_status);
4708 if(val64 & MC_INT_STATUS_MC_INT) {
4709 if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR, &bar0->mc_err_reg,
4710 &sw_stat->mc_err_cnt))
4711 goto reset;
4712
4713 /* Handling Ecc errors */
4714 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4715 writeq(val64, &bar0->mc_err_reg);
4716 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4717 sw_stat->double_ecc_errs++;
4718 if (sp->device_type != XFRAME_II_DEVICE) {
4719 /*
4720 * Reset XframeI only if critical error
4721 */
4722 if (val64 &
4723 (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4724 MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4725 goto reset;
4726 }
4727 } else
4728 sw_stat->single_ecc_errs++;
4729 }
4730 }
4731 return;
4732
4733reset:
3a3d5756 4734 s2io_stop_all_tx_queue(sp);
8116f3cf
SS
4735 schedule_work(&sp->rst_timer_task);
4736 sw_stat->soft_reset_cnt++;
4737 return;
4738}
4739
1da177e4
LT
4740/**
4741 * s2io_isr - ISR handler of the device .
4742 * @irq: the irq of the device.
4743 * @dev_id: a void pointer to the dev structure of the NIC.
20346722
K
4744 * Description: This function is the ISR handler of the device. It
4745 * identifies the reason for the interrupt and calls the relevant
4746 * service routines. As a contongency measure, this ISR allocates the
1da177e4
LT
4747 * recv buffers, if their numbers are below the panic value which is
4748 * presently set to 25% of the original number of rcv buffers allocated.
4749 * Return value:
20346722 4750 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
1da177e4
LT
4751 * IRQ_NONE: will be returned if interrupt is not from our device
4752 */
7d12e780 4753static irqreturn_t s2io_isr(int irq, void *dev_id)
1da177e4
LT
4754{
4755 struct net_device *dev = (struct net_device *) dev_id;
1ee6dd77
RB
4756 struct s2io_nic *sp = dev->priv;
4757 struct XENA_dev_config __iomem *bar0 = sp->bar0;
20346722 4758 int i;
19a60522 4759 u64 reason = 0;
1ee6dd77 4760 struct mac_info *mac_control;
1da177e4
LT
4761 struct config_param *config;
4762
d796fdb7
LV
4763 /* Pretend we handled any irq's from a disconnected card */
4764 if (pci_channel_offline(sp->pdev))
4765 return IRQ_NONE;
4766
596c5c97 4767 if (!is_s2io_card_up(sp))
92b84437 4768 return IRQ_NONE;
92b84437 4769
1da177e4
LT
4770 mac_control = &sp->mac_control;
4771 config = &sp->config;
4772
20346722 4773 /*
1da177e4
LT
4774 * Identify the cause for interrupt and call the appropriate
4775 * interrupt handler. Causes for the interrupt could be;
4776 * 1. Rx of packet.
4777 * 2. Tx complete.
4778 * 3. Link down.
1da177e4
LT
4779 */
4780 reason = readq(&bar0->general_int_status);
4781
596c5c97
SS
4782 if (unlikely(reason == S2IO_MINUS_ONE) ) {
4783 /* Nothing much can be done. Get out */
4784 return IRQ_HANDLED;
1da177e4 4785 }
5d3213cc 4786
596c5c97
SS
4787 if (reason & (GEN_INTR_RXTRAFFIC |
4788 GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC))
4789 {
4790 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4791
4792 if (config->napi) {
4793 if (reason & GEN_INTR_RXTRAFFIC) {
f61e0a35
SH
4794 netif_rx_schedule(dev, &sp->napi);
4795 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4796 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4797 readl(&bar0->rx_traffic_int);
db874e65 4798 }
596c5c97
SS
4799 } else {
4800 /*
4801 * rx_traffic_int reg is an R1 register, writing all 1's
4802 * will ensure that the actual interrupt causing bit
4803 * get's cleared and hence a read can be avoided.
4804 */
4805 if (reason & GEN_INTR_RXTRAFFIC)
19a60522 4806 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
596c5c97
SS
4807
4808 for (i = 0; i < config->rx_ring_num; i++)
f61e0a35 4809 rx_intr_handler(&mac_control->rings[i], 0);
db874e65 4810 }
596c5c97 4811
db874e65 4812 /*
596c5c97 4813 * tx_traffic_int reg is an R1 register, writing all 1's
db874e65
SS
4814 * will ensure that the actual interrupt causing bit get's
4815 * cleared and hence a read can be avoided.
4816 */
596c5c97
SS
4817 if (reason & GEN_INTR_TXTRAFFIC)
4818 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
19a60522 4819
596c5c97
SS
4820 for (i = 0; i < config->tx_fifo_num; i++)
4821 tx_intr_handler(&mac_control->fifos[i]);
1da177e4 4822
596c5c97
SS
4823 if (reason & GEN_INTR_TXPIC)
4824 s2io_txpic_intr_handle(sp);
fe113638 4825
596c5c97
SS
4826 /*
4827 * Reallocate the buffers from the interrupt handler itself.
4828 */
4829 if (!config->napi) {
4830 for (i = 0; i < config->rx_ring_num; i++)
0425b46a 4831 s2io_chk_rx_buffers(&mac_control->rings[i]);
596c5c97
SS
4832 }
4833 writeq(sp->general_int_mask, &bar0->general_int_mask);
4834 readl(&bar0->general_int_status);
20346722 4835
596c5c97 4836 return IRQ_HANDLED;
db874e65 4837
596c5c97
SS
4838 }
4839 else if (!reason) {
4840 /* The interrupt was not raised by us */
4841 return IRQ_NONE;
4842 }
db874e65 4843
1da177e4
LT
4844 return IRQ_HANDLED;
4845}
4846
7ba013ac
K
4847/**
4848 * s2io_updt_stats -
4849 */
1ee6dd77 4850static void s2io_updt_stats(struct s2io_nic *sp)
7ba013ac 4851{
1ee6dd77 4852 struct XENA_dev_config __iomem *bar0 = sp->bar0;
7ba013ac
K
4853 u64 val64;
4854 int cnt = 0;
4855
92b84437 4856 if (is_s2io_card_up(sp)) {
7ba013ac
K
4857 /* Apprx 30us on a 133 MHz bus */
4858 val64 = SET_UPDT_CLICKS(10) |
4859 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4860 writeq(val64, &bar0->stat_cfg);
4861 do {
4862 udelay(100);
4863 val64 = readq(&bar0->stat_cfg);
b7b5a128 4864 if (!(val64 & s2BIT(0)))
7ba013ac
K
4865 break;
4866 cnt++;
4867 if (cnt == 5)
4868 break; /* Updt failed */
4869 } while(1);
8a4bdbaa 4870 }
7ba013ac
K
4871}
4872
1da177e4 4873/**
20346722 4874 * s2io_get_stats - Updates the device statistics structure.
1da177e4
LT
4875 * @dev : pointer to the device structure.
4876 * Description:
20346722 4877 * This function updates the device statistics structure in the s2io_nic
1da177e4
LT
4878 * structure and returns a pointer to the same.
4879 * Return value:
4880 * pointer to the updated net_device_stats structure.
4881 */
4882
ac1f60db 4883static struct net_device_stats *s2io_get_stats(struct net_device *dev)
1da177e4 4884{
1ee6dd77
RB
4885 struct s2io_nic *sp = dev->priv;
4886 struct mac_info *mac_control;
1da177e4 4887 struct config_param *config;
0425b46a 4888 int i;
1da177e4 4889
20346722 4890
1da177e4
LT
4891 mac_control = &sp->mac_control;
4892 config = &sp->config;
4893
7ba013ac
K
4894 /* Configure Stats for immediate updt */
4895 s2io_updt_stats(sp);
4896
4897 sp->stats.tx_packets =
4898 le32_to_cpu(mac_control->stats_info->tmac_frms);
20346722
K
4899 sp->stats.tx_errors =
4900 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4901 sp->stats.rx_errors =
ee705dba 4902 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
20346722
K
4903 sp->stats.multicast =
4904 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
1da177e4 4905 sp->stats.rx_length_errors =
ee705dba 4906 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
1da177e4 4907
0425b46a
SH
4908 /* collect per-ring rx_packets and rx_bytes */
4909 sp->stats.rx_packets = sp->stats.rx_bytes = 0;
4910 for (i = 0; i < config->rx_ring_num; i++) {
4911 sp->stats.rx_packets += mac_control->rings[i].rx_packets;
4912 sp->stats.rx_bytes += mac_control->rings[i].rx_bytes;
4913 }
4914
1da177e4
LT
4915 return (&sp->stats);
4916}
4917
4918/**
4919 * s2io_set_multicast - entry point for multicast address enable/disable.
4920 * @dev : pointer to the device structure
4921 * Description:
20346722
K
4922 * This function is a driver entry point which gets called by the kernel
4923 * whenever multicast addresses must be enabled/disabled. This also gets
1da177e4
LT
4924 * called to set/reset promiscuous mode. Depending on the deivce flag, we
4925 * determine, if multicast address must be enabled or if promiscuous mode
4926 * is to be disabled etc.
4927 * Return value:
4928 * void.
4929 */
4930
4931static void s2io_set_multicast(struct net_device *dev)
4932{
4933 int i, j, prev_cnt;
4934 struct dev_mc_list *mclist;
1ee6dd77
RB
4935 struct s2io_nic *sp = dev->priv;
4936 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
4937 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4938 0xfeffffffffffULL;
faa4f796 4939 u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
1da177e4 4940 void __iomem *add;
faa4f796 4941 struct config_param *config = &sp->config;
1da177e4
LT
4942
4943 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4944 /* Enable all Multicast addresses */
4945 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4946 &bar0->rmac_addr_data0_mem);
4947 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4948 &bar0->rmac_addr_data1_mem);
4949 val64 = RMAC_ADDR_CMD_MEM_WE |
4950 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
faa4f796 4951 RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
1da177e4
LT
4952 writeq(val64, &bar0->rmac_addr_cmd_mem);
4953 /* Wait till command completes */
c92ca04b 4954 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
9fc93a41
SS
4955 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4956 S2IO_BIT_RESET);
1da177e4
LT
4957
4958 sp->m_cast_flg = 1;
faa4f796 4959 sp->all_multi_pos = config->max_mc_addr - 1;
1da177e4
LT
4960 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4961 /* Disable all Multicast addresses */
4962 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4963 &bar0->rmac_addr_data0_mem);
5e25b9dd
K
4964 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4965 &bar0->rmac_addr_data1_mem);
1da177e4
LT
4966 val64 = RMAC_ADDR_CMD_MEM_WE |
4967 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4968 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4969 writeq(val64, &bar0->rmac_addr_cmd_mem);
4970 /* Wait till command completes */
c92ca04b 4971 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
9fc93a41
SS
4972 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4973 S2IO_BIT_RESET);
1da177e4
LT
4974
4975 sp->m_cast_flg = 0;
4976 sp->all_multi_pos = 0;
4977 }
4978
4979 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4980 /* Put the NIC into promiscuous mode */
4981 add = &bar0->mac_cfg;
4982 val64 = readq(&bar0->mac_cfg);
4983 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4984
4985 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4986 writel((u32) val64, add);
4987 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4988 writel((u32) (val64 >> 32), (add + 4));
4989
926930b2
SS
4990 if (vlan_tag_strip != 1) {
4991 val64 = readq(&bar0->rx_pa_cfg);
4992 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
4993 writeq(val64, &bar0->rx_pa_cfg);
4994 vlan_strip_flag = 0;
4995 }
4996
1da177e4
LT
4997 val64 = readq(&bar0->mac_cfg);
4998 sp->promisc_flg = 1;
776bd20f 4999 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
1da177e4
LT
5000 dev->name);
5001 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
5002 /* Remove the NIC from promiscuous mode */
5003 add = &bar0->mac_cfg;
5004 val64 = readq(&bar0->mac_cfg);
5005 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
5006
5007 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5008 writel((u32) val64, add);
5009 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5010 writel((u32) (val64 >> 32), (add + 4));
5011
926930b2
SS
5012 if (vlan_tag_strip != 0) {
5013 val64 = readq(&bar0->rx_pa_cfg);
5014 val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
5015 writeq(val64, &bar0->rx_pa_cfg);
5016 vlan_strip_flag = 1;
5017 }
5018
1da177e4
LT
5019 val64 = readq(&bar0->mac_cfg);
5020 sp->promisc_flg = 0;
776bd20f 5021 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
1da177e4
LT
5022 dev->name);
5023 }
5024
5025 /* Update individual M_CAST address list */
5026 if ((!sp->m_cast_flg) && dev->mc_count) {
5027 if (dev->mc_count >
faa4f796 5028 (config->max_mc_addr - config->max_mac_addr)) {
1da177e4
LT
5029 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
5030 dev->name);
5031 DBG_PRINT(ERR_DBG, "can be added, please enable ");
5032 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
5033 return;
5034 }
5035
5036 prev_cnt = sp->mc_addr_count;
5037 sp->mc_addr_count = dev->mc_count;
5038
5039 /* Clear out the previous list of Mc in the H/W. */
5040 for (i = 0; i < prev_cnt; i++) {
5041 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
5042 &bar0->rmac_addr_data0_mem);
5043 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
20346722 5044 &bar0->rmac_addr_data1_mem);
1da177e4
LT
5045 val64 = RMAC_ADDR_CMD_MEM_WE |
5046 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5047 RMAC_ADDR_CMD_MEM_OFFSET
faa4f796 5048 (config->mc_start_offset + i);
1da177e4
LT
5049 writeq(val64, &bar0->rmac_addr_cmd_mem);
5050
5051 /* Wait for command completes */
c92ca04b 5052 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
9fc93a41
SS
5053 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5054 S2IO_BIT_RESET)) {
1da177e4
LT
5055 DBG_PRINT(ERR_DBG, "%s: Adding ",
5056 dev->name);
5057 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
5058 return;
5059 }
5060 }
5061
5062 /* Create the new Rx filter list and update the same in H/W. */
5063 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
5064 i++, mclist = mclist->next) {
5065 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
5066 ETH_ALEN);
a7a80d5a 5067 mac_addr = 0;
1da177e4
LT
5068 for (j = 0; j < ETH_ALEN; j++) {
5069 mac_addr |= mclist->dmi_addr[j];
5070 mac_addr <<= 8;
5071 }
5072 mac_addr >>= 8;
5073 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
5074 &bar0->rmac_addr_data0_mem);
5075 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
20346722 5076 &bar0->rmac_addr_data1_mem);
1da177e4
LT
5077 val64 = RMAC_ADDR_CMD_MEM_WE |
5078 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5079 RMAC_ADDR_CMD_MEM_OFFSET
faa4f796 5080 (i + config->mc_start_offset);
1da177e4
LT
5081 writeq(val64, &bar0->rmac_addr_cmd_mem);
5082
5083 /* Wait for command completes */
c92ca04b 5084 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
9fc93a41
SS
5085 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5086 S2IO_BIT_RESET)) {
1da177e4
LT
5087 DBG_PRINT(ERR_DBG, "%s: Adding ",
5088 dev->name);
5089 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
5090 return;
5091 }
5092 }
5093 }
5094}
5095
faa4f796
SH
5096/* read from CAM unicast & multicast addresses and store it in
5097 * def_mac_addr structure
5098 */
5099void do_s2io_store_unicast_mc(struct s2io_nic *sp)
5100{
5101 int offset;
5102 u64 mac_addr = 0x0;
5103 struct config_param *config = &sp->config;
5104
5105 /* store unicast & multicast mac addresses */
5106 for (offset = 0; offset < config->max_mc_addr; offset++) {
5107 mac_addr = do_s2io_read_unicast_mc(sp, offset);
5108 /* if read fails disable the entry */
5109 if (mac_addr == FAILURE)
5110 mac_addr = S2IO_DISABLE_MAC_ENTRY;
5111 do_s2io_copy_mac_addr(sp, offset, mac_addr);
5112 }
5113}
5114
5115/* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5116static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5117{
5118 int offset;
5119 struct config_param *config = &sp->config;
5120 /* restore unicast mac address */
5121 for (offset = 0; offset < config->max_mac_addr; offset++)
5122 do_s2io_prog_unicast(sp->dev,
5123 sp->def_mac_addr[offset].mac_addr);
5124
5125 /* restore multicast mac address */
5126 for (offset = config->mc_start_offset;
5127 offset < config->max_mc_addr; offset++)
5128 do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5129}
5130
5131/* add a multicast MAC address to CAM */
5132static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5133{
5134 int i;
5135 u64 mac_addr = 0;
5136 struct config_param *config = &sp->config;
5137
5138 for (i = 0; i < ETH_ALEN; i++) {
5139 mac_addr <<= 8;
5140 mac_addr |= addr[i];
5141 }
5142 if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5143 return SUCCESS;
5144
5145 /* check if the multicast mac already preset in CAM */
5146 for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5147 u64 tmp64;
5148 tmp64 = do_s2io_read_unicast_mc(sp, i);
5149 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5150 break;
5151
5152 if (tmp64 == mac_addr)
5153 return SUCCESS;
5154 }
5155 if (i == config->max_mc_addr) {
5156 DBG_PRINT(ERR_DBG,
5157 "CAM full no space left for multicast MAC\n");
5158 return FAILURE;
5159 }
5160 /* Update the internal structure with this new mac address */
5161 do_s2io_copy_mac_addr(sp, i, mac_addr);
5162
5163 return (do_s2io_add_mac(sp, mac_addr, i));
5164}
5165
5166/* add MAC address to CAM */
5167static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
2fd37688
SS
5168{
5169 u64 val64;
5170 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5171
5172 writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
5173 &bar0->rmac_addr_data0_mem);
5174
5175 val64 =
5176 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5177 RMAC_ADDR_CMD_MEM_OFFSET(off);
5178 writeq(val64, &bar0->rmac_addr_cmd_mem);
5179
5180 /* Wait till command completes */
5181 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5182 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5183 S2IO_BIT_RESET)) {
faa4f796 5184 DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
2fd37688
SS
5185 return FAILURE;
5186 }
5187 return SUCCESS;
5188}
faa4f796
SH
5189/* deletes a specified unicast/multicast mac entry from CAM */
5190static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5191{
5192 int offset;
5193 u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5194 struct config_param *config = &sp->config;
5195
5196 for (offset = 1;
5197 offset < config->max_mc_addr; offset++) {
5198 tmp64 = do_s2io_read_unicast_mc(sp, offset);
5199 if (tmp64 == addr) {
5200 /* disable the entry by writing 0xffffffffffffULL */
5201 if (do_s2io_add_mac(sp, dis_addr, offset) == FAILURE)
5202 return FAILURE;
5203 /* store the new mac list from CAM */
5204 do_s2io_store_unicast_mc(sp);
5205 return SUCCESS;
5206 }
5207 }
5208 DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
5209 (unsigned long long)addr);
5210 return FAILURE;
5211}
5212
5213/* read mac entries from CAM */
5214static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
5215{
5216 u64 tmp64 = 0xffffffffffff0000ULL, val64;
5217 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5218
5219 /* read mac addr */
5220 val64 =
5221 RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5222 RMAC_ADDR_CMD_MEM_OFFSET(offset);
5223 writeq(val64, &bar0->rmac_addr_cmd_mem);
5224
5225 /* Wait till command completes */
5226 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5227 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5228 S2IO_BIT_RESET)) {
5229 DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
5230 return FAILURE;
5231 }
5232 tmp64 = readq(&bar0->rmac_addr_data0_mem);
5233 return (tmp64 >> 16);
5234}
2fd37688
SS
5235
5236/**
5237 * s2io_set_mac_addr driver entry point
5238 */
faa4f796 5239
2fd37688
SS
5240static int s2io_set_mac_addr(struct net_device *dev, void *p)
5241{
5242 struct sockaddr *addr = p;
5243
5244 if (!is_valid_ether_addr(addr->sa_data))
5245 return -EINVAL;
5246
5247 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5248
5249 /* store the MAC address in CAM */
5250 return (do_s2io_prog_unicast(dev, dev->dev_addr));
5251}
1da177e4 5252/**
2fd37688 5253 * do_s2io_prog_unicast - Programs the Xframe mac address
1da177e4
LT
5254 * @dev : pointer to the device structure.
5255 * @addr: a uchar pointer to the new mac address which is to be set.
20346722 5256 * Description : This procedure will program the Xframe to receive
1da177e4 5257 * frames with new Mac Address
20346722 5258 * Return value: SUCCESS on success and an appropriate (-)ve integer
1da177e4
LT
5259 * as defined in errno.h file on failure.
5260 */
faa4f796 5261
2fd37688 5262static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
1da177e4 5263{
1ee6dd77 5264 struct s2io_nic *sp = dev->priv;
2fd37688 5265 register u64 mac_addr = 0, perm_addr = 0;
1da177e4 5266 int i;
faa4f796
SH
5267 u64 tmp64;
5268 struct config_param *config = &sp->config;
1da177e4 5269
20346722 5270 /*
2fd37688
SS
5271 * Set the new MAC address as the new unicast filter and reflect this
5272 * change on the device address registered with the OS. It will be
5273 * at offset 0.
5274 */
1da177e4
LT
5275 for (i = 0; i < ETH_ALEN; i++) {
5276 mac_addr <<= 8;
5277 mac_addr |= addr[i];
2fd37688
SS
5278 perm_addr <<= 8;
5279 perm_addr |= sp->def_mac_addr[0].mac_addr[i];
d8d70caf
SS
5280 }
5281
2fd37688
SS
5282 /* check if the dev_addr is different than perm_addr */
5283 if (mac_addr == perm_addr)
d8d70caf
SS
5284 return SUCCESS;
5285
faa4f796
SH
5286 /* check if the mac already preset in CAM */
5287 for (i = 1; i < config->max_mac_addr; i++) {
5288 tmp64 = do_s2io_read_unicast_mc(sp, i);
5289 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5290 break;
5291
5292 if (tmp64 == mac_addr) {
5293 DBG_PRINT(INFO_DBG,
5294 "MAC addr:0x%llx already present in CAM\n",
5295 (unsigned long long)mac_addr);
5296 return SUCCESS;
5297 }
5298 }
5299 if (i == config->max_mac_addr) {
5300 DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
5301 return FAILURE;
5302 }
d8d70caf 5303 /* Update the internal structure with this new mac address */
faa4f796
SH
5304 do_s2io_copy_mac_addr(sp, i, mac_addr);
5305 return (do_s2io_add_mac(sp, mac_addr, i));
1da177e4
LT
5306}
5307
5308/**
20346722 5309 * s2io_ethtool_sset - Sets different link parameters.
1da177e4
LT
5310 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
5311 * @info: pointer to the structure with parameters given by ethtool to set
5312 * link information.
5313 * Description:
20346722 5314 * The function sets different link parameters provided by the user onto
1da177e4
LT
5315 * the NIC.
5316 * Return value:
5317 * 0 on success.
5318*/
5319
5320static int s2io_ethtool_sset(struct net_device *dev,
5321 struct ethtool_cmd *info)
5322{
1ee6dd77 5323 struct s2io_nic *sp = dev->priv;
1da177e4
LT
5324 if ((info->autoneg == AUTONEG_ENABLE) ||
5325 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
5326 return -EINVAL;
5327 else {
5328 s2io_close(sp->dev);
5329 s2io_open(sp->dev);
5330 }
5331
5332 return 0;
5333}
5334
5335/**
20346722 5336 * s2io_ethtol_gset - Return link specific information.
1da177e4
LT
5337 * @sp : private member of the device structure, pointer to the
5338 * s2io_nic structure.
5339 * @info : pointer to the structure with parameters given by ethtool
5340 * to return link information.
5341 * Description:
5342 * Returns link specific information like speed, duplex etc.. to ethtool.
5343 * Return value :
5344 * return 0 on success.
5345 */
5346
5347static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
5348{
1ee6dd77 5349 struct s2io_nic *sp = dev->priv;
1da177e4
LT
5350 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5351 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5352 info->port = PORT_FIBRE;
1a7eb72b
SS
5353
5354 /* info->transceiver */
5355 info->transceiver = XCVR_EXTERNAL;
1da177e4
LT
5356
5357 if (netif_carrier_ok(sp->dev)) {
5358 info->speed = 10000;
5359 info->duplex = DUPLEX_FULL;
5360 } else {
5361 info->speed = -1;
5362 info->duplex = -1;
5363 }
5364
5365 info->autoneg = AUTONEG_DISABLE;
5366 return 0;
5367}
5368
5369/**
20346722
K
5370 * s2io_ethtool_gdrvinfo - Returns driver specific information.
5371 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
5372 * s2io_nic structure.
5373 * @info : pointer to the structure with parameters given by ethtool to
5374 * return driver information.
5375 * Description:
5376 * Returns driver specefic information like name, version etc.. to ethtool.
5377 * Return value:
5378 * void
5379 */
5380
5381static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5382 struct ethtool_drvinfo *info)
5383{
1ee6dd77 5384 struct s2io_nic *sp = dev->priv;
1da177e4 5385
dbc2309d
JL
5386 strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
5387 strncpy(info->version, s2io_driver_version, sizeof(info->version));
5388 strncpy(info->fw_version, "", sizeof(info->fw_version));
5389 strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
1da177e4
LT
5390 info->regdump_len = XENA_REG_SPACE;
5391 info->eedump_len = XENA_EEPROM_SPACE;
1da177e4
LT
5392}
5393
5394/**
5395 * s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
20346722 5396 * @sp: private member of the device structure, which is a pointer to the
1da177e4 5397 * s2io_nic structure.
20346722 5398 * @regs : pointer to the structure with parameters given by ethtool for
1da177e4
LT
5399 * dumping the registers.
5400 * @reg_space: The input argumnet into which all the registers are dumped.
5401 * Description:
5402 * Dumps the entire register space of xFrame NIC into the user given
5403 * buffer area.
5404 * Return value :
5405 * void .
5406*/
5407
5408static void s2io_ethtool_gregs(struct net_device *dev,
5409 struct ethtool_regs *regs, void *space)
5410{
5411 int i;
5412 u64 reg;
5413 u8 *reg_space = (u8 *) space;
1ee6dd77 5414 struct s2io_nic *sp = dev->priv;
1da177e4
LT
5415
5416 regs->len = XENA_REG_SPACE;
5417 regs->version = sp->pdev->subsystem_device;
5418
5419 for (i = 0; i < regs->len; i += 8) {
5420 reg = readq(sp->bar0 + i);
5421 memcpy((reg_space + i), &reg, 8);
5422 }
5423}
5424
5425/**
5426 * s2io_phy_id - timer function that alternates adapter LED.
20346722 5427 * @data : address of the private member of the device structure, which
1da177e4 5428 * is a pointer to the s2io_nic structure, provided as an u32.
20346722
K
5429 * Description: This is actually the timer function that alternates the
5430 * adapter LED bit of the adapter control bit to set/reset every time on
5431 * invocation. The timer is set for 1/2 a second, hence tha NIC blinks
1da177e4
LT
5432 * once every second.
5433*/
5434static void s2io_phy_id(unsigned long data)
5435{
1ee6dd77
RB
5436 struct s2io_nic *sp = (struct s2io_nic *) data;
5437 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
5438 u64 val64 = 0;
5439 u16 subid;
5440
5441 subid = sp->pdev->subsystem_device;
541ae68f
K
5442 if ((sp->device_type == XFRAME_II_DEVICE) ||
5443 ((subid & 0xFF) >= 0x07)) {
1da177e4
LT
5444 val64 = readq(&bar0->gpio_control);
5445 val64 ^= GPIO_CTRL_GPIO_0;
5446 writeq(val64, &bar0->gpio_control);
5447 } else {
5448 val64 = readq(&bar0->adapter_control);
5449 val64 ^= ADAPTER_LED_ON;
5450 writeq(val64, &bar0->adapter_control);
5451 }
5452
5453 mod_timer(&sp->id_timer, jiffies + HZ / 2);
5454}
5455
5456/**
5457 * s2io_ethtool_idnic - To physically identify the nic on the system.
5458 * @sp : private member of the device structure, which is a pointer to the
5459 * s2io_nic structure.
20346722 5460 * @id : pointer to the structure with identification parameters given by
1da177e4
LT
5461 * ethtool.
5462 * Description: Used to physically identify the NIC on the system.
20346722 5463 * The Link LED will blink for a time specified by the user for
1da177e4 5464 * identification.
20346722 5465 * NOTE: The Link has to be Up to be able to blink the LED. Hence
1da177e4
LT
5466 * identification is possible only if it's link is up.
5467 * Return value:
5468 * int , returns 0 on success
5469 */
5470
5471static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
5472{
5473 u64 val64 = 0, last_gpio_ctrl_val;
1ee6dd77
RB
5474 struct s2io_nic *sp = dev->priv;
5475 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
5476 u16 subid;
5477
5478 subid = sp->pdev->subsystem_device;
5479 last_gpio_ctrl_val = readq(&bar0->gpio_control);
541ae68f
K
5480 if ((sp->device_type == XFRAME_I_DEVICE) &&
5481 ((subid & 0xFF) < 0x07)) {
1da177e4
LT
5482 val64 = readq(&bar0->adapter_control);
5483 if (!(val64 & ADAPTER_CNTL_EN)) {
5484 printk(KERN_ERR
5485 "Adapter Link down, cannot blink LED\n");
5486 return -EFAULT;
5487 }
5488 }
5489 if (sp->id_timer.function == NULL) {
5490 init_timer(&sp->id_timer);
5491 sp->id_timer.function = s2io_phy_id;
5492 sp->id_timer.data = (unsigned long) sp;
5493 }
5494 mod_timer(&sp->id_timer, jiffies);
5495 if (data)
20346722 5496 msleep_interruptible(data * HZ);
1da177e4 5497 else
20346722 5498 msleep_interruptible(MAX_FLICKER_TIME);
1da177e4
LT
5499 del_timer_sync(&sp->id_timer);
5500
541ae68f 5501 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
1da177e4
LT
5502 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
5503 last_gpio_ctrl_val = readq(&bar0->gpio_control);
5504 }
5505
5506 return 0;
5507}
5508
0cec35eb
SH
5509static void s2io_ethtool_gringparam(struct net_device *dev,
5510 struct ethtool_ringparam *ering)
5511{
5512 struct s2io_nic *sp = dev->priv;
5513 int i,tx_desc_count=0,rx_desc_count=0;
5514
5515 if (sp->rxd_mode == RXD_MODE_1)
5516 ering->rx_max_pending = MAX_RX_DESC_1;
5517 else if (sp->rxd_mode == RXD_MODE_3B)
5518 ering->rx_max_pending = MAX_RX_DESC_2;
0cec35eb
SH
5519
5520 ering->tx_max_pending = MAX_TX_DESC;
8a4bdbaa 5521 for (i = 0 ; i < sp->config.tx_fifo_num ; i++)
0cec35eb 5522 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
8a4bdbaa 5523
0cec35eb
SH
5524 DBG_PRINT(INFO_DBG,"\nmax txds : %d\n",sp->config.max_txds);
5525 ering->tx_pending = tx_desc_count;
5526 rx_desc_count = 0;
8a4bdbaa 5527 for (i = 0 ; i < sp->config.rx_ring_num ; i++)
0cec35eb 5528 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
b6627672 5529
0cec35eb
SH
5530 ering->rx_pending = rx_desc_count;
5531
5532 ering->rx_mini_max_pending = 0;
5533 ering->rx_mini_pending = 0;
5534 if(sp->rxd_mode == RXD_MODE_1)
5535 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5536 else if (sp->rxd_mode == RXD_MODE_3B)
5537 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5538 ering->rx_jumbo_pending = rx_desc_count;
5539}
5540
1da177e4
LT
5541/**
5542 * s2io_ethtool_getpause_data -Pause frame generation and reception.
20346722
K
5543 * @sp : private member of the device structure, which is a pointer to the
5544 * s2io_nic structure.
1da177e4
LT
5545 * @ep : pointer to the structure with pause parameters given by ethtool.
5546 * Description:
5547 * Returns the Pause frame generation and reception capability of the NIC.
5548 * Return value:
5549 * void
5550 */
5551static void s2io_ethtool_getpause_data(struct net_device *dev,
5552 struct ethtool_pauseparam *ep)
5553{
5554 u64 val64;
1ee6dd77
RB
5555 struct s2io_nic *sp = dev->priv;
5556 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
5557
5558 val64 = readq(&bar0->rmac_pause_cfg);
5559 if (val64 & RMAC_PAUSE_GEN_ENABLE)
5560 ep->tx_pause = TRUE;
5561 if (val64 & RMAC_PAUSE_RX_ENABLE)
5562 ep->rx_pause = TRUE;
5563 ep->autoneg = FALSE;
5564}
5565
5566/**
5567 * s2io_ethtool_setpause_data - set/reset pause frame generation.
20346722 5568 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
5569 * s2io_nic structure.
5570 * @ep : pointer to the structure with pause parameters given by ethtool.
5571 * Description:
5572 * It can be used to set or reset Pause frame generation or reception
5573 * support of the NIC.
5574 * Return value:
5575 * int, returns 0 on Success
5576 */
5577
5578static int s2io_ethtool_setpause_data(struct net_device *dev,
20346722 5579 struct ethtool_pauseparam *ep)
1da177e4
LT
5580{
5581 u64 val64;
1ee6dd77
RB
5582 struct s2io_nic *sp = dev->priv;
5583 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
5584
5585 val64 = readq(&bar0->rmac_pause_cfg);
5586 if (ep->tx_pause)
5587 val64 |= RMAC_PAUSE_GEN_ENABLE;
5588 else
5589 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5590 if (ep->rx_pause)
5591 val64 |= RMAC_PAUSE_RX_ENABLE;
5592 else
5593 val64 &= ~RMAC_PAUSE_RX_ENABLE;
5594 writeq(val64, &bar0->rmac_pause_cfg);
5595 return 0;
5596}
5597
5598/**
5599 * read_eeprom - reads 4 bytes of data from user given offset.
20346722 5600 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
5601 * s2io_nic structure.
5602 * @off : offset at which the data must be written
5603 * @data : Its an output parameter where the data read at the given
20346722 5604 * offset is stored.
1da177e4 5605 * Description:
20346722 5606 * Will read 4 bytes of data from the user given offset and return the
1da177e4
LT
5607 * read data.
5608 * NOTE: Will allow to read only part of the EEPROM visible through the
5609 * I2C bus.
5610 * Return value:
5611 * -1 on failure and 0 on success.
5612 */
5613
5614#define S2IO_DEV_ID 5
1ee6dd77 5615static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
1da177e4
LT
5616{
5617 int ret = -1;
5618 u32 exit_cnt = 0;
5619 u64 val64;
1ee6dd77 5620 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4 5621
ad4ebed0 5622 if (sp->device_type == XFRAME_I_DEVICE) {
5623 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
5624 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
5625 I2C_CONTROL_CNTL_START;
5626 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
1da177e4 5627
ad4ebed0 5628 while (exit_cnt < 5) {
5629 val64 = readq(&bar0->i2c_control);
5630 if (I2C_CONTROL_CNTL_END(val64)) {
5631 *data = I2C_CONTROL_GET_DATA(val64);
5632 ret = 0;
5633 break;
5634 }
5635 msleep(50);
5636 exit_cnt++;
1da177e4 5637 }
1da177e4
LT
5638 }
5639
ad4ebed0 5640 if (sp->device_type == XFRAME_II_DEVICE) {
5641 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
6aa20a22 5642 SPI_CONTROL_BYTECNT(0x3) |
ad4ebed0 5643 SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
5644 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5645 val64 |= SPI_CONTROL_REQ;
5646 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5647 while (exit_cnt < 5) {
5648 val64 = readq(&bar0->spi_control);
5649 if (val64 & SPI_CONTROL_NACK) {
5650 ret = 1;
5651 break;
5652 } else if (val64 & SPI_CONTROL_DONE) {
5653 *data = readq(&bar0->spi_data);
5654 *data &= 0xffffff;
5655 ret = 0;
5656 break;
5657 }
5658 msleep(50);
5659 exit_cnt++;
5660 }
5661 }
1da177e4
LT
5662 return ret;
5663}
5664
5665/**
5666 * write_eeprom - actually writes the relevant part of the data value.
5667 * @sp : private member of the device structure, which is a pointer to the
5668 * s2io_nic structure.
5669 * @off : offset at which the data must be written
5670 * @data : The data that is to be written
20346722 5671 * @cnt : Number of bytes of the data that are actually to be written into
1da177e4
LT
5672 * the Eeprom. (max of 3)
5673 * Description:
5674 * Actually writes the relevant part of the data value into the Eeprom
5675 * through the I2C bus.
5676 * Return value:
5677 * 0 on success, -1 on failure.
5678 */
5679
1ee6dd77 5680static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
1da177e4
LT
5681{
5682 int exit_cnt = 0, ret = -1;
5683 u64 val64;
1ee6dd77 5684 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4 5685
ad4ebed0 5686 if (sp->device_type == XFRAME_I_DEVICE) {
5687 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
5688 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
5689 I2C_CONTROL_CNTL_START;
5690 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
5691
5692 while (exit_cnt < 5) {
5693 val64 = readq(&bar0->i2c_control);
5694 if (I2C_CONTROL_CNTL_END(val64)) {
5695 if (!(val64 & I2C_CONTROL_NACK))
5696 ret = 0;
5697 break;
5698 }
5699 msleep(50);
5700 exit_cnt++;
5701 }
5702 }
1da177e4 5703
ad4ebed0 5704 if (sp->device_type == XFRAME_II_DEVICE) {
5705 int write_cnt = (cnt == 8) ? 0 : cnt;
5706 writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);
5707
5708 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
6aa20a22 5709 SPI_CONTROL_BYTECNT(write_cnt) |
ad4ebed0 5710 SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
5711 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5712 val64 |= SPI_CONTROL_REQ;
5713 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
5714 while (exit_cnt < 5) {
5715 val64 = readq(&bar0->spi_control);
5716 if (val64 & SPI_CONTROL_NACK) {
5717 ret = 1;
5718 break;
5719 } else if (val64 & SPI_CONTROL_DONE) {
1da177e4 5720 ret = 0;
ad4ebed0 5721 break;
5722 }
5723 msleep(50);
5724 exit_cnt++;
1da177e4 5725 }
1da177e4 5726 }
1da177e4
LT
5727 return ret;
5728}
1ee6dd77 5729static void s2io_vpd_read(struct s2io_nic *nic)
9dc737a7 5730{
b41477f3
AR
5731 u8 *vpd_data;
5732 u8 data;
9dc737a7
AR
5733 int i=0, cnt, fail = 0;
5734 int vpd_addr = 0x80;
5735
5736 if (nic->device_type == XFRAME_II_DEVICE) {
5737 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5738 vpd_addr = 0x80;
5739 }
5740 else {
5741 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5742 vpd_addr = 0x50;
5743 }
19a60522 5744 strcpy(nic->serial_num, "NOT AVAILABLE");
9dc737a7 5745
b41477f3 5746 vpd_data = kmalloc(256, GFP_KERNEL);
c53d4945
SH
5747 if (!vpd_data) {
5748 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
b41477f3 5749 return;
c53d4945 5750 }
491976b2 5751 nic->mac_control.stats_info->sw_stat.mem_allocated += 256;
b41477f3 5752
9dc737a7
AR
5753 for (i = 0; i < 256; i +=4 ) {
5754 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5755 pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
5756 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5757 for (cnt = 0; cnt <5; cnt++) {
5758 msleep(2);
5759 pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5760 if (data == 0x80)
5761 break;
5762 }
5763 if (cnt >= 5) {
5764 DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5765 fail = 1;
5766 break;
5767 }
5768 pci_read_config_dword(nic->pdev, (vpd_addr + 4),
5769 (u32 *)&vpd_data[i]);
5770 }
19a60522
SS
5771
5772 if(!fail) {
5773 /* read serial number of adapter */
5774 for (cnt = 0; cnt < 256; cnt++) {
5775 if ((vpd_data[cnt] == 'S') &&
5776 (vpd_data[cnt+1] == 'N') &&
5777 (vpd_data[cnt+2] < VPD_STRING_LEN)) {
5778 memset(nic->serial_num, 0, VPD_STRING_LEN);
5779 memcpy(nic->serial_num, &vpd_data[cnt + 3],
5780 vpd_data[cnt+2]);
5781 break;
5782 }
5783 }
5784 }
5785
5786 if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
9dc737a7
AR
5787 memset(nic->product_name, 0, vpd_data[1]);
5788 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
5789 }
b41477f3 5790 kfree(vpd_data);
491976b2 5791 nic->mac_control.stats_info->sw_stat.mem_freed += 256;
9dc737a7
AR
5792}
5793
1da177e4
LT
5794/**
5795 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
5796 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
20346722 5797 * @eeprom : pointer to the user level structure provided by ethtool,
1da177e4
LT
5798 * containing all relevant information.
5799 * @data_buf : user defined value to be written into Eeprom.
5800 * Description: Reads the values stored in the Eeprom at given offset
5801 * for a given length. Stores these values int the input argument data
5802 * buffer 'data_buf' and returns these to the caller (ethtool.)
5803 * Return value:
5804 * int 0 on success
5805 */
5806
5807static int s2io_ethtool_geeprom(struct net_device *dev,
20346722 5808 struct ethtool_eeprom *eeprom, u8 * data_buf)
1da177e4 5809{
ad4ebed0 5810 u32 i, valid;
5811 u64 data;
1ee6dd77 5812 struct s2io_nic *sp = dev->priv;
1da177e4
LT
5813
5814 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5815
5816 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5817 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5818
5819 for (i = 0; i < eeprom->len; i += 4) {
5820 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5821 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5822 return -EFAULT;
5823 }
5824 valid = INV(data);
5825 memcpy((data_buf + i), &valid, 4);
5826 }
5827 return 0;
5828}
5829
5830/**
5831 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5832 * @sp : private member of the device structure, which is a pointer to the
5833 * s2io_nic structure.
20346722 5834 * @eeprom : pointer to the user level structure provided by ethtool,
1da177e4
LT
5835 * containing all relevant information.
5836 * @data_buf ; user defined value to be written into Eeprom.
5837 * Description:
5838 * Tries to write the user provided value in the Eeprom, at the offset
5839 * given by the user.
5840 * Return value:
5841 * 0 on success, -EFAULT on failure.
5842 */
5843
5844static int s2io_ethtool_seeprom(struct net_device *dev,
5845 struct ethtool_eeprom *eeprom,
5846 u8 * data_buf)
5847{
5848 int len = eeprom->len, cnt = 0;
ad4ebed0 5849 u64 valid = 0, data;
1ee6dd77 5850 struct s2io_nic *sp = dev->priv;
1da177e4
LT
5851
5852 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5853 DBG_PRINT(ERR_DBG,
5854 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
5855 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
5856 eeprom->magic);
5857 return -EFAULT;
5858 }
5859
5860 while (len) {
5861 data = (u32) data_buf[cnt] & 0x000000FF;
5862 if (data) {
5863 valid = (u32) (data << 24);
5864 } else
5865 valid = data;
5866
5867 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5868 DBG_PRINT(ERR_DBG,
5869 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
5870 DBG_PRINT(ERR_DBG,
5871 "write into the specified offset\n");
5872 return -EFAULT;
5873 }
5874 cnt++;
5875 len--;
5876 }
5877
5878 return 0;
5879}
5880
5881/**
20346722
K
5882 * s2io_register_test - reads and writes into all clock domains.
5883 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
5884 * s2io_nic structure.
5885 * @data : variable that returns the result of each of the test conducted b
5886 * by the driver.
5887 * Description:
5888 * Read and write into all clock domains. The NIC has 3 clock domains,
5889 * see that registers in all the three regions are accessible.
5890 * Return value:
5891 * 0 on success.
5892 */
5893
1ee6dd77 5894static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
1da177e4 5895{
1ee6dd77 5896 struct XENA_dev_config __iomem *bar0 = sp->bar0;
ad4ebed0 5897 u64 val64 = 0, exp_val;
1da177e4
LT
5898 int fail = 0;
5899
20346722
K
5900 val64 = readq(&bar0->pif_rd_swapper_fb);
5901 if (val64 != 0x123456789abcdefULL) {
1da177e4
LT
5902 fail = 1;
5903 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
5904 }
5905
5906 val64 = readq(&bar0->rmac_pause_cfg);
5907 if (val64 != 0xc000ffff00000000ULL) {
5908 fail = 1;
5909 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
5910 }
5911
5912 val64 = readq(&bar0->rx_queue_cfg);
ad4ebed0 5913 if (sp->device_type == XFRAME_II_DEVICE)
5914 exp_val = 0x0404040404040404ULL;
5915 else
5916 exp_val = 0x0808080808080808ULL;
5917 if (val64 != exp_val) {
1da177e4
LT
5918 fail = 1;
5919 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
5920 }
5921
5922 val64 = readq(&bar0->xgxs_efifo_cfg);
5923 if (val64 != 0x000000001923141EULL) {
5924 fail = 1;
5925 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
5926 }
5927
5928 val64 = 0x5A5A5A5A5A5A5A5AULL;
5929 writeq(val64, &bar0->xmsi_data);
5930 val64 = readq(&bar0->xmsi_data);
5931 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5932 fail = 1;
5933 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
5934 }
5935
5936 val64 = 0xA5A5A5A5A5A5A5A5ULL;
5937 writeq(val64, &bar0->xmsi_data);
5938 val64 = readq(&bar0->xmsi_data);
5939 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5940 fail = 1;
5941 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
5942 }
5943
5944 *data = fail;
ad4ebed0 5945 return fail;
1da177e4
LT
5946}
5947
5948/**
20346722 5949 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
1da177e4
LT
5950 * @sp : private member of the device structure, which is a pointer to the
5951 * s2io_nic structure.
5952 * @data:variable that returns the result of each of the test conducted by
5953 * the driver.
5954 * Description:
20346722 5955 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
1da177e4
LT
5956 * register.
5957 * Return value:
5958 * 0 on success.
5959 */
5960
1ee6dd77 5961static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
1da177e4
LT
5962{
5963 int fail = 0;
ad4ebed0 5964 u64 ret_data, org_4F0, org_7F0;
5965 u8 saved_4F0 = 0, saved_7F0 = 0;
5966 struct net_device *dev = sp->dev;
1da177e4
LT
5967
5968 /* Test Write Error at offset 0 */
ad4ebed0 5969 /* Note that SPI interface allows write access to all areas
5970 * of EEPROM. Hence doing all negative testing only for Xframe I.
5971 */
5972 if (sp->device_type == XFRAME_I_DEVICE)
5973 if (!write_eeprom(sp, 0, 0, 3))
5974 fail = 1;
5975
5976 /* Save current values at offsets 0x4F0 and 0x7F0 */
5977 if (!read_eeprom(sp, 0x4F0, &org_4F0))
5978 saved_4F0 = 1;
5979 if (!read_eeprom(sp, 0x7F0, &org_7F0))
5980 saved_7F0 = 1;
1da177e4
LT
5981
5982 /* Test Write at offset 4f0 */
ad4ebed0 5983 if (write_eeprom(sp, 0x4F0, 0x012345, 3))
1da177e4
LT
5984 fail = 1;
5985 if (read_eeprom(sp, 0x4F0, &ret_data))
5986 fail = 1;
5987
ad4ebed0 5988 if (ret_data != 0x012345) {
26b7625c
AM
5989 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5990 "Data written %llx Data read %llx\n",
5991 dev->name, (unsigned long long)0x12345,
5992 (unsigned long long)ret_data);
1da177e4 5993 fail = 1;
ad4ebed0 5994 }
1da177e4
LT
5995
5996 /* Reset the EEPROM data go FFFF */
ad4ebed0 5997 write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
1da177e4
LT
5998
5999 /* Test Write Request Error at offset 0x7c */
ad4ebed0 6000 if (sp->device_type == XFRAME_I_DEVICE)
6001 if (!write_eeprom(sp, 0x07C, 0, 3))
6002 fail = 1;
1da177e4 6003
ad4ebed0 6004 /* Test Write Request at offset 0x7f0 */
6005 if (write_eeprom(sp, 0x7F0, 0x012345, 3))
1da177e4 6006 fail = 1;
ad4ebed0 6007 if (read_eeprom(sp, 0x7F0, &ret_data))
1da177e4
LT
6008 fail = 1;
6009
ad4ebed0 6010 if (ret_data != 0x012345) {
26b7625c
AM
6011 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
6012 "Data written %llx Data read %llx\n",
6013 dev->name, (unsigned long long)0x12345,
6014 (unsigned long long)ret_data);
1da177e4 6015 fail = 1;
ad4ebed0 6016 }
1da177e4
LT
6017
6018 /* Reset the EEPROM data go FFFF */
ad4ebed0 6019 write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
1da177e4 6020
ad4ebed0 6021 if (sp->device_type == XFRAME_I_DEVICE) {
6022 /* Test Write Error at offset 0x80 */
6023 if (!write_eeprom(sp, 0x080, 0, 3))
6024 fail = 1;
1da177e4 6025
ad4ebed0 6026 /* Test Write Error at offset 0xfc */
6027 if (!write_eeprom(sp, 0x0FC, 0, 3))
6028 fail = 1;
1da177e4 6029
ad4ebed0 6030 /* Test Write Error at offset 0x100 */
6031 if (!write_eeprom(sp, 0x100, 0, 3))
6032 fail = 1;
1da177e4 6033
ad4ebed0 6034 /* Test Write Error at offset 4ec */
6035 if (!write_eeprom(sp, 0x4EC, 0, 3))
6036 fail = 1;
6037 }
6038
6039 /* Restore values at offsets 0x4F0 and 0x7F0 */
6040 if (saved_4F0)
6041 write_eeprom(sp, 0x4F0, org_4F0, 3);
6042 if (saved_7F0)
6043 write_eeprom(sp, 0x7F0, org_7F0, 3);
1da177e4
LT
6044
6045 *data = fail;
ad4ebed0 6046 return fail;
1da177e4
LT
6047}
6048
6049/**
6050 * s2io_bist_test - invokes the MemBist test of the card .
20346722 6051 * @sp : private member of the device structure, which is a pointer to the
1da177e4 6052 * s2io_nic structure.
20346722 6053 * @data:variable that returns the result of each of the test conducted by
1da177e4
LT
6054 * the driver.
6055 * Description:
6056 * This invokes the MemBist test of the card. We give around
6057 * 2 secs time for the Test to complete. If it's still not complete
20346722 6058 * within this peiod, we consider that the test failed.
1da177e4
LT
6059 * Return value:
6060 * 0 on success and -1 on failure.
6061 */
6062
1ee6dd77 6063static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
1da177e4
LT
6064{
6065 u8 bist = 0;
6066 int cnt = 0, ret = -1;
6067
6068 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6069 bist |= PCI_BIST_START;
6070 pci_write_config_word(sp->pdev, PCI_BIST, bist);
6071
6072 while (cnt < 20) {
6073 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6074 if (!(bist & PCI_BIST_START)) {
6075 *data = (bist & PCI_BIST_CODE_MASK);
6076 ret = 0;
6077 break;
6078 }
6079 msleep(100);
6080 cnt++;
6081 }
6082
6083 return ret;
6084}
6085
6086/**
20346722
K
6087 * s2io_link_test - verifies the link state of the nic
6088 * @sp ; private member of the device structure, which is a pointer to the
1da177e4
LT
6089 * s2io_nic structure.
6090 * @data: variable that returns the result of each of the test conducted by
6091 * the driver.
6092 * Description:
20346722 6093 * The function verifies the link state of the NIC and updates the input
1da177e4
LT
6094 * argument 'data' appropriately.
6095 * Return value:
6096 * 0 on success.
6097 */
6098
1ee6dd77 6099static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
1da177e4 6100{
1ee6dd77 6101 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
6102 u64 val64;
6103
6104 val64 = readq(&bar0->adapter_status);
c92ca04b 6105 if(!(LINK_IS_UP(val64)))
1da177e4 6106 *data = 1;
c92ca04b
AR
6107 else
6108 *data = 0;
1da177e4 6109
b41477f3 6110 return *data;
1da177e4
LT
6111}
6112
6113/**
20346722
K
6114 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
6115 * @sp - private member of the device structure, which is a pointer to the
1da177e4 6116 * s2io_nic structure.
20346722 6117 * @data - variable that returns the result of each of the test
1da177e4
LT
6118 * conducted by the driver.
6119 * Description:
20346722 6120 * This is one of the offline test that tests the read and write
1da177e4
LT
6121 * access to the RldRam chip on the NIC.
6122 * Return value:
6123 * 0 on success.
6124 */
6125
1ee6dd77 6126static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
1da177e4 6127{
1ee6dd77 6128 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4 6129 u64 val64;
ad4ebed0 6130 int cnt, iteration = 0, test_fail = 0;
1da177e4
LT
6131
6132 val64 = readq(&bar0->adapter_control);
6133 val64 &= ~ADAPTER_ECC_EN;
6134 writeq(val64, &bar0->adapter_control);
6135
6136 val64 = readq(&bar0->mc_rldram_test_ctrl);
6137 val64 |= MC_RLDRAM_TEST_MODE;
ad4ebed0 6138 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
1da177e4
LT
6139
6140 val64 = readq(&bar0->mc_rldram_mrs);
6141 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
6142 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6143
6144 val64 |= MC_RLDRAM_MRS_ENABLE;
6145 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
6146
6147 while (iteration < 2) {
6148 val64 = 0x55555555aaaa0000ULL;
6149 if (iteration == 1) {
6150 val64 ^= 0xFFFFFFFFFFFF0000ULL;
6151 }
6152 writeq(val64, &bar0->mc_rldram_test_d0);
6153
6154 val64 = 0xaaaa5a5555550000ULL;
6155 if (iteration == 1) {
6156 val64 ^= 0xFFFFFFFFFFFF0000ULL;
6157 }
6158 writeq(val64, &bar0->mc_rldram_test_d1);
6159
6160 val64 = 0x55aaaaaaaa5a0000ULL;
6161 if (iteration == 1) {
6162 val64 ^= 0xFFFFFFFFFFFF0000ULL;
6163 }
6164 writeq(val64, &bar0->mc_rldram_test_d2);
6165
ad4ebed0 6166 val64 = (u64) (0x0000003ffffe0100ULL);
1da177e4
LT
6167 writeq(val64, &bar0->mc_rldram_test_add);
6168
ad4ebed0 6169 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
6170 MC_RLDRAM_TEST_GO;
6171 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
1da177e4
LT
6172
6173 for (cnt = 0; cnt < 5; cnt++) {
6174 val64 = readq(&bar0->mc_rldram_test_ctrl);
6175 if (val64 & MC_RLDRAM_TEST_DONE)
6176 break;
6177 msleep(200);
6178 }
6179
6180 if (cnt == 5)
6181 break;
6182
ad4ebed0 6183 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
6184 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
1da177e4
LT
6185
6186 for (cnt = 0; cnt < 5; cnt++) {
6187 val64 = readq(&bar0->mc_rldram_test_ctrl);
6188 if (val64 & MC_RLDRAM_TEST_DONE)
6189 break;
6190 msleep(500);
6191 }
6192
6193 if (cnt == 5)
6194 break;
6195
6196 val64 = readq(&bar0->mc_rldram_test_ctrl);
ad4ebed0 6197 if (!(val64 & MC_RLDRAM_TEST_PASS))
6198 test_fail = 1;
1da177e4
LT
6199
6200 iteration++;
6201 }
6202
ad4ebed0 6203 *data = test_fail;
1da177e4 6204
ad4ebed0 6205 /* Bring the adapter out of test mode */
6206 SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
6207
6208 return test_fail;
1da177e4
LT
6209}
6210
6211/**
6212 * s2io_ethtool_test - conducts 6 tests to determine the health of card.
6213 * @sp : private member of the device structure, which is a pointer to the
6214 * s2io_nic structure.
6215 * @ethtest : pointer to a ethtool command specific structure that will be
6216 * returned to the user.
20346722 6217 * @data : variable that returns the result of each of the test
1da177e4
LT
6218 * conducted by the driver.
6219 * Description:
6220 * This function conducts 6 tests ( 4 offline and 2 online) to determine
6221 * the health of the card.
6222 * Return value:
6223 * void
6224 */
6225
6226static void s2io_ethtool_test(struct net_device *dev,
6227 struct ethtool_test *ethtest,
6228 uint64_t * data)
6229{
1ee6dd77 6230 struct s2io_nic *sp = dev->priv;
1da177e4
LT
6231 int orig_state = netif_running(sp->dev);
6232
6233 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
6234 /* Offline Tests. */
20346722 6235 if (orig_state)
1da177e4 6236 s2io_close(sp->dev);
1da177e4
LT
6237
6238 if (s2io_register_test(sp, &data[0]))
6239 ethtest->flags |= ETH_TEST_FL_FAILED;
6240
6241 s2io_reset(sp);
1da177e4
LT
6242
6243 if (s2io_rldram_test(sp, &data[3]))
6244 ethtest->flags |= ETH_TEST_FL_FAILED;
6245
6246 s2io_reset(sp);
1da177e4
LT
6247
6248 if (s2io_eeprom_test(sp, &data[1]))
6249 ethtest->flags |= ETH_TEST_FL_FAILED;
6250
6251 if (s2io_bist_test(sp, &data[4]))
6252 ethtest->flags |= ETH_TEST_FL_FAILED;
6253
6254 if (orig_state)
6255 s2io_open(sp->dev);
6256
6257 data[2] = 0;
6258 } else {
6259 /* Online Tests. */
6260 if (!orig_state) {
6261 DBG_PRINT(ERR_DBG,
6262 "%s: is not up, cannot run test\n",
6263 dev->name);
6264 data[0] = -1;
6265 data[1] = -1;
6266 data[2] = -1;
6267 data[3] = -1;
6268 data[4] = -1;
6269 }
6270
6271 if (s2io_link_test(sp, &data[2]))
6272 ethtest->flags |= ETH_TEST_FL_FAILED;
6273
6274 data[0] = 0;
6275 data[1] = 0;
6276 data[3] = 0;
6277 data[4] = 0;
6278 }
6279}
6280
6281static void s2io_get_ethtool_stats(struct net_device *dev,
6282 struct ethtool_stats *estats,
6283 u64 * tmp_stats)
6284{
8116f3cf 6285 int i = 0, k;
1ee6dd77
RB
6286 struct s2io_nic *sp = dev->priv;
6287 struct stat_block *stat_info = sp->mac_control.stats_info;
1da177e4 6288
7ba013ac 6289 s2io_updt_stats(sp);
541ae68f
K
6290 tmp_stats[i++] =
6291 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
6292 le32_to_cpu(stat_info->tmac_frms);
6293 tmp_stats[i++] =
6294 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
6295 le32_to_cpu(stat_info->tmac_data_octets);
1da177e4 6296 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
541ae68f
K
6297 tmp_stats[i++] =
6298 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
6299 le32_to_cpu(stat_info->tmac_mcst_frms);
6300 tmp_stats[i++] =
6301 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
6302 le32_to_cpu(stat_info->tmac_bcst_frms);
1da177e4 6303 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
bd1034f0
AR
6304 tmp_stats[i++] =
6305 (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
6306 le32_to_cpu(stat_info->tmac_ttl_octets);
6307 tmp_stats[i++] =
6308 (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
6309 le32_to_cpu(stat_info->tmac_ucst_frms);
6310 tmp_stats[i++] =
6311 (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
6312 le32_to_cpu(stat_info->tmac_nucst_frms);
541ae68f
K
6313 tmp_stats[i++] =
6314 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
6315 le32_to_cpu(stat_info->tmac_any_err_frms);
bd1034f0 6316 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
1da177e4 6317 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
541ae68f
K
6318 tmp_stats[i++] =
6319 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
6320 le32_to_cpu(stat_info->tmac_vld_ip);
6321 tmp_stats[i++] =
6322 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
6323 le32_to_cpu(stat_info->tmac_drop_ip);
6324 tmp_stats[i++] =
6325 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
6326 le32_to_cpu(stat_info->tmac_icmp);
6327 tmp_stats[i++] =
6328 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
6329 le32_to_cpu(stat_info->tmac_rst_tcp);
1da177e4 6330 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
541ae68f
K
6331 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
6332 le32_to_cpu(stat_info->tmac_udp);
6333 tmp_stats[i++] =
6334 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
6335 le32_to_cpu(stat_info->rmac_vld_frms);
6336 tmp_stats[i++] =
6337 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
6338 le32_to_cpu(stat_info->rmac_data_octets);
1da177e4
LT
6339 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
6340 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
541ae68f
K
6341 tmp_stats[i++] =
6342 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
6343 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
6344 tmp_stats[i++] =
6345 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
6346 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
1da177e4 6347 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
bd1034f0 6348 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
1da177e4
LT
6349 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
6350 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
bd1034f0
AR
6351 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
6352 tmp_stats[i++] =
6353 (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
6354 le32_to_cpu(stat_info->rmac_ttl_octets);
6355 tmp_stats[i++] =
6356 (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
6357 << 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
6358 tmp_stats[i++] =
6359 (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
6360 << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
541ae68f
K
6361 tmp_stats[i++] =
6362 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
6363 le32_to_cpu(stat_info->rmac_discarded_frms);
bd1034f0
AR
6364 tmp_stats[i++] =
6365 (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
6366 << 32 | le32_to_cpu(stat_info->rmac_drop_events);
6367 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
6368 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
541ae68f
K
6369 tmp_stats[i++] =
6370 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
6371 le32_to_cpu(stat_info->rmac_usized_frms);
6372 tmp_stats[i++] =
6373 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
6374 le32_to_cpu(stat_info->rmac_osized_frms);
6375 tmp_stats[i++] =
6376 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
6377 le32_to_cpu(stat_info->rmac_frag_frms);
6378 tmp_stats[i++] =
6379 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
6380 le32_to_cpu(stat_info->rmac_jabber_frms);
bd1034f0
AR
6381 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
6382 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
6383 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
6384 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
6385 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
6386 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
6387 tmp_stats[i++] =
6388 (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
541ae68f 6389 le32_to_cpu(stat_info->rmac_ip);
1da177e4
LT
6390 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
6391 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
bd1034f0
AR
6392 tmp_stats[i++] =
6393 (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
541ae68f 6394 le32_to_cpu(stat_info->rmac_drop_ip);
bd1034f0
AR
6395 tmp_stats[i++] =
6396 (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
541ae68f 6397 le32_to_cpu(stat_info->rmac_icmp);
1da177e4 6398 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
bd1034f0
AR
6399 tmp_stats[i++] =
6400 (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
541ae68f
K
6401 le32_to_cpu(stat_info->rmac_udp);
6402 tmp_stats[i++] =
6403 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
6404 le32_to_cpu(stat_info->rmac_err_drp_udp);
bd1034f0
AR
6405 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
6406 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
6407 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
6408 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
6409 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
6410 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
6411 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
6412 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
6413 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
6414 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
6415 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
6416 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
6417 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
6418 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
6419 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
6420 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
6421 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
541ae68f
K
6422 tmp_stats[i++] =
6423 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
6424 le32_to_cpu(stat_info->rmac_pause_cnt);
bd1034f0
AR
6425 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
6426 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
541ae68f
K
6427 tmp_stats[i++] =
6428 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
6429 le32_to_cpu(stat_info->rmac_accepted_ip);
1da177e4 6430 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
bd1034f0
AR
6431 tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
6432 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
6433 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
6434 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
6435 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
6436 tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
6437 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
6438 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
6439 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
6440 tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
6441 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
6442 tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
6443 tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
6444 tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
6445 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
6446 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
6447 tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
6448 tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
fa1f0cb3
SS
6449
6450 /* Enhanced statistics exist only for Hercules */
6451 if(sp->device_type == XFRAME_II_DEVICE) {
6452 tmp_stats[i++] =
6453 le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
6454 tmp_stats[i++] =
6455 le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
6456 tmp_stats[i++] =
6457 le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
6458 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
6459 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
6460 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
6461 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
6462 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
6463 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
6464 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
6465 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
6466 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
6467 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
6468 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
6469 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
6470 tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
6471 }
6472
7ba013ac
K
6473 tmp_stats[i++] = 0;
6474 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
6475 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
bd1034f0
AR
6476 tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
6477 tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
6478 tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
6479 tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
8116f3cf
SS
6480 for (k = 0; k < MAX_RX_RINGS; k++)
6481 tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt[k];
bd1034f0
AR
6482 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
6483 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
6484 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
6485 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
6486 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
6487 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
6488 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
6489 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
6490 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
6491 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
6492 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
6493 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
7d3d0439
RA
6494 tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
6495 tmp_stats[i++] = stat_info->sw_stat.sending_both;
6496 tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
6497 tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
fe931395 6498 if (stat_info->sw_stat.num_aggregations) {
bd1034f0
AR
6499 u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
6500 int count = 0;
6aa20a22 6501 /*
bd1034f0
AR
6502 * Since 64-bit divide does not work on all platforms,
6503 * do repeated subtraction.
6504 */
6505 while (tmp >= stat_info->sw_stat.num_aggregations) {
6506 tmp -= stat_info->sw_stat.num_aggregations;
6507 count++;
6508 }
6509 tmp_stats[i++] = count;
fe931395 6510 }
bd1034f0
AR
6511 else
6512 tmp_stats[i++] = 0;
c53d4945 6513 tmp_stats[i++] = stat_info->sw_stat.mem_alloc_fail_cnt;
491abf25 6514 tmp_stats[i++] = stat_info->sw_stat.pci_map_fail_cnt;
c53d4945 6515 tmp_stats[i++] = stat_info->sw_stat.watchdog_timer_cnt;
491976b2
SH
6516 tmp_stats[i++] = stat_info->sw_stat.mem_allocated;
6517 tmp_stats[i++] = stat_info->sw_stat.mem_freed;
6518 tmp_stats[i++] = stat_info->sw_stat.link_up_cnt;
6519 tmp_stats[i++] = stat_info->sw_stat.link_down_cnt;
6520 tmp_stats[i++] = stat_info->sw_stat.link_up_time;
6521 tmp_stats[i++] = stat_info->sw_stat.link_down_time;
6522
6523 tmp_stats[i++] = stat_info->sw_stat.tx_buf_abort_cnt;
6524 tmp_stats[i++] = stat_info->sw_stat.tx_desc_abort_cnt;
6525 tmp_stats[i++] = stat_info->sw_stat.tx_parity_err_cnt;
6526 tmp_stats[i++] = stat_info->sw_stat.tx_link_loss_cnt;
6527 tmp_stats[i++] = stat_info->sw_stat.tx_list_proc_err_cnt;
6528
6529 tmp_stats[i++] = stat_info->sw_stat.rx_parity_err_cnt;
6530 tmp_stats[i++] = stat_info->sw_stat.rx_abort_cnt;
6531 tmp_stats[i++] = stat_info->sw_stat.rx_parity_abort_cnt;
6532 tmp_stats[i++] = stat_info->sw_stat.rx_rda_fail_cnt;
6533 tmp_stats[i++] = stat_info->sw_stat.rx_unkn_prot_cnt;
6534 tmp_stats[i++] = stat_info->sw_stat.rx_fcs_err_cnt;
6535 tmp_stats[i++] = stat_info->sw_stat.rx_buf_size_err_cnt;
6536 tmp_stats[i++] = stat_info->sw_stat.rx_rxd_corrupt_cnt;
6537 tmp_stats[i++] = stat_info->sw_stat.rx_unkn_err_cnt;
8116f3cf
SS
6538 tmp_stats[i++] = stat_info->sw_stat.tda_err_cnt;
6539 tmp_stats[i++] = stat_info->sw_stat.pfc_err_cnt;
6540 tmp_stats[i++] = stat_info->sw_stat.pcc_err_cnt;
6541 tmp_stats[i++] = stat_info->sw_stat.tti_err_cnt;
6542 tmp_stats[i++] = stat_info->sw_stat.tpa_err_cnt;
6543 tmp_stats[i++] = stat_info->sw_stat.sm_err_cnt;
6544 tmp_stats[i++] = stat_info->sw_stat.lso_err_cnt;
6545 tmp_stats[i++] = stat_info->sw_stat.mac_tmac_err_cnt;
6546 tmp_stats[i++] = stat_info->sw_stat.mac_rmac_err_cnt;
6547 tmp_stats[i++] = stat_info->sw_stat.xgxs_txgxs_err_cnt;
6548 tmp_stats[i++] = stat_info->sw_stat.xgxs_rxgxs_err_cnt;
6549 tmp_stats[i++] = stat_info->sw_stat.rc_err_cnt;
6550 tmp_stats[i++] = stat_info->sw_stat.prc_pcix_err_cnt;
6551 tmp_stats[i++] = stat_info->sw_stat.rpa_err_cnt;
6552 tmp_stats[i++] = stat_info->sw_stat.rda_err_cnt;
6553 tmp_stats[i++] = stat_info->sw_stat.rti_err_cnt;
6554 tmp_stats[i++] = stat_info->sw_stat.mc_err_cnt;
1da177e4
LT
6555}
6556
ac1f60db 6557static int s2io_ethtool_get_regs_len(struct net_device *dev)
1da177e4
LT
6558{
6559 return (XENA_REG_SPACE);
6560}
6561
6562
ac1f60db 6563static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
1da177e4 6564{
1ee6dd77 6565 struct s2io_nic *sp = dev->priv;
1da177e4
LT
6566
6567 return (sp->rx_csum);
6568}
ac1f60db
AB
6569
6570static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
1da177e4 6571{
1ee6dd77 6572 struct s2io_nic *sp = dev->priv;
1da177e4
LT
6573
6574 if (data)
6575 sp->rx_csum = 1;
6576 else
6577 sp->rx_csum = 0;
6578
6579 return 0;
6580}
ac1f60db
AB
6581
6582static int s2io_get_eeprom_len(struct net_device *dev)
1da177e4
LT
6583{
6584 return (XENA_EEPROM_SPACE);
6585}
6586
b9f2c044 6587static int s2io_get_sset_count(struct net_device *dev, int sset)
1da177e4 6588{
b9f2c044
JG
6589 struct s2io_nic *sp = dev->priv;
6590
6591 switch (sset) {
6592 case ETH_SS_TEST:
6593 return S2IO_TEST_LEN;
6594 case ETH_SS_STATS:
6595 switch(sp->device_type) {
6596 case XFRAME_I_DEVICE:
6597 return XFRAME_I_STAT_LEN;
6598 case XFRAME_II_DEVICE:
6599 return XFRAME_II_STAT_LEN;
6600 default:
6601 return 0;
6602 }
6603 default:
6604 return -EOPNOTSUPP;
6605 }
1da177e4 6606}
ac1f60db
AB
6607
6608static void s2io_ethtool_get_strings(struct net_device *dev,
6609 u32 stringset, u8 * data)
1da177e4 6610{
fa1f0cb3
SS
6611 int stat_size = 0;
6612 struct s2io_nic *sp = dev->priv;
6613
1da177e4
LT
6614 switch (stringset) {
6615 case ETH_SS_TEST:
6616 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6617 break;
6618 case ETH_SS_STATS:
fa1f0cb3
SS
6619 stat_size = sizeof(ethtool_xena_stats_keys);
6620 memcpy(data, &ethtool_xena_stats_keys,stat_size);
6621 if(sp->device_type == XFRAME_II_DEVICE) {
6622 memcpy(data + stat_size,
6623 &ethtool_enhanced_stats_keys,
6624 sizeof(ethtool_enhanced_stats_keys));
6625 stat_size += sizeof(ethtool_enhanced_stats_keys);
6626 }
6627
6628 memcpy(data + stat_size, &ethtool_driver_stats_keys,
6629 sizeof(ethtool_driver_stats_keys));
1da177e4
LT
6630 }
6631}
1da177e4 6632
ac1f60db 6633static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
1da177e4
LT
6634{
6635 if (data)
6636 dev->features |= NETIF_F_IP_CSUM;
6637 else
6638 dev->features &= ~NETIF_F_IP_CSUM;
6639
6640 return 0;
6641}
6642
75c30b13
AR
6643static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
6644{
6645 return (dev->features & NETIF_F_TSO) != 0;
6646}
6647static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
6648{
6649 if (data)
6650 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
6651 else
6652 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
6653
6654 return 0;
6655}
1da177e4 6656
/*
 * ethtool entry points for this driver. Handlers prefixed "s2io_" are
 * implemented in this file; "ethtool_op_" entries are generic kernel
 * helpers operating directly on dev->features.
 */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_settings = s2io_ethtool_gset,
	.set_settings = s2io_ethtool_sset,
	.get_drvinfo = s2io_ethtool_gdrvinfo,
	.get_regs_len = s2io_ethtool_get_regs_len,
	.get_regs = s2io_ethtool_gregs,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = s2io_get_eeprom_len,
	.get_eeprom = s2io_ethtool_geeprom,
	.set_eeprom = s2io_ethtool_seeprom,
	.get_ringparam = s2io_ethtool_gringparam,
	.get_pauseparam = s2io_ethtool_getpause_data,
	.set_pauseparam = s2io_ethtool_setpause_data,
	.get_rx_csum = s2io_ethtool_get_rx_csum,
	.set_rx_csum = s2io_ethtool_set_rx_csum,
	.set_tx_csum = s2io_ethtool_op_set_tx_csum,
	.set_sg = ethtool_op_set_sg,
	.get_tso = s2io_ethtool_op_get_tso,
	.set_tso = s2io_ethtool_op_set_tso,
	.set_ufo = ethtool_op_set_ufo,
	.self_test = s2io_ethtool_test,
	.get_strings = s2io_ethtool_get_strings,
	.phys_id = s2io_ethtool_idnic,
	.get_ethtool_stats = s2io_get_ethtool_stats,
	.get_sset_count = s2io_get_sset_count,
};
6683
6684/**
20346722 6685 * s2io_ioctl - Entry point for the Ioctl
1da177e4
LT
6686 * @dev : Device pointer.
 6687 * @ifr : An IOCTL-specific structure, that can contain a pointer to
6688 * a proprietary structure used to pass information to the driver.
6689 * @cmd : This is used to distinguish between the different commands that
6690 * can be passed to the IOCTL functions.
6691 * Description:
20346722
K
 6692 * Currently there is no special functionality supported in IOCTL, hence
 6693 * this function always returns -EOPNOTSUPP.
1da177e4
LT
6694 */
6695
static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	/* No private ioctls are implemented for this driver */
	return -EOPNOTSUPP;
}
6700
6701/**
6702 * s2io_change_mtu - entry point to change MTU size for the device.
6703 * @dev : device pointer.
6704 * @new_mtu : the new MTU size for the device.
6705 * Description: A driver entry point to change MTU size for the device.
6706 * Before changing the MTU the device must be stopped.
6707 * Return value:
6708 * 0 on success and an appropriate (-)ve integer as defined in errno.h
6709 * file on failure.
6710 */
6711
static int s2io_change_mtu(struct net_device *dev, int new_mtu)
{
	struct s2io_nic *sp = dev->priv;
	int ret = 0;

	/* Reject MTUs outside the device's supported (MIN_MTU..jumbo) range */
	if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
		DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
			  dev->name);
		return -EPERM;
	}

	dev->mtu = new_mtu;
	if (netif_running(dev)) {
		/*
		 * A live interface must be fully cycled: the Rx buffer
		 * sizes programmed in s2io_card_up() depend on dev->mtu.
		 */
		s2io_stop_all_tx_queue(sp);
		s2io_card_down(sp);
		ret = s2io_card_up(sp);
		if (ret) {
			DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
				  __FUNCTION__);
			/* Interface stays down; queues remain stopped */
			return ret;
		}
		s2io_wake_all_tx_queue(sp);
	} else { /* Device is down */
		struct XENA_dev_config __iomem *bar0 = sp->bar0;
		u64 val64 = new_mtu;

		/* Program the max payload length register directly */
		writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
	}

	return ret;
}
6743
1da177e4
LT
6744/**
 6745 * s2io_set_link - Set the Link status
 6746 * @data: long pointer to device private structure
6747 * Description: Sets the link status for the adapter
6748 */
6749
static void s2io_set_link(struct work_struct *work)
{
	struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
	struct net_device *dev = nic->dev;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64;
	u16 subid;

	/* Runs from a workqueue; rtnl serializes against open/close */
	rtnl_lock();

	if (!netif_running(dev))
		goto out_unlock;

	if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
		/* The card is being reset, no point doing anything */
		goto out_unlock;
	}

	subid = nic->pdev->subsystem_device;
	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Allow a small delay for the NICs self initiated
		 * cleanup to complete.
		 */
		msleep(100);
	}

	val64 = readq(&bar0->adapter_status);
	if (LINK_IS_UP(val64)) {
		/* Enable the adapter once if it is not already enabled */
		if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
			if (verify_xena_quiescence(nic)) {
				val64 = readq(&bar0->adapter_control);
				val64 |= ADAPTER_CNTL_EN;
				writeq(val64, &bar0->adapter_control);
				if (CARDS_WITH_FAULTY_LINK_INDICATORS(
					nic->device_type, subid)) {
					/* Drive the link LED via GPIO 0;
					 * read back flushes the posted write */
					val64 = readq(&bar0->gpio_control);
					val64 |= GPIO_CTRL_GPIO_0;
					writeq(val64, &bar0->gpio_control);
					val64 = readq(&bar0->gpio_control);
				} else {
					val64 |= ADAPTER_LED_ON;
					writeq(val64, &bar0->adapter_control);
				}
				nic->device_enabled_once = TRUE;
			} else {
				DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
				DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
				s2io_stop_all_tx_queue(nic);
			}
		}
		val64 = readq(&bar0->adapter_control);
		val64 |= ADAPTER_LED_ON;
		writeq(val64, &bar0->adapter_control);
		s2io_link(nic, LINK_UP);
	} else {
		if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
						      subid)) {
			val64 = readq(&bar0->gpio_control);
			val64 &= ~GPIO_CTRL_GPIO_0;
			writeq(val64, &bar0->gpio_control);
			val64 = readq(&bar0->gpio_control);
		}
		/* turn off LED */
		val64 = readq(&bar0->adapter_control);
		val64 = val64 &(~ADAPTER_LED_ON);
		writeq(val64, &bar0->adapter_control);
		s2io_link(nic, LINK_DOWN);
	}
	clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));

out_unlock:
	rtnl_unlock();
}
6824
1ee6dd77
RB
/*
 * set_rxd_buffer_pointer - (re)attach DMA-mapped buffers to one Rx descriptor.
 *
 * Used while quiescing the card: descriptors must hold valid buffer
 * addresses before their ownership bit is handed back to hardware, but the
 * frames will never be processed, so one skb (and its mapping, cached in
 * *temp0..*temp2) is shared across consecutive descriptors whose
 * Host_Control is NULL.
 *
 * Returns 0 on success, -ENOMEM on skb allocation or PCI mapping failure
 * (partially created mappings are unwound before returning).
 */
static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
				struct buffAdd *ba,
				struct sk_buff **skb, u64 *temp0, u64 *temp1,
				u64 *temp2, int size)
{
	struct net_device *dev = sp->dev;
	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

	if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
		struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
		/* allocate skb */
		if (*skb) {
			DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
			/*
			 * As Rx frame are not going to be processed,
			 * using same mapped address for the Rxd
			 * buffer pointer
			 */
			rxdp1->Buffer0_ptr = *temp0;
		} else {
			*skb = dev_alloc_skb(size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
				DBG_PRINT(INFO_DBG, "memory to allocate ");
				DBG_PRINT(INFO_DBG, "1 buf mode SKBs\n");
				sp->mac_control.stats_info->sw_stat. \
					mem_alloc_fail_cnt++;
				return -ENOMEM ;
			}
			sp->mac_control.stats_info->sw_stat.mem_allocated
				+= (*skb)->truesize;
			/* storing the mapped addr in a temp variable
			 * such it will be used for next rxd whose
			 * Host Control is NULL
			 */
			rxdp1->Buffer0_ptr = *temp0 =
				pci_map_single( sp->pdev, (*skb)->data,
					size - NET_IP_ALIGN,
					PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(rxdp1->Buffer0_ptr))
				goto memalloc_failed;
			rxdp->Host_Control = (unsigned long) (*skb);
		}
	} else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
		struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
		/* Two buffer Mode */
		if (*skb) {
			/* Reuse the three cached mappings for this RxD */
			rxdp3->Buffer2_ptr = *temp2;
			rxdp3->Buffer0_ptr = *temp0;
			rxdp3->Buffer1_ptr = *temp1;
		} else {
			*skb = dev_alloc_skb(size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
				DBG_PRINT(INFO_DBG, "memory to allocate ");
				DBG_PRINT(INFO_DBG, "2 buf mode SKBs\n");
				sp->mac_control.stats_info->sw_stat. \
					mem_alloc_fail_cnt++;
				return -ENOMEM;
			}
			sp->mac_control.stats_info->sw_stat.mem_allocated
				+= (*skb)->truesize;
			/* Buffer 2 receives the payload (mtu + 4 bytes) */
			rxdp3->Buffer2_ptr = *temp2 =
				pci_map_single(sp->pdev, (*skb)->data,
					       dev->mtu + 4,
					       PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(rxdp3->Buffer2_ptr))
				goto memalloc_failed;
			/* Buffer 0 receives the Ethernet header */
			rxdp3->Buffer0_ptr = *temp0 =
				pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
						PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(rxdp3->Buffer0_ptr)) {
				/* Unwind the Buffer2 mapping created above */
				pci_unmap_single (sp->pdev,
					(dma_addr_t)rxdp3->Buffer2_ptr,
					dev->mtu + 4, PCI_DMA_FROMDEVICE);
				goto memalloc_failed;
			}
			rxdp->Host_Control = (unsigned long) (*skb);

			/* Buffer-1 will be dummy buffer not used */
			rxdp3->Buffer1_ptr = *temp1 =
				pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
					       PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(rxdp3->Buffer1_ptr)) {
				/* Unwind both earlier mappings */
				pci_unmap_single (sp->pdev,
					(dma_addr_t)rxdp3->Buffer0_ptr,
					BUF0_LEN, PCI_DMA_FROMDEVICE);
				pci_unmap_single (sp->pdev,
					(dma_addr_t)rxdp3->Buffer2_ptr,
					dev->mtu + 4, PCI_DMA_FROMDEVICE);
				goto memalloc_failed;
			}
		}
	}
	return 0;
	memalloc_failed:
	stats->pci_map_fail_cnt++;
	stats->mem_freed += (*skb)->truesize;
	dev_kfree_skb(*skb);
	return -ENOMEM;
}
491abf25 6926
1ee6dd77
RB
6927static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6928 int size)
5d3213cc
AR
6929{
6930 struct net_device *dev = sp->dev;
6931 if (sp->rxd_mode == RXD_MODE_1) {
6932 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6933 } else if (sp->rxd_mode == RXD_MODE_3B) {
6934 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6935 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6936 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
5d3213cc
AR
6937 }
6938}
6939
/*
 * rxd_owner_bit_reset - hand every Rx descriptor back to hardware with a
 * valid (shared) buffer, so the card can quiesce without a ring bump.
 * Always returns 0; an allocation failure simply stops the refill early.
 */
static int rxd_owner_bit_reset(struct s2io_nic *sp)
{
	int i, j, k, blk_cnt = 0, size;
	struct mac_info * mac_control = &sp->mac_control;
	struct config_param *config = &sp->config;
	struct net_device *dev = sp->dev;
	struct RxD_t *rxdp = NULL;
	struct sk_buff *skb = NULL;
	struct buffAdd *ba = NULL;
	u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;

	/* Calculate the size based on ring mode */
	size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
		HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
	if (sp->rxd_mode == RXD_MODE_1)
		size += NET_IP_ALIGN;
	else if (sp->rxd_mode == RXD_MODE_3B)
		size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;

	for (i = 0; i < config->rx_ring_num; i++) {
		/* +1 accounts for the block's link descriptor */
		blk_cnt = config->rx_cfg[i].num_rxd /
			(rxd_count[sp->rxd_mode] +1);

		for (j = 0; j < blk_cnt; j++) {
			for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
				rxdp = mac_control->rings[i].
					rx_blocks[j].rxds[k].virt_addr;
				if(sp->rxd_mode == RXD_MODE_3B)
					ba = &mac_control->rings[i].ba[j][k];
				/* skb/temp*_64 cache one mapping reused
				 * across all descriptors in this walk */
				if (set_rxd_buffer_pointer(sp, rxdp, ba,
						&skb,(u64 *)&temp0_64,
						(u64 *)&temp1_64,
						(u64 *)&temp2_64,
						size) == -ENOMEM) {
					return 0;
				}

				set_rxd_buffer_size(sp, rxdp, size);
				/* ensure descriptor writes land before
				 * ownership transfers to hardware */
				wmb();
				/* flip the Ownership bit to Hardware */
				rxdp->Control_1 |= RXD_OWN_XENA;
			}
		}
	}
	return 0;

}
6987
/*
 * s2io_add_isr - enable MSI-X (if configured) and register interrupt
 * handlers. Falls back to legacy INTA if MSI-X enabling or any vector
 * registration fails. Returns 0 on success, -1 if INTA registration fails.
 */
static int s2io_add_isr(struct s2io_nic * sp)
{
	int ret = 0;
	struct net_device *dev = sp->dev;
	int err = 0;

	if (sp->config.intr_type == MSI_X)
		ret = s2io_enable_msi_x(sp);
	if (ret) {
		DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
		sp->config.intr_type = INTA;
	}

	/* Store the values of the MSIX table in the struct s2io_nic structure */
	store_xmsi_data(sp);

	/* After proper initialization of H/W, register ISR */
	if (sp->config.intr_type == MSI_X) {
		int i, msix_rx_cnt = 0;

		for (i = 0; i < sp->num_entries; i++) {
			if (sp->s2io_entries[i].in_use == MSIX_FLG) {
				if (sp->s2io_entries[i].type ==
					MSIX_RING_TYPE) {
					sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
						dev->name, i);
					err = request_irq(sp->entries[i].vector,
						s2io_msix_ring_handle, 0,
						sp->desc[i],
						sp->s2io_entries[i].arg);
				} else if (sp->s2io_entries[i].type ==
					MSIX_ALARM_TYPE) {
					/* NOTE(review): alarm vector is
					 * labelled "-TX" — presumably it also
					 * services Tx completions; confirm */
					sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
					dev->name, i);
					err = request_irq(sp->entries[i].vector,
						s2io_msix_fifo_handle, 0,
						sp->desc[i],
						sp->s2io_entries[i].arg);

				}
				/* if either data or addr is zero print it. */
				if (!(sp->msix_info[i].addr &&
					sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG,
						"%s @Addr:0x%llx Data:0x%llx\n",
						sp->desc[i],
						(unsigned long long)
						sp->msix_info[i].addr,
						(unsigned long long)
						ntohl(sp->msix_info[i].data));
				} else
					msix_rx_cnt++;
				if (err) {
					/* Drop every vector registered so
					 * far and fall back to INTA */
					remove_msix_isr(sp);

					DBG_PRINT(ERR_DBG,
						"%s:MSI-X-%d registration "
						"failed\n", dev->name, i);

					DBG_PRINT(ERR_DBG,
						"%s: Defaulting to INTA\n",
						dev->name);
					sp->config.intr_type = INTA;
					break;
				}
				sp->s2io_entries[i].in_use =
					MSIX_REGISTERED_SUCCESS;
			}
		}
		if (!err) {
			printk(KERN_INFO "MSI-X-RX %d entries enabled\n",
				--msix_rx_cnt);
			DBG_PRINT(INFO_DBG, "MSI-X-TX entries enabled"
				" through alarm vector\n");
		}
	}
	if (sp->config.intr_type == INTA) {
		/* Shared legacy interrupt path (also the fallback above) */
		err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
			sp->name, dev);
		if (err) {
			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
				dev->name);
			return -1;
		}
	}
	return 0;
}
1ee6dd77 7075static void s2io_rem_isr(struct s2io_nic * sp)
e6a8fee2 7076{
18b2b7bd
SH
7077 if (sp->config.intr_type == MSI_X)
7078 remove_msix_isr(sp);
7079 else
7080 remove_inta_isr(sp);
e6a8fee2
AR
7081}
7082
/*
 * do_s2io_card_down - bring the adapter down: stop timers/napi/traffic,
 * unregister interrupts, wait for hardware quiescence, and free buffers.
 * @do_io: non-zero to touch the hardware (stop_nic/quiesce/reset); zero
 * for a software-only teardown (e.g. after the device is already gone).
 */
static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
{
	int cnt = 0;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	register u64 val64 = 0;
	struct config_param *config;
	config = &sp->config;

	if (!is_s2io_card_up(sp))
		return;

	del_timer_sync(&sp->alarm_timer);
	/* If s2io_set_link task is executing, wait till it completes. */
	while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state))) {
		msleep(50);
	}
	clear_bit(__S2IO_STATE_CARD_UP, &sp->state);

	/* Disable napi */
	if (sp->config.napi) {
		int off = 0;
		if (config->intr_type == MSI_X) {
			/* Per-ring napi contexts in MSI-X mode */
			for (; off < sp->config.rx_ring_num; off++)
				napi_disable(&sp->mac_control.rings[off].napi);
		}
		else
			napi_disable(&sp->napi);
	}

	/* disable Tx and Rx traffic on the NIC */
	if (do_io)
		stop_nic(sp);

	s2io_rem_isr(sp);

	/* Check if the device is Quiescent and then Reset the NIC */
	while(do_io) {
		/* As per the HW requirement we need to replenish the
		 * receive buffers to avoid a ring bump. Since there is
		 * no intention of processing the Rx frames at this point,
		 * we just set the ownership bit of each rxd in every Rx
		 * ring to HW and program the appropriate buffer size
		 * based on the ring mode.
		 */
		rxd_owner_bit_reset(sp);

		val64 = readq(&bar0->adapter_status);
		if (verify_xena_quiescence(sp)) {
			if(verify_pcc_quiescent(sp, sp->device_enabled_once))
				break;
		}

		/* Poll up to 10 times (~500 ms) before giving up */
		msleep(50);
		cnt++;
		if (cnt == 10) {
			DBG_PRINT(ERR_DBG,
				  "s2io_close:Device not Quiescent ");
			DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
				  (unsigned long long) val64);
			break;
		}
	}
	if (do_io)
		s2io_reset(sp);

	/* Free all Tx buffers */
	free_tx_buffers(sp);

	/* Free all Rx buffers */
	free_rx_buffers(sp);

	clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
}
7156
d796fdb7
LV
static void s2io_card_down(struct s2io_nic * sp)
{
	/* Full shutdown including device I/O (stop, quiesce, reset) */
	do_s2io_card_down(sp, 1);
}
7161
/*
 * s2io_card_up - bring the adapter up: initialize hardware, fill Rx
 * rings, enable napi, restore receive mode, start traffic, register
 * interrupts and enable them. Returns 0 on success or a negative errno;
 * on any failure the partially initialized state is torn down.
 */
static int s2io_card_up(struct s2io_nic * sp)
{
	int i, ret = 0;
	struct mac_info *mac_control;
	struct config_param *config;
	struct net_device *dev = (struct net_device *) sp->dev;
	u16 interruptible;

	/* Initialize the H/W I/O registers */
	ret = init_nic(sp);
	if (ret != 0) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
			  dev->name);
		/* -EIO means the device is inaccessible; skip the reset */
		if (ret != -EIO)
			s2io_reset(sp);
		return ret;
	}

	/*
	 * Initializing the Rx buffers. For now we are considering only 1
	 * Rx ring and initializing buffers into 30 Rx blocks
	 */
	mac_control = &sp->mac_control;
	config = &sp->config;

	for (i = 0; i < config->rx_ring_num; i++) {
		mac_control->rings[i].mtu = dev->mtu;
		ret = fill_rx_buffers(&mac_control->rings[i], 1);
		if (ret) {
			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
				  dev->name);
			s2io_reset(sp);
			free_rx_buffers(sp);
			return -ENOMEM;
		}
		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
			  mac_control->rings[i].rx_bufs_left);
	}

	/* Initialise napi */
	if (config->napi) {
		int i;
		if (config->intr_type == MSI_X) {
			/* One napi context per Rx ring in MSI-X mode */
			for (i = 0; i < sp->config.rx_ring_num; i++)
				napi_enable(&sp->mac_control.rings[i].napi);
		} else {
			napi_enable(&sp->napi);
		}
	}

	/* Maintain the state prior to the open */
	if (sp->promisc_flg)
		sp->promisc_flg = 0;
	if (sp->m_cast_flg) {
		sp->m_cast_flg = 0;
		sp->all_multi_pos= 0;
	}

	/* Setting its receive mode */
	s2io_set_multicast(dev);

	if (sp->lro) {
		/* Initialize max aggregatable pkts per session based on MTU */
		sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
		/* Check if we can use(if specified) user provided value */
		if (lro_max_pkts < sp->lro_max_aggr_per_sess)
			sp->lro_max_aggr_per_sess = lro_max_pkts;
	}

	/* Enable Rx Traffic and interrupts on the NIC */
	if (start_nic(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	/* Add interrupt service routine */
	if (s2io_add_isr(sp) != 0) {
		if (sp->config.intr_type == MSI_X)
			s2io_rem_isr(sp);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	/* Arm the alarm timer to fire every half second */
	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));

	/* Enable select interrupts */
	en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
	if (sp->config.intr_type != INTA)
		en_dis_able_nic_intrs(sp, TX_TRAFFIC_INTR, ENABLE_INTRS);
	else {
		interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
		interruptible |= TX_PIC_INTR;
		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
	}

	set_bit(__S2IO_STATE_CARD_UP, &sp->state);
	return 0;
}
7263
20346722 7264/**
1da177e4
LT
7265 * s2io_restart_nic - Resets the NIC.
7266 * @data : long pointer to the device private structure
7267 * Description:
7268 * This function is scheduled to be run by the s2io_tx_watchdog
20346722 7269 * function after 0.5 secs to reset the NIC. The idea is to reduce
1da177e4
LT
7270 * the run time of the watch dog routine which is run holding a
7271 * spin lock.
7272 */
7273
c4028958 7274static void s2io_restart_nic(struct work_struct *work)
1da177e4 7275{
1ee6dd77 7276 struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
c4028958 7277 struct net_device *dev = sp->dev;
1da177e4 7278
22747d6b
FR
7279 rtnl_lock();
7280
7281 if (!netif_running(dev))
7282 goto out_unlock;
7283
e6a8fee2 7284 s2io_card_down(sp);
1da177e4
LT
7285 if (s2io_card_up(sp)) {
7286 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
7287 dev->name);
7288 }
3a3d5756 7289 s2io_wake_all_tx_queue(sp);
1da177e4
LT
7290 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
7291 dev->name);
22747d6b
FR
7292out_unlock:
7293 rtnl_unlock();
1da177e4
LT
7294}
7295
20346722
K
7296/**
7297 * s2io_tx_watchdog - Watchdog for transmit side.
1da177e4
LT
7298 * @dev : Pointer to net device structure
7299 * Description:
7300 * This function is triggered if the Tx Queue is stopped
7301 * for a pre-defined amount of time when the Interface is still up.
7302 * If the Interface is jammed in such a situation, the hardware is
7303 * reset (by s2io_close) and restarted again (by s2io_open) to
7304 * overcome any problem that might have been caused in the hardware.
7305 * Return value:
7306 * void
7307 */
7308
7309static void s2io_tx_watchdog(struct net_device *dev)
7310{
1ee6dd77 7311 struct s2io_nic *sp = dev->priv;
1da177e4
LT
7312
7313 if (netif_carrier_ok(dev)) {
c53d4945 7314 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt++;
1da177e4 7315 schedule_work(&sp->rst_timer_task);
bd1034f0 7316 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
1da177e4
LT
7317 }
7318}
7319
/**
 * rx_osm_handler - To perform some OS related operations on SKB.
 * @ring_data : the ring from which this RxD was extracted.
 * @rxdp : the receive descriptor being completed.
 * Description:
 * This function is called by the Rx interrupt serivce routine to perform
 * some OS related operations on the SKB before passing it to the upper
 * layers. It mainly checks if the checksum is OK, if so adds it to the
 * SKBs cksum variable, increments the Rx packet count and passes the SKB
 * to the upper layer. If the checksum is wrong, it increments the Rx
 * packet error count, frees the SKB and returns error.
 * Return value:
 * SUCCESS on success and -1 on failure.
 */
static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
{
	struct s2io_nic *sp = ring_data->nic;
	struct net_device *dev = (struct net_device *) ring_data->dev;
	/* The skb pointer was stashed in Host_Control when the buffer
	 * was posted to the NIC. */
	struct sk_buff *skb = (struct sk_buff *)
		((unsigned long) rxdp->Host_Control);
	int ring_no = ring_data->ring_no;
	u16 l3_csum, l4_csum;
	unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
	struct lro *lro;
	u8 err_mask;

	skb->dev = dev;

	if (err) {
		/* Check for parity error */
		if (err & 0x1) {
			sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
		}
		/* Transfer code lives in the upper bits of the masked
		 * Control_1 word; map it onto per-cause counters. */
		err_mask = err >> 48;
		switch(err_mask) {
			case 1:
				sp->mac_control.stats_info->sw_stat.
				rx_parity_err_cnt++;
			break;

			case 2:
				sp->mac_control.stats_info->sw_stat.
				rx_abort_cnt++;
			break;

			case 3:
				sp->mac_control.stats_info->sw_stat.
				rx_parity_abort_cnt++;
			break;

			case 4:
				sp->mac_control.stats_info->sw_stat.
				rx_rda_fail_cnt++;
			break;

			case 5:
				sp->mac_control.stats_info->sw_stat.
				rx_unkn_prot_cnt++;
			break;

			case 6:
				sp->mac_control.stats_info->sw_stat.
				rx_fcs_err_cnt++;
			break;

			case 7:
				sp->mac_control.stats_info->sw_stat.
				rx_buf_size_err_cnt++;
			break;

			case 8:
				sp->mac_control.stats_info->sw_stat.
				rx_rxd_corrupt_cnt++;
			break;

			case 15:
				sp->mac_control.stats_info->sw_stat.
				rx_unkn_err_cnt++;
			break;
		}
		/*
		 * Drop the packet if bad transfer code. Exception being
		 * 0x5, which could be due to unsupported IPv6 extension header.
		 * In this case, we let stack handle the packet.
		 * Note that in this case, since checksum will be incorrect,
		 * stack will validate the same.
		 */
		if (err_mask != 0x5) {
			DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
				dev->name, err_mask);
			sp->stats.rx_crc_errors++;
			sp->mac_control.stats_info->sw_stat.mem_freed
				+= skb->truesize;
			dev_kfree_skb(skb);
			ring_data->rx_bufs_left -= 1;
			/* Clear the stale skb pointer in the descriptor. */
			rxdp->Host_Control = 0;
			return 0;
		}
	}

	/* Updating statistics */
	ring_data->rx_packets++;
	rxdp->Host_Control = 0;
	if (sp->rxd_mode == RXD_MODE_1) {
		/* 1-buffer mode: the whole frame is in buffer 0. */
		int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);

		ring_data->rx_bytes += len;
		skb_put(skb, len);

	} else if (sp->rxd_mode == RXD_MODE_3B) {
		/* 2-buffer mode: copy the header (buffer 0) in front of
		 * the payload (buffer 2) which is already in the skb. */
		int get_block = ring_data->rx_curr_get_info.block_index;
		int get_off = ring_data->rx_curr_get_info.offset;
		int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
		int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
		unsigned char *buff = skb_push(skb, buf0_len);

		struct buffAdd *ba = &ring_data->ba[get_block][get_off];
		ring_data->rx_bytes += buf0_len + buf2_len;
		memcpy(buff, ba->ba_0, buf0_len);
		skb_put(skb, buf2_len);
	}

	/* Hardware checksum is only trusted for non-fragmented TCP/UDP
	 * frames (fragments are excluded when LRO is on). */
	if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!ring_data->lro) ||
	    (ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
	    (sp->rx_csum)) {
		l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
		l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
		if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
			/*
			 * NIC verifies if the Checksum of the received
			 * frame is Ok or not and accordingly returns
			 * a flag in the RxD.
			 */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			if (ring_data->lro) {
				u32 tcp_len;
				u8 *tcp;
				int ret = 0;

				/* Software LRO state machine: decide whether
				 * this segment starts, extends, or flushes
				 * an aggregation session. */
				ret = s2io_club_tcp_session(ring_data,
					skb->data, &tcp, &tcp_len, &lro,
					rxdp, sp);
				switch (ret) {
					case 3: /* Begin anew */
						lro->parent = skb;
						goto aggregate;
					case 1: /* Aggregate */
					{
						lro_append_pkt(sp, lro,
							skb, tcp_len);
						goto aggregate;
					}
					case 4: /* Flush session */
					{
						/* Append, then push the whole
						 * aggregate up the stack. */
						lro_append_pkt(sp, lro,
							skb, tcp_len);
						queue_rx_frame(lro->parent,
							lro->vlan_tag);
						clear_lro_session(lro);
						sp->mac_control.stats_info->
						    sw_stat.flush_max_pkts++;
						goto aggregate;
					}
					case 2: /* Flush both */
						lro->parent->data_len =
							lro->frags_len;
						sp->mac_control.stats_info->
						     sw_stat.sending_both++;
						queue_rx_frame(lro->parent,
							lro->vlan_tag);
						clear_lro_session(lro);
						/* Current skb is sent up
						 * separately below. */
						goto send_up;
					case 0: /* sessions exceeded */
					case -1: /* non-TCP or not
						  * L2 aggregatable
						  */
					case 5: /*
						 * First pkt in session not
						 * L3/L4 aggregatable
						 */
						break;
					default:
						DBG_PRINT(ERR_DBG,
							"%s: Samadhana!!\n",
							 __FUNCTION__);
						BUG();
				}
			}
		} else {
			/*
			 * Packet with erroneous checksum, let the
			 * upper layers deal with it.
			 */
			skb->ip_summed = CHECKSUM_NONE;
		}
	} else
		skb->ip_summed = CHECKSUM_NONE;

	sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
send_up:
	queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
	dev->last_rx = jiffies;
aggregate:
	/* One posted buffer has been consumed either way. */
	sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
	return SUCCESS;
}
7529
7530/**
7531 * s2io_link - stops/starts the Tx queue.
7532 * @sp : private member of the device structure, which is a pointer to the
7533 * s2io_nic structure.
7534 * @link : inidicates whether link is UP/DOWN.
7535 * Description:
7536 * This function stops/starts the Tx queue depending on whether the link
20346722
K
7537 * status of the NIC is is down or up. This is called by the Alarm
7538 * interrupt handler whenever a link change interrupt comes up.
1da177e4
LT
7539 * Return value:
7540 * void.
7541 */
7542
1ee6dd77 7543static void s2io_link(struct s2io_nic * sp, int link)
1da177e4
LT
7544{
7545 struct net_device *dev = (struct net_device *) sp->dev;
7546
7547 if (link != sp->last_link_state) {
b7c5678f 7548 init_tti(sp, link);
1da177e4
LT
7549 if (link == LINK_DOWN) {
7550 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
3a3d5756 7551 s2io_stop_all_tx_queue(sp);
1da177e4 7552 netif_carrier_off(dev);
491976b2 7553 if(sp->mac_control.stats_info->sw_stat.link_up_cnt)
8a4bdbaa 7554 sp->mac_control.stats_info->sw_stat.link_up_time =
491976b2
SH
7555 jiffies - sp->start_time;
7556 sp->mac_control.stats_info->sw_stat.link_down_cnt++;
1da177e4
LT
7557 } else {
7558 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
491976b2 7559 if (sp->mac_control.stats_info->sw_stat.link_down_cnt)
8a4bdbaa 7560 sp->mac_control.stats_info->sw_stat.link_down_time =
491976b2
SH
7561 jiffies - sp->start_time;
7562 sp->mac_control.stats_info->sw_stat.link_up_cnt++;
1da177e4 7563 netif_carrier_on(dev);
3a3d5756 7564 s2io_wake_all_tx_queue(sp);
1da177e4
LT
7565 }
7566 }
7567 sp->last_link_state = link;
491976b2 7568 sp->start_time = jiffies;
1da177e4
LT
7569}
7570
20346722
K
7571/**
7572 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
7573 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
7574 * s2io_nic structure.
7575 * Description:
7576 * This function initializes a few of the PCI and PCI-X configuration registers
7577 * with recommended values.
7578 * Return value:
7579 * void
7580 */
7581
1ee6dd77 7582static void s2io_init_pci(struct s2io_nic * sp)
1da177e4 7583{
20346722 7584 u16 pci_cmd = 0, pcix_cmd = 0;
1da177e4
LT
7585
7586 /* Enable Data Parity Error Recovery in PCI-X command register. */
7587 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 7588 &(pcix_cmd));
1da177e4 7589 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 7590 (pcix_cmd | 1));
1da177e4 7591 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
20346722 7592 &(pcix_cmd));
1da177e4
LT
7593
7594 /* Set the PErr Response bit in PCI command register. */
7595 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7596 pci_write_config_word(sp->pdev, PCI_COMMAND,
7597 (pci_cmd | PCI_COMMAND_PARITY));
7598 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
1da177e4
LT
7599}
7600
3a3d5756
SH
7601static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7602 u8 *dev_multiq)
9dc737a7 7603{
2fda096d 7604 if ((tx_fifo_num > MAX_TX_FIFOS) ||
6cfc482b 7605 (tx_fifo_num < 1)) {
2fda096d
SR
7606 DBG_PRINT(ERR_DBG, "s2io: Requested number of tx fifos "
7607 "(%d) not supported\n", tx_fifo_num);
6cfc482b
SH
7608
7609 if (tx_fifo_num < 1)
7610 tx_fifo_num = 1;
7611 else
7612 tx_fifo_num = MAX_TX_FIFOS;
7613
2fda096d
SR
7614 DBG_PRINT(ERR_DBG, "s2io: Default to %d ", tx_fifo_num);
7615 DBG_PRINT(ERR_DBG, "tx fifos\n");
9dc737a7 7616 }
2fda096d 7617
6cfc482b 7618 if (multiq)
3a3d5756 7619 *dev_multiq = multiq;
6cfc482b
SH
7620
7621 if (tx_steering_type && (1 == tx_fifo_num)) {
7622 if (tx_steering_type != TX_DEFAULT_STEERING)
7623 DBG_PRINT(ERR_DBG,
7624 "s2io: Tx steering is not supported with "
7625 "one fifo. Disabling Tx steering.\n");
7626 tx_steering_type = NO_STEERING;
7627 }
7628
7629 if ((tx_steering_type < NO_STEERING) ||
7630 (tx_steering_type > TX_DEFAULT_STEERING)) {
7631 DBG_PRINT(ERR_DBG, "s2io: Requested transmit steering not "
7632 "supported\n");
7633 DBG_PRINT(ERR_DBG, "s2io: Disabling transmit steering\n");
7634 tx_steering_type = NO_STEERING;
3a3d5756
SH
7635 }
7636
0425b46a
SH
7637 if (rx_ring_num > MAX_RX_RINGS) {
7638 DBG_PRINT(ERR_DBG, "s2io: Requested number of rx rings not "
9dc737a7 7639 "supported\n");
0425b46a
SH
7640 DBG_PRINT(ERR_DBG, "s2io: Default to %d rx rings\n",
7641 MAX_RX_RINGS);
7642 rx_ring_num = MAX_RX_RINGS;
9dc737a7 7643 }
0425b46a 7644
eccb8628 7645 if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
9dc737a7
AR
7646 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
7647 "Defaulting to INTA\n");
7648 *dev_intr_type = INTA;
7649 }
596c5c97 7650
9dc737a7
AR
7651 if ((*dev_intr_type == MSI_X) &&
7652 ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7653 (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
6aa20a22 7654 DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
9dc737a7
AR
7655 "Defaulting to INTA\n");
7656 *dev_intr_type = INTA;
7657 }
fb6a825b 7658
6d517a27 7659 if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
9dc737a7 7660 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
6d517a27
VP
7661 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 1-buffer mode\n");
7662 rx_ring_mode = 1;
9dc737a7
AR
7663 }
7664 return SUCCESS;
7665}
7666
9fc93a41
SS
7667/**
7668 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
7669 * or Traffic class respectively.
b7c5678f 7670 * @nic: device private variable
9fc93a41
SS
7671 * Description: The function configures the receive steering to
7672 * desired receive ring.
7673 * Return Value: SUCCESS on success and
7674 * '-1' on failure (endian settings incorrect).
7675 */
7676static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
7677{
7678 struct XENA_dev_config __iomem *bar0 = nic->bar0;
7679 register u64 val64 = 0;
7680
7681 if (ds_codepoint > 63)
7682 return FAILURE;
7683
7684 val64 = RTS_DS_MEM_DATA(ring);
7685 writeq(val64, &bar0->rts_ds_mem_data);
7686
7687 val64 = RTS_DS_MEM_CTRL_WE |
7688 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
7689 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
7690
7691 writeq(val64, &bar0->rts_ds_mem_ctrl);
7692
7693 return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
7694 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
7695 S2IO_BIT_RESET);
7696}
7697
/**
 * s2io_init_nic - Initialization of the adapter .
 * @pdev : structure containing the PCI related information of the device.
 * @pre: entry of s2io_tbl that matched this device (unused here).
 * Description:
 * The function initializes an adapter identified by the pci_dev structure.
 * All OS related initialization including memory and device structure and
 * initlaization of the device private variable is done. Also the swapper
 * control register is initialized to enable read and write into the I/O
 * registers of the device.
 * Return value:
 * returns 0 on success and negative on failure.
 */

static int __devinit
s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
{
	struct s2io_nic *sp;
	struct net_device *dev;
	int i, j, ret;
	int dma_flag = FALSE;
	u32 mac_up, mac_down;
	u64 val64 = 0, tmp64 = 0;
	struct XENA_dev_config __iomem *bar0 = NULL;
	u16 subid;
	struct mac_info *mac_control;
	struct config_param *config;
	int mode;
	u8 dev_intr_type = intr_type;
	u8 dev_multiq = 0;
	DECLARE_MAC_BUF(mac);

	/* Sanitize module parameters before anything is allocated. */
	ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
	if (ret)
		return ret;

	if ((ret = pci_enable_device(pdev))) {
		DBG_PRINT(ERR_DBG,
			  "s2io_init_nic: pci_enable_device failed\n");
		return ret;
	}

	/* Prefer 64-bit DMA; fall back to 32-bit, else give up. */
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
		dma_flag = TRUE;
		if (pci_set_consistent_dma_mask
		    (pdev, DMA_64BIT_MASK)) {
			DBG_PRINT(ERR_DBG,
				  "Unable to obtain 64bit DMA for \
					consistent allocations\n");
			pci_disable_device(pdev);
			return -ENOMEM;
		}
	} else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
		DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
	} else {
		pci_disable_device(pdev);
		return -ENOMEM;
	}
	if ((ret = pci_request_regions(pdev, s2io_driver_name))) {
		DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __FUNCTION__, ret);
		pci_disable_device(pdev);
		return -ENODEV;
	}
	/* Multiqueue netdev only when the module parameter asked for it. */
	if (dev_multiq)
		dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
	else
		dev = alloc_etherdev(sizeof(struct s2io_nic));
	if (dev == NULL) {
		DBG_PRINT(ERR_DBG, "Device allocation failed\n");
		pci_disable_device(pdev);
		pci_release_regions(pdev);
		return -ENODEV;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	/*  Private member variable initialized to s2io NIC structure */
	sp = dev->priv;
	memset(sp, 0, sizeof(struct s2io_nic));
	sp->dev = dev;
	sp->pdev = pdev;
	sp->high_dma_flag = dma_flag;
	sp->device_enabled_once = FALSE;
	if (rx_ring_mode == 1)
		sp->rxd_mode = RXD_MODE_1;
	if (rx_ring_mode == 2)
		sp->rxd_mode = RXD_MODE_3B;

	sp->config.intr_type = dev_intr_type;

	/* Herc (Xframe II) is identified by its PCI device IDs. */
	if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
		(pdev->device == PCI_DEVICE_ID_HERC_UNI))
		sp->device_type = XFRAME_II_DEVICE;
	else
		sp->device_type = XFRAME_I_DEVICE;

	sp->lro = lro_enable;

	/* Initialize some PCI/PCI-X fields of the NIC. */
	s2io_init_pci(sp);

	/*
	 * Setting the device configuration parameters.
	 * Most of these parameters can be specified by the user during
	 * module insertion as they are module loadable parameters. If
	 * these parameters are not not specified during load time, they
	 * are initialized with default values.
	 */
	mac_control = &sp->mac_control;
	config = &sp->config;

	config->napi = napi;
	config->tx_steering_type = tx_steering_type;

	/* Tx side parameters. */
	if (config->tx_steering_type == TX_PRIORITY_STEERING)
		config->tx_fifo_num = MAX_TX_FIFOS;
	else
		config->tx_fifo_num = tx_fifo_num;

	/* Initialize the fifos used for tx steering */
	if (config->tx_fifo_num < 5) {
		if (config->tx_fifo_num  == 1)
			sp->total_tcp_fifos = 1;
		else
			sp->total_tcp_fifos = config->tx_fifo_num - 1;
		sp->udp_fifo_idx = config->tx_fifo_num - 1;
		sp->total_udp_fifos = 1;
		sp->other_fifo_idx = sp->total_tcp_fifos - 1;
	} else {
		sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
						FIFO_OTHER_MAX_NUM);
		sp->udp_fifo_idx = sp->total_tcp_fifos;
		sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
		sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
	}

	config->multiq = dev_multiq;
	for (i = 0; i < config->tx_fifo_num; i++) {
		config->tx_cfg[i].fifo_len = tx_fifo_len[i];
		config->tx_cfg[i].fifo_priority = i;
	}

	/* mapping the QoS priority to the configured fifos */
	for (i = 0; i < MAX_TX_FIFOS; i++)
		config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];

	/* map the hashing selector table to the configured fifos */
	for (i = 0; i < config->tx_fifo_num; i++)
		sp->fifo_selector[i] = fifo_selector[i];


	config->tx_intr_type = TXD_INT_TYPE_UTILZ;
	for (i = 0; i < config->tx_fifo_num; i++) {
		config->tx_cfg[i].f_no_snoop =
		    (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
		/* Short FIFOs switch to per-list Tx interrupts. */
		if (config->tx_cfg[i].fifo_len < 65) {
			config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
			break;
		}
	}
	/* + 2 because one Txd for skb->data and one Txd for UFO */
	config->max_txds = MAX_SKB_FRAGS + 2;

	/* Rx side parameters. */
	config->rx_ring_num = rx_ring_num;
	for (i = 0; i < config->rx_ring_num; i++) {
		config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
		    (rxd_count[sp->rxd_mode] + 1);
		config->rx_cfg[i].ring_priority = i;
		mac_control->rings[i].rx_bufs_left = 0;
		mac_control->rings[i].rxd_mode = sp->rxd_mode;
		mac_control->rings[i].rxd_count = rxd_count[sp->rxd_mode];
		mac_control->rings[i].pdev = sp->pdev;
		mac_control->rings[i].dev = sp->dev;
	}

	for (i = 0; i < rx_ring_num; i++) {
		config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
		config->rx_cfg[i].f_no_snoop =
		    (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
	}

	/*  Setting Mac Control parameters */
	mac_control->rmac_pause_time = rmac_pause_time;
	mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
	mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;


	/*  initialize the shared memory used by the NIC and the host */
	if (init_shared_mem(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
			  dev->name);
		ret = -ENOMEM;
		goto mem_alloc_failed;
	}

	sp->bar0 = ioremap(pci_resource_start(pdev, 0),
				     pci_resource_len(pdev, 0));
	if (!sp->bar0) {
		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
			  dev->name);
		ret = -ENOMEM;
		goto bar0_remap_failed;
	}

	sp->bar1 = ioremap(pci_resource_start(pdev, 2),
				     pci_resource_len(pdev, 2));
	if (!sp->bar1) {
		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
			  dev->name);
		ret = -ENOMEM;
		goto bar1_remap_failed;
	}

	dev->irq = pdev->irq;
	dev->base_addr = (unsigned long) sp->bar0;

	/* Initializing the BAR1 address as the start of the FIFO pointer. */
	/* Each FIFO doorbell window is 128KB (0x20000) within BAR1. */
	for (j = 0; j < MAX_TX_FIFOS; j++) {
		mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
		    (sp->bar1 + (j * 0x00020000));
	}

	/*  Driver entry points */
	dev->open = &s2io_open;
	dev->stop = &s2io_close;
	dev->hard_start_xmit = &s2io_xmit;
	dev->get_stats = &s2io_get_stats;
	dev->set_multicast_list = &s2io_set_multicast;
	dev->do_ioctl = &s2io_ioctl;
	dev->set_mac_address = &s2io_set_mac_addr;
	dev->change_mtu = &s2io_change_mtu;
	SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = s2io_vlan_rx_register;
	dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;

	/*
	 * will use eth_mac_addr() for  dev->set_mac_address
	 * mac address will be set every time dev->open() is called
	 */
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = s2io_netpoll;
#endif

	dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
	if (sp->high_dma_flag == TRUE)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= NETIF_F_TSO;
	dev->features |= NETIF_F_TSO6;
	/* UFO is a Xframe II feature and is gated by the 'ufo' parameter. */
	if ((sp->device_type & XFRAME_II_DEVICE) && (ufo))  {
		dev->features |= NETIF_F_UFO;
		dev->features |= NETIF_F_HW_CSUM;
	}
	if (config->multiq)
		dev->features |= NETIF_F_MULTI_QUEUE;
	dev->tx_timeout = &s2io_tx_watchdog;
	dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
	INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
	INIT_WORK(&sp->set_link_task, s2io_set_link);

	pci_save_state(sp->pdev);

	/* Setting swapper control on the NIC, for proper reset operation */
	if (s2io_set_swapper(sp)) {
		DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
			  dev->name);
		ret = -EAGAIN;
		goto set_swap_failed;
	}

	/* Verify if the Herc works on the slot its placed into */
	if (sp->device_type & XFRAME_II_DEVICE) {
		mode = s2io_verify_pci_mode(sp);
		if (mode < 0) {
			DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
			DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
			ret = -EBADSLT;
			goto set_swap_failed;
		}
	}

	/* Probe MSI-X; fall back to INTA if it cannot be enabled/tested. */
	if (sp->config.intr_type == MSI_X) {
		sp->num_entries = config->rx_ring_num + 1;
		ret = s2io_enable_msi_x(sp);

		if (!ret) {
			ret = s2io_test_msi(sp);
			/* rollback MSI-X, will re-enable during add_isr() */
			remove_msix_isr(sp);
		}
		if (ret) {

			DBG_PRINT(ERR_DBG,
			  "%s: MSI-X requested but failed to enable\n",
			  dev->name);
			sp->config.intr_type = INTA;
		}
	}

	/* One NAPI context per ring for MSI-X, a single one for INTA. */
	if (config->intr_type == MSI_X) {
		for (i = 0; i < config->rx_ring_num ; i++)
			netif_napi_add(dev, &mac_control->rings[i].napi,
				s2io_poll_msix, 64);
	} else {
		netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
	}

	/* Not needed for Herc */
	if (sp->device_type & XFRAME_I_DEVICE) {
		/*
		 * Fix for all "FFs" MAC address problems observed on
		 * Alpha platforms
		 */
		fix_mac_address(sp);
		s2io_reset(sp);
	}

	/*
	 * MAC address initialization.
	 * For now only one mac address will be read and used.
	 */
	bar0 = sp->bar0;
	val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
	    RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
	writeq(val64, &bar0->rmac_addr_cmd_mem);
	wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
		      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
	tmp64 = readq(&bar0->rmac_addr_data0_mem);
	mac_down = (u32) tmp64;
	mac_up = (u32) (tmp64 >> 32);

	/* Unpack the 6 address bytes out of the two 32-bit halves. */
	sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
	sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
	sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
	sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
	sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
	sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);

	/*  Set the factory defined MAC address initially   */
	dev->addr_len = ETH_ALEN;
	memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
	memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);

	/* initialize number of multicast & unicast MAC entries variables */
	if (sp->device_type == XFRAME_I_DEVICE) {
		config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
		config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
		config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
	} else if (sp->device_type == XFRAME_II_DEVICE) {
		config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
		config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
		config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
	}

	/* store mac addresses from CAM to s2io_nic structure */
	do_s2io_store_unicast_mc(sp);

	/* Configure MSIX vector for number of rings configured plus one */
	if ((sp->device_type == XFRAME_II_DEVICE) &&
		(config->intr_type == MSI_X))
		sp->num_entries = config->rx_ring_num + 1;

	/* Store the values of the MSIX table in the s2io_nic structure */
	store_xmsi_data(sp);
	/* reset Nic and bring it to known state */
	s2io_reset(sp);

	/*
	 * Initialize link state flags
	 * and the card state parameter
	 */
	sp->state = 0;

	/* Initialize spinlocks */
	for (i = 0; i < sp->config.tx_fifo_num; i++)
		spin_lock_init(&mac_control->fifos[i].tx_lock);

	/*
	 * SXE-002: Configure link and activity LED to init state
	 * on driver load.
	 */
	subid = sp->pdev->subsystem_device;
	if ((subid & 0xFF) >= 0x07) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *) bar0 + 0x2700);
		val64 = readq(&bar0->gpio_control);
	}

	sp->rx_csum = 1;	/* Rx chksum verify enabled by default */

	if (register_netdev(dev)) {
		DBG_PRINT(ERR_DBG, "Device registration failed\n");
		ret = -ENODEV;
		goto register_failed;
	}
	s2io_vpd_read(sp);
	DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n");
	DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
		  sp->product_name, pdev->revision);
	DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
		  s2io_driver_version);
	DBG_PRINT(ERR_DBG, "%s: MAC ADDR: %s\n",
		  dev->name, print_mac(mac, dev->dev_addr));
	DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
	if (sp->device_type & XFRAME_II_DEVICE) {
		mode = s2io_print_pci_mode(sp);
		if (mode < 0) {
			DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
			ret = -EBADSLT;
			unregister_netdev(dev);
			goto set_swap_failed;
		}
	}
	switch(sp->rxd_mode) {
		case RXD_MODE_1:
		    DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
						dev->name);
		    break;
		case RXD_MODE_3B:
		    DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
						dev->name);
		    break;
	}

	switch (sp->config.napi) {
	case 0:
		DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
		break;
	case 1:
		DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
		break;
	}

	DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
		sp->config.tx_fifo_num);

	DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
		  sp->config.rx_ring_num);

	switch(sp->config.intr_type) {
		case INTA:
		    DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
		    break;
		case MSI_X:
		    DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
		    break;
	}
	if (sp->config.multiq) {
		for (i = 0; i < sp->config.tx_fifo_num; i++)
			mac_control->fifos[i].multiq = config->multiq;
		DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
			dev->name);
	} else
		DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
			dev->name);

	switch (sp->config.tx_steering_type) {
	case NO_STEERING:
		DBG_PRINT(ERR_DBG, "%s: No steering enabled for"
			" transmit\n", dev->name);
			break;
	case TX_PRIORITY_STEERING:
		DBG_PRINT(ERR_DBG, "%s: Priority steering enabled for"
			" transmit\n", dev->name);
		break;
	case TX_DEFAULT_STEERING:
		DBG_PRINT(ERR_DBG, "%s: Default steering enabled for"
			" transmit\n", dev->name);
	}

	if (sp->lro)
		DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
			  dev->name);
	if (ufo)
		DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
			" enabled\n", dev->name);
	/* Initialize device name */
	sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);

	/*
	 * Make Link state as off at this point, when the Link change
	 * interrupt comes the state will be automatically changed to
	 * the right state.
	 */
	netif_carrier_off(dev);

	return 0;

	/* Error unwinding: labels fall through so each failure point
	 * releases exactly the resources acquired before it. */
      register_failed:
      set_swap_failed:
	iounmap(sp->bar1);
      bar1_remap_failed:
	iounmap(sp->bar0);
      bar0_remap_failed:
      mem_alloc_failed:
	free_shared_mem(sp);
	pci_disable_device(pdev);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);

	return ret;
}
8209
8210/**
20346722 8211 * s2io_rem_nic - Free the PCI device
1da177e4 8212 * @pdev: structure containing the PCI related information of the device.
20346722 8213 * Description: This function is called by the Pci subsystem to release a
1da177e4 8214 * PCI device and free up all resource held up by the device. This could
20346722 8215 * be in response to a Hot plug event or when the driver is to be removed
1da177e4
LT
8216 * from memory.
8217 */
8218
8219static void __devexit s2io_rem_nic(struct pci_dev *pdev)
8220{
8221 struct net_device *dev =
8222 (struct net_device *) pci_get_drvdata(pdev);
1ee6dd77 8223 struct s2io_nic *sp;
1da177e4
LT
8224
8225 if (dev == NULL) {
8226 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
8227 return;
8228 }
8229
22747d6b
FR
8230 flush_scheduled_work();
8231
1da177e4
LT
8232 sp = dev->priv;
8233 unregister_netdev(dev);
8234
8235 free_shared_mem(sp);
8236 iounmap(sp->bar0);
8237 iounmap(sp->bar1);
eccb8628 8238 pci_release_regions(pdev);
1da177e4 8239 pci_set_drvdata(pdev, NULL);
1da177e4 8240 free_netdev(dev);
19a60522 8241 pci_disable_device(pdev);
1da177e4
LT
8242}
8243
8244/**
8245 * s2io_starter - Entry point for the driver
8246 * Description: This function is the entry point for the driver. It verifies
8247 * the module loadable parameters and initializes PCI configuration space.
8248 */
8249
43b7c451 8250static int __init s2io_starter(void)
1da177e4 8251{
29917620 8252 return pci_register_driver(&s2io_driver);
1da177e4
LT
8253}
8254
8255/**
20346722 8256 * s2io_closer - Cleanup routine for the driver
1da177e4
LT
8257 * Description: This function is the cleanup routine for the driver. It unregist * ers the driver.
8258 */
8259
372cc597 8260static __exit void s2io_closer(void)
1da177e4
LT
8261{
8262 pci_unregister_driver(&s2io_driver);
8263 DBG_PRINT(INIT_DBG, "cleanup done\n");
8264}
8265
/* Module load/unload hooks: register and unregister the PCI driver. */
module_init(s2io_starter);
module_exit(s2io_closer);
7d3d0439 8268
6aa20a22 8269static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
cdb5bf02
SH
8270 struct tcphdr **tcp, struct RxD_t *rxdp,
8271 struct s2io_nic *sp)
7d3d0439
RA
8272{
8273 int ip_off;
8274 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
8275
8276 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
8277 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
8278 __FUNCTION__);
8279 return -1;
8280 }
8281
cdb5bf02
SH
8282 /* Checking for DIX type or DIX type with VLAN */
8283 if ((l2_type == 0)
8284 || (l2_type == 4)) {
8285 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
8286 /*
8287 * If vlan stripping is disabled and the frame is VLAN tagged,
8288 * shift the offset by the VLAN header size bytes.
8289 */
8290 if ((!vlan_strip_flag) &&
8291 (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
8292 ip_off += HEADER_VLAN_SIZE;
8293 } else {
7d3d0439 8294 /* LLC, SNAP etc are considered non-mergeable */
cdb5bf02 8295 return -1;
7d3d0439
RA
8296 }
8297
8298 *ip = (struct iphdr *)((u8 *)buffer + ip_off);
8299 ip_len = (u8)((*ip)->ihl);
8300 ip_len <<= 2;
8301 *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
8302
8303 return 0;
8304}
8305
1ee6dd77 8306static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
7d3d0439
RA
8307 struct tcphdr *tcp)
8308{
8309 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8310 if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
8311 (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
8312 return -1;
8313 return 0;
8314}
8315
8316static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8317{
8318 return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
8319}
8320
1ee6dd77 8321static void initiate_new_session(struct lro *lro, u8 *l2h,
cdb5bf02 8322 struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len, u16 vlan_tag)
7d3d0439
RA
8323{
8324 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8325 lro->l2h = l2h;
8326 lro->iph = ip;
8327 lro->tcph = tcp;
8328 lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
c8855953 8329 lro->tcp_ack = tcp->ack_seq;
7d3d0439
RA
8330 lro->sg_num = 1;
8331 lro->total_len = ntohs(ip->tot_len);
8332 lro->frags_len = 0;
cdb5bf02 8333 lro->vlan_tag = vlan_tag;
6aa20a22 8334 /*
7d3d0439
RA
8335 * check if we saw TCP timestamp. Other consistency checks have
8336 * already been done.
8337 */
8338 if (tcp->doff == 8) {
c8855953
SR
8339 __be32 *ptr;
8340 ptr = (__be32 *)(tcp+1);
7d3d0439 8341 lro->saw_ts = 1;
c8855953 8342 lro->cur_tsval = ntohl(*(ptr+1));
7d3d0439
RA
8343 lro->cur_tsecr = *(ptr+2);
8344 }
8345 lro->in_use = 1;
8346}
8347
1ee6dd77 8348static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
7d3d0439
RA
8349{
8350 struct iphdr *ip = lro->iph;
8351 struct tcphdr *tcp = lro->tcph;
bd4f3ae1 8352 __sum16 nchk;
1ee6dd77 8353 struct stat_block *statinfo = sp->mac_control.stats_info;
7d3d0439
RA
8354 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8355
8356 /* Update L3 header */
8357 ip->tot_len = htons(lro->total_len);
8358 ip->check = 0;
8359 nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
8360 ip->check = nchk;
8361
8362 /* Update L4 header */
8363 tcp->ack_seq = lro->tcp_ack;
8364 tcp->window = lro->window;
8365
8366 /* Update tsecr field if this session has timestamps enabled */
8367 if (lro->saw_ts) {
c8855953 8368 __be32 *ptr = (__be32 *)(tcp + 1);
7d3d0439
RA
8369 *(ptr+2) = lro->cur_tsecr;
8370 }
8371
8372 /* Update counters required for calculation of
8373 * average no. of packets aggregated.
8374 */
8375 statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
8376 statinfo->sw_stat.num_aggregations++;
8377}
8378
1ee6dd77 8379static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
7d3d0439
RA
8380 struct tcphdr *tcp, u32 l4_pyld)
8381{
8382 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8383 lro->total_len += l4_pyld;
8384 lro->frags_len += l4_pyld;
8385 lro->tcp_next_seq += l4_pyld;
8386 lro->sg_num++;
8387
8388 /* Update ack seq no. and window ad(from this pkt) in LRO object */
8389 lro->tcp_ack = tcp->ack_seq;
8390 lro->window = tcp->window;
6aa20a22 8391
7d3d0439 8392 if (lro->saw_ts) {
c8855953 8393 __be32 *ptr;
7d3d0439 8394 /* Update tsecr and tsval from this packet */
c8855953
SR
8395 ptr = (__be32 *)(tcp+1);
8396 lro->cur_tsval = ntohl(*(ptr+1));
7d3d0439
RA
8397 lro->cur_tsecr = *(ptr + 2);
8398 }
8399}
8400
1ee6dd77 8401static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
7d3d0439
RA
8402 struct tcphdr *tcp, u32 tcp_pyld_len)
8403{
7d3d0439
RA
8404 u8 *ptr;
8405
79dc1901
AM
8406 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8407
7d3d0439
RA
8408 if (!tcp_pyld_len) {
8409 /* Runt frame or a pure ack */
8410 return -1;
8411 }
8412
8413 if (ip->ihl != 5) /* IP has options */
8414 return -1;
8415
75c30b13
AR
8416 /* If we see CE codepoint in IP header, packet is not mergeable */
8417 if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
8418 return -1;
8419
8420 /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
7d3d0439 8421 if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
75c30b13 8422 tcp->ece || tcp->cwr || !tcp->ack) {
7d3d0439
RA
8423 /*
8424 * Currently recognize only the ack control word and
8425 * any other control field being set would result in
8426 * flushing the LRO session
8427 */
8428 return -1;
8429 }
8430
6aa20a22 8431 /*
7d3d0439
RA
8432 * Allow only one TCP timestamp option. Don't aggregate if
8433 * any other options are detected.
8434 */
8435 if (tcp->doff != 5 && tcp->doff != 8)
8436 return -1;
8437
8438 if (tcp->doff == 8) {
6aa20a22 8439 ptr = (u8 *)(tcp + 1);
7d3d0439
RA
8440 while (*ptr == TCPOPT_NOP)
8441 ptr++;
8442 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
8443 return -1;
8444
8445 /* Ensure timestamp value increases monotonically */
8446 if (l_lro)
c8855953 8447 if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
7d3d0439
RA
8448 return -1;
8449
8450 /* timestamp echo reply should be non-zero */
c8855953 8451 if (*((__be32 *)(ptr+6)) == 0)
7d3d0439
RA
8452 return -1;
8453 }
8454
8455 return 0;
8456}
8457
/*
 * Classify a received TCP segment against the per-ring LRO session table.
 *
 * Return codes consumed by the receive path:
 *   0 - every session slot in use; frame untracked (*lro set to NULL)
 *   1 - segment aggregated into an existing session
 *   2 - matched session must be flushed (out-of-order or unmergeable)
 *   3 - new session created for this segment
 *   4 - segment aggregated and session reached max size: flush it
 *   5 - frame not L3/L4 aggregatable; send it up unmerged
 */
static int
s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer, u8 **tcp,
	u32 *tcp_len, struct lro **lro, struct RxD_t *rxdp,
	struct s2io_nic *sp)
{
	struct iphdr *ip;
	struct tcphdr *tcph;
	int ret = 0, i;
	u16 vlan_tag = 0;

	/* Reject frames LRO cannot handle at L2 (non-TCP, non-DIX);
	 * on success ip/*tcp point at the headers inside the buffer.
	 */
	if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
		rxdp, sp))) {
		DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
			ip->saddr, ip->daddr);
	} else
		return ret;

	vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
	tcph = (struct tcphdr *)*tcp;
	*tcp_len = get_l4_pyld_length(ip, tcph);
	/* First pass: look for an in-use session matching the 4-tuple. */
	for (i=0; i<MAX_LRO_SESSIONS; i++) {
		struct lro *l_lro = &ring_data->lro0_n[i];
		if (l_lro->in_use) {
			if (check_for_socket_match(l_lro, ip, tcph))
				continue;
			/* Sock pair matched */
			*lro = l_lro;

			/* Out-of-order segment: flush the session. */
			if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
				DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
					"0x%x, actual 0x%x\n", __FUNCTION__,
					(*lro)->tcp_next_seq,
					ntohl(tcph->seq));

				sp->mac_control.stats_info->
					sw_stat.outof_sequence_pkts++;
				ret = 2;
				break;
			}

			if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
				ret = 1; /* Aggregate */
			else
				ret = 2; /* Flush both */
			break;
		}
	}

	if (ret == 0) {
		/* Before searching for available LRO objects,
		 * check if the pkt is L3/L4 aggregatable. If not
		 * don't create new LRO session. Just send this
		 * packet up.
		 */
		if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
			return 5;
		}

		/* Second pass: claim the first free session slot. */
		for (i=0; i<MAX_LRO_SESSIONS; i++) {
			struct lro *l_lro = &ring_data->lro0_n[i];
			if (!(l_lro->in_use)) {
				*lro = l_lro;
				ret = 3; /* Begin anew */
				break;
			}
		}
	}

	if (ret == 0) { /* sessions exceeded */
		DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
			__FUNCTION__);
		*lro = NULL;
		return ret;
	}

	/* Act on the classification decided above. */
	switch (ret) {
	case 3:
		initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
			vlan_tag);
		break;
	case 2:
		update_L3L4_header(sp, *lro);
		break;
	case 1:
		aggregate_new_rx(*lro, ip, tcph, *tcp_len);
		if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
			update_L3L4_header(sp, *lro);
			ret = 4; /* Flush the LRO */
		}
		break;
	default:
		DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
			__FUNCTION__);
		break;
	}

	return ret;
}
8556
1ee6dd77 8557static void clear_lro_session(struct lro *lro)
7d3d0439 8558{
1ee6dd77 8559 static u16 lro_struct_size = sizeof(struct lro);
7d3d0439
RA
8560
8561 memset(lro, 0, lro_struct_size);
8562}
8563
cdb5bf02 8564static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
7d3d0439
RA
8565{
8566 struct net_device *dev = skb->dev;
cdb5bf02 8567 struct s2io_nic *sp = dev->priv;
7d3d0439
RA
8568
8569 skb->protocol = eth_type_trans(skb, dev);
cdb5bf02
SH
8570 if (sp->vlgrp && vlan_tag
8571 && (vlan_strip_flag)) {
8572 /* Queueing the vlan frame to the upper layer */
8573 if (sp->config.napi)
8574 vlan_hwaccel_receive_skb(skb, sp->vlgrp, vlan_tag);
8575 else
8576 vlan_hwaccel_rx(skb, sp->vlgrp, vlan_tag);
8577 } else {
8578 if (sp->config.napi)
8579 netif_receive_skb(skb);
8580 else
8581 netif_rx(skb);
8582 }
7d3d0439
RA
8583}
8584
1ee6dd77
RB
8585static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8586 struct sk_buff *skb,
7d3d0439
RA
8587 u32 tcp_len)
8588{
75c30b13 8589 struct sk_buff *first = lro->parent;
7d3d0439
RA
8590
8591 first->len += tcp_len;
8592 first->data_len = lro->frags_len;
8593 skb_pull(skb, (skb->len - tcp_len));
75c30b13
AR
8594 if (skb_shinfo(first)->frag_list)
8595 lro->last_frag->next = skb;
7d3d0439
RA
8596 else
8597 skb_shinfo(first)->frag_list = skb;
372cc597 8598 first->truesize += skb->truesize;
75c30b13 8599 lro->last_frag = skb;
7d3d0439
RA
8600 sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
8601 return;
8602}
d796fdb7
LV
8603
8604/**
8605 * s2io_io_error_detected - called when PCI error is detected
8606 * @pdev: Pointer to PCI device
8453d43f 8607 * @state: The current pci connection state
d796fdb7
LV
8608 *
8609 * This function is called after a PCI bus error affecting
8610 * this device has been detected.
8611 */
8612static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8613 pci_channel_state_t state)
8614{
8615 struct net_device *netdev = pci_get_drvdata(pdev);
8616 struct s2io_nic *sp = netdev->priv;
8617
8618 netif_device_detach(netdev);
8619
8620 if (netif_running(netdev)) {
8621 /* Bring down the card, while avoiding PCI I/O */
8622 do_s2io_card_down(sp, 0);
d796fdb7
LV
8623 }
8624 pci_disable_device(pdev);
8625
8626 return PCI_ERS_RESULT_NEED_RESET;
8627}
8628
8629/**
8630 * s2io_io_slot_reset - called after the pci bus has been reset.
8631 * @pdev: Pointer to PCI device
8632 *
8633 * Restart the card from scratch, as if from a cold-boot.
8634 * At this point, the card has exprienced a hard reset,
8635 * followed by fixups by BIOS, and has its config space
8636 * set up identically to what it was at cold boot.
8637 */
8638static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8639{
8640 struct net_device *netdev = pci_get_drvdata(pdev);
8641 struct s2io_nic *sp = netdev->priv;
8642
8643 if (pci_enable_device(pdev)) {
8644 printk(KERN_ERR "s2io: "
8645 "Cannot re-enable PCI device after reset.\n");
8646 return PCI_ERS_RESULT_DISCONNECT;
8647 }
8648
8649 pci_set_master(pdev);
8650 s2io_reset(sp);
8651
8652 return PCI_ERS_RESULT_RECOVERED;
8653}
8654
8655/**
8656 * s2io_io_resume - called when traffic can start flowing again.
8657 * @pdev: Pointer to PCI device
8658 *
8659 * This callback is called when the error recovery driver tells
8660 * us that its OK to resume normal operation.
8661 */
8662static void s2io_io_resume(struct pci_dev *pdev)
8663{
8664 struct net_device *netdev = pci_get_drvdata(pdev);
8665 struct s2io_nic *sp = netdev->priv;
8666
8667 if (netif_running(netdev)) {
8668 if (s2io_card_up(sp)) {
8669 printk(KERN_ERR "s2io: "
8670 "Can't bring device back up after reset.\n");
8671 return;
8672 }
8673
8674 if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
8675 s2io_card_down(sp);
8676 printk(KERN_ERR "s2io: "
8677 "Can't resetore mac addr after reset.\n");
8678 return;
8679 }
8680 }
8681
8682 netif_device_attach(netdev);
8683 netif_wake_queue(netdev);
8684}