]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - drivers/net/s2io.c
S2io: Multiqueue network device support - FIFO selection based on L4 ports
[mirror_ubuntu-jammy-kernel.git] / drivers / net / s2io.c
CommitLineData
1da177e4 1/************************************************************************
776bd20f 2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
0c61ed5f 3 * Copyright(c) 2002-2007 Neterion Inc.
1da177e4
LT
4
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
12 *
13 * Credits:
20346722
K
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
1da177e4
LT
 18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
20346722 21 * Francois Romieu : For pointing out all code part that were
1da177e4 22 * deprecated and also styling related comments.
20346722 23 * Grant Grundler : For helping me get rid of some Architecture
1da177e4
LT
24 * dependent code.
 25 * Christoph Hellwig : Some more 2.6 specific issues in the driver.
20346722 26 *
1da177e4
LT
27 * The module loadable parameters that are supported by the driver and a brief
 28 * explanation of all the variables.
9dc737a7 29 *
20346722
K
30 * rx_ring_num : This can be used to program the number of receive rings used
31 * in the driver.
9dc737a7
AR
32 * rx_ring_sz: This defines the number of receive blocks each ring can have.
33 * This is also an array of size 8.
da6971d8 34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
6d517a27 35 * values are 1, 2.
1da177e4 36 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
20346722 37 * tx_fifo_len: This too is an array of 8. Each element defines the number of
1da177e4 38 * Tx descriptors that can be associated with each corresponding FIFO.
9dc737a7 39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
8abc4d5b 40 * 2(MSI_X). Default value is '2(MSI_X)'
43b7c451 41 * lro_enable: Specifies whether to enable Large Receive Offload (LRO) or not.
9dc737a7
AR
42 * Possible values '1' for enable '0' for disable. Default is '0'
43 * lro_max_pkts: This parameter defines maximum number of packets can be
44 * aggregated as a single large packet
926930b2
SS
45 * napi: This parameter used to enable/disable NAPI (polling Rx)
46 * Possible values '1' for enable and '0' for disable. Default is '1'
47 * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
48 * Possible values '1' for enable and '0' for disable. Default is '0'
49 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
50 * Possible values '1' for enable , '0' for disable.
51 * Default is '2' - which means disable in promisc mode
52 * and enable in non-promiscuous mode.
3a3d5756
SH
53 * multiq: This parameter used to enable/disable MULTIQUEUE support.
54 * Possible values '1' for enable and '0' for disable. Default is '0'
1da177e4
LT
55 ************************************************************************/
56
1da177e4
LT
57#include <linux/module.h>
58#include <linux/types.h>
59#include <linux/errno.h>
60#include <linux/ioport.h>
61#include <linux/pci.h>
1e7f0bd8 62#include <linux/dma-mapping.h>
1da177e4
LT
63#include <linux/kernel.h>
64#include <linux/netdevice.h>
65#include <linux/etherdevice.h>
66#include <linux/skbuff.h>
67#include <linux/init.h>
68#include <linux/delay.h>
69#include <linux/stddef.h>
70#include <linux/ioctl.h>
71#include <linux/timex.h>
1da177e4 72#include <linux/ethtool.h>
1da177e4 73#include <linux/workqueue.h>
be3a6b02 74#include <linux/if_vlan.h>
7d3d0439
RA
75#include <linux/ip.h>
76#include <linux/tcp.h>
77#include <net/tcp.h>
1da177e4 78
1da177e4
LT
79#include <asm/system.h>
80#include <asm/uaccess.h>
20346722 81#include <asm/io.h>
fe931395 82#include <asm/div64.h>
330ce0de 83#include <asm/irq.h>
1da177e4
LT
84
85/* local include */
86#include "s2io.h"
87#include "s2io-regs.h"
88
b7c5678f 89#define DRV_VERSION "2.0.26.15-2"
6c1792f4 90
1da177e4 91/* S2io Driver name & version. */
20346722 92static char s2io_driver_name[] = "Neterion";
6c1792f4 93static char s2io_driver_version[] = DRV_VERSION;
1da177e4 94
6d517a27
VP
95static int rxd_size[2] = {32,48};
96static int rxd_count[2] = {127,85};
da6971d8 97
1ee6dd77 98static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
5e25b9dd
K
99{
100 int ret;
101
102 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
103 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
104
105 return ret;
106}
107
20346722 108/*
1da177e4
LT
109 * Cards with following subsystem_id have a link state indication
110 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
111 * macro below identifies these cards given the subsystem_id.
112 */
541ae68f
K
113#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
114 (dev_type == XFRAME_I_DEVICE) ? \
115 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
116 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
1da177e4
LT
117
118#define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
119 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
120#define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
121#define PANIC 1
122#define LOW 2
1ee6dd77 123static inline int rx_buffer_level(struct s2io_nic * sp, int rxb_size, int ring)
1da177e4 124{
1ee6dd77 125 struct mac_info *mac_control;
20346722
K
126
127 mac_control = &sp->mac_control;
863c11a9
AR
128 if (rxb_size <= rxd_count[sp->rxd_mode])
129 return PANIC;
130 else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
131 return LOW;
132 return 0;
1da177e4
LT
133}
134
92b84437
SS
135static inline int is_s2io_card_up(const struct s2io_nic * sp)
136{
137 return test_bit(__S2IO_STATE_CARD_UP, &sp->state);
138}
139
1da177e4
LT
140/* Ethtool related variables and Macros. */
141static char s2io_gstrings[][ETH_GSTRING_LEN] = {
142 "Register test\t(offline)",
143 "Eeprom test\t(offline)",
144 "Link test\t(online)",
145 "RLDRAM test\t(offline)",
146 "BIST Test\t(offline)"
147};
148
fa1f0cb3 149static char ethtool_xena_stats_keys[][ETH_GSTRING_LEN] = {
1da177e4
LT
150 {"tmac_frms"},
151 {"tmac_data_octets"},
152 {"tmac_drop_frms"},
153 {"tmac_mcst_frms"},
154 {"tmac_bcst_frms"},
155 {"tmac_pause_ctrl_frms"},
bd1034f0
AR
156 {"tmac_ttl_octets"},
157 {"tmac_ucst_frms"},
158 {"tmac_nucst_frms"},
1da177e4 159 {"tmac_any_err_frms"},
bd1034f0 160 {"tmac_ttl_less_fb_octets"},
1da177e4
LT
161 {"tmac_vld_ip_octets"},
162 {"tmac_vld_ip"},
163 {"tmac_drop_ip"},
164 {"tmac_icmp"},
165 {"tmac_rst_tcp"},
166 {"tmac_tcp"},
167 {"tmac_udp"},
168 {"rmac_vld_frms"},
169 {"rmac_data_octets"},
170 {"rmac_fcs_err_frms"},
171 {"rmac_drop_frms"},
172 {"rmac_vld_mcst_frms"},
173 {"rmac_vld_bcst_frms"},
174 {"rmac_in_rng_len_err_frms"},
bd1034f0 175 {"rmac_out_rng_len_err_frms"},
1da177e4
LT
176 {"rmac_long_frms"},
177 {"rmac_pause_ctrl_frms"},
bd1034f0
AR
178 {"rmac_unsup_ctrl_frms"},
179 {"rmac_ttl_octets"},
180 {"rmac_accepted_ucst_frms"},
181 {"rmac_accepted_nucst_frms"},
1da177e4 182 {"rmac_discarded_frms"},
bd1034f0
AR
183 {"rmac_drop_events"},
184 {"rmac_ttl_less_fb_octets"},
185 {"rmac_ttl_frms"},
1da177e4
LT
186 {"rmac_usized_frms"},
187 {"rmac_osized_frms"},
188 {"rmac_frag_frms"},
189 {"rmac_jabber_frms"},
bd1034f0
AR
190 {"rmac_ttl_64_frms"},
191 {"rmac_ttl_65_127_frms"},
192 {"rmac_ttl_128_255_frms"},
193 {"rmac_ttl_256_511_frms"},
194 {"rmac_ttl_512_1023_frms"},
195 {"rmac_ttl_1024_1518_frms"},
1da177e4
LT
196 {"rmac_ip"},
197 {"rmac_ip_octets"},
198 {"rmac_hdr_err_ip"},
199 {"rmac_drop_ip"},
200 {"rmac_icmp"},
201 {"rmac_tcp"},
202 {"rmac_udp"},
203 {"rmac_err_drp_udp"},
bd1034f0
AR
204 {"rmac_xgmii_err_sym"},
205 {"rmac_frms_q0"},
206 {"rmac_frms_q1"},
207 {"rmac_frms_q2"},
208 {"rmac_frms_q3"},
209 {"rmac_frms_q4"},
210 {"rmac_frms_q5"},
211 {"rmac_frms_q6"},
212 {"rmac_frms_q7"},
213 {"rmac_full_q0"},
214 {"rmac_full_q1"},
215 {"rmac_full_q2"},
216 {"rmac_full_q3"},
217 {"rmac_full_q4"},
218 {"rmac_full_q5"},
219 {"rmac_full_q6"},
220 {"rmac_full_q7"},
1da177e4 221 {"rmac_pause_cnt"},
bd1034f0
AR
222 {"rmac_xgmii_data_err_cnt"},
223 {"rmac_xgmii_ctrl_err_cnt"},
1da177e4
LT
224 {"rmac_accepted_ip"},
225 {"rmac_err_tcp"},
bd1034f0
AR
226 {"rd_req_cnt"},
227 {"new_rd_req_cnt"},
228 {"new_rd_req_rtry_cnt"},
229 {"rd_rtry_cnt"},
230 {"wr_rtry_rd_ack_cnt"},
231 {"wr_req_cnt"},
232 {"new_wr_req_cnt"},
233 {"new_wr_req_rtry_cnt"},
234 {"wr_rtry_cnt"},
235 {"wr_disc_cnt"},
236 {"rd_rtry_wr_ack_cnt"},
237 {"txp_wr_cnt"},
238 {"txd_rd_cnt"},
239 {"txd_wr_cnt"},
240 {"rxd_rd_cnt"},
241 {"rxd_wr_cnt"},
242 {"txf_rd_cnt"},
fa1f0cb3
SS
243 {"rxf_wr_cnt"}
244};
245
246static char ethtool_enhanced_stats_keys[][ETH_GSTRING_LEN] = {
bd1034f0
AR
247 {"rmac_ttl_1519_4095_frms"},
248 {"rmac_ttl_4096_8191_frms"},
249 {"rmac_ttl_8192_max_frms"},
250 {"rmac_ttl_gt_max_frms"},
251 {"rmac_osized_alt_frms"},
252 {"rmac_jabber_alt_frms"},
253 {"rmac_gt_max_alt_frms"},
254 {"rmac_vlan_frms"},
255 {"rmac_len_discard"},
256 {"rmac_fcs_discard"},
257 {"rmac_pf_discard"},
258 {"rmac_da_discard"},
259 {"rmac_red_discard"},
260 {"rmac_rts_discard"},
261 {"rmac_ingm_full_discard"},
fa1f0cb3
SS
262 {"link_fault_cnt"}
263};
264
265static char ethtool_driver_stats_keys[][ETH_GSTRING_LEN] = {
7ba013ac
K
266 {"\n DRIVER STATISTICS"},
267 {"single_bit_ecc_errs"},
268 {"double_bit_ecc_errs"},
bd1034f0
AR
269 {"parity_err_cnt"},
270 {"serious_err_cnt"},
271 {"soft_reset_cnt"},
272 {"fifo_full_cnt"},
8116f3cf
SS
273 {"ring_0_full_cnt"},
274 {"ring_1_full_cnt"},
275 {"ring_2_full_cnt"},
276 {"ring_3_full_cnt"},
277 {"ring_4_full_cnt"},
278 {"ring_5_full_cnt"},
279 {"ring_6_full_cnt"},
280 {"ring_7_full_cnt"},
43b7c451
SH
281 {"alarm_transceiver_temp_high"},
282 {"alarm_transceiver_temp_low"},
283 {"alarm_laser_bias_current_high"},
284 {"alarm_laser_bias_current_low"},
285 {"alarm_laser_output_power_high"},
286 {"alarm_laser_output_power_low"},
287 {"warn_transceiver_temp_high"},
288 {"warn_transceiver_temp_low"},
289 {"warn_laser_bias_current_high"},
290 {"warn_laser_bias_current_low"},
291 {"warn_laser_output_power_high"},
292 {"warn_laser_output_power_low"},
293 {"lro_aggregated_pkts"},
294 {"lro_flush_both_count"},
295 {"lro_out_of_sequence_pkts"},
296 {"lro_flush_due_to_max_pkts"},
297 {"lro_avg_aggr_pkts"},
298 {"mem_alloc_fail_cnt"},
299 {"pci_map_fail_cnt"},
300 {"watchdog_timer_cnt"},
301 {"mem_allocated"},
302 {"mem_freed"},
303 {"link_up_cnt"},
304 {"link_down_cnt"},
305 {"link_up_time"},
306 {"link_down_time"},
307 {"tx_tcode_buf_abort_cnt"},
308 {"tx_tcode_desc_abort_cnt"},
309 {"tx_tcode_parity_err_cnt"},
310 {"tx_tcode_link_loss_cnt"},
311 {"tx_tcode_list_proc_err_cnt"},
312 {"rx_tcode_parity_err_cnt"},
313 {"rx_tcode_abort_cnt"},
314 {"rx_tcode_parity_abort_cnt"},
315 {"rx_tcode_rda_fail_cnt"},
316 {"rx_tcode_unkn_prot_cnt"},
317 {"rx_tcode_fcs_err_cnt"},
318 {"rx_tcode_buf_size_err_cnt"},
319 {"rx_tcode_rxd_corrupt_cnt"},
320 {"rx_tcode_unkn_err_cnt"},
8116f3cf
SS
321 {"tda_err_cnt"},
322 {"pfc_err_cnt"},
323 {"pcc_err_cnt"},
324 {"tti_err_cnt"},
325 {"tpa_err_cnt"},
326 {"sm_err_cnt"},
327 {"lso_err_cnt"},
328 {"mac_tmac_err_cnt"},
329 {"mac_rmac_err_cnt"},
330 {"xgxs_txgxs_err_cnt"},
331 {"xgxs_rxgxs_err_cnt"},
332 {"rc_err_cnt"},
333 {"prc_pcix_err_cnt"},
334 {"rpa_err_cnt"},
335 {"rda_err_cnt"},
336 {"rti_err_cnt"},
337 {"mc_err_cnt"}
1da177e4
LT
338};
339
4c3616cd
AMR
340#define S2IO_XENA_STAT_LEN ARRAY_SIZE(ethtool_xena_stats_keys)
341#define S2IO_ENHANCED_STAT_LEN ARRAY_SIZE(ethtool_enhanced_stats_keys)
342#define S2IO_DRIVER_STAT_LEN ARRAY_SIZE(ethtool_driver_stats_keys)
fa1f0cb3
SS
343
344#define XFRAME_I_STAT_LEN (S2IO_XENA_STAT_LEN + S2IO_DRIVER_STAT_LEN )
345#define XFRAME_II_STAT_LEN (XFRAME_I_STAT_LEN + S2IO_ENHANCED_STAT_LEN )
346
347#define XFRAME_I_STAT_STRINGS_LEN ( XFRAME_I_STAT_LEN * ETH_GSTRING_LEN )
348#define XFRAME_II_STAT_STRINGS_LEN ( XFRAME_II_STAT_LEN * ETH_GSTRING_LEN )
1da177e4 349
4c3616cd 350#define S2IO_TEST_LEN ARRAY_SIZE(s2io_gstrings)
1da177e4
LT
351#define S2IO_STRINGS_LEN S2IO_TEST_LEN * ETH_GSTRING_LEN
352
25fff88e
K
/*
 * Initialize and arm a kernel timer in one step.
 * Wrapped in do { } while (0) so the macro expands to a single
 * statement; the bare multi-statement form would silently break
 * inside an un-braced "if (cond) S2IO_TIMER_CONF(...);".
 */
#define S2IO_TIMER_CONF(timer, handle, arg, exp)	\
	do {						\
		init_timer(&timer);			\
		timer.function = handle;		\
		timer.data = (unsigned long) arg;	\
		mod_timer(&timer, (jiffies + exp));	\
	} while (0)
358
2fd37688
SS
359/* copy mac addr to def_mac_addr array */
360static void do_s2io_copy_mac_addr(struct s2io_nic *sp, int offset, u64 mac_addr)
361{
362 sp->def_mac_addr[offset].mac_addr[5] = (u8) (mac_addr);
363 sp->def_mac_addr[offset].mac_addr[4] = (u8) (mac_addr >> 8);
364 sp->def_mac_addr[offset].mac_addr[3] = (u8) (mac_addr >> 16);
365 sp->def_mac_addr[offset].mac_addr[2] = (u8) (mac_addr >> 24);
366 sp->def_mac_addr[offset].mac_addr[1] = (u8) (mac_addr >> 32);
367 sp->def_mac_addr[offset].mac_addr[0] = (u8) (mac_addr >> 40);
368}
be3a6b02
K
369/* Add the vlan */
370static void s2io_vlan_rx_register(struct net_device *dev,
371 struct vlan_group *grp)
372{
2fda096d 373 int i;
1ee6dd77 374 struct s2io_nic *nic = dev->priv;
2fda096d
SR
375 unsigned long flags[MAX_TX_FIFOS];
376 struct mac_info *mac_control = &nic->mac_control;
377 struct config_param *config = &nic->config;
378
379 for (i = 0; i < config->tx_fifo_num; i++)
380 spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags[i]);
be3a6b02 381
be3a6b02 382 nic->vlgrp = grp;
2fda096d
SR
383 for (i = config->tx_fifo_num - 1; i >= 0; i--)
384 spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock,
385 flags[i]);
be3a6b02
K
386}
387
926930b2 388/* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
7b490343 389static int vlan_strip_flag;
926930b2 390
20346722 391/*
1da177e4
LT
392 * Constants to be programmed into the Xena's registers, to configure
393 * the XAUI.
394 */
395
1da177e4 396#define END_SIGN 0x0
f71e1309 397static const u64 herc_act_dtx_cfg[] = {
541ae68f 398 /* Set address */
e960fc5c 399 0x8000051536750000ULL, 0x80000515367500E0ULL,
541ae68f 400 /* Write data */
e960fc5c 401 0x8000051536750004ULL, 0x80000515367500E4ULL,
541ae68f
K
402 /* Set address */
403 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
404 /* Write data */
405 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
406 /* Set address */
e960fc5c 407 0x801205150D440000ULL, 0x801205150D4400E0ULL,
408 /* Write data */
409 0x801205150D440004ULL, 0x801205150D4400E4ULL,
410 /* Set address */
541ae68f
K
411 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
412 /* Write data */
413 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
414 /* Done */
415 END_SIGN
416};
417
f71e1309 418static const u64 xena_dtx_cfg[] = {
c92ca04b 419 /* Set address */
1da177e4 420 0x8000051500000000ULL, 0x80000515000000E0ULL,
c92ca04b
AR
421 /* Write data */
422 0x80000515D9350004ULL, 0x80000515D93500E4ULL,
423 /* Set address */
424 0x8001051500000000ULL, 0x80010515000000E0ULL,
425 /* Write data */
426 0x80010515001E0004ULL, 0x80010515001E00E4ULL,
427 /* Set address */
1da177e4 428 0x8002051500000000ULL, 0x80020515000000E0ULL,
c92ca04b
AR
429 /* Write data */
430 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
1da177e4
LT
431 END_SIGN
432};
433
20346722 434/*
1da177e4
LT
435 * Constants for Fixing the MacAddress problem seen mostly on
436 * Alpha machines.
437 */
f71e1309 438static const u64 fix_mac[] = {
1da177e4
LT
439 0x0060000000000000ULL, 0x0060600000000000ULL,
440 0x0040600000000000ULL, 0x0000600000000000ULL,
441 0x0020600000000000ULL, 0x0060600000000000ULL,
442 0x0020600000000000ULL, 0x0060600000000000ULL,
443 0x0020600000000000ULL, 0x0060600000000000ULL,
444 0x0020600000000000ULL, 0x0060600000000000ULL,
445 0x0020600000000000ULL, 0x0060600000000000ULL,
446 0x0020600000000000ULL, 0x0060600000000000ULL,
447 0x0020600000000000ULL, 0x0060600000000000ULL,
448 0x0020600000000000ULL, 0x0060600000000000ULL,
449 0x0020600000000000ULL, 0x0060600000000000ULL,
450 0x0020600000000000ULL, 0x0060600000000000ULL,
451 0x0020600000000000ULL, 0x0000600000000000ULL,
452 0x0040600000000000ULL, 0x0060600000000000ULL,
453 END_SIGN
454};
455
b41477f3
AR
456MODULE_LICENSE("GPL");
457MODULE_VERSION(DRV_VERSION);
458
459
1da177e4 460/* Module Loadable parameters. */
6cfc482b 461S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
b41477f3 462S2IO_PARM_INT(rx_ring_num, 1);
3a3d5756 463S2IO_PARM_INT(multiq, 0);
b41477f3
AR
464S2IO_PARM_INT(rx_ring_mode, 1);
465S2IO_PARM_INT(use_continuous_tx_intrs, 1);
466S2IO_PARM_INT(rmac_pause_time, 0x100);
467S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
468S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
469S2IO_PARM_INT(shared_splits, 0);
470S2IO_PARM_INT(tmac_util_period, 5);
471S2IO_PARM_INT(rmac_util_period, 5);
b41477f3 472S2IO_PARM_INT(l3l4hdr_size, 128);
6cfc482b
SH
473/* 0 is no steering, 1 is Priority steering, 2 is Default steering */
474S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
303bcb4b 475/* Frequency of Rx desc syncs expressed as power of 2 */
b41477f3 476S2IO_PARM_INT(rxsync_frequency, 3);
eccb8628 477/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
8abc4d5b 478S2IO_PARM_INT(intr_type, 2);
7d3d0439 479/* Large receive offload feature */
43b7c451
SH
480static unsigned int lro_enable;
481module_param_named(lro, lro_enable, uint, 0);
482
7d3d0439
RA
483/* Max pkts to be aggregated by LRO at one time. If not specified,
484 * aggregation happens until we hit max IP pkt size(64K)
485 */
b41477f3 486S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
b41477f3 487S2IO_PARM_INT(indicate_max_pkts, 0);
db874e65
SS
488
489S2IO_PARM_INT(napi, 1);
490S2IO_PARM_INT(ufo, 0);
926930b2 491S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
b41477f3
AR
492
493static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
494 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
495static unsigned int rx_ring_sz[MAX_RX_RINGS] =
496 {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
497static unsigned int rts_frm_len[MAX_RX_RINGS] =
498 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
499
500module_param_array(tx_fifo_len, uint, NULL, 0);
501module_param_array(rx_ring_sz, uint, NULL, 0);
502module_param_array(rts_frm_len, uint, NULL, 0);
1da177e4 503
20346722 504/*
1da177e4 505 * S2IO device table.
20346722 506 * This table lists all the devices that this driver supports.
1da177e4
LT
507 */
508static struct pci_device_id s2io_tbl[] __devinitdata = {
509 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
510 PCI_ANY_ID, PCI_ANY_ID},
511 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
512 PCI_ANY_ID, PCI_ANY_ID},
513 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
20346722
K
514 PCI_ANY_ID, PCI_ANY_ID},
515 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
516 PCI_ANY_ID, PCI_ANY_ID},
1da177e4
LT
517 {0,}
518};
519
520MODULE_DEVICE_TABLE(pci, s2io_tbl);
521
d796fdb7
LV
522static struct pci_error_handlers s2io_err_handler = {
523 .error_detected = s2io_io_error_detected,
524 .slot_reset = s2io_io_slot_reset,
525 .resume = s2io_io_resume,
526};
527
1da177e4
LT
528static struct pci_driver s2io_driver = {
529 .name = "S2IO",
530 .id_table = s2io_tbl,
531 .probe = s2io_init_nic,
532 .remove = __devexit_p(s2io_rem_nic),
d796fdb7 533 .err_handler = &s2io_err_handler,
1da177e4
LT
534};
535
536/* A simplifier macro used both by init and free shared_mem Fns(). */
537#define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
538
3a3d5756
SH
539/* netqueue manipulation helper functions */
540static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
541{
542 int i;
543#ifdef CONFIG_NETDEVICES_MULTIQUEUE
544 if (sp->config.multiq) {
545 for (i = 0; i < sp->config.tx_fifo_num; i++)
546 netif_stop_subqueue(sp->dev, i);
547 } else
548#endif
549 {
550 for (i = 0; i < sp->config.tx_fifo_num; i++)
551 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
552 netif_stop_queue(sp->dev);
553 }
554}
555
556static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
557{
558#ifdef CONFIG_NETDEVICES_MULTIQUEUE
559 if (sp->config.multiq)
560 netif_stop_subqueue(sp->dev, fifo_no);
561 else
562#endif
563 {
564 sp->mac_control.fifos[fifo_no].queue_state =
565 FIFO_QUEUE_STOP;
566 netif_stop_queue(sp->dev);
567 }
568}
569
570static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
571{
572 int i;
573#ifdef CONFIG_NETDEVICES_MULTIQUEUE
574 if (sp->config.multiq) {
575 for (i = 0; i < sp->config.tx_fifo_num; i++)
576 netif_start_subqueue(sp->dev, i);
577 } else
578#endif
579 {
580 for (i = 0; i < sp->config.tx_fifo_num; i++)
581 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
582 netif_start_queue(sp->dev);
583 }
584}
585
586static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
587{
588#ifdef CONFIG_NETDEVICES_MULTIQUEUE
589 if (sp->config.multiq)
590 netif_start_subqueue(sp->dev, fifo_no);
591 else
592#endif
593 {
594 sp->mac_control.fifos[fifo_no].queue_state =
595 FIFO_QUEUE_START;
596 netif_start_queue(sp->dev);
597 }
598}
599
600static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
601{
602 int i;
603#ifdef CONFIG_NETDEVICES_MULTIQUEUE
604 if (sp->config.multiq) {
605 for (i = 0; i < sp->config.tx_fifo_num; i++)
606 netif_wake_subqueue(sp->dev, i);
607 } else
608#endif
609 {
610 for (i = 0; i < sp->config.tx_fifo_num; i++)
611 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
612 netif_wake_queue(sp->dev);
613 }
614}
615
616static inline void s2io_wake_tx_queue(
617 struct fifo_info *fifo, int cnt, u8 multiq)
618{
619
620#ifdef CONFIG_NETDEVICES_MULTIQUEUE
621 if (multiq) {
622 if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
623 netif_wake_subqueue(fifo->dev, fifo->fifo_no);
624 } else
625#endif
626 if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
627 if (netif_queue_stopped(fifo->dev)) {
628 fifo->queue_state = FIFO_QUEUE_START;
629 netif_wake_queue(fifo->dev);
630 }
631 }
632}
633
1da177e4
LT
/**
 * init_shared_mem - Allocation and Initialization of Memory
 * @nic: Device private variable.
 * Description: The function allocates all the memory areas shared
 * between the NIC and the driver. This includes Tx descriptors,
 * Rx descriptors and the statistics block.
 * Return: SUCCESS, or a negative errno / FAILURE on error. On any
 * failure the caller is expected to invoke free_shared_mem(), which
 * releases whatever was allocated before the failure.
 */

static int init_shared_mem(struct s2io_nic *nic)
{
	u32 size;
	void *tmp_v_addr, *tmp_v_addr_next;
	dma_addr_t tmp_p_addr, tmp_p_addr_next;
	struct RxD_block *pre_rxd_blk = NULL;
	int i, j, blk_cnt;
	int lst_size, lst_per_page;
	struct net_device *dev = nic->dev;
	unsigned long tmp;
	struct buffAdd *ba;

	struct mac_info *mac_control;
	struct config_param *config;
	unsigned long long mem_allocated = 0;	/* running byte count for sw_stat */

	mac_control = &nic->mac_control;
	config = &nic->config;


	/* Allocation and initialization of TXDLs in FIFOs */
	/* Total TxDs across all fifos must not exceed the adapter limit. */
	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		size += config->tx_cfg[i].fifo_len;
	}
	if (size > MAX_AVAILABLE_TXDS) {
		DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
		DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
		return -EINVAL;
	}

	/* Each individual fifo length must also be sane. */
	size = 0;
	for (i = 0; i < config->tx_fifo_num; i++) {
		size = config->tx_cfg[i].fifo_len;
		/*
		 * Legal values are from 2 to 8192
		 */
		if (size < 2) {
			DBG_PRINT(ERR_DBG, "s2io: Invalid fifo len (%d)", size);
			DBG_PRINT(ERR_DBG, "for fifo %d\n", i);
			DBG_PRINT(ERR_DBG, "s2io: Legal values for fifo len"
				"are 2 to 8192\n");
			return -EINVAL;
		}
	}

	/* TxD lists are packed whole into pages: lst_per_page per PAGE_SIZE. */
	lst_size = (sizeof(struct TxD) * config->max_txds);
	lst_per_page = PAGE_SIZE / lst_size;

	/* Per-fifo bookkeeping array, one list_info_hold per TxD list. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		int fifo_len = config->tx_cfg[i].fifo_len;
		int list_holder_size = fifo_len * sizeof(struct list_info_hold);
		mac_control->fifos[i].list_info = kzalloc(list_holder_size,
							  GFP_KERNEL);
		if (!mac_control->fifos[i].list_info) {
			DBG_PRINT(INFO_DBG,
				  "Malloc failed for list_info\n");
			return -ENOMEM;
		}
		mem_allocated += list_holder_size;
	}
	for (i = 0; i < config->tx_fifo_num; i++) {
		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
						lst_per_page);
		mac_control->fifos[i].tx_curr_put_info.offset = 0;
		mac_control->fifos[i].tx_curr_put_info.fifo_len =
		    config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].tx_curr_get_info.offset = 0;
		mac_control->fifos[i].tx_curr_get_info.fifo_len =
		    config->tx_cfg[i].fifo_len - 1;
		mac_control->fifos[i].fifo_no = i;
		mac_control->fifos[i].nic = nic;
		mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
		mac_control->fifos[i].dev = dev;

		/* Carve each DMA-coherent page into TxD lists. */
		for (j = 0; j < page_num; j++) {
			int k = 0;
			dma_addr_t tmp_p;
			void *tmp_v;
			tmp_v = pci_alloc_consistent(nic->pdev,
						     PAGE_SIZE, &tmp_p);
			if (!tmp_v) {
				DBG_PRINT(INFO_DBG,
					  "pci_alloc_consistent ");
				DBG_PRINT(INFO_DBG, "failed for TxDL\n");
				return -ENOMEM;
			}
			/* If we got a zero DMA address(can happen on
			 * certain platforms like PPC), reallocate.
			 * Store virtual address of page we don't want,
			 * to be freed later.
			 */
			/* NOTE(review): the zero-DMA page is parked in
			 * zerodma_virt_addr and presumably reclaimed by
			 * free_shared_mem() — confirm against that path. */
			if (!tmp_p) {
				mac_control->zerodma_virt_addr = tmp_v;
				DBG_PRINT(INIT_DBG,
				"%s: Zero DMA address for TxDL. ", dev->name);
				DBG_PRINT(INIT_DBG,
				"Virtual address %p\n", tmp_v);
				tmp_v = pci_alloc_consistent(nic->pdev,
						     PAGE_SIZE, &tmp_p);
				if (!tmp_v) {
					DBG_PRINT(INFO_DBG,
					  "pci_alloc_consistent ");
					DBG_PRINT(INFO_DBG, "failed for TxDL\n");
					return -ENOMEM;
				}
				mem_allocated += PAGE_SIZE;
			}
			/* Record virtual/physical address of every list
			 * carved out of this page. */
			while (k < lst_per_page) {
				int l = (j * lst_per_page) + k;
				if (l == config->tx_cfg[i].fifo_len)
					break;
				mac_control->fifos[i].list_info[l].list_virt_addr =
				    tmp_v + (k * lst_size);
				mac_control->fifos[i].list_info[l].list_phy_addr =
				    tmp_p + (k * lst_size);
				k++;
			}
		}
	}

	/* Per-fifo scratch array used by the UFO (UDP fragmentation
	 * offload) transmit path, one u64 per TxD. */
	for (i = 0; i < config->tx_fifo_num; i++) {
		size = config->tx_cfg[i].fifo_len;
		mac_control->fifos[i].ufo_in_band_v
			= kcalloc(size, sizeof(u64), GFP_KERNEL);
		if (!mac_control->fifos[i].ufo_in_band_v)
			return -ENOMEM;
		mem_allocated += (size * sizeof(u64));
	}

	/* Allocation and initialization of RXDs in Rings */
	size = 0;
	for (i = 0; i < config->rx_ring_num; i++) {
		/* Ring size must be a whole number of RxD blocks
		 * (rxd_count + 1 descriptors per block). */
		if (config->rx_cfg[i].num_rxd %
		    (rxd_count[nic->rxd_mode] + 1)) {
			DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
			DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
				  i);
			DBG_PRINT(ERR_DBG, "RxDs per Block");
			return FAILURE;
		}
		size += config->rx_cfg[i].num_rxd;
		mac_control->rings[i].block_count =
			config->rx_cfg[i].num_rxd /
			(rxd_count[nic->rxd_mode] + 1 );
		/* One descriptor per block is the link descriptor, so the
		 * usable packet count excludes block_count entries. */
		mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
			mac_control->rings[i].block_count;
	}
	if (nic->rxd_mode == RXD_MODE_1)
		size = (size * (sizeof(struct RxD1)));
	else
		size = (size * (sizeof(struct RxD3)));

	for (i = 0; i < config->rx_ring_num; i++) {
		mac_control->rings[i].rx_curr_get_info.block_index = 0;
		mac_control->rings[i].rx_curr_get_info.offset = 0;
		mac_control->rings[i].rx_curr_get_info.ring_len =
		    config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].rx_curr_put_info.block_index = 0;
		mac_control->rings[i].rx_curr_put_info.offset = 0;
		mac_control->rings[i].rx_curr_put_info.ring_len =
		    config->rx_cfg[i].num_rxd - 1;
		mac_control->rings[i].nic = nic;
		mac_control->rings[i].ring_no = i;

		blk_cnt = config->rx_cfg[i].num_rxd /
		    (rxd_count[nic->rxd_mode] + 1);
		/* Allocating all the Rx blocks */
		for (j = 0; j < blk_cnt; j++) {
			struct rx_block_info *rx_blocks;
			int l;

			rx_blocks = &mac_control->rings[i].rx_blocks[j];
			size = SIZE_OF_BLOCK; //size is always page size
			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
							  &tmp_p_addr);
			if (tmp_v_addr == NULL) {
				/*
				 * In case of failure, free_shared_mem()
				 * is called, which should free any
				 * memory that was alloced till the
				 * failure happened.
				 */
				rx_blocks->block_virt_addr = tmp_v_addr;
				return -ENOMEM;
			}
			mem_allocated += size;
			memset(tmp_v_addr, 0, size);
			rx_blocks->block_virt_addr = tmp_v_addr;
			rx_blocks->block_dma_addr = tmp_p_addr;
			/* Side table holding each RxD's addresses within
			 * the block. */
			rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
						  rxd_count[nic->rxd_mode],
						  GFP_KERNEL);
			if (!rx_blocks->rxds)
				return -ENOMEM;
			mem_allocated +=
			(sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
			for (l=0; l<rxd_count[nic->rxd_mode];l++) {
				rx_blocks->rxds[l].virt_addr =
					rx_blocks->block_virt_addr +
					(rxd_size[nic->rxd_mode] * l);
				rx_blocks->rxds[l].dma_addr =
					rx_blocks->block_dma_addr +
					(rxd_size[nic->rxd_mode] * l);
			}
		}
		/* Interlinking all Rx Blocks */
		/* Each block's trailing link descriptor points (virtually
		 * and physically) at the next block, wrapping modulo
		 * blk_cnt to form a circular chain. */
		for (j = 0; j < blk_cnt; j++) {
			tmp_v_addr =
				mac_control->rings[i].rx_blocks[j].block_virt_addr;
			tmp_v_addr_next =
				mac_control->rings[i].rx_blocks[(j + 1) %
					      blk_cnt].block_virt_addr;
			tmp_p_addr =
				mac_control->rings[i].rx_blocks[j].block_dma_addr;
			tmp_p_addr_next =
				mac_control->rings[i].rx_blocks[(j + 1) %
					      blk_cnt].block_dma_addr;

			pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
			pre_rxd_blk->reserved_2_pNext_RxD_block =
			    (unsigned long) tmp_v_addr_next;
			pre_rxd_blk->pNext_RxD_Blk_physical =
			    (u64) tmp_p_addr_next;
		}
	}
	if (nic->rxd_mode == RXD_MODE_3B) {
		/*
		 * Allocation of Storages for buffer addresses in 2BUFF mode
		 * and the buffers as well.
		 */
		for (i = 0; i < config->rx_ring_num; i++) {
			blk_cnt = config->rx_cfg[i].num_rxd /
			   (rxd_count[nic->rxd_mode]+ 1);
			mac_control->rings[i].ba =
				kmalloc((sizeof(struct buffAdd *) * blk_cnt),
				     GFP_KERNEL);
			if (!mac_control->rings[i].ba)
				return -ENOMEM;
			mem_allocated +=(sizeof(struct buffAdd *) * blk_cnt);
			for (j = 0; j < blk_cnt; j++) {
				int k = 0;
				mac_control->rings[i].ba[j] =
					kmalloc((sizeof(struct buffAdd) *
						(rxd_count[nic->rxd_mode] + 1)),
						GFP_KERNEL);
				if (!mac_control->rings[i].ba[j])
					return -ENOMEM;
				mem_allocated += (sizeof(struct buffAdd) * \
					(rxd_count[nic->rxd_mode] + 1));
				while (k != rxd_count[nic->rxd_mode]) {
					ba = &mac_control->rings[i].ba[j][k];

					/* Over-allocate by ALIGN_SIZE and
					 * round the usable pointer up.
					 * NOTE(review): the mask step assumes
					 * ALIGN_SIZE is (2^n - 1) — confirm
					 * its definition in s2io.h. */
					ba->ba_0_org = (void *) kmalloc
					    (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
					if (!ba->ba_0_org)
						return -ENOMEM;
					mem_allocated +=
						(BUF0_LEN + ALIGN_SIZE);
					tmp = (unsigned long)ba->ba_0_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long) ALIGN_SIZE);
					ba->ba_0 = (void *) tmp;

					ba->ba_1_org = (void *) kmalloc
					    (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
					if (!ba->ba_1_org)
						return -ENOMEM;
					mem_allocated
						+= (BUF1_LEN + ALIGN_SIZE);
					tmp = (unsigned long) ba->ba_1_org;
					tmp += ALIGN_SIZE;
					tmp &= ~((unsigned long) ALIGN_SIZE);
					ba->ba_1 = (void *) tmp;
					k++;
				}
			}
		}
	}

	/* Allocation and initialization of Statistics block */
	size = sizeof(struct stat_block);
	mac_control->stats_mem = pci_alloc_consistent
	    (nic->pdev, size, &mac_control->stats_mem_phy);

	if (!mac_control->stats_mem) {
		/*
		 * In case of failure, free_shared_mem() is called, which
		 * should free any memory that was alloced till the
		 * failure happened.
		 */
		return -ENOMEM;
	}
	mem_allocated += size;
	mac_control->stats_mem_sz = size;

	tmp_v_addr = mac_control->stats_mem;
	mac_control->stats_info = (struct stat_block *) tmp_v_addr;
	memset(tmp_v_addr, 0, size);
	/* NOTE(review): tmp_p_addr here still holds the DMA address of the
	 * last Rx block linked above, not the stats block's — the printed
	 * value looks misleading/stale. */
	DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
		  (unsigned long long) tmp_p_addr);
	mac_control->stats_info->sw_stat.mem_allocated += mem_allocated;
	return SUCCESS;
}
946
20346722
K
947/**
948 * free_shared_mem - Free the allocated Memory
1da177e4
LT
949 * @nic: Device private variable.
950 * Description: This function is to free all memory locations allocated by
951 * the init_shared_mem() function and return it to the kernel.
952 */
953
954static void free_shared_mem(struct s2io_nic *nic)
955{
956 int i, j, blk_cnt, size;
957 void *tmp_v_addr;
958 dma_addr_t tmp_p_addr;
1ee6dd77 959 struct mac_info *mac_control;
1da177e4
LT
960 struct config_param *config;
961 int lst_size, lst_per_page;
8910b49f 962 struct net_device *dev;
491976b2 963 int page_num = 0;
1da177e4
LT
964
965 if (!nic)
966 return;
967
8910b49f
MG
968 dev = nic->dev;
969
1da177e4
LT
970 mac_control = &nic->mac_control;
971 config = &nic->config;
972
1ee6dd77 973 lst_size = (sizeof(struct TxD) * config->max_txds);
1da177e4
LT
974 lst_per_page = PAGE_SIZE / lst_size;
975
976 for (i = 0; i < config->tx_fifo_num; i++) {
491976b2
SH
977 page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
978 lst_per_page);
1da177e4
LT
979 for (j = 0; j < page_num; j++) {
980 int mem_blks = (j * lst_per_page);
776bd20f 981 if (!mac_control->fifos[i].list_info)
6aa20a22 982 return;
776bd20f 983 if (!mac_control->fifos[i].list_info[mem_blks].
984 list_virt_addr)
1da177e4
LT
985 break;
986 pci_free_consistent(nic->pdev, PAGE_SIZE,
20346722
K
987 mac_control->fifos[i].
988 list_info[mem_blks].
1da177e4 989 list_virt_addr,
20346722
K
990 mac_control->fifos[i].
991 list_info[mem_blks].
1da177e4 992 list_phy_addr);
8a4bdbaa 993 nic->mac_control.stats_info->sw_stat.mem_freed
491976b2 994 += PAGE_SIZE;
1da177e4 995 }
776bd20f 996 /* If we got a zero DMA address during allocation,
997 * free the page now
998 */
999 if (mac_control->zerodma_virt_addr) {
1000 pci_free_consistent(nic->pdev, PAGE_SIZE,
1001 mac_control->zerodma_virt_addr,
1002 (dma_addr_t)0);
6aa20a22 1003 DBG_PRINT(INIT_DBG,
6b4d617d
AM
1004 "%s: Freeing TxDL with zero DMA addr. ",
1005 dev->name);
1006 DBG_PRINT(INIT_DBG, "Virtual address %p\n",
1007 mac_control->zerodma_virt_addr);
8a4bdbaa 1008 nic->mac_control.stats_info->sw_stat.mem_freed
491976b2 1009 += PAGE_SIZE;
776bd20f 1010 }
20346722 1011 kfree(mac_control->fifos[i].list_info);
8a4bdbaa 1012 nic->mac_control.stats_info->sw_stat.mem_freed +=
491976b2 1013 (nic->config.tx_cfg[i].fifo_len *sizeof(struct list_info_hold));
1da177e4
LT
1014 }
1015
1da177e4 1016 size = SIZE_OF_BLOCK;
1da177e4 1017 for (i = 0; i < config->rx_ring_num; i++) {
20346722 1018 blk_cnt = mac_control->rings[i].block_count;
1da177e4 1019 for (j = 0; j < blk_cnt; j++) {
20346722
K
1020 tmp_v_addr = mac_control->rings[i].rx_blocks[j].
1021 block_virt_addr;
1022 tmp_p_addr = mac_control->rings[i].rx_blocks[j].
1023 block_dma_addr;
1da177e4
LT
1024 if (tmp_v_addr == NULL)
1025 break;
1026 pci_free_consistent(nic->pdev, size,
1027 tmp_v_addr, tmp_p_addr);
491976b2 1028 nic->mac_control.stats_info->sw_stat.mem_freed += size;
da6971d8 1029 kfree(mac_control->rings[i].rx_blocks[j].rxds);
8a4bdbaa 1030 nic->mac_control.stats_info->sw_stat.mem_freed +=
491976b2 1031 ( sizeof(struct rxd_info)* rxd_count[nic->rxd_mode]);
1da177e4
LT
1032 }
1033 }
1034
6d517a27 1035 if (nic->rxd_mode == RXD_MODE_3B) {
da6971d8
AR
1036 /* Freeing buffer storage addresses in 2BUFF mode. */
1037 for (i = 0; i < config->rx_ring_num; i++) {
1038 blk_cnt = config->rx_cfg[i].num_rxd /
1039 (rxd_count[nic->rxd_mode] + 1);
1040 for (j = 0; j < blk_cnt; j++) {
1041 int k = 0;
1042 if (!mac_control->rings[i].ba[j])
1043 continue;
1044 while (k != rxd_count[nic->rxd_mode]) {
1ee6dd77 1045 struct buffAdd *ba =
da6971d8
AR
1046 &mac_control->rings[i].ba[j][k];
1047 kfree(ba->ba_0_org);
491976b2
SH
1048 nic->mac_control.stats_info->sw_stat.\
1049 mem_freed += (BUF0_LEN + ALIGN_SIZE);
da6971d8 1050 kfree(ba->ba_1_org);
491976b2
SH
1051 nic->mac_control.stats_info->sw_stat.\
1052 mem_freed += (BUF1_LEN + ALIGN_SIZE);
da6971d8
AR
1053 k++;
1054 }
1055 kfree(mac_control->rings[i].ba[j]);
9caab458
SS
1056 nic->mac_control.stats_info->sw_stat.mem_freed +=
1057 (sizeof(struct buffAdd) *
1058 (rxd_count[nic->rxd_mode] + 1));
1da177e4 1059 }
da6971d8 1060 kfree(mac_control->rings[i].ba);
8a4bdbaa 1061 nic->mac_control.stats_info->sw_stat.mem_freed +=
491976b2 1062 (sizeof(struct buffAdd *) * blk_cnt);
1da177e4 1063 }
1da177e4 1064 }
1da177e4 1065
2fda096d
SR
1066 for (i = 0; i < nic->config.tx_fifo_num; i++) {
1067 if (mac_control->fifos[i].ufo_in_band_v) {
1068 nic->mac_control.stats_info->sw_stat.mem_freed
1069 += (config->tx_cfg[i].fifo_len * sizeof(u64));
1070 kfree(mac_control->fifos[i].ufo_in_band_v);
1071 }
1072 }
1073
1da177e4 1074 if (mac_control->stats_mem) {
2fda096d
SR
1075 nic->mac_control.stats_info->sw_stat.mem_freed +=
1076 mac_control->stats_mem_sz;
1da177e4
LT
1077 pci_free_consistent(nic->pdev,
1078 mac_control->stats_mem_sz,
1079 mac_control->stats_mem,
1080 mac_control->stats_mem_phy);
491976b2 1081 }
1da177e4
LT
1082}
1083
541ae68f
K
1084/**
1085 * s2io_verify_pci_mode -
1086 */
1087
1ee6dd77 1088static int s2io_verify_pci_mode(struct s2io_nic *nic)
541ae68f 1089{
1ee6dd77 1090 struct XENA_dev_config __iomem *bar0 = nic->bar0;
541ae68f
K
1091 register u64 val64 = 0;
1092 int mode;
1093
1094 val64 = readq(&bar0->pci_mode);
1095 mode = (u8)GET_PCI_MODE(val64);
1096
1097 if ( val64 & PCI_MODE_UNKNOWN_MODE)
1098 return -1; /* Unknown PCI mode */
1099 return mode;
1100}
1101
c92ca04b
AR
1102#define NEC_VENID 0x1033
1103#define NEC_DEVID 0x0125
1104static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
1105{
1106 struct pci_dev *tdev = NULL;
26d36b64
AC
1107 while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
1108 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
c92ca04b 1109 if (tdev->bus == s2io_pdev->bus->parent)
26d36b64 1110 pci_dev_put(tdev);
c92ca04b
AR
1111 return 1;
1112 }
1113 }
1114 return 0;
1115}
541ae68f 1116
7b32a312 1117static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
541ae68f
K
1118/**
1119 * s2io_print_pci_mode -
1120 */
1ee6dd77 1121static int s2io_print_pci_mode(struct s2io_nic *nic)
541ae68f 1122{
1ee6dd77 1123 struct XENA_dev_config __iomem *bar0 = nic->bar0;
541ae68f
K
1124 register u64 val64 = 0;
1125 int mode;
1126 struct config_param *config = &nic->config;
1127
1128 val64 = readq(&bar0->pci_mode);
1129 mode = (u8)GET_PCI_MODE(val64);
1130
1131 if ( val64 & PCI_MODE_UNKNOWN_MODE)
1132 return -1; /* Unknown PCI mode */
1133
c92ca04b
AR
1134 config->bus_speed = bus_speed[mode];
1135
1136 if (s2io_on_nec_bridge(nic->pdev)) {
1137 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
1138 nic->dev->name);
1139 return mode;
1140 }
1141
541ae68f
K
1142 if (val64 & PCI_MODE_32_BITS) {
1143 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
1144 } else {
1145 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
1146 }
1147
1148 switch(mode) {
1149 case PCI_MODE_PCI_33:
1150 DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
541ae68f
K
1151 break;
1152 case PCI_MODE_PCI_66:
1153 DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
541ae68f
K
1154 break;
1155 case PCI_MODE_PCIX_M1_66:
1156 DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
541ae68f
K
1157 break;
1158 case PCI_MODE_PCIX_M1_100:
1159 DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
541ae68f
K
1160 break;
1161 case PCI_MODE_PCIX_M1_133:
1162 DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
541ae68f
K
1163 break;
1164 case PCI_MODE_PCIX_M2_66:
1165 DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
541ae68f
K
1166 break;
1167 case PCI_MODE_PCIX_M2_100:
1168 DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
541ae68f
K
1169 break;
1170 case PCI_MODE_PCIX_M2_133:
1171 DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
541ae68f
K
1172 break;
1173 default:
1174 return -1; /* Unsupported bus speed */
1175 }
1176
1177 return mode;
1178}
1179
b7c5678f
RV
1180/**
1181 * init_tti - Initialization transmit traffic interrupt scheme
1182 * @nic: device private variable
1183 * @link: link status (UP/DOWN) used to enable/disable continuous
1184 * transmit interrupts
1185 * Description: The function configures transmit traffic interrupts
1186 * Return Value: SUCCESS on success and
1187 * '-1' on failure
1188 */
1189
0d66afe7 1190static int init_tti(struct s2io_nic *nic, int link)
b7c5678f
RV
1191{
1192 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1193 register u64 val64 = 0;
1194 int i;
1195 struct config_param *config;
1196
1197 config = &nic->config;
1198
1199 for (i = 0; i < config->tx_fifo_num; i++) {
1200 /*
1201 * TTI Initialization. Default Tx timer gets us about
1202 * 250 interrupts per sec. Continuous interrupts are enabled
1203 * by default.
1204 */
1205 if (nic->device_type == XFRAME_II_DEVICE) {
1206 int count = (nic->config.bus_speed * 125)/2;
1207 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1208 } else
1209 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1210
1211 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1212 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1213 TTI_DATA1_MEM_TX_URNG_C(0x30) |
1214 TTI_DATA1_MEM_TX_TIMER_AC_EN;
1215
1216 if (use_continuous_tx_intrs && (link == LINK_UP))
1217 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1218 writeq(val64, &bar0->tti_data1_mem);
1219
1220 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1221 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1222 TTI_DATA2_MEM_TX_UFC_C(0x40) |
1223 TTI_DATA2_MEM_TX_UFC_D(0x80);
1224
1225 writeq(val64, &bar0->tti_data2_mem);
1226
1227 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD |
1228 TTI_CMD_MEM_OFFSET(i);
1229 writeq(val64, &bar0->tti_command_mem);
1230
1231 if (wait_for_cmd_complete(&bar0->tti_command_mem,
1232 TTI_CMD_MEM_STROBE_NEW_CMD, S2IO_BIT_RESET) != SUCCESS)
1233 return FAILURE;
1234 }
1235
1236 return SUCCESS;
1237}
1238
20346722
K
1239/**
1240 * init_nic - Initialization of hardware
b7c5678f 1241 * @nic: device private variable
20346722
K
1242 * Description: The function sequentially configures every block
1243 * of the H/W from their reset values.
1244 * Return Value: SUCCESS on success and
1da177e4
LT
1245 * '-1' on failure (endian settings incorrect).
1246 */
1247
1248static int init_nic(struct s2io_nic *nic)
1249{
1ee6dd77 1250 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1da177e4
LT
1251 struct net_device *dev = nic->dev;
1252 register u64 val64 = 0;
1253 void __iomem *add;
1254 u32 time;
1255 int i, j;
1ee6dd77 1256 struct mac_info *mac_control;
1da177e4 1257 struct config_param *config;
c92ca04b 1258 int dtx_cnt = 0;
1da177e4 1259 unsigned long long mem_share;
20346722 1260 int mem_size;
1da177e4
LT
1261
1262 mac_control = &nic->mac_control;
1263 config = &nic->config;
1264
5e25b9dd 1265 /* to set the swapper controle on the card */
20346722 1266 if(s2io_set_swapper(nic)) {
1da177e4 1267 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
9f74ffde 1268 return -EIO;
1da177e4
LT
1269 }
1270
541ae68f
K
1271 /*
1272 * Herc requires EOI to be removed from reset before XGXS, so..
1273 */
1274 if (nic->device_type & XFRAME_II_DEVICE) {
1275 val64 = 0xA500000000ULL;
1276 writeq(val64, &bar0->sw_reset);
1277 msleep(500);
1278 val64 = readq(&bar0->sw_reset);
1279 }
1280
1da177e4
LT
1281 /* Remove XGXS from reset state */
1282 val64 = 0;
1283 writeq(val64, &bar0->sw_reset);
1da177e4 1284 msleep(500);
20346722 1285 val64 = readq(&bar0->sw_reset);
1da177e4 1286
7962024e
SH
1287 /* Ensure that it's safe to access registers by checking
1288 * RIC_RUNNING bit is reset. Check is valid only for XframeII.
1289 */
1290 if (nic->device_type == XFRAME_II_DEVICE) {
1291 for (i = 0; i < 50; i++) {
1292 val64 = readq(&bar0->adapter_status);
1293 if (!(val64 & ADAPTER_STATUS_RIC_RUNNING))
1294 break;
1295 msleep(10);
1296 }
1297 if (i == 50)
1298 return -ENODEV;
1299 }
1300
1da177e4
LT
1301 /* Enable Receiving broadcasts */
1302 add = &bar0->mac_cfg;
1303 val64 = readq(&bar0->mac_cfg);
1304 val64 |= MAC_RMAC_BCAST_ENABLE;
1305 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1306 writel((u32) val64, add);
1307 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1308 writel((u32) (val64 >> 32), (add + 4));
1309
1310 /* Read registers in all blocks */
1311 val64 = readq(&bar0->mac_int_mask);
1312 val64 = readq(&bar0->mc_int_mask);
1313 val64 = readq(&bar0->xgxs_int_mask);
1314
1315 /* Set MTU */
1316 val64 = dev->mtu;
1317 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1318
541ae68f
K
1319 if (nic->device_type & XFRAME_II_DEVICE) {
1320 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
303bcb4b 1321 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1da177e4 1322 &bar0->dtx_control, UF);
541ae68f
K
1323 if (dtx_cnt & 0x1)
1324 msleep(1); /* Necessary!! */
1da177e4
LT
1325 dtx_cnt++;
1326 }
541ae68f 1327 } else {
c92ca04b
AR
1328 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1329 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1330 &bar0->dtx_control, UF);
1331 val64 = readq(&bar0->dtx_control);
1332 dtx_cnt++;
1da177e4
LT
1333 }
1334 }
1335
1336 /* Tx DMA Initialization */
1337 val64 = 0;
1338 writeq(val64, &bar0->tx_fifo_partition_0);
1339 writeq(val64, &bar0->tx_fifo_partition_1);
1340 writeq(val64, &bar0->tx_fifo_partition_2);
1341 writeq(val64, &bar0->tx_fifo_partition_3);
1342
1343
1344 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1345 val64 |=
b7c5678f 1346 vBIT(config->tx_cfg[i].fifo_len - 1, ((j * 32) + 19),
1da177e4 1347 13) | vBIT(config->tx_cfg[i].fifo_priority,
b7c5678f 1348 ((j * 32) + 5), 3);
1da177e4
LT
1349
1350 if (i == (config->tx_fifo_num - 1)) {
1351 if (i % 2 == 0)
1352 i++;
1353 }
1354
1355 switch (i) {
1356 case 1:
1357 writeq(val64, &bar0->tx_fifo_partition_0);
1358 val64 = 0;
b7c5678f 1359 j = 0;
1da177e4
LT
1360 break;
1361 case 3:
1362 writeq(val64, &bar0->tx_fifo_partition_1);
1363 val64 = 0;
b7c5678f 1364 j = 0;
1da177e4
LT
1365 break;
1366 case 5:
1367 writeq(val64, &bar0->tx_fifo_partition_2);
1368 val64 = 0;
b7c5678f 1369 j = 0;
1da177e4
LT
1370 break;
1371 case 7:
1372 writeq(val64, &bar0->tx_fifo_partition_3);
b7c5678f
RV
1373 val64 = 0;
1374 j = 0;
1375 break;
1376 default:
1377 j++;
1da177e4
LT
1378 break;
1379 }
1380 }
1381
5e25b9dd
K
1382 /*
1383 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1384 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1385 */
541ae68f 1386 if ((nic->device_type == XFRAME_I_DEVICE) &&
44c10138 1387 (nic->pdev->revision < 4))
5e25b9dd
K
1388 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1389
1da177e4
LT
1390 val64 = readq(&bar0->tx_fifo_partition_0);
1391 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1392 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1393
20346722
K
1394 /*
1395 * Initialization of Tx_PA_CONFIG register to ignore packet
1da177e4
LT
1396 * integrity checking.
1397 */
1398 val64 = readq(&bar0->tx_pa_cfg);
1399 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1400 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1401 writeq(val64, &bar0->tx_pa_cfg);
1402
1403 /* Rx DMA intialization. */
1404 val64 = 0;
1405 for (i = 0; i < config->rx_ring_num; i++) {
1406 val64 |=
1407 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1408 3);
1409 }
1410 writeq(val64, &bar0->rx_queue_priority);
1411
20346722
K
1412 /*
1413 * Allocating equal share of memory to all the
1da177e4
LT
1414 * configured Rings.
1415 */
1416 val64 = 0;
541ae68f
K
1417 if (nic->device_type & XFRAME_II_DEVICE)
1418 mem_size = 32;
1419 else
1420 mem_size = 64;
1421
1da177e4
LT
1422 for (i = 0; i < config->rx_ring_num; i++) {
1423 switch (i) {
1424 case 0:
20346722
K
1425 mem_share = (mem_size / config->rx_ring_num +
1426 mem_size % config->rx_ring_num);
1da177e4
LT
1427 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1428 continue;
1429 case 1:
20346722 1430 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1431 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1432 continue;
1433 case 2:
20346722 1434 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1435 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1436 continue;
1437 case 3:
20346722 1438 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1439 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1440 continue;
1441 case 4:
20346722 1442 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1443 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1444 continue;
1445 case 5:
20346722 1446 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1447 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1448 continue;
1449 case 6:
20346722 1450 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1451 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1452 continue;
1453 case 7:
20346722 1454 mem_share = (mem_size / config->rx_ring_num);
1da177e4
LT
1455 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1456 continue;
1457 }
1458 }
1459 writeq(val64, &bar0->rx_queue_cfg);
1460
20346722 1461 /*
5e25b9dd 1462 * Filling Tx round robin registers
b7c5678f 1463 * as per the number of FIFOs for equal scheduling priority
1da177e4 1464 */
5e25b9dd
K
1465 switch (config->tx_fifo_num) {
1466 case 1:
b7c5678f 1467 val64 = 0x0;
5e25b9dd
K
1468 writeq(val64, &bar0->tx_w_round_robin_0);
1469 writeq(val64, &bar0->tx_w_round_robin_1);
1470 writeq(val64, &bar0->tx_w_round_robin_2);
1471 writeq(val64, &bar0->tx_w_round_robin_3);
1472 writeq(val64, &bar0->tx_w_round_robin_4);
1473 break;
1474 case 2:
b7c5678f 1475 val64 = 0x0001000100010001ULL;
5e25b9dd 1476 writeq(val64, &bar0->tx_w_round_robin_0);
5e25b9dd 1477 writeq(val64, &bar0->tx_w_round_robin_1);
5e25b9dd 1478 writeq(val64, &bar0->tx_w_round_robin_2);
5e25b9dd 1479 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1480 val64 = 0x0001000100000000ULL;
5e25b9dd
K
1481 writeq(val64, &bar0->tx_w_round_robin_4);
1482 break;
1483 case 3:
b7c5678f 1484 val64 = 0x0001020001020001ULL;
5e25b9dd 1485 writeq(val64, &bar0->tx_w_round_robin_0);
b7c5678f 1486 val64 = 0x0200010200010200ULL;
5e25b9dd 1487 writeq(val64, &bar0->tx_w_round_robin_1);
b7c5678f 1488 val64 = 0x0102000102000102ULL;
5e25b9dd 1489 writeq(val64, &bar0->tx_w_round_robin_2);
b7c5678f 1490 val64 = 0x0001020001020001ULL;
5e25b9dd 1491 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1492 val64 = 0x0200010200000000ULL;
5e25b9dd
K
1493 writeq(val64, &bar0->tx_w_round_robin_4);
1494 break;
1495 case 4:
b7c5678f 1496 val64 = 0x0001020300010203ULL;
5e25b9dd 1497 writeq(val64, &bar0->tx_w_round_robin_0);
5e25b9dd 1498 writeq(val64, &bar0->tx_w_round_robin_1);
5e25b9dd 1499 writeq(val64, &bar0->tx_w_round_robin_2);
5e25b9dd 1500 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1501 val64 = 0x0001020300000000ULL;
5e25b9dd
K
1502 writeq(val64, &bar0->tx_w_round_robin_4);
1503 break;
1504 case 5:
b7c5678f 1505 val64 = 0x0001020304000102ULL;
5e25b9dd 1506 writeq(val64, &bar0->tx_w_round_robin_0);
b7c5678f 1507 val64 = 0x0304000102030400ULL;
5e25b9dd 1508 writeq(val64, &bar0->tx_w_round_robin_1);
b7c5678f 1509 val64 = 0x0102030400010203ULL;
5e25b9dd 1510 writeq(val64, &bar0->tx_w_round_robin_2);
b7c5678f 1511 val64 = 0x0400010203040001ULL;
5e25b9dd 1512 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1513 val64 = 0x0203040000000000ULL;
5e25b9dd
K
1514 writeq(val64, &bar0->tx_w_round_robin_4);
1515 break;
1516 case 6:
b7c5678f 1517 val64 = 0x0001020304050001ULL;
5e25b9dd 1518 writeq(val64, &bar0->tx_w_round_robin_0);
b7c5678f 1519 val64 = 0x0203040500010203ULL;
5e25b9dd 1520 writeq(val64, &bar0->tx_w_round_robin_1);
b7c5678f 1521 val64 = 0x0405000102030405ULL;
5e25b9dd 1522 writeq(val64, &bar0->tx_w_round_robin_2);
b7c5678f 1523 val64 = 0x0001020304050001ULL;
5e25b9dd 1524 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1525 val64 = 0x0203040500000000ULL;
5e25b9dd
K
1526 writeq(val64, &bar0->tx_w_round_robin_4);
1527 break;
1528 case 7:
b7c5678f 1529 val64 = 0x0001020304050600ULL;
5e25b9dd 1530 writeq(val64, &bar0->tx_w_round_robin_0);
b7c5678f 1531 val64 = 0x0102030405060001ULL;
5e25b9dd 1532 writeq(val64, &bar0->tx_w_round_robin_1);
b7c5678f 1533 val64 = 0x0203040506000102ULL;
5e25b9dd 1534 writeq(val64, &bar0->tx_w_round_robin_2);
b7c5678f 1535 val64 = 0x0304050600010203ULL;
5e25b9dd 1536 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1537 val64 = 0x0405060000000000ULL;
5e25b9dd
K
1538 writeq(val64, &bar0->tx_w_round_robin_4);
1539 break;
1540 case 8:
b7c5678f 1541 val64 = 0x0001020304050607ULL;
5e25b9dd 1542 writeq(val64, &bar0->tx_w_round_robin_0);
5e25b9dd 1543 writeq(val64, &bar0->tx_w_round_robin_1);
5e25b9dd 1544 writeq(val64, &bar0->tx_w_round_robin_2);
5e25b9dd 1545 writeq(val64, &bar0->tx_w_round_robin_3);
b7c5678f 1546 val64 = 0x0001020300000000ULL;
5e25b9dd
K
1547 writeq(val64, &bar0->tx_w_round_robin_4);
1548 break;
1549 }
1550
b41477f3 1551 /* Enable all configured Tx FIFO partitions */
5d3213cc
AR
1552 val64 = readq(&bar0->tx_fifo_partition_0);
1553 val64 |= (TX_FIFO_PARTITION_EN);
1554 writeq(val64, &bar0->tx_fifo_partition_0);
1555
5e25b9dd
K
1556 /* Filling the Rx round robin registers as per the
1557 * number of Rings and steering based on QoS.
1558 */
1559 switch (config->rx_ring_num) {
1560 case 1:
1561 val64 = 0x8080808080808080ULL;
1562 writeq(val64, &bar0->rts_qos_steering);
1563 break;
1564 case 2:
1565 val64 = 0x0000010000010000ULL;
1566 writeq(val64, &bar0->rx_w_round_robin_0);
1567 val64 = 0x0100000100000100ULL;
1568 writeq(val64, &bar0->rx_w_round_robin_1);
1569 val64 = 0x0001000001000001ULL;
1570 writeq(val64, &bar0->rx_w_round_robin_2);
1571 val64 = 0x0000010000010000ULL;
1572 writeq(val64, &bar0->rx_w_round_robin_3);
1573 val64 = 0x0100000000000000ULL;
1574 writeq(val64, &bar0->rx_w_round_robin_4);
1575
1576 val64 = 0x8080808040404040ULL;
1577 writeq(val64, &bar0->rts_qos_steering);
1578 break;
1579 case 3:
1580 val64 = 0x0001000102000001ULL;
1581 writeq(val64, &bar0->rx_w_round_robin_0);
1582 val64 = 0x0001020000010001ULL;
1583 writeq(val64, &bar0->rx_w_round_robin_1);
1584 val64 = 0x0200000100010200ULL;
1585 writeq(val64, &bar0->rx_w_round_robin_2);
1586 val64 = 0x0001000102000001ULL;
1587 writeq(val64, &bar0->rx_w_round_robin_3);
1588 val64 = 0x0001020000000000ULL;
1589 writeq(val64, &bar0->rx_w_round_robin_4);
1590
1591 val64 = 0x8080804040402020ULL;
1592 writeq(val64, &bar0->rts_qos_steering);
1593 break;
1594 case 4:
1595 val64 = 0x0001020300010200ULL;
1596 writeq(val64, &bar0->rx_w_round_robin_0);
1597 val64 = 0x0100000102030001ULL;
1598 writeq(val64, &bar0->rx_w_round_robin_1);
1599 val64 = 0x0200010000010203ULL;
1600 writeq(val64, &bar0->rx_w_round_robin_2);
6aa20a22 1601 val64 = 0x0001020001000001ULL;
5e25b9dd
K
1602 writeq(val64, &bar0->rx_w_round_robin_3);
1603 val64 = 0x0203000100000000ULL;
1604 writeq(val64, &bar0->rx_w_round_robin_4);
1605
1606 val64 = 0x8080404020201010ULL;
1607 writeq(val64, &bar0->rts_qos_steering);
1608 break;
1609 case 5:
1610 val64 = 0x0001000203000102ULL;
1611 writeq(val64, &bar0->rx_w_round_robin_0);
1612 val64 = 0x0001020001030004ULL;
1613 writeq(val64, &bar0->rx_w_round_robin_1);
1614 val64 = 0x0001000203000102ULL;
1615 writeq(val64, &bar0->rx_w_round_robin_2);
1616 val64 = 0x0001020001030004ULL;
1617 writeq(val64, &bar0->rx_w_round_robin_3);
1618 val64 = 0x0001000000000000ULL;
1619 writeq(val64, &bar0->rx_w_round_robin_4);
1620
1621 val64 = 0x8080404020201008ULL;
1622 writeq(val64, &bar0->rts_qos_steering);
1623 break;
1624 case 6:
1625 val64 = 0x0001020304000102ULL;
1626 writeq(val64, &bar0->rx_w_round_robin_0);
1627 val64 = 0x0304050001020001ULL;
1628 writeq(val64, &bar0->rx_w_round_robin_1);
1629 val64 = 0x0203000100000102ULL;
1630 writeq(val64, &bar0->rx_w_round_robin_2);
1631 val64 = 0x0304000102030405ULL;
1632 writeq(val64, &bar0->rx_w_round_robin_3);
1633 val64 = 0x0001000200000000ULL;
1634 writeq(val64, &bar0->rx_w_round_robin_4);
1635
1636 val64 = 0x8080404020100804ULL;
1637 writeq(val64, &bar0->rts_qos_steering);
1638 break;
1639 case 7:
1640 val64 = 0x0001020001020300ULL;
1641 writeq(val64, &bar0->rx_w_round_robin_0);
1642 val64 = 0x0102030400010203ULL;
1643 writeq(val64, &bar0->rx_w_round_robin_1);
1644 val64 = 0x0405060001020001ULL;
1645 writeq(val64, &bar0->rx_w_round_robin_2);
1646 val64 = 0x0304050000010200ULL;
1647 writeq(val64, &bar0->rx_w_round_robin_3);
1648 val64 = 0x0102030000000000ULL;
1649 writeq(val64, &bar0->rx_w_round_robin_4);
1650
1651 val64 = 0x8080402010080402ULL;
1652 writeq(val64, &bar0->rts_qos_steering);
1653 break;
1654 case 8:
1655 val64 = 0x0001020300040105ULL;
1656 writeq(val64, &bar0->rx_w_round_robin_0);
1657 val64 = 0x0200030106000204ULL;
1658 writeq(val64, &bar0->rx_w_round_robin_1);
1659 val64 = 0x0103000502010007ULL;
1660 writeq(val64, &bar0->rx_w_round_robin_2);
1661 val64 = 0x0304010002060500ULL;
1662 writeq(val64, &bar0->rx_w_round_robin_3);
1663 val64 = 0x0103020400000000ULL;
1664 writeq(val64, &bar0->rx_w_round_robin_4);
1665
1666 val64 = 0x8040201008040201ULL;
1667 writeq(val64, &bar0->rts_qos_steering);
1668 break;
1669 }
1da177e4
LT
1670
1671 /* UDP Fix */
1672 val64 = 0;
20346722 1673 for (i = 0; i < 8; i++)
1da177e4
LT
1674 writeq(val64, &bar0->rts_frm_len_n[i]);
1675
5e25b9dd
K
1676 /* Set the default rts frame length for the rings configured */
1677 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1678 for (i = 0 ; i < config->rx_ring_num ; i++)
1679 writeq(val64, &bar0->rts_frm_len_n[i]);
1680
1681 /* Set the frame length for the configured rings
1682 * desired by the user
1683 */
1684 for (i = 0; i < config->rx_ring_num; i++) {
1685 /* If rts_frm_len[i] == 0 then it is assumed that user not
1686 * specified frame length steering.
1687 * If the user provides the frame length then program
1688 * the rts_frm_len register for those values or else
1689 * leave it as it is.
1690 */
1691 if (rts_frm_len[i] != 0) {
1692 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1693 &bar0->rts_frm_len_n[i]);
1694 }
1695 }
8a4bdbaa 1696
9fc93a41
SS
1697 /* Disable differentiated services steering logic */
1698 for (i = 0; i < 64; i++) {
1699 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1700 DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
1701 dev->name);
1702 DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
9f74ffde 1703 return -ENODEV;
9fc93a41
SS
1704 }
1705 }
1706
20346722 1707 /* Program statistics memory */
1da177e4 1708 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1da177e4 1709
541ae68f
K
1710 if (nic->device_type == XFRAME_II_DEVICE) {
1711 val64 = STAT_BC(0x320);
1712 writeq(val64, &bar0->stat_byte_cnt);
1713 }
1714
20346722 1715 /*
1da177e4
LT
1716 * Initializing the sampling rate for the device to calculate the
1717 * bandwidth utilization.
1718 */
1719 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1720 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1721 writeq(val64, &bar0->mac_link_util);
1722
20346722
K
1723 /*
1724 * Initializing the Transmit and Receive Traffic Interrupt
1da177e4
LT
1725 * Scheme.
1726 */
1da177e4 1727
b7c5678f
RV
1728 /* Initialize TTI */
1729 if (SUCCESS != init_tti(nic, nic->last_link_state))
1730 return -ENODEV;
1da177e4 1731
8a4bdbaa
SS
1732 /* RTI Initialization */
1733 if (nic->device_type == XFRAME_II_DEVICE) {
541ae68f 1734 /*
8a4bdbaa
SS
1735 * Programmed to generate Apprx 500 Intrs per
1736 * second
1737 */
1738 int count = (nic->config.bus_speed * 125)/4;
1739 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1740 } else
1741 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1742 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1743 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1744 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1745
1746 writeq(val64, &bar0->rti_data1_mem);
1747
1748 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1749 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1750 if (nic->config.intr_type == MSI_X)
1751 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1752 RTI_DATA2_MEM_RX_UFC_D(0x40));
1753 else
1754 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1755 RTI_DATA2_MEM_RX_UFC_D(0x80));
1756 writeq(val64, &bar0->rti_data2_mem);
1da177e4 1757
8a4bdbaa
SS
1758 for (i = 0; i < config->rx_ring_num; i++) {
1759 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1760 | RTI_CMD_MEM_OFFSET(i);
1761 writeq(val64, &bar0->rti_command_mem);
1da177e4 1762
8a4bdbaa
SS
1763 /*
1764 * Once the operation completes, the Strobe bit of the
1765 * command register will be reset. We poll for this
1766 * particular condition. We wait for a maximum of 500ms
1767 * for the operation to complete, if it's not complete
1768 * by then we return error.
1769 */
1770 time = 0;
1771 while (TRUE) {
1772 val64 = readq(&bar0->rti_command_mem);
1773 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD))
1774 break;
b6e3f982 1775
8a4bdbaa
SS
1776 if (time > 10) {
1777 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1778 dev->name);
9f74ffde 1779 return -ENODEV;
b6e3f982 1780 }
8a4bdbaa
SS
1781 time++;
1782 msleep(50);
1da177e4 1783 }
1da177e4
LT
1784 }
1785
20346722
K
1786 /*
1787 * Initializing proper values as Pause threshold into all
1da177e4
LT
1788 * the 8 Queues on Rx side.
1789 */
1790 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1791 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1792
1793 /* Disable RMAC PAD STRIPPING */
509a2671 1794 add = &bar0->mac_cfg;
1da177e4
LT
1795 val64 = readq(&bar0->mac_cfg);
1796 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1797 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1798 writel((u32) (val64), add);
1799 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1800 writel((u32) (val64 >> 32), (add + 4));
1801 val64 = readq(&bar0->mac_cfg);
1802
7d3d0439
RA
1803 /* Enable FCS stripping by adapter */
1804 add = &bar0->mac_cfg;
1805 val64 = readq(&bar0->mac_cfg);
1806 val64 |= MAC_CFG_RMAC_STRIP_FCS;
1807 if (nic->device_type == XFRAME_II_DEVICE)
1808 writeq(val64, &bar0->mac_cfg);
1809 else {
1810 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1811 writel((u32) (val64), add);
1812 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1813 writel((u32) (val64 >> 32), (add + 4));
1814 }
1815
20346722
K
1816 /*
1817 * Set the time value to be inserted in the pause frame
1da177e4
LT
1818 * generated by xena.
1819 */
1820 val64 = readq(&bar0->rmac_pause_cfg);
1821 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1822 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1823 writeq(val64, &bar0->rmac_pause_cfg);
1824
20346722 1825 /*
1da177e4
LT
1826 * Set the Threshold Limit for Generating the pause frame
1827 * If the amount of data in any Queue exceeds ratio of
1828 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1829 * pause frame is generated
1830 */
1831 val64 = 0;
1832 for (i = 0; i < 4; i++) {
1833 val64 |=
1834 (((u64) 0xFF00 | nic->mac_control.
1835 mc_pause_threshold_q0q3)
1836 << (i * 2 * 8));
1837 }
1838 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1839
1840 val64 = 0;
1841 for (i = 0; i < 4; i++) {
1842 val64 |=
1843 (((u64) 0xFF00 | nic->mac_control.
1844 mc_pause_threshold_q4q7)
1845 << (i * 2 * 8));
1846 }
1847 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1848
20346722
K
1849 /*
1850 * TxDMA will stop Read request if the number of read split has
1da177e4
LT
1851 * exceeded the limit pointed by shared_splits
1852 */
1853 val64 = readq(&bar0->pic_control);
1854 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1855 writeq(val64, &bar0->pic_control);
1856
863c11a9
AR
1857 if (nic->config.bus_speed == 266) {
1858 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1859 writeq(0x0, &bar0->read_retry_delay);
1860 writeq(0x0, &bar0->write_retry_delay);
1861 }
1862
541ae68f
K
1863 /*
1864 * Programming the Herc to split every write transaction
1865 * that does not start on an ADB to reduce disconnects.
1866 */
1867 if (nic->device_type == XFRAME_II_DEVICE) {
19a60522
SS
1868 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1869 MISC_LINK_STABILITY_PRD(3);
863c11a9
AR
1870 writeq(val64, &bar0->misc_control);
1871 val64 = readq(&bar0->pic_control2);
b7b5a128 1872 val64 &= ~(s2BIT(13)|s2BIT(14)|s2BIT(15));
863c11a9 1873 writeq(val64, &bar0->pic_control2);
541ae68f 1874 }
c92ca04b
AR
1875 if (strstr(nic->product_name, "CX4")) {
1876 val64 = TMAC_AVG_IPG(0x17);
1877 writeq(val64, &bar0->tmac_avg_ipg);
a371a07d
K
1878 }
1879
1da177e4
LT
1880 return SUCCESS;
1881}
a371a07d
K
1882#define LINK_UP_DOWN_INTERRUPT 1
1883#define MAC_RMAC_ERR_TIMER 2
1884
1ee6dd77 1885static int s2io_link_fault_indication(struct s2io_nic *nic)
a371a07d 1886{
eaae7f72 1887 if (nic->config.intr_type != INTA)
cc6e7c44 1888 return MAC_RMAC_ERR_TIMER;
a371a07d
K
1889 if (nic->device_type == XFRAME_II_DEVICE)
1890 return LINK_UP_DOWN_INTERRUPT;
1891 else
1892 return MAC_RMAC_ERR_TIMER;
1893}
8116f3cf 1894
9caab458
SS
1895/**
1896 * do_s2io_write_bits - update alarm bits in alarm register
1897 * @value: alarm bits
1898 * @flag: interrupt status
1899 * @addr: address value
1900 * Description: update alarm bits in alarm register
1901 * Return Value:
1902 * NONE.
1903 */
1904static void do_s2io_write_bits(u64 value, int flag, void __iomem *addr)
1905{
1906 u64 temp64;
1907
1908 temp64 = readq(addr);
1909
1910 if(flag == ENABLE_INTRS)
1911 temp64 &= ~((u64) value);
1912 else
1913 temp64 |= ((u64) value);
1914 writeq(temp64, addr);
1915}
1da177e4 1916
43b7c451 1917static void en_dis_err_alarms(struct s2io_nic *nic, u16 mask, int flag)
9caab458
SS
1918{
1919 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1920 register u64 gen_int_mask = 0;
1921
1922 if (mask & TX_DMA_INTR) {
1923
1924 gen_int_mask |= TXDMA_INT_M;
1925
1926 do_s2io_write_bits(TXDMA_TDA_INT | TXDMA_PFC_INT |
1927 TXDMA_PCC_INT | TXDMA_TTI_INT |
1928 TXDMA_LSO_INT | TXDMA_TPA_INT |
1929 TXDMA_SM_INT, flag, &bar0->txdma_int_mask);
1930
1931 do_s2io_write_bits(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM |
1932 PFC_MISC_0_ERR | PFC_MISC_1_ERR |
1933 PFC_PCIX_ERR | PFC_ECC_SG_ERR, flag,
1934 &bar0->pfc_err_mask);
1935
1936 do_s2io_write_bits(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
1937 TDA_SM1_ERR_ALARM | TDA_Fn_ECC_SG_ERR |
1938 TDA_PCIX_ERR, flag, &bar0->tda_err_mask);
1939
1940 do_s2io_write_bits(PCC_FB_ECC_DB_ERR | PCC_TXB_ECC_DB_ERR |
1941 PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM |
1942 PCC_N_SERR | PCC_6_COF_OV_ERR |
1943 PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR |
1944 PCC_7_LSO_OV_ERR | PCC_FB_ECC_SG_ERR |
1945 PCC_TXB_ECC_SG_ERR, flag, &bar0->pcc_err_mask);
1946
1947 do_s2io_write_bits(TTI_SM_ERR_ALARM | TTI_ECC_SG_ERR |
1948 TTI_ECC_DB_ERR, flag, &bar0->tti_err_mask);
1949
1950 do_s2io_write_bits(LSO6_ABORT | LSO7_ABORT |
1951 LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM |
1952 LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
1953 flag, &bar0->lso_err_mask);
1954
1955 do_s2io_write_bits(TPA_SM_ERR_ALARM | TPA_TX_FRM_DROP,
1956 flag, &bar0->tpa_err_mask);
1957
1958 do_s2io_write_bits(SM_SM_ERR_ALARM, flag, &bar0->sm_err_mask);
1959
1960 }
1961
1962 if (mask & TX_MAC_INTR) {
1963 gen_int_mask |= TXMAC_INT_M;
1964 do_s2io_write_bits(MAC_INT_STATUS_TMAC_INT, flag,
1965 &bar0->mac_int_mask);
1966 do_s2io_write_bits(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR |
1967 TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR |
1968 TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
1969 flag, &bar0->mac_tmac_err_mask);
1970 }
1971
1972 if (mask & TX_XGXS_INTR) {
1973 gen_int_mask |= TXXGXS_INT_M;
1974 do_s2io_write_bits(XGXS_INT_STATUS_TXGXS, flag,
1975 &bar0->xgxs_int_mask);
1976 do_s2io_write_bits(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR |
1977 TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
1978 flag, &bar0->xgxs_txgxs_err_mask);
1979 }
1980
1981 if (mask & RX_DMA_INTR) {
1982 gen_int_mask |= RXDMA_INT_M;
1983 do_s2io_write_bits(RXDMA_INT_RC_INT_M | RXDMA_INT_RPA_INT_M |
1984 RXDMA_INT_RDA_INT_M | RXDMA_INT_RTI_INT_M,
1985 flag, &bar0->rxdma_int_mask);
1986 do_s2io_write_bits(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR |
1987 RC_PRCn_SM_ERR_ALARM | RC_FTC_SM_ERR_ALARM |
1988 RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR |
1989 RC_RDA_FAIL_WR_Rn, flag, &bar0->rc_err_mask);
1990 do_s2io_write_bits(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn |
1991 PRC_PCI_AB_F_WR_Rn | PRC_PCI_DP_RD_Rn |
1992 PRC_PCI_DP_WR_Rn | PRC_PCI_DP_F_WR_Rn, flag,
1993 &bar0->prc_pcix_err_mask);
1994 do_s2io_write_bits(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR |
1995 RPA_ECC_SG_ERR | RPA_ECC_DB_ERR, flag,
1996 &bar0->rpa_err_mask);
1997 do_s2io_write_bits(RDA_RXDn_ECC_DB_ERR | RDA_FRM_ECC_DB_N_AERR |
1998 RDA_SM1_ERR_ALARM | RDA_SM0_ERR_ALARM |
1999 RDA_RXD_ECC_DB_SERR | RDA_RXDn_ECC_SG_ERR |
2000 RDA_FRM_ECC_SG_ERR | RDA_MISC_ERR|RDA_PCIX_ERR,
2001 flag, &bar0->rda_err_mask);
2002 do_s2io_write_bits(RTI_SM_ERR_ALARM |
2003 RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
2004 flag, &bar0->rti_err_mask);
2005 }
2006
2007 if (mask & RX_MAC_INTR) {
2008 gen_int_mask |= RXMAC_INT_M;
2009 do_s2io_write_bits(MAC_INT_STATUS_RMAC_INT, flag,
2010 &bar0->mac_int_mask);
2011 do_s2io_write_bits(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR |
2012 RMAC_UNUSED_INT | RMAC_SINGLE_ECC_ERR |
2013 RMAC_DOUBLE_ECC_ERR |
2014 RMAC_LINK_STATE_CHANGE_INT,
2015 flag, &bar0->mac_rmac_err_mask);
2016 }
2017
2018 if (mask & RX_XGXS_INTR)
2019 {
2020 gen_int_mask |= RXXGXS_INT_M;
2021 do_s2io_write_bits(XGXS_INT_STATUS_RXGXS, flag,
2022 &bar0->xgxs_int_mask);
2023 do_s2io_write_bits(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR, flag,
2024 &bar0->xgxs_rxgxs_err_mask);
2025 }
2026
2027 if (mask & MC_INTR) {
2028 gen_int_mask |= MC_INT_M;
2029 do_s2io_write_bits(MC_INT_MASK_MC_INT, flag, &bar0->mc_int_mask);
2030 do_s2io_write_bits(MC_ERR_REG_SM_ERR | MC_ERR_REG_ECC_ALL_SNG |
2031 MC_ERR_REG_ECC_ALL_DBL | PLL_LOCK_N, flag,
2032 &bar0->mc_err_mask);
2033 }
2034 nic->general_int_mask = gen_int_mask;
2035
2036 /* Remove this line when alarm interrupts are enabled */
2037 nic->general_int_mask = 0;
2038}
20346722
K
2039/**
2040 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1da177e4
LT
2041 * @nic: device private variable,
2042 * @mask: A mask indicating which Intr block must be modified and,
2043 * @flag: A flag indicating whether to enable or disable the Intrs.
2044 * Description: This function will either disable or enable the interrupts
20346722
K
2045 * depending on the flag argument. The mask argument can be used to
2046 * enable/disable any Intr block.
1da177e4
LT
2047 * Return Value: NONE.
2048 */
2049
2050static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
2051{
1ee6dd77 2052 struct XENA_dev_config __iomem *bar0 = nic->bar0;
9caab458
SS
2053 register u64 temp64 = 0, intr_mask = 0;
2054
2055 intr_mask = nic->general_int_mask;
1da177e4
LT
2056
2057 /* Top level interrupt classification */
2058 /* PIC Interrupts */
9caab458 2059 if (mask & TX_PIC_INTR) {
1da177e4 2060 /* Enable PIC Intrs in the general intr mask register */
9caab458 2061 intr_mask |= TXPIC_INT_M;
1da177e4 2062 if (flag == ENABLE_INTRS) {
20346722 2063 /*
a371a07d 2064 * If Hercules adapter enable GPIO otherwise
b41477f3 2065 * disable all PCIX, Flash, MDIO, IIC and GPIO
20346722
K
2066 * interrupts for now.
2067 * TODO
1da177e4 2068 */
a371a07d
K
2069 if (s2io_link_fault_indication(nic) ==
2070 LINK_UP_DOWN_INTERRUPT ) {
9caab458
SS
2071 do_s2io_write_bits(PIC_INT_GPIO, flag,
2072 &bar0->pic_int_mask);
2073 do_s2io_write_bits(GPIO_INT_MASK_LINK_UP, flag,
2074 &bar0->gpio_int_mask);
2075 } else
a371a07d 2076 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1da177e4 2077 } else if (flag == DISABLE_INTRS) {
20346722
K
2078 /*
2079 * Disable PIC Intrs in the general
2080 * intr mask register
1da177e4
LT
2081 */
2082 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1da177e4
LT
2083 }
2084 }
2085
1da177e4
LT
2086 /* Tx traffic interrupts */
2087 if (mask & TX_TRAFFIC_INTR) {
9caab458 2088 intr_mask |= TXTRAFFIC_INT_M;
1da177e4 2089 if (flag == ENABLE_INTRS) {
20346722 2090 /*
1da177e4 2091 * Enable all the Tx side interrupts
20346722 2092 * writing 0 Enables all 64 TX interrupt levels
1da177e4
LT
2093 */
2094 writeq(0x0, &bar0->tx_traffic_mask);
2095 } else if (flag == DISABLE_INTRS) {
20346722
K
2096 /*
2097 * Disable Tx Traffic Intrs in the general intr mask
1da177e4
LT
2098 * register.
2099 */
2100 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1da177e4
LT
2101 }
2102 }
2103
2104 /* Rx traffic interrupts */
2105 if (mask & RX_TRAFFIC_INTR) {
9caab458 2106 intr_mask |= RXTRAFFIC_INT_M;
1da177e4 2107 if (flag == ENABLE_INTRS) {
1da177e4
LT
2108 /* writing 0 Enables all 8 RX interrupt levels */
2109 writeq(0x0, &bar0->rx_traffic_mask);
2110 } else if (flag == DISABLE_INTRS) {
20346722
K
2111 /*
2112 * Disable Rx Traffic Intrs in the general intr mask
1da177e4
LT
2113 * register.
2114 */
2115 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1da177e4
LT
2116 }
2117 }
9caab458
SS
2118
2119 temp64 = readq(&bar0->general_int_mask);
2120 if (flag == ENABLE_INTRS)
2121 temp64 &= ~((u64) intr_mask);
2122 else
2123 temp64 = DISABLE_ALL_INTRS;
2124 writeq(temp64, &bar0->general_int_mask);
2125
2126 nic->general_int_mask = readq(&bar0->general_int_mask);
1da177e4
LT
2127}
2128
19a60522
SS
2129/**
2130 * verify_pcc_quiescent- Checks for PCC quiescent state
2131 * Return: 1 If PCC is quiescence
2132 * 0 If PCC is not quiescence
2133 */
1ee6dd77 2134static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
20346722 2135{
19a60522 2136 int ret = 0, herc;
1ee6dd77 2137 struct XENA_dev_config __iomem *bar0 = sp->bar0;
19a60522 2138 u64 val64 = readq(&bar0->adapter_status);
8a4bdbaa 2139
19a60522 2140 herc = (sp->device_type == XFRAME_II_DEVICE);
20346722
K
2141
2142 if (flag == FALSE) {
44c10138 2143 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
19a60522 2144 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
5e25b9dd 2145 ret = 1;
19a60522
SS
2146 } else {
2147 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
5e25b9dd 2148 ret = 1;
20346722
K
2149 }
2150 } else {
44c10138 2151 if ((!herc && (sp->pdev->revision >= 4)) || herc) {
5e25b9dd 2152 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
19a60522 2153 ADAPTER_STATUS_RMAC_PCC_IDLE))
5e25b9dd 2154 ret = 1;
5e25b9dd
K
2155 } else {
2156 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
19a60522 2157 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
5e25b9dd 2158 ret = 1;
20346722
K
2159 }
2160 }
2161
2162 return ret;
2163}
2164/**
2165 * verify_xena_quiescence - Checks whether the H/W is ready
1da177e4 2166 * Description: Returns whether the H/W is ready to go or not. Depending
20346722 2167 * on whether adapter enable bit was written or not the comparison
1da177e4
LT
2168 * differs and the calling function passes the input argument flag to
2169 * indicate this.
20346722 2170 * Return: 1 If xena is quiescence
1da177e4
LT
2171 * 0 If Xena is not quiescence
2172 */
2173
1ee6dd77 2174static int verify_xena_quiescence(struct s2io_nic *sp)
1da177e4 2175{
19a60522 2176 int mode;
1ee6dd77 2177 struct XENA_dev_config __iomem *bar0 = sp->bar0;
19a60522
SS
2178 u64 val64 = readq(&bar0->adapter_status);
2179 mode = s2io_verify_pci_mode(sp);
1da177e4 2180
19a60522
SS
2181 if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
2182 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
2183 return 0;
2184 }
2185 if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
2186 DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
2187 return 0;
2188 }
2189 if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
2190 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
2191 return 0;
2192 }
2193 if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
2194 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
2195 return 0;
2196 }
2197 if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
2198 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
2199 return 0;
2200 }
2201 if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
2202 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
2203 return 0;
2204 }
2205 if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
2206 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
2207 return 0;
2208 }
2209 if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
2210 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
2211 return 0;
1da177e4
LT
2212 }
2213
19a60522
SS
2214 /*
2215 * In PCI 33 mode, the P_PLL is not used, and therefore,
2216 * the the P_PLL_LOCK bit in the adapter_status register will
2217 * not be asserted.
2218 */
2219 if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
2220 sp->device_type == XFRAME_II_DEVICE && mode !=
2221 PCI_MODE_PCI_33) {
2222 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
2223 return 0;
2224 }
2225 if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
2226 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
2227 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
2228 return 0;
2229 }
2230 return 1;
1da177e4
LT
2231}
2232
2233/**
2234 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
2235 * @sp: Pointer to device specifc structure
20346722 2236 * Description :
1da177e4
LT
2237 * New procedure to clear mac address reading problems on Alpha platforms
2238 *
2239 */
2240
1ee6dd77 2241static void fix_mac_address(struct s2io_nic * sp)
1da177e4 2242{
1ee6dd77 2243 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
2244 u64 val64;
2245 int i = 0;
2246
2247 while (fix_mac[i] != END_SIGN) {
2248 writeq(fix_mac[i++], &bar0->gpio_control);
20346722 2249 udelay(10);
1da177e4
LT
2250 val64 = readq(&bar0->gpio_control);
2251 }
2252}
2253
2254/**
20346722 2255 * start_nic - Turns the device on
1da177e4 2256 * @nic : device private variable.
20346722
K
2257 * Description:
2258 * This function actually turns the device on. Before this function is
2259 * called,all Registers are configured from their reset states
2260 * and shared memory is allocated but the NIC is still quiescent. On
1da177e4
LT
2261 * calling this function, the device interrupts are cleared and the NIC is
2262 * literally switched on by writing into the adapter control register.
20346722 2263 * Return Value:
1da177e4
LT
2264 * SUCCESS on success and -1 on failure.
2265 */
2266
2267static int start_nic(struct s2io_nic *nic)
2268{
1ee6dd77 2269 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1da177e4
LT
2270 struct net_device *dev = nic->dev;
2271 register u64 val64 = 0;
20346722 2272 u16 subid, i;
1ee6dd77 2273 struct mac_info *mac_control;
1da177e4
LT
2274 struct config_param *config;
2275
2276 mac_control = &nic->mac_control;
2277 config = &nic->config;
2278
2279 /* PRC Initialization and configuration */
2280 for (i = 0; i < config->rx_ring_num; i++) {
20346722 2281 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
1da177e4
LT
2282 &bar0->prc_rxd0_n[i]);
2283
2284 val64 = readq(&bar0->prc_ctrl_n[i]);
da6971d8
AR
2285 if (nic->rxd_mode == RXD_MODE_1)
2286 val64 |= PRC_CTRL_RC_ENABLED;
2287 else
2288 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
863c11a9
AR
2289 if (nic->device_type == XFRAME_II_DEVICE)
2290 val64 |= PRC_CTRL_GROUP_READS;
2291 val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
2292 val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
1da177e4
LT
2293 writeq(val64, &bar0->prc_ctrl_n[i]);
2294 }
2295
da6971d8
AR
2296 if (nic->rxd_mode == RXD_MODE_3B) {
2297 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
2298 val64 = readq(&bar0->rx_pa_cfg);
2299 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
2300 writeq(val64, &bar0->rx_pa_cfg);
2301 }
1da177e4 2302
926930b2
SS
2303 if (vlan_tag_strip == 0) {
2304 val64 = readq(&bar0->rx_pa_cfg);
2305 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
2306 writeq(val64, &bar0->rx_pa_cfg);
2307 vlan_strip_flag = 0;
2308 }
2309
20346722 2310 /*
1da177e4
LT
2311 * Enabling MC-RLDRAM. After enabling the device, we timeout
2312 * for around 100ms, which is approximately the time required
2313 * for the device to be ready for operation.
2314 */
2315 val64 = readq(&bar0->mc_rldram_mrs);
2316 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
2317 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
2318 val64 = readq(&bar0->mc_rldram_mrs);
2319
20346722 2320 msleep(100); /* Delay by around 100 ms. */
1da177e4
LT
2321
2322 /* Enabling ECC Protection. */
2323 val64 = readq(&bar0->adapter_control);
2324 val64 &= ~ADAPTER_ECC_EN;
2325 writeq(val64, &bar0->adapter_control);
2326
20346722
K
2327 /*
2328 * Verify if the device is ready to be enabled, if so enable
1da177e4
LT
2329 * it.
2330 */
2331 val64 = readq(&bar0->adapter_status);
19a60522 2332 if (!verify_xena_quiescence(nic)) {
1da177e4
LT
2333 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
2334 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
2335 (unsigned long long) val64);
2336 return FAILURE;
2337 }
2338
20346722 2339 /*
1da177e4 2340 * With some switches, link might be already up at this point.
20346722
K
2341 * Because of this weird behavior, when we enable laser,
2342 * we may not get link. We need to handle this. We cannot
2343 * figure out which switch is misbehaving. So we are forced to
2344 * make a global change.
1da177e4
LT
2345 */
2346
2347 /* Enabling Laser. */
2348 val64 = readq(&bar0->adapter_control);
2349 val64 |= ADAPTER_EOI_TX_ON;
2350 writeq(val64, &bar0->adapter_control);
2351
c92ca04b
AR
2352 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
2353 /*
2354 * Dont see link state interrupts initally on some switches,
2355 * so directly scheduling the link state task here.
2356 */
2357 schedule_work(&nic->set_link_task);
2358 }
1da177e4
LT
2359 /* SXE-002: Initialize link and activity LED */
2360 subid = nic->pdev->subsystem_device;
541ae68f
K
2361 if (((subid & 0xFF) >= 0x07) &&
2362 (nic->device_type == XFRAME_I_DEVICE)) {
1da177e4
LT
2363 val64 = readq(&bar0->gpio_control);
2364 val64 |= 0x0000800000000000ULL;
2365 writeq(val64, &bar0->gpio_control);
2366 val64 = 0x0411040400000000ULL;
509a2671 2367 writeq(val64, (void __iomem *)bar0 + 0x2700);
1da177e4
LT
2368 }
2369
1da177e4
LT
2370 return SUCCESS;
2371}
fed5eccd
AR
2372/**
2373 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
2374 */
1ee6dd77
RB
2375static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \
2376 TxD *txdlp, int get_off)
fed5eccd 2377{
1ee6dd77 2378 struct s2io_nic *nic = fifo_data->nic;
fed5eccd 2379 struct sk_buff *skb;
1ee6dd77 2380 struct TxD *txds;
fed5eccd
AR
2381 u16 j, frg_cnt;
2382
2383 txds = txdlp;
2fda096d 2384 if (txds->Host_Control == (u64)(long)fifo_data->ufo_in_band_v) {
fed5eccd
AR
2385 pci_unmap_single(nic->pdev, (dma_addr_t)
2386 txds->Buffer_Pointer, sizeof(u64),
2387 PCI_DMA_TODEVICE);
2388 txds++;
2389 }
2390
2391 skb = (struct sk_buff *) ((unsigned long)
2392 txds->Host_Control);
2393 if (!skb) {
1ee6dd77 2394 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));
fed5eccd
AR
2395 return NULL;
2396 }
2397 pci_unmap_single(nic->pdev, (dma_addr_t)
2398 txds->Buffer_Pointer,
2399 skb->len - skb->data_len,
2400 PCI_DMA_TODEVICE);
2401 frg_cnt = skb_shinfo(skb)->nr_frags;
2402 if (frg_cnt) {
2403 txds++;
2404 for (j = 0; j < frg_cnt; j++, txds++) {
2405 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2406 if (!txds->Buffer_Pointer)
2407 break;
6aa20a22 2408 pci_unmap_page(nic->pdev, (dma_addr_t)
fed5eccd
AR
2409 txds->Buffer_Pointer,
2410 frag->size, PCI_DMA_TODEVICE);
2411 }
2412 }
1ee6dd77 2413 memset(txdlp,0, (sizeof(struct TxD) * fifo_data->max_txds));
fed5eccd
AR
2414 return(skb);
2415}
1da177e4 2416
20346722
K
2417/**
2418 * free_tx_buffers - Free all queued Tx buffers
1da177e4 2419 * @nic : device private variable.
20346722 2420 * Description:
1da177e4 2421 * Free all queued Tx buffers.
20346722 2422 * Return Value: void
1da177e4
LT
2423*/
2424
2425static void free_tx_buffers(struct s2io_nic *nic)
2426{
2427 struct net_device *dev = nic->dev;
2428 struct sk_buff *skb;
1ee6dd77 2429 struct TxD *txdp;
1da177e4 2430 int i, j;
1ee6dd77 2431 struct mac_info *mac_control;
1da177e4 2432 struct config_param *config;
fed5eccd 2433 int cnt = 0;
1da177e4
LT
2434
2435 mac_control = &nic->mac_control;
2436 config = &nic->config;
2437
2438 for (i = 0; i < config->tx_fifo_num; i++) {
2fda096d
SR
2439 unsigned long flags;
2440 spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags);
1da177e4 2441 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
491976b2
SH
2442 txdp = (struct TxD *) \
2443 mac_control->fifos[i].list_info[j].list_virt_addr;
fed5eccd
AR
2444 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2445 if (skb) {
8a4bdbaa 2446 nic->mac_control.stats_info->sw_stat.mem_freed
491976b2 2447 += skb->truesize;
fed5eccd
AR
2448 dev_kfree_skb(skb);
2449 cnt++;
1da177e4 2450 }
1da177e4
LT
2451 }
2452 DBG_PRINT(INTR_DBG,
2453 "%s:forcibly freeing %d skbs on FIFO%d\n",
2454 dev->name, cnt, i);
20346722
K
2455 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2456 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2fda096d 2457 spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock, flags);
1da177e4
LT
2458 }
2459}
2460
20346722
K
2461/**
2462 * stop_nic - To stop the nic
1da177e4 2463 * @nic ; device private variable.
20346722
K
2464 * Description:
2465 * This function does exactly the opposite of what the start_nic()
1da177e4
LT
2466 * function does. This function is called to stop the device.
2467 * Return Value:
2468 * void.
2469 */
2470
2471static void stop_nic(struct s2io_nic *nic)
2472{
1ee6dd77 2473 struct XENA_dev_config __iomem *bar0 = nic->bar0;
1da177e4 2474 register u64 val64 = 0;
5d3213cc 2475 u16 interruptible;
1ee6dd77 2476 struct mac_info *mac_control;
1da177e4
LT
2477 struct config_param *config;
2478
2479 mac_control = &nic->mac_control;
2480 config = &nic->config;
2481
2482 /* Disable all interrupts */
9caab458 2483 en_dis_err_alarms(nic, ENA_ALL_INTRS, DISABLE_INTRS);
e960fc5c 2484 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
9caab458 2485 interruptible |= TX_PIC_INTR;
1da177e4
LT
2486 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2487
5d3213cc
AR
2488 /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2489 val64 = readq(&bar0->adapter_control);
2490 val64 &= ~(ADAPTER_CNTL_EN);
2491 writeq(val64, &bar0->adapter_control);
1da177e4
LT
2492}
2493
20346722
K
2494/**
2495 * fill_rx_buffers - Allocates the Rx side skbs
1da177e4 2496 * @nic: device private variable
20346722
K
2497 * @ring_no: ring number
2498 * Description:
1da177e4
LT
2499 * The function allocates Rx side skbs and puts the physical
2500 * address of these buffers into the RxD buffer pointers, so that the NIC
2501 * can DMA the received frame into these locations.
2502 * The NIC supports 3 receive modes, viz
2503 * 1. single buffer,
2504 * 2. three buffer and
2505 * 3. Five buffer modes.
20346722
K
2506 * Each mode defines how many fragments the received frame will be split
2507 * up into by the NIC. The frame is split into L3 header, L4 Header,
1da177e4
LT
2508 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2509 * is split into 3 fragments. As of now only single buffer mode is
2510 * supported.
2511 * Return Value:
2512 * SUCCESS on success or an appropriate -ve value on failure.
2513 */
2514
ac1f60db 2515static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1da177e4
LT
2516{
2517 struct net_device *dev = nic->dev;
2518 struct sk_buff *skb;
1ee6dd77 2519 struct RxD_t *rxdp;
1da177e4 2520 int off, off1, size, block_no, block_no1;
1da177e4 2521 u32 alloc_tab = 0;
20346722 2522 u32 alloc_cnt;
1ee6dd77 2523 struct mac_info *mac_control;
1da177e4 2524 struct config_param *config;
20346722 2525 u64 tmp;
1ee6dd77 2526 struct buffAdd *ba;
1da177e4 2527 unsigned long flags;
1ee6dd77 2528 struct RxD_t *first_rxdp = NULL;
363dc367 2529 u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
6d517a27
VP
2530 struct RxD1 *rxdp1;
2531 struct RxD3 *rxdp3;
491abf25 2532 struct swStat *stats = &nic->mac_control.stats_info->sw_stat;
1da177e4
LT
2533
2534 mac_control = &nic->mac_control;
2535 config = &nic->config;
20346722
K
2536 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2537 atomic_read(&nic->rx_bufs_left[ring_no]);
1da177e4 2538
5d3213cc 2539 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
863c11a9 2540 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
1da177e4 2541 while (alloc_tab < alloc_cnt) {
20346722 2542 block_no = mac_control->rings[ring_no].rx_curr_put_info.
1da177e4 2543 block_index;
20346722 2544 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
1da177e4 2545
da6971d8
AR
2546 rxdp = mac_control->rings[ring_no].
2547 rx_blocks[block_no].rxds[off].virt_addr;
2548
2549 if ((block_no == block_no1) && (off == off1) &&
2550 (rxdp->Host_Control)) {
2551 DBG_PRINT(INTR_DBG, "%s: Get and Put",
2552 dev->name);
1da177e4
LT
2553 DBG_PRINT(INTR_DBG, " info equated\n");
2554 goto end;
2555 }
da6971d8 2556 if (off && (off == rxd_count[nic->rxd_mode])) {
20346722 2557 mac_control->rings[ring_no].rx_curr_put_info.
1da177e4 2558 block_index++;
da6971d8
AR
2559 if (mac_control->rings[ring_no].rx_curr_put_info.
2560 block_index == mac_control->rings[ring_no].
2561 block_count)
2562 mac_control->rings[ring_no].rx_curr_put_info.
2563 block_index = 0;
2564 block_no = mac_control->rings[ring_no].
2565 rx_curr_put_info.block_index;
2566 if (off == rxd_count[nic->rxd_mode])
2567 off = 0;
20346722 2568 mac_control->rings[ring_no].rx_curr_put_info.
da6971d8
AR
2569 offset = off;
2570 rxdp = mac_control->rings[ring_no].
2571 rx_blocks[block_no].block_virt_addr;
1da177e4
LT
2572 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
2573 dev->name, rxdp);
2574 }
db874e65
SS
2575 if(!napi) {
2576 spin_lock_irqsave(&nic->put_lock, flags);
2577 mac_control->rings[ring_no].put_pos =
2578 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2579 spin_unlock_irqrestore(&nic->put_lock, flags);
2580 } else {
2581 mac_control->rings[ring_no].put_pos =
2582 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2583 }
da6971d8 2584 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
6d517a27 2585 ((nic->rxd_mode == RXD_MODE_3B) &&
b7b5a128 2586 (rxdp->Control_2 & s2BIT(0)))) {
20346722 2587 mac_control->rings[ring_no].rx_curr_put_info.
da6971d8 2588 offset = off;
1da177e4
LT
2589 goto end;
2590 }
da6971d8
AR
2591 /* calculate size of skb based on ring mode */
2592 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2593 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2594 if (nic->rxd_mode == RXD_MODE_1)
2595 size += NET_IP_ALIGN;
da6971d8 2596 else
6d517a27 2597 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
1da177e4 2598
da6971d8
AR
2599 /* allocate skb */
2600 skb = dev_alloc_skb(size);
2601 if(!skb) {
0c61ed5f
RV
2602 DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
2603 DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
303bcb4b
K
2604 if (first_rxdp) {
2605 wmb();
2606 first_rxdp->Control_1 |= RXD_OWN_XENA;
2607 }
c53d4945
SH
2608 nic->mac_control.stats_info->sw_stat. \
2609 mem_alloc_fail_cnt++;
da6971d8
AR
2610 return -ENOMEM ;
2611 }
8a4bdbaa 2612 nic->mac_control.stats_info->sw_stat.mem_allocated
491976b2 2613 += skb->truesize;
da6971d8
AR
2614 if (nic->rxd_mode == RXD_MODE_1) {
2615 /* 1 buffer mode - normal operation mode */
6d517a27 2616 rxdp1 = (struct RxD1*)rxdp;
1ee6dd77 2617 memset(rxdp, 0, sizeof(struct RxD1));
da6971d8 2618 skb_reserve(skb, NET_IP_ALIGN);
6d517a27 2619 rxdp1->Buffer0_ptr = pci_map_single
863c11a9
AR
2620 (nic->pdev, skb->data, size - NET_IP_ALIGN,
2621 PCI_DMA_FROMDEVICE);
491abf25
VP
2622 if( (rxdp1->Buffer0_ptr == 0) ||
2623 (rxdp1->Buffer0_ptr ==
2624 DMA_ERROR_CODE))
2625 goto pci_map_failed;
2626
8a4bdbaa 2627 rxdp->Control_2 =
491976b2 2628 SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
da6971d8 2629
6d517a27 2630 } else if (nic->rxd_mode == RXD_MODE_3B) {
da6971d8 2631 /*
6d517a27
VP
2632 * 2 buffer mode -
2633 * 2 buffer mode provides 128
da6971d8 2634 * byte aligned receive buffers.
da6971d8
AR
2635 */
2636
6d517a27 2637 rxdp3 = (struct RxD3*)rxdp;
491976b2 2638 /* save buffer pointers to avoid frequent dma mapping */
6d517a27
VP
2639 Buffer0_ptr = rxdp3->Buffer0_ptr;
2640 Buffer1_ptr = rxdp3->Buffer1_ptr;
1ee6dd77 2641 memset(rxdp, 0, sizeof(struct RxD3));
363dc367 2642 /* restore the buffer pointers for dma sync*/
6d517a27
VP
2643 rxdp3->Buffer0_ptr = Buffer0_ptr;
2644 rxdp3->Buffer1_ptr = Buffer1_ptr;
363dc367 2645
da6971d8
AR
2646 ba = &mac_control->rings[ring_no].ba[block_no][off];
2647 skb_reserve(skb, BUF0_LEN);
2648 tmp = (u64)(unsigned long) skb->data;
2649 tmp += ALIGN_SIZE;
2650 tmp &= ~ALIGN_SIZE;
2651 skb->data = (void *) (unsigned long)tmp;
27a884dc 2652 skb_reset_tail_pointer(skb);
da6971d8 2653
6d517a27
VP
2654 if (!(rxdp3->Buffer0_ptr))
2655 rxdp3->Buffer0_ptr =
75c30b13 2656 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
da6971d8 2657 PCI_DMA_FROMDEVICE);
75c30b13
AR
2658 else
2659 pci_dma_sync_single_for_device(nic->pdev,
6d517a27 2660 (dma_addr_t) rxdp3->Buffer0_ptr,
75c30b13 2661 BUF0_LEN, PCI_DMA_FROMDEVICE);
491abf25
VP
2662 if( (rxdp3->Buffer0_ptr == 0) ||
2663 (rxdp3->Buffer0_ptr == DMA_ERROR_CODE))
2664 goto pci_map_failed;
2665
da6971d8
AR
2666 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2667 if (nic->rxd_mode == RXD_MODE_3B) {
2668 /* Two buffer mode */
2669
2670 /*
6aa20a22 2671 * Buffer2 will have L3/L4 header plus
da6971d8
AR
2672 * L4 payload
2673 */
6d517a27 2674 rxdp3->Buffer2_ptr = pci_map_single
da6971d8
AR
2675 (nic->pdev, skb->data, dev->mtu + 4,
2676 PCI_DMA_FROMDEVICE);
2677
491abf25
VP
2678 if( (rxdp3->Buffer2_ptr == 0) ||
2679 (rxdp3->Buffer2_ptr == DMA_ERROR_CODE))
2680 goto pci_map_failed;
2681
2682 rxdp3->Buffer1_ptr =
6aa20a22 2683 pci_map_single(nic->pdev,
75c30b13
AR
2684 ba->ba_1, BUF1_LEN,
2685 PCI_DMA_FROMDEVICE);
491abf25
VP
2686 if( (rxdp3->Buffer1_ptr == 0) ||
2687 (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
2688 pci_unmap_single
2689 (nic->pdev,
3e847423 2690 (dma_addr_t)rxdp3->Buffer2_ptr,
491abf25
VP
2691 dev->mtu + 4,
2692 PCI_DMA_FROMDEVICE);
2693 goto pci_map_failed;
75c30b13 2694 }
da6971d8
AR
2695 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2696 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2697 (dev->mtu + 4);
da6971d8 2698 }
b7b5a128 2699 rxdp->Control_2 |= s2BIT(0);
1da177e4 2700 }
1da177e4 2701 rxdp->Host_Control = (unsigned long) (skb);
303bcb4b
K
2702 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2703 rxdp->Control_1 |= RXD_OWN_XENA;
1da177e4 2704 off++;
da6971d8
AR
2705 if (off == (rxd_count[nic->rxd_mode] + 1))
2706 off = 0;
20346722 2707 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
20346722 2708
da6971d8 2709 rxdp->Control_2 |= SET_RXD_MARKER;
303bcb4b
K
2710 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2711 if (first_rxdp) {
2712 wmb();
2713 first_rxdp->Control_1 |= RXD_OWN_XENA;
2714 }
2715 first_rxdp = rxdp;
2716 }
1da177e4
LT
2717 atomic_inc(&nic->rx_bufs_left[ring_no]);
2718 alloc_tab++;
2719 }
2720
2721 end:
303bcb4b
K
2722 /* Transfer ownership of first descriptor to adapter just before
2723 * exiting. Before that, use memory barrier so that ownership
2724 * and other fields are seen by adapter correctly.
2725 */
2726 if (first_rxdp) {
2727 wmb();
2728 first_rxdp->Control_1 |= RXD_OWN_XENA;
2729 }
2730
1da177e4 2731 return SUCCESS;
491abf25
VP
2732pci_map_failed:
2733 stats->pci_map_fail_cnt++;
2734 stats->mem_freed += skb->truesize;
2735 dev_kfree_skb_irq(skb);
2736 return -ENOMEM;
1da177e4
LT
2737}
2738
da6971d8
AR
/**
 * free_rxd_blk - Frees one block of Rx descriptors on a given ring
 * @sp: device private variable.
 * @ring_no: index of the ring whose block is being freed.
 * @blk: index of the block within the ring.
 * Description:
 * Walks every RxD in the block; for each descriptor that still holds an
 * skb, unmaps the DMA buffer(s) according to the receive descriptor mode,
 * clears the descriptor and frees the skb, decrementing the ring's
 * outstanding-buffer count.
 * Return Value:
 * NONE.
 */
static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
{
	struct net_device *dev = sp->dev;
	int j;
	struct sk_buff *skb;
	struct RxD_t *rxdp;
	struct mac_info *mac_control;
	struct buffAdd *ba;
	struct RxD1 *rxdp1;
	struct RxD3 *rxdp3;

	mac_control = &sp->mac_control;
	for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
		rxdp = mac_control->rings[ring_no].
				rx_blocks[blk].rxds[j].virt_addr;
		/* Host_Control carries the skb pointer stored at fill time */
		skb = (struct sk_buff *)
			((unsigned long) rxdp->Host_Control);
		if (!skb) {
			continue;
		}
		if (sp->rxd_mode == RXD_MODE_1) {
			/* 1-buffer mode: one buffer sized for MTU + headers */
			rxdp1 = (struct RxD1*)rxdp;
			pci_unmap_single(sp->pdev, (dma_addr_t)
				rxdp1->Buffer0_ptr,
				dev->mtu +
				HEADER_ETHERNET_II_802_3_SIZE
				+ HEADER_802_2_SIZE +
				HEADER_SNAP_SIZE,
				PCI_DMA_FROMDEVICE);
			memset(rxdp, 0, sizeof(struct RxD1));
		} else if(sp->rxd_mode == RXD_MODE_3B) {
			/* 3-buffer mode: unmap all three buffers */
			rxdp3 = (struct RxD3*)rxdp;
			/* NOTE(review): ba is fetched but not used below */
			ba = &mac_control->rings[ring_no].
				ba[blk][j];
			pci_unmap_single(sp->pdev, (dma_addr_t)
				rxdp3->Buffer0_ptr,
				BUF0_LEN,
				PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev, (dma_addr_t)
				rxdp3->Buffer1_ptr,
				BUF1_LEN,
				PCI_DMA_FROMDEVICE);
			pci_unmap_single(sp->pdev, (dma_addr_t)
				rxdp3->Buffer2_ptr,
				dev->mtu + 4,
				PCI_DMA_FROMDEVICE);
			memset(rxdp, 0, sizeof(struct RxD3));
		}
		sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
		dev_kfree_skb(skb);
		atomic_dec(&sp->rx_bufs_left[ring_no]);
	}
}
2792
1da177e4 2793/**
20346722 2794 * free_rx_buffers - Frees all Rx buffers
1da177e4 2795 * @sp: device private variable.
20346722 2796 * Description:
1da177e4
LT
2797 * This function will free all Rx buffers allocated by host.
2798 * Return Value:
2799 * NONE.
2800 */
2801
2802static void free_rx_buffers(struct s2io_nic *sp)
2803{
2804 struct net_device *dev = sp->dev;
da6971d8 2805 int i, blk = 0, buf_cnt = 0;
1ee6dd77 2806 struct mac_info *mac_control;
1da177e4 2807 struct config_param *config;
1da177e4
LT
2808
2809 mac_control = &sp->mac_control;
2810 config = &sp->config;
2811
2812 for (i = 0; i < config->rx_ring_num; i++) {
da6971d8
AR
2813 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2814 free_rxd_blk(sp,i,blk);
1da177e4 2815
20346722
K
2816 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2817 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2818 mac_control->rings[i].rx_curr_put_info.offset = 0;
2819 mac_control->rings[i].rx_curr_get_info.offset = 0;
1da177e4
LT
2820 atomic_set(&sp->rx_bufs_left[i], 0);
2821 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2822 dev->name, buf_cnt, i);
2823 }
2824}
2825
2826/**
2827 * s2io_poll - Rx interrupt handler for NAPI support
bea3348e 2828 * @napi : pointer to the napi structure.
20346722 2829 * @budget : The number of packets that were budgeted to be processed
1da177e4
LT
2830 * during one pass through the 'Poll" function.
2831 * Description:
2832 * Comes into picture only if NAPI support has been incorporated. It does
2833 * the same thing that rx_intr_handler does, but not in a interrupt context
2834 * also It will process only a given number of packets.
2835 * Return value:
2836 * 0 on success and 1 if there are No Rx packets to be processed.
2837 */
2838
static int s2io_poll(struct napi_struct *napi, int budget)
{
	struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
	struct net_device *dev = nic->dev;
	int pkt_cnt = 0, org_pkts_to_process;
	struct mac_info *mac_control;
	struct config_param *config;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	int i;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* rx_intr_handler() decrements pkts_to_process as it consumes the
	 * budget; pkt_cnt below is derived from the difference. */
	nic->pkts_to_process = budget;
	org_pkts_to_process = nic->pkts_to_process;

	/* Acknowledge all pending Rx traffic interrupts; the readl flushes
	 * the posted write to the device. */
	writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
	readl(&bar0->rx_traffic_int);

	for (i = 0; i < config->rx_ring_num; i++) {
		rx_intr_handler(&mac_control->rings[i]);
		pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
		if (!nic->pkts_to_process) {
			/* Quota for the current iteration has been met */
			goto no_rx;
		}
	}

	/* Budget not exhausted: leave polled mode. Buffers are refilled
	 * and Rx interrupts unmasked only on this path. */
	netif_rx_complete(dev, napi);

	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(nic, i) == -ENOMEM) {
			DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
			DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
			break;
		}
	}
	/* Re enable the Rx interrupts. */
	writeq(0x0, &bar0->rx_traffic_mask);
	readl(&bar0->rx_traffic_mask);
	return pkt_cnt;

no_rx:
	/* Budget exhausted: refill rings but stay in polled mode (Rx
	 * interrupts remain masked; NAPI will poll again). */
	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(nic, i) == -ENOMEM) {
			DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
			DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
			break;
		}
	}
	return pkt_cnt;
}
20346722 2891
b41477f3 2892#ifdef CONFIG_NET_POLL_CONTROLLER
612eff0e 2893/**
b41477f3 2894 * s2io_netpoll - netpoll event handler entry point
612eff0e
BH
2895 * @dev : pointer to the device structure.
2896 * Description:
b41477f3
AR
2897 * This function will be called by upper layer to check for events on the
2898 * interface in situations where interrupts are disabled. It is used for
2899 * specific in-kernel networking tasks, such as remote consoles and kernel
2900 * debugging over the network (example netdump in RedHat).
612eff0e 2901 */
612eff0e
BH
static void s2io_netpoll(struct net_device *dev)
{
	struct s2io_nic *nic = dev->priv;
	struct mac_info *mac_control;
	struct config_param *config;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
	int i;

	/* Device is inaccessible while the PCI channel is offline (EEH) */
	if (pci_channel_offline(nic->pdev))
		return;

	/* Netpoll runs with interrupts disabled; keep the IRQ handler out */
	disable_irq(dev->irq);

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* Acknowledge all pending Rx and Tx traffic interrupts */
	writeq(val64, &bar0->rx_traffic_int);
	writeq(val64, &bar0->tx_traffic_int);

	/* we need to free up the transmitted skbufs or else netpoll will
	 * run out of skbs and will fail and eventually netpoll application such
	 * as netdump will fail.
	 */
	for (i = 0; i < config->tx_fifo_num; i++)
		tx_intr_handler(&mac_control->fifos[i]);

	/* check for received packet and indicate up to network */
	for (i = 0; i < config->rx_ring_num; i++)
		rx_intr_handler(&mac_control->rings[i]);

	/* Replenish the Rx rings for the buffers consumed above */
	for (i = 0; i < config->rx_ring_num; i++) {
		if (fill_rx_buffers(nic, i) == -ENOMEM) {
			DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
			DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
			break;
		}
	}
	enable_irq(dev->irq);
	return;
}
2943#endif
2944
20346722 2945/**
1da177e4
LT
2946 * rx_intr_handler - Rx interrupt handler
2947 * @nic: device private variable.
20346722
K
2948 * Description:
2949 * If the interrupt is because of a received frame or if the
1da177e4 2950 * receive ring contains fresh as yet un-processed frames,this function is
20346722
K
2951 * called. It picks out the RxD at which place the last Rx processing had
2952 * stopped and sends the skb to the OSM's Rx handler and then increments
1da177e4
LT
2953 * the offset.
2954 * Return Value:
2955 * NONE.
2956 */
static void rx_intr_handler(struct ring_info *ring_data)
{
	struct s2io_nic *nic = ring_data->nic;
	struct net_device *dev = (struct net_device *) nic->dev;
	int get_block, put_block, put_offset;
	struct rx_curr_get_info get_info, put_info;
	struct RxD_t *rxdp;
	struct sk_buff *skb;
	int pkt_cnt = 0;
	int i;
	struct RxD1* rxdp1;
	struct RxD3* rxdp3;

	spin_lock(&nic->rx_lock);

	/* Resume from where the previous pass stopped */
	get_info = ring_data->rx_curr_get_info;
	get_block = get_info.block_index;
	memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
	put_block = put_info.block_index;
	rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
	/* In non-NAPI mode put_pos is written from the fill path under
	 * put_lock, so the read must take the same lock. */
	if (!napi) {
		spin_lock(&nic->put_lock);
		put_offset = ring_data->put_pos;
		spin_unlock(&nic->put_lock);
	} else
		put_offset = ring_data->put_pos;

	/* Walk descriptors until one still owned by the adapter is found */
	while (RXD_IS_UP2DT(rxdp)) {
		/*
		 * If your are next to put index then it's
		 * FIFO full condition
		 */
		if ((get_block == put_block) &&
		    (get_info.offset + 1) == put_info.offset) {
			DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name);
			break;
		}
		skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
		if (skb == NULL) {
			/* Should never happen: descriptor ready but no skb */
			DBG_PRINT(ERR_DBG, "%s: The skb is ",
				  dev->name);
			DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
			spin_unlock(&nic->rx_lock);
			return;
		}
		/* Unmap the DMA buffers per descriptor mode before handing
		 * the skb up the stack. */
		if (nic->rxd_mode == RXD_MODE_1) {
			rxdp1 = (struct RxD1*)rxdp;
			pci_unmap_single(nic->pdev, (dma_addr_t)
				rxdp1->Buffer0_ptr,
				dev->mtu +
				HEADER_ETHERNET_II_802_3_SIZE +
				HEADER_802_2_SIZE +
				HEADER_SNAP_SIZE,
				PCI_DMA_FROMDEVICE);
		} else if (nic->rxd_mode == RXD_MODE_3B) {
			/* Buffer0 (header) is only synced, Buffer2 (payload)
			 * is fully unmapped. */
			rxdp3 = (struct RxD3*)rxdp;
			pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
				rxdp3->Buffer0_ptr,
				BUF0_LEN, PCI_DMA_FROMDEVICE);
			pci_unmap_single(nic->pdev, (dma_addr_t)
				rxdp3->Buffer2_ptr,
				dev->mtu + 4,
				PCI_DMA_FROMDEVICE);
		}
		prefetch(skb->data);
		rx_osm_handler(ring_data, rxdp);
		get_info.offset++;
		ring_data->rx_curr_get_info.offset = get_info.offset;
		rxdp = ring_data->rx_blocks[get_block].
				rxds[get_info.offset].virt_addr;
		/* Wrap to the next block (and back to block 0 at the end) */
		if (get_info.offset == rxd_count[nic->rxd_mode]) {
			get_info.offset = 0;
			ring_data->rx_curr_get_info.offset = get_info.offset;
			get_block++;
			if (get_block == ring_data->block_count)
				get_block = 0;
			ring_data->rx_curr_get_info.block_index = get_block;
			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
		}

		nic->pkts_to_process -= 1;
		/* NAPI: stop once the budget is consumed */
		if ((napi) && (!nic->pkts_to_process))
			break;
		pkt_cnt++;
		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
			break;
	}
	if (nic->lro) {
		/* Clear all LRO sessions before exiting */
		for (i=0; i<MAX_LRO_SESSIONS; i++) {
			struct lro *lro = &nic->lro0_n[i];
			if (lro->in_use) {
				update_L3L4_header(nic, lro);
				queue_rx_frame(lro->parent);
				clear_lro_session(lro);
			}
		}
	}

	spin_unlock(&nic->rx_lock);
}
20346722
K
3058
3059/**
1da177e4
LT
3060 * tx_intr_handler - Transmit interrupt handler
3061 * @nic : device private variable
20346722
K
3062 * Description:
3063 * If an interrupt was raised to indicate DMA complete of the
3064 * Tx packet, this function is called. It identifies the last TxD
3065 * whose buffer was freed and frees all skbs whose data have already
1da177e4
LT
3066 * DMA'ed into the NICs internal memory.
3067 * Return Value:
3068 * NONE
3069 */
3070
static void tx_intr_handler(struct fifo_info *fifo_data)
{
	struct s2io_nic *nic = fifo_data->nic;
	struct tx_curr_get_info get_info, put_info;
	struct sk_buff *skb = NULL;
	struct TxD *txdlp;
	int pkt_cnt = 0;
	unsigned long flags = 0;
	u8 err_mask;

	/* Best-effort: if another context holds the fifo lock, simply
	 * return; completions will be reaped on a later invocation. */
	if (!spin_trylock_irqsave(&fifo_data->tx_lock, flags))
		return;

	get_info = fifo_data->tx_curr_get_info;
	memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
	txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
	    list_virt_addr;
	/* Reap descriptors the NIC has finished with (ownership returned
	 * to the host) up to the producer position. */
	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
	       (get_info.offset != put_info.offset) &&
	       (txdlp->Host_Control)) {
		/* Check for TxD errors */
		if (txdlp->Control_1 & TXD_T_CODE) {
			unsigned long long err;
			err = txdlp->Control_1 & TXD_T_CODE;
			if (err & 0x1) {
				nic->mac_control.stats_info->sw_stat.
						parity_err_cnt++;
			}

			/* update t_code statistics */
			/* t_code values per the Xframe TxD spec:
			 * 2=buffer abort, 3=descriptor abort,
			 * 7=parity error, 10=link loss,
			 * 15=list processing error */
			err_mask = err >> 48;
			switch(err_mask) {
				case 2:
					nic->mac_control.stats_info->sw_stat.
							tx_buf_abort_cnt++;
				break;

				case 3:
					nic->mac_control.stats_info->sw_stat.
							tx_desc_abort_cnt++;
				break;

				case 7:
					nic->mac_control.stats_info->sw_stat.
							tx_parity_err_cnt++;
				break;

				case 10:
					nic->mac_control.stats_info->sw_stat.
							tx_link_loss_cnt++;
				break;

				case 15:
					nic->mac_control.stats_info->sw_stat.
							tx_list_proc_err_cnt++;
				break;
			}
		}

		/* Unmaps the frame's DMA buffers and returns its skb */
		skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
		if (skb == NULL) {
			spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
			DBG_PRINT(ERR_DBG, "%s: Null skb ",
			__FUNCTION__);
			DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
			return;
		}
		pkt_cnt++;

		/* Updating the statistics block */
		nic->stats.tx_bytes += skb->len;
		nic->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
		dev_kfree_skb_irq(skb);

		/* Advance the consumer index, wrapping at fifo_len + 1 */
		get_info.offset++;
		if (get_info.offset == get_info.fifo_len + 1)
			get_info.offset = 0;
		txdlp = (struct TxD *) fifo_data->list_info
		    [get_info.offset].list_virt_addr;
		fifo_data->tx_curr_get_info.offset =
		    get_info.offset;
	}

	/* Wake the (per-fifo, if multiqueue) Tx queue now that room exists */
	s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);

	spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
}
3158
bd1034f0
AR
3159/**
3160 * s2io_mdio_write - Function to write in to MDIO registers
3161 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
3162 * @addr : address value
3163 * @value : data value
3164 * @dev : pointer to net_device structure
3165 * Description:
3166 * This function is used to write values to the MDIO registers
3167 * NONE
3168 */
3169static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
3170{
3171 u64 val64 = 0x0;
1ee6dd77
RB
3172 struct s2io_nic *sp = dev->priv;
3173 struct XENA_dev_config __iomem *bar0 = sp->bar0;
bd1034f0
AR
3174
3175 //address transaction
3176 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3177 | MDIO_MMD_DEV_ADDR(mmd_type)
3178 | MDIO_MMS_PRT_ADDR(0x0);
3179 writeq(val64, &bar0->mdio_control);
3180 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3181 writeq(val64, &bar0->mdio_control);
3182 udelay(100);
3183
3184 //Data transaction
3185 val64 = 0x0;
3186 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3187 | MDIO_MMD_DEV_ADDR(mmd_type)
3188 | MDIO_MMS_PRT_ADDR(0x0)
3189 | MDIO_MDIO_DATA(value)
3190 | MDIO_OP(MDIO_OP_WRITE_TRANS);
3191 writeq(val64, &bar0->mdio_control);
3192 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3193 writeq(val64, &bar0->mdio_control);
3194 udelay(100);
3195
3196 val64 = 0x0;
3197 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3198 | MDIO_MMD_DEV_ADDR(mmd_type)
3199 | MDIO_MMS_PRT_ADDR(0x0)
3200 | MDIO_OP(MDIO_OP_READ_TRANS);
3201 writeq(val64, &bar0->mdio_control);
3202 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3203 writeq(val64, &bar0->mdio_control);
3204 udelay(100);
3205
3206}
3207
/**
 * s2io_mdio_read - Function to read from the MDIO registers
 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
 * @addr : address value
 * @dev : pointer to net_device structure
 * Description:
 * This function is used to read values from the MDIO registers
 * Return Value:
 * The 16-bit register value read back, returned in a u64.
 */
3217static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
3218{
3219 u64 val64 = 0x0;
3220 u64 rval64 = 0x0;
1ee6dd77
RB
3221 struct s2io_nic *sp = dev->priv;
3222 struct XENA_dev_config __iomem *bar0 = sp->bar0;
bd1034f0
AR
3223
3224 /* address transaction */
3225 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3226 | MDIO_MMD_DEV_ADDR(mmd_type)
3227 | MDIO_MMS_PRT_ADDR(0x0);
3228 writeq(val64, &bar0->mdio_control);
3229 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3230 writeq(val64, &bar0->mdio_control);
3231 udelay(100);
3232
3233 /* Data transaction */
3234 val64 = 0x0;
3235 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
3236 | MDIO_MMD_DEV_ADDR(mmd_type)
3237 | MDIO_MMS_PRT_ADDR(0x0)
3238 | MDIO_OP(MDIO_OP_READ_TRANS);
3239 writeq(val64, &bar0->mdio_control);
3240 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
3241 writeq(val64, &bar0->mdio_control);
3242 udelay(100);
3243
3244 /* Read the value from regs */
3245 rval64 = readq(&bar0->mdio_control);
3246 rval64 = rval64 & 0xFFFF0000;
3247 rval64 = rval64 >> 16;
3248 return rval64;
3249}
/**
 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
 * @counter : counter value to be updated
 * @regs_stat : consecutive-alarm state word to be updated
 * @index : index of the 2-bit state field within @regs_stat
 * @flag : flag to indicate the status
 * @type : counter type
 * Description:
 * This function checks the status of the xpak counters value
 * Return Value:
 * NONE
 */
3259
3260static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
3261{
3262 u64 mask = 0x3;
3263 u64 val64;
3264 int i;
3265 for(i = 0; i <index; i++)
3266 mask = mask << 0x2;
3267
3268 if(flag > 0)
3269 {
3270 *counter = *counter + 1;
3271 val64 = *regs_stat & mask;
3272 val64 = val64 >> (index * 0x2);
3273 val64 = val64 + 1;
3274 if(val64 == 3)
3275 {
3276 switch(type)
3277 {
3278 case 1:
3279 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3280 "service. Excessive temperatures may "
3281 "result in premature transceiver "
3282 "failure \n");
3283 break;
3284 case 2:
3285 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3286 "service Excessive bias currents may "
3287 "indicate imminent laser diode "
3288 "failure \n");
3289 break;
3290 case 3:
3291 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
3292 "service Excessive laser output "
3293 "power may saturate far-end "
3294 "receiver\n");
3295 break;
3296 default:
3297 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
3298 "type \n");
3299 }
3300 val64 = 0x0;
3301 }
3302 val64 = val64 << (index * 0x2);
3303 *regs_stat = (*regs_stat & (~mask)) | (val64);
3304
3305 } else {
3306 *regs_stat = *regs_stat & (~mask);
3307 }
3308}
3309
/**
 * s2io_updt_xpak_counter - Function to update the xpak counters
 * @dev : pointer to net_device struct
 * Description:
 * This function is to update the status of the xpak counters value
 * NONE
 */
static void s2io_updt_xpak_counter(struct net_device *dev)
{
	u16 flag = 0x0;
	u16 type = 0x0;
	u16 val16 = 0x0;
	u64 val64 = 0x0;
	u64 addr = 0x0;

	struct s2io_nic *sp = dev->priv;
	struct stat_block *stat_info = sp->mac_control.stats_info;

	/* Check the communication with the MDIO slave */
	addr = 0x0000;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
	/* All-ones or all-zeroes means no slave responded on the bus */
	if((val64 == 0xFFFF) || (val64 == 0x0000))
	{
		DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
			  "Returned %llx\n", (unsigned long long)val64);
		return;
	}

	/* Check for the expecte value of 2040 at PMA address 0x0000 */
	if(val64 != 0x2040)
	{
		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
		DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
			  (unsigned long long)val64);
		return;
	}

	/* Loading the DOM register to MDIO register */
	/* NOTE(review): 0xA100 appears to be the XPAK DOM page-load
	 * register - confirm against the XPAK MSA. */
	addr = 0xA100;
	s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	/* Reading the Alarm flags */
	addr = 0xA070;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	/* Bit 7: transceiver temperature high alarm (type 1) */
	flag = CHECKBIT(val64, 0x7);
	type = 1;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
			      &stat_info->xpak_stat.xpak_regs_stat,
			      0x0, flag, type);

	if(CHECKBIT(val64, 0x6))
		stat_info->xpak_stat.alarm_transceiver_temp_low++;

	/* Bit 3: laser bias current high alarm (type 2) */
	flag = CHECKBIT(val64, 0x3);
	type = 2;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
			      &stat_info->xpak_stat.xpak_regs_stat,
			      0x2, flag, type);

	if(CHECKBIT(val64, 0x2))
		stat_info->xpak_stat.alarm_laser_bias_current_low++;

	/* Bit 1: laser output power high alarm (type 3) */
	flag = CHECKBIT(val64, 0x1);
	type = 3;
	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
			      &stat_info->xpak_stat.xpak_regs_stat,
			      0x4, flag, type);

	if(CHECKBIT(val64, 0x0))
		stat_info->xpak_stat.alarm_laser_output_power_low++;

	/* Reading the Warning flags */
	addr = 0xA074;
	val64 = 0x0;
	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);

	if(CHECKBIT(val64, 0x7))
		stat_info->xpak_stat.warn_transceiver_temp_high++;

	if(CHECKBIT(val64, 0x6))
		stat_info->xpak_stat.warn_transceiver_temp_low++;

	if(CHECKBIT(val64, 0x3))
		stat_info->xpak_stat.warn_laser_bias_current_high++;

	if(CHECKBIT(val64, 0x2))
		stat_info->xpak_stat.warn_laser_bias_current_low++;

	if(CHECKBIT(val64, 0x1))
		stat_info->xpak_stat.warn_laser_output_power_high++;

	if(CHECKBIT(val64, 0x0))
		stat_info->xpak_stat.warn_laser_output_power_low++;
}
3408
20346722 3409/**
1da177e4 3410 * wait_for_cmd_complete - waits for a command to complete.
20346722 3411 * @sp : private member of the device structure, which is a pointer to the
1da177e4 3412 * s2io_nic structure.
20346722
K
3413 * Description: Function that waits for a command to Write into RMAC
3414 * ADDR DATA registers to be completed and returns either success or
3415 * error depending on whether the command was complete or not.
1da177e4
LT
3416 * Return value:
3417 * SUCCESS on success and FAILURE on failure.
3418 */
3419
9fc93a41
SS
3420static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3421 int bit_state)
1da177e4 3422{
9fc93a41 3423 int ret = FAILURE, cnt = 0, delay = 1;
1da177e4
LT
3424 u64 val64;
3425
9fc93a41
SS
3426 if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3427 return FAILURE;
3428
3429 do {
c92ca04b 3430 val64 = readq(addr);
9fc93a41
SS
3431 if (bit_state == S2IO_BIT_RESET) {
3432 if (!(val64 & busy_bit)) {
3433 ret = SUCCESS;
3434 break;
3435 }
3436 } else {
3437 if (!(val64 & busy_bit)) {
3438 ret = SUCCESS;
3439 break;
3440 }
1da177e4 3441 }
c92ca04b
AR
3442
3443 if(in_interrupt())
9fc93a41 3444 mdelay(delay);
c92ca04b 3445 else
9fc93a41 3446 msleep(delay);
c92ca04b 3447
9fc93a41
SS
3448 if (++cnt >= 10)
3449 delay = 50;
3450 } while (cnt < 20);
1da177e4
LT
3451 return ret;
3452}
19a60522
SS
3453/*
3454 * check_pci_device_id - Checks if the device id is supported
3455 * @id : device id
3456 * Description: Function to check if the pci device id is supported by driver.
3457 * Return value: Actual device id if supported else PCI_ANY_ID
3458 */
3459static u16 check_pci_device_id(u16 id)
3460{
3461 switch (id) {
3462 case PCI_DEVICE_ID_HERC_WIN:
3463 case PCI_DEVICE_ID_HERC_UNI:
3464 return XFRAME_II_DEVICE;
3465 case PCI_DEVICE_ID_S2IO_UNI:
3466 case PCI_DEVICE_ID_S2IO_WIN:
3467 return XFRAME_I_DEVICE;
3468 default:
3469 return PCI_ANY_ID;
3470 }
3471}
1da177e4 3472
20346722
K
3473/**
3474 * s2io_reset - Resets the card.
1da177e4
LT
3475 * @sp : private member of the device structure.
3476 * Description: Function to Reset the card. This function then also
20346722 3477 * restores the previously saved PCI configuration space registers as
1da177e4
LT
3478 * the card reset also resets the configuration space.
3479 * Return value:
3480 * void.
3481 */
3482
static void s2io_reset(struct s2io_nic * sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	u16 subid, pci_cmd;
	int i;
	u16 val16;
	unsigned long long up_cnt, down_cnt, up_time, down_time, reset_cnt;
	unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt;

	DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
			__FUNCTION__, sp->dev->name);

	/* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));

	val64 = SW_RESET_ALL;
	writeq(val64, &bar0->sw_reset);
	/* CX4 cards need extra settling time after soft reset */
	if (strstr(sp->product_name, "CX4")) {
		msleep(750);
	}
	msleep(250);
	for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {

		/* Restore the PCI state saved during initialization. */
		pci_restore_state(sp->pdev);
		/* Offset 0x2 is the PCI device-ID word; a recognizable id
		 * means config space is accessible again after reset. */
		pci_read_config_word(sp->pdev, 0x2, &val16);
		if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
			break;
		msleep(200);
	}

	if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
		DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
	}

	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);

	s2io_init_pci(sp);

	/* Set swapper to enable I/O register access */
	s2io_set_swapper(sp);

	/* restore mac_addr entries */
	do_s2io_restore_unicast_mc(sp);

	/* Restore the MSIX table entries from local variables */
	restore_xmsi_data(sp);

	/* Clear certain PCI/PCI-X fields after reset */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Clear "detected parity error" bit */
		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);

		/* Clearing PCIX Ecc status register */
		pci_write_config_dword(sp->pdev, 0x68, 0x7C);

		/* Clearing PCI_STATUS error reflected here */
		writeq(s2BIT(62), &bar0->txpic_int_reg);
	}

	/* Reset device statistics maintained by OS */
	memset(&sp->stats, 0, sizeof (struct net_device_stats));

	/* Preserve the counters that must survive the stats-block wipe */
	up_cnt = sp->mac_control.stats_info->sw_stat.link_up_cnt;
	down_cnt = sp->mac_control.stats_info->sw_stat.link_down_cnt;
	up_time = sp->mac_control.stats_info->sw_stat.link_up_time;
	down_time = sp->mac_control.stats_info->sw_stat.link_down_time;
	reset_cnt = sp->mac_control.stats_info->sw_stat.soft_reset_cnt;
	mem_alloc_cnt = sp->mac_control.stats_info->sw_stat.mem_allocated;
	mem_free_cnt = sp->mac_control.stats_info->sw_stat.mem_freed;
	watchdog_cnt = sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt;
	/* save link up/down time/cnt, reset/memory/watchdog cnt */
	memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
	/* restore link up/down time/cnt, reset/memory/watchdog cnt */
	sp->mac_control.stats_info->sw_stat.link_up_cnt = up_cnt;
	sp->mac_control.stats_info->sw_stat.link_down_cnt = down_cnt;
	sp->mac_control.stats_info->sw_stat.link_up_time = up_time;
	sp->mac_control.stats_info->sw_stat.link_down_time = down_time;
	sp->mac_control.stats_info->sw_stat.soft_reset_cnt = reset_cnt;
	sp->mac_control.stats_info->sw_stat.mem_allocated = mem_alloc_cnt;
	sp->mac_control.stats_info->sw_stat.mem_freed = mem_free_cnt;
	sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt = watchdog_cnt;

	/* SXE-002: Configure link and activity LED to turn it off */
	subid = sp->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (sp->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
	}

	/*
	 * Clear spurious ECC interrupts that would have occured on
	 * XFRAME II cards after reset.
	 */
	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = readq(&bar0->pcc_err_reg);
		writeq(val64, &bar0->pcc_err_reg);
	}

	sp->device_enabled_once = FALSE;
}
3589
/**
 * s2io_set_swapper - to set the swapper control on the card
 * @sp : private member of the device structure,
 * pointer to the s2io_nic structure.
 * Description: Function to set the swapper control on the card
 * correctly depending on the 'endianness' of the system.
 * Return value:
 * SUCCESS on success and FAILURE on failure.
 */
3599
static int s2io_set_swapper(struct s2io_nic * sp)
{
	struct net_device *dev = sp->dev;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64, valt, valr;

	/*
	 * Set proper endian settings and verify the same by reading
	 * the PIF Feed-back register.
	 */

	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		int i = 0;
		/* Candidate read-path swapper settings, tried in order */
		u64 value[] = { 0xC30000C3C30000C3ULL,   /* FE=1, SE=1 */
				0x8100008181000081ULL,	/* FE=1, SE=0 */
				0x4200004242000042ULL,	/* FE=0, SE=1 */
				0};			/* FE=0, SE=0 */

		while(i<4) {
			writeq(value[i], &bar0->swapper_ctrl);
			val64 = readq(&bar0->pif_rd_swapper_fb);
			if (val64 == 0x0123456789ABCDEFULL)
				break;
			i++;
		}
		if (i == 4) {
			DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
				dev->name);
			DBG_PRINT(ERR_DBG, "feedback read %llx\n",
				(unsigned long long) val64);
			return FAILURE;
		}
		valr = value[i];
	} else {
		valr = readq(&bar0->swapper_ctrl);
	}

	/* Verify the write path through the xmsi_address scratch register */
	valt = 0x0123456789ABCDEFULL;
	writeq(valt, &bar0->xmsi_address);
	val64 = readq(&bar0->xmsi_address);

	if(val64 != valt) {
		int i = 0;
		/* Candidate write-path swapper settings, OR'ed with the
		 * read-path setting found above */
		u64 value[] = { 0x00C3C30000C3C300ULL,  /* FE=1, SE=1 */
				0x0081810000818100ULL,	/* FE=1, SE=0 */
				0x0042420000424200ULL,	/* FE=0, SE=1 */
				0};			/* FE=0, SE=0 */

		while(i<4) {
			writeq((value[i] | valr), &bar0->swapper_ctrl);
			writeq(valt, &bar0->xmsi_address);
			val64 = readq(&bar0->xmsi_address);
			if(val64 == valt)
				break;
			i++;
		}
		if(i == 4) {
			unsigned long long x = val64;
			DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
			DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
			return FAILURE;
		}
	}
	val64 = readq(&bar0->swapper_ctrl);
	val64 &= 0xFFFF000000000000ULL;

#ifdef __BIG_ENDIAN
	/*
	 * The device by default set to a big endian format, so a
	 * big endian driver need not set anything.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		 SWAPPER_CTRL_TXP_SE |
		 SWAPPER_CTRL_TXD_R_FE |
		 SWAPPER_CTRL_TXD_W_FE |
		 SWAPPER_CTRL_TXF_R_FE |
		 SWAPPER_CTRL_RXD_R_FE |
		 SWAPPER_CTRL_RXD_W_FE |
		 SWAPPER_CTRL_RXF_W_FE |
		 SWAPPER_CTRL_XMSI_FE |
		 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
	if (sp->config.intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#else
	/*
	 * Initially we enable all bits to make it accessible by the
	 * driver, then we selectively enable only those bits that
	 * we want to set.
	 */
	val64 |= (SWAPPER_CTRL_TXP_FE |
		 SWAPPER_CTRL_TXP_SE |
		 SWAPPER_CTRL_TXD_R_FE |
		 SWAPPER_CTRL_TXD_R_SE |
		 SWAPPER_CTRL_TXD_W_FE |
		 SWAPPER_CTRL_TXD_W_SE |
		 SWAPPER_CTRL_TXF_R_FE |
		 SWAPPER_CTRL_RXD_R_FE |
		 SWAPPER_CTRL_RXD_R_SE |
		 SWAPPER_CTRL_RXD_W_FE |
		 SWAPPER_CTRL_RXD_W_SE |
		 SWAPPER_CTRL_RXF_W_FE |
		 SWAPPER_CTRL_XMSI_FE |
		 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
	if (sp->config.intr_type == INTA)
		val64 |= SWAPPER_CTRL_XMSI_SE;
	writeq(val64, &bar0->swapper_ctrl);
#endif
	val64 = readq(&bar0->swapper_ctrl);

	/*
	 * Verifying if endian settings are accurate by reading a
	 * feedback register.
	 */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x0123456789ABCDEFULL) {
		/* Endian settings are incorrect, calls for another dekko. */
		DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
			  dev->name);
		DBG_PRINT(ERR_DBG, "feedback read %llx\n",
			  (unsigned long long) val64);
		return FAILURE;
	}

	return SUCCESS;
}
3727
1ee6dd77 3728static int wait_for_msix_trans(struct s2io_nic *nic, int i)
cc6e7c44 3729{
1ee6dd77 3730 struct XENA_dev_config __iomem *bar0 = nic->bar0;
cc6e7c44
RA
3731 u64 val64;
3732 int ret = 0, cnt = 0;
3733
3734 do {
3735 val64 = readq(&bar0->xmsi_access);
b7b5a128 3736 if (!(val64 & s2BIT(15)))
cc6e7c44
RA
3737 break;
3738 mdelay(1);
3739 cnt++;
3740 } while(cnt < 5);
3741 if (cnt == 5) {
3742 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
3743 ret = 1;
3744 }
3745
3746 return ret;
3747}
3748
/*
 * restore_xmsi_data - re-program the adapter's MSI-X address/data pairs.
 *
 * Writes the previously saved address/data for each of the
 * MAX_REQUESTED_MSI_X vectors back into the XMSI shadow registers and
 * triggers an access transaction (s2BIT(15) starts it; s2BIT(7)
 * presumably selects a write, and bits 26-31 select the vector — confirm
 * against the Xframe register spec).  Failed transactions are logged by
 * wait_for_msix_trans() and skipped.
 */
static void restore_xmsi_data(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 val64;
	int i;

	for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
		/* Stage the saved address/data for this vector */
		writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
		writeq(nic->msix_info[i].data, &bar0->xmsi_data);
		/* Kick off the transaction for vector i */
		val64 = (s2BIT(7) | s2BIT(15) | vBIT(i, 26, 6));
		writeq(val64, &bar0->xmsi_access);
		if (wait_for_msix_trans(nic, i)) {
			DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
			continue;
		}
	}
}
3766
/*
 * store_xmsi_data - read and cache the adapter's MSI-X address/data pairs.
 *
 * For each vector a read transaction is triggered on the XMSI access
 * register (s2BIT(15) starts the access, bits 26-31 select the vector;
 * unlike restore_xmsi_data() no s2BIT(7) is set, presumably marking a
 * read — confirm against the Xframe register spec).  Non-zero results
 * are cached in nic->msix_info[] so restore_xmsi_data() can re-program
 * them later.
 */
static void store_xmsi_data(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 val64, addr, data;
	int i;

	/* Store and display */
	for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
		val64 = (s2BIT(15) | vBIT(i, 26, 6));
		writeq(val64, &bar0->xmsi_access);
		if (wait_for_msix_trans(nic, i)) {
			DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
			continue;
		}
		addr = readq(&bar0->xmsi_address);
		data = readq(&bar0->xmsi_data);
		/* Only cache vectors that are actually populated */
		if (addr && data) {
			nic->msix_info[i].addr = addr;
			nic->msix_info[i].data = data;
		}
	}
}
3789
/*
 * s2io_enable_msi_x - allocate and enable MSI-X vectors for the NIC.
 *
 * Allocates the msix_entry/s2io_msix_entry tables, maps each Tx FIFO and
 * Rx ring to an MSI-X vector via the tx_mat/rx_mat steering registers
 * (msix_indx starts at 1, leaving vector 0 unassigned here — presumably
 * reserved for the general/alarm interrupt, confirm with add_isr), then
 * calls pci_enable_msix().  If fewer vectors than requested (but at least
 * tx_fifo_num + rx_ring_num + 1) are available, it retries with that
 * count.  On failure all allocations are released and -ENOMEM returned;
 * sw_stat memory counters are kept in sync with every kcalloc/kfree.
 */
static int s2io_enable_msi_x(struct s2io_nic *nic)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	u64 tx_mat, rx_mat;
	u16 msi_control; /* Temp variable */
	int ret, i, j, msix_indx = 1;

	nic->entries = kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct msix_entry),
			       GFP_KERNEL);
	if (!nic->entries) {
		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
			__FUNCTION__);
		nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
		return -ENOMEM;
	}
	nic->mac_control.stats_info->sw_stat.mem_allocated
		+= (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));

	nic->s2io_entries =
		kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct s2io_msix_entry),
			GFP_KERNEL);
	if (!nic->s2io_entries) {
		DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
			__FUNCTION__);
		nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
		/* Undo the first allocation and its accounting */
		kfree(nic->entries);
		nic->mac_control.stats_info->sw_stat.mem_freed
			+= (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
		return -ENOMEM;
	}
	nic->mac_control.stats_info->sw_stat.mem_allocated
		+= (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));

	/* Initialize every entry as unused */
	for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
		nic->entries[i].entry = i;
		nic->s2io_entries[i].entry = i;
		nic->s2io_entries[i].arg = NULL;
		nic->s2io_entries[i].in_use = 0;
	}

	/* Steer each Tx FIFO interrupt to its own vector */
	tx_mat = readq(&bar0->tx_mat0_n[0]);
	for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
		tx_mat |= TX_MAT_SET(i, msix_indx);
		nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
		nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
		nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
	}
	writeq(tx_mat, &bar0->tx_mat0_n[0]);

	/* Steer each Rx ring interrupt to its own vector */
	rx_mat = readq(&bar0->rx_mat);
	for (j = 0; j < nic->config.rx_ring_num; j++, msix_indx++) {
		rx_mat |= RX_MAT_SET(j, msix_indx);
		nic->s2io_entries[msix_indx].arg
			= &nic->mac_control.rings[j];
		nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
		nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
	}
	writeq(rx_mat, &bar0->rx_mat);

	nic->avail_msix_vectors = 0;
	ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
	/* We fail init if error or we get less vectors than min required */
	if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
		nic->avail_msix_vectors = ret;
		ret = pci_enable_msix(nic->pdev, nic->entries, ret);
	}
	if (ret) {
		DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
		/* Release both tables and reverse the stats accounting */
		kfree(nic->entries);
		nic->mac_control.stats_info->sw_stat.mem_freed
			+= (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
		kfree(nic->s2io_entries);
		nic->mac_control.stats_info->sw_stat.mem_freed
			+= (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
		nic->entries = NULL;
		nic->s2io_entries = NULL;
		nic->avail_msix_vectors = 0;
		return -ENOMEM;
	}
	if (!nic->avail_msix_vectors)
		nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;

	/*
	 * To enable MSI-X, MSI also needs to be enabled, due to a bug
	 * in the herc NIC. (Temp change, needs to be removed later)
	 */
	pci_read_config_word(nic->pdev, 0x42, &msi_control);
	msi_control |= 0x1; /* Enable MSI */
	pci_write_config_word(nic->pdev, 0x42, msi_control);

	return 0;
}
3882
8abc4d5b 3883/* Handle software interrupt used during MSI(X) test */
33390a70 3884static irqreturn_t s2io_test_intr(int irq, void *dev_id)
8abc4d5b
SS
3885{
3886 struct s2io_nic *sp = dev_id;
3887
3888 sp->msi_detected = 1;
3889 wake_up(&sp->msi_wait);
3890
3891 return IRQ_HANDLED;
3892}
3893
/* Test interrupt path by forcing a software IRQ */
static int s2io_test_msi(struct s2io_nic *sp)
{
	struct pci_dev *pdev = sp->pdev;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	int err;
	u64 val64, saved64;

	/* Hook the test ISR onto MSI-X vector 1 */
	err = request_irq(sp->entries[1].vector, s2io_test_intr, 0,
			  sp->name, sp);
	if (err) {
		DBG_PRINT(ERR_DBG, "%s: PCI %s: cannot assign irq %d\n",
		       sp->dev->name, pci_name(pdev), pdev->irq);
		return err;
	}

	init_waitqueue_head (&sp->msi_wait);
	sp->msi_detected = 0;

	/*
	 * Arm the scheduled-interrupt timer as a one-shot routed to
	 * MSI vector 1; the original register value is saved so it can
	 * be restored after the test.
	 */
	saved64 = val64 = readq(&bar0->scheduled_int_ctrl);
	val64 |= SCHED_INT_CTRL_ONE_SHOT;
	val64 |= SCHED_INT_CTRL_TIMER_EN;
	val64 |= SCHED_INT_CTRL_INT2MSI(1);
	writeq(val64, &bar0->scheduled_int_ctrl);

	/* Wait up to 100 ms for s2io_test_intr() to fire */
	wait_event_timeout(sp->msi_wait, sp->msi_detected, HZ/10);

	if (!sp->msi_detected) {
		/* MSI(X) test failed, go back to INTx mode */
		DBG_PRINT(ERR_DBG, "%s: PCI %s: No interrupt was generated "
			"using MSI(X) during test\n", sp->dev->name,
			pci_name(pdev));

		err = -EOPNOTSUPP;
	}

	free_irq(sp->entries[1].vector, sp);

	/* Restore the scheduled-interrupt control register */
	writeq(saved64, &bar0->scheduled_int_ctrl);

	return err;
}
18b2b7bd
SH
3936
/*
 * remove_msix_isr - tear down all registered MSI-X interrupt handlers.
 *
 * Frees every IRQ that was successfully registered, releases the vector
 * bookkeeping tables, clears the MSI enable bit at config offset 0x42
 * (the counterpart of the herc workaround in s2io_enable_msi_x()) and
 * finally disables MSI-X on the PCI device.
 */
static void remove_msix_isr(struct s2io_nic *sp)
{
	int i;
	u16 msi_control;

	for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
		if (sp->s2io_entries[i].in_use ==
			MSIX_REGISTERED_SUCCESS) {
			int vector = sp->entries[i].vector;
			void *arg = sp->s2io_entries[i].arg;
			free_irq(vector, arg);
		}
	}

	kfree(sp->entries);
	kfree(sp->s2io_entries);
	sp->entries = NULL;
	sp->s2io_entries = NULL;

	pci_read_config_word(sp->pdev, 0x42, &msi_control);
	msi_control &= 0xFFFE; /* Disable MSI */
	pci_write_config_word(sp->pdev, 0x42, msi_control);

	pci_disable_msix(sp->pdev);
}
3962
3963static void remove_inta_isr(struct s2io_nic *sp)
3964{
3965 struct net_device *dev = sp->dev;
3966
3967 free_irq(sp->pdev->irq, dev);
3968}
3969
1da177e4
LT
3970/* ********************************************************* *
3971 * Functions defined below concern the OS part of the driver *
3972 * ********************************************************* */
3973
/**
 * s2io_open - open entry point of the driver
 * @dev : pointer to the device structure.
 * Description:
 * This function is the open entry point of the driver. It mainly calls a
 * function to allocate Rx buffers and inserts them into the buffer
 * descriptors and then enables the Rx part of the NIC.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */

static int s2io_open(struct net_device *dev)
{
	struct s2io_nic *sp = dev->priv;
	int err = 0;

	/*
	 * Make sure you have link off by default every time
	 * Nic is initialized
	 */
	netif_carrier_off(dev);
	sp->last_link_state = 0;

	if (sp->config.intr_type == MSI_X) {
		int ret = s2io_enable_msi_x(sp);

		if (!ret) {
			/* Prove MSI-X delivery works before committing */
			ret = s2io_test_msi(sp);
			/* rollback MSI-X, will re-enable during add_isr() */
			remove_msix_isr(sp);
		}
		if (ret) {

			DBG_PRINT(ERR_DBG,
			  "%s: MSI-X requested but failed to enable\n",
			  dev->name);
			/* Fall back to legacy line-based interrupts */
			sp->config.intr_type = INTA;
		}
	}

	/* NAPI doesn't work well with MSI(X) */
	if (sp->config.intr_type != INTA) {
		if(sp->config.napi)
			sp->config.napi = 0;
	}

	/* Initialize H/W and enable interrupts */
	err = s2io_card_up(sp);
	if (err) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
			  dev->name);
		goto hw_init_failed;
	}

	if (do_s2io_prog_unicast(dev, dev->dev_addr) == FAILURE) {
		DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
		/* Undo card bring-up before failing */
		s2io_card_down(sp);
		err = -ENODEV;
		goto hw_init_failed;
	}
	s2io_start_all_tx_queue(sp);
	return 0;

hw_init_failed:
	/* Release MSI-X tables allocated by s2io_enable_msi_x() above,
	 * keeping the sw_stat memory accounting consistent. */
	if (sp->config.intr_type == MSI_X) {
		if (sp->entries) {
			kfree(sp->entries);
			sp->mac_control.stats_info->sw_stat.mem_freed
			+= (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
		}
		if (sp->s2io_entries) {
			kfree(sp->s2io_entries);
			sp->mac_control.stats_info->sw_stat.mem_freed
			+= (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
		}
	}
	return err;
}
4053
/**
 * s2io_close -close entry point of the driver
 * @dev : device pointer.
 * Description:
 * This is the stop entry point of the driver. It needs to undo exactly
 * whatever was done by the open entry point,thus it's usually referred to
 * as the close function.Among other things this function mainly stops the
 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
 * Return value:
 * 0 on success and an appropriate (-)ve integer as defined in errno.h
 * file on failure.
 */

static int s2io_close(struct net_device *dev)
{
	struct s2io_nic *sp = dev->priv;
	struct config_param *config = &sp->config;
	u64 tmp64;
	int offset;

	/* Return if the device is already closed *
	 * Can happen when s2io_card_up failed in change_mtu *
	 */
	if (!is_s2io_card_up(sp))
		return 0;

	s2io_stop_all_tx_queue(sp);
	/* delete all populated mac entries; offset 0 is skipped
	 * (presumably the primary MAC address — confirm) */
	for (offset = 1; offset < config->max_mc_addr; offset++) {
		tmp64 = do_s2io_read_unicast_mc(sp, offset);
		if (tmp64 != S2IO_DISABLE_MAC_ENTRY)
			do_s2io_delete_unicast_mc(sp, tmp64);
	}

	/* Reset card, kill tasklet and free Tx and Rx buffers. */
	s2io_card_down(sp);

	return 0;
}
4093
/**
 * s2io_xmit - Tx entry point of te driver
 * @skb : the socket buffer containing the Tx data.
 * @dev : device pointer.
 * Description :
 * This function is the Tx entry point of the driver. S2IO NIC supports
 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
 * NOTE: when device cant queue the pkt,just the trans_start variable will
 * not be upadted.
 * Return value:
 * 0 on success & 1 on failure.
 */

static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct s2io_nic *sp = dev->priv;
	u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
	register u64 val64;
	struct TxD *txdp;
	struct TxFIFO_element __iomem *tx_fifo;
	unsigned long flags = 0;
	u16 vlan_tag = 0;
	struct fifo_info *fifo = NULL;
	struct mac_info *mac_control;
	struct config_param *config;
	int do_spin_lock = 1;
	int offload_type;
	int enable_per_list_interrupt = 0;
	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

	mac_control = &sp->mac_control;
	config = &sp->config;

	DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);

	/* Nothing to send: drop silently and report success */
	if (unlikely(skb->len <= 0)) {
		DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
		dev_kfree_skb_any(skb);
		return 0;
	}

	if (!is_s2io_card_up(sp)) {
		DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
			  dev->name);
		dev_kfree_skb(skb);
		return 0;
	}

	queue = 0;
	if (sp->vlgrp && vlan_tx_tag_present(skb))
		vlan_tag = vlan_tx_tag_get(skb);
	/*
	 * FIFO selection.  For default steering, non-fragmented IPv4
	 * TCP/UDP packets are hashed on source+dest L4 port into the
	 * per-protocol FIFO groups.  Note the tcphdr cast is also used
	 * for UDP: source/dest port offsets match in both headers.
	 */
	if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *ip;
			struct tcphdr *th;
			ip = ip_hdr(skb);

			if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) {
				th = (struct tcphdr *)(((unsigned char *)ip) +
						ip->ihl*4);

				if (ip->protocol == IPPROTO_TCP) {
					queue_len = sp->total_tcp_fifos;
					queue = (ntohs(th->source) +
							ntohs(th->dest)) &
					    sp->fifo_selector[queue_len - 1];
					if (queue >= queue_len)
						queue = queue_len - 1;
				} else if (ip->protocol == IPPROTO_UDP) {
					queue_len = sp->total_udp_fifos;
					queue = (ntohs(th->source) +
							ntohs(th->dest)) &
					    sp->fifo_selector[queue_len - 1];
					if (queue >= queue_len)
						queue = queue_len - 1;
					queue += sp->udp_fifo_idx;
					if (skb->len > 1024)
						enable_per_list_interrupt = 1;
					/* UDP path uses trylock below */
					do_spin_lock = 0;
				}
			}
		}
	} else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
		/* get fifo number based on skb->priority value */
		queue = config->fifo_mapping
			[skb->priority & (MAX_TX_FIFOS - 1)];
	fifo = &mac_control->fifos[queue];

	if (do_spin_lock)
		spin_lock_irqsave(&fifo->tx_lock, flags);
	else {
		/* Contended UDP FIFO: ask the stack to retry instead
		 * of spinning */
		if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
			return NETDEV_TX_LOCKED;
	}

#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	if (sp->config.multiq) {
		if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	} else
#endif
	if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
		if (netif_queue_stopped(dev)) {
			spin_unlock_irqrestore(&fifo->tx_lock, flags);
			return NETDEV_TX_BUSY;
		}
	}

	put_off = (u16) fifo->tx_curr_put_info.offset;
	get_off = (u16) fifo->tx_curr_get_info.offset;
	txdp = (struct TxD *) fifo->list_info[put_off].list_virt_addr;

	queue_len = fifo->tx_curr_put_info.fifo_len + 1;
	/* Avoid "put" pointer going beyond "get" pointer */
	if (txdp->Host_Control ||
		   ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
		s2io_stop_tx_queue(sp, fifo->fifo_no);
		dev_kfree_skb(skb);
		spin_unlock_irqrestore(&fifo->tx_lock, flags);
		return 0;
	}

	offload_type = s2io_offload_type(skb);
	if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
		txdp->Control_1 |= TXD_TCP_LSO_EN;
		txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
	}
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txdp->Control_2 |=
		    (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
		     TXD_TX_CKO_UDP_EN);
	}
	txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
	txdp->Control_1 |= TXD_LIST_OWN_XENA;
	txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
	if (enable_per_list_interrupt)
		if (put_off & (queue_len >> 5))
			txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
	if (vlan_tag) {
		txdp->Control_2 |= TXD_VLAN_ENABLE;
		txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
	}

	frg_len = skb->len - skb->data_len;
	if (offload_type == SKB_GSO_UDP) {
		int ufo_size;

		/* UFO: descriptor 0 carries an 8-byte in-band header
		 * holding the IPv6 fragment id */
		ufo_size = s2io_udp_mss(skb);
		ufo_size &= ~7;
		txdp->Control_1 |= TXD_UFO_EN;
		txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
		txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
#ifdef __BIG_ENDIAN
		fifo->ufo_in_band_v[put_off] =
				(u64)skb_shinfo(skb)->ip6_frag_id;
#else
		fifo->ufo_in_band_v[put_off] =
				(u64)skb_shinfo(skb)->ip6_frag_id << 32;
#endif
		txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v;
		txdp->Buffer_Pointer = pci_map_single(sp->pdev,
					fifo->ufo_in_band_v,
					sizeof(u64), PCI_DMA_TODEVICE);
		if((txdp->Buffer_Pointer == 0) ||
			(txdp->Buffer_Pointer == DMA_ERROR_CODE))
			goto pci_map_failed;
		txdp++;
	}

	/* Map the linear part of the skb */
	txdp->Buffer_Pointer = pci_map_single
	    (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
	if((txdp->Buffer_Pointer == 0) ||
		(txdp->Buffer_Pointer == DMA_ERROR_CODE))
		goto pci_map_failed;

	txdp->Host_Control = (unsigned long) skb;
	txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
	if (offload_type == SKB_GSO_UDP)
		txdp->Control_1 |= TXD_UFO_EN;

	frg_cnt = skb_shinfo(skb)->nr_frags;
	/* For fragmented SKB. */
	for (i = 0; i < frg_cnt; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		/* A '0' length fragment will be ignored */
		if (!frag->size)
			continue;
		txdp++;
		txdp->Buffer_Pointer = (u64) pci_map_page
		    (sp->pdev, frag->page, frag->page_offset,
		     frag->size, PCI_DMA_TODEVICE);
		txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
		if (offload_type == SKB_GSO_UDP)
			txdp->Control_1 |= TXD_UFO_EN;
	}
	txdp->Control_1 |= TXD_GATHER_CODE_LAST;

	if (offload_type == SKB_GSO_UDP)
		frg_cnt++; /* as Txd0 was used for inband header */

	/* Hand the descriptor list to the hardware */
	tx_fifo = mac_control->tx_FIFO_start[queue];
	val64 = fifo->list_info[put_off].list_phy_addr;
	writeq(val64, &tx_fifo->TxDL_Pointer);

	val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
		 TX_FIFO_LAST_LIST);
	if (offload_type)
		val64 |= TX_FIFO_SPECIAL_FUNC;

	writeq(val64, &tx_fifo->List_Control);

	/* Order MMIO writes before releasing the lock */
	mmiowb();

	put_off++;
	if (put_off == fifo->tx_curr_put_info.fifo_len + 1)
		put_off = 0;
	fifo->tx_curr_put_info.offset = put_off;

	/* Avoid "put" pointer going beyond "get" pointer */
	if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
		sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
		DBG_PRINT(TX_DBG,
			  "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
			  put_off, get_off);
		s2io_stop_tx_queue(sp, fifo->fifo_no);
	}
	mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
	dev->trans_start = jiffies;
	spin_unlock_irqrestore(&fifo->tx_lock, flags);

	return 0;
pci_map_failed:
	/* DMA mapping failed: count it, stop the queue and drop the skb */
	stats->pci_map_fail_cnt++;
	s2io_stop_tx_queue(sp, fifo->fifo_no);
	stats->mem_freed += skb->truesize;
	dev_kfree_skb(skb);
	spin_unlock_irqrestore(&fifo->tx_lock, flags);
	return 0;
}
4336
25fff88e
K
4337static void
4338s2io_alarm_handle(unsigned long data)
4339{
1ee6dd77 4340 struct s2io_nic *sp = (struct s2io_nic *)data;
8116f3cf 4341 struct net_device *dev = sp->dev;
25fff88e 4342
8116f3cf 4343 s2io_handle_errors(dev);
25fff88e
K
4344 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
4345}
4346
/*
 * s2io_chk_rx_buffers - replenish Rx buffers for a ring after Rx work.
 * @sp:    device private structure
 * @rng_n: ring number to check
 *
 * Non-LRO path: if the ring's buffer level is PANIC (and the refill
 * tasklet is idle) refill inline, clearing the tasklet-busy bit when
 * done; at LOW level defer the refill to the tasklet.  LRO path: always
 * refill inline.  Returns -1 only when an inline PANIC refill runs out
 * of memory, otherwise 0.
 */
static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n)
{
	int rxb_size, level;

	if (!sp->lro) {
		rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
		level = rx_buffer_level(sp, rxb_size, rng_n);

		if ((level == PANIC) && (!TASKLET_IN_USE)) {
			int ret;
			DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
			DBG_PRINT(INTR_DBG, "PANIC levels\n");
			if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
				DBG_PRINT(INFO_DBG, "Out of memory in %s",
					  __FUNCTION__);
				/* Release the tasklet-busy bit taken via
				 * the TASKLET_IN_USE test above */
				clear_bit(0, (&sp->tasklet_status));
				return -1;
			}
			clear_bit(0, (&sp->tasklet_status));
		} else if (level == LOW)
			tasklet_schedule(&sp->task);

	} else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
			DBG_PRINT(INFO_DBG, "%s:Out of memory", sp->dev->name);
			DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
	}
	return 0;
}
4375
/*
 * s2io_msix_ring_handle - MSI-X interrupt handler for one Rx ring.
 *
 * Processes received packets on the ring and replenishes its Rx
 * buffers.  Interrupts arriving while the card is down are simply
 * acknowledged.
 */
static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
{
	struct ring_info *ring = (struct ring_info *)dev_id;
	struct s2io_nic *sp = ring->nic;

	if (!is_s2io_card_up(sp))
		return IRQ_HANDLED;

	rx_intr_handler(ring);
	s2io_chk_rx_buffers(sp, ring->ring_no);

	return IRQ_HANDLED;
}
4389
/*
 * s2io_msix_fifo_handle - MSI-X interrupt handler for one Tx FIFO.
 *
 * Runs Tx completion processing for the FIFO; interrupts arriving
 * while the card is down are simply acknowledged.
 */
static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
{
	struct fifo_info *fifo = (struct fifo_info *)dev_id;
	struct s2io_nic *sp = fifo->nic;

	if (!is_s2io_card_up(sp))
		return IRQ_HANDLED;

	tx_intr_handler(fifo);
	return IRQ_HANDLED;
}
/*
 * s2io_txpic_intr_handle - service GPIO (link up/down) PIC interrupts.
 *
 * Decodes the GPIO interrupt register: when both up and down are
 * latched the state is ambiguous and both interrupts are unmasked so
 * the link can be re-evaluated; otherwise the link state machine is
 * driven (enable adapter + LED on link-up, LED off on link-down) and
 * the opposite-edge interrupt is unmasked while the same-edge one is
 * masked.  Several readq() results are intentionally discarded
 * (adapter_status in both edge branches, gpio_int_mask at the end) —
 * presumably reads with side effects or leftover code; confirm before
 * removing.
 */
static void s2io_txpic_intr_handle(struct s2io_nic *sp)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;

	val64 = readq(&bar0->pic_int_status);
	if (val64 & PIC_INT_GPIO) {
		val64 = readq(&bar0->gpio_int_reg);
		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
		    (val64 & GPIO_INT_REG_LINK_UP)) {
			/*
			 * This is unstable state so clear both up/down
			 * interrupt and adapter to re-evaluate the link state.
			 */
			val64 |= GPIO_INT_REG_LINK_DOWN;
			val64 |= GPIO_INT_REG_LINK_UP;
			writeq(val64, &bar0->gpio_int_reg);
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~(GPIO_INT_MASK_LINK_UP |
				   GPIO_INT_MASK_LINK_DOWN);
			writeq(val64, &bar0->gpio_int_mask);
		}
		else if (val64 & GPIO_INT_REG_LINK_UP) {
			val64 = readq(&bar0->adapter_status);
			/* Enable Adapter */
			val64 = readq(&bar0->adapter_control);
			val64 |= ADAPTER_CNTL_EN;
			writeq(val64, &bar0->adapter_control);
			val64 |= ADAPTER_LED_ON;
			writeq(val64, &bar0->adapter_control);
			if (!sp->device_enabled_once)
				sp->device_enabled_once = 1;

			s2io_link(sp, LINK_UP);
			/*
			 * unmask link down interrupt and mask link-up
			 * intr
			 */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_DOWN;
			val64 |= GPIO_INT_MASK_LINK_UP;
			writeq(val64, &bar0->gpio_int_mask);

		}else if (val64 & GPIO_INT_REG_LINK_DOWN) {
			val64 = readq(&bar0->adapter_status);
			s2io_link(sp, LINK_DOWN);
			/* Link is down so unmaks link up interrupt */
			val64 = readq(&bar0->gpio_int_mask);
			val64 &= ~GPIO_INT_MASK_LINK_UP;
			val64 |= GPIO_INT_MASK_LINK_DOWN;
			writeq(val64, &bar0->gpio_int_mask);

			/* turn off LED */
			val64 = readq(&bar0->adapter_control);
			val64 = val64 &(~ADAPTER_LED_ON);
			writeq(val64, &bar0->adapter_control);
		}
	}
	val64 = readq(&bar0->gpio_int_mask);
}
4461
8116f3cf
SS
4462/**
4463 * do_s2io_chk_alarm_bit - Check for alarm and incrment the counter
4464 * @value: alarm bits
4465 * @addr: address value
4466 * @cnt: counter variable
4467 * Description: Check for alarm and increment the counter
4468 * Return Value:
4469 * 1 - if alarm bit set
4470 * 0 - if alarm bit is not set
4471 */
43b7c451 4472static int do_s2io_chk_alarm_bit(u64 value, void __iomem * addr,
8116f3cf
SS
4473 unsigned long long *cnt)
4474{
4475 u64 val64;
4476 val64 = readq(addr);
4477 if ( val64 & value ) {
4478 writeq(val64, addr);
4479 (*cnt)++;
4480 return 1;
4481 }
4482 return 0;
4483
4484}
4485
4486/**
4487 * s2io_handle_errors - Xframe error indication handler
4488 * @nic: device private variable
4489 * Description: Handle alarms such as loss of link, single or
4490 * double ECC errors, critical and serious errors.
4491 * Return Value:
4492 * NONE
4493 */
4494static void s2io_handle_errors(void * dev_id)
4495{
4496 struct net_device *dev = (struct net_device *) dev_id;
4497 struct s2io_nic *sp = dev->priv;
4498 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4499 u64 temp64 = 0,val64=0;
4500 int i = 0;
4501
4502 struct swStat *sw_stat = &sp->mac_control.stats_info->sw_stat;
4503 struct xpakStat *stats = &sp->mac_control.stats_info->xpak_stat;
4504
92b84437 4505 if (!is_s2io_card_up(sp))
8116f3cf
SS
4506 return;
4507
4508 if (pci_channel_offline(sp->pdev))
4509 return;
4510
4511 memset(&sw_stat->ring_full_cnt, 0,
4512 sizeof(sw_stat->ring_full_cnt));
4513
4514 /* Handling the XPAK counters update */
4515 if(stats->xpak_timer_count < 72000) {
4516 /* waiting for an hour */
4517 stats->xpak_timer_count++;
4518 } else {
4519 s2io_updt_xpak_counter(dev);
4520 /* reset the count to zero */
4521 stats->xpak_timer_count = 0;
4522 }
4523
4524 /* Handling link status change error Intr */
4525 if (s2io_link_fault_indication(sp) == MAC_RMAC_ERR_TIMER) {
4526 val64 = readq(&bar0->mac_rmac_err_reg);
4527 writeq(val64, &bar0->mac_rmac_err_reg);
4528 if (val64 & RMAC_LINK_STATE_CHANGE_INT)
4529 schedule_work(&sp->set_link_task);
4530 }
4531
4532 /* In case of a serious error, the device will be Reset. */
4533 if (do_s2io_chk_alarm_bit(SERR_SOURCE_ANY, &bar0->serr_source,
4534 &sw_stat->serious_err_cnt))
4535 goto reset;
4536
4537 /* Check for data parity error */
4538 if (do_s2io_chk_alarm_bit(GPIO_INT_REG_DP_ERR_INT, &bar0->gpio_int_reg,
4539 &sw_stat->parity_err_cnt))
4540 goto reset;
4541
4542 /* Check for ring full counter */
4543 if (sp->device_type == XFRAME_II_DEVICE) {
4544 val64 = readq(&bar0->ring_bump_counter1);
4545 for (i=0; i<4; i++) {
4546 temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
4547 temp64 >>= 64 - ((i+1)*16);
4548 sw_stat->ring_full_cnt[i] += temp64;
4549 }
4550
4551 val64 = readq(&bar0->ring_bump_counter2);
4552 for (i=0; i<4; i++) {
4553 temp64 = ( val64 & vBIT(0xFFFF,(i*16),16));
4554 temp64 >>= 64 - ((i+1)*16);
4555 sw_stat->ring_full_cnt[i+4] += temp64;
4556 }
4557 }
4558
4559 val64 = readq(&bar0->txdma_int_status);
4560 /*check for pfc_err*/
4561 if (val64 & TXDMA_PFC_INT) {
4562 if (do_s2io_chk_alarm_bit(PFC_ECC_DB_ERR | PFC_SM_ERR_ALARM|
4563 PFC_MISC_0_ERR | PFC_MISC_1_ERR|
4564 PFC_PCIX_ERR, &bar0->pfc_err_reg,
4565 &sw_stat->pfc_err_cnt))
4566 goto reset;
4567 do_s2io_chk_alarm_bit(PFC_ECC_SG_ERR, &bar0->pfc_err_reg,
4568 &sw_stat->pfc_err_cnt);
4569 }
4570
4571 /*check for tda_err*/
4572 if (val64 & TXDMA_TDA_INT) {
4573 if(do_s2io_chk_alarm_bit(TDA_Fn_ECC_DB_ERR | TDA_SM0_ERR_ALARM |
4574 TDA_SM1_ERR_ALARM, &bar0->tda_err_reg,
4575 &sw_stat->tda_err_cnt))
4576 goto reset;
4577 do_s2io_chk_alarm_bit(TDA_Fn_ECC_SG_ERR | TDA_PCIX_ERR,
4578 &bar0->tda_err_reg, &sw_stat->tda_err_cnt);
4579 }
4580 /*check for pcc_err*/
4581 if (val64 & TXDMA_PCC_INT) {
4582 if (do_s2io_chk_alarm_bit(PCC_SM_ERR_ALARM | PCC_WR_ERR_ALARM
4583 | PCC_N_SERR | PCC_6_COF_OV_ERR
4584 | PCC_7_COF_OV_ERR | PCC_6_LSO_OV_ERR
4585 | PCC_7_LSO_OV_ERR | PCC_FB_ECC_DB_ERR
4586 | PCC_TXB_ECC_DB_ERR, &bar0->pcc_err_reg,
4587 &sw_stat->pcc_err_cnt))
4588 goto reset;
4589 do_s2io_chk_alarm_bit(PCC_FB_ECC_SG_ERR | PCC_TXB_ECC_SG_ERR,
4590 &bar0->pcc_err_reg, &sw_stat->pcc_err_cnt);
4591 }
4592
4593 /*check for tti_err*/
4594 if (val64 & TXDMA_TTI_INT) {
4595 if (do_s2io_chk_alarm_bit(TTI_SM_ERR_ALARM, &bar0->tti_err_reg,
4596 &sw_stat->tti_err_cnt))
4597 goto reset;
4598 do_s2io_chk_alarm_bit(TTI_ECC_SG_ERR | TTI_ECC_DB_ERR,
4599 &bar0->tti_err_reg, &sw_stat->tti_err_cnt);
4600 }
4601
4602 /*check for lso_err*/
4603 if (val64 & TXDMA_LSO_INT) {
4604 if (do_s2io_chk_alarm_bit(LSO6_ABORT | LSO7_ABORT
4605 | LSO6_SM_ERR_ALARM | LSO7_SM_ERR_ALARM,
4606 &bar0->lso_err_reg, &sw_stat->lso_err_cnt))
4607 goto reset;
4608 do_s2io_chk_alarm_bit(LSO6_SEND_OFLOW | LSO7_SEND_OFLOW,
4609 &bar0->lso_err_reg, &sw_stat->lso_err_cnt);
4610 }
4611
4612 /*check for tpa_err*/
4613 if (val64 & TXDMA_TPA_INT) {
4614 if (do_s2io_chk_alarm_bit(TPA_SM_ERR_ALARM, &bar0->tpa_err_reg,
4615 &sw_stat->tpa_err_cnt))
4616 goto reset;
4617 do_s2io_chk_alarm_bit(TPA_TX_FRM_DROP, &bar0->tpa_err_reg,
4618 &sw_stat->tpa_err_cnt);
4619 }
4620
4621 /*check for sm_err*/
4622 if (val64 & TXDMA_SM_INT) {
4623 if (do_s2io_chk_alarm_bit(SM_SM_ERR_ALARM, &bar0->sm_err_reg,
4624 &sw_stat->sm_err_cnt))
4625 goto reset;
4626 }
4627
4628 val64 = readq(&bar0->mac_int_status);
4629 if (val64 & MAC_INT_STATUS_TMAC_INT) {
4630 if (do_s2io_chk_alarm_bit(TMAC_TX_BUF_OVRN | TMAC_TX_SM_ERR,
4631 &bar0->mac_tmac_err_reg,
4632 &sw_stat->mac_tmac_err_cnt))
4633 goto reset;
4634 do_s2io_chk_alarm_bit(TMAC_ECC_SG_ERR | TMAC_ECC_DB_ERR
4635 | TMAC_DESC_ECC_SG_ERR | TMAC_DESC_ECC_DB_ERR,
4636 &bar0->mac_tmac_err_reg,
4637 &sw_stat->mac_tmac_err_cnt);
4638 }
4639
4640 val64 = readq(&bar0->xgxs_int_status);
4641 if (val64 & XGXS_INT_STATUS_TXGXS) {
4642 if (do_s2io_chk_alarm_bit(TXGXS_ESTORE_UFLOW | TXGXS_TX_SM_ERR,
4643 &bar0->xgxs_txgxs_err_reg,
4644 &sw_stat->xgxs_txgxs_err_cnt))
4645 goto reset;
4646 do_s2io_chk_alarm_bit(TXGXS_ECC_SG_ERR | TXGXS_ECC_DB_ERR,
4647 &bar0->xgxs_txgxs_err_reg,
4648 &sw_stat->xgxs_txgxs_err_cnt);
4649 }
4650
4651 val64 = readq(&bar0->rxdma_int_status);
4652 if (val64 & RXDMA_INT_RC_INT_M) {
4653 if (do_s2io_chk_alarm_bit(RC_PRCn_ECC_DB_ERR | RC_FTC_ECC_DB_ERR
4654 | RC_PRCn_SM_ERR_ALARM |RC_FTC_SM_ERR_ALARM,
4655 &bar0->rc_err_reg, &sw_stat->rc_err_cnt))
4656 goto reset;
4657 do_s2io_chk_alarm_bit(RC_PRCn_ECC_SG_ERR | RC_FTC_ECC_SG_ERR
4658 | RC_RDA_FAIL_WR_Rn, &bar0->rc_err_reg,
4659 &sw_stat->rc_err_cnt);
4660 if (do_s2io_chk_alarm_bit(PRC_PCI_AB_RD_Rn | PRC_PCI_AB_WR_Rn
4661 | PRC_PCI_AB_F_WR_Rn, &bar0->prc_pcix_err_reg,
4662 &sw_stat->prc_pcix_err_cnt))
4663 goto reset;
4664 do_s2io_chk_alarm_bit(PRC_PCI_DP_RD_Rn | PRC_PCI_DP_WR_Rn
4665 | PRC_PCI_DP_F_WR_Rn, &bar0->prc_pcix_err_reg,
4666 &sw_stat->prc_pcix_err_cnt);
4667 }
4668
4669 if (val64 & RXDMA_INT_RPA_INT_M) {
4670 if (do_s2io_chk_alarm_bit(RPA_SM_ERR_ALARM | RPA_CREDIT_ERR,
4671 &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt))
4672 goto reset;
4673 do_s2io_chk_alarm_bit(RPA_ECC_SG_ERR | RPA_ECC_DB_ERR,
4674 &bar0->rpa_err_reg, &sw_stat->rpa_err_cnt);
4675 }
4676
4677 if (val64 & RXDMA_INT_RDA_INT_M) {
4678 if (do_s2io_chk_alarm_bit(RDA_RXDn_ECC_DB_ERR
4679 | RDA_FRM_ECC_DB_N_AERR | RDA_SM1_ERR_ALARM
4680 | RDA_SM0_ERR_ALARM | RDA_RXD_ECC_DB_SERR,
4681 &bar0->rda_err_reg, &sw_stat->rda_err_cnt))
4682 goto reset;
4683 do_s2io_chk_alarm_bit(RDA_RXDn_ECC_SG_ERR | RDA_FRM_ECC_SG_ERR
4684 | RDA_MISC_ERR | RDA_PCIX_ERR,
4685 &bar0->rda_err_reg, &sw_stat->rda_err_cnt);
4686 }
4687
4688 if (val64 & RXDMA_INT_RTI_INT_M) {
4689 if (do_s2io_chk_alarm_bit(RTI_SM_ERR_ALARM, &bar0->rti_err_reg,
4690 &sw_stat->rti_err_cnt))
4691 goto reset;
4692 do_s2io_chk_alarm_bit(RTI_ECC_SG_ERR | RTI_ECC_DB_ERR,
4693 &bar0->rti_err_reg, &sw_stat->rti_err_cnt);
4694 }
4695
4696 val64 = readq(&bar0->mac_int_status);
4697 if (val64 & MAC_INT_STATUS_RMAC_INT) {
4698 if (do_s2io_chk_alarm_bit(RMAC_RX_BUFF_OVRN | RMAC_RX_SM_ERR,
4699 &bar0->mac_rmac_err_reg,
4700 &sw_stat->mac_rmac_err_cnt))
4701 goto reset;
4702 do_s2io_chk_alarm_bit(RMAC_UNUSED_INT|RMAC_SINGLE_ECC_ERR|
4703 RMAC_DOUBLE_ECC_ERR, &bar0->mac_rmac_err_reg,
4704 &sw_stat->mac_rmac_err_cnt);
4705 }
4706
4707 val64 = readq(&bar0->xgxs_int_status);
4708 if (val64 & XGXS_INT_STATUS_RXGXS) {
4709 if (do_s2io_chk_alarm_bit(RXGXS_ESTORE_OFLOW | RXGXS_RX_SM_ERR,
4710 &bar0->xgxs_rxgxs_err_reg,
4711 &sw_stat->xgxs_rxgxs_err_cnt))
4712 goto reset;
4713 }
4714
4715 val64 = readq(&bar0->mc_int_status);
4716 if(val64 & MC_INT_STATUS_MC_INT) {
4717 if (do_s2io_chk_alarm_bit(MC_ERR_REG_SM_ERR, &bar0->mc_err_reg,
4718 &sw_stat->mc_err_cnt))
4719 goto reset;
4720
4721 /* Handling Ecc errors */
4722 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
4723 writeq(val64, &bar0->mc_err_reg);
4724 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
4725 sw_stat->double_ecc_errs++;
4726 if (sp->device_type != XFRAME_II_DEVICE) {
4727 /*
4728 * Reset XframeI only if critical error
4729 */
4730 if (val64 &
4731 (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
4732 MC_ERR_REG_MIRI_ECC_DB_ERR_1))
4733 goto reset;
4734 }
4735 } else
4736 sw_stat->single_ecc_errs++;
4737 }
4738 }
4739 return;
4740
4741reset:
3a3d5756 4742 s2io_stop_all_tx_queue(sp);
8116f3cf
SS
4743 schedule_work(&sp->rst_timer_task);
4744 sw_stat->soft_reset_cnt++;
4745 return;
4746}
4747
1da177e4
LT
4748/**
4749 * s2io_isr - ISR handler of the device .
4750 * @irq: the irq of the device.
4751 * @dev_id: a void pointer to the dev structure of the NIC.
20346722
K
4752 * Description: This function is the ISR handler of the device. It
4753 * identifies the reason for the interrupt and calls the relevant
4754 * service routines. As a contongency measure, this ISR allocates the
1da177e4
LT
4755 * recv buffers, if their numbers are below the panic value which is
4756 * presently set to 25% of the original number of rcv buffers allocated.
4757 * Return value:
20346722 4758 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
1da177e4
LT
4759 * IRQ_NONE: will be returned if interrupt is not from our device
4760 */
7d12e780 4761static irqreturn_t s2io_isr(int irq, void *dev_id)
1da177e4
LT
4762{
4763 struct net_device *dev = (struct net_device *) dev_id;
1ee6dd77
RB
4764 struct s2io_nic *sp = dev->priv;
4765 struct XENA_dev_config __iomem *bar0 = sp->bar0;
20346722 4766 int i;
19a60522 4767 u64 reason = 0;
1ee6dd77 4768 struct mac_info *mac_control;
1da177e4
LT
4769 struct config_param *config;
4770
d796fdb7
LV
4771 /* Pretend we handled any irq's from a disconnected card */
4772 if (pci_channel_offline(sp->pdev))
4773 return IRQ_NONE;
4774
596c5c97 4775 if (!is_s2io_card_up(sp))
92b84437 4776 return IRQ_NONE;
92b84437 4777
1da177e4
LT
4778 mac_control = &sp->mac_control;
4779 config = &sp->config;
4780
20346722 4781 /*
1da177e4
LT
4782 * Identify the cause for interrupt and call the appropriate
4783 * interrupt handler. Causes for the interrupt could be;
4784 * 1. Rx of packet.
4785 * 2. Tx complete.
4786 * 3. Link down.
1da177e4
LT
4787 */
4788 reason = readq(&bar0->general_int_status);
4789
596c5c97
SS
4790 if (unlikely(reason == S2IO_MINUS_ONE) ) {
4791 /* Nothing much can be done. Get out */
4792 return IRQ_HANDLED;
1da177e4 4793 }
5d3213cc 4794
596c5c97
SS
4795 if (reason & (GEN_INTR_RXTRAFFIC |
4796 GEN_INTR_TXTRAFFIC | GEN_INTR_TXPIC))
4797 {
4798 writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
4799
4800 if (config->napi) {
4801 if (reason & GEN_INTR_RXTRAFFIC) {
4802 if (likely(netif_rx_schedule_prep(dev,
4803 &sp->napi))) {
4804 __netif_rx_schedule(dev, &sp->napi);
4805 writeq(S2IO_MINUS_ONE,
4806 &bar0->rx_traffic_mask);
4807 } else
4808 writeq(S2IO_MINUS_ONE,
4809 &bar0->rx_traffic_int);
db874e65 4810 }
596c5c97
SS
4811 } else {
4812 /*
4813 * rx_traffic_int reg is an R1 register, writing all 1's
4814 * will ensure that the actual interrupt causing bit
4815 * get's cleared and hence a read can be avoided.
4816 */
4817 if (reason & GEN_INTR_RXTRAFFIC)
19a60522 4818 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
596c5c97
SS
4819
4820 for (i = 0; i < config->rx_ring_num; i++)
4821 rx_intr_handler(&mac_control->rings[i]);
db874e65 4822 }
596c5c97 4823
db874e65 4824 /*
596c5c97 4825 * tx_traffic_int reg is an R1 register, writing all 1's
db874e65
SS
4826 * will ensure that the actual interrupt causing bit get's
4827 * cleared and hence a read can be avoided.
4828 */
596c5c97
SS
4829 if (reason & GEN_INTR_TXTRAFFIC)
4830 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
19a60522 4831
596c5c97
SS
4832 for (i = 0; i < config->tx_fifo_num; i++)
4833 tx_intr_handler(&mac_control->fifos[i]);
1da177e4 4834
596c5c97
SS
4835 if (reason & GEN_INTR_TXPIC)
4836 s2io_txpic_intr_handle(sp);
fe113638 4837
596c5c97
SS
4838 /*
4839 * Reallocate the buffers from the interrupt handler itself.
4840 */
4841 if (!config->napi) {
4842 for (i = 0; i < config->rx_ring_num; i++)
4843 s2io_chk_rx_buffers(sp, i);
4844 }
4845 writeq(sp->general_int_mask, &bar0->general_int_mask);
4846 readl(&bar0->general_int_status);
20346722 4847
596c5c97 4848 return IRQ_HANDLED;
db874e65 4849
596c5c97
SS
4850 }
4851 else if (!reason) {
4852 /* The interrupt was not raised by us */
4853 return IRQ_NONE;
4854 }
db874e65 4855
1da177e4
LT
4856 return IRQ_HANDLED;
4857}
4858
7ba013ac
K
4859/**
4860 * s2io_updt_stats -
4861 */
1ee6dd77 4862static void s2io_updt_stats(struct s2io_nic *sp)
7ba013ac 4863{
1ee6dd77 4864 struct XENA_dev_config __iomem *bar0 = sp->bar0;
7ba013ac
K
4865 u64 val64;
4866 int cnt = 0;
4867
92b84437 4868 if (is_s2io_card_up(sp)) {
7ba013ac
K
4869 /* Apprx 30us on a 133 MHz bus */
4870 val64 = SET_UPDT_CLICKS(10) |
4871 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4872 writeq(val64, &bar0->stat_cfg);
4873 do {
4874 udelay(100);
4875 val64 = readq(&bar0->stat_cfg);
b7b5a128 4876 if (!(val64 & s2BIT(0)))
7ba013ac
K
4877 break;
4878 cnt++;
4879 if (cnt == 5)
4880 break; /* Updt failed */
4881 } while(1);
8a4bdbaa 4882 }
7ba013ac
K
4883}
4884
1da177e4 4885/**
20346722 4886 * s2io_get_stats - Updates the device statistics structure.
1da177e4
LT
4887 * @dev : pointer to the device structure.
4888 * Description:
20346722 4889 * This function updates the device statistics structure in the s2io_nic
1da177e4
LT
4890 * structure and returns a pointer to the same.
4891 * Return value:
4892 * pointer to the updated net_device_stats structure.
4893 */
4894
ac1f60db 4895static struct net_device_stats *s2io_get_stats(struct net_device *dev)
1da177e4 4896{
1ee6dd77
RB
4897 struct s2io_nic *sp = dev->priv;
4898 struct mac_info *mac_control;
1da177e4
LT
4899 struct config_param *config;
4900
20346722 4901
1da177e4
LT
4902 mac_control = &sp->mac_control;
4903 config = &sp->config;
4904
7ba013ac
K
4905 /* Configure Stats for immediate updt */
4906 s2io_updt_stats(sp);
4907
4908 sp->stats.tx_packets =
4909 le32_to_cpu(mac_control->stats_info->tmac_frms);
20346722
K
4910 sp->stats.tx_errors =
4911 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4912 sp->stats.rx_errors =
ee705dba 4913 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
20346722
K
4914 sp->stats.multicast =
4915 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
1da177e4 4916 sp->stats.rx_length_errors =
ee705dba 4917 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
1da177e4
LT
4918
4919 return (&sp->stats);
4920}
4921
4922/**
4923 * s2io_set_multicast - entry point for multicast address enable/disable.
4924 * @dev : pointer to the device structure
4925 * Description:
20346722
K
4926 * This function is a driver entry point which gets called by the kernel
4927 * whenever multicast addresses must be enabled/disabled. This also gets
1da177e4
LT
4928 * called to set/reset promiscuous mode. Depending on the deivce flag, we
4929 * determine, if multicast address must be enabled or if promiscuous mode
4930 * is to be disabled etc.
4931 * Return value:
4932 * void.
4933 */
4934
4935static void s2io_set_multicast(struct net_device *dev)
4936{
4937 int i, j, prev_cnt;
4938 struct dev_mc_list *mclist;
1ee6dd77
RB
4939 struct s2io_nic *sp = dev->priv;
4940 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
4941 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4942 0xfeffffffffffULL;
faa4f796 4943 u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, mac_addr = 0;
1da177e4 4944 void __iomem *add;
faa4f796 4945 struct config_param *config = &sp->config;
1da177e4
LT
4946
4947 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4948 /* Enable all Multicast addresses */
4949 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4950 &bar0->rmac_addr_data0_mem);
4951 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4952 &bar0->rmac_addr_data1_mem);
4953 val64 = RMAC_ADDR_CMD_MEM_WE |
4954 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
faa4f796 4955 RMAC_ADDR_CMD_MEM_OFFSET(config->max_mc_addr - 1);
1da177e4
LT
4956 writeq(val64, &bar0->rmac_addr_cmd_mem);
4957 /* Wait till command completes */
c92ca04b 4958 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
9fc93a41
SS
4959 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4960 S2IO_BIT_RESET);
1da177e4
LT
4961
4962 sp->m_cast_flg = 1;
faa4f796 4963 sp->all_multi_pos = config->max_mc_addr - 1;
1da177e4
LT
4964 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4965 /* Disable all Multicast addresses */
4966 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4967 &bar0->rmac_addr_data0_mem);
5e25b9dd
K
4968 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4969 &bar0->rmac_addr_data1_mem);
1da177e4
LT
4970 val64 = RMAC_ADDR_CMD_MEM_WE |
4971 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4972 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4973 writeq(val64, &bar0->rmac_addr_cmd_mem);
4974 /* Wait till command completes */
c92ca04b 4975 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
9fc93a41
SS
4976 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4977 S2IO_BIT_RESET);
1da177e4
LT
4978
4979 sp->m_cast_flg = 0;
4980 sp->all_multi_pos = 0;
4981 }
4982
4983 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4984 /* Put the NIC into promiscuous mode */
4985 add = &bar0->mac_cfg;
4986 val64 = readq(&bar0->mac_cfg);
4987 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4988
4989 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4990 writel((u32) val64, add);
4991 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4992 writel((u32) (val64 >> 32), (add + 4));
4993
926930b2
SS
4994 if (vlan_tag_strip != 1) {
4995 val64 = readq(&bar0->rx_pa_cfg);
4996 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
4997 writeq(val64, &bar0->rx_pa_cfg);
4998 vlan_strip_flag = 0;
4999 }
5000
1da177e4
LT
5001 val64 = readq(&bar0->mac_cfg);
5002 sp->promisc_flg = 1;
776bd20f 5003 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
1da177e4
LT
5004 dev->name);
5005 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
5006 /* Remove the NIC from promiscuous mode */
5007 add = &bar0->mac_cfg;
5008 val64 = readq(&bar0->mac_cfg);
5009 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
5010
5011 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5012 writel((u32) val64, add);
5013 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
5014 writel((u32) (val64 >> 32), (add + 4));
5015
926930b2
SS
5016 if (vlan_tag_strip != 0) {
5017 val64 = readq(&bar0->rx_pa_cfg);
5018 val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
5019 writeq(val64, &bar0->rx_pa_cfg);
5020 vlan_strip_flag = 1;
5021 }
5022
1da177e4
LT
5023 val64 = readq(&bar0->mac_cfg);
5024 sp->promisc_flg = 0;
776bd20f 5025 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
1da177e4
LT
5026 dev->name);
5027 }
5028
5029 /* Update individual M_CAST address list */
5030 if ((!sp->m_cast_flg) && dev->mc_count) {
5031 if (dev->mc_count >
faa4f796 5032 (config->max_mc_addr - config->max_mac_addr)) {
1da177e4
LT
5033 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
5034 dev->name);
5035 DBG_PRINT(ERR_DBG, "can be added, please enable ");
5036 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
5037 return;
5038 }
5039
5040 prev_cnt = sp->mc_addr_count;
5041 sp->mc_addr_count = dev->mc_count;
5042
5043 /* Clear out the previous list of Mc in the H/W. */
5044 for (i = 0; i < prev_cnt; i++) {
5045 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
5046 &bar0->rmac_addr_data0_mem);
5047 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
20346722 5048 &bar0->rmac_addr_data1_mem);
1da177e4
LT
5049 val64 = RMAC_ADDR_CMD_MEM_WE |
5050 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5051 RMAC_ADDR_CMD_MEM_OFFSET
faa4f796 5052 (config->mc_start_offset + i);
1da177e4
LT
5053 writeq(val64, &bar0->rmac_addr_cmd_mem);
5054
5055 /* Wait for command completes */
c92ca04b 5056 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
9fc93a41
SS
5057 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5058 S2IO_BIT_RESET)) {
1da177e4
LT
5059 DBG_PRINT(ERR_DBG, "%s: Adding ",
5060 dev->name);
5061 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
5062 return;
5063 }
5064 }
5065
5066 /* Create the new Rx filter list and update the same in H/W. */
5067 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
5068 i++, mclist = mclist->next) {
5069 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
5070 ETH_ALEN);
a7a80d5a 5071 mac_addr = 0;
1da177e4
LT
5072 for (j = 0; j < ETH_ALEN; j++) {
5073 mac_addr |= mclist->dmi_addr[j];
5074 mac_addr <<= 8;
5075 }
5076 mac_addr >>= 8;
5077 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
5078 &bar0->rmac_addr_data0_mem);
5079 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
20346722 5080 &bar0->rmac_addr_data1_mem);
1da177e4
LT
5081 val64 = RMAC_ADDR_CMD_MEM_WE |
5082 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5083 RMAC_ADDR_CMD_MEM_OFFSET
faa4f796 5084 (i + config->mc_start_offset);
1da177e4
LT
5085 writeq(val64, &bar0->rmac_addr_cmd_mem);
5086
5087 /* Wait for command completes */
c92ca04b 5088 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
9fc93a41
SS
5089 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5090 S2IO_BIT_RESET)) {
1da177e4
LT
5091 DBG_PRINT(ERR_DBG, "%s: Adding ",
5092 dev->name);
5093 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
5094 return;
5095 }
5096 }
5097 }
5098}
5099
faa4f796
SH
5100/* read from CAM unicast & multicast addresses and store it in
5101 * def_mac_addr structure
5102 */
5103void do_s2io_store_unicast_mc(struct s2io_nic *sp)
5104{
5105 int offset;
5106 u64 mac_addr = 0x0;
5107 struct config_param *config = &sp->config;
5108
5109 /* store unicast & multicast mac addresses */
5110 for (offset = 0; offset < config->max_mc_addr; offset++) {
5111 mac_addr = do_s2io_read_unicast_mc(sp, offset);
5112 /* if read fails disable the entry */
5113 if (mac_addr == FAILURE)
5114 mac_addr = S2IO_DISABLE_MAC_ENTRY;
5115 do_s2io_copy_mac_addr(sp, offset, mac_addr);
5116 }
5117}
5118
5119/* restore unicast & multicast MAC to CAM from def_mac_addr structure */
5120static void do_s2io_restore_unicast_mc(struct s2io_nic *sp)
5121{
5122 int offset;
5123 struct config_param *config = &sp->config;
5124 /* restore unicast mac address */
5125 for (offset = 0; offset < config->max_mac_addr; offset++)
5126 do_s2io_prog_unicast(sp->dev,
5127 sp->def_mac_addr[offset].mac_addr);
5128
5129 /* restore multicast mac address */
5130 for (offset = config->mc_start_offset;
5131 offset < config->max_mc_addr; offset++)
5132 do_s2io_add_mc(sp, sp->def_mac_addr[offset].mac_addr);
5133}
5134
5135/* add a multicast MAC address to CAM */
5136static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr)
5137{
5138 int i;
5139 u64 mac_addr = 0;
5140 struct config_param *config = &sp->config;
5141
5142 for (i = 0; i < ETH_ALEN; i++) {
5143 mac_addr <<= 8;
5144 mac_addr |= addr[i];
5145 }
5146 if ((0ULL == mac_addr) || (mac_addr == S2IO_DISABLE_MAC_ENTRY))
5147 return SUCCESS;
5148
5149 /* check if the multicast mac already preset in CAM */
5150 for (i = config->mc_start_offset; i < config->max_mc_addr; i++) {
5151 u64 tmp64;
5152 tmp64 = do_s2io_read_unicast_mc(sp, i);
5153 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5154 break;
5155
5156 if (tmp64 == mac_addr)
5157 return SUCCESS;
5158 }
5159 if (i == config->max_mc_addr) {
5160 DBG_PRINT(ERR_DBG,
5161 "CAM full no space left for multicast MAC\n");
5162 return FAILURE;
5163 }
5164 /* Update the internal structure with this new mac address */
5165 do_s2io_copy_mac_addr(sp, i, mac_addr);
5166
5167 return (do_s2io_add_mac(sp, mac_addr, i));
5168}
5169
5170/* add MAC address to CAM */
5171static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int off)
2fd37688
SS
5172{
5173 u64 val64;
5174 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5175
5176 writeq(RMAC_ADDR_DATA0_MEM_ADDR(addr),
5177 &bar0->rmac_addr_data0_mem);
5178
5179 val64 =
5180 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5181 RMAC_ADDR_CMD_MEM_OFFSET(off);
5182 writeq(val64, &bar0->rmac_addr_cmd_mem);
5183
5184 /* Wait till command completes */
5185 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5186 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5187 S2IO_BIT_RESET)) {
faa4f796 5188 DBG_PRINT(INFO_DBG, "do_s2io_add_mac failed\n");
2fd37688
SS
5189 return FAILURE;
5190 }
5191 return SUCCESS;
5192}
faa4f796
SH
5193/* deletes a specified unicast/multicast mac entry from CAM */
5194static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr)
5195{
5196 int offset;
5197 u64 dis_addr = S2IO_DISABLE_MAC_ENTRY, tmp64;
5198 struct config_param *config = &sp->config;
5199
5200 for (offset = 1;
5201 offset < config->max_mc_addr; offset++) {
5202 tmp64 = do_s2io_read_unicast_mc(sp, offset);
5203 if (tmp64 == addr) {
5204 /* disable the entry by writing 0xffffffffffffULL */
5205 if (do_s2io_add_mac(sp, dis_addr, offset) == FAILURE)
5206 return FAILURE;
5207 /* store the new mac list from CAM */
5208 do_s2io_store_unicast_mc(sp);
5209 return SUCCESS;
5210 }
5211 }
5212 DBG_PRINT(ERR_DBG, "MAC address 0x%llx not found in CAM\n",
5213 (unsigned long long)addr);
5214 return FAILURE;
5215}
5216
5217/* read mac entries from CAM */
5218static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
5219{
5220 u64 tmp64 = 0xffffffffffff0000ULL, val64;
5221 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5222
5223 /* read mac addr */
5224 val64 =
5225 RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5226 RMAC_ADDR_CMD_MEM_OFFSET(offset);
5227 writeq(val64, &bar0->rmac_addr_cmd_mem);
5228
5229 /* Wait till command completes */
5230 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
5231 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
5232 S2IO_BIT_RESET)) {
5233 DBG_PRINT(INFO_DBG, "do_s2io_read_unicast_mc failed\n");
5234 return FAILURE;
5235 }
5236 tmp64 = readq(&bar0->rmac_addr_data0_mem);
5237 return (tmp64 >> 16);
5238}
2fd37688
SS
5239
5240/**
5241 * s2io_set_mac_addr driver entry point
5242 */
faa4f796 5243
2fd37688
SS
5244static int s2io_set_mac_addr(struct net_device *dev, void *p)
5245{
5246 struct sockaddr *addr = p;
5247
5248 if (!is_valid_ether_addr(addr->sa_data))
5249 return -EINVAL;
5250
5251 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5252
5253 /* store the MAC address in CAM */
5254 return (do_s2io_prog_unicast(dev, dev->dev_addr));
5255}
1da177e4 5256/**
2fd37688 5257 * do_s2io_prog_unicast - Programs the Xframe mac address
1da177e4
LT
5258 * @dev : pointer to the device structure.
5259 * @addr: a uchar pointer to the new mac address which is to be set.
20346722 5260 * Description : This procedure will program the Xframe to receive
1da177e4 5261 * frames with new Mac Address
20346722 5262 * Return value: SUCCESS on success and an appropriate (-)ve integer
1da177e4
LT
5263 * as defined in errno.h file on failure.
5264 */
faa4f796 5265
2fd37688 5266static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
1da177e4 5267{
1ee6dd77 5268 struct s2io_nic *sp = dev->priv;
2fd37688 5269 register u64 mac_addr = 0, perm_addr = 0;
1da177e4 5270 int i;
faa4f796
SH
5271 u64 tmp64;
5272 struct config_param *config = &sp->config;
1da177e4 5273
20346722 5274 /*
2fd37688
SS
5275 * Set the new MAC address as the new unicast filter and reflect this
5276 * change on the device address registered with the OS. It will be
5277 * at offset 0.
5278 */
1da177e4
LT
5279 for (i = 0; i < ETH_ALEN; i++) {
5280 mac_addr <<= 8;
5281 mac_addr |= addr[i];
2fd37688
SS
5282 perm_addr <<= 8;
5283 perm_addr |= sp->def_mac_addr[0].mac_addr[i];
d8d70caf
SS
5284 }
5285
2fd37688
SS
5286 /* check if the dev_addr is different than perm_addr */
5287 if (mac_addr == perm_addr)
d8d70caf
SS
5288 return SUCCESS;
5289
faa4f796
SH
5290 /* check if the mac already preset in CAM */
5291 for (i = 1; i < config->max_mac_addr; i++) {
5292 tmp64 = do_s2io_read_unicast_mc(sp, i);
5293 if (tmp64 == S2IO_DISABLE_MAC_ENTRY) /* CAM entry is empty */
5294 break;
5295
5296 if (tmp64 == mac_addr) {
5297 DBG_PRINT(INFO_DBG,
5298 "MAC addr:0x%llx already present in CAM\n",
5299 (unsigned long long)mac_addr);
5300 return SUCCESS;
5301 }
5302 }
5303 if (i == config->max_mac_addr) {
5304 DBG_PRINT(ERR_DBG, "CAM full no space left for Unicast MAC\n");
5305 return FAILURE;
5306 }
d8d70caf 5307 /* Update the internal structure with this new mac address */
faa4f796
SH
5308 do_s2io_copy_mac_addr(sp, i, mac_addr);
5309 return (do_s2io_add_mac(sp, mac_addr, i));
1da177e4
LT
5310}
5311
5312/**
20346722 5313 * s2io_ethtool_sset - Sets different link parameters.
1da177e4
LT
5314 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
5315 * @info: pointer to the structure with parameters given by ethtool to set
5316 * link information.
5317 * Description:
20346722 5318 * The function sets different link parameters provided by the user onto
1da177e4
LT
5319 * the NIC.
5320 * Return value:
5321 * 0 on success.
5322*/
5323
5324static int s2io_ethtool_sset(struct net_device *dev,
5325 struct ethtool_cmd *info)
5326{
1ee6dd77 5327 struct s2io_nic *sp = dev->priv;
1da177e4
LT
5328 if ((info->autoneg == AUTONEG_ENABLE) ||
5329 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
5330 return -EINVAL;
5331 else {
5332 s2io_close(sp->dev);
5333 s2io_open(sp->dev);
5334 }
5335
5336 return 0;
5337}
5338
5339/**
20346722 5340 * s2io_ethtol_gset - Return link specific information.
1da177e4
LT
5341 * @sp : private member of the device structure, pointer to the
5342 * s2io_nic structure.
5343 * @info : pointer to the structure with parameters given by ethtool
5344 * to return link information.
5345 * Description:
5346 * Returns link specific information like speed, duplex etc.. to ethtool.
5347 * Return value :
5348 * return 0 on success.
5349 */
5350
5351static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
5352{
1ee6dd77 5353 struct s2io_nic *sp = dev->priv;
1da177e4
LT
5354 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5355 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
5356 info->port = PORT_FIBRE;
1a7eb72b
SS
5357
5358 /* info->transceiver */
5359 info->transceiver = XCVR_EXTERNAL;
1da177e4
LT
5360
5361 if (netif_carrier_ok(sp->dev)) {
5362 info->speed = 10000;
5363 info->duplex = DUPLEX_FULL;
5364 } else {
5365 info->speed = -1;
5366 info->duplex = -1;
5367 }
5368
5369 info->autoneg = AUTONEG_DISABLE;
5370 return 0;
5371}
5372
5373/**
20346722
K
5374 * s2io_ethtool_gdrvinfo - Returns driver specific information.
5375 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
5376 * s2io_nic structure.
5377 * @info : pointer to the structure with parameters given by ethtool to
5378 * return driver information.
5379 * Description:
5380 * Returns driver specefic information like name, version etc.. to ethtool.
5381 * Return value:
5382 * void
5383 */
5384
5385static void s2io_ethtool_gdrvinfo(struct net_device *dev,
5386 struct ethtool_drvinfo *info)
5387{
1ee6dd77 5388 struct s2io_nic *sp = dev->priv;
1da177e4 5389
dbc2309d
JL
5390 strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
5391 strncpy(info->version, s2io_driver_version, sizeof(info->version));
5392 strncpy(info->fw_version, "", sizeof(info->fw_version));
5393 strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
1da177e4
LT
5394 info->regdump_len = XENA_REG_SPACE;
5395 info->eedump_len = XENA_EEPROM_SPACE;
1da177e4
LT
5396}
5397
5398/**
5399 * s2io_ethtool_gregs - dumps the entire space of Xfame into the buffer.
20346722 5400 * @sp: private member of the device structure, which is a pointer to the
1da177e4 5401 * s2io_nic structure.
20346722 5402 * @regs : pointer to the structure with parameters given by ethtool for
1da177e4
LT
5403 * dumping the registers.
5404 * @reg_space: The input argumnet into which all the registers are dumped.
5405 * Description:
5406 * Dumps the entire register space of xFrame NIC into the user given
5407 * buffer area.
5408 * Return value :
5409 * void .
5410*/
5411
5412static void s2io_ethtool_gregs(struct net_device *dev,
5413 struct ethtool_regs *regs, void *space)
5414{
5415 int i;
5416 u64 reg;
5417 u8 *reg_space = (u8 *) space;
1ee6dd77 5418 struct s2io_nic *sp = dev->priv;
1da177e4
LT
5419
5420 regs->len = XENA_REG_SPACE;
5421 regs->version = sp->pdev->subsystem_device;
5422
5423 for (i = 0; i < regs->len; i += 8) {
5424 reg = readq(sp->bar0 + i);
5425 memcpy((reg_space + i), &reg, 8);
5426 }
5427}
5428
5429/**
5430 * s2io_phy_id - timer function that alternates adapter LED.
20346722 5431 * @data : address of the private member of the device structure, which
1da177e4 5432 * is a pointer to the s2io_nic structure, provided as an u32.
20346722
K
5433 * Description: This is actually the timer function that alternates the
5434 * adapter LED bit of the adapter control bit to set/reset every time on
5435 * invocation. The timer is set for 1/2 a second, hence tha NIC blinks
1da177e4
LT
5436 * once every second.
5437*/
5438static void s2io_phy_id(unsigned long data)
5439{
1ee6dd77
RB
5440 struct s2io_nic *sp = (struct s2io_nic *) data;
5441 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
5442 u64 val64 = 0;
5443 u16 subid;
5444
5445 subid = sp->pdev->subsystem_device;
541ae68f
K
5446 if ((sp->device_type == XFRAME_II_DEVICE) ||
5447 ((subid & 0xFF) >= 0x07)) {
1da177e4
LT
5448 val64 = readq(&bar0->gpio_control);
5449 val64 ^= GPIO_CTRL_GPIO_0;
5450 writeq(val64, &bar0->gpio_control);
5451 } else {
5452 val64 = readq(&bar0->adapter_control);
5453 val64 ^= ADAPTER_LED_ON;
5454 writeq(val64, &bar0->adapter_control);
5455 }
5456
5457 mod_timer(&sp->id_timer, jiffies + HZ / 2);
5458}
5459
5460/**
5461 * s2io_ethtool_idnic - To physically identify the nic on the system.
5462 * @sp : private member of the device structure, which is a pointer to the
5463 * s2io_nic structure.
20346722 5464 * @id : pointer to the structure with identification parameters given by
1da177e4
LT
5465 * ethtool.
5466 * Description: Used to physically identify the NIC on the system.
20346722 5467 * The Link LED will blink for a time specified by the user for
1da177e4 5468 * identification.
20346722 5469 * NOTE: The Link has to be Up to be able to blink the LED. Hence
1da177e4
LT
5470 * identification is possible only if it's link is up.
5471 * Return value:
5472 * int , returns 0 on success
5473 */
5474
5475static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
5476{
5477 u64 val64 = 0, last_gpio_ctrl_val;
1ee6dd77
RB
5478 struct s2io_nic *sp = dev->priv;
5479 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
5480 u16 subid;
5481
5482 subid = sp->pdev->subsystem_device;
5483 last_gpio_ctrl_val = readq(&bar0->gpio_control);
541ae68f
K
5484 if ((sp->device_type == XFRAME_I_DEVICE) &&
5485 ((subid & 0xFF) < 0x07)) {
1da177e4
LT
5486 val64 = readq(&bar0->adapter_control);
5487 if (!(val64 & ADAPTER_CNTL_EN)) {
5488 printk(KERN_ERR
5489 "Adapter Link down, cannot blink LED\n");
5490 return -EFAULT;
5491 }
5492 }
5493 if (sp->id_timer.function == NULL) {
5494 init_timer(&sp->id_timer);
5495 sp->id_timer.function = s2io_phy_id;
5496 sp->id_timer.data = (unsigned long) sp;
5497 }
5498 mod_timer(&sp->id_timer, jiffies);
5499 if (data)
20346722 5500 msleep_interruptible(data * HZ);
1da177e4 5501 else
20346722 5502 msleep_interruptible(MAX_FLICKER_TIME);
1da177e4
LT
5503 del_timer_sync(&sp->id_timer);
5504
541ae68f 5505 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
1da177e4
LT
5506 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
5507 last_gpio_ctrl_val = readq(&bar0->gpio_control);
5508 }
5509
5510 return 0;
5511}
5512
0cec35eb
SH
/**
 * s2io_ethtool_gringparam - report ring sizes to ethtool
 * @dev: the net device being queried.
 * @ering: output structure filled with max/current descriptor counts.
 *
 * The maximum RX descriptor count depends on the receive buffer mode;
 * the current counts are the sums over all configured TX FIFOs and
 * RX rings.
 */
static void s2io_ethtool_gringparam(struct net_device *dev,
                                    struct ethtool_ringparam *ering)
{
	struct s2io_nic *sp = dev->priv;
	int i,tx_desc_count=0,rx_desc_count=0;

	if (sp->rxd_mode == RXD_MODE_1)
		ering->rx_max_pending = MAX_RX_DESC_1;
	else if (sp->rxd_mode == RXD_MODE_3B)
		ering->rx_max_pending = MAX_RX_DESC_2;

	ering->tx_max_pending = MAX_TX_DESC;
	/* Current TX pending = sum of all configured FIFO lengths. */
	for (i = 0 ; i < sp->config.tx_fifo_num ; i++)
		tx_desc_count += sp->config.tx_cfg[i].fifo_len;

	DBG_PRINT(INFO_DBG,"\nmax txds : %d\n",sp->config.max_txds);
	ering->tx_pending = tx_desc_count;
	rx_desc_count = 0;
	/* Current RX pending = sum of descriptors over all rings. */
	for (i = 0 ; i < sp->config.rx_ring_num ; i++)
		rx_desc_count += sp->config.rx_cfg[i].num_rxd;

	ering->rx_pending = rx_desc_count;

	/* No mini ring on this hardware; the jumbo figures mirror the
	 * normal RX ring. */
	ering->rx_mini_max_pending = 0;
	ering->rx_mini_pending = 0;
	if(sp->rxd_mode == RXD_MODE_1)
		ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
	else if (sp->rxd_mode == RXD_MODE_3B)
		ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
	ering->rx_jumbo_pending = rx_desc_count;
}
5544
1da177e4
LT
5545/**
5546 * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
20346722
K
5547 * @sp : private member of the device structure, which is a pointer to the
5548 * s2io_nic structure.
1da177e4
LT
5549 * @ep : pointer to the structure with pause parameters given by ethtool.
5550 * Description:
5551 * Returns the Pause frame generation and reception capability of the NIC.
5552 * Return value:
5553 * void
5554 */
5555static void s2io_ethtool_getpause_data(struct net_device *dev,
5556 struct ethtool_pauseparam *ep)
5557{
5558 u64 val64;
1ee6dd77
RB
5559 struct s2io_nic *sp = dev->priv;
5560 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
5561
5562 val64 = readq(&bar0->rmac_pause_cfg);
5563 if (val64 & RMAC_PAUSE_GEN_ENABLE)
5564 ep->tx_pause = TRUE;
5565 if (val64 & RMAC_PAUSE_RX_ENABLE)
5566 ep->rx_pause = TRUE;
5567 ep->autoneg = FALSE;
5568}
5569
5570/**
5571 * s2io_ethtool_setpause_data - set/reset pause frame generation.
20346722 5572 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
5573 * s2io_nic structure.
5574 * @ep : pointer to the structure with pause parameters given by ethtool.
5575 * Description:
5576 * It can be used to set or reset Pause frame generation or reception
5577 * support of the NIC.
5578 * Return value:
5579 * int, returns 0 on Success
5580 */
5581
5582static int s2io_ethtool_setpause_data(struct net_device *dev,
20346722 5583 struct ethtool_pauseparam *ep)
1da177e4
LT
5584{
5585 u64 val64;
1ee6dd77
RB
5586 struct s2io_nic *sp = dev->priv;
5587 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
5588
5589 val64 = readq(&bar0->rmac_pause_cfg);
5590 if (ep->tx_pause)
5591 val64 |= RMAC_PAUSE_GEN_ENABLE;
5592 else
5593 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
5594 if (ep->rx_pause)
5595 val64 |= RMAC_PAUSE_RX_ENABLE;
5596 else
5597 val64 &= ~RMAC_PAUSE_RX_ENABLE;
5598 writeq(val64, &bar0->rmac_pause_cfg);
5599 return 0;
5600}
5601
5602/**
5603 * read_eeprom - reads 4 bytes of data from user given offset.
20346722 5604 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
5605 * s2io_nic structure.
5606 * @off : offset at which the data must be written
5607 * @data : Its an output parameter where the data read at the given
20346722 5608 * offset is stored.
1da177e4 5609 * Description:
20346722 5610 * Will read 4 bytes of data from the user given offset and return the
1da177e4
LT
5611 * read data.
5612 * NOTE: Will allow to read only part of the EEPROM visible through the
5613 * I2C bus.
5614 * Return value:
5615 * -1 on failure and 0 on success.
5616 */
5617
#define S2IO_DEV_ID 5
static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
{
	int ret = -1;
	u32 exit_cnt = 0;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* Xframe I: issue a 4-byte read through the I2C interface and
	 * poll for completion (up to 5 x 50 ms). */
	if (sp->device_type == XFRAME_I_DEVICE) {
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
		    I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
		    I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				*data = I2C_CONTROL_GET_DATA(val64);
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	/* Xframe II: read through the SPI interface instead, with the
	 * same 5 x 50 ms poll.  Only the low 24 bits of spi_data are
	 * valid for this 3-byte transfer. */
	if (sp->device_type == XFRAME_II_DEVICE) {
		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(0x3) |
			SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				/* NOTE(review): returns 1 on SPI NACK even
				 * though the kernel-doc above documents only
				 * 0/-1; callers treat any nonzero as failure. */
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				*data = readq(&bar0->spi_data);
				*data &= 0xffffff;
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
5668
5669/**
5670 * write_eeprom - actually writes the relevant part of the data value.
5671 * @sp : private member of the device structure, which is a pointer to the
5672 * s2io_nic structure.
5673 * @off : offset at which the data must be written
5674 * @data : The data that is to be written
20346722 5675 * @cnt : Number of bytes of the data that are actually to be written into
1da177e4
LT
5676 * the Eeprom. (max of 3)
5677 * Description:
5678 * Actually writes the relevant part of the data value into the Eeprom
5679 * through the I2C bus.
5680 * Return value:
5681 * 0 on success, -1 on failure.
5682 */
5683
static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
{
	int exit_cnt = 0, ret = -1;
	u64 val64;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* Xframe I: write up to 'cnt' bytes through the I2C interface and
	 * poll for completion (up to 5 x 50 ms); a NACK leaves ret == -1. */
	if (sp->device_type == XFRAME_I_DEVICE) {
		val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
		    I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
		    I2C_CONTROL_CNTL_START;
		SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);

		while (exit_cnt < 5) {
			val64 = readq(&bar0->i2c_control);
			if (I2C_CONTROL_CNTL_END(val64)) {
				if (!(val64 & I2C_CONTROL_NACK))
					ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}

	/* Xframe II: write through the SPI interface. */
	if (sp->device_type == XFRAME_II_DEVICE) {
		/* Presumably the BYTECNT field encodes a count of 8 as 0
		 * (wrap of a 3-bit field) — confirm against the Xframe II
		 * register specification. */
		int write_cnt = (cnt == 8) ? 0 : cnt;
		writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);

		val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
			SPI_CONTROL_BYTECNT(write_cnt) |
			SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		val64 |= SPI_CONTROL_REQ;
		SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
		while (exit_cnt < 5) {
			val64 = readq(&bar0->spi_control);
			if (val64 & SPI_CONTROL_NACK) {
				/* NOTE(review): 1 on NACK, -1 on timeout —
				 * callers only test for nonzero. */
				ret = 1;
				break;
			} else if (val64 & SPI_CONTROL_DONE) {
				ret = 0;
				break;
			}
			msleep(50);
			exit_cnt++;
		}
	}
	return ret;
}
1ee6dd77 5733static void s2io_vpd_read(struct s2io_nic *nic)
9dc737a7 5734{
b41477f3
AR
5735 u8 *vpd_data;
5736 u8 data;
9dc737a7
AR
5737 int i=0, cnt, fail = 0;
5738 int vpd_addr = 0x80;
5739
5740 if (nic->device_type == XFRAME_II_DEVICE) {
5741 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
5742 vpd_addr = 0x80;
5743 }
5744 else {
5745 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
5746 vpd_addr = 0x50;
5747 }
19a60522 5748 strcpy(nic->serial_num, "NOT AVAILABLE");
9dc737a7 5749
b41477f3 5750 vpd_data = kmalloc(256, GFP_KERNEL);
c53d4945
SH
5751 if (!vpd_data) {
5752 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
b41477f3 5753 return;
c53d4945 5754 }
491976b2 5755 nic->mac_control.stats_info->sw_stat.mem_allocated += 256;
b41477f3 5756
9dc737a7
AR
5757 for (i = 0; i < 256; i +=4 ) {
5758 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
5759 pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
5760 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
5761 for (cnt = 0; cnt <5; cnt++) {
5762 msleep(2);
5763 pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
5764 if (data == 0x80)
5765 break;
5766 }
5767 if (cnt >= 5) {
5768 DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
5769 fail = 1;
5770 break;
5771 }
5772 pci_read_config_dword(nic->pdev, (vpd_addr + 4),
5773 (u32 *)&vpd_data[i]);
5774 }
19a60522
SS
5775
5776 if(!fail) {
5777 /* read serial number of adapter */
5778 for (cnt = 0; cnt < 256; cnt++) {
5779 if ((vpd_data[cnt] == 'S') &&
5780 (vpd_data[cnt+1] == 'N') &&
5781 (vpd_data[cnt+2] < VPD_STRING_LEN)) {
5782 memset(nic->serial_num, 0, VPD_STRING_LEN);
5783 memcpy(nic->serial_num, &vpd_data[cnt + 3],
5784 vpd_data[cnt+2]);
5785 break;
5786 }
5787 }
5788 }
5789
5790 if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
9dc737a7
AR
5791 memset(nic->product_name, 0, vpd_data[1]);
5792 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
5793 }
b41477f3 5794 kfree(vpd_data);
491976b2 5795 nic->mac_control.stats_info->sw_stat.mem_freed += 256;
9dc737a7
AR
5796}
5797
1da177e4
LT
5798/**
5799 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
5800 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
20346722 5801 * @eeprom : pointer to the user level structure provided by ethtool,
1da177e4
LT
5802 * containing all relevant information.
5803 * @data_buf : user defined value to be written into Eeprom.
5804 * Description: Reads the values stored in the Eeprom at given offset
5805 * for a given length. Stores these values int the input argument data
5806 * buffer 'data_buf' and returns these to the caller (ethtool.)
5807 * Return value:
5808 * int 0 on success
5809 */
5810
5811static int s2io_ethtool_geeprom(struct net_device *dev,
20346722 5812 struct ethtool_eeprom *eeprom, u8 * data_buf)
1da177e4 5813{
ad4ebed0 5814 u32 i, valid;
5815 u64 data;
1ee6dd77 5816 struct s2io_nic *sp = dev->priv;
1da177e4
LT
5817
5818 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5819
5820 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5821 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5822
5823 for (i = 0; i < eeprom->len; i += 4) {
5824 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5825 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5826 return -EFAULT;
5827 }
5828 valid = INV(data);
5829 memcpy((data_buf + i), &valid, 4);
5830 }
5831 return 0;
5832}
5833
5834/**
5835 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5836 * @sp : private member of the device structure, which is a pointer to the
5837 * s2io_nic structure.
20346722 5838 * @eeprom : pointer to the user level structure provided by ethtool,
1da177e4
LT
5839 * containing all relevant information.
5840 * @data_buf ; user defined value to be written into Eeprom.
5841 * Description:
5842 * Tries to write the user provided value in the Eeprom, at the offset
5843 * given by the user.
5844 * Return value:
5845 * 0 on success, -EFAULT on failure.
5846 */
5847
static int s2io_ethtool_seeprom(struct net_device *dev,
				struct ethtool_eeprom *eeprom,
				u8 * data_buf)
{
	int len = eeprom->len, cnt = 0;
	u64 valid = 0, data;
	struct s2io_nic *sp = dev->priv;

	/* Refuse the write unless the caller echoed back the magic that
	 * s2io_ethtool_geeprom() reports (vendor | device << 16). */
	if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
		DBG_PRINT(ERR_DBG,
			  "ETHTOOL_WRITE_EEPROM Err: Magic value ");
		DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
			  eeprom->magic);
		return -EFAULT;
	}

	/* NOTE(review): unlike geeprom, offset+len is not clamped against
	 * XENA_EEPROM_SPACE here — verify whether out-of-range writes
	 * should be rejected. */
	while (len) {
		data = (u32) data_buf[cnt] & 0x000000FF;
		/* Non-zero bytes are shifted into bits 24..31 before being
		 * handed to write_eeprom() with a byte count of 0 —
		 * presumably matching the I2C/SPI data-register layout for
		 * a single-byte write; confirm against write_eeprom(). */
		if (data) {
			valid = (u32) (data << 24);
		} else
			valid = data;

		if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
			DBG_PRINT(ERR_DBG,
				  "ETHTOOL_WRITE_EEPROM Err: Cannot ");
			DBG_PRINT(ERR_DBG,
				  "write into the specified offset\n");
			return -EFAULT;
		}
		cnt++;
		len--;
	}

	return 0;
}
5884
5885/**
20346722
K
5886 * s2io_register_test - reads and writes into all clock domains.
5887 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
5888 * s2io_nic structure.
5889 * @data : variable that returns the result of each of the test conducted b
5890 * by the driver.
5891 * Description:
5892 * Read and write into all clock domains. The NIC has 3 clock domains,
5893 * see that registers in all the three regions are accessible.
5894 * Return value:
5895 * 0 on success.
5896 */
5897
static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = 0, exp_val;
	int fail = 0;

	/* Read tests: each register below is compared against its
	 * expected power-on/constant value in a different clock domain. */
	val64 = readq(&bar0->pif_rd_swapper_fb);
	if (val64 != 0x123456789abcdefULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
	}

	val64 = readq(&bar0->rmac_pause_cfg);
	if (val64 != 0xc000ffff00000000ULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
	}

	/* rx_queue_cfg reset value differs between the two generations. */
	val64 = readq(&bar0->rx_queue_cfg);
	if (sp->device_type == XFRAME_II_DEVICE)
		exp_val = 0x0404040404040404ULL;
	else
		exp_val = 0x0808080808080808ULL;
	if (val64 != exp_val) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
	}

	val64 = readq(&bar0->xgxs_efifo_cfg);
	if (val64 != 0x000000001923141EULL) {
		fail = 1;
		DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
	}

	/* Write tests: write alternating bit patterns to xmsi_data and
	 * read them back. */
	val64 = 0x5A5A5A5A5A5A5A5AULL;
	writeq(val64, &bar0->xmsi_data);
	val64 = readq(&bar0->xmsi_data);
	if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
		fail = 1;
		DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
	}

	val64 = 0xA5A5A5A5A5A5A5A5ULL;
	writeq(val64, &bar0->xmsi_data);
	val64 = readq(&bar0->xmsi_data);
	if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
		fail = 1;
		DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
	}

	/* 0 = pass, 1 = fail; reported both through *data and the return. */
	*data = fail;
	return fail;
}
5951
5952/**
20346722 5953 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
1da177e4
LT
5954 * @sp : private member of the device structure, which is a pointer to the
5955 * s2io_nic structure.
5956 * @data:variable that returns the result of each of the test conducted by
5957 * the driver.
5958 * Description:
20346722 5959 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
1da177e4
LT
5960 * register.
5961 * Return value:
5962 * 0 on success.
5963 */
5964
1ee6dd77 5965static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
1da177e4
LT
5966{
5967 int fail = 0;
ad4ebed0 5968 u64 ret_data, org_4F0, org_7F0;
5969 u8 saved_4F0 = 0, saved_7F0 = 0;
5970 struct net_device *dev = sp->dev;
1da177e4
LT
5971
5972 /* Test Write Error at offset 0 */
ad4ebed0 5973 /* Note that SPI interface allows write access to all areas
5974 * of EEPROM. Hence doing all negative testing only for Xframe I.
5975 */
5976 if (sp->device_type == XFRAME_I_DEVICE)
5977 if (!write_eeprom(sp, 0, 0, 3))
5978 fail = 1;
5979
5980 /* Save current values at offsets 0x4F0 and 0x7F0 */
5981 if (!read_eeprom(sp, 0x4F0, &org_4F0))
5982 saved_4F0 = 1;
5983 if (!read_eeprom(sp, 0x7F0, &org_7F0))
5984 saved_7F0 = 1;
1da177e4
LT
5985
5986 /* Test Write at offset 4f0 */
ad4ebed0 5987 if (write_eeprom(sp, 0x4F0, 0x012345, 3))
1da177e4
LT
5988 fail = 1;
5989 if (read_eeprom(sp, 0x4F0, &ret_data))
5990 fail = 1;
5991
ad4ebed0 5992 if (ret_data != 0x012345) {
26b7625c
AM
5993 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5994 "Data written %llx Data read %llx\n",
5995 dev->name, (unsigned long long)0x12345,
5996 (unsigned long long)ret_data);
1da177e4 5997 fail = 1;
ad4ebed0 5998 }
1da177e4
LT
5999
6000 /* Reset the EEPROM data go FFFF */
ad4ebed0 6001 write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
1da177e4
LT
6002
6003 /* Test Write Request Error at offset 0x7c */
ad4ebed0 6004 if (sp->device_type == XFRAME_I_DEVICE)
6005 if (!write_eeprom(sp, 0x07C, 0, 3))
6006 fail = 1;
1da177e4 6007
ad4ebed0 6008 /* Test Write Request at offset 0x7f0 */
6009 if (write_eeprom(sp, 0x7F0, 0x012345, 3))
1da177e4 6010 fail = 1;
ad4ebed0 6011 if (read_eeprom(sp, 0x7F0, &ret_data))
1da177e4
LT
6012 fail = 1;
6013
ad4ebed0 6014 if (ret_data != 0x012345) {
26b7625c
AM
6015 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
6016 "Data written %llx Data read %llx\n",
6017 dev->name, (unsigned long long)0x12345,
6018 (unsigned long long)ret_data);
1da177e4 6019 fail = 1;
ad4ebed0 6020 }
1da177e4
LT
6021
6022 /* Reset the EEPROM data go FFFF */
ad4ebed0 6023 write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
1da177e4 6024
ad4ebed0 6025 if (sp->device_type == XFRAME_I_DEVICE) {
6026 /* Test Write Error at offset 0x80 */
6027 if (!write_eeprom(sp, 0x080, 0, 3))
6028 fail = 1;
1da177e4 6029
ad4ebed0 6030 /* Test Write Error at offset 0xfc */
6031 if (!write_eeprom(sp, 0x0FC, 0, 3))
6032 fail = 1;
1da177e4 6033
ad4ebed0 6034 /* Test Write Error at offset 0x100 */
6035 if (!write_eeprom(sp, 0x100, 0, 3))
6036 fail = 1;
1da177e4 6037
ad4ebed0 6038 /* Test Write Error at offset 4ec */
6039 if (!write_eeprom(sp, 0x4EC, 0, 3))
6040 fail = 1;
6041 }
6042
6043 /* Restore values at offsets 0x4F0 and 0x7F0 */
6044 if (saved_4F0)
6045 write_eeprom(sp, 0x4F0, org_4F0, 3);
6046 if (saved_7F0)
6047 write_eeprom(sp, 0x7F0, org_7F0, 3);
1da177e4
LT
6048
6049 *data = fail;
ad4ebed0 6050 return fail;
1da177e4
LT
6051}
6052
6053/**
6054 * s2io_bist_test - invokes the MemBist test of the card .
20346722 6055 * @sp : private member of the device structure, which is a pointer to the
1da177e4 6056 * s2io_nic structure.
20346722 6057 * @data:variable that returns the result of each of the test conducted by
1da177e4
LT
6058 * the driver.
6059 * Description:
6060 * This invokes the MemBist test of the card. We give around
6061 * 2 secs time for the Test to complete. If it's still not complete
 * within this period, we consider that the test failed.
1da177e4
LT
6063 * Return value:
6064 * 0 on success and -1 on failure.
6065 */
6066
1ee6dd77 6067static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
1da177e4
LT
6068{
6069 u8 bist = 0;
6070 int cnt = 0, ret = -1;
6071
6072 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6073 bist |= PCI_BIST_START;
6074 pci_write_config_word(sp->pdev, PCI_BIST, bist);
6075
6076 while (cnt < 20) {
6077 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
6078 if (!(bist & PCI_BIST_START)) {
6079 *data = (bist & PCI_BIST_CODE_MASK);
6080 ret = 0;
6081 break;
6082 }
6083 msleep(100);
6084 cnt++;
6085 }
6086
6087 return ret;
6088}
6089
6090/**
20346722
K
 * s2io_link_test - verifies the link state of the nic
 * @sp : private member of the device structure, which is a pointer to the
1da177e4
LT
6093 * s2io_nic structure.
6094 * @data: variable that returns the result of each of the test conducted by
6095 * the driver.
6096 * Description:
20346722 6097 * The function verifies the link state of the NIC and updates the input
1da177e4
LT
6098 * argument 'data' appropriately.
6099 * Return value:
6100 * 0 on success.
6101 */
6102
1ee6dd77 6103static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
1da177e4 6104{
1ee6dd77 6105 struct XENA_dev_config __iomem *bar0 = sp->bar0;
1da177e4
LT
6106 u64 val64;
6107
6108 val64 = readq(&bar0->adapter_status);
c92ca04b 6109 if(!(LINK_IS_UP(val64)))
1da177e4 6110 *data = 1;
c92ca04b
AR
6111 else
6112 *data = 0;
1da177e4 6113
b41477f3 6114 return *data;
1da177e4
LT
6115}
6116
6117/**
20346722
K
6118 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
6119 * @sp - private member of the device structure, which is a pointer to the
1da177e4 6120 * s2io_nic structure.
20346722 6121 * @data - variable that returns the result of each of the test
1da177e4
LT
6122 * conducted by the driver.
6123 * Description:
20346722 6124 * This is one of the offline test that tests the read and write
1da177e4
LT
6125 * access to the RldRam chip on the NIC.
6126 * Return value:
6127 * 0 on success.
6128 */
6129
static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
{
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64;
	int cnt, iteration = 0, test_fail = 0;

	/* Disable ECC so the raw memory test patterns are not corrected. */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/* Put the RLDRAM controller into test mode. */
	val64 = readq(&bar0->mc_rldram_test_ctrl);
	val64 |= MC_RLDRAM_TEST_MODE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	val64 |= MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);

	/* Two passes: the second uses the bit-inverted patterns (the low
	 * 16 bits are not part of the pattern and stay zero). */
	while (iteration < 2) {
		val64 = 0x55555555aaaa0000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		}
		writeq(val64, &bar0->mc_rldram_test_d0);

		val64 = 0xaaaa5a5555550000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		}
		writeq(val64, &bar0->mc_rldram_test_d1);

		val64 = 0x55aaaaaaaa5a0000ULL;
		if (iteration == 1) {
			val64 ^= 0xFFFFFFFFFFFF0000ULL;
		}
		writeq(val64, &bar0->mc_rldram_test_d2);

		val64 = (u64) (0x0000003ffffe0100ULL);
		writeq(val64, &bar0->mc_rldram_test_add);

		/* Start the write phase and poll for completion
		 * (up to 5 x 200 ms). */
		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
			MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(200);
		}

		/* NOTE(review): on timeout the loop is abandoned with
		 * test_fail still 0, so a hung test is reported as a
		 * pass — confirm whether that is intended. */
		if (cnt == 5)
			break;

		/* Start the read-back phase and poll again
		 * (up to 5 x 500 ms). */
		val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
		SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);

		for (cnt = 0; cnt < 5; cnt++) {
			val64 = readq(&bar0->mc_rldram_test_ctrl);
			if (val64 & MC_RLDRAM_TEST_DONE)
				break;
			msleep(500);
		}

		if (cnt == 5)
			break;

		val64 = readq(&bar0->mc_rldram_test_ctrl);
		if (!(val64 & MC_RLDRAM_TEST_PASS))
			test_fail = 1;

		iteration++;
	}

	/* 0 = pass, 1 = fail. */
	*data = test_fail;

	/* Bring the adapter out of test mode */
	SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);

	return test_fail;
}
6214
6215/**
 * s2io_ethtool_test - conducts 6 tests to determine the health of the card.
6217 * @sp : private member of the device structure, which is a pointer to the
6218 * s2io_nic structure.
6219 * @ethtest : pointer to a ethtool command specific structure that will be
6220 * returned to the user.
20346722 6221 * @data : variable that returns the result of each of the test
1da177e4
LT
6222 * conducted by the driver.
6223 * Description:
6224 * This function conducts 6 tests ( 4 offline and 2 online) to determine
6225 * the health of the card.
6226 * Return value:
6227 * void
6228 */
6229
static void s2io_ethtool_test(struct net_device *dev,
			      struct ethtool_test *ethtest,
			      uint64_t * data)
{
	struct s2io_nic *sp = dev->priv;
	int orig_state = netif_running(sp->dev);

	/* NOTE(review): exact equality is used rather than masking with
	 * ETH_TEST_FL_OFFLINE — confirm no other flag bits can be set. */
	if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline Tests: the interface must be down, and the
		 * adapter is reset between the destructive tests. */
		if (orig_state)
			s2io_close(sp->dev);

		/* data[] slots: 0=register, 1=eeprom, 2=link, 3=rldram,
		 * 4=bist; 0 means the individual test passed. */
		if (s2io_register_test(sp, &data[0]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		s2io_reset(sp);

		if (s2io_rldram_test(sp, &data[3]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		s2io_reset(sp);

		if (s2io_eeprom_test(sp, &data[1]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		if (s2io_bist_test(sp, &data[4]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		/* Bring the interface back up if it was up on entry. */
		if (orig_state)
			s2io_open(sp->dev);

		/* The link test is not run offline. */
		data[2] = 0;
	} else {
		/* Online Tests: only the link test; everything else is
		 * reported as not-run (0) or -1 when the device is down. */
		if (!orig_state) {
			DBG_PRINT(ERR_DBG,
				  "%s: is not up, cannot run test\n",
				  dev->name);
			data[0] = -1;
			data[1] = -1;
			data[2] = -1;
			data[3] = -1;
			data[4] = -1;
		}

		if (s2io_link_test(sp, &data[2]))
			ethtest->flags |= ETH_TEST_FL_FAILED;

		data[0] = 0;
		data[1] = 0;
		data[3] = 0;
		data[4] = 0;
	}
}
6284
6285static void s2io_get_ethtool_stats(struct net_device *dev,
6286 struct ethtool_stats *estats,
6287 u64 * tmp_stats)
6288{
8116f3cf 6289 int i = 0, k;
1ee6dd77
RB
6290 struct s2io_nic *sp = dev->priv;
6291 struct stat_block *stat_info = sp->mac_control.stats_info;
1da177e4 6292
7ba013ac 6293 s2io_updt_stats(sp);
541ae68f
K
6294 tmp_stats[i++] =
6295 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
6296 le32_to_cpu(stat_info->tmac_frms);
6297 tmp_stats[i++] =
6298 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
6299 le32_to_cpu(stat_info->tmac_data_octets);
1da177e4 6300 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
541ae68f
K
6301 tmp_stats[i++] =
6302 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
6303 le32_to_cpu(stat_info->tmac_mcst_frms);
6304 tmp_stats[i++] =
6305 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
6306 le32_to_cpu(stat_info->tmac_bcst_frms);
1da177e4 6307 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
bd1034f0
AR
6308 tmp_stats[i++] =
6309 (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
6310 le32_to_cpu(stat_info->tmac_ttl_octets);
6311 tmp_stats[i++] =
6312 (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
6313 le32_to_cpu(stat_info->tmac_ucst_frms);
6314 tmp_stats[i++] =
6315 (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
6316 le32_to_cpu(stat_info->tmac_nucst_frms);
541ae68f
K
6317 tmp_stats[i++] =
6318 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
6319 le32_to_cpu(stat_info->tmac_any_err_frms);
bd1034f0 6320 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
1da177e4 6321 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
541ae68f
K
6322 tmp_stats[i++] =
6323 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
6324 le32_to_cpu(stat_info->tmac_vld_ip);
6325 tmp_stats[i++] =
6326 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
6327 le32_to_cpu(stat_info->tmac_drop_ip);
6328 tmp_stats[i++] =
6329 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
6330 le32_to_cpu(stat_info->tmac_icmp);
6331 tmp_stats[i++] =
6332 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
6333 le32_to_cpu(stat_info->tmac_rst_tcp);
1da177e4 6334 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
541ae68f
K
6335 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
6336 le32_to_cpu(stat_info->tmac_udp);
6337 tmp_stats[i++] =
6338 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
6339 le32_to_cpu(stat_info->rmac_vld_frms);
6340 tmp_stats[i++] =
6341 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
6342 le32_to_cpu(stat_info->rmac_data_octets);
1da177e4
LT
6343 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
6344 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
541ae68f
K
6345 tmp_stats[i++] =
6346 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
6347 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
6348 tmp_stats[i++] =
6349 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
6350 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
1da177e4 6351 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
bd1034f0 6352 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
1da177e4
LT
6353 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
6354 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
bd1034f0
AR
6355 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
6356 tmp_stats[i++] =
6357 (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
6358 le32_to_cpu(stat_info->rmac_ttl_octets);
6359 tmp_stats[i++] =
6360 (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
6361 << 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
6362 tmp_stats[i++] =
6363 (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
6364 << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
541ae68f
K
6365 tmp_stats[i++] =
6366 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
6367 le32_to_cpu(stat_info->rmac_discarded_frms);
bd1034f0
AR
6368 tmp_stats[i++] =
6369 (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
6370 << 32 | le32_to_cpu(stat_info->rmac_drop_events);
6371 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
6372 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
541ae68f
K
6373 tmp_stats[i++] =
6374 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
6375 le32_to_cpu(stat_info->rmac_usized_frms);
6376 tmp_stats[i++] =
6377 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
6378 le32_to_cpu(stat_info->rmac_osized_frms);
6379 tmp_stats[i++] =
6380 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
6381 le32_to_cpu(stat_info->rmac_frag_frms);
6382 tmp_stats[i++] =
6383 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
6384 le32_to_cpu(stat_info->rmac_jabber_frms);
bd1034f0
AR
6385 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
6386 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
6387 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
6388 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
6389 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
6390 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
6391 tmp_stats[i++] =
6392 (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
541ae68f 6393 le32_to_cpu(stat_info->rmac_ip);
1da177e4
LT
6394 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
6395 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
bd1034f0
AR
6396 tmp_stats[i++] =
6397 (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
541ae68f 6398 le32_to_cpu(stat_info->rmac_drop_ip);
bd1034f0
AR
6399 tmp_stats[i++] =
6400 (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
541ae68f 6401 le32_to_cpu(stat_info->rmac_icmp);
1da177e4 6402 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
bd1034f0
AR
6403 tmp_stats[i++] =
6404 (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
541ae68f
K
6405 le32_to_cpu(stat_info->rmac_udp);
6406 tmp_stats[i++] =
6407 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
6408 le32_to_cpu(stat_info->rmac_err_drp_udp);
bd1034f0
AR
6409 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
6410 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
6411 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
6412 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
6413 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
6414 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
6415 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
6416 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
6417 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
6418 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
6419 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
6420 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
6421 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
6422 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
6423 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
6424 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
6425 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
541ae68f
K
6426 tmp_stats[i++] =
6427 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
6428 le32_to_cpu(stat_info->rmac_pause_cnt);
bd1034f0
AR
6429 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
6430 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
541ae68f
K
6431 tmp_stats[i++] =
6432 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
6433 le32_to_cpu(stat_info->rmac_accepted_ip);
1da177e4 6434 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
bd1034f0
AR
6435 tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
6436 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
6437 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
6438 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
6439 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
6440 tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
6441 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
6442 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
6443 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
6444 tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
6445 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
6446 tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
6447 tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
6448 tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
6449 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
6450 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
6451 tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
6452 tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
fa1f0cb3
SS
6453
6454 /* Enhanced statistics exist only for Hercules */
6455 if(sp->device_type == XFRAME_II_DEVICE) {
6456 tmp_stats[i++] =
6457 le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
6458 tmp_stats[i++] =
6459 le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
6460 tmp_stats[i++] =
6461 le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
6462 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
6463 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
6464 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
6465 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
6466 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
6467 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
6468 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
6469 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
6470 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
6471 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
6472 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
6473 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
6474 tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
6475 }
6476
7ba013ac
K
6477 tmp_stats[i++] = 0;
6478 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
6479 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
bd1034f0
AR
6480 tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
6481 tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
6482 tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
6483 tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
8116f3cf
SS
6484 for (k = 0; k < MAX_RX_RINGS; k++)
6485 tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt[k];
bd1034f0
AR
6486 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
6487 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
6488 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
6489 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
6490 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
6491 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
6492 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
6493 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
6494 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
6495 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
6496 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
6497 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
7d3d0439
RA
6498 tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
6499 tmp_stats[i++] = stat_info->sw_stat.sending_both;
6500 tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
6501 tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
fe931395 6502 if (stat_info->sw_stat.num_aggregations) {
bd1034f0
AR
6503 u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
6504 int count = 0;
6aa20a22 6505 /*
bd1034f0
AR
6506 * Since 64-bit divide does not work on all platforms,
6507 * do repeated subtraction.
6508 */
6509 while (tmp >= stat_info->sw_stat.num_aggregations) {
6510 tmp -= stat_info->sw_stat.num_aggregations;
6511 count++;
6512 }
6513 tmp_stats[i++] = count;
fe931395 6514 }
bd1034f0
AR
6515 else
6516 tmp_stats[i++] = 0;
c53d4945 6517 tmp_stats[i++] = stat_info->sw_stat.mem_alloc_fail_cnt;
491abf25 6518 tmp_stats[i++] = stat_info->sw_stat.pci_map_fail_cnt;
c53d4945 6519 tmp_stats[i++] = stat_info->sw_stat.watchdog_timer_cnt;
491976b2
SH
6520 tmp_stats[i++] = stat_info->sw_stat.mem_allocated;
6521 tmp_stats[i++] = stat_info->sw_stat.mem_freed;
6522 tmp_stats[i++] = stat_info->sw_stat.link_up_cnt;
6523 tmp_stats[i++] = stat_info->sw_stat.link_down_cnt;
6524 tmp_stats[i++] = stat_info->sw_stat.link_up_time;
6525 tmp_stats[i++] = stat_info->sw_stat.link_down_time;
6526
6527 tmp_stats[i++] = stat_info->sw_stat.tx_buf_abort_cnt;
6528 tmp_stats[i++] = stat_info->sw_stat.tx_desc_abort_cnt;
6529 tmp_stats[i++] = stat_info->sw_stat.tx_parity_err_cnt;
6530 tmp_stats[i++] = stat_info->sw_stat.tx_link_loss_cnt;
6531 tmp_stats[i++] = stat_info->sw_stat.tx_list_proc_err_cnt;
6532
6533 tmp_stats[i++] = stat_info->sw_stat.rx_parity_err_cnt;
6534 tmp_stats[i++] = stat_info->sw_stat.rx_abort_cnt;
6535 tmp_stats[i++] = stat_info->sw_stat.rx_parity_abort_cnt;
6536 tmp_stats[i++] = stat_info->sw_stat.rx_rda_fail_cnt;
6537 tmp_stats[i++] = stat_info->sw_stat.rx_unkn_prot_cnt;
6538 tmp_stats[i++] = stat_info->sw_stat.rx_fcs_err_cnt;
6539 tmp_stats[i++] = stat_info->sw_stat.rx_buf_size_err_cnt;
6540 tmp_stats[i++] = stat_info->sw_stat.rx_rxd_corrupt_cnt;
6541 tmp_stats[i++] = stat_info->sw_stat.rx_unkn_err_cnt;
8116f3cf
SS
6542 tmp_stats[i++] = stat_info->sw_stat.tda_err_cnt;
6543 tmp_stats[i++] = stat_info->sw_stat.pfc_err_cnt;
6544 tmp_stats[i++] = stat_info->sw_stat.pcc_err_cnt;
6545 tmp_stats[i++] = stat_info->sw_stat.tti_err_cnt;
6546 tmp_stats[i++] = stat_info->sw_stat.tpa_err_cnt;
6547 tmp_stats[i++] = stat_info->sw_stat.sm_err_cnt;
6548 tmp_stats[i++] = stat_info->sw_stat.lso_err_cnt;
6549 tmp_stats[i++] = stat_info->sw_stat.mac_tmac_err_cnt;
6550 tmp_stats[i++] = stat_info->sw_stat.mac_rmac_err_cnt;
6551 tmp_stats[i++] = stat_info->sw_stat.xgxs_txgxs_err_cnt;
6552 tmp_stats[i++] = stat_info->sw_stat.xgxs_rxgxs_err_cnt;
6553 tmp_stats[i++] = stat_info->sw_stat.rc_err_cnt;
6554 tmp_stats[i++] = stat_info->sw_stat.prc_pcix_err_cnt;
6555 tmp_stats[i++] = stat_info->sw_stat.rpa_err_cnt;
6556 tmp_stats[i++] = stat_info->sw_stat.rda_err_cnt;
6557 tmp_stats[i++] = stat_info->sw_stat.rti_err_cnt;
6558 tmp_stats[i++] = stat_info->sw_stat.mc_err_cnt;
1da177e4
LT
6559}
6560
ac1f60db 6561static int s2io_ethtool_get_regs_len(struct net_device *dev)
1da177e4
LT
6562{
6563 return (XENA_REG_SPACE);
6564}
6565
6566
ac1f60db 6567static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
1da177e4 6568{
1ee6dd77 6569 struct s2io_nic *sp = dev->priv;
1da177e4
LT
6570
6571 return (sp->rx_csum);
6572}
ac1f60db
AB
6573
6574static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
1da177e4 6575{
1ee6dd77 6576 struct s2io_nic *sp = dev->priv;
1da177e4
LT
6577
6578 if (data)
6579 sp->rx_csum = 1;
6580 else
6581 sp->rx_csum = 0;
6582
6583 return 0;
6584}
ac1f60db
AB
6585
6586static int s2io_get_eeprom_len(struct net_device *dev)
1da177e4
LT
6587{
6588 return (XENA_EEPROM_SPACE);
6589}
6590
b9f2c044 6591static int s2io_get_sset_count(struct net_device *dev, int sset)
1da177e4 6592{
b9f2c044
JG
6593 struct s2io_nic *sp = dev->priv;
6594
6595 switch (sset) {
6596 case ETH_SS_TEST:
6597 return S2IO_TEST_LEN;
6598 case ETH_SS_STATS:
6599 switch(sp->device_type) {
6600 case XFRAME_I_DEVICE:
6601 return XFRAME_I_STAT_LEN;
6602 case XFRAME_II_DEVICE:
6603 return XFRAME_II_STAT_LEN;
6604 default:
6605 return 0;
6606 }
6607 default:
6608 return -EOPNOTSUPP;
6609 }
1da177e4 6610}
ac1f60db
AB
6611
6612static void s2io_ethtool_get_strings(struct net_device *dev,
6613 u32 stringset, u8 * data)
1da177e4 6614{
fa1f0cb3
SS
6615 int stat_size = 0;
6616 struct s2io_nic *sp = dev->priv;
6617
1da177e4
LT
6618 switch (stringset) {
6619 case ETH_SS_TEST:
6620 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
6621 break;
6622 case ETH_SS_STATS:
fa1f0cb3
SS
6623 stat_size = sizeof(ethtool_xena_stats_keys);
6624 memcpy(data, &ethtool_xena_stats_keys,stat_size);
6625 if(sp->device_type == XFRAME_II_DEVICE) {
6626 memcpy(data + stat_size,
6627 &ethtool_enhanced_stats_keys,
6628 sizeof(ethtool_enhanced_stats_keys));
6629 stat_size += sizeof(ethtool_enhanced_stats_keys);
6630 }
6631
6632 memcpy(data + stat_size, &ethtool_driver_stats_keys,
6633 sizeof(ethtool_driver_stats_keys));
1da177e4
LT
6634 }
6635}
1da177e4 6636
ac1f60db 6637static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
1da177e4
LT
6638{
6639 if (data)
6640 dev->features |= NETIF_F_IP_CSUM;
6641 else
6642 dev->features &= ~NETIF_F_IP_CSUM;
6643
6644 return 0;
6645}
6646
75c30b13
AR
6647static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
6648{
6649 return (dev->features & NETIF_F_TSO) != 0;
6650}
6651static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
6652{
6653 if (data)
6654 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
6655 else
6656 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
6657
6658 return 0;
6659}
1da177e4 6660
/*
 * ethtool entry points for the s2io driver; installed on the netdev at
 * probe time. Handlers not defined in this chunk (gset/sset, gregs,
 * eeprom, pause, self-test, idnic, gringparam) live elsewhere in the file.
 */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_settings = s2io_ethtool_gset,
	.set_settings = s2io_ethtool_sset,
	.get_drvinfo = s2io_ethtool_gdrvinfo,
	.get_regs_len = s2io_ethtool_get_regs_len,
	.get_regs = s2io_ethtool_gregs,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = s2io_get_eeprom_len,
	.get_eeprom = s2io_ethtool_geeprom,
	.set_eeprom = s2io_ethtool_seeprom,
	.get_ringparam = s2io_ethtool_gringparam,
	.get_pauseparam = s2io_ethtool_getpause_data,
	.set_pauseparam = s2io_ethtool_setpause_data,
	.get_rx_csum = s2io_ethtool_get_rx_csum,
	.set_rx_csum = s2io_ethtool_set_rx_csum,
	.set_tx_csum = s2io_ethtool_op_set_tx_csum,
	.set_sg = ethtool_op_set_sg,
	.get_tso = s2io_ethtool_op_get_tso,
	.set_tso = s2io_ethtool_op_set_tso,
	.set_ufo = ethtool_op_set_ufo,
	.self_test = s2io_ethtool_test,
	.get_strings = s2io_ethtool_get_strings,
	.phys_id = s2io_ethtool_idnic,
	.get_ethtool_stats = s2io_get_ethtool_stats,
	.get_sset_count = s2io_get_sset_count,
};
6687
6688/**
20346722 6689 * s2io_ioctl - Entry point for the Ioctl
1da177e4
LT
6690 * @dev : Device pointer.
6691 * @ifr : An IOCTL specefic structure, that can contain a pointer to
6692 * a proprietary structure used to pass information to the driver.
6693 * @cmd : This is used to distinguish between the different commands that
6694 * can be passed to the IOCTL functions.
6695 * Description:
20346722
K
6696 * Currently there are no special functionality supported in IOCTL, hence
6697 * function always return EOPNOTSUPPORTED
1da177e4
LT
6698 */
6699
ac1f60db 6700static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1da177e4
LT
6701{
6702 return -EOPNOTSUPP;
6703}
6704
6705/**
6706 * s2io_change_mtu - entry point to change MTU size for the device.
6707 * @dev : device pointer.
6708 * @new_mtu : the new MTU size for the device.
6709 * Description: A driver entry point to change MTU size for the device.
6710 * Before changing the MTU the device must be stopped.
6711 * Return value:
6712 * 0 on success and an appropriate (-)ve integer as defined in errno.h
6713 * file on failure.
6714 */
6715
ac1f60db 6716static int s2io_change_mtu(struct net_device *dev, int new_mtu)
1da177e4 6717{
1ee6dd77 6718 struct s2io_nic *sp = dev->priv;
9f74ffde 6719 int ret = 0;
1da177e4
LT
6720
6721 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
6722 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
6723 dev->name);
6724 return -EPERM;
6725 }
6726
1da177e4 6727 dev->mtu = new_mtu;
d8892c6e 6728 if (netif_running(dev)) {
3a3d5756 6729 s2io_stop_all_tx_queue(sp);
e6a8fee2 6730 s2io_card_down(sp);
9f74ffde
SH
6731 ret = s2io_card_up(sp);
6732 if (ret) {
d8892c6e
K
6733 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6734 __FUNCTION__);
9f74ffde 6735 return ret;
d8892c6e 6736 }
3a3d5756 6737 s2io_wake_all_tx_queue(sp);
d8892c6e 6738 } else { /* Device is down */
1ee6dd77 6739 struct XENA_dev_config __iomem *bar0 = sp->bar0;
d8892c6e
K
6740 u64 val64 = new_mtu;
6741
6742 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
6743 }
1da177e4 6744
9f74ffde 6745 return ret;
1da177e4
LT
6746}
6747
/**
 * s2io_tasklet - Bottom half of the ISR.
 * @dev_addr : address of the device structure in dma_addr_t format.
 * Description:
 * This is the tasklet or the bottom half of the ISR. This is
 * an extension of the ISR which is scheduled by the scheduler to be run
 * when the load on the CPU is low. All low priority tasks of the ISR can
 * be pushed into the tasklet. For now the tasklet is used only to
 * replenish the Rx buffers in the Rx buffer descriptors.
 * Return value:
 * void.
 */

static void s2io_tasklet(unsigned long dev_addr)
{
	struct net_device *dev = (struct net_device *) dev_addr;
	struct s2io_nic *sp = dev->priv;
	int i, ret;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &sp->mac_control;
	config = &sp->config;

	/*
	 * TASKLET_IN_USE test-and-sets sp->tasklet_status bit 0 (cleared
	 * below), so only one instance refills the rings at a time.
	 */
	if (!TASKLET_IN_USE) {
		for (i = 0; i < config->rx_ring_num; i++) {
			ret = fill_rx_buffers(sp, i);
			if (ret == -ENOMEM) {
				/* Out of skbs: stop refilling entirely. */
				DBG_PRINT(INFO_DBG, "%s: Out of ",
					  dev->name);
				DBG_PRINT(INFO_DBG, "memory in tasklet\n");
				break;
			} else if (ret == -EFILL) {
				/* Ring already full: nothing more to do. */
				DBG_PRINT(INFO_DBG,
					  "%s: Rx Ring %d is full\n",
					  dev->name, i);
				break;
			}
		}
		clear_bit(0, (&sp->tasklet_status));
	}
}
6790
/**
 * s2io_set_link - Set the link status
 * @work: work item embedded in the s2io_nic structure
 * Description: Brings the adapter's link state (enable bit, LED/GPIO
 * indicators) in line with the state reported by the hardware, then
 * notifies the stack via s2io_link().
 */

static void s2io_set_link(struct work_struct *work)
{
	struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
	struct net_device *dev = nic->dev;
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64;
	u16 subid;

	rtnl_lock();

	if (!netif_running(dev))
		goto out_unlock;

	if (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(nic->state))) {
		/* The card is being reset, no point doing anything */
		goto out_unlock;
	}

	subid = nic->pdev->subsystem_device;
	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Allow a small delay for the NICs self initiated
		 * cleanup to complete.
		 */
		msleep(100);
	}

	val64 = readq(&bar0->adapter_status);
	if (LINK_IS_UP(val64)) {
		/* Enable the adapter only once, and only when quiescent. */
		if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
			if (verify_xena_quiescence(nic)) {
				val64 = readq(&bar0->adapter_control);
				val64 |= ADAPTER_CNTL_EN;
				writeq(val64, &bar0->adapter_control);
				/*
				 * Some cards signal link through GPIO 0
				 * instead of the LED bit.
				 */
				if (CARDS_WITH_FAULTY_LINK_INDICATORS(
					nic->device_type, subid)) {
					val64 = readq(&bar0->gpio_control);
					val64 |= GPIO_CTRL_GPIO_0;
					writeq(val64, &bar0->gpio_control);
					/* read back to flush the write */
					val64 = readq(&bar0->gpio_control);
				} else {
					val64 |= ADAPTER_LED_ON;
					writeq(val64, &bar0->adapter_control);
				}
				nic->device_enabled_once = TRUE;
			} else {
				DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
				DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
				s2io_stop_all_tx_queue(nic);
			}
		}
		val64 = readq(&bar0->adapter_control);
		val64 |= ADAPTER_LED_ON;
		writeq(val64, &bar0->adapter_control);
		s2io_link(nic, LINK_UP);
	} else {
		if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
						      subid)) {
			val64 = readq(&bar0->gpio_control);
			val64 &= ~GPIO_CTRL_GPIO_0;
			writeq(val64, &bar0->gpio_control);
			/* read back to flush the write */
			val64 = readq(&bar0->gpio_control);
		}
		/* turn off LED */
		val64 = readq(&bar0->adapter_control);
		val64 = val64 &(~ADAPTER_LED_ON);
		writeq(val64, &bar0->adapter_control);
		s2io_link(nic, LINK_DOWN);
	}
	clear_bit(__S2IO_STATE_LINK_TASK, &(nic->state));

out_unlock:
	rtnl_unlock();
}
6871
/*
 * set_rxd_buffer_pointer - (re)attach DMA-mapped buffers to an Rx descriptor.
 * @sp:    device private structure
 * @rxdp:  descriptor to fill
 * @ba:    buffer-address bookkeeping for 3-buffer mode (unused in mode 1)
 * @skb:   in/out: skb shared across descriptors of one refill pass; if
 *         already allocated, its previous mappings (*temp0..*temp2) are
 *         reused since the frame data will never be processed
 * @temp0..temp2: cached DMA addresses from the first allocation
 * @size:  skb allocation size
 *
 * Used by rxd_owner_bit_reset() while the card is going down: each empty
 * descriptor must still point at valid DMA memory to avoid a ring bump.
 * Returns 0 on success, -ENOMEM on skb allocation or DMA mapping failure
 * (note the negative value — callers must compare against -ENOMEM).
 */
static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
				struct buffAdd *ba,
				struct sk_buff **skb, u64 *temp0, u64 *temp1,
				u64 *temp2, int size)
{
	struct net_device *dev = sp->dev;
	struct swStat *stats = &sp->mac_control.stats_info->sw_stat;

	if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
		struct RxD1 *rxdp1 = (struct RxD1 *)rxdp;
		/* allocate skb */
		if (*skb) {
			DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
			/*
			 * As Rx frames are not going to be processed,
			 * reuse the same mapped address for this Rxd
			 * buffer pointer
			 */
			rxdp1->Buffer0_ptr = *temp0;
		} else {
			*skb = dev_alloc_skb(size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
				DBG_PRINT(INFO_DBG, "memory to allocate ");
				DBG_PRINT(INFO_DBG, "1 buf mode SKBs\n");
				sp->mac_control.stats_info->sw_stat. \
					mem_alloc_fail_cnt++;
				return -ENOMEM ;
			}
			sp->mac_control.stats_info->sw_stat.mem_allocated
				+= (*skb)->truesize;
			/* storing the mapped addr in a temp variable
			 * such it will be used for next rxd whose
			 * Host Control is NULL
			 */
			rxdp1->Buffer0_ptr = *temp0 =
				pci_map_single( sp->pdev, (*skb)->data,
					size - NET_IP_ALIGN,
					PCI_DMA_FROMDEVICE);
			if( (rxdp1->Buffer0_ptr == 0) ||
				(rxdp1->Buffer0_ptr == DMA_ERROR_CODE)) {
				goto memalloc_failed;
			}
			rxdp->Host_Control = (unsigned long) (*skb);
		}
	} else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
		struct RxD3 *rxdp3 = (struct RxD3 *)rxdp;
		/* Two buffer Mode */
		if (*skb) {
			/* reuse the mappings cached from the first alloc */
			rxdp3->Buffer2_ptr = *temp2;
			rxdp3->Buffer0_ptr = *temp0;
			rxdp3->Buffer1_ptr = *temp1;
		} else {
			*skb = dev_alloc_skb(size);
			if (!(*skb)) {
				DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
				DBG_PRINT(INFO_DBG, "memory to allocate ");
				DBG_PRINT(INFO_DBG, "2 buf mode SKBs\n");
				sp->mac_control.stats_info->sw_stat. \
					mem_alloc_fail_cnt++;
				return -ENOMEM;
			}
			sp->mac_control.stats_info->sw_stat.mem_allocated
				+= (*skb)->truesize;
			rxdp3->Buffer2_ptr = *temp2 =
				pci_map_single(sp->pdev, (*skb)->data,
					dev->mtu + 4,
					PCI_DMA_FROMDEVICE);
			if( (rxdp3->Buffer2_ptr == 0) ||
				(rxdp3->Buffer2_ptr == DMA_ERROR_CODE)) {
				goto memalloc_failed;
			}
			rxdp3->Buffer0_ptr = *temp0 =
				pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
					PCI_DMA_FROMDEVICE);
			if( (rxdp3->Buffer0_ptr == 0) ||
				(rxdp3->Buffer0_ptr == DMA_ERROR_CODE)) {
				/* unwind the Buffer2 mapping made above */
				pci_unmap_single (sp->pdev,
					(dma_addr_t)rxdp3->Buffer2_ptr,
					dev->mtu + 4, PCI_DMA_FROMDEVICE);
				goto memalloc_failed;
			}
			rxdp->Host_Control = (unsigned long) (*skb);

			/* Buffer-1 will be dummy buffer not used */
			rxdp3->Buffer1_ptr = *temp1 =
				pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
					PCI_DMA_FROMDEVICE);
			if( (rxdp3->Buffer1_ptr == 0) ||
				(rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
				/* unwind both earlier mappings */
				pci_unmap_single (sp->pdev,
					(dma_addr_t)rxdp3->Buffer0_ptr,
					BUF0_LEN, PCI_DMA_FROMDEVICE);
				pci_unmap_single (sp->pdev,
					(dma_addr_t)rxdp3->Buffer2_ptr,
					dev->mtu + 4, PCI_DMA_FROMDEVICE);
				goto memalloc_failed;
			}
		}
	}
	return 0;
	memalloc_failed:
	/* common DMA-mapping failure path: count it and drop the skb */
	stats->pci_map_fail_cnt++;
	stats->mem_freed += (*skb)->truesize;
	dev_kfree_skb(*skb);
	return -ENOMEM;
}
491abf25 6979
/*
 * set_rxd_buffer_size - program the per-buffer sizes into an Rx descriptor.
 * @sp:   device private structure (selects 1-buffer vs 3B ring mode)
 * @rxdp: descriptor whose Control_2 size fields are written
 * @size: total skb allocation size computed by the caller
 *
 * In 3B mode buffer 1 is a dummy (size 1) and buffer 2 carries the
 * payload (MTU + 4 bytes).
 */
static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
				int size)
{
	struct net_device *dev = sp->dev;
	if (sp->rxd_mode == RXD_MODE_1) {
		rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
	} else if (sp->rxd_mode == RXD_MODE_3B) {
		rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
		rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
		rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
	}
}
6992
1ee6dd77 6993static int rxd_owner_bit_reset(struct s2io_nic *sp)
5d3213cc
AR
6994{
6995 int i, j, k, blk_cnt = 0, size;
1ee6dd77 6996 struct mac_info * mac_control = &sp->mac_control;
5d3213cc
AR
6997 struct config_param *config = &sp->config;
6998 struct net_device *dev = sp->dev;
1ee6dd77 6999 struct RxD_t *rxdp = NULL;
5d3213cc 7000 struct sk_buff *skb = NULL;
1ee6dd77 7001 struct buffAdd *ba = NULL;
5d3213cc
AR
7002 u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
7003
7004 /* Calculate the size based on ring mode */
7005 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
7006 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
7007 if (sp->rxd_mode == RXD_MODE_1)
7008 size += NET_IP_ALIGN;
7009 else if (sp->rxd_mode == RXD_MODE_3B)
7010 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
5d3213cc
AR
7011
7012 for (i = 0; i < config->rx_ring_num; i++) {
7013 blk_cnt = config->rx_cfg[i].num_rxd /
7014 (rxd_count[sp->rxd_mode] +1);
7015
7016 for (j = 0; j < blk_cnt; j++) {
7017 for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
7018 rxdp = mac_control->rings[i].
7019 rx_blocks[j].rxds[k].virt_addr;
6d517a27 7020 if(sp->rxd_mode == RXD_MODE_3B)
5d3213cc 7021 ba = &mac_control->rings[i].ba[j][k];
ac1f90d6 7022 if (set_rxd_buffer_pointer(sp, rxdp, ba,
5d3213cc
AR
7023 &skb,(u64 *)&temp0_64,
7024 (u64 *)&temp1_64,
ac1f90d6
SS
7025 (u64 *)&temp2_64,
7026 size) == ENOMEM) {
7027 return 0;
7028 }
5d3213cc
AR
7029
7030 set_rxd_buffer_size(sp, rxdp, size);
7031 wmb();
7032 /* flip the Ownership bit to Hardware */
7033 rxdp->Control_1 |= RXD_OWN_XENA;
7034 }
7035 }
7036 }
7037 return 0;
7038
7039}
7040
/*
 * s2io_add_isr - enable and register the device's interrupt handlers.
 * @sp: device private structure
 *
 * Tries MSI-X first (if configured): enables the vectors, then registers
 * one handler per Tx FIFO / Rx ring entry in s2io_entries[]. Any failure
 * along the way tears down what was registered and falls back to legacy
 * INTA with a single shared handler.
 *
 * Return value: 0 on success, -1 if even INTA registration fails.
 */
static int s2io_add_isr(struct s2io_nic * sp)
{
	int ret = 0;
	struct net_device *dev = sp->dev;
	int err = 0;

	if (sp->config.intr_type == MSI_X)
		ret = s2io_enable_msi_x(sp);
	if (ret) {
		DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
		sp->config.intr_type = INTA;
	}

	/* Store the values of the MSIX table in the struct s2io_nic structure */
	store_xmsi_data(sp);

	/* After proper initialization of H/W, register ISR */
	if (sp->config.intr_type == MSI_X) {
		int i, msix_tx_cnt=0,msix_rx_cnt=0;

		/* entry 0 is the alarm vector; per-queue vectors start at 1 */
		for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
			if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
				sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
					dev->name, i);
				err = request_irq(sp->entries[i].vector,
					s2io_msix_fifo_handle, 0, sp->desc[i],
					sp->s2io_entries[i].arg);
				/* If either data or addr is zero print it */
				if(!(sp->msix_info[i].addr &&
					sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx "
						"Data:0x%lx\n",sp->desc[i],
						(unsigned long long)
						sp->msix_info[i].addr,
						(unsigned long)
						ntohl(sp->msix_info[i].data));
				} else {
					msix_tx_cnt++;
				}
			} else {
				sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
					dev->name, i);
				err = request_irq(sp->entries[i].vector,
					s2io_msix_ring_handle, 0, sp->desc[i],
					sp->s2io_entries[i].arg);
				/* If either data or addr is zero print it */
				if(!(sp->msix_info[i].addr &&
					sp->msix_info[i].data)) {
					DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx "
						"Data:0x%lx\n",sp->desc[i],
						(unsigned long long)
						sp->msix_info[i].addr,
						(unsigned long)
						ntohl(sp->msix_info[i].data));
				} else {
					msix_rx_cnt++;
				}
			}
			if (err) {
				/* undo the vectors registered so far */
				remove_msix_isr(sp);
				DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
					"failed\n", dev->name, i);
				DBG_PRINT(ERR_DBG, "%s: defaulting to INTA\n",
					dev->name);
				sp->config.intr_type = INTA;
				break;
			}
			sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
		}
		if (!err) {
			printk(KERN_INFO "MSI-X-TX %d entries enabled\n",
				msix_tx_cnt);
			printk(KERN_INFO "MSI-X-RX %d entries enabled\n",
				msix_rx_cnt);
		}
	}
	/* reached either by configuration or by MSI-X fallback above */
	if (sp->config.intr_type == INTA) {
		err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
			sp->name, dev);
		if (err) {
			DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
				dev->name);
			return -1;
		}
	}
	return 0;
}
1ee6dd77 7128static void s2io_rem_isr(struct s2io_nic * sp)
e6a8fee2 7129{
18b2b7bd
SH
7130 if (sp->config.intr_type == MSI_X)
7131 remove_msix_isr(sp);
7132 else
7133 remove_inta_isr(sp);
e6a8fee2
AR
7134}
7135
/*
 * do_s2io_card_down - tear the adapter down.
 * @sp:    device private structure
 * @do_io: non-zero when hardware register access is still safe; zero on
 *         paths (e.g. PCI error recovery) where I/O must be skipped
 *
 * Ordering matters here: stop timers and the link task, disable NAPI,
 * stop traffic, unhook interrupts and the tasklet, wait for quiescence,
 * reset the chip, then free Tx/Rx buffers.
 */
static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
{
	int cnt = 0;
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	unsigned long flags;
	register u64 val64 = 0;
	struct config_param *config;
	config = &sp->config;

	if (!is_s2io_card_up(sp))
		return;

	del_timer_sync(&sp->alarm_timer);
	/* If s2io_set_link task is executing, wait till it completes. */
	while (test_and_set_bit(__S2IO_STATE_LINK_TASK, &(sp->state))) {
		msleep(50);
	}
	clear_bit(__S2IO_STATE_CARD_UP, &sp->state);

	/* Disable napi */
	if (config->napi)
		napi_disable(&sp->napi);

	/* disable Tx and Rx traffic on the NIC */
	if (do_io)
		stop_nic(sp);

	s2io_rem_isr(sp);

	/* Kill tasklet. */
	tasklet_kill(&sp->task);

	/* Check if the device is Quiescent and then Reset the NIC */
	while(do_io) {
		/* As per the HW requirement we need to replenish the
		 * receive buffer to avoid the ring bump. Since there is
		 * no intention of processing the Rx frame at this point we
		 * are just setting the ownership bit of rxd in each Rx
		 * ring to HW and set the appropriate buffer size
		 * based on the ring mode
		 */
		rxd_owner_bit_reset(sp);

		val64 = readq(&bar0->adapter_status);
		if (verify_xena_quiescence(sp)) {
			if(verify_pcc_quiescent(sp, sp->device_enabled_once))
				break;
		}

		msleep(50);
		cnt++;
		/* give up after ~500ms and reset anyway */
		if (cnt == 10) {
			DBG_PRINT(ERR_DBG,
				"s2io_close:Device not Quiescent ");
			DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
				(unsigned long long) val64);
			break;
		}
	}
	if (do_io)
		s2io_reset(sp);

	/* Free all Tx buffers */
	free_tx_buffers(sp);

	/* Free all Rx buffers */
	spin_lock_irqsave(&sp->rx_lock, flags);
	free_rx_buffers(sp);
	spin_unlock_irqrestore(&sp->rx_lock, flags);

	clear_bit(__S2IO_STATE_LINK_TASK, &(sp->state));
}
7208
/* Normal teardown path: register I/O is safe, so pass do_io = 1. */
static void s2io_card_down(struct s2io_nic * sp)
{
	do_s2io_card_down(sp, 1);
}
7213
/*
 * s2io_card_up - bring the adapter fully up.
 * @sp: device private structure
 *
 * Initializes the hardware, fills all Rx rings, enables NAPI, restores
 * the receive-mode state, configures LRO limits, starts traffic,
 * registers interrupt handlers, arms the alarm timer and tasklet, and
 * finally enables interrupts and marks the card up.
 *
 * Return value: 0 on success, negative errno on failure (hardware is
 * reset and Rx buffers freed on every failure path after init_nic()).
 */
static int s2io_card_up(struct s2io_nic * sp)
{
	int i, ret = 0;
	struct mac_info *mac_control;
	struct config_param *config;
	struct net_device *dev = (struct net_device *) sp->dev;
	u16 interruptible;

	/* Initialize the H/W I/O registers */
	ret = init_nic(sp);
	if (ret != 0) {
		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
			  dev->name);
		/* -EIO means the hardware is unreachable: skip the reset */
		if (ret != -EIO)
			s2io_reset(sp);
		return ret;
	}

	/*
	 * Initializing the Rx buffers. For now we are considering only 1
	 * Rx ring and initializing buffers into 30 Rx blocks
	 */
	mac_control = &sp->mac_control;
	config = &sp->config;

	for (i = 0; i < config->rx_ring_num; i++) {
		if ((ret = fill_rx_buffers(sp, i))) {
			DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
				  dev->name);
			s2io_reset(sp);
			free_rx_buffers(sp);
			return -ENOMEM;
		}
		DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
			  atomic_read(&sp->rx_bufs_left[i]));
	}

	/* Initialise napi */
	if (config->napi)
		napi_enable(&sp->napi);

	/* Maintain the state prior to the open */
	if (sp->promisc_flg)
		sp->promisc_flg = 0;
	if (sp->m_cast_flg) {
		sp->m_cast_flg = 0;
		sp->all_multi_pos= 0;
	}

	/* Setting its receive mode */
	s2io_set_multicast(dev);

	if (sp->lro) {
		/* Initialize max aggregatable pkts per session based on MTU */
		sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
		/* Check if we can use(if specified) user provided value */
		if (lro_max_pkts < sp->lro_max_aggr_per_sess)
			sp->lro_max_aggr_per_sess = lro_max_pkts;
	}

	/* Enable Rx Traffic and interrupts on the NIC */
	if (start_nic(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	/* Add interrupt service routine */
	if (s2io_add_isr(sp) != 0) {
		if (sp->config.intr_type == MSI_X)
			s2io_rem_isr(sp);
		s2io_reset(sp);
		free_rx_buffers(sp);
		return -ENODEV;
	}

	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));

	/* Enable tasklet for the device */
	tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);

	/* Enable select interrupts */
	en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
	if (sp->config.intr_type != INTA)
		en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS);
	else {
		interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
		interruptible |= TX_PIC_INTR;
		en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
	}

	set_bit(__S2IO_STATE_CARD_UP, &sp->state);
	return 0;
}
7309
20346722 7310/**
1da177e4
LT
7311 * s2io_restart_nic - Resets the NIC.
7312 * @data : long pointer to the device private structure
7313 * Description:
7314 * This function is scheduled to be run by the s2io_tx_watchdog
20346722 7315 * function after 0.5 secs to reset the NIC. The idea is to reduce
1da177e4
LT
7316 * the run time of the watch dog routine which is run holding a
7317 * spin lock.
7318 */
7319
c4028958 7320static void s2io_restart_nic(struct work_struct *work)
1da177e4 7321{
1ee6dd77 7322 struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
c4028958 7323 struct net_device *dev = sp->dev;
1da177e4 7324
22747d6b
FR
7325 rtnl_lock();
7326
7327 if (!netif_running(dev))
7328 goto out_unlock;
7329
e6a8fee2 7330 s2io_card_down(sp);
1da177e4
LT
7331 if (s2io_card_up(sp)) {
7332 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
7333 dev->name);
7334 }
3a3d5756 7335 s2io_wake_all_tx_queue(sp);
1da177e4
LT
7336 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
7337 dev->name);
22747d6b
FR
7338out_unlock:
7339 rtnl_unlock();
1da177e4
LT
7340}
7341
20346722
K
7342/**
7343 * s2io_tx_watchdog - Watchdog for transmit side.
1da177e4
LT
7344 * @dev : Pointer to net device structure
7345 * Description:
7346 * This function is triggered if the Tx Queue is stopped
7347 * for a pre-defined amount of time when the Interface is still up.
7348 * If the Interface is jammed in such a situation, the hardware is
7349 * reset (by s2io_close) and restarted again (by s2io_open) to
7350 * overcome any problem that might have been caused in the hardware.
7351 * Return value:
7352 * void
7353 */
7354
7355static void s2io_tx_watchdog(struct net_device *dev)
7356{
1ee6dd77 7357 struct s2io_nic *sp = dev->priv;
1da177e4
LT
7358
7359 if (netif_carrier_ok(dev)) {
c53d4945 7360 sp->mac_control.stats_info->sw_stat.watchdog_timer_cnt++;
1da177e4 7361 schedule_work(&sp->rst_timer_task);
bd1034f0 7362 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
1da177e4
LT
7363 }
7364}
7365
/**
 * rx_osm_handler - To perform some OS related operations on SKB.
 * @ring_data: the ring from which this RxD was extracted.
 * @rxdp: the completed receive descriptor.
 * Description:
 * This function is called by the Rx interrupt service routine to perform
 * some OS related operations on the SKB before passing it to the upper
 * layers. It mainly checks if the checksum is OK, if so adds it to the
 * SKBs cksum variable, increments the Rx packet count and passes the SKB
 * to the upper layer. If the checksum is wrong, it increments the Rx
 * packet error count, frees the SKB and returns error.
 * Return value:
 * SUCCESS on success and -1 on failure.
 */
static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
{
	struct s2io_nic *sp = ring_data->nic;
	struct net_device *dev = (struct net_device *) sp->dev;
	/* the skb pointer was stashed in the RxD when the buffer was posted */
	struct sk_buff *skb = (struct sk_buff *)
		((unsigned long) rxdp->Host_Control);
	int ring_no = ring_data->ring_no;
	u16 l3_csum, l4_csum;
	unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
	struct lro *lro;
	u8 err_mask;

	skb->dev = dev;

	if (err) {
		/* Check for parity error */
		if (err & 0x1) {
			sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
		}
		/* high bits of the masked Control_1 carry the transfer code;
		 * each code below maps to a dedicated sw_stat error counter */
		err_mask = err >> 48;
		switch(err_mask) {
		case 1:
			sp->mac_control.stats_info->sw_stat.
			rx_parity_err_cnt++;
			break;

		case 2:
			sp->mac_control.stats_info->sw_stat.
			rx_abort_cnt++;
			break;

		case 3:
			sp->mac_control.stats_info->sw_stat.
			rx_parity_abort_cnt++;
			break;

		case 4:
			sp->mac_control.stats_info->sw_stat.
			rx_rda_fail_cnt++;
			break;

		case 5:
			sp->mac_control.stats_info->sw_stat.
			rx_unkn_prot_cnt++;
			break;

		case 6:
			sp->mac_control.stats_info->sw_stat.
			rx_fcs_err_cnt++;
			break;

		case 7:
			sp->mac_control.stats_info->sw_stat.
			rx_buf_size_err_cnt++;
			break;

		case 8:
			sp->mac_control.stats_info->sw_stat.
			rx_rxd_corrupt_cnt++;
			break;

		case 15:
			sp->mac_control.stats_info->sw_stat.
			rx_unkn_err_cnt++;
			break;
		}
		/*
		 * Drop the packet if bad transfer code. Exception being
		 * 0x5, which could be due to unsupported IPv6 extension header.
		 * In this case, we let stack handle the packet.
		 * Note that in this case, since checksum will be incorrect,
		 * stack will validate the same.
		 */
		if (err_mask != 0x5) {
			DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%x\n",
				dev->name, err_mask);
			sp->stats.rx_crc_errors++;
			sp->mac_control.stats_info->sw_stat.mem_freed
				+= skb->truesize;
			dev_kfree_skb(skb);
			atomic_dec(&sp->rx_bufs_left[ring_no]);
			rxdp->Host_Control = 0;
			return 0;
		}
	}

	/* Updating statistics */
	sp->stats.rx_packets++;
	rxdp->Host_Control = 0;
	if (sp->rxd_mode == RXD_MODE_1) {
		/* 1-buffer mode: the whole frame is in buffer 0 */
		int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);

		sp->stats.rx_bytes += len;
		skb_put(skb, len);

	} else if (sp->rxd_mode == RXD_MODE_3B) {
		/* 2-buffer mode: buffer 0 is copied in front of the skb data,
		 * buffer 2 already backs the skb and is just accounted for */
		int get_block = ring_data->rx_curr_get_info.block_index;
		int get_off = ring_data->rx_curr_get_info.offset;
		int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
		int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
		unsigned char *buff = skb_push(skb, buf0_len);

		struct buffAdd *ba = &ring_data->ba[get_block][get_off];
		sp->stats.rx_bytes += buf0_len + buf2_len;
		memcpy(buff, ba->ba_0, buf0_len);
		skb_put(skb, buf2_len);
	}

	/* Hardware checksum is only trusted for TCP/UDP frames that are not
	 * IP fragments (when LRO is on) and when rx_csum is enabled */
	if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) ||
	    (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
	    (sp->rx_csum)) {
		l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
		l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
		if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
			/*
			 * NIC verifies if the Checksum of the received
			 * frame is Ok or not and accordingly returns
			 * a flag in the RxD.
			 */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			if (sp->lro) {
				u32 tcp_len;
				u8 *tcp;
				int ret = 0;

				/* classify the segment against the open LRO
				 * sessions; &lro is filled by the callee */
				ret = s2io_club_tcp_session(skb->data, &tcp,
							    &tcp_len, &lro,
							    rxdp, sp);
				switch (ret) {
				case 3: /* Begin anew */
					lro->parent = skb;
					goto aggregate;
				case 1: /* Aggregate */
				{
					lro_append_pkt(sp, lro,
						skb, tcp_len);
					goto aggregate;
				}
				case 4: /* Flush session */
				{
					lro_append_pkt(sp, lro,
						skb, tcp_len);
					queue_rx_frame(lro->parent);
					clear_lro_session(lro);
					sp->mac_control.stats_info->
						sw_stat.flush_max_pkts++;
					goto aggregate;
				}
				case 2: /* Flush both */
					lro->parent->data_len =
						lro->frags_len;
					sp->mac_control.stats_info->
						sw_stat.sending_both++;
					queue_rx_frame(lro->parent);
					clear_lro_session(lro);
					/* current skb is delivered alone */
					goto send_up;
				case 0: /* sessions exceeded */
				case -1: /* non-TCP or not
					  * L2 aggregatable
					  */
				case 5: /*
					 * First pkt in session not
					 * L3/L4 aggregatable
					 */
					break;
				default:
					/* unreachable classifier result */
					DBG_PRINT(ERR_DBG,
						"%s: Samadhana!!\n",
						__FUNCTION__);
					BUG();
				}
			}
		} else {
			/*
			 * Packet with erroneous checksum, let the
			 * upper layers deal with it.
			 */
			skb->ip_summed = CHECKSUM_NONE;
		}
	} else {
		skb->ip_summed = CHECKSUM_NONE;
	}
	sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
	if (!sp->lro) {
		skb->protocol = eth_type_trans(skb, dev);
		if ((sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2) &&
			vlan_strip_flag)) {
			/* Queueing the vlan frame to the upper layer */
			if (napi)
				vlan_hwaccel_receive_skb(skb, sp->vlgrp,
					RXD_GET_VLAN_TAG(rxdp->Control_2));
			else
				vlan_hwaccel_rx(skb, sp->vlgrp,
					RXD_GET_VLAN_TAG(rxdp->Control_2));
		} else {
			if (napi)
				netif_receive_skb(skb);
			else
				netif_rx(skb);
		}
	} else {
send_up:
		queue_rx_frame(skb);
	}
	dev->last_rx = jiffies;
aggregate:
	/* one posted Rx buffer has been consumed on this ring */
	atomic_dec(&sp->rx_bufs_left[ring_no]);
	return SUCCESS;
}
7592
/**
 * s2io_link - stops/starts the Tx queue.
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * @link : indicates whether link is UP/DOWN.
 * Description:
 * This function stops/starts the Tx queue depending on whether the link
 * status of the NIC is down or up. This is called by the Alarm
 * interrupt handler whenever a link change interrupt comes up.
 * Return value:
 * void.
 */

static void s2io_link(struct s2io_nic * sp, int link)
{
	struct net_device *dev = (struct net_device *) sp->dev;

	/* only act on an actual state transition */
	if (link != sp->last_link_state) {
		init_tti(sp, link);
		if (link == LINK_DOWN) {
			DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
			s2io_stop_all_tx_queue(sp);
			netif_carrier_off(dev);
			/* account the time spent in the previous (up) state;
			 * guarded so the very first transition records nothing */
			if(sp->mac_control.stats_info->sw_stat.link_up_cnt)
				sp->mac_control.stats_info->sw_stat.link_up_time =
					jiffies - sp->start_time;
			sp->mac_control.stats_info->sw_stat.link_down_cnt++;
		} else {
			DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
			/* account the time spent in the previous (down) state */
			if (sp->mac_control.stats_info->sw_stat.link_down_cnt)
				sp->mac_control.stats_info->sw_stat.link_down_time =
					jiffies - sp->start_time;
			sp->mac_control.stats_info->sw_stat.link_up_cnt++;
			netif_carrier_on(dev);
			s2io_wake_all_tx_queue(sp);
		}
	}
	sp->last_link_state = link;
	/* timestamp of entering the current link state */
	sp->start_time = jiffies;
}
7633
/**
 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
 * Description:
 * This function initializes a few of the PCI and PCI-X configuration registers
 * with recommended values.
 * Return value:
 * void
 */

static void s2io_init_pci(struct s2io_nic * sp)
{
	u16 pci_cmd = 0, pcix_cmd = 0;

	/* Enable Data Parity Error Recovery in PCI-X command register. */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
			     &(pcix_cmd));
	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
			      (pcix_cmd | 1));
	/* read back after write — presumably to flush/verify the update;
	 * NOTE(review): confirm the read-back is intentional */
	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
			     &(pcix_cmd));

	/* Set the PErr Response bit in PCI command register. */
	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(sp->pdev, PCI_COMMAND,
			      (pci_cmd | PCI_COMMAND_PARITY));
	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
}
7663
3a3d5756
SH
7664static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7665 u8 *dev_multiq)
9dc737a7 7666{
2fda096d 7667 if ((tx_fifo_num > MAX_TX_FIFOS) ||
6cfc482b 7668 (tx_fifo_num < 1)) {
2fda096d
SR
7669 DBG_PRINT(ERR_DBG, "s2io: Requested number of tx fifos "
7670 "(%d) not supported\n", tx_fifo_num);
6cfc482b
SH
7671
7672 if (tx_fifo_num < 1)
7673 tx_fifo_num = 1;
7674 else
7675 tx_fifo_num = MAX_TX_FIFOS;
7676
2fda096d
SR
7677 DBG_PRINT(ERR_DBG, "s2io: Default to %d ", tx_fifo_num);
7678 DBG_PRINT(ERR_DBG, "tx fifos\n");
9dc737a7 7679 }
2fda096d 7680
3a3d5756
SH
7681#ifndef CONFIG_NETDEVICES_MULTIQUEUE
7682 if (multiq) {
7683 DBG_PRINT(ERR_DBG, "s2io: Multiqueue support not enabled\n");
7684 multiq = 0;
7685 }
7686#endif
6cfc482b 7687 if (multiq)
3a3d5756 7688 *dev_multiq = multiq;
6cfc482b
SH
7689
7690 if (tx_steering_type && (1 == tx_fifo_num)) {
7691 if (tx_steering_type != TX_DEFAULT_STEERING)
7692 DBG_PRINT(ERR_DBG,
7693 "s2io: Tx steering is not supported with "
7694 "one fifo. Disabling Tx steering.\n");
7695 tx_steering_type = NO_STEERING;
7696 }
7697
7698 if ((tx_steering_type < NO_STEERING) ||
7699 (tx_steering_type > TX_DEFAULT_STEERING)) {
7700 DBG_PRINT(ERR_DBG, "s2io: Requested transmit steering not "
7701 "supported\n");
7702 DBG_PRINT(ERR_DBG, "s2io: Disabling transmit steering\n");
7703 tx_steering_type = NO_STEERING;
3a3d5756
SH
7704 }
7705
9dc737a7
AR
7706 if ( rx_ring_num > 8) {
7707 DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not "
7708 "supported\n");
7709 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n");
7710 rx_ring_num = 8;
7711 }
db874e65
SS
7712 if (*dev_intr_type != INTA)
7713 napi = 0;
7714
eccb8628 7715 if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
9dc737a7
AR
7716 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
7717 "Defaulting to INTA\n");
7718 *dev_intr_type = INTA;
7719 }
596c5c97 7720
9dc737a7
AR
7721 if ((*dev_intr_type == MSI_X) &&
7722 ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
7723 (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
6aa20a22 7724 DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
9dc737a7
AR
7725 "Defaulting to INTA\n");
7726 *dev_intr_type = INTA;
7727 }
fb6a825b 7728
6d517a27 7729 if ((rx_ring_mode != 1) && (rx_ring_mode != 2)) {
9dc737a7 7730 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
6d517a27
VP
7731 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 1-buffer mode\n");
7732 rx_ring_mode = 1;
9dc737a7
AR
7733 }
7734 return SUCCESS;
7735}
7736
/**
 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
 * or Traffic class respectively.
 * @nic: device private variable
 * @ds_codepoint: DS codepoint (0-63) whose steering entry is programmed
 * @ring: receive ring to steer matching traffic to
 * Description: The function configures the receive steering to
 * desired receive ring.
 * Return Value: SUCCESS on success and
 * '-1' on failure (endian settings incorrect).
 */
static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
{
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0;

	/* only 64 DS codepoints exist */
	if (ds_codepoint > 63)
		return FAILURE;

	/* write the data word (target ring) first ... */
	val64 = RTS_DS_MEM_DATA(ring);
	writeq(val64, &bar0->rts_ds_mem_data);

	/* ... then issue the write-enable command for this codepoint */
	val64 = RTS_DS_MEM_CTRL_WE |
		RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
		RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);

	writeq(val64, &bar0->rts_ds_mem_ctrl);

	/* poll until the adapter clears the strobe bit */
	return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
				RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
				S2IO_BIT_RESET);
}
7767
/**
 * s2io_init_nic - Initialization of the adapter .
 * @pdev : structure containing the PCI related information of the device.
 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
 * Description:
 * The function initializes an adapter identified by the pci_dev structure.
 * All OS related initialization including memory and device structure and
 * initlaization of the device private variable is done. Also the swapper
 * control register is initialized to enable read and write into the I/O
 * registers of the device.
 * Resources are acquired in order (PCI device, regions, netdev, shared
 * memory, BAR mappings, netdev registration) and unwound via the goto
 * labels at the bottom on failure.
 * Return value:
 * returns 0 on success and negative on failure.
 */

static int __devinit
s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
{
	struct s2io_nic *sp;
	struct net_device *dev;
	int i, j, ret;
	int dma_flag = FALSE;
	u32 mac_up, mac_down;
	u64 val64 = 0, tmp64 = 0;
	struct XENA_dev_config __iomem *bar0 = NULL;
	u16 subid;
	struct mac_info *mac_control;
	struct config_param *config;
	int mode;
	u8 dev_intr_type = intr_type;
	u8 dev_multiq = 0;
	DECLARE_MAC_BUF(mac);

	/* Validate/clamp module parameters before touching the device. */
	ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
	if (ret)
		return ret;

	if ((ret = pci_enable_device(pdev))) {
		DBG_PRINT(ERR_DBG,
			  "s2io_init_nic: pci_enable_device failed\n");
		return ret;
	}

	/* Prefer 64-bit DMA, fall back to 32-bit, else bail out. */
	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
		dma_flag = TRUE;
		if (pci_set_consistent_dma_mask
		    (pdev, DMA_64BIT_MASK)) {
			DBG_PRINT(ERR_DBG,
				  "Unable to obtain 64bit DMA for \
consistent allocations\n");
			pci_disable_device(pdev);
			return -ENOMEM;
		}
	} else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
		DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
	} else {
		pci_disable_device(pdev);
		return -ENOMEM;
	}
	if ((ret = pci_request_regions(pdev, s2io_driver_name))) {
		DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __FUNCTION__, ret);
		pci_disable_device(pdev);
		return -ENODEV;
	}
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	/* One Tx queue per fifo when multiqueue is requested. */
	if (dev_multiq)
		dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
	else
#endif
	dev = alloc_etherdev(sizeof(struct s2io_nic));
	if (dev == NULL) {
		DBG_PRINT(ERR_DBG, "Device allocation failed\n");
		pci_disable_device(pdev);
		pci_release_regions(pdev);
		return -ENODEV;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	/* Private member variable initialized to s2io NIC structure */
	sp = dev->priv;
	memset(sp, 0, sizeof(struct s2io_nic));
	sp->dev = dev;
	sp->pdev = pdev;
	sp->high_dma_flag = dma_flag;
	sp->device_enabled_once = FALSE;
	if (rx_ring_mode == 1)
		sp->rxd_mode = RXD_MODE_1;
	if (rx_ring_mode == 2)
		sp->rxd_mode = RXD_MODE_3B;

	sp->config.intr_type = dev_intr_type;

	/* Herc (Xframe II) vs Xframe I detection by PCI device id. */
	if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
	    (pdev->device == PCI_DEVICE_ID_HERC_UNI))
		sp->device_type = XFRAME_II_DEVICE;
	else
		sp->device_type = XFRAME_I_DEVICE;

	sp->lro = lro_enable;

	/* Initialize some PCI/PCI-X fields of the NIC. */
	s2io_init_pci(sp);

	/*
	 * Setting the device configuration parameters.
	 * Most of these parameters can be specified by the user during
	 * module insertion as they are module loadable parameters. If
	 * these parameters are not not specified during load time, they
	 * are initialized with default values.
	 */
	mac_control = &sp->mac_control;
	config = &sp->config;

	config->napi = napi;
	config->tx_steering_type = tx_steering_type;

	/* Tx side parameters. */
	if (config->tx_steering_type == TX_PRIORITY_STEERING)
		config->tx_fifo_num = MAX_TX_FIFOS;
	else
		config->tx_fifo_num = tx_fifo_num;

	/* Initialize the fifos used for tx steering */
	if (config->tx_fifo_num < 5) {
		if (config->tx_fifo_num == 1)
			sp->total_tcp_fifos = 1;
		else
			sp->total_tcp_fifos = config->tx_fifo_num - 1;
		sp->udp_fifo_idx = config->tx_fifo_num - 1;
		sp->total_udp_fifos = 1;
		sp->other_fifo_idx = sp->total_tcp_fifos - 1;
	} else {
		sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
				       FIFO_OTHER_MAX_NUM);
		sp->udp_fifo_idx = sp->total_tcp_fifos;
		sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
		sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
	}

	config->multiq = dev_multiq;
	for (i = 0; i < config->tx_fifo_num; i++) {
		config->tx_cfg[i].fifo_len = tx_fifo_len[i];
		config->tx_cfg[i].fifo_priority = i;
	}

	/* mapping the QoS priority to the configured fifos */
	for (i = 0; i < MAX_TX_FIFOS; i++)
		config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];

	/* map the hashing selector table to the configured fifos */
	for (i = 0; i < config->tx_fifo_num; i++)
		sp->fifo_selector[i] = fifo_selector[i];


	config->tx_intr_type = TXD_INT_TYPE_UTILZ;
	for (i = 0; i < config->tx_fifo_num; i++) {
		config->tx_cfg[i].f_no_snoop =
		    (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
		/* short fifos switch to per-list Tx interrupts */
		if (config->tx_cfg[i].fifo_len < 65) {
			config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
			break;
		}
	}
	/* + 2 because one Txd for skb->data and one Txd for UFO */
	config->max_txds = MAX_SKB_FRAGS + 2;

	/* Rx side parameters. */
	config->rx_ring_num = rx_ring_num;
	for (i = 0; i < MAX_RX_RINGS; i++) {
		config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
		    (rxd_count[sp->rxd_mode] + 1);
		config->rx_cfg[i].ring_priority = i;
	}

	for (i = 0; i < rx_ring_num; i++) {
		config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
		config->rx_cfg[i].f_no_snoop =
		    (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
	}

	/* Setting Mac Control parameters */
	mac_control->rmac_pause_time = rmac_pause_time;
	mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
	mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;


	/* Initialize Ring buffer parameters. */
	for (i = 0; i < config->rx_ring_num; i++)
		atomic_set(&sp->rx_bufs_left[i], 0);

	/* initialize the shared memory used by the NIC and the host */
	if (init_shared_mem(sp)) {
		DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
			  dev->name);
		ret = -ENOMEM;
		goto mem_alloc_failed;
	}

	/* BAR0: device configuration registers */
	sp->bar0 = ioremap(pci_resource_start(pdev, 0),
			   pci_resource_len(pdev, 0));
	if (!sp->bar0) {
		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
			  dev->name);
		ret = -ENOMEM;
		goto bar0_remap_failed;
	}

	/* BAR1: Tx FIFO doorbell area */
	sp->bar1 = ioremap(pci_resource_start(pdev, 2),
			   pci_resource_len(pdev, 2));
	if (!sp->bar1) {
		DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
			  dev->name);
		ret = -ENOMEM;
		goto bar1_remap_failed;
	}

	dev->irq = pdev->irq;
	dev->base_addr = (unsigned long) sp->bar0;

	/* Initializing the BAR1 address as the start of the FIFO pointer. */
	for (j = 0; j < MAX_TX_FIFOS; j++) {
		mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
		    (sp->bar1 + (j * 0x00020000));
	}

	/* Driver entry points */
	dev->open = &s2io_open;
	dev->stop = &s2io_close;
	dev->hard_start_xmit = &s2io_xmit;
	dev->get_stats = &s2io_get_stats;
	dev->set_multicast_list = &s2io_set_multicast;
	dev->do_ioctl = &s2io_ioctl;
	dev->set_mac_address = &s2io_set_mac_addr;
	dev->change_mtu = &s2io_change_mtu;
	SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->vlan_rx_register = s2io_vlan_rx_register;

	/*
	 * will use eth_mac_addr() for  dev->set_mac_address
	 * mac address will be set every time dev->open() is called
	 */
	netif_napi_add(dev, &sp->napi, s2io_poll, 32);

#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = s2io_netpoll;
#endif

	dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
	if (sp->high_dma_flag == TRUE)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= NETIF_F_TSO;
	dev->features |= NETIF_F_TSO6;
	/* UFO is only offered by Xframe II hardware */
	if ((sp->device_type & XFRAME_II_DEVICE) && (ufo)) {
		dev->features |= NETIF_F_UFO;
		dev->features |= NETIF_F_HW_CSUM;
	}
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	if (config->multiq)
		dev->features |= NETIF_F_MULTI_QUEUE;
#endif
	dev->tx_timeout = &s2io_tx_watchdog;
	dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
	INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
	INIT_WORK(&sp->set_link_task, s2io_set_link);

	pci_save_state(sp->pdev);

	/* Setting swapper control on the NIC, for proper reset operation */
	if (s2io_set_swapper(sp)) {
		DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
			  dev->name);
		ret = -EAGAIN;
		goto set_swap_failed;
	}

	/* Verify if the Herc works on the slot its placed into */
	if (sp->device_type & XFRAME_II_DEVICE) {
		mode = s2io_verify_pci_mode(sp);
		if (mode < 0) {
			DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
			DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
			ret = -EBADSLT;
			goto set_swap_failed;
		}
	}

	/* Not needed for Herc */
	if (sp->device_type & XFRAME_I_DEVICE) {
		/*
		 * Fix for all "FFs" MAC address problems observed on
		 * Alpha platforms
		 */
		fix_mac_address(sp);
		s2io_reset(sp);
	}

	/*
	 * MAC address initialization.
	 * For now only one mac address will be read and used.
	 */
	bar0 = sp->bar0;
	val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
	    RMAC_ADDR_CMD_MEM_OFFSET(0 + S2IO_MAC_ADDR_START_OFFSET);
	writeq(val64, &bar0->rmac_addr_cmd_mem);
	wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
		      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
	tmp64 = readq(&bar0->rmac_addr_data0_mem);
	mac_down = (u32) tmp64;
	mac_up = (u32) (tmp64 >> 32);

	/* unpack the 6 MAC bytes from the two 32-bit register halves */
	sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
	sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
	sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
	sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
	sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
	sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);

	/* Set the factory defined MAC address initially */
	dev->addr_len = ETH_ALEN;
	memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
	memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);

	/* initialize number of multicast & unicast MAC entries variables */
	if (sp->device_type == XFRAME_I_DEVICE) {
		config->max_mc_addr = S2IO_XENA_MAX_MC_ADDRESSES;
		config->max_mac_addr = S2IO_XENA_MAX_MAC_ADDRESSES;
		config->mc_start_offset = S2IO_XENA_MC_ADDR_START_OFFSET;
	} else if (sp->device_type == XFRAME_II_DEVICE) {
		config->max_mc_addr = S2IO_HERC_MAX_MC_ADDRESSES;
		config->max_mac_addr = S2IO_HERC_MAX_MAC_ADDRESSES;
		config->mc_start_offset = S2IO_HERC_MC_ADDR_START_OFFSET;
	}

	/* store mac addresses from CAM to s2io_nic structure */
	do_s2io_store_unicast_mc(sp);

	/* Store the values of the MSIX table in the s2io_nic structure */
	store_xmsi_data(sp);
	/* reset Nic and bring it to known state */
	s2io_reset(sp);

	/*
	 * Initialize the tasklet status and link state flags
	 * and the card state parameter
	 */
	sp->tasklet_status = 0;
	sp->state = 0;

	/* Initialize spinlocks */
	for (i = 0; i < sp->config.tx_fifo_num; i++)
		spin_lock_init(&mac_control->fifos[i].tx_lock);

	/* put_lock is only used on the non-NAPI receive path */
	if (!napi)
		spin_lock_init(&sp->put_lock);
	spin_lock_init(&sp->rx_lock);

	/*
	 * SXE-002: Configure link and activity LED to init state
	 * on driver load.
	 */
	subid = sp->pdev->subsystem_device;
	if ((subid & 0xFF) >= 0x07) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *) bar0 + 0x2700);
		val64 = readq(&bar0->gpio_control);
	}

	sp->rx_csum = 1;	/* Rx chksum verify enabled by default */

	if (register_netdev(dev)) {
		DBG_PRINT(ERR_DBG, "Device registration failed\n");
		ret = -ENODEV;
		goto register_failed;
	}
	s2io_vpd_read(sp);
	DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2007 Neterion Inc.\n");
	DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
		  sp->product_name, pdev->revision);
	DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
		  s2io_driver_version);
	DBG_PRINT(ERR_DBG, "%s: MAC ADDR: %s\n",
		  dev->name, print_mac(mac, dev->dev_addr));
	DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
	if (sp->device_type & XFRAME_II_DEVICE) {
		mode = s2io_print_pci_mode(sp);
		if (mode < 0) {
			DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
			ret = -EBADSLT;
			unregister_netdev(dev);
			goto set_swap_failed;
		}
	}
	switch(sp->rxd_mode) {
	case RXD_MODE_1:
		DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
			  dev->name);
		break;
	case RXD_MODE_3B:
		DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
			  dev->name);
		break;
	}

	if (napi)
		DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);

	DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
		sp->config.tx_fifo_num);

	switch(sp->config.intr_type) {
	case INTA:
		DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
		break;
	case MSI_X:
		DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
		break;
	}
	if (sp->config.multiq) {
		for (i = 0; i < sp->config.tx_fifo_num; i++)
			mac_control->fifos[i].multiq = config->multiq;
		DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
			dev->name);
	} else
		DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
			dev->name);

	switch (sp->config.tx_steering_type) {
	case NO_STEERING:
		DBG_PRINT(ERR_DBG, "%s: No steering enabled for"
			" transmit\n", dev->name);
		break;
	case TX_PRIORITY_STEERING:
		DBG_PRINT(ERR_DBG, "%s: Priority steering enabled for"
			" transmit\n", dev->name);
		break;
	case TX_DEFAULT_STEERING:
		DBG_PRINT(ERR_DBG, "%s: Default steering enabled for"
			" transmit\n", dev->name);
	}

	if (sp->lro)
		DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
			  dev->name);
	if (ufo)
		DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
			  " enabled\n", dev->name);
	/* Initialize device name */
	sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);

	/*
	 * Make Link state as off at this point, when the Link change
	 * interrupt comes the state will be automatically changed to
	 * the right state.
	 */
	netif_carrier_off(dev);

	return 0;

	/* error unwind: each label releases what was acquired before it */
      register_failed:
      set_swap_failed:
	iounmap(sp->bar1);
      bar1_remap_failed:
	iounmap(sp->bar0);
      bar0_remap_failed:
      mem_alloc_failed:
	free_shared_mem(sp);
	pci_disable_device(pdev);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);

	return ret;
}
8248
/**
 * s2io_rem_nic - Free the PCI device
 * @pdev: structure containing the PCI related information of the device.
 * Description: This function is called by the PCI subsystem to release a
 * PCI device and free up all resources held by the device. This could
 * be in response to a Hot plug event or when the driver is to be removed
 * from memory.
 */

static void __devexit s2io_rem_nic(struct pci_dev *pdev)
{
	struct net_device *dev =
		(struct net_device *) pci_get_drvdata(pdev);
	struct s2io_nic *sp;

	if (dev == NULL) {
		DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
		return;
	}

	/* Make sure any work this driver scheduled has finished before
	 * the netdev and its private data are torn down. */
	flush_scheduled_work();

	sp = dev->priv;
	/* Unregister first so no new open/xmit can race with the frees below */
	unregister_netdev(dev);

	free_shared_mem(sp);
	iounmap(sp->bar0);
	iounmap(sp->bar1);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
	pci_disable_device(pdev);
}
8282
8283/**
8284 * s2io_starter - Entry point for the driver
8285 * Description: This function is the entry point for the driver. It verifies
8286 * the module loadable parameters and initializes PCI configuration space.
8287 */
8288
43b7c451 8289static int __init s2io_starter(void)
1da177e4 8290{
29917620 8291 return pci_register_driver(&s2io_driver);
1da177e4
LT
8292}
8293
/**
 * s2io_closer - Cleanup routine for the driver
 * Description: This function is the cleanup routine for the driver. It
 * unregisters the driver from the PCI subsystem.
 */

static __exit void s2io_closer(void)
{
	pci_unregister_driver(&s2io_driver);
	DBG_PRINT(INIT_DBG, "cleanup done\n");
}
8304
/* Register the module's load/unload entry points. */
module_init(s2io_starter);
module_exit(s2io_closer);
7d3d0439 8307
/*
 * check_L2_lro_capable - check L2 header and locate the L3/L4 headers
 * @buffer: start of the received frame (L2 header)
 * @ip: out parameter, set to the start of the IP header on success
 * @tcp: out parameter, set to the start of the TCP header on success
 * @rxdp: receive descriptor for this frame (carries hw parse results)
 *
 * Returns 0 if the frame is a candidate for LRO (TCP over a DIX-type
 * Ethernet frame), -1 otherwise.
 */
static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
				struct tcphdr **tcp, struct RxD_t *rxdp)
{
	int ip_off;
	/* Bits 37..39 of Control_1 hold the hw-detected L2 frame type.
	 * NOTE(review): bit layout taken from this extraction; confirm
	 * against the Xframe descriptor documentation. */
	u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;

	if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
		DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
			  __FUNCTION__);
		return -1;
	}

	/* TODO:
	 * By default the VLAN field in the MAC is stripped by the card, if this
	 * feature is turned off in rx_pa_cfg register, then the ip_off field
	 * has to be shifted by a further 2 bytes
	 */
	switch (l2_type) {
	case 0: /* DIX type */
	case 4: /* DIX type with VLAN */
		ip_off = HEADER_ETHERNET_II_802_3_SIZE;
		break;
	/* LLC, SNAP etc are considered non-mergeable */
	default:
		return -1;
	}

	*ip = (struct iphdr *)((u8 *)buffer + ip_off);
	/* IHL is in 32-bit words; shift by 2 converts it to bytes */
	ip_len = (u8)((*ip)->ihl);
	ip_len <<= 2;
	*tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);

	return 0;
}
8342
1ee6dd77 8343static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
7d3d0439
RA
8344 struct tcphdr *tcp)
8345{
8346 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8347 if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
8348 (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
8349 return -1;
8350 return 0;
8351}
8352
8353static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8354{
8355 return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
8356}
8357
/*
 * initiate_new_session - start a new LRO session from a first packet
 * @lro: free LRO descriptor to initialize
 * @l2h: start of the packet's L2 header
 * @ip: parsed IP header
 * @tcp: parsed TCP header
 * @tcp_pyld_len: TCP payload length of this packet
 *
 * Records header pointers and the sequence/ack/timestamp state needed to
 * decide whether subsequent packets can be merged into this session.
 */
static void initiate_new_session(struct lro *lro, u8 *l2h,
	struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len)
{
	DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
	lro->l2h = l2h;
	lro->iph = ip;
	lro->tcph = tcp;
	/* Next in-order sequence number we expect for this flow */
	lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
	lro->tcp_ack = tcp->ack_seq;
	lro->sg_num = 1;
	lro->total_len = ntohs(ip->tot_len);
	lro->frags_len = 0;
	/*
	 * check if we saw TCP timestamp. Other consistency checks have
	 * already been done.
	 */
	if (tcp->doff == 8) {
		__be32 *ptr;
		ptr = (__be32 *)(tcp+1);
		lro->saw_ts = 1;
		/* tsval is kept in host order for comparisons; tsecr is
		 * kept in network order so it can be echoed back verbatim */
		lro->cur_tsval = ntohl(*(ptr+1));
		lro->cur_tsecr = *(ptr+2);
	}
	lro->in_use = 1;
}
8383
/*
 * update_L3L4_header - rewrite the merged super-packet's IP/TCP headers
 * @sp: per-adapter private data (for aggregation statistics)
 * @lro: LRO session being flushed
 *
 * Called when a session is flushed: patches the first packet's headers so
 * they describe the whole aggregate (total length, latest ack/window,
 * latest tsecr) and recomputes the IP checksum.
 */
static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
{
	struct iphdr *ip = lro->iph;
	struct tcphdr *tcp = lro->tcph;
	__sum16 nchk;
	struct stat_block *statinfo = sp->mac_control.stats_info;
	DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);

	/* Update L3 header */
	ip->tot_len = htons(lro->total_len);
	/* tot_len changed, so the header checksum must be recomputed;
	 * zero it first since it is included in its own computation */
	ip->check = 0;
	nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
	ip->check = nchk;

	/* Update L4 header */
	tcp->ack_seq = lro->tcp_ack;
	tcp->window = lro->window;

	/* Update tsecr field if this session has timestamps enabled */
	if (lro->saw_ts) {
		__be32 *ptr = (__be32 *)(tcp + 1);
		/* cur_tsecr is stored in network order, written back as-is */
		*(ptr+2) = lro->cur_tsecr;
	}

	/* Update counters required for calculation of
	 * average no. of packets aggregated.
	 */
	statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
	statinfo->sw_stat.num_aggregations++;
}
8414
/*
 * aggregate_new_rx - fold a new in-order packet into an LRO session
 * @lro: session the packet belongs to
 * @ip: IP header of the new packet
 * @tcp: TCP header of the new packet
 * @l4_pyld: TCP payload length of the new packet
 *
 * Advances the session's length, expected-sequence and segment counters,
 * and captures the packet's ack/window/timestamp state for the eventual
 * header rewrite in update_L3L4_header().
 */
static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
	struct tcphdr *tcp, u32 l4_pyld)
{
	DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
	lro->total_len += l4_pyld;
	lro->frags_len += l4_pyld;
	lro->tcp_next_seq += l4_pyld;
	lro->sg_num++;

	/* Update ack seq no. and window ad(from this pkt) in LRO object */
	lro->tcp_ack = tcp->ack_seq;
	lro->window = tcp->window;

	if (lro->saw_ts) {
		__be32 *ptr;
		/* Update tsecr and tsval from this packet */
		ptr = (__be32 *)(tcp+1);
		/* tsval in host order, tsecr kept in network order */
		lro->cur_tsval = ntohl(*(ptr+1));
		lro->cur_tsecr = *(ptr + 2);
	}
}
8436
/*
 * verify_l3_l4_lro_capable - decide if a packet may be merged via LRO
 * @l_lro: existing session the packet would join, or NULL when probing
 *         whether a new session may be created
 * @ip: IP header of the packet
 * @tcp: TCP header of the packet
 * @tcp_pyld_len: TCP payload length of the packet
 *
 * Returns 0 if the packet is mergeable, -1 if the session must be
 * flushed / the packet sent up unmerged (pure acks, IP options, ECN
 * marks, TCP control flags, unexpected TCP options, or a timestamp
 * going backwards).
 */
static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
	struct tcphdr *tcp, u32 tcp_pyld_len)
{
	u8 *ptr;

	DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);

	if (!tcp_pyld_len) {
		/* Runt frame or a pure ack */
		return -1;
	}

	if (ip->ihl != 5) /* IP has options */
		return -1;

	/* If we see CE codepoint in IP header, packet is not mergeable */
	if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
		return -1;

	/* If we see ECE or CWR flags in TCP header, packet is not mergeable */
	if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
	    tcp->ece || tcp->cwr || !tcp->ack) {
		/*
		 * Currently recognize only the ack control word and
		 * any other control field being set would result in
		 * flushing the LRO session
		 */
		return -1;
	}

	/*
	 * Allow only one TCP timestamp option. Don't aggregate if
	 * any other options are detected.
	 */
	if (tcp->doff != 5 && tcp->doff != 8)
		return -1;

	if (tcp->doff == 8) {
		ptr = (u8 *)(tcp + 1);
		/* Skip leading NOP padding before the timestamp option */
		while (*ptr == TCPOPT_NOP)
			ptr++;
		if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
			return -1;

		/* Ensure timestamp value increases monotonically */
		if (l_lro)
			if (l_lro->cur_tsval > ntohl(*((__be32 *)(ptr+2))))
				return -1;

		/* timestamp echo reply should be non-zero */
		if (*((__be32 *)(ptr+6)) == 0)
			return -1;
	}

	return 0;
}
8493
/*
 * s2io_club_tcp_session - main LRO dispatch for a received TCP packet
 * @buffer: start of the received frame
 * @tcp: out parameter, set to the packet's TCP header
 * @tcp_len: out parameter, set to the packet's TCP payload length
 * @lro: out parameter, set to the session the packet maps to (or NULL)
 * @rxdp: receive descriptor for the frame
 * @sp: per-adapter private data
 *
 * Return codes (consumed by the rx path):
 *   -1 - frame not LRO-capable at L2 (from check_L2_lro_capable)
 *    0 - all sessions in use, *lro is NULL; send packet up unmerged
 *    1 - packet aggregated into an existing session
 *    2 - flush: out-of-order packet or packet failed L3/L4 checks
 *    3 - new session begun with this packet
 *    4 - packet aggregated and session reached max size; flush it
 *    5 - packet not L3/L4 aggregatable; send it up without a session
 */
static int
s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
		      struct RxD_t *rxdp, struct s2io_nic *sp)
{
	struct iphdr *ip;
	struct tcphdr *tcph;
	int ret = 0, i;

	if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
					 rxdp))) {
		DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
			  ip->saddr, ip->daddr);
	} else {
		return ret;
	}

	tcph = (struct tcphdr *)*tcp;
	*tcp_len = get_l4_pyld_length(ip, tcph);
	/* First pass: look for an existing session matching this 4-tuple */
	for (i=0; i<MAX_LRO_SESSIONS; i++) {
		struct lro *l_lro = &sp->lro0_n[i];
		if (l_lro->in_use) {
			if (check_for_socket_match(l_lro, ip, tcph))
				continue;
			/* Sock pair matched */
			*lro = l_lro;

			if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
				DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
					  "0x%x, actual 0x%x\n", __FUNCTION__,
					  (*lro)->tcp_next_seq,
					  ntohl(tcph->seq));

				sp->mac_control.stats_info->
					sw_stat.outof_sequence_pkts++;
				ret = 2;
				break;
			}

			if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
				ret = 1; /* Aggregate */
			else
				ret = 2; /* Flush both */
			break;
		}
	}

	if (ret == 0) {
		/* Before searching for available LRO objects,
		 * check if the pkt is L3/L4 aggregatable. If not
		 * don't create new LRO session. Just send this
		 * packet up.
		 */
		if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
			return 5;
		}

		/* Second pass: claim a free session slot for this flow */
		for (i=0; i<MAX_LRO_SESSIONS; i++) {
			struct lro *l_lro = &sp->lro0_n[i];
			if (!(l_lro->in_use)) {
				*lro = l_lro;
				ret = 3; /* Begin anew */
				break;
			}
		}
	}

	if (ret == 0) { /* sessions exceeded */
		DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
			  __FUNCTION__);
		*lro = NULL;
		return ret;
	}

	switch (ret) {
	case 3:
		initiate_new_session(*lro, buffer, ip, tcph, *tcp_len);
		break;
	case 2:
		/* Finalize headers now; caller flushes the session */
		update_L3L4_header(sp, *lro);
		break;
	case 1:
		aggregate_new_rx(*lro, ip, tcph, *tcp_len);
		if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
			update_L3L4_header(sp, *lro);
			ret = 4; /* Flush the LRO */
		}
		break;
	default:
		DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
			  __FUNCTION__);
		break;
	}

	return ret;
}
8589
1ee6dd77 8590static void clear_lro_session(struct lro *lro)
7d3d0439 8591{
1ee6dd77 8592 static u16 lro_struct_size = sizeof(struct lro);
7d3d0439
RA
8593
8594 memset(lro, 0, lro_struct_size);
8595}
8596
8597static void queue_rx_frame(struct sk_buff *skb)
8598{
8599 struct net_device *dev = skb->dev;
8600
8601 skb->protocol = eth_type_trans(skb, dev);
db874e65
SS
8602 if (napi)
8603 netif_receive_skb(skb);
8604 else
8605 netif_rx(skb);
7d3d0439
RA
8606}
8607
1ee6dd77
RB
8608static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8609 struct sk_buff *skb,
7d3d0439
RA
8610 u32 tcp_len)
8611{
75c30b13 8612 struct sk_buff *first = lro->parent;
7d3d0439
RA
8613
8614 first->len += tcp_len;
8615 first->data_len = lro->frags_len;
8616 skb_pull(skb, (skb->len - tcp_len));
75c30b13
AR
8617 if (skb_shinfo(first)->frag_list)
8618 lro->last_frag->next = skb;
7d3d0439
RA
8619 else
8620 skb_shinfo(first)->frag_list = skb;
372cc597 8621 first->truesize += skb->truesize;
75c30b13 8622 lro->last_frag = skb;
7d3d0439
RA
8623 sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
8624 return;
8625}
d796fdb7
LV
8626
/**
 * s2io_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 * Returns PCI_ERS_RESULT_NEED_RESET to request a slot reset.
 */
static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct s2io_nic *sp = netdev->priv;

	/* Stop the stack from using the device while it is broken */
	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		/* Bring down the card, while avoiding PCI I/O */
		do_s2io_card_down(sp, 0);
	}
	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}
8651
/**
 * s2io_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 * Returns PCI_ERS_RESULT_RECOVERED on success, otherwise
 * PCI_ERS_RESULT_DISCONNECT.
 */
static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct s2io_nic *sp = netdev->priv;

	if (pci_enable_device(pdev)) {
		printk(KERN_ERR "s2io: "
		       "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	/* Put the adapter hardware back into a known state */
	s2io_reset(sp);

	return PCI_ERS_RESULT_RECOVERED;
}
8677
8678/**
8679 * s2io_io_resume - called when traffic can start flowing again.
8680 * @pdev: Pointer to PCI device
8681 *
8682 * This callback is called when the error recovery driver tells
8683 * us that its OK to resume normal operation.
8684 */
8685static void s2io_io_resume(struct pci_dev *pdev)
8686{
8687 struct net_device *netdev = pci_get_drvdata(pdev);
8688 struct s2io_nic *sp = netdev->priv;
8689
8690 if (netif_running(netdev)) {
8691 if (s2io_card_up(sp)) {
8692 printk(KERN_ERR "s2io: "
8693 "Can't bring device back up after reset.\n");
8694 return;
8695 }
8696
8697 if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
8698 s2io_card_down(sp);
8699 printk(KERN_ERR "s2io: "
8700 "Can't resetore mac addr after reset.\n");
8701 return;
8702 }
8703 }
8704
8705 netif_device_attach(netdev);
8706 netif_wake_queue(netdev);
8707}